diff --git a/.azure-pipelines/ci.yml b/.azure-pipelines/ci.yml index bf164d19ef2234f..b5b2765e43844f9 100644 --- a/.azure-pipelines/ci.yml +++ b/.azure-pipelines/ci.yml @@ -1,97 +1,16 @@ -variables: - coverage: false - -trigger: ['main', '3.11', '3.10', '3.9', '3.8', '3.7'] +trigger: ['main', '3.12', '3.11', '3.10', '3.9', '3.8', '3.7'] jobs: - job: Prebuild displayName: Pre-build checks pool: - vmImage: ubuntu-20.04 + vmImage: ubuntu-22.04 steps: - template: ./prebuild-checks.yml -- job: Docs_PR - displayName: Docs PR - dependsOn: Prebuild - condition: and(succeeded(), eq(dependencies.Prebuild.outputs['docs.run'], 'true')) - - pool: - vmImage: ubuntu-20.04 - - steps: - - template: ./docs-steps.yml - parameters: - upload: true - - -- job: macOS_CI_Tests - displayName: macOS CI Tests - dependsOn: Prebuild - #condition: and(succeeded(), eq(dependencies.Prebuild.outputs['tests.run'], 'true')) - # bpo-39837: macOS tests on Azure Pipelines are disabled - condition: false - - variables: - testRunTitle: '$(build.sourceBranchName)-macos' - testRunPlatform: macos - - pool: - vmImage: macos-10.15 - - steps: - - template: ./macos-steps.yml - - -- job: Ubuntu_CI_Tests - displayName: Ubuntu CI Tests - dependsOn: Prebuild - condition: and(succeeded(), eq(dependencies.Prebuild.outputs['tests.run'], 'true')) - - pool: - vmImage: ubuntu-20.04 - - variables: - testRunTitle: '$(build.sourceBranchName)-linux' - testRunPlatform: linux - openssl_version: 1.1.1q - - steps: - - template: ./posix-steps.yml - parameters: - dependencies: apt - - -- job: Ubuntu_Coverage_CI_Tests - displayName: Ubuntu CI Tests (coverage) - dependsOn: Prebuild - condition: | - and( - and( - succeeded(), - eq(variables['coverage'], 'true') - ), - eq(dependencies.Prebuild.outputs['tests.run'], 'true') - ) - - pool: - vmImage: ubuntu-20.04 - - variables: - testRunTitle: '$(Build.SourceBranchName)-linux-coverage' - testRunPlatform: linux-coverage - openssl_version: 1.1.1q - - steps: - - template: ./posix-steps.yml - parameters: - dependencies: apt - coverage: true - - - job: Windows_CI_Tests displayName: Windows CI Tests dependsOn: Prebuild diff --git a/.azure-pipelines/docs-steps.yml b/.azure-pipelines/docs-steps.yml deleted file mode 100644 index 647daff7a033a8e..000000000000000 --- a/.azure-pipelines/docs-steps.yml +++ /dev/null @@ -1,47 +0,0 @@ -parameters: - latex: false - upload: false - -steps: -- checkout: self - clean: true - fetchDepth: 5 - -- task: UsePythonVersion@0 - displayName: 'Use Python 3.6 or later' - inputs: - versionSpec: '>=3.6' - -- script: python -m pip install -r requirements.txt - workingDirectory: '$(build.sourcesDirectory)/Doc' - displayName: 'Install build dependencies' - -- ${{ if ne(parameters.latex, 'true') }}: - - script: make check html PYTHON=python - workingDirectory: '$(build.sourcesDirectory)/Doc' - displayName: 'Build documentation' - -- ${{ if eq(parameters.latex, 'true') }}: - - script: sudo apt-get update && sudo apt-get install -qy --force-yes texlive-full - displayName: 'Install LaTeX' - - - script: make dist PYTHON=python SPHINXBUILD='python -m sphinx' BLURB='python -m blurb' - workingDirectory: '$(build.sourcesDirectory)/Doc' - displayName: 'Build documentation' - -- ${{ if eq(parameters.upload, 'true') }}: - - task: PublishBuildArtifacts@1 - displayName: 'Publish docs' - - inputs: - PathToPublish: '$(build.sourcesDirectory)/Doc/build' - ArtifactName: docs - publishLocation: Container - - - ${{ if eq(parameters.latex, 'true') }}: - - task: PublishBuildArtifacts@1 - displayName: 'Publish dist' - 
inputs: - PathToPublish: '$(build.sourcesDirectory)/Doc/dist' - ArtifactName: docs_dist - publishLocation: Container diff --git a/.azure-pipelines/macos-steps.yml b/.azure-pipelines/macos-steps.yml deleted file mode 100644 index fa38a0df8c87b82..000000000000000 --- a/.azure-pipelines/macos-steps.yml +++ /dev/null @@ -1,27 +0,0 @@ -steps: -- checkout: self - clean: true - fetchDepth: 5 - -- script: ./configure --with-pydebug --with-openssl=/usr/local/opt/openssl --prefix=/opt/python-azdev - displayName: 'Configure CPython (debug)' - -- script: make -j4 - displayName: 'Build CPython' - -- script: make pythoninfo - displayName: 'Display build info' - -- script: make buildbottest TESTOPTS="-j4 -uall,-cpu --junit-xml=$(build.binariesDirectory)/test-results.xml" - displayName: 'Tests' - continueOnError: true - timeoutInMinutes: 30 - -- task: PublishTestResults@2 - displayName: 'Publish Test Results' - inputs: - testResultsFiles: '$(build.binariesDirectory)/test-results.xml' - mergeTestResults: true - testRunTitle: $(testRunTitle) - platform: $(testRunPlatform) - condition: succeededOrFailed() diff --git a/.azure-pipelines/posix-steps.yml b/.azure-pipelines/posix-steps.yml index 9d7c5e1279f46da..e23c7b1dcb55c1d 100644 --- a/.azure-pipelines/posix-steps.yml +++ b/.azure-pipelines/posix-steps.yml @@ -1,10 +1,3 @@ -parameters: - coverage: false - sudo_dependencies: sudo - dependencies: apt - patchcheck: true - xvfb: true - steps: - checkout: self clean: true @@ -14,7 +7,7 @@ steps: - script: sudo setfacl -Rb /home/vsts displayName: 'Workaround ACL issue' -- script: ${{ parameters.sudo_dependencies }} ./.azure-pipelines/posix-deps-${{ parameters.dependencies }}.sh $(openssl_version) +- script: sudo ./.azure-pipelines/posix-deps-apt.sh $(openssl_version) displayName: 'Install dependencies' - script: ./configure --with-pydebug @@ -23,61 +16,11 @@ steps: - script: make -j4 displayName: 'Build CPython' -- ${{ if eq(parameters.coverage, 'true') }}: - - script: ./python -m venv venv && ./venv/bin/python -m pip install -U coverage - displayName: 'Set up virtual environment' - - - script: ./venv/bin/python -m test.pythoninfo - displayName: 'Display build info' - - - script: | - $COMMAND -m coverage run --pylib -m test \ - --fail-env-changed \ - -uall,-cpu \ - --junit-xml=$(build.binariesDirectory)/test-results.xml \ - -x test_multiprocessing_fork \ - -x test_multiprocessing_forkserver \ - -x test_multiprocessing_spawn \ - -x test_concurrent_futures - displayName: 'Tests with coverage' - env: - ${{ if eq(parameters.xvfb, 'true') }}: - COMMAND: xvfb-run ./venv/bin/python - ${{ if ne(parameters.xvfb, 'true') }}: - COMMAND: ./venv/bin/python - - - script: ./venv/bin/python -m coverage xml - displayName: 'Generate coverage.xml' - - - script: source ./venv/bin/activate && bash <(curl -s https://codecov.io/bash) -y .github/codecov.yml - displayName: 'Publish code coverage results' - - -- ${{ if ne(parameters.coverage, 'true') }}: - - script: make pythoninfo - displayName: 'Display build info' - - - script: $COMMAND buildbottest TESTOPTS="-j4 -uall,-cpu --junit-xml=$(build.binariesDirectory)/test-results.xml" - displayName: 'Tests' - env: - ${{ if eq(parameters.xvfb, 'true') }}: - COMMAND: xvfb-run make - ${{ if ne(parameters.xvfb, 'true') }}: - COMMAND: make - -- ${{ if eq(parameters.patchcheck, 'true') }}: - - script: | - git fetch origin - ./python Tools/patchcheck/patchcheck.py --ci true - displayName: 'Run patchcheck.py' - condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) - +- script: 
make pythoninfo + displayName: 'Display build info' -- task: PublishTestResults@2 - displayName: 'Publish Test Results' - inputs: - testResultsFiles: '$(build.binariesDirectory)/test-results.xml' - mergeTestResults: true - testRunTitle: $(testRunTitle) - platform: $(testRunPlatform) - condition: succeededOrFailed() +- script: | + git fetch origin + ./python Tools/patchcheck/patchcheck.py --ci true + displayName: 'Run patchcheck.py' + condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) diff --git a/.azure-pipelines/pr.yml b/.azure-pipelines/pr.yml index 3cbd19fda982f17..335a4b407cb83cb 100644 --- a/.azure-pipelines/pr.yml +++ b/.azure-pipelines/pr.yml @@ -1,123 +1,28 @@ -variables: - coverage: false - -pr: ['main', '3.11', '3.10', '3.9', '3.8', '3.7'] +pr: ['main', '3.12', '3.11', '3.10', '3.9', '3.8', '3.7'] jobs: - job: Prebuild displayName: Pre-build checks pool: - vmImage: ubuntu-20.04 + vmImage: ubuntu-22.04 steps: - template: ./prebuild-checks.yml -- job: Docs_PR - displayName: Docs PR - dependsOn: Prebuild - condition: and(succeeded(), eq(dependencies.Prebuild.outputs['docs.run'], 'true')) - - pool: - vmImage: ubuntu-20.04 - - steps: - - template: ./docs-steps.yml - - -- job: macOS_PR_Tests - displayName: macOS PR Tests - dependsOn: Prebuild - #condition: and(succeeded(), eq(dependencies.Prebuild.outputs['tests.run'], 'true')) - # bpo-39837: macOS tests on Azure Pipelines are disabled - condition: false - - variables: - testRunTitle: '$(system.pullRequest.TargetBranch)-macos' - testRunPlatform: macos - - pool: - vmImage: macos-10.15 - - steps: - - template: ./macos-steps.yml - parameters: - targetBranch: $(System.PullRequest.TargetBranch) - - -- job: Ubuntu_PR_Tests - displayName: Ubuntu PR Tests +- job: Ubuntu_Patchcheck + displayName: Ubuntu patchcheck dependsOn: Prebuild condition: and(succeeded(), eq(dependencies.Prebuild.outputs['tests.run'], 'true')) pool: - vmImage: ubuntu-20.04 + vmImage: ubuntu-22.04 variables: testRunTitle: '$(system.pullRequest.TargetBranch)-linux' testRunPlatform: linux - openssl_version: 1.1.1q - - steps: - - template: ./posix-steps.yml - parameters: - dependencies: apt - - -- job: Ubuntu_Coverage_PR_Tests - displayName: Ubuntu PR Tests (coverage) - dependsOn: Prebuild - condition: | - and( - and( - succeeded(), - eq(variables['coverage'], 'true') - ), - eq(dependencies.Prebuild.outputs['tests.run'], 'true') - ) - - pool: - vmImage: ubuntu-20.04 - - variables: - testRunTitle: '$(Build.SourceBranchName)-linux-coverage' - testRunPlatform: linux-coverage - openssl_version: 1.1.1q + openssl_version: 1.1.1u steps: - template: ./posix-steps.yml - parameters: - dependencies: apt - coverage: true - - -- job: Windows_PR_Tests - displayName: Windows PR Tests - dependsOn: Prebuild - condition: and(succeeded(), eq(dependencies.Prebuild.outputs['tests.run'], 'true')) - - pool: - vmImage: windows-2022 - - strategy: - matrix: - win32: - arch: win32 - buildOpt: '-p Win32' - testRunTitle: '$(System.PullRequest.TargetBranch)-win32' - testRunPlatform: win32 - win64: - arch: amd64 - buildOpt: '-p x64' - testRunTitle: '$(System.PullRequest.TargetBranch)-win64' - testRunPlatform: win64 - winarm64: - arch: arm64 - buildOpt: '-p arm64' - maxParallel: 4 - - steps: - - template: ./windows-steps.yml - parameters: - targetBranch: $(System.PullRequest.TargetBranch) diff --git a/.azure-pipelines/prebuild-checks.yml b/.azure-pipelines/prebuild-checks.yml index 30ff642d1267a1e..2c6460d2386735f 100644 --- a/.azure-pipelines/prebuild-checks.yml +++ 
b/.azure-pipelines/prebuild-checks.yml @@ -11,18 +11,6 @@ steps: displayName: Fetch comparison tree condition: and(succeeded(), variables['System.PullRequest.TargetBranch']) -- script: | - if ! git diff --name-only $(diffTarget) | grep -qE '(\.rst$|^Doc|^Misc)' - then - echo "No docs were updated: docs.run=false" - echo "##vso[task.setvariable variable=run;isOutput=true]false" - else - echo "Docs were updated: docs.run=true" - echo "##vso[task.setvariable variable=run;isOutput=true]true" - fi - displayName: Detect documentation changes - name: docs - - script: | if ! git diff --name-only $(diffTarget) | grep -qvE '(\.rst$|^Doc|^Misc)' then diff --git a/.azure-pipelines/windows-layout-steps.yml b/.azure-pipelines/windows-layout-steps.yml index e15729fac3443db..afd897817904947 100644 --- a/.azure-pipelines/windows-layout-steps.yml +++ b/.azure-pipelines/windows-layout-steps.yml @@ -12,7 +12,7 @@ steps: displayName: Show layout info (${{ parameters.kind }}) - ${{ if eq(parameters.fulltest, 'true') }}: - - script: .\python.exe -m test -q -uall -u-cpu -rwW --slowest --timeout=1200 -j0 --junit-xml="$(Build.BinariesDirectory)\test-results-${{ parameters.kind }}.xml" --tempdir "$(Build.BinariesDirectory)\tmp-${{ parameters.kind }}-$(arch)" + - script: .\python.exe -m test -q -uall -u-cpu -rwW --slowest --timeout=1200 -j0 --junit-xml="$(Build.BinariesDirectory)\test-results-${{ parameters.kind }}.xml" --tempdir "$(Build.BinariesDirectory)\tmp-${{ parameters.kind }}-$(arch)" -i test_launcher workingDirectory: $(Build.BinariesDirectory)\layout-${{ parameters.kind }}-$(arch) displayName: ${{ parameters.kind }} Tests env: diff --git a/.cirrus-DISABLED.yml b/.cirrus-DISABLED.yml new file mode 100644 index 000000000000000..f20835cb6cac2af --- /dev/null +++ b/.cirrus-DISABLED.yml @@ -0,0 +1,29 @@ +# gh-91960: Job disabled since Python is out of free credit (September 2023): +# https://discuss.python.org/t/freebsd-gets-a-new-cirrus-ci-github-action-job-and-a-new-buildbot/33122/26 + +freebsd_task: + freebsd_instance: + matrix: + - image: freebsd-13-2-release-amd64 + # Turn off TCP and UDP blackhole. It is not enabled by default in FreeBSD, + # but it is in the FreeBSD GCE images as used by Cirrus-CI. It causes even + # local local connections to fail with ETIMEDOUT instead of ECONNREFUSED. + # For more information see https://reviews.freebsd.org/D41751 and + # https://github.com/cirruslabs/cirrus-ci-docs/issues/483. 
+ sysctl_script: + - sysctl net.inet.tcp.blackhole=0 + - sysctl net.inet.udp.blackhole=0 + configure_script: + - mkdir build + - cd build + - ../configure --with-pydebug + build_script: + - cd build + - make -j$(sysctl -n hw.ncpu) + pythoninfo_script: + - cd build + - make pythoninfo + test_script: + - cd build + # dtrace fails to build on FreeBSD - see gh-73263 + - make buildbottest TESTOPTS="-j0 -x test_dtrace --timeout=600" diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000000000..b5d94317e8aa8b0 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,24 @@ +[run] +branch = True + +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + # Don't complain if non-runnable code isn't run: + if 0: + if __name__ == .__main__.: + raise AssertionError\( + + # Empty bodies in protocols or abstract methods + ^\s*def [a-zA-Z0-9_]+\(.*\)(\s*->.*)?:\s*\.\.\.(\s*#.*)?$ + ^\s*\.\.\.(\s*#.*)?$ + + .*# pragma: no cover + .*# pragma: no branch + + # Additions for IDLE: + .*# htest # + if not (_htest or _utest): + if not .*_utest: + if .*_htest: + diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 000000000000000..590d7834b2b8be4 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,24 @@ +FROM docker.io/library/fedora:37 + +ENV CC=clang + +ENV WASI_SDK_VERSION=20 +ENV WASI_SDK_PATH=/opt/wasi-sdk + +ENV WASMTIME_HOME=/opt/wasmtime +ENV WASMTIME_VERSION=9.0.1 +ENV WASMTIME_CPU_ARCH=x86_64 + +RUN dnf -y --nodocs --setopt=install_weak_deps=False install /usr/bin/{blurb,clang,curl,git,ln,tar,xz} 'dnf-command(builddep)' && \ + dnf -y --nodocs --setopt=install_weak_deps=False builddep python3 && \ + dnf -y clean all + +RUN mkdir ${WASI_SDK_PATH} && \ + curl --location https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-${WASI_SDK_VERSION}/wasi-sdk-${WASI_SDK_VERSION}.0-linux.tar.gz | \ + tar --strip-components 1 --directory ${WASI_SDK_PATH} --extract --gunzip + +RUN mkdir --parents ${WASMTIME_HOME} && \ + curl --location "https://github.com/bytecodealliance/wasmtime/releases/download/v${WASMTIME_VERSION}/wasmtime-v${WASMTIME_VERSION}-${WASMTIME_CPU_ARCH}-linux.tar.xz" | \ + xz --decompress | \ + tar --strip-components 1 --directory ${WASMTIME_HOME} -x && \ + ln -s ${WASMTIME_HOME}/wasmtime /usr/local/bin diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000000000..0dc303015df5c78 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,88 @@ +{ + "build": { + "dockerfile": "Dockerfile" + }, + "onCreateCommand": [ + // Install common tooling. + "dnf", + "install", + "-y", + "which", + "zsh", + "fish", + // For umask fix below. + "/usr/bin/setfacl" + ], + "updateContentCommand": { + // Using the shell for `nproc` usage. + "python": "./configure --config-cache --with-pydebug && make -s -j `nproc`", + "docs": [ + "make", + "--directory", + "Doc", + "venv", + "html" + ] + }, + "postCreateCommand": { + // https://github.com/orgs/community/discussions/26026 + "umask fix: workspace": ["sudo", "setfacl", "-bnR", "."], + "umask fix: /tmp": ["sudo", "setfacl", "-bnR", "/tmp"] + }, + "customizations": { + "vscode": { + "extensions": [ + // Highlighting for Parser/Python.asdl. + "brettcannon.zephyr-asdl", + // Highlighting for configure.ac. + "maelvalais.autoconf", + // C auto-complete. + "ms-vscode.cpptools", + // To view HTML build of docs. + "ms-vscode.live-server", + // Python auto-complete. 
+ "ms-python.python" + ], + "settings": { + "C_Cpp.default.compilerPath": "/usr/bin/clang", + "C_Cpp.default.cStandard": "c11", + "C_Cpp.default.defines": [ + "CONFIG_64", + "Py_BUILD_CORE" + ], + "C_Cpp.default.includePath": [ + "${workspaceFolder}/*", + "${workspaceFolder}/Include/**" + ], + // https://github.com/microsoft/vscode-cpptools/issues/10732 + "C_Cpp.errorSquiggles": "disabled", + "editor.insertSpaces": true, + "editor.rulers": [ + 80 + ], + "editor.tabSize": 4, + "editor.trimAutoWhitespace": true, + "files.associations": { + "*.h": "c" + }, + "files.encoding": "utf8", + "files.eol": "\n", + "files.insertFinalNewline": true, + "files.trimTrailingWhitespace": true, + "python.analysis.diagnosticSeverityOverrides": { + // Complains about shadowing the stdlib w/ the stdlib. + "reportShadowedImports": "none", + // Doesn't like _frozen_importlib. + "reportMissingImports": "none" + }, + "python.analysis.extraPaths": [ + "Lib" + ], + "python.defaultInterpreterPath": "./python", + "[restructuredtext]": { + "editor.tabSize": 3 + } + } + } + } +} diff --git a/.editorconfig b/.editorconfig index 81445d2d79c7394..0169eed951cd3f1 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,5 +8,8 @@ indent_style = space [*.{py,c,cpp,h}] indent_size = 4 +[*.rst] +indent_size = 3 + [*.yml] indent_size = 2 diff --git a/.gitattributes b/.gitattributes index 75b33ae5c3645e0..8c37dbbb6310227 100644 --- a/.gitattributes +++ b/.gitattributes @@ -18,20 +18,23 @@ *.zip binary # Specific binary files -Lib/test/sndhdrdata/sndhdr.* binary PC/classicAppCompat.* binary # Text files that should not be subject to eol conversion [attr]noeol -text Lib/test/cjkencodings/* noeol -Lib/test/coding20731.py noeol +Lib/test/tokenizedata/coding20731.py noeol Lib/test/decimaltestdata/*.decTest noeol Lib/test/test_email/data/*.txt noeol Lib/test/test_importlib/resources/data01/* noeol Lib/test/test_importlib/resources/namespacedata01/* noeol Lib/test/xmltestdata/* noeol +# Shell scripts should have LF even on Windows because of Cygwin +Lib/venv/scripts/common/activate text eol=lf +Lib/venv/scripts/posix/* text eol=lf + # CRLF files [attr]dos text eol=crlf @@ -63,15 +66,20 @@ PCbuild/readme.txt dos [attr]generated linguist-generated=true diff=generated **/clinic/*.c.h generated +**/clinic/*.cpp.h generated +**/clinic/*.h.h generated *_db.h generated Doc/data/stable_abi.dat generated Doc/library/token-list.inc generated Include/internal/pycore_ast.h generated Include/internal/pycore_ast_state.h generated Include/internal/pycore_opcode.h generated -Include/internal/pycore_runtime_init_generated.h generated +Include/internal/pycore_opcode_metadata.h generated +Include/internal/pycore_*_generated.h generated Include/opcode.h generated +Include/opcode_ids.h generated Include/token.h generated +Lib/_opcode_metadata.py generated Lib/keyword.py generated Lib/test/levenshtein_examples.json generated Lib/test/test_stable_abi_ctypes.py generated @@ -82,7 +90,9 @@ Parser/parser.c generated Parser/token.c generated Programs/test_frozenmain.h generated Python/Python-ast.c generated +Python/executor_cases.c.h generated Python/generated_cases.c.h generated +Python/abstract_interp_cases.c.h generated Python/opcode_targets.h generated Python/stdlib_module_names.h generated Tools/peg_generator/pegen/grammar_parser.py generated diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 74081b2ef2e8af7..1925363cbeb46e1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -5,10 +5,17 @@ # https://git-scm.com/docs/gitignore#_pattern_format # 
GitHub -.github/** @ezio-melotti +.github/** @ezio-melotti @hugovk + +# pre-commit +.pre-commit-config.yaml @hugovk @AlexWaygood +.ruff.toml @hugovk @AlexWaygood + +# Build system +configure* @erlend-aasland @corona10 # asyncio -**/*asyncio* @1st1 @asvetlov @gvanrossum +**/*asyncio* @1st1 @asvetlov @gvanrossum @kumaraditya303 @willingc # Core **/*context* @1st1 @@ -16,15 +23,23 @@ **/*hamt* @1st1 Objects/set* @rhettinger Objects/dict* @methane @markshannon +Objects/typevarobject.c @JelleZijlstra Objects/type* @markshannon Objects/codeobject.c @markshannon Objects/frameobject.c @markshannon Objects/call.c @markshannon -Python/ceval.c @markshannon +Python/ceval*.c @markshannon @gvanrossum +Python/ceval*.h @markshannon @gvanrossum Python/compile.c @markshannon @iritkatriel +Python/assemble.c @markshannon @iritkatriel +Python/flowgraph.c @markshannon @iritkatriel Python/ast_opt.c @isidentical +Python/bytecodes.c @markshannon @gvanrossum +Python/optimizer*.c @markshannon @gvanrossum Lib/test/test_patma.py @brandtbucher Lib/test/test_peepholer.py @brandtbucher +Lib/test/test_type_*.py @JelleZijlstra +Lib/test/test_capi/test_misc.py @markshannon @gvanrossum # Exceptions Lib/traceback.py @iritkatriel @@ -58,12 +73,9 @@ Python/traceback.c @iritkatriel /Tools/build/parse_html5_entities.py @ezio-melotti # Import (including importlib). -# Ignoring importlib.h so as to not get flagged on -# all pull requests that change the emitted -# bytecode. -**/*import*.c @brettcannon @encukou @ericsnowcurrently @ncoghlan @warsaw -**/*import*.py @brettcannon @encukou @ericsnowcurrently @ncoghlan @warsaw -**/*importlib/resources/* @jaraco @warsaw @brettcannon +**/*import* @brettcannon @ericsnowcurrently @ncoghlan @warsaw +/Python/import.c @kumaraditya303 +**/*importlib/resources/* @jaraco @warsaw @FFY00 **/importlib/metadata/* @jaraco @warsaw # Dates and times @@ -73,7 +85,7 @@ Doc/library/time.rst @pganssle @abalkin Lib/test/test_time.py @pganssle @abalkin Modules/timemodule.c @pganssle @abalkin Python/pytime.c @pganssle @abalkin -Include/pytime.h @pganssle @abalkin +Include/internal/pycore_time.h @pganssle @abalkin # Email and related **/*mail* @python/email-team @@ -91,6 +103,11 @@ Include/pytime.h @pganssle @abalkin /Tools/peg_generator/ @pablogsal @lysnikolaou /Lib/test/test_peg_generator/ @pablogsal @lysnikolaou /Grammar/python.gram @pablogsal @lysnikolaou +/Lib/tokenize.py @pablogsal @lysnikolaou +/Lib/test/test_tokenize.py @pablogsal @lysnikolaou + +# Code generator +/Tools/cases_generator/ @gvanrossum # AST Python/ast.c @isidentical @@ -110,6 +127,12 @@ Lib/ast.py @isidentical /Lib/test/test_subprocess.py @gpshead /Modules/*subprocess* @gpshead +# Limited C API & stable ABI +Tools/build/stable_abi.py @encukou +Misc/stable_abi.toml @encukou +Doc/data/*.abi @encukou +Doc/c-api/stable.rst @encukou + # Windows /PC/ @python/windows-team /PCbuild/ @python/windows-team @@ -135,10 +158,8 @@ Lib/ast.py @isidentical **/*idlelib* @terryjreedy -**/*typing* @gvanrossum @Fidget-Spinner @JelleZijlstra @AlexWaygood +**/*typing* @JelleZijlstra @AlexWaygood -**/*asyncore @giampaolo -**/*asynchat @giampaolo **/*ftplib @giampaolo **/*shutil @giampaolo @@ -146,11 +167,26 @@ Lib/ast.py @isidentical **/*cgi* @ethanfurman **/*tarfile* @ethanfurman -**/*tomllib* @encukou +**/*tomllib* @encukou @hauntsaninja + +**/*sysconfig* @FFY00 + +**/*cjkcodecs* @corona10 # macOS /Mac/ @python/macos-team **/*osx_support* @python/macos-team # pathlib -**/*pathlib* @brettcannon +**/*pathlib* @barneygale + +# zipfile.Path +**/*zipfile/_path/* 
@jaraco + +# Argument Clinic +/Tools/clinic/** @erlend-aasland @AlexWaygood +/Lib/test/test_clinic.py @erlend-aasland @AlexWaygood +Doc/howto/clinic.rst @erlend-aasland + +# WebAssembly +/Tools/wasm/ @brettcannon diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md deleted file mode 100644 index 1d93e0735e50f33..000000000000000 --- a/.github/ISSUE_TEMPLATE/bug.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -name: Bug report -about: Submit a bug report -labels: "type-bug" ---- - - - -# Bug report - -A clear and concise description of what the bug is. -Include a minimal, reproducible example (https://stackoverflow.com/help/minimal-reproducible-example), if possible. - -# Your environment - - - -- CPython versions tested on: -- Operating system and architecture: - - diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 000000000000000..395516f29b037c5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,56 @@ +name: Bug report +description: Submit a bug report +labels: ["type-bug"] +body: + - type: markdown + attributes: + value: | + **New to Python?** + + For help or advice on using Python, try one of the following options instead of opening a GitHub issue: + + - Asking on [Discourse](https://discuss.python.org/c/users/7) or [Stack Overflow](https://stackoverflow.com) + - Reading the [Python tutorial](https://docs.python.org/3/tutorial/) + - Emailing [python-list](https://mail.python.org/mailman/listinfo/python-list) + + Make sure to also search the [CPython issue tracker](https://github.com/python/cpython/issues?q=is%3Aissue+sort%3Acreated-desc) to check that the bug has not already been reported. + - type: textarea + attributes: + label: "Bug description:" + description: > + Give a clear and concise description of what happened. + Include a [minimal, reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) if possible. + [Copy and paste code where possible rather than using screenshots](https://meta.stackoverflow.com/a/285557/13990016), + and put any code blocks inside triple backticks. + + value: | + ```python + # Add a code block here, if required + ``` + validations: + required: true + - type: dropdown + attributes: + label: "CPython versions tested on:" + multiple: true + options: + - "3.8" + - "3.9" + - "3.10" + - "3.11" + - "3.12" + - "3.13" + - "CPython main branch" + validations: + required: true + - type: dropdown + attributes: + label: "Operating systems tested on:" + multiple: true + options: + - Linux + - macOS + - Windows + - Other + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/crash.md b/.github/ISSUE_TEMPLATE/crash.md deleted file mode 100644 index dad3423db03410f..000000000000000 --- a/.github/ISSUE_TEMPLATE/crash.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -name: Crash report -about: A hard crash of the interpreter, possibly with a core dump -labels: "type-crash" ---- - - - -# Crash report - -Tell us what happened, ideally including a minimal, reproducible example (https://stackoverflow.com/help/minimal-reproducible-example). - -# Error messages - -Enter any relevant error message caused by the crash, including a core dump if there is one. 
- -# Your environment - - - -- CPython versions tested on: -- Operating system and architecture: - - diff --git a/.github/ISSUE_TEMPLATE/crash.yml b/.github/ISSUE_TEMPLATE/crash.yml new file mode 100644 index 000000000000000..c14d7cf2599d4c1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/crash.yml @@ -0,0 +1,54 @@ +name: Crash report +description: A hard crash of the interpreter, possibly with a core dump +labels: ["type-crash"] +body: + - type: markdown + attributes: + value: | + This form is for hard crashes of the Python interpreter, segmentation faults, failed C-level assertions, and similar. Unexpected exceptions raised from Python functions in the standard library count as bugs rather than crashes. + + The CPython interpreter is written in a different programming language, C. A "CPython crash" is when Python itself fails, leading to a traceback in the C stack. + - type: textarea + attributes: + label: What happened? + description: > + Include a [minimal, reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) if possible. + [Copy and paste code where possible rather than using screenshots](https://meta.stackoverflow.com/a/285557/13990016), + and put any code blocks inside triple backticks. + + value: | + ```python + # Add a code block here, if required + ``` + validations: + required: true + - type: dropdown + attributes: + label: "CPython versions tested on:" + multiple: true + options: + - "3.8" + - "3.9" + - "3.10" + - "3.11" + - "3.12" + - "CPython main branch" + validations: + required: true + - type: dropdown + attributes: + label: "Operating systems tested on:" + multiple: true + options: + - Linux + - macOS + - Windows + - Other + validations: + required: false + - type: input + attributes: + label: "Output from running 'python -VV' on the command line:" + description: If you tested with multiple operating systems or architectures, feel free to provide details in the main bug description. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md deleted file mode 100644 index ed051e945f81207..000000000000000 --- a/.github/ISSUE_TEMPLATE/feature.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -name: Feature or enhancement -about: Submit a proposal for a new CPython feature or enhancement -labels: "type-feature" ---- - -# Feature or enhancement - -(A clear and concise description of your proposal.) - -# Pitch - -(Explain why this feature or enhancement should be implemented and how it would be used. - Add examples, if applicable.) - -# Previous discussion - - - - - diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml new file mode 100644 index 000000000000000..4361ab2bf827fb6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -0,0 +1,40 @@ +name: Feature or enhancement +description: Submit a proposal for a new CPython feature or enhancement +labels: ["type-feature"] +body: + - type: markdown + attributes: + value: | + # Proposing a feature to CPython? + + You'll need to demonstrate widespread support for your idea among the community. + + Major feature proposals should generally be discussed on [Discourse](https://discuss.python.org/c/ideas/6) before opening a GitHub issue. Wait until it's clear that most people support your idea before filling in this form. + - type: textarea + attributes: + label: "Proposal:" + description: > + Explain your proposal, why it should be implemented, and how it would be used. + Add examples, if applicable. 
+ Put any code blocks inside triple backticks. + value: | + ```python + # Add a code block here, if required + ``` + validations: + required: true + - type: dropdown + attributes: + label: Has this already been discussed elsewhere? + options: + - No response given + - I have already discussed this feature proposal on Discourse + - This is a minor feature, which does not need previous discussion elsewhere + multiple: false + validations: + required: true + - type: textarea + attributes: + label: "Links to previous discussion of this feature:" + validations: + required: false diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 555e246e402bf93..c8a3165d6903645 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -12,3 +12,10 @@ updates: update-types: - "version-update:semver-minor" - "version-update:semver-patch" + - package-ecosystem: "pip" + directory: "/Tools/" + schedule: + interval: "monthly" + labels: + - "skip issue" + - "skip news" diff --git a/.github/problem-matchers/sphinx.json b/.github/problem-matchers/sphinx.json deleted file mode 100644 index 09848608a7b0346..000000000000000 --- a/.github/problem-matchers/sphinx.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "problemMatcher": [ - { - "owner": "sphinx-problem-matcher", - "pattern": [ - { - "regexp": "^(.*):(\\d+):\\s+(\\w*):\\s+(.*)$", - "file": 1, - "line": 2, - "severity": 3, - "message": 4 - } - ] - }, - { - "owner": "sphinx-problem-matcher-loose", - "pattern": [ - { - "_comment": "A bit of a looser pattern, doesn't look for line numbers, just looks for file names relying on them to start with / and end with .rst", - "regexp": "(\/.*\\.rst):\\s+(\\w*):\\s+(.*)$", - "file": 1, - "severity": 2, - "message": 3 - } - ] - }, - { - "owner": "sphinx-problem-matcher-loose-no-severity", - "pattern": [ - { - "_comment": "Looks for file names ending with .rst and line numbers but without severity", - "regexp": "^(.*\\.rst):(\\d+):(.*)$", - "file": 1, - "line": 2, - "message": 3 - } - ] - } - ] -} \ No newline at end of file diff --git a/.github/workflows/add-issue-header.yml b/.github/workflows/add-issue-header.yml new file mode 100644 index 000000000000000..1ef9178b95e5f62 --- /dev/null +++ b/.github/workflows/add-issue-header.yml @@ -0,0 +1,53 @@ +name: Add issue header +# Automatically edits an issue's descriptions with a header, +# one of: +# +# - Bug report +# - Crash report +# - Feature or enhancement + +on: + issues: + types: + # Only ever run once + - opened + + +jobs: + add-header: + runs-on: ubuntu-latest + permissions: + issues: write + steps: + - uses: actions/github-script@v6 + with: + # language=JavaScript + script: | + // https://devguide.python.org/triage/labels/#type-labels + const HEADERS = new Map([ + ['type-bug', 'Bug report'], + ['type-crash', 'Crash report'], + ['type-feature', 'Feature or enhancement'], + ]); + let issue_data = await github.rest.issues.get({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo + }).then(issue => issue.data); + let header = ''; + for (const label_data of issue_data.labels) { + const label_name = (typeof label_data === 'string') ? 
label_data : label_data.name; + if (HEADERS.has(label_name)) { + header = HEADERS.get(label_name); + break; + } + } + if (header !== '') { + console.log(`Setting new header: ${header}`); + await github.rest.issues.update({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `# ${header}\n\n${issue_data.body.replaceAll('\r', '')}` + }); + } diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f0544c0962e1239..cfb36c8c32e18dd 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,35 +8,40 @@ on: push: branches: - 'main' + - '3.12' - '3.11' - '3.10' - '3.9' - '3.8' - - '3.7' pull_request: branches: - 'main' + - '3.12' - '3.11' - '3.10' - '3.9' - '3.8' - - '3.7' permissions: contents: read concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}-reusable cancel-in-progress: true jobs: check_source: name: 'Check for source changes' runs-on: ubuntu-latest + timeout-minutes: 10 outputs: + run-docs: ${{ steps.docs-changes.outputs.run-docs || false }} run_tests: ${{ steps.check.outputs.run_tests }} + run_hypothesis: ${{ steps.check.outputs.run_hypothesis }} + run_cifuzz: ${{ steps.check.outputs.run_cifuzz }} + config_hash: ${{ steps.config_hash.outputs.hash }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Check for source changes id: check run: | @@ -57,40 +62,101 @@ jobs: # into the PR branch anyway. # # https://github.com/python/core-workflow/issues/373 - git diff --name-only origin/$GITHUB_BASE_REF.. | grep -qvE '(\.rst$|^Doc|^Misc)' && echo "run_tests=true" >> $GITHUB_OUTPUT || true + git diff --name-only origin/$GITHUB_BASE_REF.. | grep -qvE '(\.rst$|^Doc|^Misc|^\.pre-commit-config\.yaml$|\.ruff\.toml$)' && echo "run_tests=true" >> $GITHUB_OUTPUT || true fi + # Check if we should run hypothesis tests + GIT_BRANCH=${GITHUB_BASE_REF:-${GITHUB_REF#refs/heads/}} + echo $GIT_BRANCH + if $(echo "$GIT_BRANCH" | grep -q -w '3\.\(8\|9\|10\|11\)'); then + echo "Branch too old for hypothesis tests" + echo "run_hypothesis=false" >> $GITHUB_OUTPUT + else + echo "Run hypothesis tests" + echo "run_hypothesis=true" >> $GITHUB_OUTPUT + fi + + # oss-fuzz maintains a configuration for fuzzing the main branch of + # CPython, so CIFuzz should be run only for code that is likely to be + # merged into the main branch; compatibility with older branches may + # be broken. + FUZZ_RELEVANT_FILES='(\.c$|\.h$|\.cpp$|^configure$|^\.github/workflows/build\.yml$|^Modules/_xxtestfuzz)' + if [ "$GITHUB_BASE_REF" = "main" ] && [ "$(git diff --name-only origin/$GITHUB_BASE_REF.. | grep -qE $FUZZ_RELEVANT_FILES; echo $?)" -eq 0 ]; then + # The tests are pretty slow so they are executed only for PRs + # changing relevant files. 
+ echo "Run CIFuzz tests" + echo "run_cifuzz=true" >> $GITHUB_OUTPUT + else + echo "Branch too old for CIFuzz tests; or no C files were changed" + echo "run_cifuzz=false" >> $GITHUB_OUTPUT + fi + - name: Compute hash for config cache key + id: config_hash + run: | + echo "hash=${{ hashFiles('configure', 'configure.ac', '.github/workflows/build.yml') }}" >> $GITHUB_OUTPUT + - name: Get a list of the changed documentation-related files + if: github.event_name == 'pull_request' + id: changed-docs-files + uses: Ana06/get-changed-files@v2.2.0 + with: + filter: | + Doc/** + Misc/** + .github/workflows/reusable-docs.yml + format: csv # works for paths with spaces + - name: Check for docs changes + if: >- + github.event_name == 'pull_request' + && steps.changed-docs-files.outputs.added_modified_renamed != '' + id: docs-changes + run: | + echo "run-docs=true" >> "${GITHUB_OUTPUT}" + + check-docs: + name: Docs + needs: check_source + if: fromJSON(needs.check_source.outputs.run-docs) + uses: ./.github/workflows/reusable-docs.yml + check_generated_files: name: 'Check if generated files are up to date' - runs-on: ubuntu-latest + # Don't use ubuntu-latest but a specific version to make the job + # reproducible: to get the same tools versions (autoconf, aclocal, ...) + runs-on: ubuntu-22.04 + timeout-minutes: 60 needs: check_source if: needs.check_source.outputs.run_tests == 'true' steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + python-version: '3.x' + - name: Restore config.cache + uses: actions/cache@v3 + with: + path: config.cache + key: ${{ github.job }}-${{ runner.os }}-${{ needs.check_source.outputs.config_hash }}-${{ env.pythonLocation }} - name: Install Dependencies run: sudo ./.github/workflows/posix-deps-apt.sh - name: Add ccache to PATH run: echo "PATH=/usr/lib/ccache:$PATH" >> $GITHUB_ENV - name: Configure ccache action uses: hendrikmuhs/ccache-action@v1.2 - - name: Check Autoconf version 2.69 and aclocal 1.16.3 + - name: Check Autoconf and aclocal versions run: | - grep "Generated by GNU Autoconf 2.69" configure - grep "aclocal 1.16.3" aclocal.m4 + grep "Generated by GNU Autoconf 2.71" configure + grep "aclocal 1.16.5" aclocal.m4 grep -q "runstatedir" configure grep -q "PKG_PROG_PKG_CONFIG" aclocal.m4 - name: Configure CPython run: | # Build Python with the libpython dynamic library - ./configure --with-pydebug --enable-shared - - name: Regenerate autoconf files with container image - run: make regen-configure + ./configure --config-cache --with-pydebug --enable-shared + - name: Regenerate autoconf files + # Same command used by Tools/build/regen-configure.sh ($AUTORECONF) + run: autoreconf -ivf -Werror - name: Build CPython run: | - # Deepfreeze will usually cause global objects to be added or removed, - # so we run it before regen-global-objects gets rum (in regen-all). 
- make regen-deepfreeze make -j4 regen-all make regen-stdlib-module-names - name: Check for changes @@ -111,75 +177,90 @@ jobs: run: make smelly - name: Check limited ABI symbols run: make check-limited-abi + - name: Check for unsupported C global variables + if: github.event_name == 'pull_request' # $GITHUB_EVENT_NAME + run: make check-c-globals - build_win32: - name: 'Windows (x86)' - runs-on: windows-latest + build_windows: + name: 'Windows' needs: check_source if: needs.check_source.outputs.run_tests == 'true' - env: - IncludeUwp: 'true' - steps: - - uses: actions/checkout@v3 - - name: Build CPython - run: .\PCbuild\build.bat -e -d -p Win32 - timeout-minutes: 30 - - name: Display build info - run: .\python.bat -m test.pythoninfo - - name: Tests - run: .\PCbuild\rt.bat -p Win32 -d -q -uall -u-cpu -rwW --slowest --timeout=1200 -j0 + uses: ./.github/workflows/reusable-windows.yml - build_win_amd64: - name: 'Windows (x64)' - runs-on: windows-latest + build_windows_free_threaded: + name: 'Windows (free-threaded)' needs: check_source if: needs.check_source.outputs.run_tests == 'true' - env: - IncludeUwp: 'true' - steps: - - uses: actions/checkout@v3 - - name: Register MSVC problem matcher - run: echo "::add-matcher::.github/problem-matchers/msvc.json" - - name: Build CPython - run: .\PCbuild\build.bat -e -d -p x64 - timeout-minutes: 30 - - name: Display build info - run: .\python.bat -m test.pythoninfo - - name: Tests - run: .\PCbuild\rt.bat -p x64 -d -q -uall -u-cpu -rwW --slowest --timeout=1200 -j0 + uses: ./.github/workflows/reusable-windows.yml + with: + free-threaded: true build_macos: name: 'macOS' - runs-on: macos-latest needs: check_source if: needs.check_source.outputs.run_tests == 'true' - env: - PYTHONSTRICTEXTENSIONBUILD: 1 - steps: - - uses: actions/checkout@v3 - - name: Prepare homebrew environment variables - run: | - echo "LDFLAGS=-L$(brew --prefix tcl-tk)/lib" >> $GITHUB_ENV - echo "PKG_CONFIG_PATH=$(brew --prefix openssl@1.1)/lib/pkgconfig:$(brew --prefix tcl-tk)/lib/pkgconfig" >> $GITHUB_ENV - - name: Configure CPython - run: ./configure --with-pydebug --prefix=/opt/python-dev - - name: Build CPython - run: make -j4 - - name: Display build info - run: make pythoninfo - - name: Tests - run: make buildbottest TESTOPTS="-j4 -uall,-cpu" + uses: ./.github/workflows/reusable-macos.yml + with: + config_hash: ${{ needs.check_source.outputs.config_hash }} + + build_macos_free_threaded: + name: 'macOS (free-threaded)' + needs: check_source + if: needs.check_source.outputs.run_tests == 'true' + uses: ./.github/workflows/reusable-macos.yml + with: + config_hash: ${{ needs.check_source.outputs.config_hash }} + free-threaded: true build_ubuntu: name: 'Ubuntu' + needs: check_source + if: needs.check_source.outputs.run_tests == 'true' + uses: ./.github/workflows/reusable-ubuntu.yml + with: + config_hash: ${{ needs.check_source.outputs.config_hash }} + options: | + ../cpython-ro-srcdir/configure \ + --config-cache \ + --with-pydebug \ + --with-openssl=$OPENSSL_DIR + + build_ubuntu_free_threaded: + name: 'Ubuntu (free-threaded)' + needs: check_source + if: needs.check_source.outputs.run_tests == 'true' + uses: ./.github/workflows/reusable-ubuntu.yml + with: + config_hash: ${{ needs.check_source.outputs.config_hash }} + options: | + ../cpython-ro-srcdir/configure \ + --config-cache \ + --with-pydebug \ + --with-openssl=$OPENSSL_DIR \ + --disable-gil + + build_ubuntu_ssltests: + name: 'Ubuntu SSL tests with OpenSSL' runs-on: ubuntu-20.04 + timeout-minutes: 60 needs: check_source if: 
needs.check_source.outputs.run_tests == 'true' + strategy: + fail-fast: false + matrix: + openssl_ver: [1.1.1w, 3.0.11, 3.1.3] env: - OPENSSL_VER: 1.1.1q - PYTHONSTRICTEXTENSIONBUILD: 1 + OPENSSL_VER: ${{ matrix.openssl_ver }} + MULTISSL_DIR: ${{ github.workspace }}/multissl + OPENSSL_DIR: ${{ github.workspace }}/multissl/openssl/${{ matrix.openssl_ver }} + LD_LIBRARY_PATH: ${{ github.workspace }}/multissl/openssl/${{ matrix.openssl_ver }}/lib steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - name: Restore config.cache + uses: actions/cache@v3 + with: + path: config.cache + key: ${{ github.job }}-${{ runner.os }}-${{ needs.check_source.outputs.config_hash }} - name: Register gcc problem matcher run: echo "::add-matcher::.github/problem-matchers/gcc.json" - name: Install Dependencies @@ -203,46 +284,26 @@ jobs: echo "PATH=/usr/lib/ccache:$PATH" >> $GITHUB_ENV - name: Configure ccache action uses: hendrikmuhs/ccache-action@v1.2 - - name: Setup directory envs for out-of-tree builds - run: | - echo "CPYTHON_RO_SRCDIR=$(realpath -m ${GITHUB_WORKSPACE}/../cpython-ro-srcdir)" >> $GITHUB_ENV - echo "CPYTHON_BUILDDIR=$(realpath -m ${GITHUB_WORKSPACE}/../cpython-builddir)" >> $GITHUB_ENV - - name: Create directories for read-only out-of-tree builds - run: mkdir -p $CPYTHON_RO_SRCDIR $CPYTHON_BUILDDIR - - name: Bind mount sources read-only - run: sudo mount --bind -o ro $GITHUB_WORKSPACE $CPYTHON_RO_SRCDIR - - name: Configure CPython out-of-tree - working-directory: ${{ env.CPYTHON_BUILDDIR }} - run: ../cpython-ro-srcdir/configure --with-pydebug --with-openssl=$OPENSSL_DIR - - name: Build CPython out-of-tree - working-directory: ${{ env.CPYTHON_BUILDDIR }} + - name: Configure CPython + run: ./configure --config-cache --with-pydebug --with-openssl=$OPENSSL_DIR + - name: Build CPython run: make -j4 - name: Display build info - working-directory: ${{ env.CPYTHON_BUILDDIR }} run: make pythoninfo - - name: Remount sources writable for tests - # some tests write to srcdir, lack of pyc files slows down testing - run: sudo mount $CPYTHON_RO_SRCDIR -oremount,rw - - name: Tests - working-directory: ${{ env.CPYTHON_BUILDDIR }} - run: xvfb-run make buildbottest TESTOPTS="-j4 -uall,-cpu" + - name: SSL tests + run: ./python Lib/test/ssltests.py - build_ubuntu_ssltests: - name: 'Ubuntu SSL tests with OpenSSL' + test_hypothesis: + name: "Hypothesis tests on Ubuntu" runs-on: ubuntu-20.04 + timeout-minutes: 60 needs: check_source - if: needs.check_source.outputs.run_tests == 'true' - strategy: - fail-fast: false - matrix: - openssl_ver: [1.1.1q, 3.0.5] + if: needs.check_source.outputs.run_tests == 'true' && needs.check_source.outputs.run_hypothesis == 'true' env: - OPENSSL_VER: ${{ matrix.openssl_ver }} - MULTISSL_DIR: ${{ github.workspace }}/multissl - OPENSSL_DIR: ${{ github.workspace }}/multissl/openssl/${{ matrix.openssl_ver }} - LD_LIBRARY_PATH: ${{ github.workspace }}/multissl/openssl/${{ matrix.openssl_ver }}/lib + OPENSSL_VER: 3.0.11 + PYTHONSTRICTEXTENSIONBUILD: 1 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Register gcc problem matcher run: echo "::add-matcher::.github/problem-matchers/gcc.json" - name: Install Dependencies @@ -266,31 +327,106 @@ jobs: echo "PATH=/usr/lib/ccache:$PATH" >> $GITHUB_ENV - name: Configure ccache action uses: hendrikmuhs/ccache-action@v1.2 - - name: Configure CPython - run: ./configure --with-pydebug --with-openssl=$OPENSSL_DIR - - name: Build CPython + - name: Setup directory envs for out-of-tree builds + run: | + echo 
"CPYTHON_RO_SRCDIR=$(realpath -m ${GITHUB_WORKSPACE}/../cpython-ro-srcdir)" >> $GITHUB_ENV + echo "CPYTHON_BUILDDIR=$(realpath -m ${GITHUB_WORKSPACE}/../cpython-builddir)" >> $GITHUB_ENV + - name: Create directories for read-only out-of-tree builds + run: mkdir -p $CPYTHON_RO_SRCDIR $CPYTHON_BUILDDIR + - name: Bind mount sources read-only + run: sudo mount --bind -o ro $GITHUB_WORKSPACE $CPYTHON_RO_SRCDIR + - name: Restore config.cache + uses: actions/cache@v3 + with: + path: ${{ env.CPYTHON_BUILDDIR }}/config.cache + key: ${{ github.job }}-${{ runner.os }}-${{ needs.check_source.outputs.config_hash }} + - name: Configure CPython out-of-tree + working-directory: ${{ env.CPYTHON_BUILDDIR }} + run: | + ../cpython-ro-srcdir/configure \ + --config-cache \ + --with-pydebug \ + --with-openssl=$OPENSSL_DIR + - name: Build CPython out-of-tree + working-directory: ${{ env.CPYTHON_BUILDDIR }} run: make -j4 - name: Display build info + working-directory: ${{ env.CPYTHON_BUILDDIR }} run: make pythoninfo - - name: SSL tests - run: ./python Lib/test/ssltests.py + - name: Remount sources writable for tests + # some tests write to srcdir, lack of pyc files slows down testing + run: sudo mount $CPYTHON_RO_SRCDIR -oremount,rw + - name: Setup directory envs for out-of-tree builds + run: | + echo "CPYTHON_BUILDDIR=$(realpath -m ${GITHUB_WORKSPACE}/../cpython-builddir)" >> $GITHUB_ENV + - name: "Create hypothesis venv" + working-directory: ${{ env.CPYTHON_BUILDDIR }} + run: | + VENV_LOC=$(realpath -m .)/hypovenv + VENV_PYTHON=$VENV_LOC/bin/python + echo "HYPOVENV=${VENV_LOC}" >> $GITHUB_ENV + echo "VENV_PYTHON=${VENV_PYTHON}" >> $GITHUB_ENV + ./python -m venv $VENV_LOC && $VENV_PYTHON -m pip install -r ${GITHUB_WORKSPACE}/Tools/requirements-hypothesis.txt + - name: 'Restore Hypothesis database' + id: cache-hypothesis-database + uses: actions/cache@v3 + with: + path: ./hypothesis + key: hypothesis-database-${{ github.head_ref || github.run_id }} + restore-keys: | + - hypothesis-database- + - name: "Run tests" + working-directory: ${{ env.CPYTHON_BUILDDIR }} + run: | + # Most of the excluded tests are slow test suites with no property tests + # + # (GH-104097) test_sysconfig is skipped because it has tests that are + # failing when executed from inside a virtual environment. 
+ ${{ env.VENV_PYTHON }} -m test \ + -W \ + -o \ + -j4 \ + -x test_asyncio \ + -x test_multiprocessing_fork \ + -x test_multiprocessing_forkserver \ + -x test_multiprocessing_spawn \ + -x test_concurrent_futures \ + -x test_socket \ + -x test_subprocess \ + -x test_signal \ + -x test_sysconfig + - uses: actions/upload-artifact@v3 + if: always() + with: + name: hypothesis-example-db + path: .hypothesis/examples/ build_asan: name: 'Address sanitizer' runs-on: ubuntu-20.04 + timeout-minutes: 60 needs: check_source if: needs.check_source.outputs.run_tests == 'true' env: - OPENSSL_VER: 1.1.1q + OPENSSL_VER: 3.0.11 PYTHONSTRICTEXTENSIONBUILD: 1 ASAN_OPTIONS: detect_leaks=0:allocator_may_return_null=1:handle_segv=0 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - name: Restore config.cache + uses: actions/cache@v3 + with: + path: config.cache + key: ${{ github.job }}-${{ runner.os }}-${{ needs.check_source.outputs.config_hash }} - name: Register gcc problem matcher run: echo "::add-matcher::.github/problem-matchers/gcc.json" - name: Install Dependencies run: sudo ./.github/workflows/posix-deps-apt.sh + - name: Set up GCC-10 for ASAN + uses: egor-tensin/setup-gcc@v1 + with: + version: 10 - name: Configure OpenSSL env vars run: | echo "MULTISSL_DIR=${GITHUB_WORKSPACE}/multissl" >> $GITHUB_ENV @@ -311,10 +447,118 @@ jobs: - name: Configure ccache action uses: hendrikmuhs/ccache-action@v1.2 - name: Configure CPython - run: ./configure --with-address-sanitizer --without-pymalloc + run: ./configure --config-cache --with-address-sanitizer --without-pymalloc - name: Build CPython run: make -j4 - name: Display build info run: make pythoninfo - name: Tests - run: xvfb-run make buildbottest TESTOPTS="-j4 -uall,-cpu" + run: xvfb-run make test + + # CIFuzz job based on https://google.github.io/oss-fuzz/getting-started/continuous-integration/ + cifuzz: + name: CIFuzz + runs-on: ubuntu-latest + timeout-minutes: 60 + needs: check_source + if: needs.check_source.outputs.run_cifuzz == 'true' + permissions: + security-events: write + strategy: + fail-fast: false + matrix: + sanitizer: [address, undefined, memory] + steps: + - name: Build fuzzers (${{ matrix.sanitizer }}) + id: build + uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master + with: + oss-fuzz-project-name: cpython3 + sanitizer: ${{ matrix.sanitizer }} + - name: Run fuzzers (${{ matrix.sanitizer }}) + uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master + with: + fuzz-seconds: 600 + oss-fuzz-project-name: cpython3 + output-sarif: true + sanitizer: ${{ matrix.sanitizer }} + - name: Upload crash + uses: actions/upload-artifact@v3 + if: failure() && steps.build.outcome == 'success' + with: + name: ${{ matrix.sanitizer }}-artifacts + path: ./out/artifacts + - name: Upload SARIF + if: always() && steps.build.outcome == 'success' + uses: github/codeql-action/upload-sarif@v2 + with: + sarif_file: cifuzz-sarif/results.sarif + checkout_path: cifuzz-sarif + + all-required-green: # This job does nothing and is only used for the branch protection + name: All required checks pass + if: always() + + needs: + - check_source # Transitive dependency, needed to access `run_tests` value + - check-docs + - check_generated_files + - build_macos + - build_macos_free_threaded + - build_ubuntu + - build_ubuntu_free_threaded + - build_ubuntu_ssltests + - build_windows + - build_windows_free_threaded + - test_hypothesis + - build_asan + - cifuzz + + runs-on: ubuntu-latest + + steps: + - name: Check whether the needed jobs succeeded or 
failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe + with: + allowed-failures: >- + build_ubuntu_ssltests, + cifuzz, + test_hypothesis, + allowed-skips: >- + ${{ + !fromJSON(needs.check_source.outputs.run-docs) + && ' + check-docs, + ' + || '' + }} + ${{ + needs.check_source.outputs.run_tests != 'true' + && ' + check_generated_files, + build_macos, + build_macos_free_threaded, + build_ubuntu, + build_ubuntu_free_threaded, + build_ubuntu_ssltests, + build_windows, + build_windows_free_threaded, + build_asan, + ' + || '' + }} + ${{ + !fromJSON(needs.check_source.outputs.run_cifuzz) + && ' + cifuzz, + ' + || '' + }} + ${{ + !fromJSON(needs.check_source.outputs.run_hypothesis) + && ' + test_hypothesis, + ' + || '' + }} + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/build_msi.yml b/.github/workflows/build_msi.yml index 5f1dcae190efbc2..29282dffa37ec0d 100644 --- a/.github/workflows/build_msi.yml +++ b/.github/workflows/build_msi.yml @@ -8,12 +8,14 @@ on: - '3.*' paths: - 'Tools/msi/**' + - '.github/workflows/build_msi.yml' pull_request: branches: - 'main' - '3.*' paths: - 'Tools/msi/**' + - '.github/workflows/build_msi.yml' permissions: contents: read @@ -26,10 +28,11 @@ jobs: build: name: Windows Installer runs-on: windows-latest + timeout-minutes: 60 strategy: matrix: type: [x86, x64, arm64] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build CPython installer - run: .\Tools\msi\build.bat -${{ matrix.type }} + run: .\Tools\msi\build.bat --doc -${{ matrix.type }} diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml deleted file mode 100644 index 10e4cf074a590a6..000000000000000 --- a/.github/workflows/doc.yml +++ /dev/null @@ -1,85 +0,0 @@ -name: Docs - -on: - workflow_dispatch: - #push: - # branches: - # - 'main' - # - '3.11' - # - '3.10' - # - '3.9' - # - '3.8' - # - '3.7' - # paths: - # - 'Doc/**' - pull_request: - branches: - - 'main' - - '3.11' - - '3.10' - - '3.9' - - '3.8' - - '3.7' - paths: - - 'Doc/**' - - 'Misc/**' - - '.github/workflows/doc.yml' - -permissions: - contents: read - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - build_doc: - name: 'Docs' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Register Sphinx problem matcher - run: echo "::add-matcher::.github/problem-matchers/sphinx.json" - - name: 'Set up Python' - uses: actions/setup-python@v4 - with: - python-version: '3' - cache: 'pip' - cache-dependency-path: 'Doc/requirements.txt' - - name: 'Install build dependencies' - run: make -C Doc/ venv - - name: 'Check documentation' - run: make -C Doc/ check - - name: 'Build HTML documentation' - run: make -C Doc/ SPHINXOPTS="-q" SPHINXERRORHANDLING="-W --keep-going" html - - name: 'Upload' - uses: actions/upload-artifact@v3 - with: - name: doc-html - path: Doc/build/html - - # Run "doctest" on HEAD as new syntax doesn't exist in the latest stable release - doctest: - name: 'Doctest' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Register Sphinx problem matcher - run: echo "::add-matcher::.github/problem-matchers/sphinx.json" - - uses: actions/cache@v3 - with: - path: ~/.cache/pip - key: ubuntu-doc-${{ hashFiles('Doc/requirements.txt') }} - restore-keys: | - ubuntu-doc- - - name: 'Install Dependencies' - run: sudo ./.github/workflows/posix-deps-apt.sh && sudo apt-get install wamerican - - name: 'Configure CPython' - run: ./configure --with-pydebug - - name: 'Build 
CPython' - run: make -j4 - - name: 'Install build dependencies' - run: make -C Doc/ PYTHON=../python venv - # Use "xvfb-run" since some doctest tests open GUI windows - - name: 'Run documentation doctest' - run: xvfb-run make -C Doc/ PYTHON=../python SPHINXERRORHANDLING="-W --keep-going" doctest diff --git a/.github/workflows/documentation-links.yml b/.github/workflows/documentation-links.yml new file mode 100644 index 000000000000000..43a7afec73884e8 --- /dev/null +++ b/.github/workflows/documentation-links.yml @@ -0,0 +1,27 @@ +name: Read the Docs PR preview +# Automatically edits a pull request's descriptions with a link +# to the documentation's preview on Read the Docs. + +on: + pull_request_target: + types: + - opened + paths: + - 'Doc/**' + - '.github/workflows/doc.yml' + +permissions: + pull-requests: write + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + documentation-links: + runs-on: ubuntu-latest + steps: + - uses: readthedocs/actions/preview@v1 + with: + project-slug: "cpython-previews" + single-version: "true" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 000000000000000..6c1c29a58cf4fc8 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,26 @@ +name: Lint + +on: [push, pull_request, workflow_dispatch] + +permissions: + contents: read + +env: + FORCE_COLOR: 1 + RUFF_OUTPUT_FORMAT: github + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + lint: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + python-version: "3.x" + - uses: pre-commit/action@v3.0.0 diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml new file mode 100644 index 000000000000000..405511ca6820b30 --- /dev/null +++ b/.github/workflows/mypy.yml @@ -0,0 +1,51 @@ +# Workflow to run mypy on select parts of the CPython repo +name: mypy + +on: + push: + branches: + - main + pull_request: + paths: + - ".github/workflows/mypy.yml" + - "Tools/cases_generator/**" + - "Tools/clinic/**" + - "Tools/peg_generator/**" + - "Tools/requirements-dev.txt" + - "Tools/wasm/**" + workflow_dispatch: + +permissions: + contents: read + +env: + PIP_DISABLE_PIP_VERSION_CHECK: 1 + FORCE_COLOR: 1 + TERM: xterm-256color # needed for FORCE_COLOR to work on mypy on Ubuntu, see https://github.com/python/mypy/issues/13817 + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + mypy: + strategy: + matrix: + target: [ + "Tools/cases_generator", + "Tools/clinic", + "Tools/peg_generator", + "Tools/wasm", + ] + name: Run mypy on ${{ matrix.target }} + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + python-version: "3.11" + cache: pip + cache-dependency-path: Tools/requirements-dev.txt + - run: pip install -r Tools/requirements-dev.txt + - run: mypy --config-file ${{ matrix.target }}/mypy.ini diff --git a/.github/workflows/new-bugs-announce-notifier.yml b/.github/workflows/new-bugs-announce-notifier.yml index b2b63472d834218..4599b21ef35f05a 100644 --- a/.github/workflows/new-bugs-announce-notifier.yml +++ b/.github/workflows/new-bugs-announce-notifier.yml @@ -11,21 +11,22 @@ permissions: jobs: notify-new-bugs-announce: runs-on: ubuntu-latest + timeout-minutes: 10 steps: - - uses: actions/setup-node@v3 + - 
uses: actions/setup-node@v4 with: - node-version: 14 + node-version: 20 - run: npm install mailgun.js form-data - name: Send notification uses: actions/github-script@v6 env: - MAILGUN_API_KEY: ${{ secrets.PSF_MAILGUN_KEY }} + MAILGUN_API_KEY: ${{ secrets.MAILGUN_PYTHON_ORG_MAILGUN_KEY }} with: script: | const Mailgun = require("mailgun.js"); const formData = require('form-data'); const mailgun = new Mailgun(formData); - const DOMAIN = "mg.python.org"; + const DOMAIN = "mailgun.python.org"; const mg = mailgun.client({username: 'api', key: process.env.MAILGUN_API_KEY}); github.rest.issues.get({ issue_number: context.issue.number, @@ -40,11 +41,14 @@ jobs: url : issue.data.html_url, labels : issue.data.labels.map(label => { return label.name }).join(", "), assignee : issue.data.assignees.map(assignee => { return assignee.login }), - body : issue.data.body + // We need to truncate the body size, because the max size for + // the whole payload is 16kb. We want to be safe and assume that + // body can take up to ~8kb of space. + body : issue.data.body.substring(0, 8000) }; const data = { - from: "CPython Issues ", + from: "CPython Issues ", to: "new-bugs-announce@python.org", subject: `[Issue ${issue.data.number}] ${issue.data.title}`, template: "new-github-issue", diff --git a/.github/workflows/posix-deps-apt.sh b/.github/workflows/posix-deps-apt.sh index a220896f2cd7beb..bbae378f7b994ec 100755 --- a/.github/workflows/posix-deps-apt.sh +++ b/.github/workflows/posix-deps-apt.sh @@ -1,9 +1,11 @@ #!/bin/sh apt-get update +# autoconf-archive is needed by autoreconf (check_generated_files job) apt-get -yq install \ build-essential \ pkg-config \ + autoconf-archive \ ccache \ gdb \ lcov \ diff --git a/.github/workflows/project-updater.yml b/.github/workflows/project-updater.yml index 99c7a05ae8cab07..7574bfc208ff768 100644 --- a/.github/workflows/project-updater.yml +++ b/.github/workflows/project-updater.yml @@ -13,16 +13,15 @@ jobs: add-to-project: name: Add issues to projects runs-on: ubuntu-latest + timeout-minutes: 10 strategy: matrix: include: # if an issue has any of these labels, it will be added # to the corresponding project - { project: 2, label: "release-blocker, deferred-blocker" } - - { project: 3, label: expert-subinterpreters } - - { project: 29, label: expert-asyncio } - { project: 32, label: sprint } - + steps: - uses: actions/add-to-project@v0.1.0 with: diff --git a/.github/workflows/require-pr-label.yml b/.github/workflows/require-pr-label.yml new file mode 100644 index 000000000000000..080204bcfd3b94f --- /dev/null +++ b/.github/workflows/require-pr-label.yml @@ -0,0 +1,22 @@ +name: Check labels + +on: + pull_request: + types: [opened, reopened, labeled, unlabeled, synchronize] + +permissions: + issues: write + pull-requests: write + +jobs: + label: + name: DO-NOT-MERGE / unresolved review + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: mheap/github-action-required-labels@v5 + with: + mode: exactly + count: 0 + labels: "DO-NOT-MERGE, awaiting changes, awaiting change review" diff --git a/.github/workflows/reusable-docs.yml b/.github/workflows/reusable-docs.yml new file mode 100644 index 000000000000000..1c4fa4239c1e340 --- /dev/null +++ b/.github/workflows/reusable-docs.yml @@ -0,0 +1,108 @@ +name: Docs + +on: + workflow_call: + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + build_doc: + name: 'Docs' + runs-on: ubuntu-latest + 
timeout-minutes: 60 + env: + branch_base: 'origin/${{ github.event.pull_request.base.ref }}' + branch_pr: 'origin/${{ github.event.pull_request.head.ref }}' + refspec_base: '+${{ github.event.pull_request.base.sha }}:remotes/origin/${{ github.event.pull_request.base.ref }}' + refspec_pr: '+${{ github.event.pull_request.head.sha }}:remotes/origin/${{ github.event.pull_request.head.ref }}' + steps: + - name: 'Check out latest PR branch commit' + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + # Adapted from https://github.com/actions/checkout/issues/520#issuecomment-1167205721 + - name: 'Fetch commits to get branch diff' + run: | + # Fetch enough history to find a common ancestor commit (aka merge-base): + git fetch origin ${{ env.refspec_pr }} --depth=$(( ${{ github.event.pull_request.commits }} + 1 )) \ + --no-tags --prune --no-recurse-submodules + + # This should get the oldest commit in the local fetched history (which may not be the commit the PR branched from): + COMMON_ANCESTOR=$( git rev-list --first-parent --max-parents=0 --max-count=1 ${{ env.branch_pr }} ) + DATE=$( git log --date=iso8601 --format=%cd "${COMMON_ANCESTOR}" ) + + # Get all commits since that commit date from the base branch (eg: master or main): + git fetch origin ${{ env.refspec_base }} --shallow-since="${DATE}" \ + --no-tags --prune --no-recurse-submodules + - name: 'Set up Python' + uses: actions/setup-python@v4 + with: + python-version: '3' + cache: 'pip' + cache-dependency-path: 'Doc/requirements.txt' + - name: 'Install build dependencies' + run: make -C Doc/ venv + + # To annotate PRs with Sphinx nitpicks (missing references) + - name: 'Build HTML documentation' + continue-on-error: true + run: | + set -Eeuo pipefail + # Build docs with the '-n' (nit-picky) option; write warnings to file + make -C Doc/ PYTHON=../python SPHINXOPTS="-q -n -W --keep-going -w sphinx-warnings.txt" html + - name: 'Check warnings' + if: github.event_name == 'pull_request' + run: | + python Doc/tools/check-warnings.py \ + --annotate-diff '${{ env.branch_base }}' '${{ env.branch_pr }}' \ + --fail-if-regression \ + --fail-if-improved + + # This build doesn't use problem matchers or check annotations + build_doc_oldest_supported_sphinx: + name: 'Docs (Oldest Sphinx)' + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - name: 'Set up Python' + uses: actions/setup-python@v4 + with: + python-version: '3.11' # known to work with Sphinx 4.2 + cache: 'pip' + cache-dependency-path: 'Doc/requirements-oldest-sphinx.txt' + - name: 'Install build dependencies' + run: make -C Doc/ venv REQUIREMENTS="requirements-oldest-sphinx.txt" + - name: 'Build HTML documentation' + run: make -C Doc/ SPHINXOPTS="-q" SPHINXERRORHANDLING="-W --keep-going" html + + # Run "doctest" on HEAD as new syntax doesn't exist in the latest stable release + doctest: + name: 'Doctest' + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ubuntu-doc-${{ hashFiles('Doc/requirements.txt') }} + restore-keys: | + ubuntu-doc- + - name: 'Install Dependencies' + run: sudo ./.github/workflows/posix-deps-apt.sh && sudo apt-get install wamerican + - name: 'Configure CPython' + run: ./configure --with-pydebug + - name: 'Build CPython' + run: make -j4 + - name: 'Install build dependencies' + run: make -C Doc/ PYTHON=../python venv + # Use "xvfb-run" since some doctest tests open GUI windows + - name: 'Run documentation 
doctest' + run: xvfb-run make -C Doc/ PYTHON=../python SPHINXERRORHANDLING="-W --keep-going" doctest diff --git a/.github/workflows/reusable-macos.yml b/.github/workflows/reusable-macos.yml new file mode 100644 index 000000000000000..22f46d18e1b43aa --- /dev/null +++ b/.github/workflows/reusable-macos.yml @@ -0,0 +1,46 @@ +on: + workflow_call: + inputs: + config_hash: + required: true + type: string + free-threaded: + required: false + type: boolean + default: false + +jobs: + build_macos: + name: 'build and test' + runs-on: macos-latest + timeout-minutes: 60 + env: + HOMEBREW_NO_ANALYTICS: 1 + HOMEBREW_NO_AUTO_UPDATE: 1 + HOMEBREW_NO_INSTALL_CLEANUP: 1 + PYTHONSTRICTEXTENSIONBUILD: 1 + steps: + - uses: actions/checkout@v4 + - name: Restore config.cache + uses: actions/cache@v3 + with: + path: config.cache + key: ${{ github.job }}-${{ runner.os }}-${{ inputs.config_hash }} + - name: Install Homebrew dependencies + run: brew install pkg-config openssl@3.0 xz gdbm tcl-tk + - name: Configure CPython + run: | + GDBM_CFLAGS="-I$(brew --prefix gdbm)/include" \ + GDBM_LIBS="-L$(brew --prefix gdbm)/lib -lgdbm" \ + ./configure \ + --config-cache \ + --with-pydebug \ + ${{ inputs.free-threaded && '--disable-gil' || '' }} \ + --prefix=/opt/python-dev \ + --with-openssl="$(brew --prefix openssl@3.0)" + - name: Build CPython + run: make -j4 + - name: Display build info + run: make pythoninfo + - name: Tests + run: make test diff --git a/.github/workflows/reusable-ubuntu.yml b/.github/workflows/reusable-ubuntu.yml new file mode 100644 index 000000000000000..819b45bda7f980a --- /dev/null +++ b/.github/workflows/reusable-ubuntu.yml @@ -0,0 +1,71 @@ +on: + workflow_call: + inputs: + config_hash: + required: true + type: string + options: + required: true + type: string + +jobs: + build_ubuntu_reusable: + name: 'build and test' + timeout-minutes: 60 + runs-on: ubuntu-20.04 + env: + OPENSSL_VER: 3.0.11 + PYTHONSTRICTEXTENSIONBUILD: 1 + steps: + - uses: actions/checkout@v4 + - name: Register gcc problem matcher + run: echo "::add-matcher::.github/problem-matchers/gcc.json" + - name: Install dependencies + run: sudo ./.github/workflows/posix-deps-apt.sh + - name: Configure OpenSSL env vars + run: | + echo "MULTISSL_DIR=${GITHUB_WORKSPACE}/multissl" >> $GITHUB_ENV + echo "OPENSSL_DIR=${GITHUB_WORKSPACE}/multissl/openssl/${OPENSSL_VER}" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=${GITHUB_WORKSPACE}/multissl/openssl/${OPENSSL_VER}/lib" >> $GITHUB_ENV + - name: 'Restore OpenSSL build' + id: cache-openssl + uses: actions/cache@v3 + with: + path: ./multissl/openssl/${{ env.OPENSSL_VER }} + key: ${{ runner.os }}-multissl-openssl-${{ env.OPENSSL_VER }} + - name: Install OpenSSL + if: steps.cache-openssl.outputs.cache-hit != 'true' + run: python3 Tools/ssl/multissltests.py --steps=library --base-directory $MULTISSL_DIR --openssl $OPENSSL_VER --system Linux + - name: Add ccache to PATH + run: | + echo "PATH=/usr/lib/ccache:$PATH" >> $GITHUB_ENV + - name: Configure ccache action + uses: hendrikmuhs/ccache-action@v1.2 + - name: Setup directory envs for out-of-tree builds + run: | + echo "CPYTHON_RO_SRCDIR=$(realpath -m ${GITHUB_WORKSPACE}/../cpython-ro-srcdir)" >> $GITHUB_ENV + echo "CPYTHON_BUILDDIR=$(realpath -m ${GITHUB_WORKSPACE}/../cpython-builddir)" >> $GITHUB_ENV + - name: Create directories for read-only out-of-tree builds + run: mkdir -p $CPYTHON_RO_SRCDIR $CPYTHON_BUILDDIR + - name: Bind mount sources read-only + run: sudo mount --bind -o ro $GITHUB_WORKSPACE $CPYTHON_RO_SRCDIR + - name: Restore config.cache + uses: 
actions/cache@v3 + with: + path: ${{ env.CPYTHON_BUILDDIR }}/config.cache + key: ${{ github.job }}-${{ runner.os }}-${{ inputs.config_hash }} + - name: Configure CPython out-of-tree + working-directory: ${{ env.CPYTHON_BUILDDIR }} + run: ${{ inputs.options }} + - name: Build CPython out-of-tree + working-directory: ${{ env.CPYTHON_BUILDDIR }} + run: make -j4 + - name: Display build info + working-directory: ${{ env.CPYTHON_BUILDDIR }} + run: make pythoninfo + - name: Remount sources writable for tests + # some tests write to srcdir, lack of pyc files slows down testing + run: sudo mount $CPYTHON_RO_SRCDIR -oremount,rw + - name: Tests + working-directory: ${{ env.CPYTHON_BUILDDIR }} + run: xvfb-run make test diff --git a/.github/workflows/reusable-windows.yml b/.github/workflows/reusable-windows.yml new file mode 100644 index 000000000000000..29e0a7e35b54501 --- /dev/null +++ b/.github/workflows/reusable-windows.yml @@ -0,0 +1,53 @@ +on: + workflow_call: + inputs: + free-threaded: + required: false + type: boolean + default: false + +jobs: + build_win32: + name: 'build and test (x86)' + runs-on: windows-latest + timeout-minutes: 60 + env: + IncludeUwp: 'true' + steps: + - uses: actions/checkout@v4 + - name: Build CPython + run: .\PCbuild\build.bat -e -d -p Win32 ${{ inputs.free-threaded && '--disable-gil' || '' }} + - name: Display build info + run: .\python.bat -m test.pythoninfo + - name: Tests + run: .\PCbuild\rt.bat -p Win32 -d -q --fast-ci + + build_win_amd64: + name: 'build and test (x64)' + runs-on: windows-latest + timeout-minutes: 60 + env: + IncludeUwp: 'true' + steps: + - uses: actions/checkout@v4 + - name: Register MSVC problem matcher + run: echo "::add-matcher::.github/problem-matchers/msvc.json" + - name: Build CPython + run: .\PCbuild\build.bat -e -d -p x64 ${{ inputs.free-threaded && '--disable-gil' || '' }} + - name: Display build info + run: .\python.bat -m test.pythoninfo + - name: Tests + run: .\PCbuild\rt.bat -p x64 -d -q --fast-ci + + build_win_arm64: + name: 'build (arm64)' + runs-on: windows-latest + timeout-minutes: 60 + env: + IncludeUwp: 'true' + steps: + - uses: actions/checkout@v4 + - name: Register MSVC problem matcher + run: echo "::add-matcher::.github/problem-matchers/msvc.json" + - name: Build CPython + run: .\PCbuild\build.bat -e -d -p arm64 ${{ inputs.free-threaded && '--disable-gil' || '' }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 1c6f1641c885c6d..94676f5ee5fffcc 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -12,10 +12,11 @@ jobs: if: github.repository_owner == 'python' runs-on: ubuntu-latest + timeout-minutes: 10 steps: - name: "Check PRs" - uses: actions/stale@v6 + uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: 'This PR is stale because it has been open for 30 days with no activity.' 
diff --git a/.github/workflows/verify-ensurepip-wheels.yml b/.github/workflows/verify-ensurepip-wheels.yml index 969515ed287b55c..4a545037bf6e2bc 100644 --- a/.github/workflows/verify-ensurepip-wheels.yml +++ b/.github/workflows/verify-ensurepip-wheels.yml @@ -1,4 +1,4 @@ -name: Verify bundled pip and setuptools +name: Verify bundled wheels on: workflow_dispatch: @@ -23,10 +23,11 @@ concurrency: jobs: verify: runs-on: ubuntu-latest + timeout-minutes: 10 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: '3' - - name: Compare checksums of bundled pip and setuptools to ones published on PyPI + - name: Compare checksum of bundled wheels to the ones published on PyPI run: ./Tools/build/verify_ensurepip_wheels.py diff --git a/.gitignore b/.gitignore index 5055e6d225c7969..8c8273fc7a3aa38 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,10 @@ *.gc?? *.profclang? *.profraw +# Copies of binaries before BOLT optimizations. +*.prebolt +# BOLT profile data. +*.fdata *.dyn .gdb_history .purify @@ -38,10 +42,10 @@ gmon.out .coverage .mypy_cache/ .pytest_cache/ +.ruff_cache/ .DS_Store *.exe -!Lib/distutils/command/*.exe # Ignore core dumps... but not Tools/msi/core/ or the like. core @@ -58,8 +62,6 @@ Doc/.venv/ Doc/env/ Doc/.env/ Include/pydtrace_probes.h -Lib/distutils/command/*.pdb -Lib/lib2to3/*.pickle Lib/site-packages/* !Lib/site-packages/README.txt Lib/test/data/* @@ -126,6 +128,7 @@ Tools/unicode/data/ /platform /profile-clean-stamp /profile-run-stamp +/profile-bolt-stamp /Python/deepfreeze/*.c /pybuilddir.txt /pyconfig.h diff --git a/.mailmap b/.mailmap new file mode 100644 index 000000000000000..013c839ed6b7a4b --- /dev/null +++ b/.mailmap @@ -0,0 +1,3 @@ +# This file sets the canonical name for contributors to the repository. 
+# Documentation: https://git-scm.com/docs/gitmailmap +Amethyst Reese diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000000000..35d9c64a8c5c150 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,36 @@ +repos: + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.1.2 + hooks: + - id: ruff + name: Run Ruff on Lib/test/ + args: [--exit-non-zero-on-fix] + files: ^Lib/test/ + - id: ruff + name: Run Ruff on Argument Clinic + args: [--exit-non-zero-on-fix, --config=Tools/clinic/.ruff.toml] + files: ^Tools/clinic/|Lib/test/test_clinic.py + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-toml + exclude: ^Lib/test/test_tomllib/ + - id: check-yaml + - id: end-of-file-fixer + types: [python] + exclude: Lib/test/tokenizedata/coding20731.py + - id: trailing-whitespace + types_or: [c, inc, python, rst] + + - repo: https://github.com/sphinx-contrib/sphinx-lint + rev: v0.8.1 + hooks: + - id: sphinx-lint + args: [--enable=default-role] + files: ^Doc/|^Misc/NEWS.d/next/ + + - repo: meta + hooks: + - id: check-hooks-apply + - id: check-useless-excludes diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 000000000000000..59830c79a404e0f --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,32 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details +# Project page: https://readthedocs.org/projects/cpython-previews/ + +version: 2 + +sphinx: + configuration: Doc/conf.py + +build: + os: ubuntu-22.04 + tools: + python: "3" + + commands: + # https://docs.readthedocs.io/en/stable/build-customization.html#cancel-build-based-on-a-condition + # + # Cancel building pull requests when there aren't changes in the Doc directory. + # + # If there are no changes (git diff exits with 0) we force the command to return with 183. + # This is a special exit code on Read the Docs that will cancel the build immediately. + - | + if [ "$READTHEDOCS_VERSION_TYPE" = "external" ] && [ "$(git diff --quiet origin/main -- Doc/ .readthedocs.yml; echo $?)" -eq 0 ]; + then + echo "No changes to Doc/ - exiting the build."; + exit 183; + fi + + - make -C Doc venv html + - mkdir _readthedocs + - mv Doc/build/html _readthedocs/html + diff --git a/Doc/Makefile b/Doc/Makefile index b09a9d754fb5aab..7af56e965e1be40 100644 --- a/Doc/Makefile +++ b/Doc/Makefile @@ -7,30 +7,29 @@ PYTHON = python3 VENVDIR = ./venv SPHINXBUILD = PATH=$(VENVDIR)/bin:$$PATH sphinx-build -SPHINXLINT = PATH=$(VENVDIR)/bin:$$PATH sphinx-lint BLURB = PATH=$(VENVDIR)/bin:$$PATH blurb +JOBS = auto PAPER = SOURCES = DISTVERSION = $(shell $(PYTHON) tools/extensions/patchlevel.py) +REQUIREMENTS = requirements.txt SPHINXERRORHANDLING = -W # Internal variables. PAPEROPT_a4 = -D latex_elements.papersize=a4paper PAPEROPT_letter = -D latex_elements.papersize=letterpaper -ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees $(PAPEROPT_$(PAPER)) -j auto \ +ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees $(PAPEROPT_$(PAPER)) -j $(JOBS) \ $(SPHINXOPTS) $(SPHINXERRORHANDLING) . 
build/$(BUILDER) $(SOURCES) -.PHONY: help build html htmlhelp latex text texinfo epub changes linkcheck \ - coverage doctest pydoc-topics htmlview clean clean-venv venv dist check serve \ - autobuild-dev autobuild-dev-html autobuild-stable autobuild-stable-html - +.PHONY: help help: @echo "Please use \`make ' where is one of" @echo " clean to remove build files" @echo " venv to create a venv with necessary tools" @echo " html to make standalone HTML files" @echo " htmlview to open the index page built by the html target in your browser" + @echo " htmllive to rebuild and reload HTML files in your browser" @echo " htmlhelp to make HTML files and a HTML help project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " text to make plain text files" @@ -44,6 +43,7 @@ help: @echo " dist to create a \"dist\" directory with archived docs for download" @echo " check to run a check for frequent markup errors" +.PHONY: build build: -mkdir -p build # Look first for a Misc/NEWS file (building from a source release tarball @@ -70,38 +70,46 @@ build: $(SPHINXBUILD) $(ALLSPHINXOPTS) @echo +.PHONY: html html: BUILDER = html html: build @echo "Build finished. The HTML pages are in build/html." +.PHONY: htmlhelp htmlhelp: BUILDER = htmlhelp htmlhelp: build @echo "Build finished; now you can run HTML Help Workshop with the" \ "build/htmlhelp/pydoc.hhp project file." +.PHONY: latex latex: BUILDER = latex latex: build @echo "Build finished; the LaTeX files are in build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." +.PHONY: text text: BUILDER = text text: build @echo "Build finished; the text files are in build/text." +.PHONY: texinfo texinfo: BUILDER = texinfo texinfo: build @echo "Build finished; the python.texi file is in build/texinfo." @echo "Run \`make info' in that directory to run it through makeinfo." +.PHONY: epub epub: BUILDER = epub epub: build @echo "Build finished; the epub files are in build/epub." +.PHONY: changes changes: BUILDER = changes changes: build @echo "The overview file is in build/changes." 
+.PHONY: linkcheck linkcheck: BUILDER = linkcheck linkcheck: @$(MAKE) build BUILDER=$(BUILDER) || { \ @@ -109,10 +117,12 @@ linkcheck: "or in build/$(BUILDER)/output.txt"; \ false; } +.PHONY: coverage coverage: BUILDER = coverage coverage: build @echo "Coverage finished; see c.txt and python.txt in build/coverage" +.PHONY: doctest doctest: BUILDER = doctest doctest: @$(MAKE) build BUILDER=$(BUILDER) || { \ @@ -120,31 +130,42 @@ doctest: "results in build/doctest/output.txt"; \ false; } +.PHONY: pydoc-topics pydoc-topics: BUILDER = pydoc-topics pydoc-topics: build @echo "Building finished; now run this:" \ "cp build/pydoc-topics/topics.py ../Lib/pydoc_data/topics.py" +.PHONY: htmlview htmlview: html $(PYTHON) -c "import os, webbrowser; webbrowser.open('file://' + os.path.realpath('build/html/index.html'))" +.PHONY: htmllive +htmllive: SPHINXBUILD = $(VENVDIR)/bin/sphinx-autobuild +htmllive: SPHINXOPTS = --re-ignore="/venv/" +htmllive: html + +.PHONY: clean clean: clean-venv -rm -rf build/* +.PHONY: clean-venv clean-venv: rm -rf $(VENVDIR) +.PHONY: venv venv: @if [ -d $(VENVDIR) ] ; then \ echo "venv already exists."; \ echo "To recreate it, remove it first with \`make clean-venv'."; \ else \ $(PYTHON) -m venv $(VENVDIR); \ - $(VENVDIR)/bin/python3 -m pip install -U pip setuptools; \ - $(VENVDIR)/bin/python3 -m pip install -r requirements.txt; \ + $(VENVDIR)/bin/python3 -m pip install --upgrade pip; \ + $(VENVDIR)/bin/python3 -m pip install -r $(REQUIREMENTS); \ echo "The venv has been created in the $(VENVDIR) directory"; \ fi +.PHONY: dist dist: rm -rf dist mkdir -p dist @@ -199,12 +220,12 @@ dist: rm -r dist/python-$(DISTVERSION)-docs-texinfo rm dist/python-$(DISTVERSION)-docs-texinfo.tar -check: - # Check the docs and NEWS files with sphinx-lint. - # Ignore the tools and venv dirs and check that the default role is not used. 
- $(SPHINXLINT) -i tools -i $(VENVDIR) --enable default-role - $(SPHINXLINT) --enable default-role ../Misc/NEWS.d/next/ +.PHONY: check +check: venv + $(VENVDIR)/bin/python3 -m pre_commit --version > /dev/null || $(VENVDIR)/bin/python3 -m pip install pre-commit + $(VENVDIR)/bin/python3 -m pre_commit run --all-files +.PHONY: serve serve: @echo "The serve target was removed, use htmlview instead (see bpo-36329)" @@ -216,15 +237,18 @@ serve: # output files) # for development releases: always build +.PHONY: autobuild-dev autobuild-dev: make dist SPHINXOPTS='$(SPHINXOPTS) -Ea -A daily=1' # for quick rebuilds (HTML only) +.PHONY: autobuild-dev-html autobuild-dev-html: make html SPHINXOPTS='$(SPHINXOPTS) -Ea -A daily=1' # for stable releases: only build if not in pre-release stage (alpha, beta) # release candidate downloads are okay, since the stable tree can be in that stage +.PHONY: autobuild-stable autobuild-stable: @case $(DISTVERSION) in *[ab]*) \ echo "Not building; $(DISTVERSION) is not a release version."; \ @@ -232,6 +256,7 @@ autobuild-stable: esac @make autobuild-dev +.PHONY: autobuild-stable-html autobuild-stable-html: @case $(DISTVERSION) in *[ab]*) \ echo "Not building; $(DISTVERSION) is not a release version."; \ diff --git a/Doc/_static/og-image.png b/Doc/_static/og-image.png new file mode 100644 index 000000000000000..0e80751e740387f Binary files /dev/null and b/Doc/_static/og-image.png differ diff --git a/Doc/bugs.rst b/Doc/bugs.rst index 69d7c27410d56ae..908987cf41ff6e4 100644 --- a/Doc/bugs.rst +++ b/Doc/bugs.rst @@ -19,6 +19,9 @@ If you find a bug in this documentation or would like to propose an improvement, please submit a bug report on the :ref:`tracker `. If you have a suggestion on how to fix it, include that as well. +You can also open a discussion item on our +`Documentation Discourse forum `_. + If you're short on time, you can also email documentation bug reports to docs@python.org (behavioral bugs can be sent to python-list@python.org). 'docs@' is a mailing list run by volunteers; your request will be noticed, @@ -35,7 +38,7 @@ though it may take a while to be processed. `Helping with Documentation `_ Comprehensive guide for individuals that are interested in contributing to Python documentation. - `Documentation Translations `_ + `Documentation Translations `_ A list of GitHub pages for documentation translation and their primary contacts. @@ -67,7 +70,7 @@ Click on the "New issue" button in the top bar to report a new issue. The submission form has two fields, "Title" and "Comment". For the "Title" field, enter a *very* short description of the problem; -less than ten words is good. +fewer than ten words is good. In the "Comment" field, describe the problem in detail, including what you expected to happen and what did happen. Be sure to include whether any diff --git a/Doc/c-api/abstract.rst b/Doc/c-api/abstract.rst index 1823f9d70c79f3e..f5df09fa7fd7863 100644 --- a/Doc/c-api/abstract.rst +++ b/Doc/c-api/abstract.rst @@ -24,4 +24,3 @@ but whose items have not been set to some non-\ ``NULL`` value yet. mapping.rst iter.rst buffer.rst - objbuffer.rst diff --git a/Doc/c-api/allocation.rst b/Doc/c-api/allocation.rst index 0a8fcc5ae5fcdf6..b3609c233156b6e 100644 --- a/Doc/c-api/allocation.rst +++ b/Doc/c-api/allocation.rst @@ -27,22 +27,26 @@ Allocating Objects on the Heap length information for a variable-size object. -.. c:function:: TYPE* PyObject_New(TYPE, PyTypeObject *type) +.. 
c:macro:: PyObject_New(TYPE, typeobj) - Allocate a new Python object using the C structure type *TYPE* and the - Python type object *type*. Fields not defined by the Python object header - are not initialized; the object's reference count will be one. The size of - the memory allocation is determined from the :c:member:`~PyTypeObject.tp_basicsize` field of - the type object. + Allocate a new Python object using the C structure type *TYPE* + and the Python type object *typeobj* (``PyTypeObject*``). + Fields not defined by the Python object header are not initialized. + The caller will own the only reference to the object + (i.e. its reference count will be one). + The size of the memory allocation is determined from the + :c:member:`~PyTypeObject.tp_basicsize` field of the type object. -.. c:function:: TYPE* PyObject_NewVar(TYPE, PyTypeObject *type, Py_ssize_t size) +.. c:macro:: PyObject_NewVar(TYPE, typeobj, size) Allocate a new Python object using the C structure type *TYPE* and the - Python type object *type*. Fields not defined by the Python object header + Python type object *typeobj* (``PyTypeObject*``). + Fields not defined by the Python object header are not initialized. The allocated memory allows for the *TYPE* structure - plus *size* fields of the size given by the :c:member:`~PyTypeObject.tp_itemsize` field of - *type*. This is useful for implementing objects like tuples, which are + plus *size* (``Py_ssize_t``) fields of the size + given by the :c:member:`~PyTypeObject.tp_itemsize` field of + *typeobj*. This is useful for implementing objects like tuples, which are able to determine their size at construction time. Embedding the array of fields into the same allocation decreases the number of allocations, improving the memory management efficiency. @@ -50,8 +54,8 @@ Allocating Objects on the Heap .. c:function:: void PyObject_Del(void *op) - Releases memory allocated to an object using :c:func:`PyObject_New` or - :c:func:`PyObject_NewVar`. This is normally called from the + Releases memory allocated to an object using :c:macro:`PyObject_New` or + :c:macro:`PyObject_NewVar`. This is normally called from the :c:member:`~PyTypeObject.tp_dealloc` handler specified in the object's type. The fields of the object should not be accessed after this call as the memory is no longer a valid Python object. diff --git a/Doc/c-api/apiabiversion.rst b/Doc/c-api/apiabiversion.rst index 85b6e2f373877f2..f6c8284daeacb0a 100644 --- a/Doc/c-api/apiabiversion.rst +++ b/Doc/c-api/apiabiversion.rst @@ -58,7 +58,9 @@ See :ref:`stable` for a discussion of API and ABI stability across versions. Thus ``3.4.1a2`` is hexversion ``0x030401a2`` and ``3.10.0`` is hexversion ``0x030a00f0``. - This version is also available via the symbol :data:`Py_Version`. + Use this for numeric comparisons, e.g. ``#if PY_VERSION_HEX >= ...``. + + This version is also available via the symbol :c:var:`Py_Version`. .. c:var:: const unsigned long Py_Version diff --git a/Doc/c-api/arg.rst b/Doc/c-api/arg.rst index c5be453c153308f..62d87d898e682c5 100644 --- a/Doc/c-api/arg.rst +++ b/Doc/c-api/arg.rst @@ -27,40 +27,55 @@ unit; the entry in (round) parentheses is the Python object type that matches the format unit; and the entry in [square] brackets is the type of the C variable(s) whose address should be passed. +.. _arg-parsing-string-and-buffers: + Strings and buffers ------------------- +.. 
note:: + + On Python 3.12 and older, the macro :c:macro:`!PY_SSIZE_T_CLEAN` must be + defined before including :file:`Python.h` to use all ``#`` variants of + formats (``s#``, ``y#``, etc.) explained below. + This is not necessary on Python 3.13 and later. + These formats allow accessing an object as a contiguous chunk of memory. You don't have to provide raw storage for the returned unicode or bytes area. -In general, when a format sets a pointer to a buffer, the buffer is -managed by the corresponding Python object, and the buffer shares -the lifetime of this object. You won't have to release any memory yourself. -The only exceptions are ``es``, ``es#``, ``et`` and ``et#``. +Unless otherwise stated, buffers are not NUL-terminated. -However, when a :c:type:`Py_buffer` structure gets filled, the underlying -buffer is locked so that the caller can subsequently use the buffer even -inside a :c:type:`Py_BEGIN_ALLOW_THREADS` block without the risk of mutable data -being resized or destroyed. As a result, **you have to call** -:c:func:`PyBuffer_Release` after you have finished processing the data (or -in any early abort case). +There are three ways strings and buffers can be converted to C: -Unless otherwise stated, buffers are not NUL-terminated. +* Formats such as ``y*`` and ``s*`` fill a :c:type:`Py_buffer` structure. + This locks the underlying buffer so that the caller can subsequently use + the buffer even inside a :c:type:`Py_BEGIN_ALLOW_THREADS` + block without the risk of mutable data being resized or destroyed. + As a result, **you have to call** :c:func:`PyBuffer_Release` after you have + finished processing the data (or in any early abort case). -Some formats require a read-only :term:`bytes-like object`, and set a -pointer instead of a buffer structure. They work by checking that -the object's :c:member:`PyBufferProcs.bf_releasebuffer` field is ``NULL``, -which disallows mutable objects such as :class:`bytearray`. +* The ``es``, ``es#``, ``et`` and ``et#`` formats allocate the result buffer. + **You have to call** :c:func:`PyMem_Free` after you have finished + processing the data (or in any early abort case). -.. note:: +* .. _c-arg-borrowed-buffer: + + Other formats take a :class:`str` or a read-only :term:`bytes-like object`, + such as :class:`bytes`, and provide a ``const char *`` pointer to + its buffer. + In this case the buffer is "borrowed": it is managed by the corresponding + Python object, and shares the lifetime of this object. + You won't have to release any memory yourself. - For all ``#`` variants of formats (``s#``, ``y#``, etc.), the macro - :c:macro:`PY_SSIZE_T_CLEAN` must be defined before including - :file:`Python.h`. On Python 3.9 and older, the type of the length argument - is :c:type:`Py_ssize_t` if the :c:macro:`PY_SSIZE_T_CLEAN` macro is defined, - or int otherwise. + To ensure that the underlying buffer may be safely borrowed, the object's + :c:member:`PyBufferProcs.bf_releasebuffer` field must be ``NULL``. + This disallows common mutable objects such as :class:`bytearray`, + but also some read-only objects such as :class:`memoryview` of + :class:`bytes`. + Besides this ``bf_releasebuffer`` requirement, there is no check to verify + whether the input object is immutable (e.g. whether it would honor a request + for a writable buffer, or whether another thread can mutate the data). ``s`` (:class:`str`) [const char \*] Convert a Unicode object to a C pointer to a character string. @@ -89,7 +104,7 @@ which disallows mutable objects such as :class:`bytearray`. 
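To make the first of these conversion paths concrete, here is a minimal sketch (not part of the patch; the module function and the ``:digest`` name in the format string are hypothetical) of a ``METH_VARARGS`` handler that fills a :c:type:`Py_buffer` with ``y*`` and releases it once the data has been processed::

    #include <Python.h>

    /* Hypothetical module function: sum the bytes of any bytes-like object. */
    static PyObject *
    example_digest(PyObject *module, PyObject *args)
    {
        Py_buffer view;
        if (!PyArg_ParseTuple(args, "y*:digest", &view)) {
            return NULL;
        }
        const unsigned char *data = (const unsigned char *)view.buf;
        unsigned long total = 0;
        for (Py_ssize_t i = 0; i < view.len; i++) {
            total += data[i];
        }
        PyBuffer_Release(&view);   /* required once processing is finished */
        return PyLong_FromUnsignedLong(total);
    }

The same :c:func:`PyBuffer_Release` obligation applies to any early-abort path reached after the buffer has been filled.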
Unicode objects are converted to C strings using ``'utf-8'`` encoding. ``s#`` (:class:`str`, read-only :term:`bytes-like object`) [const char \*, :c:type:`Py_ssize_t`] - Like ``s*``, except that it doesn't accept mutable objects. + Like ``s*``, except that it provides a :ref:`borrowed buffer `. The result is stored into two C variables, the first one a pointer to a C string, the second one its length. The string may contain embedded null bytes. Unicode objects are converted @@ -108,8 +123,9 @@ which disallows mutable objects such as :class:`bytearray`. pointer is set to ``NULL``. ``y`` (read-only :term:`bytes-like object`) [const char \*] - This format converts a bytes-like object to a C pointer to a character - string; it does not accept Unicode objects. The bytes buffer must not + This format converts a bytes-like object to a C pointer to a + :ref:`borrowed ` character string; + it does not accept Unicode objects. The bytes buffer must not contain embedded null bytes; if it does, a :exc:`ValueError` exception is raised. @@ -277,8 +293,10 @@ Other objects ``O`` (object) [PyObject \*] Store a Python object (without any conversion) in a C object pointer. The C - program thus receives the actual object that was passed. The object's reference - count is not increased. The pointer stored is not ``NULL``. + program thus receives the actual object that was passed. A new + :term:`strong reference` to the object is not created + (i.e. its reference count is not increased). + The pointer stored is not ``NULL``. ``O!`` (object) [*typeobject*, PyObject \*] Store a Python object in a C object pointer. This is similar to ``O``, but @@ -327,7 +345,7 @@ Other objects *items*. Format units for sequences may be nested. It is possible to pass "long" integers (integers whose value exceeds the -platform's :const:`LONG_MAX`) however no proper range checking is done --- the +platform's :c:macro:`LONG_MAX`) however no proper range checking is done --- the most significant bits are silently truncated when the receiving field is too small to receive the value (actually, the semantics are inherited from downcasts in C --- your mileage may vary). @@ -362,7 +380,8 @@ inside nested parentheses. They are: mutually exclude each other. Note that any Python object references which are provided to the caller are -*borrowed* references; do not decrement their reference count! +*borrowed* references; do not release them +(i.e. do not decrement their reference count)! Additional arguments passed to these functions must be addresses of variables whose type is determined by the format string; these are used to store values @@ -397,8 +416,10 @@ API Functions .. c:function:: int PyArg_ParseTupleAndKeywords(PyObject *args, PyObject *kw, const char *format, char *keywords[], ...) Parse the parameters of a function that takes both positional and keyword - parameters into local variables. The *keywords* argument is a - ``NULL``-terminated array of keyword parameter names. Empty names denote + parameters into local variables. + The *keywords* argument is a ``NULL``-terminated array of keyword parameter + names specified as null-terminated ASCII or UTF-8 encoded C strings. + Empty names denote :ref:`positional-only parameters `. Returns true on success; on failure, it returns false and raises the appropriate exception. @@ -407,6 +428,9 @@ API Functions Added support for :ref:`positional-only parameters `. + .. versionchanged:: 3.13 + Added support for non-ASCII keyword parameter names. + .. 
c:function:: int PyArg_VaParseTupleAndKeywords(PyObject *args, PyObject *kw, const char *format, char *keywords[], va_list vargs) @@ -423,23 +447,31 @@ API Functions .. versionadded:: 3.2 -.. XXX deprecated, will be removed .. c:function:: int PyArg_Parse(PyObject *args, const char *format, ...) - Function used to deconstruct the argument lists of "old-style" functions --- - these are functions which use the :const:`METH_OLDARGS` parameter parsing - method, which has been removed in Python 3. This is not recommended for use - in parameter parsing in new code, and most code in the standard interpreter - has been modified to no longer use this for that purpose. It does remain a - convenient way to decompose other tuples, however, and may continue to be - used for that purpose. + Parse the parameter of a function that takes a single positional parameter + into a local variable. Returns true on success; on failure, it returns + false and raises the appropriate exception. + + Example:: + + // Function using METH_O calling convention + static PyObject* + my_function(PyObject *module, PyObject *arg) + { + int value; + if (!PyArg_Parse(arg, "i:my_function", &value)) { + return NULL; + } + // ... use value ... + } .. c:function:: int PyArg_UnpackTuple(PyObject *args, const char *name, Py_ssize_t min, Py_ssize_t max, ...) A simpler form of parameter retrieval which does not use a format string to specify the types of the arguments. Functions which use this method to retrieve - their parameters should be declared as :const:`METH_VARARGS` in function or + their parameters should be declared as :c:macro:`METH_VARARGS` in function or method tables. The tuple containing the actual parameters should be passed as *args*; it must actually be a tuple. The length of the tuple must be at least *min* and no more than *max*; *min* and *max* may be equal. Additional @@ -453,7 +485,7 @@ API Functions will be set if there was a failure. This is an example of the use of this function, taken from the sources for the - :mod:`_weakref` helper module for weak references:: + :mod:`!_weakref` helper module for weak references:: static PyObject * weakref_ref(PyObject *self, PyObject *args) @@ -531,7 +563,7 @@ Building values Same as ``s#``. ``u`` (:class:`str`) [const wchar_t \*] - Convert a null-terminated :c:expr:`wchar_t` buffer of Unicode (UTF-16 or UCS-4) + Convert a null-terminated :c:type:`wchar_t` buffer of Unicode (UTF-16 or UCS-4) data to a Python Unicode object. If the Unicode buffer pointer is ``NULL``, ``None`` is returned. @@ -597,8 +629,10 @@ Building values Convert a C :c:type:`Py_complex` structure to a Python complex number. ``O`` (object) [PyObject \*] - Pass a Python object untouched (except for its reference count, which is - incremented by one). If the object passed in is a ``NULL`` pointer, it is assumed + Pass a Python object untouched but create a new + :term:`strong reference` to it + (i.e. its reference count is incremented by one). + If the object passed in is a ``NULL`` pointer, it is assumed that this was caused because the call producing the argument found an error and set an exception. Therefore, :c:func:`Py_BuildValue` will return ``NULL`` but won't raise an exception. If no exception has been raised yet, :exc:`SystemError` is @@ -608,7 +642,7 @@ Building values Same as ``O``. ``N`` (object) [PyObject \*] - Same as ``O``, except it doesn't increment the reference count on the object. + Same as ``O``, except it doesn't create a new :term:`strong reference`. 
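As a brief, hedged illustration of the difference between ``O`` and ``N`` (a hypothetical helper, not taken from this patch)::

    #include <Python.h>

    /* Build an (obj, new_list) pair. "O" creates a new strong reference to
       obj; "N" steals the reference to the freshly created list, so this
       helper does not release it itself. */
    static PyObject *
    make_pair(PyObject *obj)
    {
        PyObject *list = PyList_New(0);
        if (list == NULL) {
            return NULL;
        }
        return Py_BuildValue("(ON)", obj, list);
    }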
Useful when the object is created by a call to an object constructor in the argument list. diff --git a/Doc/c-api/bool.rst b/Doc/c-api/bool.rst index c197d447e9618c9..b4dc4849d044e1c 100644 --- a/Doc/c-api/bool.rst +++ b/Doc/c-api/bool.rst @@ -6,11 +6,17 @@ Boolean Objects --------------- Booleans in Python are implemented as a subclass of integers. There are only -two booleans, :const:`Py_False` and :const:`Py_True`. As such, the normal +two booleans, :c:data:`Py_False` and :c:data:`Py_True`. As such, the normal creation and deletion functions don't apply to booleans. The following macros are available, however. +.. c:var:: PyTypeObject PyBool_Type + + This instance of :c:type:`PyTypeObject` represents the Python boolean type; it + is the same object as :class:`bool` in the Python layer. + + .. c:function:: int PyBool_Check(PyObject *o) Return true if *o* is of type :c:data:`PyBool_Type`. This function always @@ -19,29 +25,32 @@ are available, however. .. c:var:: PyObject* Py_False - The Python ``False`` object. This object has no methods. It needs to be - treated just like any other object with respect to reference counts. + The Python ``False`` object. This object has no methods and is + :term:`immortal`. + + .. versionchanged:: 3.12 + :c:data:`Py_False` is :term:`immortal`. .. c:var:: PyObject* Py_True - The Python ``True`` object. This object has no methods. It needs to be treated - just like any other object with respect to reference counts. + The Python ``True`` object. This object has no methods and is + :term:`immortal`. + + .. versionchanged:: 3.12 + :c:data:`Py_True` is :term:`immortal`. .. c:macro:: Py_RETURN_FALSE - Return :const:`Py_False` from a function, properly incrementing its reference - count. + Return :c:data:`Py_False` from a function. .. c:macro:: Py_RETURN_TRUE - Return :const:`Py_True` from a function, properly incrementing its reference - count. + Return :c:data:`Py_True` from a function. .. c:function:: PyObject* PyBool_FromLong(long v) - Return a new reference to :const:`Py_True` or :const:`Py_False` depending on the - truth value of *v*. + Return :c:data:`Py_True` or :c:data:`Py_False`, depending on the truth value of *v*. diff --git a/Doc/c-api/buffer.rst b/Doc/c-api/buffer.rst index a04062fb2a68f1d..e572815ffd6259f 100644 --- a/Doc/c-api/buffer.rst +++ b/Doc/c-api/buffer.rst @@ -44,7 +44,7 @@ the elements exposed by an :class:`array.array` can be multi-byte values. An example consumer of the buffer interface is the :meth:`~io.BufferedIOBase.write` method of file objects: any object that can export a series of bytes through -the buffer interface can be written to a file. While :meth:`write` only +the buffer interface can be written to a file. While :meth:`!write` only needs read-only access to the internal contents of the object passed to it, other methods such as :meth:`~io.BufferedIOBase.readinto` need write access to the contents of their argument. The buffer interface allows objects to @@ -102,7 +102,9 @@ a buffer, see :c:func:`PyObject_GetBuffer`. .. c:member:: PyObject *obj A new reference to the exporting object. The reference is owned by - the consumer and automatically decremented and set to ``NULL`` by + the consumer and automatically released + (i.e. reference count decremented) + and set to ``NULL`` by :c:func:`PyBuffer_Release`. The field is the equivalent of the return value of any standard C-API function. @@ -159,10 +161,7 @@ a buffer, see :c:func:`PyObject_GetBuffer`. 
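Returning to the boolean macros described above, a minimal sketch (hypothetical ``METH_O`` function, not part of the patch) of how the immortal singletons are typically returned from C::

    #include <Python.h>

    /* Hypothetical METH_O function: report whether an integer argument is positive. */
    static PyObject *
    example_is_positive(PyObject *module, PyObject *arg)
    {
        long value = PyLong_AsLong(arg);
        if (value == -1 && PyErr_Occurred()) {
            return NULL;
        }
        if (value > 0) {
            Py_RETURN_TRUE;    /* returns the immortal Py_True */
        }
        Py_RETURN_FALSE;       /* equivalent to: return PyBool_FromLong(0); */
    }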
If it is ``0``, :c:member:`~Py_buffer.buf` points to a single item representing a scalar. In this case, :c:member:`~Py_buffer.shape`, :c:member:`~Py_buffer.strides` and :c:member:`~Py_buffer.suboffsets` MUST be ``NULL``. - - The macro :c:macro:`PyBUF_MAX_NDIM` limits the maximum number of dimensions - to 64. Exporters MUST respect this limit, consumers of multi-dimensional - buffers SHOULD be able to handle up to :c:macro:`PyBUF_MAX_NDIM` dimensions. + The maximum number of dimensions is given by :c:macro:`PyBUF_MAX_NDIM`. .. c:member:: Py_ssize_t *shape @@ -215,6 +214,17 @@ a buffer, see :c:func:`PyObject_GetBuffer`. freed when the buffer is released. The consumer MUST NOT alter this value. + +Constants: + +.. c:macro:: PyBUF_MAX_NDIM + + The maximum number of dimensions the memory represents. + Exporters MUST respect this limit, consumers of multi-dimensional + buffers SHOULD be able to handle up to :c:macro:`!PyBUF_MAX_NDIM` dimensions. + Currently set to 64. + + .. _buffer-request-types: Buffer request types @@ -225,7 +235,7 @@ object via :c:func:`PyObject_GetBuffer`. Since the complexity of the logical structure of the memory can vary drastically, the consumer uses the *flags* argument to specify the exact buffer type it can handle. -All :c:data:`Py_buffer` fields are unambiguously defined by the request +All :c:type:`Py_buffer` fields are unambiguously defined by the request type. request-independent fields @@ -438,7 +448,7 @@ Buffer-related functions Send a request to *exporter* to fill in *view* as specified by *flags*. If the exporter cannot provide a buffer of the exact type, it MUST raise - :c:data:`PyExc_BufferError`, set ``view->obj`` to ``NULL`` and + :exc:`BufferError`, set ``view->obj`` to ``NULL`` and return ``-1``. On success, fill in *view*, set ``view->obj`` to a new reference @@ -454,7 +464,8 @@ Buffer-related functions .. c:function:: void PyBuffer_Release(Py_buffer *view) - Release the buffer *view* and decrement the reference count for + Release the buffer *view* and release the :term:`strong reference` + (i.e. decrement the reference count) to the view's supporting object, ``view->obj``. This function MUST be called when the buffer is no longer being used, otherwise reference leaks may occur. @@ -464,7 +475,7 @@ Buffer-related functions .. c:function:: Py_ssize_t PyBuffer_SizeFromFormat(const char *format) - Return the implied :c:data:`~Py_buffer.itemsize` from :c:data:`~Py_buffer.format`. + Return the implied :c:member:`~Py_buffer.itemsize` from :c:member:`~Py_buffer.format`. On error, raise an exception and return -1. .. versionadded:: 3.9 @@ -499,7 +510,7 @@ Buffer-related functions This function fails if *len* != *src->len*. -.. c:function:: int PyObject_CopyData(Py_buffer *dest, Py_buffer *src) +.. c:function:: int PyObject_CopyData(PyObject *dest, PyObject *src) Copy data from *src* to *dest* buffer. Can convert between C-style and or Fortran-style buffers. @@ -524,7 +535,7 @@ Buffer-related functions and :c:macro:`PyBUF_WRITABLE` is set in *flags*. On success, set ``view->obj`` to a new reference to *exporter* and - return 0. Otherwise, raise :c:data:`PyExc_BufferError`, set + return 0. Otherwise, raise :exc:`BufferError`, set ``view->obj`` to ``NULL`` and return ``-1``; If this function is used as part of a :ref:`getbufferproc `, diff --git a/Doc/c-api/bytearray.rst b/Doc/c-api/bytearray.rst index 4bf3cfe100cd013..456f7d89bca03cd 100644 --- a/Doc/c-api/bytearray.rst +++ b/Doc/c-api/bytearray.rst @@ -5,7 +5,7 @@ Byte Array Objects ------------------ -.. 
index:: object: bytearray +.. index:: pair: object; bytearray .. c:type:: PyByteArrayObject diff --git a/Doc/c-api/bytes.rst b/Doc/c-api/bytes.rst index d62962cab45f6bb..4790d3b2da43754 100644 --- a/Doc/c-api/bytes.rst +++ b/Doc/c-api/bytes.rst @@ -8,7 +8,7 @@ Bytes Objects These functions raise :exc:`TypeError` when expecting a bytes parameter and called with a non-bytes parameter. -.. index:: object: bytes +.. index:: pair: object; bytes .. c:type:: PyBytesObject @@ -64,39 +64,39 @@ called with a non-bytes parameter. +-------------------+---------------+--------------------------------+ | Format Characters | Type | Comment | +===================+===============+================================+ - | :attr:`%%` | *n/a* | The literal % character. | + | ``%%`` | *n/a* | The literal % character. | +-------------------+---------------+--------------------------------+ - | :attr:`%c` | int | A single byte, | + | ``%c`` | int | A single byte, | | | | represented as a C int. | +-------------------+---------------+--------------------------------+ - | :attr:`%d` | int | Equivalent to | + | ``%d`` | int | Equivalent to | | | | ``printf("%d")``. [1]_ | +-------------------+---------------+--------------------------------+ - | :attr:`%u` | unsigned int | Equivalent to | + | ``%u`` | unsigned int | Equivalent to | | | | ``printf("%u")``. [1]_ | +-------------------+---------------+--------------------------------+ - | :attr:`%ld` | long | Equivalent to | + | ``%ld`` | long | Equivalent to | | | | ``printf("%ld")``. [1]_ | +-------------------+---------------+--------------------------------+ - | :attr:`%lu` | unsigned long | Equivalent to | + | ``%lu`` | unsigned long | Equivalent to | | | | ``printf("%lu")``. [1]_ | +-------------------+---------------+--------------------------------+ - | :attr:`%zd` | :c:type:`\ | Equivalent to | + | ``%zd`` | :c:type:`\ | Equivalent to | | | Py_ssize_t` | ``printf("%zd")``. [1]_ | +-------------------+---------------+--------------------------------+ - | :attr:`%zu` | size_t | Equivalent to | + | ``%zu`` | size_t | Equivalent to | | | | ``printf("%zu")``. [1]_ | +-------------------+---------------+--------------------------------+ - | :attr:`%i` | int | Equivalent to | + | ``%i`` | int | Equivalent to | | | | ``printf("%i")``. [1]_ | +-------------------+---------------+--------------------------------+ - | :attr:`%x` | int | Equivalent to | + | ``%x`` | int | Equivalent to | | | | ``printf("%x")``. [1]_ | +-------------------+---------------+--------------------------------+ - | :attr:`%s` | const char\* | A null-terminated C character | + | ``%s`` | const char\* | A null-terminated C character | | | | array. | +-------------------+---------------+--------------------------------+ - | :attr:`%p` | const void\* | The hex representation of a C | + | ``%p`` | const void\* | The hex representation of a C | | | | pointer. Mostly equivalent to | | | | ``printf("%p")`` except that | | | | it is guaranteed to start with | @@ -155,6 +155,7 @@ called with a non-bytes parameter. Return the null-terminated contents of the object *obj* through the output variables *buffer* and *length*. + Returns ``0`` on success. If *length* is ``NULL``, the bytes object may not contain embedded null bytes; @@ -184,8 +185,8 @@ called with a non-bytes parameter. .. c:function:: void PyBytes_ConcatAndDel(PyObject **bytes, PyObject *newpart) Create a new bytes object in *\*bytes* containing the contents of *newpart* - appended to *bytes*. 
This version decrements the reference count of - *newpart*. + appended to *bytes*. This version releases the :term:`strong reference` + to *newpart* (i.e. decrements its reference count). .. c:function:: int _PyBytes_Resize(PyObject **bytes, Py_ssize_t newsize) diff --git a/Doc/c-api/call.rst b/Doc/c-api/call.rst index 6fb2e15196103d4..7198d6bc056eb43 100644 --- a/Doc/c-api/call.rst +++ b/Doc/c-api/call.rst @@ -59,12 +59,12 @@ This bears repeating: .. versionchanged:: 3.12 - The :const:`Py_TPFLAGS_HAVE_VECTORCALL` flag is now removed from a class + The :c:macro:`Py_TPFLAGS_HAVE_VECTORCALL` flag is now removed from a class when the class's :py:meth:`~object.__call__` method is reassigned. (This internally sets :c:member:`~PyTypeObject.tp_call` only, and thus may make it behave differently than the vectorcall function.) In earlier Python versions, vectorcall should only be used with - :const:`immutable ` or static types. + :c:macro:`immutable ` or static types. A class should not implement vectorcall if that would be slower than *tp_call*. For example, if the callee needs to convert @@ -72,7 +72,7 @@ the arguments to an args tuple and kwargs dict anyway, then there is no point in implementing vectorcall. Classes can implement the vectorcall protocol by enabling the -:const:`Py_TPFLAGS_HAVE_VECTORCALL` flag and setting +:c:macro:`Py_TPFLAGS_HAVE_VECTORCALL` flag and setting :c:member:`~PyTypeObject.tp_vectorcall_offset` to the offset inside the object structure where a *vectorcallfunc* appears. This is a pointer to a function with the following signature: @@ -84,7 +84,7 @@ This is a pointer to a function with the following signature: values of the keyword arguments. This can be *NULL* if there are no arguments. - *nargsf* is the number of positional arguments plus possibly the - :const:`PY_VECTORCALL_ARGUMENTS_OFFSET` flag. + :c:macro:`PY_VECTORCALL_ARGUMENTS_OFFSET` flag. To get the actual number of positional arguments from *nargsf*, use :c:func:`PyVectorcall_NARGS`. - *kwnames* is a tuple containing the names of the keyword arguments; @@ -104,28 +104,17 @@ This is a pointer to a function with the following signature: ``args[0]`` may be changed. Whenever they can do so cheaply (without additional allocation), callers - are encouraged to use :const:`PY_VECTORCALL_ARGUMENTS_OFFSET`. + are encouraged to use :c:macro:`PY_VECTORCALL_ARGUMENTS_OFFSET`. Doing so will allow callables such as bound methods to make their onward calls (which include a prepended *self* argument) very efficiently. + .. versionadded:: 3.8 + To call an object that implements vectorcall, use a :ref:`call API ` function as with any other callable. :c:func:`PyObject_Vectorcall` will usually be most efficient. -.. note:: - - In CPython 3.8, the vectorcall API and related functions were available - provisionally under names with a leading underscore: - ``_PyObject_Vectorcall``, ``_Py_TPFLAGS_HAVE_VECTORCALL``, - ``_PyObject_VectorcallMethod``, ``_PyVectorcall_Function``, - ``_PyObject_CallOneArg``, ``_PyObject_CallMethodNoArgs``, - ``_PyObject_CallMethodOneArg``. - Additionally, ``PyObject_VectorcallDict`` was available as - ``_PyObject_FastCallDict``. - The old names are still defined as aliases of the new, non-underscored names. - - Recursion Control ................. @@ -165,7 +154,7 @@ Vectorcall Support API This is mostly useful to check whether or not *op* supports vectorcall, which can be done by checking ``PyVectorcall_Function(op) != NULL``. - .. versionadded:: 3.8 + .. versionadded:: 3.9 .. 
c:function:: PyObject* PyVectorcall_Call(PyObject *callable, PyObject *tuple, PyObject *dict) @@ -174,7 +163,7 @@ Vectorcall Support API This is a specialized function, intended to be put in the :c:member:`~PyTypeObject.tp_call` slot or be used in an implementation of ``tp_call``. - It does not check the :const:`Py_TPFLAGS_HAVE_VECTORCALL` flag + It does not check the :c:macro:`Py_TPFLAGS_HAVE_VECTORCALL` flag and it does not fall back to ``tp_call``. .. versionadded:: 3.8 @@ -392,11 +381,11 @@ please see individual documentation for details. *args[0]*, and the *args* array starting at *args[1]* represents the arguments of the call. There must be at least one positional argument. *nargsf* is the number of positional arguments including *args[0]*, - plus :const:`PY_VECTORCALL_ARGUMENTS_OFFSET` if the value of ``args[0]`` may + plus :c:macro:`PY_VECTORCALL_ARGUMENTS_OFFSET` if the value of ``args[0]`` may temporarily be changed. Keyword arguments can be passed just like in :c:func:`PyObject_Vectorcall`. - If the object has the :const:`Py_TPFLAGS_METHOD_DESCRIPTOR` feature, + If the object has the :c:macro:`Py_TPFLAGS_METHOD_DESCRIPTOR` feature, this will call the unbound method object with the full *args* vector as arguments. diff --git a/Doc/c-api/capsule.rst b/Doc/c-api/capsule.rst index 1c8f432505ef68a..cdb8aa33e9fd325 100644 --- a/Doc/c-api/capsule.rst +++ b/Doc/c-api/capsule.rst @@ -5,7 +5,7 @@ Capsules -------- -.. index:: object: Capsule +.. index:: pair: object; Capsule Refer to :ref:`using-capsules` for more information on using these objects. @@ -64,7 +64,7 @@ Refer to :ref:`using-capsules` for more information on using these objects. The *name* parameter must compare exactly to the name stored in the capsule. If the name stored in the capsule is ``NULL``, the *name* passed in must also - be ``NULL``. Python uses the C function :c:func:`strcmp` to compare capsule + be ``NULL``. Python uses the C function :c:func:`!strcmp` to compare capsule names. @@ -121,7 +121,7 @@ Refer to :ref:`using-capsules` for more information on using these objects. compared.) In other words, if :c:func:`PyCapsule_IsValid` returns a true value, calls to - any of the accessors (any function starting with :c:func:`PyCapsule_Get`) are + any of the accessors (any function starting with ``PyCapsule_Get``) are guaranteed to succeed. Return a nonzero value if the object is valid and matches the name passed in. diff --git a/Doc/c-api/cell.rst b/Doc/c-api/cell.rst index ac4ef5adc5cc20f..f8cd0344fdd1c0c 100644 --- a/Doc/c-api/cell.rst +++ b/Doc/c-api/cell.rst @@ -25,7 +25,7 @@ Cell objects are not likely to be useful elsewhere. The type object corresponding to cell objects. -.. c:function:: int PyCell_Check(ob) +.. c:function:: int PyCell_Check(PyObject *ob) Return true if *ob* is a cell object; *ob* must not be ``NULL``. This function always succeeds. diff --git a/Doc/c-api/code.rst b/Doc/c-api/code.rst index 9054e7ee3181a52..5082b0cb6ad3f3e 100644 --- a/Doc/c-api/code.rst +++ b/Doc/c-api/code.rst @@ -33,27 +33,46 @@ bound into a function. Return the number of free variables in *co*. -.. c:function:: PyCodeObject* PyCode_New(int argcount, int kwonlyargcount, int nlocals, int stacksize, int flags, PyObject *code, PyObject *consts, PyObject *names, PyObject *varnames, PyObject *freevars, PyObject *cellvars, PyObject *filename, PyObject *name, int firstlineno, PyObject *linetable, PyObject *exceptiontable) +.. 
c:function:: PyCodeObject* PyUnstable_Code_New(int argcount, int kwonlyargcount, int nlocals, int stacksize, int flags, PyObject *code, PyObject *consts, PyObject *names, PyObject *varnames, PyObject *freevars, PyObject *cellvars, PyObject *filename, PyObject *name, PyObject *qualname, int firstlineno, PyObject *linetable, PyObject *exceptiontable) Return a new code object. If you need a dummy code object to create a frame, - use :c:func:`PyCode_NewEmpty` instead. Calling :c:func:`PyCode_New` directly - will bind you to a precise Python version since the definition of the bytecode - changes often. The many arguments of this function are inter-dependent in complex + use :c:func:`PyCode_NewEmpty` instead. + + Since the definition of the bytecode changes often, calling + :c:func:`PyUnstable_Code_New` directly can bind you to a precise Python version. + + The many arguments of this function are inter-dependent in complex ways, meaning that subtle changes to values are likely to result in incorrect execution or VM crashes. Use this function only with extreme care. .. versionchanged:: 3.11 - Added ``exceptiontable`` parameter. + Added ``qualname`` and ``exceptiontable`` parameters. + + .. index:: single: PyCode_New + + .. versionchanged:: 3.12 -.. c:function:: PyCodeObject* PyCode_NewWithPosOnlyArgs(int argcount, int posonlyargcount, int kwonlyargcount, int nlocals, int stacksize, int flags, PyObject *code, PyObject *consts, PyObject *names, PyObject *varnames, PyObject *freevars, PyObject *cellvars, PyObject *filename, PyObject *name, int firstlineno, PyObject *linetable, PyObject *exceptiontable) + Renamed from ``PyCode_New`` as part of :ref:`unstable-c-api`. + The old name is deprecated, but will remain available until the + signature changes again. - Similar to :c:func:`PyCode_New`, but with an extra "posonlyargcount" for positional-only arguments. - The same caveats that apply to ``PyCode_New`` also apply to this function. +.. c:function:: PyCodeObject* PyUnstable_Code_NewWithPosOnlyArgs(int argcount, int posonlyargcount, int kwonlyargcount, int nlocals, int stacksize, int flags, PyObject *code, PyObject *consts, PyObject *names, PyObject *varnames, PyObject *freevars, PyObject *cellvars, PyObject *filename, PyObject *name, PyObject *qualname, int firstlineno, PyObject *linetable, PyObject *exceptiontable) - .. versionadded:: 3.8 + Similar to :c:func:`PyUnstable_Code_New`, but with an extra "posonlyargcount" for positional-only arguments. + The same caveats that apply to ``PyUnstable_Code_New`` also apply to this function. + + .. index:: single: PyCode_NewWithPosOnlyArgs + + .. versionadded:: 3.8 as ``PyCode_NewWithPosOnlyArgs`` .. versionchanged:: 3.11 - Added ``exceptiontable`` parameter. + Added ``qualname`` and ``exceptiontable`` parameters. + + .. versionchanged:: 3.12 + + Renamed to ``PyUnstable_Code_NewWithPosOnlyArgs``. + The old name is deprecated, but will remain available until the + signature changes again. .. c:function:: PyCodeObject* PyCode_NewEmpty(const char *filename, const char *funcname, int firstlineno) @@ -77,6 +96,8 @@ bound into a function. Returns ``1`` if the function succeeds and 0 otherwise. + .. versionadded:: 3.11 + .. c:function:: PyObject* PyCode_GetCode(PyCodeObject *co) Equivalent to the Python code ``getattr(co, 'co_code')``. @@ -115,3 +136,129 @@ bound into a function. the free variables. On error, ``NULL`` is returned and an exception is raised. .. versionadded:: 3.11 + +.. 
c:function:: int PyCode_AddWatcher(PyCode_WatchCallback callback) + + Register *callback* as a code object watcher for the current interpreter. + Return an ID which may be passed to :c:func:`PyCode_ClearWatcher`. + In case of error (e.g. no more watcher IDs available), + return ``-1`` and set an exception. + + .. versionadded:: 3.12 + +.. c:function:: int PyCode_ClearWatcher(int watcher_id) + + Clear watcher identified by *watcher_id* previously returned from + :c:func:`PyCode_AddWatcher` for the current interpreter. + Return ``0`` on success, or ``-1`` and set an exception on error + (e.g. if the given *watcher_id* was never registered.) + + .. versionadded:: 3.12 + +.. c:type:: PyCodeEvent + + Enumeration of possible code object watcher events: + - ``PY_CODE_EVENT_CREATE`` + - ``PY_CODE_EVENT_DESTROY`` + + .. versionadded:: 3.12 + +.. c:type:: int (*PyCode_WatchCallback)(PyCodeEvent event, PyCodeObject* co) + + Type of a code object watcher callback function. + + If *event* is ``PY_CODE_EVENT_CREATE``, then the callback is invoked + after *co* has been fully initialized. Otherwise, the callback is invoked + before the destruction of *co* takes place, so the prior state of *co* + can be inspected. + + If *event* is ``PY_CODE_EVENT_DESTROY``, taking a reference in the callback + to the about-to-be-destroyed code object will resurrect it and prevent it + from being freed at this time. When the resurrected object is destroyed + later, any watcher callbacks active at that time will be called again. + + Users of this API should not rely on internal runtime implementation + details. Such details may include, but are not limited to, the exact + order and timing of creation and destruction of code objects. While + changes in these details may result in differences observable by watchers + (including whether a callback is invoked or not), it does not change + the semantics of the Python code being executed. + + If the callback sets an exception, it must return ``-1``; this exception will + be printed as an unraisable exception using :c:func:`PyErr_WriteUnraisable`. + Otherwise it should return ``0``. + + There may already be a pending exception set on entry to the callback. In + this case, the callback should return ``0`` with the same exception still + set. This means the callback may not call any other API that can set an + exception unless it saves and clears the exception state first, and restores + it before returning. + + .. versionadded:: 3.12 + + +Extra information +----------------- + +To support low-level extensions to frame evaluation, such as external +just-in-time compilers, it is possible to attach arbitrary extra data to +code objects. + +These functions are part of the unstable C API tier: +this functionality is a CPython implementation detail, and the API +may change without deprecation warnings. + +.. c:function:: Py_ssize_t PyUnstable_Eval_RequestCodeExtraIndex(freefunc free) + + Return a new opaque index value used to add data to code objects. + + You generally call this function once (per interpreter) and use the result + with ``PyCode_GetExtra`` and ``PyCode_SetExtra`` to manipulate + data on individual code objects. + + If *free* is not ``NULL``: when a code object is deallocated, + *free* will be called on non-``NULL`` data stored under the new index. + Use :c:func:`Py_DecRef` when storing :c:type:`PyObject`. + + .. index:: single: _PyEval_RequestCodeExtraIndex + + .. versionadded:: 3.6 as ``_PyEval_RequestCodeExtraIndex`` + + ..
versionchanged:: 3.12 + + Renamed to ``PyUnstable_Eval_RequestCodeExtraIndex``. + The old private name is deprecated, but will be available until the API + changes. + +.. c:function:: int PyUnstable_Code_GetExtra(PyObject *code, Py_ssize_t index, void **extra) + + Set *extra* to the extra data stored under the given index. + Return 0 on success. Set an exception and return -1 on failure. + + If no data was set under the index, set *extra* to ``NULL`` and return + 0 without setting an exception. + + .. index:: single: _PyCode_GetExtra + + .. versionadded:: 3.6 as ``_PyCode_GetExtra`` + + .. versionchanged:: 3.12 + + Renamed to ``PyUnstable_Code_GetExtra``. + The old private name is deprecated, but will be available until the API + changes. + +.. c:function:: int PyUnstable_Code_SetExtra(PyObject *code, Py_ssize_t index, void *extra) + + Set the extra data stored under the given index to *extra*. + Return 0 on success. Set an exception and return -1 on failure. + + .. index:: single: _PyCode_SetExtra + + .. versionadded:: 3.6 as ``_PyCode_SetExtra`` + + .. versionchanged:: 3.12 + + Renamed to ``PyUnstable_Code_SetExtra``. + The old private name is deprecated, but will be available until the API + changes. diff --git a/Doc/c-api/codec.rst b/Doc/c-api/codec.rst index 235c77c945cc5be..8ae5c4fecd6248f 100644 --- a/Doc/c-api/codec.rst +++ b/Doc/c-api/codec.rst @@ -7,7 +7,7 @@ Codec registry and support functions Register a new codec search function. - As side effect, this tries to load the :mod:`encodings` package, if not yet + As side effect, this tries to load the :mod:`!encodings` package, if not yet done, to make sure that it is always first in the list of search functions. .. c:function:: int PyCodec_Unregister(PyObject *search_function) diff --git a/Doc/c-api/complex.rst b/Doc/c-api/complex.rst index 9228ce852000230..e3fd001c599c800 100644 --- a/Doc/c-api/complex.rst +++ b/Doc/c-api/complex.rst @@ -5,7 +5,7 @@ Complex Number Objects ---------------------- -.. index:: object: complex number +.. index:: pair: object; complex number Python's complex number objects are implemented as two distinct types when viewed from the C API: one is the Python object exposed to Python programs, and @@ -64,7 +64,7 @@ pointers. This is consistent throughout the API. representation. If *divisor* is null, this method returns zero and sets - :c:data:`errno` to :c:data:`EDOM`. + :c:data:`errno` to :c:macro:`!EDOM`. .. c:function:: Py_complex _Py_c_pow(Py_complex num, Py_complex exp) @@ -73,7 +73,7 @@ pointers. This is consistent throughout the API. representation. If *num* is null and *exp* is not a positive real number, - this method returns zero and sets :c:data:`errno` to :c:data:`EDOM`. + this method returns zero and sets :c:data:`errno` to :c:macro:`!EDOM`. Complex Numbers as Python Objects @@ -127,12 +127,12 @@ Complex Numbers as Python Objects Return the :c:type:`Py_complex` value of the complex number *op*. - If *op* is not a Python complex number object but has a :meth:`__complex__` + If *op* is not a Python complex number object but has a :meth:`~object.__complex__` method, this method will first be called to convert *op* to a Python complex - number object. If ``__complex__()`` is not defined then it falls back to - :meth:`__float__`. If ``__float__()`` is not defined then it falls back - to :meth:`__index__`. Upon failure, this method returns ``-1.0`` as a real + number object. If :meth:`!__complex__` is not defined then it falls back to + :meth:`~object.__float__`. 
If :meth:`!__float__` is not defined then it falls back + to :meth:`~object.__index__`. Upon failure, this method returns ``-1.0`` as a real value. .. versionchanged:: 3.8 - Use :meth:`__index__` if available. + Use :meth:`~object.__index__` if available. diff --git a/Doc/c-api/concrete.rst b/Doc/c-api/concrete.rst index 8d3124a12fa9d25..880f7b15ce68e82 100644 --- a/Doc/c-api/concrete.rst +++ b/Doc/c-api/concrete.rst @@ -40,7 +40,7 @@ This section describes Python type objects and the singleton object ``None``. Numeric Objects =============== -.. index:: object: numeric +.. index:: pair: object; numeric .. toctree:: @@ -55,7 +55,7 @@ Numeric Objects Sequence Objects ================ -.. index:: object: sequence +.. index:: pair: object; sequence Generic operations on sequence objects were discussed in the previous chapter; this section deals with the specific kinds of sequence objects that are @@ -77,7 +77,7 @@ intrinsic to the Python language. Container Objects ================= -.. index:: object: mapping +.. index:: pair: object; mapping .. toctree:: diff --git a/Doc/c-api/conversion.rst b/Doc/c-api/conversion.rst index fdb321fe7ab3f24..c5350123dfdfdcd 100644 --- a/Doc/c-api/conversion.rst +++ b/Doc/c-api/conversion.rst @@ -119,10 +119,10 @@ The following functions provide locale-independent string to number conversions. .. c:function:: int PyOS_stricmp(const char *s1, const char *s2) Case insensitive comparison of strings. The function works almost - identically to :c:func:`strcmp` except that it ignores the case. + identically to :c:func:`!strcmp` except that it ignores the case. .. c:function:: int PyOS_strnicmp(const char *s1, const char *s2, Py_ssize_t size) Case insensitive comparison of strings. The function works almost - identically to :c:func:`strncmp` except that it ignores the case. + identically to :c:func:`!strncmp` except that it ignores the case. diff --git a/Doc/c-api/datetime.rst b/Doc/c-api/datetime.rst index 72fc07afbf1f4db..97522da773477e8 100644 --- a/Doc/c-api/datetime.rst +++ b/Doc/c-api/datetime.rst @@ -8,11 +8,54 @@ DateTime Objects Various date and time objects are supplied by the :mod:`datetime` module. Before using any of these functions, the header file :file:`datetime.h` must be included in your source (note that this is not included by :file:`Python.h`), -and the macro :c:macro:`PyDateTime_IMPORT` must be invoked, usually as part of +and the macro :c:macro:`!PyDateTime_IMPORT` must be invoked, usually as part of the module initialisation function. The macro puts a pointer to a C structure -into a static variable, :c:data:`PyDateTimeAPI`, that is used by the following +into a static variable, :c:data:`!PyDateTimeAPI`, that is used by the following macros. +.. c:type:: PyDateTime_Date + + This subtype of :c:type:`PyObject` represents a Python date object. + +.. c:type:: PyDateTime_DateTime + + This subtype of :c:type:`PyObject` represents a Python datetime object. + +.. c:type:: PyDateTime_Time + + This subtype of :c:type:`PyObject` represents a Python time object. + +.. c:type:: PyDateTime_Delta + + This subtype of :c:type:`PyObject` represents the difference between two datetime values. + +.. c:var:: PyTypeObject PyDateTime_DateType + + This instance of :c:type:`PyTypeObject` represents the Python date type; + it is the same object as :class:`datetime.date` in the Python layer. + +.. 
c:var:: PyTypeObject PyDateTime_DateTimeType + + This instance of :c:type:`PyTypeObject` represents the Python datetime type; + it is the same object as :class:`datetime.datetime` in the Python layer. + +.. c:var:: PyTypeObject PyDateTime_TimeType + + This instance of :c:type:`PyTypeObject` represents the Python time type; + it is the same object as :class:`datetime.time` in the Python layer. + +.. c:var:: PyTypeObject PyDateTime_DeltaType + + This instance of :c:type:`PyTypeObject` represents Python type for + the difference between two datetime values; + it is the same object as :class:`datetime.timedelta` in the Python layer. + +.. c:var:: PyTypeObject PyDateTime_TZInfoType + + This instance of :c:type:`PyTypeObject` represents the Python time zone info type; + it is the same object as :class:`datetime.tzinfo` in the Python layer. + + Macro for access to the UTC singleton: .. c:var:: PyObject* PyDateTime_TimeZone_UTC @@ -28,7 +71,7 @@ Type-check macros: .. c:function:: int PyDate_Check(PyObject *ob) Return true if *ob* is of type :c:data:`PyDateTime_DateType` or a subtype of - :c:data:`PyDateTime_DateType`. *ob* must not be ``NULL``. This function always + :c:data:`!PyDateTime_DateType`. *ob* must not be ``NULL``. This function always succeeds. @@ -41,7 +84,7 @@ Type-check macros: .. c:function:: int PyDateTime_Check(PyObject *ob) Return true if *ob* is of type :c:data:`PyDateTime_DateTimeType` or a subtype of - :c:data:`PyDateTime_DateTimeType`. *ob* must not be ``NULL``. This function always + :c:data:`!PyDateTime_DateTimeType`. *ob* must not be ``NULL``. This function always succeeds. @@ -54,7 +97,7 @@ Type-check macros: .. c:function:: int PyTime_Check(PyObject *ob) Return true if *ob* is of type :c:data:`PyDateTime_TimeType` or a subtype of - :c:data:`PyDateTime_TimeType`. *ob* must not be ``NULL``. This function always + :c:data:`!PyDateTime_TimeType`. *ob* must not be ``NULL``. This function always succeeds. @@ -67,7 +110,7 @@ Type-check macros: .. c:function:: int PyDelta_Check(PyObject *ob) Return true if *ob* is of type :c:data:`PyDateTime_DeltaType` or a subtype of - :c:data:`PyDateTime_DeltaType`. *ob* must not be ``NULL``. This function always + :c:data:`!PyDateTime_DeltaType`. *ob* must not be ``NULL``. This function always succeeds. @@ -80,7 +123,7 @@ Type-check macros: .. c:function:: int PyTZInfo_Check(PyObject *ob) Return true if *ob* is of type :c:data:`PyDateTime_TZInfoType` or a subtype of - :c:data:`PyDateTime_TZInfoType`. *ob* must not be ``NULL``. This function always + :c:data:`!PyDateTime_TZInfoType`. *ob* must not be ``NULL``. This function always succeeds. @@ -133,7 +176,7 @@ Macros to create objects: :class:`datetime.timedelta` objects. -.. c:function:: PyObject* PyTimeZone_FromOffset(PyDateTime_DeltaType* offset) +.. c:function:: PyObject* PyTimeZone_FromOffset(PyObject *offset) Return a :class:`datetime.timezone` object with an unnamed fixed offset represented by the *offset* argument. @@ -141,7 +184,7 @@ Macros to create objects: .. versionadded:: 3.7 -.. c:function:: PyObject* PyTimeZone_FromOffsetAndName(PyDateTime_DeltaType* offset, PyUnicode* name) +.. c:function:: PyObject* PyTimeZone_FromOffsetAndName(PyObject *offset, PyObject *name) Return a :class:`datetime.timezone` object with a fixed offset represented by the *offset* argument and with tzname *name*. @@ -150,8 +193,8 @@ Macros to create objects: Macros to extract fields from date objects. 
The argument must be an instance of -:c:data:`PyDateTime_Date`, including subclasses (such as -:c:data:`PyDateTime_DateTime`). The argument must not be ``NULL``, and the type is +:c:type:`PyDateTime_Date`, including subclasses (such as +:c:type:`PyDateTime_DateTime`). The argument must not be ``NULL``, and the type is not checked: .. c:function:: int PyDateTime_GET_YEAR(PyDateTime_Date *o) @@ -170,7 +213,7 @@ not checked: Macros to extract fields from datetime objects. The argument must be an -instance of :c:data:`PyDateTime_DateTime`, including subclasses. The argument +instance of :c:type:`PyDateTime_DateTime`, including subclasses. The argument must not be ``NULL``, and the type is not checked: .. c:function:: int PyDateTime_DATE_GET_HOUR(PyDateTime_DateTime *o) @@ -208,7 +251,7 @@ must not be ``NULL``, and the type is not checked: Macros to extract fields from time objects. The argument must be an instance of -:c:data:`PyDateTime_Time`, including subclasses. The argument must not be ``NULL``, +:c:type:`PyDateTime_Time`, including subclasses. The argument must not be ``NULL``, and the type is not checked: .. c:function:: int PyDateTime_TIME_GET_HOUR(PyDateTime_Time *o) @@ -246,7 +289,7 @@ and the type is not checked: Macros to extract fields from time delta objects. The argument must be an -instance of :c:data:`PyDateTime_Delta`, including subclasses. The argument must +instance of :c:type:`PyDateTime_Delta`, including subclasses. The argument must not be ``NULL``, and the type is not checked: .. c:function:: int PyDateTime_DELTA_GET_DAYS(PyDateTime_Delta *o) diff --git a/Doc/c-api/dict.rst b/Doc/c-api/dict.rst index e5f28b59a701e00..8471c98d0448721 100644 --- a/Doc/c-api/dict.rst +++ b/Doc/c-api/dict.rst @@ -5,7 +5,7 @@ Dictionary Objects ------------------ -.. index:: object: dictionary +.. index:: pair: object; dictionary .. c:type:: PyDictObject @@ -55,6 +55,15 @@ Dictionary Objects This is equivalent to the Python expression ``key in p``. +.. c:function:: int PyDict_ContainsString(PyObject *p, const char *key) + + This is the same as :c:func:`PyDict_Contains`, but *key* is specified as a + :c:expr:`const char*` UTF-8 encoded bytes string, rather than a + :c:expr:`PyObject*`. + + .. versionadded:: 3.13 + + .. c:function:: PyObject* PyDict_Copy(PyObject *p) Return a new dictionary that contains the same key-value pairs as *p*. @@ -70,17 +79,14 @@ Dictionary Objects .. c:function:: int PyDict_SetItemString(PyObject *p, const char *key, PyObject *val) - .. index:: single: PyUnicode_FromString() - - Insert *val* into the dictionary *p* using *key* as a key. *key* should - be a :c:expr:`const char*`. The key object is created using - ``PyUnicode_FromString(key)``. Return ``0`` on success or ``-1`` on - failure. This function *does not* steal a reference to *val*. + This is the same as :c:func:`PyDict_SetItem`, but *key* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. .. c:function:: int PyDict_DelItem(PyObject *p, PyObject *key) - Remove the entry in dictionary *p* with key *key*. *key* must be hashable; + Remove the entry in dictionary *p* with key *key*. *key* must be :term:`hashable`; if it isn't, :exc:`TypeError` is raised. If *key* is not in the dictionary, :exc:`KeyError` is raised. Return ``0`` on success or ``-1`` on failure. @@ -88,19 +94,37 @@ Dictionary Objects .. c:function:: int PyDict_DelItemString(PyObject *p, const char *key) - Remove the entry in dictionary *p* which has a key specified by the string *key*. 
- If *key* is not in the dictionary, :exc:`KeyError` is raised. - Return ``0`` on success or ``-1`` on failure. + This is the same as :c:func:`PyDict_DelItem`, but *key* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. + + +.. c:function:: int PyDict_GetItemRef(PyObject *p, PyObject *key, PyObject **result) + + Return a new :term:`strong reference` to the object from dictionary *p* + which has a key *key*: + + * If the key is present, set *\*result* to a new :term:`strong reference` + to the value and return ``1``. + * If the key is missing, set *\*result* to ``NULL`` and return ``0``. + * On error, raise an exception and return ``-1``. + + .. versionadded:: 3.13 + + See also the :c:func:`PyObject_GetItem` function. .. c:function:: PyObject* PyDict_GetItem(PyObject *p, PyObject *key) - Return the object from dictionary *p* which has a key *key*. Return ``NULL`` - if the key *key* is not present, but *without* setting an exception. + Return a :term:`borrowed reference` to the object from dictionary *p* which + has a key *key*. Return ``NULL`` if the key *key* is missing *without* + setting an exception. + + .. note:: - Note that exceptions which occur while calling :meth:`__hash__` and - :meth:`__eq__` methods will get suppressed. - To get error reporting use :c:func:`PyDict_GetItemWithError()` instead. + Exceptions that occur while this calls :meth:`~object.__hash__` and + :meth:`~object.__eq__` methods are silently ignored. + Prefer the :c:func:`PyDict_GetItemWithError` function instead. .. versionchanged:: 3.10 Calling this API without :term:`GIL` held had been allowed for historical @@ -118,12 +142,25 @@ Dictionary Objects .. c:function:: PyObject* PyDict_GetItemString(PyObject *p, const char *key) This is the same as :c:func:`PyDict_GetItem`, but *key* is specified as a - :c:expr:`const char*`, rather than a :c:expr:`PyObject*`. + :c:expr:`const char*` UTF-8 encoded bytes string, rather than a + :c:expr:`PyObject*`. + + .. note:: + + Exceptions that occur while this calls :meth:`~object.__hash__` and + :meth:`~object.__eq__` methods or while creating the temporary :class:`str` + object are silently ignored. + Prefer using the :c:func:`PyDict_GetItemWithError` function with your own + :c:func:`PyUnicode_FromString` *key* instead. - Note that exceptions which occur while calling :meth:`__hash__` and - :meth:`__eq__` methods and creating a temporary string object - will get suppressed. - To get error reporting use :c:func:`PyDict_GetItemWithError()` instead. + +.. c:function:: int PyDict_GetItemStringRef(PyObject *p, const char *key, PyObject **result) + + Similar to :c:func:`PyDict_GetItemRef`, but *key* is specified as a + :c:expr:`const char*` UTF-8 encoded bytes string, rather than a + :c:expr:`PyObject*`. + + .. versionadded:: 3.13 .. c:function:: PyObject* PyDict_SetDefault(PyObject *p, PyObject *key, PyObject *defaultobj) @@ -136,6 +173,33 @@ Dictionary Objects .. versionadded:: 3.4 + +.. c:function:: int PyDict_Pop(PyObject *p, PyObject *key, PyObject **result) + + Remove *key* from dictionary *p* and optionally return the removed value. + Do not raise :exc:`KeyError` if the key is missing. + + - If the key is present, set *\*result* to a new reference to the removed + value if *result* is not ``NULL``, and return ``1``. + - If the key is missing, set *\*result* to ``NULL`` if *result* is not + ``NULL``, and return ``0``. + - On error, raise an exception and return ``-1``.
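(Editor's aside, not part of the patch under review: the new lookup and pop functions above share a three-way ``1`` / ``0`` / ``-1`` return convention. A minimal sketch of the pattern, using :c:func:`PyDict_GetItemRef`; the helper name and its fallback behaviour are invented purely for illustration.) ::

    /* Hypothetical helper: look up *key*, falling back to a default value.
       Illustrates the 1 / 0 / -1 return convention described above. */
    static PyObject *
    lookup_or_default(PyObject *dict, PyObject *key, PyObject *default_value)
    {
        PyObject *value;
        int rc = PyDict_GetItemRef(dict, key, &value);
        if (rc < 0) {
            return NULL;                      /* error: exception already set */
        }
        if (rc == 0) {
            return Py_NewRef(default_value);  /* key missing, no exception set */
        }
        return value;                         /* new strong reference to the value */
    }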
+ + This is similar to :meth:`dict.pop`, but without the default value and + not raising :exc:`KeyError` if the key is missing. + + .. versionadded:: 3.13 + + +.. c:function:: int PyDict_PopString(PyObject *p, const char *key, PyObject **result) + + Similar to :c:func:`PyDict_Pop`, but *key* is specified as a + :c:expr:`const char*` UTF-8 encoded bytes string, rather than a + :c:expr:`PyObject*`. + + .. versionadded:: 3.13 + + .. c:function:: PyObject* PyDict_Items(PyObject *p) Return a :c:type:`PyListObject` containing all the items from the dictionary. @@ -154,7 +218,7 @@ Dictionary Objects .. c:function:: Py_ssize_t PyDict_Size(PyObject *p) - .. index:: builtin: len + .. index:: pair: built-in function; len Return the number of items in the dictionary. This is equivalent to ``len(p)`` on a dictionary. @@ -298,13 +362,26 @@ Dictionary Objects dictionary. The callback may inspect but must not modify *dict*; doing so could have - unpredictable effects, including infinite recursion. + unpredictable effects, including infinite recursion. Do not trigger Python + code execution in the callback, as it could modify the dict as a side effect. + + If *event* is ``PyDict_EVENT_DEALLOCATED``, taking a new reference in the + callback to the about-to-be-destroyed dictionary will resurrect it and + prevent it from being freed at this time. When the resurrected object is + destroyed later, any watcher callbacks active at that time will be called + again. Callbacks occur before the notified modification to *dict* takes place, so the prior state of *dict* can be inspected. - If the callback returns with an exception set, it must return ``-1``; this - exception will be printed as an unraisable exception using - :c:func:`PyErr_WriteUnraisable`. Otherwise it should return ``0``. + If the callback sets an exception, it must return ``-1``; this exception will + be printed as an unraisable exception using :c:func:`PyErr_WriteUnraisable`. + Otherwise it should return ``0``. + + There may already be a pending exception set on entry to the callback. In + this case, the callback should return ``0`` with the same exception still + set. This means the callback may not call any other API that can set an + exception unless it saves and clears the exception state first, and restores + it before returning. .. versionadded:: 3.12 diff --git a/Doc/c-api/exceptions.rst b/Doc/c-api/exceptions.rst index 087e0a61d12d59a..a3a63b38c432f2b 100644 --- a/Doc/c-api/exceptions.rst +++ b/Doc/c-api/exceptions.rst @@ -60,9 +60,14 @@ Printing and clearing Call this function **only** when the error indicator is set. Otherwise it will cause a fatal error! - If *set_sys_last_vars* is nonzero, the variables :data:`sys.last_type`, - :data:`sys.last_value` and :data:`sys.last_traceback` will be set to the - type, value and traceback of the printed exception, respectively. + If *set_sys_last_vars* is nonzero, the variable :data:`sys.last_exc` is + set to the printed exception. For backwards compatibility, the + deprecated variables :data:`sys.last_type`, :data:`sys.last_value` and + :data:`sys.last_traceback` are also set to the type, value and traceback + of this exception, respectively. + + .. versionchanged:: 3.12 + The setting of :data:`sys.last_exc` was added. .. c:function:: void PyErr_Print() @@ -78,14 +83,41 @@ Printing and clearing This utility function prints a warning message to ``sys.stderr`` when an exception has been set but it is impossible for the interpreter to actually raise the exception.
It is used, for example, when an exception occurs in an - :meth:`__del__` method. + :meth:`~object.__del__` method. The function is called with a single argument *obj* that identifies the context in which the unraisable exception occurred. If possible, the repr of *obj* will be printed in the warning message. + If *obj* is ``NULL``, only the traceback is printed. An exception must be set when calling this function. + .. versionchanged:: 3.4 + Print a traceback. Print only traceback if *obj* is ``NULL``. + + .. versionchanged:: 3.8 + Use :func:`sys.unraisablehook`. + + +.. c:function:: void PyErr_FormatUnraisable(const char *format, ...) + + Similar to :c:func:`PyErr_WriteUnraisable`, but the *format* and subsequent + parameters help format the warning message; they have the same meaning and + values as in :c:func:`PyUnicode_FromFormat`. + ``PyErr_WriteUnraisable(obj)`` is roughly equivalent to + ``PyErr_FormatUnraisable("Exception ignored in: %R", obj)``. + If *format* is ``NULL``, only the traceback is printed. + + .. versionadded:: 3.13 + + +.. c:function:: void PyErr_DisplayException(PyObject *exc) + + Print the standard traceback display of ``exc`` to ``sys.stderr``, including + chained exceptions and notes. + + .. versionadded:: 3.12 + Raising exceptions ================== @@ -99,7 +131,8 @@ For convenience, some of these functions will always return a This is the most common way to set the error indicator. The first argument specifies the exception type; it is normally one of the standard exceptions, - e.g. :c:data:`PyExc_RuntimeError`. You need not increment its reference count. + e.g. :c:data:`PyExc_RuntimeError`. You need not create a new + :term:`strong reference` to it (e.g. with :c:func:`Py_INCREF`). The second argument is an error message; it is decoded from ``'utf-8'``. @@ -152,9 +185,9 @@ For convenience, some of these functions will always return a This is a convenience function to raise an exception when a C library function has returned an error and set the C variable :c:data:`errno`. It constructs a tuple object whose first item is the integer :c:data:`errno` value and whose - second item is the corresponding error message (gotten from :c:func:`strerror`), + second item is the corresponding error message (gotten from :c:func:`!strerror`), and then calls ``PyErr_SetObject(type, object)``. On Unix, when the - :c:data:`errno` value is :const:`EINTR`, indicating an interrupted system call, + :c:data:`errno` value is :c:macro:`!EINTR`, indicating an interrupted system call, this calls :c:func:`PyErr_CheckSignals`, and if that set the error indicator, leaves it set to that. The function always returns ``NULL``, so a wrapper function around a system call can write ``return PyErr_SetFromErrno(type);`` @@ -166,7 +199,7 @@ For convenience, some of these functions will always return a Similar to :c:func:`PyErr_SetFromErrno`, with the additional behavior that if *filenameObject* is not ``NULL``, it is passed to the constructor of *type* as a third parameter. In the case of :exc:`OSError` exception, - this is used to define the :attr:`filename` attribute of the + this is used to define the :attr:`!filename` attribute of the exception instance. @@ -189,12 +222,12 @@ For convenience, some of these functions will always return a .. c:function:: PyObject* PyErr_SetFromWindowsErr(int ierr) This is a convenience function to raise :exc:`WindowsError`. If called with - *ierr* of ``0``, the error code returned by a call to :c:func:`GetLastError` - is used instead.
It calls the Win32 function :c:func:`FormatMessage` to retrieve - the Windows description of error code given by *ierr* or :c:func:`GetLastError`, + *ierr* of ``0``, the error code returned by a call to :c:func:`!GetLastError` + is used instead. It calls the Win32 function :c:func:`!FormatMessage` to retrieve + the Windows description of error code given by *ierr* or :c:func:`!GetLastError`, then it constructs a tuple object whose first item is the *ierr* value and whose second item is the corresponding error message (gotten from - :c:func:`FormatMessage`), and then calls ``PyErr_SetObject(PyExc_WindowsError, + :c:func:`!FormatMessage`), and then calls ``PyErr_SetObject(PyExc_WindowsError, object)``. This function always returns ``NULL``. .. availability:: Windows. @@ -210,17 +243,21 @@ For convenience, some of these functions will always return a .. c:function:: PyObject* PyErr_SetFromWindowsErrWithFilename(int ierr, const char *filename) - Similar to :c:func:`PyErr_SetFromWindowsErrWithFilenameObject`, but the - filename is given as a C string. *filename* is decoded from the filesystem - encoding (:func:`os.fsdecode`). + Similar to :c:func:`PyErr_SetFromWindowsErr`, with the additional behavior + that if *filename* is not ``NULL``, it is decoded from the filesystem + encoding (:func:`os.fsdecode`) and passed to the constructor of + :exc:`OSError` as a third parameter to be used to define the + :attr:`!filename` attribute of the exception instance. .. availability:: Windows. .. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilenameObject(PyObject *type, int ierr, PyObject *filename) - Similar to :c:func:`PyErr_SetFromWindowsErrWithFilenameObject`, with an - additional parameter specifying the exception type to be raised. + Similar to :c:func:`PyErr_SetExcFromWindowsErr`, with the additional behavior + that if *filename* is not ``NULL``, it is passed to the constructor of + :exc:`OSError` as a third parameter to be used to define the + :attr:`!filename` attribute of the exception instance. .. availability:: Windows. @@ -400,8 +437,48 @@ Querying the error indicator recursively in subtuples) are searched for a match. +.. c:function:: PyObject *PyErr_GetRaisedException(void) + + Return the exception currently being raised, clearing the error indicator at + the same time. + + This function is used by code that needs to catch exceptions, + or code that needs to save and restore the error indicator temporarily. + + For example:: + + { + PyObject *exc = PyErr_GetRaisedException(); + + /* ... code that might produce other errors ... */ + + PyErr_SetRaisedException(exc); + } + + .. seealso:: :c:func:`PyErr_GetHandledException`, + to save the exception currently being handled. + + .. versionadded:: 3.12 + + +.. c:function:: void PyErr_SetRaisedException(PyObject *exc) + + Set *exc* as the exception currently being raised, + clearing the existing exception if one is set. + + .. warning:: + + This call steals a reference to *exc*, which must be a valid exception. + + .. versionadded:: 3.12 + + .. c:function:: void PyErr_Fetch(PyObject **ptype, PyObject **pvalue, PyObject **ptraceback) + .. deprecated:: 3.12 + + Use :c:func:`PyErr_GetRaisedException` instead. + Retrieve the error indicator into three variables whose addresses are passed. If the error indicator is not set, set all three variables to ``NULL``. If it is set, it will be cleared and you own a reference to each object retrieved. The @@ -409,8 +486,10 @@ Querying the error indicator .. 
note:: - This function is normally only used by code that needs to catch exceptions or - by code that needs to save and restore the error indicator temporarily, e.g.:: + This function is normally only used by legacy code that needs to catch + exceptions or save and restore the error indicator temporarily. + + For example:: { PyObject *type, *value, *traceback; @@ -424,8 +503,14 @@ Querying the error indicator .. c:function:: void PyErr_Restore(PyObject *type, PyObject *value, PyObject *traceback) - Set the error indicator from the three objects. If the error indicator is - already set, it is cleared first. If the objects are ``NULL``, the error + .. deprecated:: 3.12 + + Use :c:func:`PyErr_SetRaisedException` instead. + + Set the error indicator from the three objects, + *type*, *value*, and *traceback*, + clearing the existing exception if one is set. + If the objects are ``NULL``, the error indicator is cleared. Do not pass a ``NULL`` type and non-``NULL`` value or traceback. The exception type should be a class. Do not pass an invalid exception type or value. (Violating these rules will cause subtle problems @@ -436,13 +521,18 @@ Querying the error indicator .. note:: - This function is normally only used by code that needs to save and restore the - error indicator temporarily. Use :c:func:`PyErr_Fetch` to save the current - error indicator. + This function is normally only used by legacy code that needs to + save and restore the error indicator temporarily. + Use :c:func:`PyErr_Fetch` to save the current error indicator. .. c:function:: void PyErr_NormalizeException(PyObject **exc, PyObject **val, PyObject **tb) + .. deprecated:: 3.12 + + Use :c:func:`PyErr_GetRaisedException` instead, + to avoid any possible de-normalization. + Under certain circumstances, the values returned by :c:func:`PyErr_Fetch` below can be "unnormalized", meaning that ``*exc`` is a class object but ``*val`` is not an instance of the same class. This function can be used to instantiate @@ -543,7 +633,7 @@ Signal Handling .. c:function:: int PyErr_CheckSignals() .. index:: - module: signal + pair: module; signal single: SIGINT single: KeyboardInterrupt (built-in exception) @@ -567,18 +657,18 @@ Signal Handling be interruptible by user requests (such as by pressing Ctrl-C). .. note:: - The default Python signal handler for :const:`SIGINT` raises the + The default Python signal handler for :c:macro:`!SIGINT` raises the :exc:`KeyboardInterrupt` exception. .. c:function:: void PyErr_SetInterrupt() .. index:: - module: signal + pair: module; signal single: SIGINT single: KeyboardInterrupt (built-in exception) - Simulate the effect of a :const:`SIGINT` signal arriving. + Simulate the effect of a :c:macro:`!SIGINT` signal arriving. This is equivalent to ``PyErr_SetInterruptEx(SIGINT)``. .. note:: @@ -589,7 +679,7 @@ Signal Handling .. c:function:: int PyErr_SetInterruptEx(int signum) .. index:: - module: signal + pair: module; signal single: KeyboardInterrupt (built-in exception) Simulate the effect of a signal arriving. The next time @@ -602,7 +692,7 @@ Signal Handling to interrupt an operation). If the given signal isn't handled by Python (it was set to - :data:`signal.SIG_DFL` or :data:`signal.SIG_IGN`), it will be ignored. + :py:const:`signal.SIG_DFL` or :py:const:`signal.SIG_IGN`), it will be ignored. If *signum* is outside of the allowed range of signal numbers, ``-1`` is returned. Otherwise, ``0`` is returned. The error indicator is @@ -690,7 +780,7 @@ Exception Objects .. 
c:function:: PyObject* PyException_GetCause(PyObject *ex) - Return the cause (either an exception instance, or :const:`None`, + Return the cause (either an exception instance, or ``None``, set by ``raise ... from ...``) associated with the exception as a new reference, as accessible from Python through :attr:`__cause__`. @@ -699,11 +789,33 @@ Exception Objects Set the cause associated with the exception to *cause*. Use ``NULL`` to clear it. There is no type check to make sure that *cause* is either an exception - instance or :const:`None`. This steals a reference to *cause*. + instance or ``None``. This steals a reference to *cause*. :attr:`__suppress_context__` is implicitly set to ``True`` by this function. +.. c:function:: PyObject* PyException_GetArgs(PyObject *ex) + + Return :attr:`~BaseException.args` of exception *ex*. + + +.. c:function:: void PyException_SetArgs(PyObject *ex, PyObject *args) + + Set :attr:`~BaseException.args` of exception *ex* to *args*. + +.. c:function:: PyObject* PyUnstable_Exc_PrepReraiseStar(PyObject *orig, PyObject *excs) + + Implement part of the interpreter's implementation of :keyword:`!except*`. + *orig* is the original exception that was caught, and *excs* is the list of + the exceptions that need to be raised. This list contains the unhandled + part of *orig*, if any, as well as the exceptions that were raised from the + :keyword:`!except*` clauses (so they have a different traceback from *orig*) and + those that were reraised (and have the same traceback as *orig*). + Return the :exc:`ExceptionGroup` that needs to be reraised in the end, or + ``None`` if there is nothing to reraise. + + .. versionadded:: 3.12 + .. _unicodeexceptions: Unicode Exception Objects @@ -788,7 +900,7 @@ because the :ref:`call protocol ` takes care of recursion handling. Marks a point where a recursive C-level call is about to be performed. - If :const:`USE_STACKCHECK` is defined, this function checks if the OS + If :c:macro:`USE_STACKCHECK` is defined, this function checks if the OS stack overflowed using :c:func:`PyOS_CheckStack`. In this is the case, it sets a :exc:`MemoryError` and returns a nonzero value. @@ -801,7 +913,7 @@ because the :ref:`call protocol ` takes care of recursion handling. depth limit. .. versionchanged:: 3.9 - This function is now also available in the limited API. + This function is now also available in the :ref:`limited API `. .. c:function:: void Py_LeaveRecursiveCall(void) @@ -809,7 +921,7 @@ because the :ref:`call protocol ` takes care of recursion handling. *successful* invocation of :c:func:`Py_EnterRecursiveCall`. .. versionchanged:: 3.9 - This function is now also available in the limited API. + This function is now also available in the :ref:`limited API `. Properly implementing :c:member:`~PyTypeObject.tp_repr` for container types requires special recursion handling. In addition to protecting the stack, diff --git a/Doc/c-api/file.rst b/Doc/c-api/file.rst index 58ed58e54668599..b36c800e00444ac 100644 --- a/Doc/c-api/file.rst +++ b/Doc/c-api/file.rst @@ -5,7 +5,7 @@ File Objects ------------ -.. index:: object: file +.. index:: pair: object; file These APIs are a minimal emulation of the Python 2 C API for built-in file objects, which used to rely on the buffered I/O (:c:expr:`FILE*`) support @@ -93,7 +93,7 @@ the :mod:`io` APIs instead. .. index:: single: Py_PRINT_RAW Write object *obj* to file object *p*. 
The only supported flag for *flags* is - :const:`Py_PRINT_RAW`; if given, the :func:`str` of the object is written + :c:macro:`Py_PRINT_RAW`; if given, the :func:`str` of the object is written instead of the :func:`repr`. Return ``0`` on success or ``-1`` on failure; the appropriate exception will be set. diff --git a/Doc/c-api/float.rst b/Doc/c-api/float.rst index 023b12c20b7c83f..4f6ac0d8175c6b2 100644 --- a/Doc/c-api/float.rst +++ b/Doc/c-api/float.rst @@ -3,9 +3,9 @@ .. _floatobjects: Floating Point Objects ----------------------- +====================== -.. index:: object: floating point +.. index:: pair: object; floating point .. c:type:: PyFloatObject @@ -45,14 +45,14 @@ Floating Point Objects .. c:function:: double PyFloat_AsDouble(PyObject *pyfloat) Return a C :c:expr:`double` representation of the contents of *pyfloat*. If - *pyfloat* is not a Python floating point object but has a :meth:`__float__` + *pyfloat* is not a Python floating point object but has a :meth:`~object.__float__` method, this method will first be called to convert *pyfloat* into a float. - If ``__float__()`` is not defined then it falls back to :meth:`__index__`. + If :meth:`!__float__` is not defined then it falls back to :meth:`~object.__index__`. This method returns ``-1.0`` upon failure, so one should call :c:func:`PyErr_Occurred` to check for errors. .. versionchanged:: 3.8 - Use :meth:`__index__` if available. + Use :meth:`~object.__index__` if available. .. c:function:: double PyFloat_AS_DOUBLE(PyObject *pyfloat) @@ -79,7 +79,7 @@ Floating Point Objects Pack and Unpack functions -========================= +------------------------- The pack and unpack functions provide an efficient platform-independent way to store floating-point values as byte strings. The Pack routines produce a bytes @@ -104,12 +104,12 @@ happens in such cases is partly accidental (alas). .. versionadded:: 3.11 Pack functions --------------- +^^^^^^^^^^^^^^ The pack routines write 2, 4 or 8 bytes, starting at *p*. *le* is an :c:expr:`int` argument, non-zero if you want the bytes string in little-endian format (exponent last, at ``p+1``, ``p+3``, or ``p+6`` ``p+7``), zero if you -want big-endian format (exponent first, at *p*). The :c:data:`PY_BIG_ENDIAN` +want big-endian format (exponent first, at *p*). The :c:macro:`PY_BIG_ENDIAN` constant can be used to use the native endian: it is equal to ``1`` on big endian processor, or ``0`` on little endian processor. @@ -135,12 +135,12 @@ There are two problems on non-IEEE platforms: Unpack functions ----------------- +^^^^^^^^^^^^^^^^ The unpack routines read 2, 4 or 8 bytes, starting at *p*. *le* is an :c:expr:`int` argument, non-zero if the bytes string is in little-endian format (exponent last, at ``p+1``, ``p+3`` or ``p+6`` and ``p+7``), zero if big-endian -(exponent first, at *p*). The :c:data:`PY_BIG_ENDIAN` constant can be used to +(exponent first, at *p*). The :c:macro:`PY_BIG_ENDIAN` constant can be used to use the native endian: it is equal to ``1`` on big endian processor, or ``0`` on little endian processor. diff --git a/Doc/c-api/frame.rst b/Doc/c-api/frame.rst index 46ce700abf14748..1accee2767a4852 100644 --- a/Doc/c-api/frame.rst +++ b/Doc/c-api/frame.rst @@ -19,6 +19,24 @@ can be used to get a frame object. See also :ref:`Reflection `. +.. c:var:: PyTypeObject PyFrame_Type + + The type of frame objects. + It is the same object as :py:class:`types.FrameType` in the Python layer. + + .. versionchanged:: 3.11 + + Previously, this type was only available after including + ````. 
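(Editor's aside, not part of the patch: returning briefly to the pack and unpack routines discussed a little earlier, a minimal round-trip sketch; it assumes the public ``PyFloat_Pack8`` and ``PyFloat_Unpack8`` functions documented in that file, with ``PyFloat_Pack8`` returning ``0``/``-1`` and ``PyFloat_Unpack8`` returning ``-1.0`` with an exception set on error.) ::

    /* Serialize a C double as 8 bytes of little-endian IEEE 754 binary64
       and read it back; a non-zero *le* argument selects little-endian. */
    char buf[8];
    double x = 1.5;

    if (PyFloat_Pack8(x, buf, 1) < 0) {
        return NULL;                 /* exception set on failure */
    }
    double y = PyFloat_Unpack8(buf, 1);
    if (y == -1.0 && PyErr_Occurred()) {
        return NULL;                 /* unpacking failed */
    }
    /* here y == x */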
+ +.. c:function:: int PyFrame_Check(PyObject *obj) + + Return non-zero if *obj* is a frame object. + + .. versionchanged:: 3.11 + + Previously, this function was only available after including + ````. .. c:function:: PyFrameObject* PyFrame_GetBack(PyFrameObject *frame) @@ -79,6 +97,27 @@ See also :ref:`Reflection `. .. versionadded:: 3.11 +.. c:function:: PyObject* PyFrame_GetVar(PyFrameObject *frame, PyObject *name) + + Get the variable *name* of *frame*. + + * Return a :term:`strong reference` to the variable value on success. + * Raise :exc:`NameError` and return ``NULL`` if the variable does not exist. + * Raise an exception and return ``NULL`` on error. + + *name* type must be a :class:`str`. + + .. versionadded:: 3.12 + + +.. c:function:: PyObject* PyFrame_GetVarString(PyFrameObject *frame, const char *name) + + Similar to :c:func:`PyFrame_GetVar`, but the variable name is a C string + encoded in UTF-8. + + .. versionadded:: 3.12 + + .. c:function:: PyObject* PyFrame_GetLocals(PyFrameObject *frame) Get the *frame*'s ``f_locals`` attribute (:class:`dict`). @@ -91,3 +130,38 @@ See also :ref:`Reflection `. .. c:function:: int PyFrame_GetLineNumber(PyFrameObject *frame) Return the line number that *frame* is currently executing. + + + +Internal Frames +^^^^^^^^^^^^^^^ + +Unless using :pep:`523`, you will not need this. + +.. c:struct:: _PyInterpreterFrame + + The interpreter's internal frame representation. + + .. versionadded:: 3.11 + +.. c:function:: PyObject* PyUnstable_InterpreterFrame_GetCode(struct _PyInterpreterFrame *frame); + + Return a :term:`strong reference` to the code object for the frame. + + .. versionadded:: 3.12 + + +.. c:function:: int PyUnstable_InterpreterFrame_GetLasti(struct _PyInterpreterFrame *frame); + + Return the byte offset into the last executed instruction. + + .. versionadded:: 3.12 + + +.. c:function:: int PyUnstable_InterpreterFrame_GetLine(struct _PyInterpreterFrame *frame); + + Return the currently executing line number, or -1 if there is no line number. + + .. versionadded:: 3.12 + + diff --git a/Doc/c-api/function.rst b/Doc/c-api/function.rst index df88e85e518829e..5857dba82c11c64 100644 --- a/Doc/c-api/function.rst +++ b/Doc/c-api/function.rst @@ -5,7 +5,7 @@ Function Objects ---------------- -.. index:: object: function +.. index:: pair: object; function There are a few functions specific to Python functions. @@ -118,3 +118,74 @@ There are a few functions specific to Python functions. must be a dictionary or ``Py_None``. Raises :exc:`SystemError` and returns ``-1`` on failure. + + +.. c:function:: int PyFunction_AddWatcher(PyFunction_WatchCallback callback) + + Register *callback* as a function watcher for the current interpreter. + Return an ID which may be passed to :c:func:`PyFunction_ClearWatcher`. + In case of error (e.g. no more watcher IDs available), + return ``-1`` and set an exception. + + .. versionadded:: 3.12 + + +.. c:function:: int PyFunction_ClearWatcher(int watcher_id) + + Clear watcher identified by *watcher_id* previously returned from + :c:func:`PyFunction_AddWatcher` for the current interpreter. + Return ``0`` on success, or ``-1`` and set an exception on error + (e.g. if the given *watcher_id* was never registered.) + + .. versionadded:: 3.12 + + +.. c:type:: PyFunction_WatchEvent + + Enumeration of possible function watcher events: + - ``PyFunction_EVENT_CREATE`` + - ``PyFunction_EVENT_DESTROY`` + - ``PyFunction_EVENT_MODIFY_CODE`` + - ``PyFunction_EVENT_MODIFY_DEFAULTS`` + - ``PyFunction_EVENT_MODIFY_KWDEFAULTS`` + + .. 
versionadded:: 3.12 + + +.. c:type:: int (*PyFunction_WatchCallback)(PyFunction_WatchEvent event, PyFunctionObject *func, PyObject *new_value) + + Type of a function watcher callback function. + + If *event* is ``PyFunction_EVENT_CREATE`` or ``PyFunction_EVENT_DESTROY`` + then *new_value* will be ``NULL``. Otherwise, *new_value* will hold a + :term:`borrowed reference` to the new value that is about to be stored in + *func* for the attribute that is being modified. + + The callback may inspect but must not modify *func*; doing so could have + unpredictable effects, including infinite recursion. + + If *event* is ``PyFunction_EVENT_CREATE``, then the callback is invoked + after *func* has been fully initialized. Otherwise, the callback is invoked + before the modification to *func* takes place, so the prior state of *func* + can be inspected. The runtime is permitted to optimize away the creation of + function objects when possible. In such cases no event will be emitted. + Although this creates the possibility of an observable difference of + runtime behavior depending on optimization decisions, it does not change + the semantics of the Python code being executed. + + If *event* is ``PyFunction_EVENT_DESTROY``, taking a reference in the + callback to the about-to-be-destroyed function will resurrect it, preventing + it from being freed at this time. When the resurrected object is destroyed + later, any watcher callbacks active at that time will be called again. + + If the callback sets an exception, it must return ``-1``; this exception will + be printed as an unraisable exception using :c:func:`PyErr_WriteUnraisable`. + Otherwise it should return ``0``. + + There may already be a pending exception set on entry to the callback. In + this case, the callback should return ``0`` with the same exception still + set. This means the callback may not call any other API that can set an + exception unless it saves and clears the exception state first, and restores + it before returning. + + .. versionadded:: 3.12 diff --git a/Doc/c-api/gcsupport.rst b/Doc/c-api/gcsupport.rst index 8c90d1e8991c100..6b2494ee4f0ed4c 100644 --- a/Doc/c-api/gcsupport.rst +++ b/Doc/c-api/gcsupport.rst @@ -13,22 +13,20 @@ or strings), do not need to provide any explicit support for garbage collection. To create a container type, the :c:member:`~PyTypeObject.tp_flags` field of the type object must -include the :const:`Py_TPFLAGS_HAVE_GC` and provide an implementation of the +include the :c:macro:`Py_TPFLAGS_HAVE_GC` flag and provide an implementation of the :c:member:`~PyTypeObject.tp_traverse` handler. If instances of the type are mutable, a :c:member:`~PyTypeObject.tp_clear` implementation must also be provided. -.. data:: Py_TPFLAGS_HAVE_GC - :noindex: - +:c:macro:`Py_TPFLAGS_HAVE_GC` Objects with a type with this flag set must conform with the rules documented here. For convenience these objects will be referred to as container objects. Constructors for container types must conform to two rules: -#. The memory for the object must be allocated using :c:func:`PyObject_GC_New` - or :c:func:`PyObject_GC_NewVar`. +#. The memory for the object must be allocated using :c:macro:`PyObject_GC_New` + or :c:macro:`PyObject_GC_NewVar`. #. Once all the fields which may contain references to other containers are initialized, it must call :c:func:`PyObject_GC_Track`.
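(Editor's aside, not part of the patch: a minimal sketch of the two constructor rules above. The ``MyContainer`` type and its constructor are hypothetical, and the type object is assumed to be defined elsewhere with :c:macro:`Py_TPFLAGS_HAVE_GC` set and a :c:member:`~PyTypeObject.tp_traverse` handler.) ::

    typedef struct {
        PyObject_HEAD
        PyObject *payload;    /* may reference other containers */
    } MyContainer;

    static PyObject *
    mycontainer_new(PyTypeObject *type)
    {
        /* Rule 1: allocate through the GC-aware allocator. */
        MyContainer *op = PyObject_GC_New(MyContainer, type);
        if (op == NULL) {
            return NULL;
        }
        op->payload = NULL;
        /* Rule 2: start tracking only once the fields are initialized. */
        PyObject_GC_Track(op);
        return (PyObject *)op;
    }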
@@ -52,23 +50,42 @@ rules: :c:member:`~PyTypeObject.tp_flags`, :c:member:`~PyTypeObject.tp_traverse` and :c:member:`~PyTypeObject.tp_clear` fields if the type inherits from a class that implements the garbage collector protocol and the child class - does *not* include the :const:`Py_TPFLAGS_HAVE_GC` flag. + does *not* include the :c:macro:`Py_TPFLAGS_HAVE_GC` flag. + +.. c:macro:: PyObject_GC_New(TYPE, typeobj) + + Analogous to :c:macro:`PyObject_New` but for container objects with the + :c:macro:`Py_TPFLAGS_HAVE_GC` flag set. + +.. c:macro:: PyObject_GC_NewVar(TYPE, typeobj, size) + + Analogous to :c:macro:`PyObject_NewVar` but for container objects with the + :c:macro:`Py_TPFLAGS_HAVE_GC` flag set. -.. c:function:: TYPE* PyObject_GC_New(TYPE, PyTypeObject *type) +.. c:function:: PyObject* PyUnstable_Object_GC_NewWithExtraData(PyTypeObject *type, size_t extra_size) - Analogous to :c:func:`PyObject_New` but for container objects with the - :const:`Py_TPFLAGS_HAVE_GC` flag set. + Analogous to :c:macro:`PyObject_GC_New` but allocates *extra_size* + bytes at the end of the object (at offset + :c:member:`~PyTypeObject.tp_basicsize`). + The allocated memory is initialized to zeros, + except for the :c:type:`Python object header `. + The extra data will be deallocated with the object, but otherwise it is + not managed by Python. -.. c:function:: TYPE* PyObject_GC_NewVar(TYPE, PyTypeObject *type, Py_ssize_t size) + .. warning:: + The function is marked as unstable because the final mechanism + for reserving extra data after an instance is not yet decided. + For allocating a variable number of fields, prefer using + :c:type:`PyVarObject` and :c:member:`~PyTypeObject.tp_itemsize` + instead. - Analogous to :c:func:`PyObject_NewVar` but for container objects with the - :const:`Py_TPFLAGS_HAVE_GC` flag set. + .. versionadded:: 3.12 .. c:function:: TYPE* PyObject_GC_Resize(TYPE, PyVarObject *op, Py_ssize_t newsize) - Resize an object allocated by :c:func:`PyObject_NewVar`. Returns the + Resize an object allocated by :c:macro:`PyObject_NewVar`. Returns the resized object or ``NULL`` on failure. *op* must not be tracked by the collector yet. @@ -111,8 +128,8 @@ rules: .. c:function:: void PyObject_GC_Del(void *op) - Releases memory allocated to an object using :c:func:`PyObject_GC_New` or - :c:func:`PyObject_GC_NewVar`. + Releases memory allocated to an object using :c:macro:`PyObject_GC_New` or + :c:macro:`PyObject_GC_NewVar`. .. c:function:: void PyObject_GC_UnTrack(void *op) @@ -126,7 +143,7 @@ rules: .. versionchanged:: 3.8 - The :c:func:`_PyObject_GC_TRACK` and :c:func:`_PyObject_GC_UNTRACK` macros + The :c:func:`!_PyObject_GC_TRACK` and :c:func:`!_PyObject_GC_UNTRACK` macros have been removed from the public C API. The :c:member:`~PyTypeObject.tp_traverse` handler accepts a function parameter of this type: @@ -228,3 +245,36 @@ garbage collection runs. Returns the current state, 0 for disabled and 1 for enabled. .. versionadded:: 3.10 + + +Querying Garbage Collector State +-------------------------------- + +The C-API provides the following interface for querying information about +the garbage collector. + +.. c:function:: void PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void *arg) + + Run supplied *callback* on all live GC-capable objects. *arg* is passed through to + all invocations of *callback*. + + .. warning:: + If new objects are (de)allocated by the callback it is undefined if they + will be visited. + + Garbage collection is disabled during operation. 
Explicitly running a collection + in the callback may lead to undefined behaviour, e.g. visiting the same objects + multiple times or not at all. + + .. versionadded:: 3.12 + +.. c:type:: int (*gcvisitobjects_t)(PyObject *object, void *arg) + + Type of the visitor function to be passed to :c:func:`PyUnstable_GC_VisitObjects`. + *arg* is the same as the *arg* passed to ``PyUnstable_GC_VisitObjects``. + Return ``0`` to continue iteration, return ``1`` to stop iteration. Other return + values are reserved for now so behavior on returning anything else is undefined. + + .. versionadded:: 3.12 + + diff --git a/Doc/c-api/hash.rst b/Doc/c-api/hash.rst new file mode 100644 index 000000000000000..4dc121d7fbaa9b4 --- /dev/null +++ b/Doc/c-api/hash.rst @@ -0,0 +1,48 @@ +.. highlight:: c + +PyHash API +---------- + +See also the :c:member:`PyTypeObject.tp_hash` member. + +.. c:type:: Py_hash_t + + Hash value type: signed integer. + + .. versionadded:: 3.2 + +.. c:type:: Py_uhash_t + + Hash value type: unsigned integer. + + .. versionadded:: 3.2 + + +.. c:type:: PyHash_FuncDef + + Hash function definition used by :c:func:`PyHash_GetFuncDef`. + + .. c:member:: Py_hash_t (*const hash)(const void *, Py_ssize_t) + + Hash function. + + .. c:member:: const char *name + + Hash function name (UTF-8 encoded string). + + .. c:member:: const int hash_bits + + Internal size of the hash value in bits. + + .. c:member:: const int seed_bits + + Size of seed input in bits. + + .. versionadded:: 3.4 + + +.. c:function:: PyHash_FuncDef* PyHash_GetFuncDef(void) + + Get the hash function definition. + + .. versionadded:: 3.4 diff --git a/Doc/c-api/import.rst b/Doc/c-api/import.rst index a51619db6d3d97c..137780cc359cf9d 100644 --- a/Doc/c-api/import.rst +++ b/Doc/c-api/import.rst @@ -38,10 +38,13 @@ Importing Modules to per-module locks for most purposes, so this function's special behaviour isn't needed anymore. + .. deprecated-removed:: 3.13 3.15 + Use :c:func:`PyImport_ImportModule` instead. + .. c:function:: PyObject* PyImport_ImportModuleEx(const char *name, PyObject *globals, PyObject *locals, PyObject *fromlist) - .. index:: builtin: __import__ + .. index:: pair: built-in function; __import__ Import a module. This is best described by referring to the built-in Python function :func:`__import__`. @@ -95,50 +98,63 @@ Importing Modules an exception set on failure (the module still exists in this case). -.. c:function:: PyObject* PyImport_AddModuleObject(PyObject *name) +.. c:function:: PyObject* PyImport_AddModuleRef(const char *name) + + Return the module object corresponding to a module name. + + The *name* argument may be of the form ``package.module``. First check the + modules dictionary if there's one there, and if not, create a new one and + insert it in the modules dictionary. + + Return a :term:`strong reference` to the module on success. Return ``NULL`` + with an exception set on failure. + + The module name *name* is decoded from UTF-8. + + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. Use + :c:func:`PyImport_ImportModule` or one of its variants to import a module. + Package structures implied by a dotted name for *name* are not created if + not already present. - Return the module object corresponding to a module name. The *name* argument - may be of the form ``package.module``. First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary.
Return ``NULL`` with an exception set on failure. + .. versionadded:: 3.13 - .. note:: - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use :c:func:`PyImport_ImportModule` - or one of its variants to import a module. Package structures implied by a - dotted name for *name* are not created if not already present. +.. c:function:: PyObject* PyImport_AddModuleObject(PyObject *name) + + Similar to :c:func:`PyImport_AddModuleRef`, but return a :term:`borrowed + reference` and *name* is a Python :class:`str` object. .. versionadded:: 3.3 .. c:function:: PyObject* PyImport_AddModule(const char *name) - Similar to :c:func:`PyImport_AddModuleObject`, but the name is a UTF-8 - encoded string instead of a Unicode object. + Similar to :c:func:`PyImport_AddModuleRef`, but return a :term:`borrowed + reference`. .. c:function:: PyObject* PyImport_ExecCodeModule(const char *name, PyObject *co) - .. index:: builtin: compile + .. index:: pair: built-in function; compile Given a module name (possibly of the form ``package.module``) and a code object read from a Python bytecode file or obtained from the built-in function :func:`compile`, load the module. Return a new reference to the module object, or ``NULL`` with an exception set if an error occurred. *name* - is removed from :attr:`sys.modules` in error cases, even if *name* was already - in :attr:`sys.modules` on entry to :c:func:`PyImport_ExecCodeModule`. Leaving - incompletely initialized modules in :attr:`sys.modules` is dangerous, as imports of + is removed from :data:`sys.modules` in error cases, even if *name* was already + in :data:`sys.modules` on entry to :c:func:`PyImport_ExecCodeModule`. Leaving + incompletely initialized modules in :data:`sys.modules` is dangerous, as imports of such modules have no way to know that the module object is an unknown (and probably damaged with respect to the module author's intents) state. The module's :attr:`__spec__` and :attr:`__loader__` will be set, if not set already, with the appropriate values. The spec's loader will be set to the module's ``__loader__`` (if set) and to an instance of - :class:`SourceFileLoader` otherwise. + :class:`~importlib.machinery.SourceFileLoader` otherwise. The module's :attr:`__file__` attribute will be set to the code object's - :c:member:`co_filename`. If applicable, :attr:`__cached__` will also + :attr:`!co_filename`. If applicable, :attr:`__cached__` will also be set. This function will reload the module if it was already imported. See @@ -186,8 +202,10 @@ Importing Modules .. versionadded:: 3.2 .. versionchanged:: 3.3 - Uses :func:`imp.source_from_cache()` in calculating the source path if + Uses :func:`!imp.source_from_cache()` in calculating the source path if only the bytecode path is provided. + .. versionchanged:: 3.12 + No longer uses the removed :mod:`!imp` module. .. c:function:: long PyImport_GetMagicNumber() @@ -223,7 +241,7 @@ Importing Modules .. c:function:: PyObject* PyImport_GetImporter(PyObject *path) - Return a finder object for a :data:`sys.path`/:attr:`pkg.__path__` item + Return a finder object for a :data:`sys.path`/:attr:`!pkg.__path__` item *path*, possibly by fetching it from the :data:`sys.path_importer_cache` dict. If it wasn't yet cached, traverse :data:`sys.path_hooks` until a hook is found that can handle the path item. Return ``None`` if no hook could; @@ -292,23 +310,25 @@ Importing Modules .. 
c:struct:: _inittab - Structure describing a single entry in the list of built-in modules. Each of - these structures gives the name and initialization function for a module built - into the interpreter. The name is an ASCII encoded string. Programs which + Structure describing a single entry in the list of built-in modules. + Programs which embed Python may use an array of these structures in conjunction with :c:func:`PyImport_ExtendInittab` to provide additional built-in modules. - The structure is defined in :file:`Include/import.h` as:: + The structure consists of two members: - struct _inittab { - const char *name; /* ASCII encoded string */ - PyObject* (*initfunc)(void); - }; + .. c:member:: const char *name + + The module name, as an ASCII encoded string. + + .. c: member:: PyObject* (*initfunc)(void) + + Initialization function for a module built into the interpreter. .. c:function:: int PyImport_ExtendInittab(struct _inittab *newtab) Add a collection of modules to the table of built-in modules. The *newtab* - array must end with a sentinel entry which contains ``NULL`` for the :attr:`name` + array must end with a sentinel entry which contains ``NULL`` for the :c:member:`~_inittab.name` field; failure to provide the sentinel value can result in a memory fault. Returns ``0`` on success or ``-1`` if insufficient memory could be allocated to extend the internal table. In the event of failure, no modules are added to the diff --git a/Doc/c-api/init.rst b/Doc/c-api/init.rst index afb17719a77ab25..e89641f74c7491e 100644 --- a/Doc/c-api/init.rst +++ b/Doc/c-api/init.rst @@ -25,16 +25,10 @@ The following functions can be safely called before Python is initialized: * :c:func:`PyImport_AppendInittab` * :c:func:`PyImport_ExtendInittab` - * :c:func:`PyInitFrozenExtensions` + * :c:func:`!PyInitFrozenExtensions` * :c:func:`PyMem_SetAllocator` * :c:func:`PyMem_SetupDebugHooks` * :c:func:`PyObject_SetArenaAllocator` - * :c:func:`Py_SetPath` - * :c:func:`Py_SetProgramName` - * :c:func:`Py_SetPythonHome` - * :c:func:`Py_SetStandardStreamEncoding` - * :c:func:`PySys_AddWarnOption` - * :c:func:`PySys_AddXOption` * :c:func:`PySys_ResetWarnOptions` * Informative functions: @@ -65,7 +59,7 @@ The following functions can be safely called before Python is initialized: :c:func:`Py_Initialize`: :c:func:`Py_EncodeLocale`, :c:func:`Py_GetPath`, :c:func:`Py_GetPrefix`, :c:func:`Py_GetExecPrefix`, :c:func:`Py_GetProgramFullPath`, :c:func:`Py_GetPythonHome`, - :c:func:`Py_GetProgramName` and :c:func:`PyEval_InitThreads`. + and :c:func:`Py_GetProgramName`. .. _global-conf-vars: @@ -93,7 +87,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Set by the :option:`-b` option. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_DebugFlag @@ -107,7 +101,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Set by the :option:`-d` option and the :envvar:`PYTHONDEBUG` environment variable. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_DontWriteBytecodeFlag @@ -121,7 +115,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Set by the :option:`-B` option and the :envvar:`PYTHONDONTWRITEBYTECODE` environment variable. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_FrozenFlag @@ -134,7 +128,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Private flag used by ``_freeze_module`` and ``frozenmain`` programs. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. 
c:var:: int Py_HashRandomizationFlag @@ -149,7 +143,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. If the flag is non-zero, read the :envvar:`PYTHONHASHSEED` environment variable to initialize the secret hash seed. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_IgnoreEnvironmentFlag @@ -157,12 +151,12 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. :c:member:`PyConfig.use_environment` should be used instead, see :ref:`Python Initialization Configuration `. - Ignore all :envvar:`PYTHON*` environment variables, e.g. + Ignore all :envvar:`!PYTHON*` environment variables, e.g. :envvar:`PYTHONPATH` and :envvar:`PYTHONHOME`, that might be set. Set by the :option:`-E` and :option:`-I` options. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_InspectFlag @@ -177,7 +171,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Set by the :option:`-i` option and the :envvar:`PYTHONINSPECT` environment variable. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_InteractiveFlag @@ -202,7 +196,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. .. versionadded:: 3.4 - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_LegacyWindowsFSEncodingFlag @@ -221,7 +215,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. .. availability:: Windows. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_LegacyWindowsStdioFlag @@ -230,7 +224,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. :ref:`Python Initialization Configuration `. If the flag is non-zero, use :class:`io.FileIO` instead of - :class:`WindowsConsoleIO` for :mod:`sys` standard streams. + :class:`!io._WindowsConsoleIO` for :mod:`sys` standard streams. Set to ``1`` if the :envvar:`PYTHONLEGACYWINDOWSSTDIO` environment variable is set to a non-empty string. @@ -239,7 +233,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. .. availability:: Windows. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_NoSiteFlag @@ -254,7 +248,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Set by the :option:`-S` option. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_NoUserSiteDirectory @@ -268,7 +262,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Set by the :option:`-s` and :option:`-I` options, and the :envvar:`PYTHONNOUSERSITE` environment variable. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_OptimizeFlag @@ -279,7 +273,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Set by the :option:`-O` option and the :envvar:`PYTHONOPTIMIZE` environment variable. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_QuietFlag @@ -293,7 +287,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. .. versionadded:: 3.2 - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_UnbufferedStdioFlag @@ -306,7 +300,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Set by the :option:`-u` option and the :envvar:`PYTHONUNBUFFERED` environment variable. - .. deprecated:: 3.12 + .. deprecated-removed:: 3.12 3.14 .. c:var:: int Py_VerboseFlag @@ -322,7 +316,7 @@ to 1 and ``-bb`` sets :c:data:`Py_BytesWarningFlag` to 2. Set by the :option:`-v` option and the :envvar:`PYTHONVERBOSE` environment variable. - .. deprecated:: 3.12 + .. 
deprecated-removed:: 3.12 3.14 Initializing and finalizing the interpreter @@ -332,16 +326,12 @@ Initializing and finalizing the interpreter .. c:function:: void Py_Initialize() .. index:: - single: Py_SetProgramName() - single: PyEval_InitThreads() single: modules (in module sys) single: path (in module sys) - module: builtins - module: __main__ - module: sys + pair: module; builtins + pair: module; __main__ + pair: module; sys triple: module; search; path - single: PySys_SetArgv() - single: PySys_SetArgvEx() single: Py_FinalizeEx() Initialize the Python interpreter. In an application embedding Python, @@ -352,7 +342,9 @@ Initializing and finalizing the interpreter the table of loaded modules (``sys.modules``), and creates the fundamental modules :mod:`builtins`, :mod:`__main__` and :mod:`sys`. It also initializes the module search path (``sys.path``). It does not set ``sys.argv``; use - :c:func:`PySys_SetArgvEx` for that. This is a no-op when called for a second time + the new :c:type:`PyConfig` API of the :ref:`Python Initialization + Configuration ` for that. This is a no-op when called for a + second time (without calling :c:func:`Py_FinalizeEx` first). There is no return value; it is a fatal error if the initialization fails. @@ -381,6 +373,14 @@ Initializing and finalizing the interpreter :c:func:`Py_Initialize` is called again. +.. c:function:: int Py_IsFinalizing() + + Return true (non-zero) if the main Python interpreter is + :term:`shutting down `. Return false (zero) otherwise. + + .. versionadded:: 3.13 + + .. c:function:: int Py_FinalizeEx() Undo all initializations made by :c:func:`Py_Initialize` and subsequent use of @@ -401,7 +401,7 @@ Initializing and finalizing the interpreter the application. **Bugs and caveats:** The destruction of modules and objects in modules is done - in random order; this may cause destructors (:meth:`__del__` methods) to fail + in random order; this may cause destructors (:meth:`~object.__del__` methods) to fail when they depend on other objects (even functions) or modules. Dynamically loaded extension modules loaded by Python are not unloaded. Small amounts of memory allocated by the Python interpreter may not be freed (if you find a leak, @@ -425,76 +425,9 @@ Process-wide parameters ======================= -.. c:function:: int Py_SetStandardStreamEncoding(const char *encoding, const char *errors) - - .. index:: - single: Py_Initialize() - single: main() - triple: stdin; stdout; sdterr - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.stdio_encoding` and :c:member:`PyConfig.stdio_errors` - should be used instead, see :ref:`Python Initialization Configuration - `. - - This function should be called before :c:func:`Py_Initialize`, if it is - called at all. It specifies which encoding and error handling to use - with standard IO, with the same meanings as in :func:`str.encode`. - - It overrides :envvar:`PYTHONIOENCODING` values, and allows embedding code - to control IO encoding when the environment variable does not work. - - *encoding* and/or *errors* may be ``NULL`` to use - :envvar:`PYTHONIOENCODING` and/or default values (depending on other - settings). - - Note that :data:`sys.stderr` always uses the "backslashreplace" error - handler, regardless of this (or any other) setting. - - If :c:func:`Py_FinalizeEx` is called, this function will need to be called - again in order to affect subsequent calls to :c:func:`Py_Initialize`. - - Returns ``0`` if successful, a nonzero value on error (e.g. 
calling after the - interpreter has already been initialized). - - .. versionadded:: 3.4 - - .. deprecated:: 3.11 - - -.. c:function:: void Py_SetProgramName(const wchar_t *name) - - .. index:: - single: Py_Initialize() - single: main() - single: Py_GetPath() - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.program_name` should be used instead, see :ref:`Python - Initialization Configuration `. - - This function should be called before :c:func:`Py_Initialize` is called for - the first time, if it is called at all. It tells the interpreter the value - of the ``argv[0]`` argument to the :c:func:`main` function of the program - (converted to wide characters). - This is used by :c:func:`Py_GetPath` and some other functions below to find - the Python run-time libraries relative to the interpreter executable. The - default value is ``'python'``. The argument should point to a - zero-terminated wide character string in static storage whose contents will not - change for the duration of the program's execution. No code in the Python - interpreter will change the contents of this storage. - - Use :c:func:`Py_DecodeLocale` to decode a bytes string to get a - :c:expr:`wchar_*` string. - - .. deprecated:: 3.11 - - -.. c:function:: wchar* Py_GetProgramName() - - .. index:: single: Py_SetProgramName() +.. c:function:: wchar_t* Py_GetProgramName() - Return the program name set with :c:func:`Py_SetProgramName`, or the default. + Return the program name set with :c:member:`PyConfig.program_name`, or the default. The returned string points into static storage; the caller should not modify its value. @@ -504,16 +437,19 @@ Process-wide parameters .. versionchanged:: 3.10 It now returns ``NULL`` if called before :c:func:`Py_Initialize`. + .. deprecated-removed:: 3.13 3.15 + Get :data:`sys.executable` instead. + .. c:function:: wchar_t* Py_GetPrefix() Return the *prefix* for installed platform-independent files. This is derived through a number of complicated rules from the program name set with - :c:func:`Py_SetProgramName` and some environment variables; for example, if the + :c:member:`PyConfig.program_name` and some environment variables; for example, if the program name is ``'/usr/local/bin/python'``, the prefix is ``'/usr/local'``. The returned string points into static storage; the caller should not modify its value. This corresponds to the :makevar:`prefix` variable in the top-level - :file:`Makefile` and the ``--prefix`` argument to the :program:`configure` + :file:`Makefile` and the :option:`--prefix` argument to the :program:`configure` script at build time. The value is available to Python code as ``sys.prefix``. It is only useful on Unix. See also the next function. @@ -523,12 +459,15 @@ Process-wide parameters .. versionchanged:: 3.10 It now returns ``NULL`` if called before :c:func:`Py_Initialize`. + .. deprecated-removed:: 3.13 3.15 + Get :data:`sys.prefix` instead. + .. c:function:: wchar_t* Py_GetExecPrefix() Return the *exec-prefix* for installed platform-*dependent* files. This is derived through a number of complicated rules from the program name set with - :c:func:`Py_SetProgramName` and some environment variables; for example, if the + :c:member:`PyConfig.program_name` and some environment variables; for example, if the program name is ``'/usr/local/bin/python'``, the exec-prefix is ``'/usr/local'``. The returned string points into static storage; the caller should not modify its value. 
This corresponds to the :makevar:`exec_prefix` @@ -564,16 +503,18 @@ Process-wide parameters .. versionchanged:: 3.10 It now returns ``NULL`` if called before :c:func:`Py_Initialize`. + .. deprecated-removed:: 3.13 3.15 + Get :data:`sys.exec_prefix` instead. + .. c:function:: wchar_t* Py_GetProgramFullPath() .. index:: - single: Py_SetProgramName() single: executable (in module sys) Return the full program name of the Python executable; this is computed as a side-effect of deriving the default module search path from the program name - (set by :c:func:`Py_SetProgramName` above). The returned string points into + (set by :c:member:`PyConfig.program_name`). The returned string points into static storage; the caller should not modify its value. The value is available to Python code as ``sys.executable``. @@ -583,16 +524,18 @@ Process-wide parameters .. versionchanged:: 3.10 It now returns ``NULL`` if called before :c:func:`Py_Initialize`. + .. deprecated-removed:: 3.13 3.15 + Get :data:`sys.executable` instead. + .. c:function:: wchar_t* Py_GetPath() .. index:: triple: module; search; path single: path (in module sys) - single: Py_SetPath() Return the default module search path; this is computed from the program name - (set by :c:func:`Py_SetProgramName` above) and some environment variables. + (set by :c:member:`PyConfig.program_name`) and some environment variables. The returned string consists of a series of directory names separated by a platform dependent delimiter character. The delimiter character is ``':'`` on Unix and macOS, ``';'`` on Windows. The returned string points into @@ -609,43 +552,8 @@ Process-wide parameters .. versionchanged:: 3.10 It now returns ``NULL`` if called before :c:func:`Py_Initialize`. - -.. c:function:: void Py_SetPath(const wchar_t *) - - .. index:: - triple: module; search; path - single: path (in module sys) - single: Py_GetPath() - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.module_search_paths` and - :c:member:`PyConfig.module_search_paths_set` should be used instead, see - :ref:`Python Initialization Configuration `. - - Set the default module search path. If this function is called before - :c:func:`Py_Initialize`, then :c:func:`Py_GetPath` won't attempt to compute a - default search path but uses the one provided instead. This is useful if - Python is embedded by an application that has full knowledge of the location - of all modules. The path components should be separated by the platform - dependent delimiter character, which is ``':'`` on Unix and macOS, ``';'`` - on Windows. - - This also causes :data:`sys.executable` to be set to the program - full path (see :c:func:`Py_GetProgramFullPath`) and for :data:`sys.prefix` and - :data:`sys.exec_prefix` to be empty. It is up to the caller to modify these - if required after calling :c:func:`Py_Initialize`. - - Use :c:func:`Py_DecodeLocale` to decode a bytes string to get a - :c:expr:`wchar_*` string. - - The path argument is copied internally, so the caller may free it after the - call completes. - - .. versionchanged:: 3.8 - The program full path is now used for :data:`sys.executable`, instead - of the program name. - - .. deprecated:: 3.11 + .. deprecated-removed:: 3.13 3.15 + Get :data:`sys.path` instead. .. c:function:: const char* Py_GetVersion() @@ -718,110 +626,10 @@ Process-wide parameters ``sys.version``. -.. c:function:: void PySys_SetArgvEx(int argc, wchar_t **argv, int updatepath) - - .. 
index:: - single: main() - single: Py_FatalError() - single: argv (in module sys) - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.argv`, :c:member:`PyConfig.parse_argv` and - :c:member:`PyConfig.safe_path` should be used instead, see :ref:`Python - Initialization Configuration `. - - Set :data:`sys.argv` based on *argc* and *argv*. These parameters are - similar to those passed to the program's :c:func:`main` function with the - difference that the first entry should refer to the script file to be - executed rather than the executable hosting the Python interpreter. If there - isn't a script that will be run, the first entry in *argv* can be an empty - string. If this function fails to initialize :data:`sys.argv`, a fatal - condition is signalled using :c:func:`Py_FatalError`. +.. c:function:: wchar_t* Py_GetPythonHome() - If *updatepath* is zero, this is all the function does. If *updatepath* - is non-zero, the function also modifies :data:`sys.path` according to the - following algorithm: - - - If the name of an existing script is passed in ``argv[0]``, the absolute - path of the directory where the script is located is prepended to - :data:`sys.path`. - - Otherwise (that is, if *argc* is ``0`` or ``argv[0]`` doesn't point - to an existing file name), an empty string is prepended to - :data:`sys.path`, which is the same as prepending the current working - directory (``"."``). - - Use :c:func:`Py_DecodeLocale` to decode a bytes string to get a - :c:expr:`wchar_*` string. - - See also :c:member:`PyConfig.orig_argv` and :c:member:`PyConfig.argv` - members of the :ref:`Python Initialization Configuration `. - - .. note:: - It is recommended that applications embedding the Python interpreter - for purposes other than executing a single script pass ``0`` as *updatepath*, - and update :data:`sys.path` themselves if desired. - See `CVE-2008-5983 `_. - - On versions before 3.1.3, you can achieve the same effect by manually - popping the first :data:`sys.path` element after having called - :c:func:`PySys_SetArgv`, for example using:: - - PyRun_SimpleString("import sys; sys.path.pop(0)\n"); - - .. versionadded:: 3.1.3 - - .. XXX impl. doesn't seem consistent in allowing ``0``/``NULL`` for the params; - check w/ Guido. - - .. deprecated:: 3.11 - - -.. c:function:: void PySys_SetArgv(int argc, wchar_t **argv) - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.argv` and :c:member:`PyConfig.parse_argv` should be used - instead, see :ref:`Python Initialization Configuration `. - - This function works like :c:func:`PySys_SetArgvEx` with *updatepath* set - to ``1`` unless the :program:`python` interpreter was started with the - :option:`-I`. - - Use :c:func:`Py_DecodeLocale` to decode a bytes string to get a - :c:expr:`wchar_*` string. - - See also :c:member:`PyConfig.orig_argv` and :c:member:`PyConfig.argv` - members of the :ref:`Python Initialization Configuration `. - - .. versionchanged:: 3.4 The *updatepath* value depends on :option:`-I`. - - .. deprecated:: 3.11 - - -.. c:function:: void Py_SetPythonHome(const wchar_t *home) - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.home` should be used instead, see :ref:`Python - Initialization Configuration `. - - Set the default "home" directory, that is, the location of the standard - Python libraries. See :envvar:`PYTHONHOME` for the meaning of the - argument string. 
- - The argument should point to a zero-terminated character string in static - storage whose contents will not change for the duration of the program's - execution. No code in the Python interpreter will change the contents of - this storage. - - Use :c:func:`Py_DecodeLocale` to decode a bytes string to get a - :c:expr:`wchar_*` string. - - .. deprecated:: 3.11 - - -.. c:function:: w_char* Py_GetPythonHome() - - Return the default "home", that is, the value set by a previous call to - :c:func:`Py_SetPythonHome`, or the value of the :envvar:`PYTHONHOME` + Return the default "home", that is, the value set by + :c:member:`PyConfig.home`, or the value of the :envvar:`PYTHONHOME` environment variable if it is set. This function should not be called before :c:func:`Py_Initialize`, otherwise @@ -830,6 +638,10 @@ Process-wide parameters .. versionchanged:: 3.10 It now returns ``NULL`` if called before :c:func:`Py_Initialize`. + .. deprecated-removed:: 3.13 3.15 + Get :c:member:`PyConfig.home` or :envvar:`PYTHONHOME` environment + variable instead. + .. _threads: @@ -981,7 +793,7 @@ the fork, and releasing them afterwards. In addition, it resets any :ref:`lock-objects` in the child. When extending or embedding Python, there is no way to inform Python of additional (non-Python) locks that need to be acquired before or reset after a fork. OS facilities such as -:c:func:`pthread_atfork` would need to be used to accomplish the same thing. +:c:func:`!pthread_atfork` would need to be used to accomplish the same thing. Additionally, when extending or embedding Python, calling :c:func:`fork` directly rather than through :func:`os.fork` (and returning to or calling into Python) may result in a deadlock by one of Python's internal locks @@ -1023,47 +835,11 @@ code, or when embedding the Python interpreter: .. c:type:: PyThreadState This data structure represents the state of a single thread. The only public - data member is :attr:`interp` (:c:expr:`PyInterpreterState *`), which points to - this thread's interpreter state. - - -.. c:function:: void PyEval_InitThreads() - - .. index:: - single: PyEval_AcquireThread() - single: PyEval_ReleaseThread() - single: PyEval_SaveThread() - single: PyEval_RestoreThread() - - Deprecated function which does nothing. + data member is: - In Python 3.6 and older, this function created the GIL if it didn't exist. - - .. versionchanged:: 3.9 - The function now does nothing. - - .. versionchanged:: 3.7 - This function is now called by :c:func:`Py_Initialize()`, so you don't - have to call it yourself anymore. - - .. versionchanged:: 3.2 - This function cannot be called before :c:func:`Py_Initialize()` anymore. - - .. deprecated-removed:: 3.9 3.11 - - .. index:: module: _thread - - -.. c:function:: int PyEval_ThreadsInitialized() - - Returns a non-zero value if :c:func:`PyEval_InitThreads` has been called. This - function can be called without holding the GIL, and therefore can be used to - avoid calls to the locking API when running single-threaded. - - .. versionchanged:: 3.7 - The :term:`GIL` is now initialized by :c:func:`Py_Initialize()`. + .. c:member:: PyInterpreterState *interp - .. deprecated-removed:: 3.9 3.11 + This thread's interpreter state. .. c:function:: PyThreadState* PyEval_SaveThread() @@ -1084,7 +860,7 @@ code, or when embedding the Python interpreter: .. note:: Calling this function from a thread when the runtime is finalizing will terminate the thread, even if the thread was not created by Python. 
- You can use :c:func:`_Py_IsFinalizing` or :func:`sys.is_finalizing` to + You can use :c:func:`Py_IsFinalizing` or :func:`sys.is_finalizing` to check if the interpreter is in process of being finalized before calling this function to avoid unwanted termination. @@ -1094,6 +870,19 @@ code, or when embedding the Python interpreter: When the current thread state is ``NULL``, this issues a fatal error (so that the caller needn't check for ``NULL``). + See also :c:func:`PyThreadState_GetUnchecked`. + + +.. c:function:: PyThreadState* PyThreadState_GetUnchecked() + + Similar to :c:func:`PyThreadState_Get`, but don't kill the process with a + fatal error if it is NULL. The caller is responsible to check if the result + is NULL. + + .. versionadded:: 3.13 + In Python 3.5 to 3.12, the function was private and known as + ``_PyThreadState_UncheckedGet()``. + .. c:function:: PyThreadState* PyThreadState_Swap(PyThreadState *tstate) @@ -1130,7 +919,7 @@ with sub-interpreters: .. note:: Calling this function from a thread when the runtime is finalizing will terminate the thread, even if the thread was not created by Python. - You can use :c:func:`_Py_IsFinalizing` or :func:`sys.is_finalizing` to + You can use :c:func:`Py_IsFinalizing` or :func:`sys.is_finalizing` to check if the interpreter is in process of being finalized before calling this function to avoid unwanted termination. @@ -1396,7 +1185,7 @@ All of the following functions must be called after :c:func:`Py_Initialize`. function does not steal any references to *exc*. To prevent naive misuse, you must write your own C extension to call this. Must be called with the GIL held. Returns the number of thread states modified; this is normally one, but will be - zero if the thread id isn't found. If *exc* is :const:`NULL`, the pending + zero if the thread id isn't found. If *exc* is ``NULL``, the pending exception (if any) for the thread is cleared. This raises no exceptions. .. versionchanged:: 3.7 @@ -1412,7 +1201,7 @@ All of the following functions must be called after :c:func:`Py_Initialize`. .. note:: Calling this function from a thread when the runtime is finalizing will terminate the thread, even if the thread was not created by Python. - You can use :c:func:`_Py_IsFinalizing` or :func:`sys.is_finalizing` to + You can use :c:func:`Py_IsFinalizing` or :func:`sys.is_finalizing` to check if the interpreter is in process of being finalized before calling this function to avoid unwanted termination. @@ -1437,39 +1226,6 @@ All of the following functions must be called after :c:func:`Py_Initialize`. available (even when threads have not been initialized). -.. c:function:: void PyEval_AcquireLock() - - Acquire the global interpreter lock. The lock must have been created earlier. - If this thread already has the lock, a deadlock ensues. - - .. deprecated:: 3.2 - This function does not update the current thread state. Please use - :c:func:`PyEval_RestoreThread` or :c:func:`PyEval_AcquireThread` - instead. - - .. note:: - Calling this function from a thread when the runtime is finalizing - will terminate the thread, even if the thread was not created by Python. - You can use :c:func:`_Py_IsFinalizing` or :func:`sys.is_finalizing` to - check if the interpreter is in process of being finalized before calling - this function to avoid unwanted termination. - - .. 
versionchanged:: 3.8 - Updated to be consistent with :c:func:`PyEval_RestoreThread`, - :c:func:`Py_END_ALLOW_THREADS`, and :c:func:`PyGILState_Ensure`, - and terminate the current thread if called while the interpreter is finalizing. - - -.. c:function:: void PyEval_ReleaseLock() - - Release the global interpreter lock. The lock must have been created earlier. - - .. deprecated:: 3.2 - This function does not update the current thread state. Please use - :c:func:`PyEval_SaveThread` or :c:func:`PyEval_ReleaseThread` - instead. - - .. _sub-interpreter-support: Sub-interpreter support @@ -1491,12 +1247,101 @@ You can switch between sub-interpreters using the :c:func:`PyThreadState_Swap` function. You can create and destroy them using the following functions: -.. c:function:: PyThreadState* Py_NewInterpreter() +.. c:type:: PyInterpreterConfig + + Structure containing most parameters to configure a sub-interpreter. + Its values are used only in :c:func:`Py_NewInterpreterFromConfig` and + never modified by the runtime. + + .. versionadded:: 3.12 + + Structure fields: + + .. c:member:: int use_main_obmalloc + + If this is ``0`` then the sub-interpreter will use its own + "object" allocator state. + Otherwise it will use (share) the main interpreter's. + + If this is ``0`` then + :c:member:`~PyInterpreterConfig.check_multi_interp_extensions` + must be ``1`` (non-zero). + If this is ``1`` then :c:member:`~PyInterpreterConfig.gil` + must not be :c:macro:`PyInterpreterConfig_OWN_GIL`. + + .. c:member:: int allow_fork + + If this is ``0`` then the runtime will not support forking the + process in any thread where the sub-interpreter is currently active. + Otherwise fork is unrestricted. + + Note that the :mod:`subprocess` module still works + when fork is disallowed. + + .. c:member:: int allow_exec + + If this is ``0`` then the runtime will not support replacing the + current process via exec (e.g. :func:`os.execv`) in any thread + where the sub-interpreter is currently active. + Otherwise exec is unrestricted. + + Note that the :mod:`subprocess` module still works + when exec is disallowed. + + .. c:member:: int allow_threads + + If this is ``0`` then the sub-interpreter's :mod:`threading` module + won't create threads. + Otherwise threads are allowed. + + .. c:member:: int allow_daemon_threads + + If this is ``0`` then the sub-interpreter's :mod:`threading` module + won't create daemon threads. + Otherwise daemon threads are allowed (as long as + :c:member:`~PyInterpreterConfig.allow_threads` is non-zero). + + .. c:member:: int check_multi_interp_extensions + + If this is ``0`` then all extension modules may be imported, + including legacy (single-phase init) modules, + in any thread where the sub-interpreter is currently active. + Otherwise only multi-phase init extension modules + (see :pep:`489`) may be imported. + (Also see :c:macro:`Py_mod_multiple_interpreters`.) + + This must be ``1`` (non-zero) if + :c:member:`~PyInterpreterConfig.use_main_obmalloc` is ``0``. + + .. c:member:: int gil + + This determines the operation of the GIL for the sub-interpreter. + It may be one of the following: + + .. c:namespace:: NULL + + .. c:macro:: PyInterpreterConfig_DEFAULT_GIL + + Use the default selection (:c:macro:`PyInterpreterConfig_SHARED_GIL`). + + .. c:macro:: PyInterpreterConfig_SHARED_GIL + + Use (share) the main interpreter's GIL. + + .. c:macro:: PyInterpreterConfig_OWN_GIL + + Use the sub-interpreter's own GIL. 
+
+   If this is :c:macro:`PyInterpreterConfig_OWN_GIL` then
+   :c:member:`PyInterpreterConfig.use_main_obmalloc` must be ``0``.
+
+
+.. c:function:: PyStatus Py_NewInterpreterFromConfig(PyThreadState **tstate_p, const PyInterpreterConfig *config)

   .. index::
-     module: builtins
-     module: __main__
-     module: sys
+     pair: module; builtins
+     pair: module; __main__
+     pair: module; sys
      single: stdout (in module sys)
      single: stderr (in module sys)
      single: stdin (in module sys)

@@ -1511,16 +1356,47 @@ function. You can create and destroy them using the following functions:
   ``sys.stdout`` and ``sys.stderr`` (however these refer to the same underlying
   file descriptors).

-  The return value points to the first thread state created in the new
+  The given *config* controls the options with which the interpreter
+  is initialized.
+
+  Upon success, *tstate_p* will be set to the first thread state
+  created in the new
   sub-interpreter.  This thread state is made the current thread state.
   Note that no actual thread is created; see the discussion of thread states
-  below.  If creation of the new interpreter is unsuccessful, ``NULL`` is
-  returned; no exception is set since the exception state is stored in the
-  current thread state and there may not be a current thread state.  (Like all
-  other Python/C API functions, the global interpreter lock must be held before
-  calling this function and is still held when it returns; however, unlike most
-  other Python/C API functions, there needn't be a current thread state on
-  entry.)
+  below.  If creation of the new interpreter is unsuccessful,
+  *tstate_p* is set to ``NULL``;
+  no exception is set since the exception state is stored in the
+  current thread state and there may not be a current thread state.
+
+  Like all other Python/C API functions, the global interpreter lock
+  must be held before calling this function and is still held when it
+  returns.  Likewise a current thread state must be set on entry.  On
+  success, the returned thread state will be set as current.  If the
+  sub-interpreter is created with its own GIL then the GIL of the
+  calling interpreter will be released.  When the function returns,
+  the new interpreter's GIL will be held by the current thread and
+  the calling interpreter's GIL will remain released.
+
+  .. versionadded:: 3.12
+
+  Sub-interpreters are most effective when isolated from each other,
+  with certain functionality restricted::
+
+      PyInterpreterConfig config = {
+          .use_main_obmalloc = 0,
+          .allow_fork = 0,
+          .allow_exec = 0,
+          .allow_threads = 1,
+          .allow_daemon_threads = 0,
+          .check_multi_interp_extensions = 1,
+          .gil = PyInterpreterConfig_OWN_GIL,
+      };
+      PyThreadState *tstate = NULL;
+      PyStatus status = Py_NewInterpreterFromConfig(&tstate, &config);
+      if (PyStatus_Exception(status)) {
+          Py_ExitStatusException(status);
+      }
+
+  Note that the config is used only briefly and does not get modified.
+  During initialization the config's values are converted into various
+  :c:type:`PyInterpreterState` values.  A read-only copy of the config
+  may be stored internally on the :c:type:`PyInterpreterState`.

   .. index:: single: Py_FinalizeEx()

@@ -1555,19 +1431,79 @@ function. You can create and destroy them using the following functions:

   .. index:: single: close() (in module os)

+.. c:function:: PyThreadState* Py_NewInterpreter(void)
+
+   .. index::
+      pair: module; builtins
+      pair: module; __main__
+      pair: module; sys
+      single: stdout (in module sys)
+      single: stderr (in module sys)
+      single: stdin (in module sys)
+
+   Create a new sub-interpreter.  This is essentially just a wrapper
This is essentially just a wrapper + around :c:func:`Py_NewInterpreterFromConfig` with a config that + preserves the existing behavior. The result is an unisolated + sub-interpreter that shares the main interpreter's GIL, allows + fork/exec, allows daemon threads, and allows single-phase init + modules. + + .. c:function:: void Py_EndInterpreter(PyThreadState *tstate) .. index:: single: Py_FinalizeEx() - Destroy the (sub-)interpreter represented by the given thread state. The given - thread state must be the current thread state. See the discussion of thread - states below. When the call returns, the current thread state is ``NULL``. All - thread states associated with this interpreter are destroyed. (The global - interpreter lock must be held before calling this function and is still held - when it returns.) :c:func:`Py_FinalizeEx` will destroy all sub-interpreters that + Destroy the (sub-)interpreter represented by the given thread state. + The given thread state must be the current thread state. See the + discussion of thread states below. When the call returns, + the current thread state is ``NULL``. All thread states associated + with this interpreter are destroyed. The global interpreter lock + used by the target interpreter must be held before calling this + function. No GIL is held when it returns. + + :c:func:`Py_FinalizeEx` will destroy all sub-interpreters that haven't been explicitly destroyed at that point. +A Per-Interpreter GIL +--------------------- + +Using :c:func:`Py_NewInterpreterFromConfig` you can create +a sub-interpreter that is completely isolated from other interpreters, +including having its own GIL. The most important benefit of this +isolation is that such an interpreter can execute Python code without +being blocked by other interpreters or blocking any others. Thus a +single Python process can truly take advantage of multiple CPU cores +when running Python code. The isolation also encourages a different +approach to concurrency than that of just using threads. +(See :pep:`554`.) + +Using an isolated interpreter requires vigilance in preserving that +isolation. That especially means not sharing any objects or mutable +state without guarantees about thread-safety. Even objects that are +otherwise immutable (e.g. ``None``, ``(1, 5)``) can't normally be shared +because of the refcount. One simple but less-efficient approach around +this is to use a global lock around all use of some state (or object). +Alternately, effectively immutable objects (like integers or strings) +can be made safe in spite of their refcounts by making them :term:`immortal`. +In fact, this has been done for the builtin singletons, small integers, +and a number of other builtin objects. + +If you preserve isolation then you will have access to proper multi-core +computing without the complications that come with free-threading. +Failure to preserve isolation will expose you to the full consequences +of free-threading, including races and hard-to-debug crashes. + +Aside from that, one of the main challenges of using multiple isolated +interpreters is how to communicate between them safely (not break +isolation) and efficiently. The runtime and stdlib do not provide +any standard approach to this yet. A future stdlib module would help +mitigate the effort of preserving isolation and expose effective tools +for communicating (and sharing) data between interpreters. + +.. 
versionadded:: 3.12 + + Bugs and caveats ---------------- @@ -1675,32 +1611,32 @@ Python-level trace functions in previous versions. The type of the trace function registered using :c:func:`PyEval_SetProfile` and :c:func:`PyEval_SetTrace`. The first parameter is the object passed to the registration function as *obj*, *frame* is the frame object to which the event - pertains, *what* is one of the constants :const:`PyTrace_CALL`, - :const:`PyTrace_EXCEPTION`, :const:`PyTrace_LINE`, :const:`PyTrace_RETURN`, - :const:`PyTrace_C_CALL`, :const:`PyTrace_C_EXCEPTION`, :const:`PyTrace_C_RETURN`, - or :const:`PyTrace_OPCODE`, and *arg* depends on the value of *what*: - - +------------------------------+----------------------------------------+ - | Value of *what* | Meaning of *arg* | - +==============================+========================================+ - | :const:`PyTrace_CALL` | Always :c:data:`Py_None`. | - +------------------------------+----------------------------------------+ - | :const:`PyTrace_EXCEPTION` | Exception information as returned by | - | | :func:`sys.exc_info`. | - +------------------------------+----------------------------------------+ - | :const:`PyTrace_LINE` | Always :c:data:`Py_None`. | - +------------------------------+----------------------------------------+ - | :const:`PyTrace_RETURN` | Value being returned to the caller, | - | | or ``NULL`` if caused by an exception. | - +------------------------------+----------------------------------------+ - | :const:`PyTrace_C_CALL` | Function object being called. | - +------------------------------+----------------------------------------+ - | :const:`PyTrace_C_EXCEPTION` | Function object being called. | - +------------------------------+----------------------------------------+ - | :const:`PyTrace_C_RETURN` | Function object being called. | - +------------------------------+----------------------------------------+ - | :const:`PyTrace_OPCODE` | Always :c:data:`Py_None`. | - +------------------------------+----------------------------------------+ + pertains, *what* is one of the constants :c:data:`PyTrace_CALL`, + :c:data:`PyTrace_EXCEPTION`, :c:data:`PyTrace_LINE`, :c:data:`PyTrace_RETURN`, + :c:data:`PyTrace_C_CALL`, :c:data:`PyTrace_C_EXCEPTION`, :c:data:`PyTrace_C_RETURN`, + or :c:data:`PyTrace_OPCODE`, and *arg* depends on the value of *what*: + + +-------------------------------+----------------------------------------+ + | Value of *what* | Meaning of *arg* | + +===============================+========================================+ + | :c:data:`PyTrace_CALL` | Always :c:data:`Py_None`. | + +-------------------------------+----------------------------------------+ + | :c:data:`PyTrace_EXCEPTION` | Exception information as returned by | + | | :func:`sys.exc_info`. | + +-------------------------------+----------------------------------------+ + | :c:data:`PyTrace_LINE` | Always :c:data:`Py_None`. | + +-------------------------------+----------------------------------------+ + | :c:data:`PyTrace_RETURN` | Value being returned to the caller, | + | | or ``NULL`` if caused by an exception. | + +-------------------------------+----------------------------------------+ + | :c:data:`PyTrace_C_CALL` | Function object being called. | + +-------------------------------+----------------------------------------+ + | :c:data:`PyTrace_C_EXCEPTION` | Function object being called. | + +-------------------------------+----------------------------------------+ + | :c:data:`PyTrace_C_RETURN` | Function object being called. 
| + +-------------------------------+----------------------------------------+ + | :c:data:`PyTrace_OPCODE` | Always :c:data:`Py_None`. | + +-------------------------------+----------------------------------------+ .. c:var:: int PyTrace_CALL @@ -1767,8 +1703,8 @@ Python-level trace functions in previous versions. function as its first parameter, and may be any Python object, or ``NULL``. If the profile function needs to maintain state, using a different value for *obj* for each thread provides a convenient and thread-safe place to store it. The - profile function is called for all monitored events except :const:`PyTrace_LINE` - :const:`PyTrace_OPCODE` and :const:`PyTrace_EXCEPTION`. + profile function is called for all monitored events except :c:data:`PyTrace_LINE` + :c:data:`PyTrace_OPCODE` and :c:data:`PyTrace_EXCEPTION`. See also the :func:`sys.setprofile` function. @@ -1793,8 +1729,8 @@ Python-level trace functions in previous versions. :c:func:`PyEval_SetProfile`, except the tracing function does receive line-number events and per-opcode events, but does not receive any event related to C function objects being called. Any trace function registered using :c:func:`PyEval_SetTrace` - will not receive :const:`PyTrace_C_CALL`, :const:`PyTrace_C_EXCEPTION` or - :const:`PyTrace_C_RETURN` as a value for the *what* parameter. + will not receive :c:data:`PyTrace_C_CALL`, :c:data:`PyTrace_C_EXCEPTION` or + :c:data:`PyTrace_C_RETURN` as a value for the *what* parameter. See also the :func:`sys.settrace` function. diff --git a/Doc/c-api/init_config.rst b/Doc/c-api/init_config.rst index 64bdfefd6494ff5..47a8fbb2cd9c97e 100644 --- a/Doc/c-api/init_config.rst +++ b/Doc/c-api/init_config.rst @@ -82,6 +82,8 @@ PyWideStringList If *length* is non-zero, *items* must be non-``NULL`` and all strings must be non-``NULL``. + .. c:namespace:: NULL + Methods: .. c:function:: PyStatus PyWideStringList_Append(PyWideStringList *list, const wchar_t *item) @@ -101,6 +103,8 @@ PyWideStringList Python must be preinitialized to call this function. + .. c:namespace:: PyWideStringList + Structure fields: .. c:member:: Py_ssize_t length @@ -135,6 +139,8 @@ PyStatus Name of the function which created an error, can be ``NULL``. + .. c:namespace:: NULL + Functions to create a status: .. c:function:: PyStatus PyStatus_Ok(void) @@ -210,6 +216,8 @@ PyPreConfig Structure used to preinitialize Python. + .. c:namespace:: NULL + Function to initialize a preconfiguration: .. c:function:: void PyPreConfig_InitPythonConfig(PyPreConfig *preconfig) @@ -222,6 +230,8 @@ PyPreConfig Initialize the preconfiguration with :ref:`Isolated Configuration `. + .. c:namespace:: PyPreConfig + Structure fields: .. c:member:: int allocator @@ -243,18 +253,28 @@ PyPreConfig * ``PYMEM_ALLOCATOR_PYMALLOC_DEBUG`` (``6``): :ref:`Python pymalloc memory allocator ` with :ref:`debug hooks `. + * ``PYMEM_ALLOCATOR_MIMALLOC`` (``6``): use ``mimalloc``, a fast + malloc replacement. + * ``PYMEM_ALLOCATOR_MIMALLOC_DEBUG`` (``7``): use ``mimalloc``, a fast + malloc replacement with :ref:`debug hooks `. + ``PYMEM_ALLOCATOR_PYMALLOC`` and ``PYMEM_ALLOCATOR_PYMALLOC_DEBUG`` are not supported if Python is :option:`configured using --without-pymalloc <--without-pymalloc>`. + ``PYMEM_ALLOCATOR_MIMALLOC`` and ``PYMEM_ALLOCATOR_MIMALLOC_DEBUG`` are + not supported if Python is :option:`configured using --without-mimalloc + <--without-mimalloc>` or if the underlying atomic support isn't + available. + See :ref:`Memory Management `. Default: ``PYMEM_ALLOCATOR_NOT_SET``. 
.. c:member:: int configure_locale - Set the LC_CTYPE locale to the user preferred locale? + Set the LC_CTYPE locale to the user preferred locale. If equals to ``0``, set :c:member:`~PyPreConfig.coerce_c_locale` and :c:member:`~PyPreConfig.coerce_c_locale_warn` members to ``0``. @@ -429,6 +449,8 @@ PyConfig When done, the :c:func:`PyConfig_Clear` function must be used to release the configuration memory. + .. c:namespace:: NULL + Structure methods: .. c:function:: void PyConfig_InitPythonConfig(PyConfig *config) @@ -522,16 +544,28 @@ PyConfig Moreover, if :c:func:`PyConfig_SetArgv` or :c:func:`PyConfig_SetBytesArgv` is used, this method must be called before other methods, since the preinitialization configuration depends on command line arguments (if - :c:member:`parse_argv` is non-zero). + :c:member:`~PyConfig.parse_argv` is non-zero). The caller of these methods is responsible to handle exceptions (error or exit) using ``PyStatus_Exception()`` and ``Py_ExitStatusException()``. + .. c:namespace:: PyConfig + Structure fields: .. c:member:: PyWideStringList argv - Command line arguments: :data:`sys.argv`. + .. index:: + single: main() + single: argv (in module sys) + + Set :data:`sys.argv` command line arguments based on + :c:member:`~PyConfig.argv`. These parameters are similar to those passed + to the program's :c:func:`main` function with the difference that the + first entry should refer to the script file to be executed rather than + the executable hosting the Python interpreter. If there isn't a script + that will be run, the first entry in :c:member:`~PyConfig.argv` can be an + empty string. Set :c:member:`~PyConfig.parse_argv` to ``1`` to parse :c:member:`~PyConfig.argv` the same way the regular Python parses Python @@ -572,6 +606,8 @@ PyConfig Part of the :ref:`Python Path Configuration ` output. + See also :c:member:`PyConfig.exec_prefix`. + .. c:member:: wchar_t* base_executable Python base executable: :data:`sys._base_executable`. @@ -584,6 +620,8 @@ PyConfig Part of the :ref:`Python Path Configuration ` output. + See also :c:member:`PyConfig.executable`. + .. c:member:: wchar_t* base_prefix :data:`sys.base_prefix`. @@ -592,6 +630,8 @@ PyConfig Part of the :ref:`Python Path Configuration ` output. + See also :c:member:`PyConfig.prefix`. + .. c:member:: int buffered_stdio If equals to ``0`` and :c:member:`~PyConfig.configure_c_stdio` is non-zero, @@ -686,7 +726,7 @@ PyConfig Set to ``1`` by the :envvar:`PYTHONDUMPREFS` environment variable. - Need a special build of Python with the ``Py_TRACE_REFS`` macro defined: + Needs a special build of Python with the ``Py_TRACE_REFS`` macro defined: see the :option:`configure --with-trace-refs option <--with-trace-refs>`. Default: ``0``. @@ -700,6 +740,8 @@ PyConfig Part of the :ref:`Python Path Configuration ` output. + See also :c:member:`PyConfig.base_exec_prefix`. + .. c:member:: wchar_t* executable The absolute path of the executable binary for the Python interpreter: @@ -709,6 +751,8 @@ PyConfig Part of the :ref:`Python Path Configuration ` output. + See also :c:member:`PyConfig.base_executable`. + .. c:member:: int faulthandler Enable faulthandler? @@ -780,10 +824,8 @@ PyConfig .. c:member:: wchar_t* home - Python home directory. - - If :c:func:`Py_SetPythonHome` has been called, use its argument if it is - not ``NULL``. + Set the default Python "home" directory, that is, the location of the + standard Python libraries (see :envvar:`PYTHONHOME`). Set by the :envvar:`PYTHONHOME` environment variable. 
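A minimal sketch of setting this field through the :c:type:`PyConfig` API (in place of the removed ``Py_SetPythonHome()`` call); the helper name ``init_with_home`` is illustrative only and the error handling strategy is just one possible choice::

   #include <Python.h>

   /* Hypothetical helper: initialize the runtime with an explicit home directory. */
   static void
   init_with_home(const wchar_t *home)
   {
       PyConfig config;
       PyConfig_InitPythonConfig(&config);

       PyStatus status = PyConfig_SetString(&config, &config.home, home);
       if (PyStatus_Exception(status)) {
           goto fail;
       }

       status = Py_InitializeFromConfig(&config);
       if (PyStatus_Exception(status)) {
           goto fail;
       }
       PyConfig_Clear(&config);
       return;

   fail:
       PyConfig_Clear(&config);
       Py_ExitStatusException(status);
   }

:c:func:`PyConfig_SetString` copies the string into the configuration, so the caller keeps ownership of *home* and :c:func:`PyConfig_Clear` releases the copy.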
@@ -839,13 +881,26 @@ PyConfig will produce an error. Configured by the :option:`-X int_max_str_digits <-X>` command line - flag or the :envvar:`PYTHONINTMAXSTRDIGITS` environment varable. + flag or the :envvar:`PYTHONINTMAXSTRDIGITS` environment variable. Default: ``-1`` in Python mode. 4300 (:data:`sys.int_info.default_max_str_digits`) in isolated mode. .. versionadded:: 3.12 + .. c:member:: int cpu_count + + If the value of :c:member:`~PyConfig.cpu_count` is not ``-1`` then it will + override the return values of :func:`os.cpu_count`, + :func:`os.process_cpu_count`, and :func:`multiprocessing.cpu_count`. + + Configured by the :samp:`-X cpu_count={n|default}` command line + flag or the :envvar:`PYTHON_CPU_COUNT` environment variable. + + Default: ``-1``. + + .. versionadded:: 3.13 + .. c:member:: int isolated If greater than ``0``, enable isolated mode: @@ -871,7 +926,7 @@ PyConfig .. c:member:: int legacy_windows_stdio If non-zero, use :class:`io.FileIO` instead of - :class:`io.WindowsConsoleIO` for :data:`sys.stdin`, :data:`sys.stdout` + :class:`!io._WindowsConsoleIO` for :data:`sys.stdin`, :data:`sys.stdout` and :data:`sys.stderr`. Set to ``1`` if the :envvar:`PYTHONLEGACYWINDOWSSTDIO` environment @@ -920,7 +975,7 @@ PyConfig .. c:member:: wchar_t* pythonpath_env Module search paths (:data:`sys.path`) as a string separated by ``DELIM`` - (:data:`os.path.pathsep`). + (:data:`os.pathsep`). Set by the :envvar:`PYTHONPATH` environment variable. @@ -1003,7 +1058,7 @@ PyConfig Incremented by the :option:`-d` command line option. Set to the :envvar:`PYTHONDEBUG` environment variable value. - Need a :ref:`debug build of Python ` (the ``Py_DEBUG`` macro + Needs a :ref:`debug build of Python ` (the ``Py_DEBUG`` macro must be defined). Default: ``0``. @@ -1029,12 +1084,13 @@ PyConfig Part of the :ref:`Python Path Configuration ` output. + See also :c:member:`PyConfig.base_prefix`. + .. c:member:: wchar_t* program_name Program name used to initialize :c:member:`~PyConfig.executable` and in early error messages during Python initialization. - * If :func:`Py_SetProgramName` has been called, use its argument. * On macOS, use :envvar:`PYTHONEXECUTABLE` environment variable if set. * If the ``WITH_NEXT_FRAMEWORK`` macro is defined, use :envvar:`__PYVENV_LAUNCHER__` environment variable if set. @@ -1054,6 +1110,7 @@ PyConfig Set by the :option:`-X pycache_prefix=PATH <-X>` command line option and the :envvar:`PYTHONPYCACHEPREFIX` environment variable. + The command-line option takes precedence. If ``NULL``, :data:`sys.pycache_prefix` is set to ``None``. @@ -1097,13 +1154,27 @@ PyConfig Default: ``NULL``. + .. c:member:: wchar_t* run_presite + + ``package.module`` path to module that should be imported before + ``site.py`` is run. + + Set by the :option:`-X presite=package.module <-X>` command-line + option and the :envvar:`PYTHON_PRESITE` environment variable. + The command-line option takes precedence. + + Needs a :ref:`debug build of Python ` (the ``Py_DEBUG`` macro + must be defined). + + Default: ``NULL``. + .. c:member:: int show_ref_count - Show total reference count at exit? + Show total reference count at exit (excluding :term:`immortal` objects)? Set to ``1`` by :option:`-X showrefcount <-X>` command line option. - Need a :ref:`debug build of Python ` (the ``Py_REF_DEBUG`` + Needs a :ref:`debug build of Python ` (the ``Py_REF_DEBUG`` macro must be defined). Default: ``0``. @@ -1120,7 +1191,7 @@ PyConfig Set to ``0`` by the :option:`-S` command line option. 
- :data:`sys.flags.no_site` is set to the inverted value of + :data:`sys.flags.no_site ` is set to the inverted value of :c:member:`~PyConfig.site_import`. Default: ``1``. @@ -1144,9 +1215,6 @@ PyConfig :data:`sys.stderr` (but :data:`sys.stderr` always uses ``"backslashreplace"`` error handler). - If :c:func:`Py_SetStandardStreamEncoding` has been called, use its - *error* and *errors* arguments if they are not ``NULL``. - Use the :envvar:`PYTHONIOENCODING` environment variable if it is non-empty. @@ -1162,6 +1230,8 @@ PyConfig or if the LC_CTYPE locale is "C" or "POSIX". * ``"strict"`` otherwise. + See also :c:member:`PyConfig.legacy_windows_stdio`. + .. c:member:: int tracemalloc Enable tracemalloc? @@ -1509,7 +1579,7 @@ If a ``._pth`` file is present: * Set :c:member:`~PyConfig.safe_path` to ``1``. The ``__PYVENV_LAUNCHER__`` environment variable is used to set -:c:member:`PyConfig.base_executable` +:c:member:`PyConfig.base_executable`. Py_RunMain() @@ -1582,7 +1652,7 @@ applied during the "Main" phase. It may allow to customize Python in Python to override or tune the :ref:`Path Configuration `, maybe install a custom :data:`sys.meta_path` importer or an import hook, etc. -It may become possible to calculatin the :ref:`Path Configuration +It may become possible to calculate the :ref:`Path Configuration ` in Python, after the Core phase and before the Main phase, which is one of the :pep:`432` motivation. diff --git a/Doc/c-api/intro.rst b/Doc/c-api/intro.rst index 85eb24a495b640d..4dbca92b18b5cd2 100644 --- a/Doc/c-api/intro.rst +++ b/Doc/c-api/intro.rst @@ -78,19 +78,19 @@ used by extension writers. Structure member names do not have a reserved prefix. The header files are typically installed with Python. On Unix, these are located in the directories :file:`{prefix}/include/pythonversion/` and -:file:`{exec_prefix}/include/pythonversion/`, where :envvar:`prefix` and -:envvar:`exec_prefix` are defined by the corresponding parameters to Python's +:file:`{exec_prefix}/include/pythonversion/`, where :option:`prefix <--prefix>` and +:option:`exec_prefix <--exec-prefix>` are defined by the corresponding parameters to Python's :program:`configure` script and *version* is ``'%d.%d' % sys.version_info[:2]``. On Windows, the headers are installed -in :file:`{prefix}/include`, where :envvar:`prefix` is the installation +in :file:`{prefix}/include`, where ``prefix`` is the installation directory specified to the installer. To include the headers, place both directories (if different) on your compiler's search path for includes. Do *not* place the parent directories on the search path and then use ``#include ``; this will break on multi-platform builds since the platform independent headers under -:envvar:`prefix` include the platform specific headers from -:envvar:`exec_prefix`. +:option:`prefix <--prefix>` include the platform specific headers from +:option:`exec_prefix <--exec-prefix>`. C++ users should note that although the API is defined entirely using C, the header files properly declare the entry points to be ``extern "C"``. As a result, @@ -105,6 +105,30 @@ defined closer to where they are useful (e.g. :c:macro:`Py_RETURN_NONE`). Others of a more general utility are defined here. This is not necessarily a complete listing. +.. c:macro:: PyMODINIT_FUNC + + Declare an extension module ``PyInit`` initialization function. The function + return type is :c:expr:`PyObject*`. 
The macro declares any special linkage + declarations required by the platform, and for C++ declares the function as + ``extern "C"``. + + The initialization function must be named :samp:`PyInit_{name}`, where + *name* is the name of the module, and should be the only non-\ ``static`` + item defined in the module file. Example:: + + static struct PyModuleDef spam_module = { + PyModuleDef_HEAD_INIT, + .m_name = "spam", + ... + }; + + PyMODINIT_FUNC + PyInit_spam(void) + { + return PyModule_Create(&spam_module); + } + + .. c:macro:: Py_ABS(x) Return the absolute value of ``x``. @@ -261,7 +285,7 @@ complete listing. Objects, Types and Reference Counts =================================== -.. index:: object: type +.. index:: pair: object; type Most Python/C API functions have one or more arguments as well as a return value of type :c:expr:`PyObject*`. This type is a pointer to an opaque data type @@ -287,52 +311,58 @@ true if (and only if) the object pointed to by *a* is a Python list. Reference Counts ---------------- -The reference count is important because today's computers have a finite (and -often severely limited) memory size; it counts how many different places there -are that have a reference to an object. Such a place could be another object, -or a global (or static) C variable, or a local variable in some C function. -When an object's reference count becomes zero, the object is deallocated. If -it contains references to other objects, their reference count is decremented. -Those other objects may be deallocated in turn, if this decrement makes their -reference count become zero, and so on. (There's an obvious problem with -objects that reference each other here; for now, the solution is "don't do -that.") +The reference count is important because today's computers have a finite +(and often severely limited) memory size; it counts how many different +places there are that have a :term:`strong reference` to an object. +Such a place could be another object, or a global (or static) C variable, +or a local variable in some C function. +When the last :term:`strong reference` to an object is released +(i.e. its reference count becomes zero), the object is deallocated. +If it contains references to other objects, those references are released. +Those other objects may be deallocated in turn, if there are no more +references to them, and so on. (There's an obvious problem with +objects that reference each other here; for now, the solution +is "don't do that.") .. index:: single: Py_INCREF() single: Py_DECREF() -Reference counts are always manipulated explicitly. The normal way is to use -the macro :c:func:`Py_INCREF` to increment an object's reference count by one, -and :c:func:`Py_DECREF` to decrement it by one. The :c:func:`Py_DECREF` macro +Reference counts are always manipulated explicitly. The normal way is +to use the macro :c:func:`Py_INCREF` to take a new reference to an +object (i.e. increment its reference count by one), +and :c:func:`Py_DECREF` to release that reference (i.e. decrement the +reference count by one). The :c:func:`Py_DECREF` macro is considerably more complex than the incref one, since it must check whether the reference count becomes zero and then cause the object's deallocator to be -called. The deallocator is a function pointer contained in the object's type -structure. The type-specific deallocator takes care of decrementing the -reference counts for other objects contained in the object if this is a compound +called. 
The deallocator is a function pointer contained in the object's type +structure. The type-specific deallocator takes care of releasing references +for other objects contained in the object if this is a compound object type, such as a list, as well as performing any additional finalization that's needed. There's no chance that the reference count can overflow; at least as many bits are used to hold the reference count as there are distinct memory locations in virtual memory (assuming ``sizeof(Py_ssize_t) >= sizeof(void*)``). Thus, the reference count increment is a simple operation. -It is not necessary to increment an object's reference count for every local -variable that contains a pointer to an object. In theory, the object's +It is not necessary to hold a :term:`strong reference` (i.e. increment +the reference count) for every local variable that contains a pointer +to an object. In theory, the object's reference count goes up by one when the variable is made to point to it and it goes down by one when the variable goes out of scope. However, these two cancel each other out, so at the end the reference count hasn't changed. The only real reason to use the reference count is to prevent the object from being deallocated as long as our variable is pointing to it. If we know that there is at least one other reference to the object that lives at least as long as -our variable, there is no need to increment the reference count temporarily. +our variable, there is no need to take a new :term:`strong reference` +(i.e. increment the reference count) temporarily. An important situation where this arises is in objects that are passed as arguments to C functions in an extension module that are called from Python; the call mechanism guarantees to hold a reference to every argument for the duration of the call. However, a common pitfall is to extract an object from a list and hold on to it -for a while without incrementing its reference count. Some other operation might -conceivably remove the object from the list, decrementing its reference count +for a while without taking a new reference. Some other operation might +conceivably remove the object from the list, releasing that reference, and possibly deallocating it. The real danger is that innocent-looking operations may invoke arbitrary Python code which could do this; there is a code path which allows control to flow back to the user from a :c:func:`Py_DECREF`, so @@ -340,7 +370,8 @@ almost any operation is potentially dangerous. A safe approach is to always use the generic operations (functions whose name begins with ``PyObject_``, ``PyNumber_``, ``PySequence_`` or ``PyMapping_``). -These operations always increment the reference count of the object they return. +These operations always create a new :term:`strong reference` +(i.e. increment the reference count) of the object they return. This leaves the caller with the responsibility to call :c:func:`Py_DECREF` when they are done with the result; this soon becomes second nature. @@ -356,7 +387,7 @@ to objects (objects are not owned: they are always shared). "Owning a reference" means being responsible for calling Py_DECREF on it when the reference is no longer needed. 
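For illustration only (the helper name ``print_square`` is invented here, and as with every example an extension would include :file:`Python.h` first, which also pulls in ``<stdio.h>``), a minimal sketch of this rule looks like::

    static int
    print_square(long n)
    {
        PyObject *num = PyLong_FromLong(n * n);   /* a new strong reference that we own */
        if (num == NULL) {
            return -1;                 /* allocation failed; an exception is set */
        }
        int rc = PyObject_Print(num, stdout, Py_PRINT_RAW);
        Py_DECREF(num);                /* done with it: release our reference */
        return rc;
    }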
Ownership can also be transferred, meaning that the code that receives ownership of the reference then becomes responsible for -eventually decref'ing it by calling :c:func:`Py_DECREF` or :c:func:`Py_XDECREF` +eventually releasing it by calling :c:func:`Py_DECREF` or :c:func:`Py_XDECREF` when it's no longer needed---or passing on this responsibility (usually to its caller). When a function passes ownership of a reference on to its caller, the caller is said to receive a *new* reference. When no ownership is transferred, @@ -414,9 +445,9 @@ For example, the above two blocks of code could be replaced by the following It is much more common to use :c:func:`PyObject_SetItem` and friends with items whose references you are only borrowing, like arguments that were passed in to -the function you are writing. In that case, their behaviour regarding reference -counts is much saner, since you don't have to increment a reference count so you -can give a reference away ("have it be stolen"). For example, this function +the function you are writing. In that case, their behaviour regarding references +is much saner, since you don't have to take a new reference just so you +can give that reference away ("have it be stolen"). For example, this function sets all items of a list (actually, any mutable sequence) to a given item:: int @@ -616,7 +647,7 @@ and lose important information about the exact cause of the error. .. index:: single: sum_sequence() A simple example of detecting exceptions and passing them on is shown in the -:c:func:`sum_sequence` example above. It so happens that this example doesn't +:c:func:`!sum_sequence` example above. It so happens that this example doesn't need to clean up any owned references when it detects an error. The following example function shows some error cleanup. First, to remind you why you like Python, we show the equivalent Python code:: @@ -705,9 +736,9 @@ interpreter can only be used after the interpreter has been initialized. .. index:: single: Py_Initialize() - module: builtins - module: __main__ - module: sys + pair: module; builtins + pair: module; __main__ + pair: module; sys triple: module; search; path single: path (in module sys) @@ -739,14 +770,14 @@ environment variable :envvar:`PYTHONHOME`, or insert additional directories in front of the standard path by setting :envvar:`PYTHONPATH`. .. index:: - single: Py_SetProgramName() single: Py_GetPath() single: Py_GetPrefix() single: Py_GetExecPrefix() single: Py_GetProgramFullPath() -The embedding application can steer the search by calling -``Py_SetProgramName(file)`` *before* calling :c:func:`Py_Initialize`. Note that +The embedding application can steer the search by setting +:c:member:`PyConfig.program_name` *before* calling +:c:func:`Py_InitializeFromConfig`. Note that :envvar:`PYTHONHOME` still overrides this and :envvar:`PYTHONPATH` is still inserted in front of the standard path. An application that requires total control has to provide its own implementation of :c:func:`Py_GetPath`, diff --git a/Doc/c-api/iterator.rst b/Doc/c-api/iterator.rst index 3fcf099134d4dd4..6b7ba8c99791634 100644 --- a/Doc/c-api/iterator.rst +++ b/Doc/c-api/iterator.rst @@ -6,7 +6,7 @@ Iterator Objects ---------------- Python provides two general-purpose iterator objects. The first, a sequence -iterator, works with an arbitrary sequence supporting the :meth:`__getitem__` +iterator, works with an arbitrary sequence supporting the :meth:`~object.__getitem__` method. 
The second works with a callable object and a sentinel value, calling the callable for each item in the sequence, and ending the iteration when the sentinel value is returned. @@ -19,7 +19,7 @@ sentinel value is returned. types. -.. c:function:: int PySeqIter_Check(op) +.. c:function:: int PySeqIter_Check(PyObject *op) Return true if the type of *op* is :c:data:`PySeqIter_Type`. This function always succeeds. @@ -38,7 +38,7 @@ sentinel value is returned. two-argument form of the :func:`iter` built-in function. -.. c:function:: int PyCallIter_Check(op) +.. c:function:: int PyCallIter_Check(PyObject *op) Return true if the type of *op* is :c:data:`PyCallIter_Type`. This function always succeeds. diff --git a/Doc/c-api/list.rst b/Doc/c-api/list.rst index f9e65354a259f48..c8b64bad702f508 100644 --- a/Doc/c-api/list.rst +++ b/Doc/c-api/list.rst @@ -5,7 +5,7 @@ List Objects ------------ -.. index:: object: list +.. index:: pair: object; list .. c:type:: PyListObject @@ -45,7 +45,7 @@ List Objects .. c:function:: Py_ssize_t PyList_Size(PyObject *list) - .. index:: builtin: len + .. index:: pair: built-in function; len Return the length of the list object in *list*; this is equivalent to ``len(list)`` on a list object. @@ -86,6 +86,10 @@ List Objects Macro form of :c:func:`PyList_SetItem` without error checking. This is normally only used to fill in new lists where there is no previous content. + Bounds checking is performed as an assertion if Python is built in + :ref:`debug mode ` or :option:`with assertions + <--with-assertions>`. + .. note:: This macro "steals" a reference to *item*, and, unlike @@ -124,6 +128,30 @@ List Objects list is not supported. +.. c:function:: int PyList_Extend(PyObject *list, PyObject *iterable) + + Extend *list* with the contents of *iterable*. This is the same as + ``PyList_SetSlice(list, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, iterable)`` + and analogous to ``list.extend(iterable)`` or ``list += iterable``. + + Raise an exception and return ``-1`` if *list* is not a :class:`list` + object. Return 0 on success. + + .. versionadded:: 3.13 + + +.. c:function:: int PyList_Clear(PyObject *list) + + Remove all items from *list*. This is the same as + ``PyList_SetSlice(list, 0, PY_SSIZE_T_MAX, NULL)`` and analogous to + ``list.clear()`` or ``del list[:]``. + + Raise an exception and return ``-1`` if *list* is not a :class:`list` + object. Return 0 on success. + + .. versionadded:: 3.13 + + .. c:function:: int PyList_Sort(PyObject *list) Sort the items of *list* in place. Return ``0`` on success, ``-1`` on @@ -138,7 +166,7 @@ List Objects .. c:function:: PyObject* PyList_AsTuple(PyObject *list) - .. index:: builtin: tuple + .. index:: pair: built-in function; tuple Return a new tuple object containing the contents of *list*; equivalent to ``tuple(list)``. diff --git a/Doc/c-api/long.rst b/Doc/c-api/long.rst index 4f6f865db8be13c..045604870d3c840 100644 --- a/Doc/c-api/long.rst +++ b/Doc/c-api/long.rst @@ -5,8 +5,8 @@ Integer Objects --------------- -.. index:: object: long integer - object: integer +.. index:: pair: object; long integer + pair: object; integer All integers are implemented as "long" integer objects of arbitrary size. @@ -94,6 +94,10 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. ignored. If there are no digits or *str* is not NULL-terminated following the digits and trailing whitespace, :exc:`ValueError` will be raised. + .. 
seealso:: Python methods :meth:`int.to_bytes` and :meth:`int.from_bytes` + to convert a :c:type:`PyLongObject` to/from an array of bytes in base + ``256``. You can call those from C using :c:func:`PyObject_CallMethod`. + .. c:function:: PyObject* PyLong_FromUnicodeObject(PyObject *u, int base) @@ -117,7 +121,7 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. single: OverflowError (built-in exception) Return a C :c:expr:`long` representation of *obj*. If *obj* is not an - instance of :c:type:`PyLongObject`, first call its :meth:`__index__` method + instance of :c:type:`PyLongObject`, first call its :meth:`~object.__index__` method (if present) to convert it to a :c:type:`PyLongObject`. Raise :exc:`OverflowError` if the value of *obj* is out of range for a @@ -126,30 +130,38 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. Returns ``-1`` on error. Use :c:func:`PyErr_Occurred` to disambiguate. .. versionchanged:: 3.8 - Use :meth:`__index__` if available. + Use :meth:`~object.__index__` if available. .. versionchanged:: 3.10 - This function will no longer use :meth:`__int__`. + This function will no longer use :meth:`~object.__int__`. + + +.. c:function:: int PyLong_AsInt(PyObject *obj) + + Similar to :c:func:`PyLong_AsLong`, but store the result in a C + :c:expr:`int` instead of a C :c:expr:`long`. + + .. versionadded:: 3.13 .. c:function:: long PyLong_AsLongAndOverflow(PyObject *obj, int *overflow) Return a C :c:expr:`long` representation of *obj*. If *obj* is not an - instance of :c:type:`PyLongObject`, first call its :meth:`__index__` + instance of :c:type:`PyLongObject`, first call its :meth:`~object.__index__` method (if present) to convert it to a :c:type:`PyLongObject`. - If the value of *obj* is greater than :const:`LONG_MAX` or less than - :const:`LONG_MIN`, set *\*overflow* to ``1`` or ``-1``, respectively, and + If the value of *obj* is greater than :c:macro:`LONG_MAX` or less than + :c:macro:`LONG_MIN`, set *\*overflow* to ``1`` or ``-1``, respectively, and return ``-1``; otherwise, set *\*overflow* to ``0``. If any other exception occurs set *\*overflow* to ``0`` and return ``-1`` as usual. Returns ``-1`` on error. Use :c:func:`PyErr_Occurred` to disambiguate. .. versionchanged:: 3.8 - Use :meth:`__index__` if available. + Use :meth:`~object.__index__` if available. .. versionchanged:: 3.10 - This function will no longer use :meth:`__int__`. + This function will no longer use :meth:`~object.__int__`. .. c:function:: long long PyLong_AsLongLong(PyObject *obj) @@ -158,7 +170,7 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. single: OverflowError (built-in exception) Return a C :c:expr:`long long` representation of *obj*. If *obj* is not an - instance of :c:type:`PyLongObject`, first call its :meth:`__index__` method + instance of :c:type:`PyLongObject`, first call its :meth:`~object.__index__` method (if present) to convert it to a :c:type:`PyLongObject`. Raise :exc:`OverflowError` if the value of *obj* is out of range for a @@ -167,20 +179,20 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. Returns ``-1`` on error. Use :c:func:`PyErr_Occurred` to disambiguate. .. versionchanged:: 3.8 - Use :meth:`__index__` if available. + Use :meth:`~object.__index__` if available. .. versionchanged:: 3.10 - This function will no longer use :meth:`__int__`. + This function will no longer use :meth:`~object.__int__`. .. 
c:function:: long long PyLong_AsLongLongAndOverflow(PyObject *obj, int *overflow) Return a C :c:expr:`long long` representation of *obj*. If *obj* is not an - instance of :c:type:`PyLongObject`, first call its :meth:`__index__` method + instance of :c:type:`PyLongObject`, first call its :meth:`~object.__index__` method (if present) to convert it to a :c:type:`PyLongObject`. - If the value of *obj* is greater than :const:`LLONG_MAX` or less than - :const:`LLONG_MIN`, set *\*overflow* to ``1`` or ``-1``, respectively, + If the value of *obj* is greater than :c:macro:`LLONG_MAX` or less than + :c:macro:`LLONG_MIN`, set *\*overflow* to ``1`` or ``-1``, respectively, and return ``-1``; otherwise, set *\*overflow* to ``0``. If any other exception occurs set *\*overflow* to ``0`` and return ``-1`` as usual. @@ -189,10 +201,10 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. .. versionadded:: 3.2 .. versionchanged:: 3.8 - Use :meth:`__index__` if available. + Use :meth:`~object.__index__` if available. .. versionchanged:: 3.10 - This function will no longer use :meth:`__int__`. + This function will no longer use :meth:`~object.__int__`. .. c:function:: Py_ssize_t PyLong_AsSsize_t(PyObject *pylong) @@ -263,7 +275,7 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. .. c:function:: unsigned long PyLong_AsUnsignedLongMask(PyObject *obj) Return a C :c:expr:`unsigned long` representation of *obj*. If *obj* is not - an instance of :c:type:`PyLongObject`, first call its :meth:`__index__` + an instance of :c:type:`PyLongObject`, first call its :meth:`~object.__index__` method (if present) to convert it to a :c:type:`PyLongObject`. If the value of *obj* is out of range for an :c:expr:`unsigned long`, @@ -273,17 +285,17 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. disambiguate. .. versionchanged:: 3.8 - Use :meth:`__index__` if available. + Use :meth:`~object.__index__` if available. .. versionchanged:: 3.10 - This function will no longer use :meth:`__int__`. + This function will no longer use :meth:`~object.__int__`. .. c:function:: unsigned long long PyLong_AsUnsignedLongLongMask(PyObject *obj) Return a C :c:expr:`unsigned long long` representation of *obj*. If *obj* is not an instance of :c:type:`PyLongObject`, first call its - :meth:`__index__` method (if present) to convert it to a + :meth:`~object.__index__` method (if present) to convert it to a :c:type:`PyLongObject`. If the value of *obj* is out of range for an :c:expr:`unsigned long long`, @@ -293,10 +305,10 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. to disambiguate. .. versionchanged:: 3.8 - Use :meth:`__index__` if available. + Use :meth:`~object.__index__` if available. .. versionchanged:: 3.10 - This function will no longer use :meth:`__int__`. + This function will no longer use :meth:`~object.__int__`. .. c:function:: double PyLong_AsDouble(PyObject *pylong) @@ -318,3 +330,27 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. with :c:func:`PyLong_FromVoidPtr`. Returns ``NULL`` on error. Use :c:func:`PyErr_Occurred` to disambiguate. + + +.. c:function:: int PyUnstable_Long_IsCompact(const PyLongObject* op) + + Return 1 if *op* is compact, 0 otherwise. + + This function makes it possible for performance-critical code to implement + a “fast path†for small integers. 
For compact values use + :c:func:`PyUnstable_Long_CompactValue`; for others fall back to a + :c:func:`PyLong_As* ` function or + :c:func:`calling ` :meth:`int.to_bytes`. + + The speedup is expected to be negligible for most users. + + Exactly what values are considered compact is an implementation detail + and is subject to change. + +.. c:function:: Py_ssize_t PyUnstable_Long_CompactValue(const PyLongObject* op) + + If *op* is compact, as determined by :c:func:`PyUnstable_Long_IsCompact`, + return its value. + + Otherwise, the return value is undefined. + diff --git a/Doc/c-api/mapping.rst b/Doc/c-api/mapping.rst index 3c9d282c6d0ab02..1f55c0aa955c75b 100644 --- a/Doc/c-api/mapping.rst +++ b/Doc/c-api/mapping.rst @@ -13,14 +13,14 @@ See also :c:func:`PyObject_GetItem`, :c:func:`PyObject_SetItem` and Return ``1`` if the object provides the mapping protocol or supports slicing, and ``0`` otherwise. Note that it returns ``1`` for Python classes with - a :meth:`__getitem__` method, since in general it is impossible to + a :meth:`~object.__getitem__` method, since in general it is impossible to determine what type of keys the class supports. This function always succeeds. .. c:function:: Py_ssize_t PyMapping_Size(PyObject *o) Py_ssize_t PyMapping_Length(PyObject *o) - .. index:: builtin: len + .. index:: pair: built-in function; len Returns the number of keys in object *o* on success, and ``-1`` on failure. This is equivalent to the Python expression ``len(o)``. @@ -28,52 +28,100 @@ See also :c:func:`PyObject_GetItem`, :c:func:`PyObject_SetItem` and .. c:function:: PyObject* PyMapping_GetItemString(PyObject *o, const char *key) - Return element of *o* corresponding to the string *key* or ``NULL`` on failure. - This is the equivalent of the Python expression ``o[key]``. - See also :c:func:`PyObject_GetItem`. + This is the same as :c:func:`PyObject_GetItem`, but *key* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. + + +.. c:function:: int PyMapping_GetOptionalItem(PyObject *obj, PyObject *key, PyObject **result) + + Variant of :c:func:`PyObject_GetItem` which doesn't raise + :exc:`KeyError` if the key is not found. + + If the key is found, return ``1`` and set *\*result* to a new + :term:`strong reference` to the corresponding value. + If the key is not found, return ``0`` and set *\*result* to ``NULL``; + the :exc:`KeyError` is silenced. + If an error other than :exc:`KeyError` is raised, return ``-1`` and + set *\*result* to ``NULL``. + + .. versionadded:: 3.13 + + +.. c:function:: int PyMapping_GetOptionalItemString(PyObject *obj, const char *key, PyObject **result) + + This is the same as :c:func:`PyMapping_GetOptionalItem`, but *key* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. + + .. versionadded:: 3.13 .. c:function:: int PyMapping_SetItemString(PyObject *o, const char *key, PyObject *v) - Map the string *key* to the value *v* in object *o*. Returns ``-1`` on - failure. This is the equivalent of the Python statement ``o[key] = v``. - See also :c:func:`PyObject_SetItem`. This function *does not* steal a - reference to *v*. + This is the same as :c:func:`PyObject_SetItem`, but *key* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. .. c:function:: int PyMapping_DelItem(PyObject *o, PyObject *key) - Remove the mapping for the object *key* from the object *o*. Return ``-1`` - on failure. 
This is equivalent to the Python statement ``del o[key]``. This is an alias of :c:func:`PyObject_DelItem`. .. c:function:: int PyMapping_DelItemString(PyObject *o, const char *key) - Remove the mapping for the string *key* from the object *o*. Return ``-1`` - on failure. This is equivalent to the Python statement ``del o[key]``. + This is the same as :c:func:`PyObject_DelItem`, but *key* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. -.. c:function:: int PyMapping_HasKey(PyObject *o, PyObject *key) +.. c:function:: int PyMapping_HasKeyWithError(PyObject *o, PyObject *key) Return ``1`` if the mapping object has the key *key* and ``0`` otherwise. This is equivalent to the Python expression ``key in o``. - This function always succeeds. + On failure, return ``-1``. - Note that exceptions which occur while calling the :meth:`__getitem__` - method will get suppressed. - To get error reporting use :c:func:`PyObject_GetItem()` instead. + .. versionadded:: 3.13 -.. c:function:: int PyMapping_HasKeyString(PyObject *o, const char *key) +.. c:function:: int PyMapping_HasKeyStringWithError(PyObject *o, const char *key) + + This is the same as :c:func:`PyMapping_HasKeyWithError`, but *key* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. + + .. versionadded:: 3.13 + + +.. c:function:: int PyMapping_HasKey(PyObject *o, PyObject *key) Return ``1`` if the mapping object has the key *key* and ``0`` otherwise. This is equivalent to the Python expression ``key in o``. This function always succeeds. - Note that exceptions which occur while calling the :meth:`__getitem__` - method and creating a temporary string object will get suppressed. - To get error reporting use :c:func:`PyMapping_GetItemString()` instead. + .. note:: + + Exceptions which occur when this calls :meth:`~object.__getitem__` + method are silently ignored. + For proper error handling, use :c:func:`PyMapping_HasKeyWithError`, + :c:func:`PyMapping_GetOptionalItem` or :c:func:`PyObject_GetItem()` instead. + + +.. c:function:: int PyMapping_HasKeyString(PyObject *o, const char *key) + + This is the same as :c:func:`PyMapping_HasKey`, but *key* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. + + .. note:: + + Exceptions that occur when this calls :meth:`~object.__getitem__` + method or while creating the temporary :class:`str` + object are silently ignored. + For proper error handling, use :c:func:`PyMapping_HasKeyStringWithError`, + :c:func:`PyMapping_GetOptionalItemString` or + :c:func:`PyMapping_GetItemString` instead. .. c:function:: PyObject* PyMapping_Keys(PyObject *o) diff --git a/Doc/c-api/marshal.rst b/Doc/c-api/marshal.rst index 8e25968c6909fd6..489f1580a414b2d 100644 --- a/Doc/c-api/marshal.rst +++ b/Doc/c-api/marshal.rst @@ -25,12 +25,16 @@ unmarshalling. Version 2 uses a binary format for floating point numbers. the least-significant 32 bits of *value*; regardless of the size of the native :c:expr:`long` type. *version* indicates the file format. + This function can fail, in which case it sets the error indicator. + Use :c:func:`PyErr_Occurred` to check for that. .. c:function:: void PyMarshal_WriteObjectToFile(PyObject *value, FILE *file, int version) Marshal a Python object, *value*, to *file*. *version* indicates the file format. + This function can fail, in which case it sets the error indicator. + Use :c:func:`PyErr_Occurred` to check for that. .. 
c:function:: PyObject* PyMarshal_WriteObjectToString(PyObject *value, int version) diff --git a/Doc/c-api/memory.rst b/Doc/c-api/memory.rst index 7041c15d23fb839..1f392e55078e775 100644 --- a/Doc/c-api/memory.rst +++ b/Doc/c-api/memory.rst @@ -136,7 +136,7 @@ need to be held. The :ref:`default raw memory allocator ` uses the following functions: :c:func:`malloc`, :c:func:`calloc`, :c:func:`realloc` -and :c:func:`free`; call ``malloc(1)`` (or ``calloc(1, 1)``) when requesting +and :c:func:`!free`; call ``malloc(1)`` (or ``calloc(1, 1)``) when requesting zero bytes. .. versionadded:: 3.4 @@ -264,14 +264,14 @@ The following type-oriented macros are provided for convenience. Note that *TYPE* refers to any C type. -.. c:function:: TYPE* PyMem_New(TYPE, size_t n) +.. c:macro:: PyMem_New(TYPE, n) Same as :c:func:`PyMem_Malloc`, but allocates ``(n * sizeof(TYPE))`` bytes of memory. Returns a pointer cast to :c:expr:`TYPE*`. The memory will not have been initialized in any way. -.. c:function:: TYPE* PyMem_Resize(void *p, TYPE, size_t n) +.. c:macro:: PyMem_Resize(p, TYPE, n) Same as :c:func:`PyMem_Realloc`, but the memory block is resized to ``(n * sizeof(TYPE))`` bytes. Returns a pointer cast to :c:expr:`TYPE*`. On return, @@ -391,6 +391,8 @@ Legend: * ``malloc``: system allocators from the standard C library, C functions: :c:func:`malloc`, :c:func:`calloc`, :c:func:`realloc` and :c:func:`free`. * ``pymalloc``: :ref:`pymalloc memory allocator `. +* ``mimalloc``: :ref:`mimalloc memory allocator `. The pymalloc + allocator will be used if mimalloc support isn't available. * "+ debug": with :ref:`debug hooks on the Python memory allocators `. * "Debug build": :ref:`Python build in debug mode `. @@ -423,7 +425,7 @@ Customize Memory Allocators +----------------------------------------------------------+---------------------------------------+ .. versionchanged:: 3.5 - The :c:type:`PyMemAllocator` structure was renamed to + The :c:type:`!PyMemAllocator` structure was renamed to :c:type:`PyMemAllocatorEx` and a new ``calloc`` field was added. @@ -431,6 +433,8 @@ Customize Memory Allocators Enum used to identify an allocator domain. Domains: + .. c:namespace:: NULL + .. c:macro:: PYMEM_DOMAIN_RAW Functions: @@ -470,10 +474,14 @@ Customize Memory Allocators The new allocator must return a distinct non-``NULL`` pointer when requesting zero bytes. - For the :c:data:`PYMEM_DOMAIN_RAW` domain, the allocator must be + For the :c:macro:`PYMEM_DOMAIN_RAW` domain, the allocator must be thread-safe: the :term:`GIL ` is not held when the allocator is called. + For the remaining domains, the allocator must also be thread-safe: + the allocator may be called in different interpreters that do not + share a ``GIL``. + If the new allocator is not a hook (does not call the previous allocator), the :c:func:`PyMem_SetupDebugHooks` function must be called to reinstall the debug hooks on top on the new allocator. @@ -485,19 +493,21 @@ Customize Memory Allocators :c:func:`PyMem_SetAllocator` does have the following contract: - * It can be called after :c:func:`Py_PreInitialize` and before - :c:func:`Py_InitializeFromConfig` to install a custom memory - allocator. There are no restrictions over the installed allocator - other than the ones imposed by the domain (for instance, the Raw - Domain allows the allocator to be called without the GIL held). See - :ref:`the section on allocator domains ` for more - information. 
+ * It can be called after :c:func:`Py_PreInitialize` and before + :c:func:`Py_InitializeFromConfig` to install a custom memory + allocator. There are no restrictions over the installed allocator + other than the ones imposed by the domain (for instance, the Raw + Domain allows the allocator to be called without the GIL held). See + :ref:`the section on allocator domains ` for more + information. - * If called after Python has finish initializing (after - :c:func:`Py_InitializeFromConfig` has been called) the allocator - **must** wrap the existing allocator. Substituting the current - allocator for some other arbitrary one is **not supported**. + * If called after Python has finish initializing (after + :c:func:`Py_InitializeFromConfig` has been called) the allocator + **must** wrap the existing allocator. Substituting the current + allocator for some other arbitrary one is **not supported**. + .. versionchanged:: 3.12 + All allocators must be thread-safe. .. c:function:: void PyMem_SetupDebugHooks(void) @@ -536,8 +546,8 @@ Runtime checks: - Detect write before the start of the buffer (buffer underflow). - Detect write after the end of the buffer (buffer overflow). - Check that the :term:`GIL ` is held when - allocator functions of :c:data:`PYMEM_DOMAIN_OBJ` (ex: - :c:func:`PyObject_Malloc`) and :c:data:`PYMEM_DOMAIN_MEM` (ex: + allocator functions of :c:macro:`PYMEM_DOMAIN_OBJ` (ex: + :c:func:`PyObject_Malloc`) and :c:macro:`PYMEM_DOMAIN_MEM` (ex: :c:func:`PyMem_Malloc`) domains are called. On error, the debug hooks use the :mod:`tracemalloc` module to get the @@ -557,9 +567,9 @@ that the treatment of negative indices differs from a Python slice): ``p[-S]`` API identifier (ASCII character): - * ``'r'`` for :c:data:`PYMEM_DOMAIN_RAW`. - * ``'m'`` for :c:data:`PYMEM_DOMAIN_MEM`. - * ``'o'`` for :c:data:`PYMEM_DOMAIN_OBJ`. + * ``'r'`` for :c:macro:`PYMEM_DOMAIN_RAW`. + * ``'m'`` for :c:macro:`PYMEM_DOMAIN_MEM`. + * ``'o'`` for :c:macro:`PYMEM_DOMAIN_OBJ`. ``p[-S+1:0]`` Copies of PYMEM_FORBIDDENBYTE. Used to catch under- writes and reads. @@ -581,7 +591,7 @@ that the treatment of negative indices differs from a Python slice): default). A serial number, incremented by 1 on each call to a malloc-like or - realloc-like function. Big-endian ``size_t``. If "bad memory" is detected + realloc-like function. Big-endian :c:type:`size_t`. If "bad memory" is detected later, the serial number gives an excellent way to set a breakpoint on the next run, to capture the instant at which this block was passed out. The static function bumpserialno() in obmalloc.c is the only place the serial @@ -601,7 +611,7 @@ PYMEM_CLEANBYTE (meaning uninitialized memory is getting used). compiled in release mode. On error, the debug hooks now use :mod:`tracemalloc` to get the traceback where a memory block was allocated. The debug hooks now also check if the GIL is held when functions of - :c:data:`PYMEM_DOMAIN_OBJ` and :c:data:`PYMEM_DOMAIN_MEM` domains are + :c:macro:`PYMEM_DOMAIN_OBJ` and :c:macro:`PYMEM_DOMAIN_MEM` domains are called. .. versionchanged:: 3.8 @@ -618,17 +628,18 @@ The pymalloc allocator Python has a *pymalloc* allocator optimized for small objects (smaller or equal to 512 bytes) with a short lifetime. It uses memory mappings called "arenas" -with a fixed size of 256 KiB. It falls back to :c:func:`PyMem_RawMalloc` and +with a fixed size of either 256 KiB on 32-bit platforms or 1 MiB on 64-bit +platforms. 
It falls back to :c:func:`PyMem_RawMalloc` and :c:func:`PyMem_RawRealloc` for allocations larger than 512 bytes. *pymalloc* is the :ref:`default allocator ` of the -:c:data:`PYMEM_DOMAIN_MEM` (ex: :c:func:`PyMem_Malloc`) and -:c:data:`PYMEM_DOMAIN_OBJ` (ex: :c:func:`PyObject_Malloc`) domains. +:c:macro:`PYMEM_DOMAIN_MEM` (ex: :c:func:`PyMem_Malloc`) and +:c:macro:`PYMEM_DOMAIN_OBJ` (ex: :c:func:`PyObject_Malloc`) domains. The arena allocator uses the following functions: -* :c:func:`VirtualAlloc` and :c:func:`VirtualFree` on Windows, -* :c:func:`mmap` and :c:func:`munmap` if available, +* :c:func:`!VirtualAlloc` and :c:func:`!VirtualFree` on Windows, +* :c:func:`!mmap` and :c:func:`!munmap` if available, * :c:func:`malloc` and :c:func:`free` otherwise. This allocator is disabled if Python is configured with the @@ -663,6 +674,16 @@ Customize pymalloc Arena Allocator Set the arena allocator. +.. _mimalloc: + +The mimalloc allocator +====================== + +.. versionadded:: 3.13 + +Python supports the mimalloc allocator when the underlying platform support is available. +mimalloc "is a general purpose allocator with excellent performance characteristics. +Initially developed by Daan Leijen for the runtime systems of the Koka and Lean languages." tracemalloc C API ================= @@ -732,8 +753,8 @@ allocators operating on different heaps. :: free(buf1); /* Fatal -- should be PyMem_Del() */ In addition to the functions aimed at handling raw memory blocks from the Python -heap, objects in Python are allocated and released with :c:func:`PyObject_New`, -:c:func:`PyObject_NewVar` and :c:func:`PyObject_Del`. +heap, objects in Python are allocated and released with :c:macro:`PyObject_New`, +:c:macro:`PyObject_NewVar` and :c:func:`PyObject_Del`. These will be explained in the next chapter on defining and implementing new object types in C. diff --git a/Doc/c-api/memoryview.rst b/Doc/c-api/memoryview.rst index ebd5c7760437bf3..2aa43318e7a455c 100644 --- a/Doc/c-api/memoryview.rst +++ b/Doc/c-api/memoryview.rst @@ -3,7 +3,7 @@ .. _memoryview-objects: .. index:: - object: memoryview + pair: object; memoryview MemoryView objects ------------------ diff --git a/Doc/c-api/method.rst b/Doc/c-api/method.rst index 6e7e1e21aa93f29..0d75ab8e1af1118 100644 --- a/Doc/c-api/method.rst +++ b/Doc/c-api/method.rst @@ -5,10 +5,10 @@ Instance Method Objects ----------------------- -.. index:: object: instancemethod +.. index:: pair: object; instancemethod -An instance method is a wrapper for a :c:data:`PyCFunction` and the new way -to bind a :c:data:`PyCFunction` to a class object. It replaces the former call +An instance method is a wrapper for a :c:type:`PyCFunction` and the new way +to bind a :c:type:`PyCFunction` to a class object. It replaces the former call ``PyMethod_New(func, NULL, class)``. @@ -47,7 +47,7 @@ to bind a :c:data:`PyCFunction` to a class object. It replaces the former call Method Objects -------------- -.. index:: object: method +.. index:: pair: object; method Methods are bound function objects. Methods are always bound to an instance of a user-defined class. Unbound methods (methods bound to a class object) are diff --git a/Doc/c-api/module.rst b/Doc/c-api/module.rst index e2ba157b32c7d99..979b22261efa3b3 100644 --- a/Doc/c-api/module.rst +++ b/Doc/c-api/module.rst @@ -5,7 +5,7 @@ Module Objects -------------- -.. index:: object: module +.. index:: pair: object; module .. c:var:: PyTypeObject PyModule_Type @@ -119,7 +119,7 @@ Module Objects encoded to 'utf-8'. .. 
deprecated:: 3.2 - :c:func:`PyModule_GetFilename` raises :c:type:`UnicodeEncodeError` on + :c:func:`PyModule_GetFilename` raises :exc:`UnicodeEncodeError` on unencodable filenames, use :c:func:`PyModule_GetFilenameObject` instead. @@ -145,7 +145,7 @@ or request "multi-phase initialization" by returning the definition struct itsel .. c:member:: PyModuleDef_Base m_base - Always initialize this member to :const:`PyModuleDef_HEAD_INIT`. + Always initialize this member to :c:macro:`PyModuleDef_HEAD_INIT`. .. c:member:: const char *m_name @@ -164,7 +164,7 @@ or request "multi-phase initialization" by returning the definition struct itsel This memory area is allocated based on *m_size* on module creation, and freed when the module object is deallocated, after the - :c:member:`m_free` function has been called, if present. + :c:member:`~PyModuleDef.m_free` function has been called, if present. Setting ``m_size`` to ``-1`` means that the module does not support sub-interpreters, because it has global state. @@ -202,7 +202,7 @@ or request "multi-phase initialization" by returning the definition struct itsel This function is not called if the module state was requested but is not allocated yet. This is the case immediately after the module is created and before the module is executed (:c:data:`Py_mod_exec` function). More - precisely, this function is not called if :c:member:`m_size` is greater + precisely, this function is not called if :c:member:`~PyModuleDef.m_size` is greater than 0 and the module state (as returned by :c:func:`PyModule_GetState`) is ``NULL``. @@ -217,7 +217,7 @@ or request "multi-phase initialization" by returning the definition struct itsel This function is not called if the module state was requested but is not allocated yet. This is the case immediately after the module is created and before the module is executed (:c:data:`Py_mod_exec` function). More - precisely, this function is not called if :c:member:`m_size` is greater + precisely, this function is not called if :c:member:`~PyModuleDef.m_size` is greater than 0 and the module state (as returned by :c:func:`PyModule_GetState`) is ``NULL``. @@ -238,7 +238,7 @@ or request "multi-phase initialization" by returning the definition struct itsel This function is not called if the module state was requested but is not allocated yet. This is the case immediately after the module is created and before the module is executed (:c:data:`Py_mod_exec` function). More - precisely, this function is not called if :c:member:`m_size` is greater + precisely, this function is not called if :c:member:`~PyModuleDef.m_size` is greater than 0 and the module state (as returned by :c:func:`PyModule_GetState`) is ``NULL``. @@ -256,7 +256,7 @@ of the following two module creation functions: Create a new module object, given the definition in *def*. This behaves like :c:func:`PyModule_Create2` with *module_api_version* set to - :const:`PYTHON_API_VERSION`. + :c:macro:`PYTHON_API_VERSION`. .. c:function:: PyObject* PyModule_Create2(PyModuleDef *def, int module_api_version) @@ -282,7 +282,7 @@ An alternate way to specify extensions is to request "multi-phase initialization Extension modules created this way behave more like Python modules: the initialization is split between the *creation phase*, when the module object is created, and the *execution phase*, when it is populated. 
-The distinction is similar to the :py:meth:`__new__` and :py:meth:`__init__` methods +The distinction is similar to the :py:meth:`!__new__` and :py:meth:`!__init__` methods of classes. Unlike modules created using single-phase initialization, these modules are not @@ -293,7 +293,7 @@ By default, multiple modules created from the same definition should be independent: changes to one should not affect the others. This means that all state should be specific to the module object (using e.g. using :c:func:`PyModule_GetState`), or its contents (such as the module's -:attr:`__dict__` or individual classes created with :c:func:`PyType_FromSpec`). +:attr:`~object.__dict__` or individual classes created with :c:func:`PyType_FromSpec`). All modules created using multi-phase initialization are expected to support :ref:`sub-interpreters `. Making sure multiple modules @@ -338,6 +338,7 @@ The available slot types are: The *value* pointer of this slot must point to a function of the signature: .. c:function:: PyObject* create_module(PyObject *spec, PyModuleDef *def) + :noindex: The function receives a :py:class:`~importlib.machinery.ModuleSpec` instance, as defined in :PEP:`451`, and the module definition. @@ -372,10 +373,44 @@ The available slot types are: The signature of the function is: .. c:function:: int exec_module(PyObject* module) + :noindex: If multiple ``Py_mod_exec`` slots are specified, they are processed in the order they appear in the *m_slots* array. +.. c:macro:: Py_mod_multiple_interpreters + + Specifies one of the following values: + + .. c:namespace:: NULL + + .. c:macro:: Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED + + The module does not support being imported in subinterpreters. + + .. c:macro:: Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED + + The module supports being imported in subinterpreters, + but only when they share the main interpreter's GIL. + (See :ref:`isolating-extensions-howto`.) + + .. c:macro:: Py_MOD_PER_INTERPRETER_GIL_SUPPORTED + + The module supports being imported in subinterpreters, + even when they have their own GIL. + (See :ref:`isolating-extensions-howto`.) + + This slot determines whether or not importing this module + in a subinterpreter will fail. + + Multiple ``Py_mod_multiple_interpreters`` slots may not be specified + in one module definition. + + If ``Py_mod_multiple_interpreters`` is not specified, the import + machinery defaults to ``Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED``. + + .. versionadded:: 3.12 + See :PEP:`489` for more details on multi-phase initialization. Low-level module creation functions @@ -388,15 +423,15 @@ objects dynamically. Note that both ``PyModule_FromDefAndSpec`` and .. c:function:: PyObject * PyModule_FromDefAndSpec(PyModuleDef *def, PyObject *spec) - Create a new module object, given the definition in *module* and the + Create a new module object, given the definition in *def* and the ModuleSpec *spec*. This behaves like :c:func:`PyModule_FromDefAndSpec2` - with *module_api_version* set to :const:`PYTHON_API_VERSION`. + with *module_api_version* set to :c:macro:`PYTHON_API_VERSION`. .. versionadded:: 3.5 .. c:function:: PyObject * PyModule_FromDefAndSpec2(PyModuleDef *def, PyObject *spec, int module_api_version) - Create a new module object, given the definition in *module* and the + Create a new module object, given the definition in *def* and the ModuleSpec *spec*, assuming the API version *module_api_version*. If that version does not match the version of the running interpreter, a :exc:`RuntimeWarning` is emitted. 
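To make the multi-phase pieces above concrete, here is a hedged sketch of a complete definition; the module name ``spam``, the ``spam_exec`` helper and the ``answer`` constant are invented for illustration, while the slots are the ones documented above::

    static int
    spam_exec(PyObject *module)
    {
        /* Execution phase: populate the freshly created module object. */
        if (PyModule_AddIntConstant(module, "answer", 42) < 0) {
            return -1;
        }
        return 0;
    }

    static PyModuleDef_Slot spam_slots[] = {
        {Py_mod_exec, spam_exec},
        {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
        {0, NULL}
    };

    static struct PyModuleDef spam_module = {
        PyModuleDef_HEAD_INIT,
        .m_name = "spam",
        .m_size = 0,            /* no per-module state in this sketch */
        .m_slots = spam_slots,
    };

    PyMODINIT_FUNC
    PyInit_spam(void)
    {
        /* Multi-phase initialization: return the definition, not a module object. */
        return PyModuleDef_Init(&spam_module);
    }

Returning the definition from :samp:`PyInit_{name}` via :c:func:`PyModuleDef_Init`, rather than a ready-made module object, is what requests multi-phase initialization.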
@@ -486,59 +521,56 @@ state: .. versionadded:: 3.10 +.. c:function:: int PyModule_Add(PyObject *module, const char *name, PyObject *value) + + Similar to :c:func:`PyModule_AddObjectRef`, but "steals" a reference + to *value*. + It can be called with a result of function that returns a new reference + without bothering to check its result or even saving it to a variable. + + Example usage:: + + if (PyModule_Add(module, "spam", PyBytes_FromString(value)) < 0) { + goto error; + } + + .. versionadded:: 3.13 + + .. c:function:: int PyModule_AddObject(PyObject *module, const char *name, PyObject *value) Similar to :c:func:`PyModule_AddObjectRef`, but steals a reference to *value* on success (if it returns ``0``). - The new :c:func:`PyModule_AddObjectRef` function is recommended, since it is + The new :c:func:`PyModule_Add` or :c:func:`PyModule_AddObjectRef` + functions are recommended, since it is easy to introduce reference leaks by misusing the :c:func:`PyModule_AddObject` function. .. note:: Unlike other functions that steal references, ``PyModule_AddObject()`` - only decrements the reference count of *value* **on success**. + only releases the reference to *value* **on success**. This means that its return value must be checked, and calling code must - :c:func:`Py_DECREF` *value* manually on error. + :c:func:`Py_XDECREF` *value* manually on error. Example usage:: - static int - add_spam(PyObject *module, int value) - { - PyObject *obj = PyLong_FromLong(value); - if (obj == NULL) { - return -1; - } - if (PyModule_AddObject(module, "spam", obj) < 0) { - Py_DECREF(obj); - return -1; - } - // PyModule_AddObject() stole a reference to obj: - // Py_DECREF(obj) is not needed here - return 0; - } - - The example can also be written without checking explicitly if *obj* is - ``NULL``:: + PyObject *obj = PyBytes_FromString(value); + if (PyModule_AddObject(module, "spam", obj) < 0) { + // If 'obj' is not NULL and PyModule_AddObject() failed, + // 'obj' strong reference must be deleted with Py_XDECREF(). + // If 'obj' is NULL, Py_XDECREF() does nothing. + Py_XDECREF(obj); + goto error; + } + // PyModule_AddObject() stole a reference to obj: + // Py_XDECREF(obj) is not needed here. - static int - add_spam(PyObject *module, int value) - { - PyObject *obj = PyLong_FromLong(value); - if (PyModule_AddObject(module, "spam", obj) < 0) { - Py_XDECREF(obj); - return -1; - } - // PyModule_AddObject() stole a reference to obj: - // Py_DECREF(obj) is not needed here - return 0; - } + .. deprecated:: 3.13 - Note that ``Py_XDECREF()`` should be used instead of ``Py_DECREF()`` in - this case, since *obj* can be ``NULL``. + :c:func:`PyModule_AddObject` is :term:`soft deprecated`. .. c:function:: int PyModule_AddIntConstant(PyObject *module, const char *name, long value) @@ -555,7 +587,7 @@ state: ``NULL``-terminated. Return ``-1`` on error, ``0`` on success. -.. c:function:: int PyModule_AddIntMacro(PyObject *module, macro) +.. c:macro:: PyModule_AddIntMacro(module, macro) Add an int constant to *module*. The name and the value are taken from *macro*. For example ``PyModule_AddIntMacro(module, AF_INET)`` adds the int @@ -563,7 +595,7 @@ state: Return ``-1`` on error, ``0`` on success. -.. c:function:: int PyModule_AddStringMacro(PyObject *module, macro) +.. c:macro:: PyModule_AddStringMacro(module, macro) Add a string constant to *module*. 
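As a short sketch of the two macros above (``SPAM_MAX``, ``SPAM_VERSION`` and ``add_constants`` are invented names), note that the Python-level name of each constant is taken from the C macro itself::

    #define SPAM_MAX 64
    #define SPAM_VERSION "6.4"

    static int
    add_constants(PyObject *module)
    {
        /* Adds an int constant named "SPAM_MAX" with the value 64. */
        if (PyModule_AddIntMacro(module, SPAM_MAX) < 0) {
            return -1;
        }
        /* Adds a string constant named "SPAM_VERSION" with the value "6.4". */
        if (PyModule_AddStringMacro(module, SPAM_VERSION) < 0) {
            return -1;
        }
        return 0;
    }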
diff --git a/Doc/c-api/none.rst b/Doc/c-api/none.rst index 26d2b7aab201baf..f1941fc4bc4e850 100644 --- a/Doc/c-api/none.rst +++ b/Doc/c-api/none.rst @@ -5,22 +5,22 @@ The ``None`` Object ------------------- -.. index:: object: None +.. index:: pair: object; None Note that the :c:type:`PyTypeObject` for ``None`` is not directly exposed in the Python/C API. Since ``None`` is a singleton, testing for object identity (using -``==`` in C) is sufficient. There is no :c:func:`PyNone_Check` function for the +``==`` in C) is sufficient. There is no :c:func:`!PyNone_Check` function for the same reason. .. c:var:: PyObject* Py_None - The Python ``None`` object, denoting lack of value. This object has no methods. - It needs to be treated just like any other object with respect to reference - counts. + The Python ``None`` object, denoting lack of value. This object has no methods + and is :term:`immortal`. + .. versionchanged:: 3.12 + :c:data:`Py_None` is :term:`immortal`. .. c:macro:: Py_RETURN_NONE - Properly handle returning :c:data:`Py_None` from within a C function (that is, - increment the reference count of ``None`` and return it.) + Return :c:data:`Py_None` from a function. diff --git a/Doc/c-api/number.rst b/Doc/c-api/number.rst index 70b91f8c2d0ca17..13d3c5af956905a 100644 --- a/Doc/c-api/number.rst +++ b/Doc/c-api/number.rst @@ -64,7 +64,7 @@ Number Protocol .. c:function:: PyObject* PyNumber_Divmod(PyObject *o1, PyObject *o2) - .. index:: builtin: divmod + .. index:: pair: built-in function; divmod See the built-in function :func:`divmod`. Returns ``NULL`` on failure. This is the equivalent of the Python expression ``divmod(o1, o2)``. @@ -72,7 +72,7 @@ Number Protocol .. c:function:: PyObject* PyNumber_Power(PyObject *o1, PyObject *o2, PyObject *o3) - .. index:: builtin: pow + .. index:: pair: built-in function; pow See the built-in function :func:`pow`. Returns ``NULL`` on failure. This is the equivalent of the Python expression ``pow(o1, o2, o3)``, where *o3* is optional. @@ -94,7 +94,7 @@ Number Protocol .. c:function:: PyObject* PyNumber_Absolute(PyObject *o) - .. index:: builtin: abs + .. index:: pair: built-in function; abs Returns the absolute value of *o*, or ``NULL`` on failure. This is the equivalent of the Python expression ``abs(o)``. @@ -192,7 +192,7 @@ Number Protocol .. c:function:: PyObject* PyNumber_InPlacePower(PyObject *o1, PyObject *o2, PyObject *o3) - .. index:: builtin: pow + .. index:: pair: built-in function; pow See the built-in function :func:`pow`. Returns ``NULL`` on failure. The operation is done *in-place* when *o1* supports it. This is the equivalent of the Python @@ -238,7 +238,7 @@ Number Protocol .. c:function:: PyObject* PyNumber_Long(PyObject *o) - .. index:: builtin: int + .. index:: pair: built-in function; int Returns the *o* converted to an integer object on success, or ``NULL`` on failure. This is the equivalent of the Python expression ``int(o)``. @@ -246,7 +246,7 @@ Number Protocol .. c:function:: PyObject* PyNumber_Float(PyObject *o) - .. index:: builtin: float + .. index:: pair: built-in function; float Returns the *o* converted to a float object on success, or ``NULL`` on failure. This is the equivalent of the Python expression ``float(o)``. diff --git a/Doc/c-api/objbuffer.rst b/Doc/c-api/objbuffer.rst deleted file mode 100644 index 6b82a642d7ee427..000000000000000 --- a/Doc/c-api/objbuffer.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. highlight:: c - -Old Buffer Protocol -------------------- - -.. 
deprecated:: 3.0 - -These functions were part of the "old buffer protocol" API in Python 2. -In Python 3, this protocol doesn't exist anymore but the functions are still -exposed to ease porting 2.x code. They act as a compatibility wrapper -around the :ref:`new buffer protocol `, but they don't give -you control over the lifetime of the resources acquired when a buffer is -exported. - -Therefore, it is recommended that you call :c:func:`PyObject_GetBuffer` -(or the ``y*`` or ``w*`` :ref:`format codes ` with the -:c:func:`PyArg_ParseTuple` family of functions) to get a buffer view over -an object, and :c:func:`PyBuffer_Release` when the buffer view can be released. - - -.. c:function:: int PyObject_AsCharBuffer(PyObject *obj, const char **buffer, Py_ssize_t *buffer_len) - - Returns a pointer to a read-only memory location usable as character-based - input. The *obj* argument must support the single-segment character buffer - interface. On success, returns ``0``, sets *buffer* to the memory location - and *buffer_len* to the buffer length. Returns ``-1`` and sets a - :exc:`TypeError` on error. - - -.. c:function:: int PyObject_AsReadBuffer(PyObject *obj, const void **buffer, Py_ssize_t *buffer_len) - - Returns a pointer to a read-only memory location containing arbitrary data. - The *obj* argument must support the single-segment readable buffer - interface. On success, returns ``0``, sets *buffer* to the memory location - and *buffer_len* to the buffer length. Returns ``-1`` and sets a - :exc:`TypeError` on error. - - -.. c:function:: int PyObject_CheckReadBuffer(PyObject *o) - - Returns ``1`` if *o* supports the single-segment readable buffer interface. - Otherwise returns ``0``. This function always succeeds. - - Note that this function tries to get and release a buffer, and exceptions - which occur while calling corresponding functions will get suppressed. - To get error reporting use :c:func:`PyObject_GetBuffer()` instead. - - -.. c:function:: int PyObject_AsWriteBuffer(PyObject *obj, void **buffer, Py_ssize_t *buffer_len) - - Returns a pointer to a writable memory location. The *obj* argument must - support the single-segment, character buffer interface. On success, - returns ``0``, sets *buffer* to the memory location and *buffer_len* to the - buffer length. Returns ``-1`` and sets a :exc:`TypeError` on error. - diff --git a/Doc/c-api/object.rst b/Doc/c-api/object.rst index 5a25a2b6c9d3dbb..a4e3e74861a3152 100644 --- a/Doc/c-api/object.rst +++ b/Doc/c-api/object.rst @@ -15,39 +15,64 @@ Object Protocol .. c:macro:: Py_RETURN_NOTIMPLEMENTED Properly handle returning :c:data:`Py_NotImplemented` from within a C - function (that is, increment the reference count of NotImplemented and - return it). + function (that is, create a new :term:`strong reference` + to NotImplemented and return it). .. c:function:: int PyObject_Print(PyObject *o, FILE *fp, int flags) Print an object *o*, on file *fp*. Returns ``-1`` on error. The flags argument is used to enable certain printing options. The only option currently supported - is :const:`Py_PRINT_RAW`; if given, the :func:`str` of the object is written + is :c:macro:`Py_PRINT_RAW`; if given, the :func:`str` of the object is written instead of the :func:`repr`. +.. c:function:: int PyObject_HasAttrWithError(PyObject *o, const char *attr_name) + + Returns ``1`` if *o* has the attribute *attr_name*, and ``0`` otherwise. + This is equivalent to the Python expression ``hasattr(o, attr_name)``. + On failure, return ``-1``. + + .. 
versionadded:: 3.13 + + +.. c:function:: int PyObject_HasAttrStringWithError(PyObject *o, const char *attr_name) + + This is the same as :c:func:`PyObject_HasAttrWithError`, but *attr_name* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. + + .. versionadded:: 3.13 + + .. c:function:: int PyObject_HasAttr(PyObject *o, PyObject *attr_name) Returns ``1`` if *o* has the attribute *attr_name*, and ``0`` otherwise. This is equivalent to the Python expression ``hasattr(o, attr_name)``. This function always succeeds. - Note that exceptions which occur while calling :meth:`__getattr__` and - :meth:`__getattribute__` methods will get suppressed. - To get error reporting use :c:func:`PyObject_GetAttr()` instead. + .. note:: + + Exceptions that occur when this calls :meth:`~object.__getattr__` and + :meth:`~object.__getattribute__` methods are silently ignored. + For proper error handling, use :c:func:`PyObject_HasAttrWithError`, + :c:func:`PyObject_GetOptionalAttr` or :c:func:`PyObject_GetAttr` instead. .. c:function:: int PyObject_HasAttrString(PyObject *o, const char *attr_name) - Returns ``1`` if *o* has the attribute *attr_name*, and ``0`` otherwise. This - is equivalent to the Python expression ``hasattr(o, attr_name)``. This function - always succeeds. + This is the same as :c:func:`PyObject_HasAttr`, but *attr_name* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. - Note that exceptions which occur while calling :meth:`__getattr__` and - :meth:`__getattribute__` methods and creating a temporary string object - will get suppressed. - To get error reporting use :c:func:`PyObject_GetAttrString()` instead. + .. note:: + + Exceptions that occur when this calls :meth:`~object.__getattr__` and + :meth:`~object.__getattribute__` methods or while creating the temporary + :class:`str` object are silently ignored. + For proper error handling, use :c:func:`PyObject_HasAttrStringWithError`, + :c:func:`PyObject_GetOptionalAttrString` + or :c:func:`PyObject_GetAttrString` instead. .. c:function:: PyObject* PyObject_GetAttr(PyObject *o, PyObject *attr_name) @@ -56,14 +81,43 @@ Object Protocol value on success, or ``NULL`` on failure. This is the equivalent of the Python expression ``o.attr_name``. + If the missing attribute should not be treated as a failure, you can use + :c:func:`PyObject_GetOptionalAttr` instead. + .. c:function:: PyObject* PyObject_GetAttrString(PyObject *o, const char *attr_name) - Retrieve an attribute named *attr_name* from object *o*. Returns the attribute - value on success, or ``NULL`` on failure. This is the equivalent of the Python - expression ``o.attr_name``. + This is the same as :c:func:`PyObject_GetAttr`, but *attr_name* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. + + If the missing attribute should not be treated as a failure, you can use + :c:func:`PyObject_GetOptionalAttrString` instead. + + +.. c:function:: int PyObject_GetOptionalAttr(PyObject *obj, PyObject *attr_name, PyObject **result); + + Variant of :c:func:`PyObject_GetAttr` which doesn't raise + :exc:`AttributeError` if the attribute is not found. + + If the attribute is found, return ``1`` and set *\*result* to a new + :term:`strong reference` to the attribute. + If the attribute is not found, return ``0`` and set *\*result* to ``NULL``; + the :exc:`AttributeError` is silenced. 
+ If an error other than :exc:`AttributeError` is raised, return ``-1`` and + set *\*result* to ``NULL``. + + .. versionadded:: 3.13 +.. c:function:: int PyObject_GetOptionalAttrString(PyObject *obj, const char *attr_name, PyObject **result); + + This is the same as :c:func:`PyObject_GetOptionalAttr`, but *attr_name* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. + + .. versionadded:: 3.13 + .. c:function:: PyObject* PyObject_GenericGetAttr(PyObject *o, PyObject *name) Generic attribute getter function that is meant to be put into a type @@ -88,10 +142,9 @@ Object Protocol .. c:function:: int PyObject_SetAttrString(PyObject *o, const char *attr_name, PyObject *v) - Set the value of the attribute named *attr_name*, for object *o*, to the value - *v*. Raise an exception and return ``-1`` on failure; - return ``0`` on success. This is the equivalent of the Python statement - ``o.attr_name = v``. + This is the same as :c:func:`PyObject_SetAttr`, but *attr_name* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. If *v* is ``NULL``, the attribute is deleted, but this feature is deprecated in favour of using :c:func:`PyObject_DelAttrString`. @@ -117,8 +170,9 @@ Object Protocol .. c:function:: int PyObject_DelAttrString(PyObject *o, const char *attr_name) - Delete attribute named *attr_name*, for object *o*. Returns ``-1`` on failure. - This is the equivalent of the Python statement ``del o.attr_name``. + This is the same as :c:func:`PyObject_DelAttr`, but *attr_name* is + specified as a :c:expr:`const char*` UTF-8 encoded bytes string, + rather than a :c:expr:`PyObject*`. .. c:function:: PyObject* PyObject_GenericGetDict(PyObject *o, void *context) @@ -158,8 +212,8 @@ Object Protocol .. c:function:: PyObject* PyObject_RichCompare(PyObject *o1, PyObject *o2, int opid) Compare the values of *o1* and *o2* using the operation specified by *opid*, - which must be one of :const:`Py_LT`, :const:`Py_LE`, :const:`Py_EQ`, - :const:`Py_NE`, :const:`Py_GT`, or :const:`Py_GE`, corresponding to ``<``, + which must be one of :c:macro:`Py_LT`, :c:macro:`Py_LE`, :c:macro:`Py_EQ`, + :c:macro:`Py_NE`, :c:macro:`Py_GT`, or :c:macro:`Py_GE`, corresponding to ``<``, ``<=``, ``==``, ``!=``, ``>``, or ``>=`` respectively. This is the equivalent of the Python expression ``o1 op o2``, where ``op`` is the operator corresponding to *opid*. Returns the value of the comparison on success, or ``NULL`` on failure. @@ -168,8 +222,8 @@ Object Protocol .. c:function:: int PyObject_RichCompareBool(PyObject *o1, PyObject *o2, int opid) Compare the values of *o1* and *o2* using the operation specified by *opid*, - which must be one of :const:`Py_LT`, :const:`Py_LE`, :const:`Py_EQ`, - :const:`Py_NE`, :const:`Py_GT`, or :const:`Py_GE`, corresponding to ``<``, + which must be one of :c:macro:`Py_LT`, :c:macro:`Py_LE`, :c:macro:`Py_EQ`, + :c:macro:`Py_NE`, :c:macro:`Py_GT`, or :c:macro:`Py_GE`, corresponding to ``<``, ``<=``, ``==``, ``!=``, ``>``, or ``>=`` respectively. Returns ``-1`` on error, ``0`` if the result is false, ``1`` otherwise. This is the equivalent of the Python expression ``o1 op o2``, where ``op`` is the operator corresponding to @@ -177,11 +231,20 @@ Object Protocol .. note:: If *o1* and *o2* are the same object, :c:func:`PyObject_RichCompareBool` - will always return ``1`` for :const:`Py_EQ` and ``0`` for :const:`Py_NE`. + will always return ``1`` for :c:macro:`Py_EQ` and ``0`` for :c:macro:`Py_NE`. 
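For example, a minimal sketch (assuming ``Python.h`` is included; the attribute
name ``"flag"`` and the helper are purely illustrative) combining the
optional-attribute lookup and rich-comparison conventions described above::

   static int
   flag_equals(PyObject *obj, PyObject *expected)
   {
       PyObject *value;
       int rc = PyObject_GetOptionalAttrString(obj, "flag", &value);
       if (rc <= 0) {
           /* rc == 0: attribute missing, no exception set;
              rc == -1: an error other than AttributeError is set. */
           return rc;
       }
       /* rc == 1: value holds a new strong reference. */
       int eq = PyObject_RichCompareBool(value, expected, Py_EQ);
       Py_DECREF(value);
       return eq;   /* 1, 0, or -1 with an exception set */
   }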
+ +.. c:function:: PyObject* PyObject_Format(PyObject *obj, PyObject *format_spec) + + Format *obj* using *format_spec*. This is equivalent to the Python + expression ``format(obj, format_spec)``. + + *format_spec* may be ``NULL``. In this case the call is equivalent + to ``format(obj)``. + Returns the formatted string on success, ``NULL`` on failure. .. c:function:: PyObject* PyObject_Repr(PyObject *o) - .. index:: builtin: repr + .. index:: pair: built-in function; repr Compute a string representation of object *o*. Returns the string representation on success, ``NULL`` on failure. This is the equivalent of the @@ -193,7 +256,7 @@ Object Protocol .. c:function:: PyObject* PyObject_ASCII(PyObject *o) - .. index:: builtin: ascii + .. index:: pair: built-in function; ascii As :c:func:`PyObject_Repr`, compute a string representation of object *o*, but escape the non-ASCII characters in the string returned by @@ -218,7 +281,7 @@ Object Protocol .. c:function:: PyObject* PyObject_Bytes(PyObject *o) - .. index:: builtin: bytes + .. index:: pair: built-in function; bytes Compute a bytes representation of object *o*. ``NULL`` is returned on failure and a bytes object on success. This is equivalent to the Python @@ -243,7 +306,7 @@ Object Protocol Normally only class objects, i.e. instances of :class:`type` or a derived class, are considered classes. However, objects can override this by having - a :attr:`__bases__` attribute (which must be a tuple of base classes). + a :attr:`~class.__bases__` attribute (which must be a tuple of base classes). .. c:function:: int PyObject_IsInstance(PyObject *inst, PyObject *cls) @@ -260,16 +323,16 @@ Object Protocol is an instance of *cls* if its class is a subclass of *cls*. An instance *inst* can override what is considered its class by having a - :attr:`__class__` attribute. + :attr:`~instance.__class__` attribute. An object *cls* can override if it is considered a class, and what its base - classes are, by having a :attr:`__bases__` attribute (which must be a tuple + classes are, by having a :attr:`~class.__bases__` attribute (which must be a tuple of base classes). .. c:function:: Py_hash_t PyObject_Hash(PyObject *o) - .. index:: builtin: hash + .. index:: pair: built-in function; hash Compute and return the hash value of an object *o*. On failure, return ``-1``. This is the equivalent of the Python expression ``hash(o)``. @@ -281,7 +344,7 @@ Object Protocol .. c:function:: Py_hash_t PyObject_HashNotImplemented(PyObject *o) - Set a :exc:`TypeError` indicating that ``type(o)`` is not hashable and return ``-1``. + Set a :exc:`TypeError` indicating that ``type(o)`` is not :term:`hashable` and return ``-1``. This function receives special treatment when stored in a ``tp_hash`` slot, allowing a type to explicitly indicate to the interpreter that it is not hashable. @@ -303,15 +366,16 @@ Object Protocol .. c:function:: PyObject* PyObject_Type(PyObject *o) - .. index:: builtin: type + .. index:: pair: built-in function; type When *o* is non-``NULL``, returns a type object corresponding to the object type of object *o*. On failure, raises :exc:`SystemError` and returns ``NULL``. This - is equivalent to the Python expression ``type(o)``. This function increments the - reference count of the return value. There's really no reason to use this + is equivalent to the Python expression ``type(o)``. + This function creates a new :term:`strong reference` to the return value. 
+ There's really no reason to use this function instead of the :c:func:`Py_TYPE()` function, which returns a - pointer of type :c:expr:`PyTypeObject*`, except when the incremented reference - count is needed. + pointer of type :c:expr:`PyTypeObject*`, except when a new + :term:`strong reference` is needed. .. c:function:: int PyObject_TypeCheck(PyObject *o, PyTypeObject *type) @@ -323,7 +387,7 @@ Object Protocol .. c:function:: Py_ssize_t PyObject_Size(PyObject *o) Py_ssize_t PyObject_Length(PyObject *o) - .. index:: builtin: len + .. index:: pair: built-in function; len Return the length of object *o*. If the object *o* provides either the sequence and mapping protocols, the sequence length is returned. On error, ``-1`` is @@ -386,3 +450,60 @@ Object Protocol returns ``NULL`` if the object cannot be iterated. .. versionadded:: 3.10 + +.. c:function:: void *PyObject_GetTypeData(PyObject *o, PyTypeObject *cls) + + Get a pointer to subclass-specific data reserved for *cls*. + + The object *o* must be an instance of *cls*, and *cls* must have been + created using negative :c:member:`PyType_Spec.basicsize`. + Python does not check this. + + On error, set an exception and return ``NULL``. + + .. versionadded:: 3.12 + +.. c:function:: Py_ssize_t PyType_GetTypeDataSize(PyTypeObject *cls) + + Return the size of the instance memory space reserved for *cls*, i.e. the size of the + memory :c:func:`PyObject_GetTypeData` returns. + + This may be larger than requested using :c:member:`-PyType_Spec.basicsize `; + it is safe to use this larger size (e.g. with :c:func:`!memset`). + + The type *cls* **must** have been created using + negative :c:member:`PyType_Spec.basicsize`. + Python does not check this. + + On error, set an exception and return a negative value. + + .. versionadded:: 3.12 + +.. c:function:: void *PyObject_GetItemData(PyObject *o) + + Get a pointer to per-item data for a class with + :c:macro:`Py_TPFLAGS_ITEMS_AT_END`. + + On error, set an exception and return ``NULL``. + :py:exc:`TypeError` is raised if *o* does not have + :c:macro:`Py_TPFLAGS_ITEMS_AT_END` set. + + .. versionadded:: 3.12 + +.. c:function:: int PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg) + + Visit the managed dictionary of *obj*. + + This function must only be called in a traverse function of the type which + has the :c:macro:`Py_TPFLAGS_MANAGED_DICT` flag set. + + .. versionadded:: 3.13 + +.. c:function:: void PyObject_ClearManagedDict(PyObject *obj) + + Clear the managed dictionary of *obj*. + + This function must only be called in a traverse function of the type which + has the :c:macro:`Py_TPFLAGS_MANAGED_DICT` flag set. + + .. versionadded:: 3.13 diff --git a/Doc/c-api/perfmaps.rst b/Doc/c-api/perfmaps.rst new file mode 100644 index 000000000000000..3d44d2eb6bf41d6 --- /dev/null +++ b/Doc/c-api/perfmaps.rst @@ -0,0 +1,50 @@ +.. highlight:: c + +.. _perfmaps: + +Support for Perf Maps +---------------------- + +On supported platforms (as of this writing, only Linux), the runtime can take +advantage of *perf map files* to make Python functions visible to an external +profiling tool (such as `perf `_). +A running process may create a file in the ``/tmp`` directory, which contains entries +that can map a section of executable code to a name. This interface is described in the +`documentation of the Linux Perf tool `_. + +In Python, these helper APIs can be used by libraries and features that rely +on generating machine code on the fly. 
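For example, a library that emits machine code might register each freshly
generated function with a small helper like the following sketch (the helper
name and its arguments are illustrative; it simply forwards to the
entry-writing API documented below)::

   int
   register_jitted_code(const void *code, unsigned int code_size,
                        const char *qualified_name)
   {
       /* Opens /tmp/perf-$pid.map on first use and appends one entry;
          returns 0 on success, a negative value on failure. */
       return PyUnstable_WritePerfMapEntry(code, code_size, qualified_name);
   }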
+ +Note that holding the Global Interpreter Lock (GIL) is not required for these APIs. + +.. c:function:: int PyUnstable_PerfMapState_Init(void) + + Open the ``/tmp/perf-$pid.map`` file, unless it's already opened, and create + a lock to ensure thread-safe writes to the file (provided the writes are + done through :c:func:`PyUnstable_WritePerfMapEntry`). Normally, there's no need + to call this explicitly; just use :c:func:`PyUnstable_WritePerfMapEntry` + and it will initialize the state on first call. + + Returns ``0`` on success, ``-1`` on failure to create/open the perf map file, + or ``-2`` on failure to create a lock. Check ``errno`` for more information + about the cause of a failure. + +.. c:function:: int PyUnstable_WritePerfMapEntry(const void *code_addr, unsigned int code_size, const char *entry_name) + + Write one single entry to the ``/tmp/perf-$pid.map`` file. This function is + thread safe. Here is what an example entry looks like:: + + # address size name + 7f3529fcf759 b py::bar:/run/t.py + + Will call :c:func:`PyUnstable_PerfMapState_Init` before writing the entry, if + the perf map file is not already opened. Returns ``0`` on success, or the + same error codes as :c:func:`PyUnstable_PerfMapState_Init` on failure. + +.. c:function:: void PyUnstable_PerfMapState_Fini(void) + + Close the perf map file opened by :c:func:`PyUnstable_PerfMapState_Init`. + This is called by the runtime itself during interpreter shut-down. In + general, there shouldn't be a reason to explicitly call this, except to + handle specific scenarios such as forking. diff --git a/Doc/c-api/refcounting.rst b/Doc/c-api/refcounting.rst index cd1f2ef7076836c..68119a27b18ec26 100644 --- a/Doc/c-api/refcounting.rst +++ b/Doc/c-api/refcounting.rst @@ -7,14 +7,20 @@ Reference Counting ****************** -The macros in this section are used for managing reference counts of Python -objects. +The functions and macros in this section are used for managing reference counts +of Python objects. .. c:function:: Py_ssize_t Py_REFCNT(PyObject *o) Get the reference count of the Python object *o*. + Note that the returned value may not actually reflect how many + references to the object are actually held. For example, some + objects are :term:`immortal` and have a very high refcount that does not + reflect the actual number of references. Consequently, do not rely + on the returned value to be accurate, other than a value of 0 or 1. + Use the :c:func:`Py_SET_REFCNT()` function to set an object reference count. .. versionchanged:: 3.11 @@ -28,36 +34,53 @@ objects. Set the object *o* reference counter to *refcnt*. + This function has no effect on :term:`immortal` objects. + .. versionadded:: 3.9 + .. versionchanged:: 3.12 + Immortal objects are not modified. + .. c:function:: void Py_INCREF(PyObject *o) - Increment the reference count for object *o*. + Indicate taking a new :term:`strong reference` to object *o*, + indicating it is in use and should not be destroyed. + + This function has no effect on :term:`immortal` objects. This function is usually used to convert a :term:`borrowed reference` to a :term:`strong reference` in-place. The :c:func:`Py_NewRef` function can be used to create a new :term:`strong reference`. + When done using the object, release is by calling :c:func:`Py_DECREF`. + The object must not be ``NULL``; if you aren't sure that it isn't ``NULL``, use :c:func:`Py_XINCREF`. + Do not expect this function to actually modify *o* in any way. + For at least `some objects `_, + this function has no effect. 
+ + .. versionchanged:: 3.12 + Immortal objects are not modified. + .. c:function:: void Py_XINCREF(PyObject *o) - Increment the reference count for object *o*. The object may be ``NULL``, in - which case the macro has no effect. + Similar to :c:func:`Py_INCREF`, but the object *o* can be ``NULL``, + in which case this has no effect. See also :c:func:`Py_XNewRef`. .. c:function:: PyObject* Py_NewRef(PyObject *o) - Create a new :term:`strong reference` to an object: increment the reference - count of the object *o* and return the object *o*. + Create a new :term:`strong reference` to an object: + call :c:func:`Py_INCREF` on *o* and return the object *o*. When the :term:`strong reference` is no longer needed, :c:func:`Py_DECREF` - should be called on it to decrement the object reference count. + should be called on it to release the reference. The object *o* must not be ``NULL``; use :c:func:`Py_XNewRef` if *o* can be ``NULL``. @@ -87,9 +110,14 @@ objects. .. c:function:: void Py_DECREF(PyObject *o) - Decrement the reference count for object *o*. + Release a :term:`strong reference` to object *o*, indicating the + reference is no longer used. - If the reference count reaches zero, the object's type's deallocation + This function has no effect on :term:`immortal` objects. + + Once the last :term:`strong reference` is released + (i.e. the object's reference count reaches 0), + the object's type's deallocation function (which must not be ``NULL``) is invoked. This function is usually used to delete a :term:`strong reference` before @@ -98,10 +126,14 @@ objects. The object must not be ``NULL``; if you aren't sure that it isn't ``NULL``, use :c:func:`Py_XDECREF`. + Do not expect this function to actually modify *o* in any way. + For at least `some objects `_, + this function has no effect. + .. warning:: The deallocation function can cause arbitrary Python code to be invoked (e.g. - when a class instance with a :meth:`__del__` method is deallocated). While + when a class instance with a :meth:`~object.__del__` method is deallocated). While exceptions in such code are not propagated, the executed code has free access to all Python global variables. This means that any object that is reachable from a global variable should be in a consistent state before :c:func:`Py_DECREF` is @@ -109,33 +141,82 @@ objects. reference to the deleted object in a temporary variable, update the list data structure, and then call :c:func:`Py_DECREF` for the temporary variable. + .. versionchanged:: 3.12 + Immortal objects are not modified. + .. c:function:: void Py_XDECREF(PyObject *o) - Decrement the reference count for object *o*. The object may be ``NULL``, in - which case the macro has no effect; otherwise the effect is the same as for - :c:func:`Py_DECREF`, and the same warning applies. + Similar to :c:func:`Py_DECREF`, but the object *o* can be ``NULL``, + in which case this has no effect. + The same warning from :c:func:`Py_DECREF` applies here as well. .. c:function:: void Py_CLEAR(PyObject *o) - Decrement the reference count for object *o*. The object may be ``NULL``, in + Release a :term:`strong reference` for object *o*. + The object may be ``NULL``, in which case the macro has no effect; otherwise the effect is the same as for :c:func:`Py_DECREF`, except that the argument is also set to ``NULL``. The warning for :c:func:`Py_DECREF` does not apply with respect to the object passed because the macro carefully uses a temporary variable and sets the argument to ``NULL`` - before decrementing its reference count. 
+ before releasing the reference. + + It is a good idea to use this macro whenever releasing a reference + to an object that might be traversed during garbage collection. + + .. versionchanged:: 3.12 + The macro argument is now only evaluated once. If the argument has side + effects, these are no longer duplicated. - It is a good idea to use this macro whenever decrementing the reference - count of an object that might be traversed during garbage collection. .. c:function:: void Py_IncRef(PyObject *o) - Increment the reference count for object *o*. A function version of :c:func:`Py_XINCREF`. + Indicate taking a new :term:`strong reference` to object *o*. + A function version of :c:func:`Py_XINCREF`. It can be used for runtime dynamic embedding of Python. .. c:function:: void Py_DecRef(PyObject *o) - Decrement the reference count for object *o*. A function version of :c:func:`Py_XDECREF`. + Release a :term:`strong reference` to object *o*. + A function version of :c:func:`Py_XDECREF`. It can be used for runtime dynamic embedding of Python. + + +.. c:macro:: Py_SETREF(dst, src) + + Macro safely releasing a :term:`strong reference` to object *dst* + and setting *dst* to *src*. + + As in case of :c:func:`Py_CLEAR`, "the obvious" code can be deadly:: + + Py_DECREF(dst); + dst = src; + + The safe way is:: + + Py_SETREF(dst, src); + + That arranges to set *dst* to *src* _before_ releasing the reference + to the old value of *dst*, so that any code triggered as a side-effect + of *dst* getting torn down no longer believes *dst* points + to a valid object. + + .. versionadded:: 3.6 + + .. versionchanged:: 3.12 + The macro arguments are now only evaluated once. If an argument has side + effects, these are no longer duplicated. + + +.. c:macro:: Py_XSETREF(dst, src) + + Variant of :c:macro:`Py_SETREF` macro that uses :c:func:`Py_XDECREF` instead + of :c:func:`Py_DECREF`. + + .. versionadded:: 3.6 + + .. versionchanged:: 3.12 + The macro arguments are now only evaluated once. If an argument has side + effects, these are no longer duplicated. diff --git a/Doc/c-api/sequence.rst b/Doc/c-api/sequence.rst index c78d273f9f149f5..ce28839f5ba739d 100644 --- a/Doc/c-api/sequence.rst +++ b/Doc/c-api/sequence.rst @@ -9,7 +9,7 @@ Sequence Protocol .. c:function:: int PySequence_Check(PyObject *o) Return ``1`` if the object provides the sequence protocol, and ``0`` otherwise. - Note that it returns ``1`` for Python classes with a :meth:`__getitem__` + Note that it returns ``1`` for Python classes with a :meth:`~object.__getitem__` method, unless they are :class:`dict` subclasses, since in general it is impossible to determine what type of keys the class supports. This function always succeeds. @@ -18,7 +18,7 @@ Sequence Protocol .. c:function:: Py_ssize_t PySequence_Size(PyObject *o) Py_ssize_t PySequence_Length(PyObject *o) - .. index:: builtin: len + .. index:: pair: built-in function; len Returns the number of objects in sequence *o* on success, and ``-1`` on failure. This is equivalent to the Python expression ``len(o)``. @@ -120,7 +120,7 @@ Sequence Protocol .. c:function:: PyObject* PySequence_Tuple(PyObject *o) - .. index:: builtin: tuple + .. index:: pair: built-in function; tuple Return a tuple object with the same contents as the sequence or iterable *o*, or ``NULL`` on failure. If *o* is a tuple, a new reference will be returned, diff --git a/Doc/c-api/set.rst b/Doc/c-api/set.rst index f0d905bae8ae44c..cba823aa027bd6d 100644 --- a/Doc/c-api/set.rst +++ b/Doc/c-api/set.rst @@ -9,8 +9,8 @@ Set Objects .. 
index:: - object: set - object: frozenset + pair: object; set + pair: object; frozenset This section details the public API for :class:`set` and :class:`frozenset` objects. Any functionality not listed below is best accessed using either @@ -107,10 +107,10 @@ or :class:`frozenset` or instances of their subtypes. .. c:function:: Py_ssize_t PySet_Size(PyObject *anyset) - .. index:: builtin: len + .. index:: pair: built-in function; len Return the length of a :class:`set` or :class:`frozenset` object. Equivalent to - ``len(anyset)``. Raises a :exc:`PyExc_SystemError` if *anyset* is not a + ``len(anyset)``. Raises a :exc:`SystemError` if *anyset* is not a :class:`set`, :class:`frozenset`, or an instance of a subtype. @@ -122,9 +122,9 @@ or :class:`frozenset` or instances of their subtypes. .. c:function:: int PySet_Contains(PyObject *anyset, PyObject *key) Return ``1`` if found, ``0`` if not found, and ``-1`` if an error is encountered. Unlike - the Python :meth:`__contains__` method, this function does not automatically + the Python :meth:`~object.__contains__` method, this function does not automatically convert unhashable sets into temporary frozensets. Raise a :exc:`TypeError` if - the *key* is unhashable. Raise :exc:`PyExc_SystemError` if *anyset* is not a + the *key* is unhashable. Raise :exc:`SystemError` if *anyset* is not a :class:`set`, :class:`frozenset`, or an instance of a subtype. @@ -147,9 +147,9 @@ subtypes but not for instances of :class:`frozenset` or its subtypes. Return ``1`` if found and removed, ``0`` if not found (no action taken), and ``-1`` if an error is encountered. Does not raise :exc:`KeyError` for missing keys. Raise a - :exc:`TypeError` if the *key* is unhashable. Unlike the Python :meth:`~set.discard` + :exc:`TypeError` if the *key* is unhashable. Unlike the Python :meth:`~frozenset.discard` method, this function does not automatically convert unhashable sets into - temporary frozensets. Raise :exc:`PyExc_SystemError` if *set* is not an + temporary frozensets. Raise :exc:`SystemError` if *set* is not an instance of :class:`set` or its subtype. @@ -163,4 +163,6 @@ subtypes but not for instances of :class:`frozenset` or its subtypes. .. c:function:: int PySet_Clear(PyObject *set) - Empty an existing set of all elements. + Empty an existing set of all elements. Return ``0`` on + success. Return ``-1`` and raise :exc:`SystemError` if *set* is not an instance of + :class:`set` or its subtype. diff --git a/Doc/c-api/slice.rst b/Doc/c-api/slice.rst index 8271d9acfb645e1..27a1757c745d8bf 100644 --- a/Doc/c-api/slice.rst +++ b/Doc/c-api/slice.rst @@ -34,7 +34,7 @@ Slice Objects *length* as errors. Returns ``0`` on success and ``-1`` on error with no exception set (unless one of - the indices was not :const:`None` and failed to be converted to an integer, + the indices was not ``None`` and failed to be converted to an integer, in which case ``-1`` is returned with an exception set). You probably do not want to use this function. @@ -113,11 +113,13 @@ Slice Objects Ellipsis Object ---------------- +^^^^^^^^^^^^^^^ .. c:var:: PyObject *Py_Ellipsis - The Python ``Ellipsis`` object. This object has no methods. It needs to be - treated just like any other object with respect to reference counts. Like - :c:data:`Py_None` it is a singleton object. + The Python ``Ellipsis`` object. This object has no methods. Like + :c:data:`Py_None`, it is an :term:`immortal` singleton object. + + .. versionchanged:: 3.12 + :c:data:`Py_Ellipsis` is immortal. 
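   For example, a minimal sketch of returning it from C (assuming ``Python.h``
   is included; the function name is illustrative)::

      static PyObject *
      give_ellipsis(PyObject *self, PyObject *Py_UNUSED(ignored))
      {
          /* Py_Ellipsis is a singleton: hand out a new strong reference. */
          return Py_NewRef(Py_Ellipsis);
      }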
diff --git a/Doc/c-api/stable.rst b/Doc/c-api/stable.rst index 4ae20e93e36785e..63a100a6f26f24c 100644 --- a/Doc/c-api/stable.rst +++ b/Doc/c-api/stable.rst @@ -6,9 +6,9 @@ C API Stability *************** -Python's C API is covered by the Backwards Compatibility Policy, :pep:`387`. -While the C API will change with every minor release (e.g. from 3.9 to 3.10), -most changes will be source-compatible, typically by only adding new API. +Unless documented otherwise, Python's C API is covered by the Backwards +Compatibility Policy, :pep:`387`. +Most changes to it are source-compatible (typically by only adding new API). Changing existing API or removing API is only done after a deprecation period or to fix serious issues. @@ -18,33 +18,63 @@ way; see :ref:`stable-abi-platform` below). So, code compiled for Python 3.10.0 will work on 3.10.8 and vice versa, but will need to be compiled separately for 3.9.x and 3.10.x. +There are two tiers of C API with different stability expectations: + +- :ref:`Unstable API `, may change in minor versions without + a deprecation period. It is marked by the ``PyUnstable`` prefix in names. +- :ref:`Limited API `, is compatible across several minor releases. + When :c:macro:`Py_LIMITED_API` is defined, only this subset is exposed + from ``Python.h``. + +These are discussed in more detail below. + Names prefixed by an underscore, such as ``_Py_InternalState``, are private API that can change without notice even in patch releases. +If you need to use this API, consider reaching out to +`CPython developers `_ +to discuss adding public API for your use case. + +.. _unstable-c-api: + +Unstable C API +============== + +.. index:: single: PyUnstable + +Any API named with the ``PyUnstable`` prefix exposes CPython implementation +details, and may change in every minor release (e.g. from 3.9 to 3.10) without +any deprecation warnings. +However, it will not change in a bugfix release (e.g. from 3.10.0 to 3.10.1). + +It is generally intended for specialized, low-level tools like debuggers. + +Projects that use this API are expected to follow +CPython development and spend extra effort adjusting to changes. Stable Application Binary Interface =================================== +For simplicity, this document talks about *extensions*, but the Limited API +and Stable ABI work the same way for all uses of the API – for example, +embedding Python. + +.. _limited-c-api: + +Limited C API +------------- + Python 3.2 introduced the *Limited API*, a subset of Python's C API. Extensions that only use the Limited API can be compiled once and work with multiple versions of Python. -Contents of the Limited API are :ref:`listed below `. - -To enable this, Python provides a *Stable ABI*: a set of symbols that will -remain compatible across Python 3.x versions. The Stable ABI contains symbols -exposed in the Limited API, but also other ones – for example, functions -necessary to support older versions of the Limited API. - -(For simplicity, this document talks about *extensions*, but the Limited API -and Stable ABI work the same way for all uses of the API – for example, -embedding Python.) +Contents of the Limited API are :ref:`listed below `. .. c:macro:: Py_LIMITED_API Define this macro before including ``Python.h`` to opt in to only use the Limited API, and to select the Limited API version. 
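   A minimal sketch of opting in (``0x030A0000``, i.e. Python 3.10, is just an
   illustrative lower bound)::

      #define Py_LIMITED_API 0x030A0000   /* lowest Python version supported */
      #include <Python.h>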
- Define ``Py_LIMITED_API`` to the value of :c:data:`PY_VERSION_HEX` + Define ``Py_LIMITED_API`` to the value of :c:macro:`PY_VERSION_HEX` corresponding to the lowest Python version your extension supports. The extension will work without recompilation with all Python 3 releases from the specified one onward, and can use Limited API introduced up to that @@ -57,6 +87,19 @@ embedding Python.) You can also define ``Py_LIMITED_API`` to ``3``. This works the same as ``0x03020000`` (Python 3.2, the version that introduced Limited API). + +.. _stable-abi: + +Stable ABI +---------- + +To enable this, Python provides a *Stable ABI*: a set of symbols that will +remain compatible across Python 3.x versions. + +The Stable ABI contains symbols exposed in the :ref:`Limited API +`, but also other ones – for example, functions necessary to +support older versions of the Limited API. + On Windows, extensions that use the Stable ABI should be linked against ``python3.dll`` rather than a version-specific library such as ``python39.dll``. @@ -101,9 +144,9 @@ Limited API Caveats ------------------- Note that compiling with ``Py_LIMITED_API`` is *not* a complete guarantee that -code conforms to the Limited API or the Stable ABI. ``Py_LIMITED_API`` only -covers definitions, but an API also includes other issues, such as expected -semantics. +code conforms to the :ref:`Limited API ` or the :ref:`Stable ABI +`. ``Py_LIMITED_API`` only covers definitions, but an API also +includes other issues, such as expected semantics. One issue that ``Py_LIMITED_API`` does not guard against is calling a function with arguments that are invalid in a lower Python version. @@ -136,9 +179,9 @@ Platform Considerations ======================= ABI stability depends not only on Python, but also on the compiler used, -lower-level libraries and compiler options. For the purposes of the Stable ABI, -these details define a “platformâ€. They usually depend on the OS -type and processor architecture +lower-level libraries and compiler options. For the purposes of +the :ref:`Stable ABI `, these details define a “platformâ€. They +usually depend on the OS type and processor architecture It is the responsibility of each particular distributor of Python to ensure that all Python versions on a particular platform are built @@ -147,12 +190,12 @@ This is the case with Windows and macOS releases from ``python.org`` and many third-party distributors. -.. _stable-abi-list: +.. _limited-api-list: Contents of Limited API ======================= -Currently, the Limited API includes the following items: +Currently, the :ref:`Limited API ` includes the following items: .. limited-api-list:: diff --git a/Doc/c-api/structures.rst b/Doc/c-api/structures.rst index 183ac144c50dd3a..25cb4ed40f63e72 100644 --- a/Doc/c-api/structures.rst +++ b/Doc/c-api/structures.rst @@ -35,7 +35,7 @@ under :ref:`reference counting `. .. c:type:: PyVarObject - This is an extension of :c:type:`PyObject` that adds the :attr:`ob_size` + This is an extension of :c:type:`PyObject` that adds the :c:member:`~PyVarObject.ob_size` field. This is only used for objects that have some notion of *length*. This type does not often appear in the Python/C API. Access to the members must be done by using the macros @@ -152,7 +152,7 @@ under :ref:`reference counting `. .. c:macro:: PyVarObject_HEAD_INIT(type, size) This is a macro which expands to initialization values for a new - :c:type:`PyVarObject` type, including the :attr:`ob_size` field. 
+ :c:type:`PyVarObject` type, including the :c:member:`~PyVarObject.ob_size` field. This macro expands to:: _PyObject_EXTRA_INIT @@ -179,7 +179,7 @@ Implementing functions and methods .. c:type:: PyCFunctionWithKeywords Type of the functions used to implement Python callables in C - with signature :const:`METH_VARARGS | METH_KEYWORDS`. + with signature :ref:`METH_VARARGS | METH_KEYWORDS `. The function signature is:: PyObject *PyCFunctionWithKeywords(PyObject *self, @@ -190,7 +190,7 @@ Implementing functions and methods .. c:type:: _PyCFunctionFast Type of the functions used to implement Python callables in C - with signature :const:`METH_FASTCALL`. + with signature :c:macro:`METH_FASTCALL`. The function signature is:: PyObject *_PyCFunctionFast(PyObject *self, @@ -200,7 +200,7 @@ Implementing functions and methods .. c:type:: _PyCFunctionFastWithKeywords Type of the functions used to implement Python callables in C - with signature :const:`METH_FASTCALL | METH_KEYWORDS`. + with signature :ref:`METH_FASTCALL | METH_KEYWORDS `. The function signature is:: PyObject *_PyCFunctionFastWithKeywords(PyObject *self, @@ -211,7 +211,7 @@ Implementing functions and methods .. c:type:: PyCMethod Type of the functions used to implement Python callables in C - with signature :const:`METH_METHOD | METH_FASTCALL | METH_KEYWORDS`. + with signature :ref:`METH_METHOD | METH_FASTCALL | METH_KEYWORDS `. The function signature is:: PyObject *PyCMethod(PyObject *self, @@ -228,35 +228,38 @@ Implementing functions and methods Structure used to describe a method of an extension type. This structure has four fields: - +------------------+---------------+-------------------------------+ - | Field | C Type | Meaning | - +==================+===============+===============================+ - | :attr:`ml_name` | const char \* | name of the method | - +------------------+---------------+-------------------------------+ - | :attr:`ml_meth` | PyCFunction | pointer to the C | - | | | implementation | - +------------------+---------------+-------------------------------+ - | :attr:`ml_flags` | int | flag bits indicating how the | - | | | call should be constructed | - +------------------+---------------+-------------------------------+ - | :attr:`ml_doc` | const char \* | points to the contents of the | - | | | docstring | - +------------------+---------------+-------------------------------+ - -The :attr:`ml_meth` is a C function pointer. The functions may be of different + .. c:member:: const char *ml_name + + Name of the method. + + .. c:member:: PyCFunction ml_meth + + Pointer to the C implementation. + + .. c:member:: int ml_flags + + Flags bits indicating how the call should be constructed. + + .. c:member:: const char *ml_doc + + Points to the contents of the docstring. + +The :c:member:`~PyMethodDef.ml_meth` is a C function pointer. +The functions may be of different types, but they always return :c:expr:`PyObject*`. If the function is not of the :c:type:`PyCFunction`, the compiler will require a cast in the method table. Even though :c:type:`PyCFunction` defines the first parameter as :c:expr:`PyObject*`, it is common that the method implementation uses the specific C type of the *self* object. -The :attr:`ml_flags` field is a bitfield which can include the following flags. +The :c:member:`~PyMethodDef.ml_flags` field is a bitfield which can include +the following flags. The individual flags indicate either a calling convention or a binding convention. There are these calling conventions: -.. data:: METH_VARARGS +.. 
c:macro:: METH_VARARGS This is the typical calling convention, where the methods have the type :c:type:`PyCFunction`. The function expects two :c:expr:`PyObject*` values. @@ -266,8 +269,17 @@ There are these calling conventions: using :c:func:`PyArg_ParseTuple` or :c:func:`PyArg_UnpackTuple`. -.. data:: METH_VARARGS | METH_KEYWORDS +.. c:macro:: METH_KEYWORDS + + Can only be used in certain combinations with other flags: + :ref:`METH_VARARGS | METH_KEYWORDS `, + :ref:`METH_FASTCALL | METH_KEYWORDS ` and + :ref:`METH_METHOD | METH_FASTCALL | METH_KEYWORDS `. + +.. _METH_VARARGS-METH_KEYWORDS: + +:c:expr:`METH_VARARGS | METH_KEYWORDS` Methods with these flags must be of type :c:type:`PyCFunctionWithKeywords`. The function expects three parameters: *self*, *args*, *kwargs* where *kwargs* is a dictionary of all the keyword arguments or possibly ``NULL`` @@ -275,7 +287,7 @@ There are these calling conventions: using :c:func:`PyArg_ParseTupleAndKeywords`. -.. data:: METH_FASTCALL +.. c:macro:: METH_FASTCALL Fast calling convention supporting only positional arguments. The methods have the type :c:type:`_PyCFunctionFast`. @@ -287,12 +299,13 @@ There are these calling conventions: .. versionchanged:: 3.10 - ``METH_FASTCALL`` is now part of the stable ABI. + ``METH_FASTCALL`` is now part of the :ref:`stable ABI `. -.. data:: METH_FASTCALL | METH_KEYWORDS +.. _METH_FASTCALL-METH_KEYWORDS: - Extension of :const:`METH_FASTCALL` supporting also keyword arguments, +:c:expr:`METH_FASTCALL | METH_KEYWORDS` + Extension of :c:macro:`METH_FASTCALL` supporting also keyword arguments, with methods of type :c:type:`_PyCFunctionFastWithKeywords`. Keyword arguments are passed the same way as in the :ref:`vectorcall protocol `: @@ -305,10 +318,18 @@ There are these calling conventions: .. versionadded:: 3.7 -.. data:: METH_METHOD | METH_FASTCALL | METH_KEYWORDS +.. c:macro:: METH_METHOD + + Can only be used in the combination with other flags: + :ref:`METH_METHOD | METH_FASTCALL | METH_KEYWORDS `. + + +.. _METH_METHOD-METH_FASTCALL-METH_KEYWORDS: - Extension of :const:`METH_FASTCALL | METH_KEYWORDS` supporting the *defining - class*, that is, the class that contains the method in question. +:c:expr:`METH_METHOD | METH_FASTCALL | METH_KEYWORDS` + Extension of :ref:`METH_FASTCALL | METH_KEYWORDS ` + supporting the *defining class*, that is, + the class that contains the method in question. The defining class might be a superclass of ``Py_TYPE(self)``. The method needs to be of type :c:type:`PyCMethod`, the same as for @@ -318,10 +339,10 @@ There are these calling conventions: .. versionadded:: 3.9 -.. data:: METH_NOARGS +.. c:macro:: METH_NOARGS Methods without parameters don't need to check whether arguments are given if - they are listed with the :const:`METH_NOARGS` flag. They need to be of type + they are listed with the :c:macro:`METH_NOARGS` flag. They need to be of type :c:type:`PyCFunction`. The first parameter is typically named *self* and will hold a reference to the module or object instance. In all cases the second parameter will be ``NULL``. @@ -330,9 +351,9 @@ There are these calling conventions: :c:macro:`Py_UNUSED` can be used to prevent a compiler warning. -.. data:: METH_O +.. c:macro:: METH_O - Methods with a single object argument can be listed with the :const:`METH_O` + Methods with a single object argument can be listed with the :c:macro:`METH_O` flag, instead of invoking :c:func:`PyArg_ParseTuple` with a ``"O"`` argument. 
They have the type :c:type:`PyCFunction`, with the *self* parameter, and a :c:expr:`PyObject*` parameter representing the single argument. @@ -344,9 +365,9 @@ defined for modules. At most one of these flags may be set for any given method. -.. data:: METH_CLASS +.. c:macro:: METH_CLASS - .. index:: builtin: classmethod + .. index:: pair: built-in function; classmethod The method will be passed the type object as the first parameter rather than an instance of the type. This is used to create *class methods*, @@ -354,9 +375,9 @@ method. function. -.. data:: METH_STATIC +.. c:macro:: METH_STATIC - .. index:: builtin: staticmethod + .. index:: pair: built-in function; staticmethod The method will be passed ``NULL`` as the first parameter rather than an instance of the type. This is used to create *static methods*, similar to @@ -366,13 +387,13 @@ One other constant controls whether a method is loaded in place of another definition with the same method name. -.. data:: METH_COEXIST +.. c:macro:: METH_COEXIST The method will be loaded in place of existing definitions. Without *METH_COEXIST*, the default is to skip repeated definitions. Since slot wrappers are loaded before the method table, the existence of a *sq_contains* slot, for example, would generate a wrapped method named - :meth:`__contains__` and preclude the loading of a corresponding + :meth:`~object.__contains__` and preclude the loading of a corresponding PyCFunction with the same name. With the flag defined, the PyCFunction will be loaded in place of the wrapper object and will co-exist with the slot. This is helpful because calls to PyCFunctions are optimized more @@ -385,86 +406,71 @@ Accessing attributes of extension types .. c:type:: PyMemberDef Structure which describes an attribute of a type which corresponds to a C - struct member. Its fields are: + struct member. + When defining a class, put a NULL-terminated array of these + structures in the :c:member:`~PyTypeObject.tp_members` slot. - .. c:member:: const char* PyMemberDef.name + Its fields are, in order: - Name of the member + .. c:member:: const char* name - .. c:member:: int PyMemberDef.type + Name of the member. + A NULL value marks the end of a ``PyMemberDef[]`` array. - The type of the member in the C struct. + The string should be static, no copy is made of it. - .. c:member:: Py_ssize_t PyMemberDef.offset + .. c:member:: Py_ssize_t offset The offset in bytes that the member is located on the type’s object struct. - .. c:member:: int PyMemberDef.flags + .. c:member:: int type + + The type of the member in the C struct. + See :ref:`PyMemberDef-types` for the possible values. - Flag bits indicating if the field should be read-only or writable. + .. c:member:: int flags - .. c:member:: const char* PyMemberDef.doc + Zero or more of the :ref:`PyMemberDef-flags`, combined using bitwise OR. - Points to the contents of the docstring. + .. c:member:: const char* doc - :c:member:`PyMemberDef.type` can be one of many ``T_`` macros corresponding to various C - types. When the member is accessed in Python, it will be converted to the - equivalent Python type. 
- - =============== ================== - Macro name C type - =============== ================== - T_SHORT short - T_INT int - T_LONG long - T_FLOAT float - T_DOUBLE double - T_STRING const char \* - T_OBJECT PyObject \* - T_OBJECT_EX PyObject \* - T_CHAR char - T_BYTE char - T_UBYTE unsigned char - T_UINT unsigned int - T_USHORT unsigned short - T_ULONG unsigned long - T_BOOL char - T_LONGLONG long long - T_ULONGLONG unsigned long long - T_PYSSIZET Py_ssize_t - =============== ================== - - :c:macro:`T_OBJECT` and :c:macro:`T_OBJECT_EX` differ in that - :c:macro:`T_OBJECT` returns ``None`` if the member is ``NULL`` and - :c:macro:`T_OBJECT_EX` raises an :exc:`AttributeError`. Try to use - :c:macro:`T_OBJECT_EX` over :c:macro:`T_OBJECT` because :c:macro:`T_OBJECT_EX` - handles use of the :keyword:`del` statement on that attribute more correctly - than :c:macro:`T_OBJECT`. - - :c:member:`PyMemberDef.flags` can be ``0`` for write and read access or :c:macro:`READONLY` for - read-only access. Using :c:macro:`T_STRING` for :attr:`type` implies - :c:macro:`READONLY`. :c:macro:`T_STRING` data is interpreted as UTF-8. - Only :c:macro:`T_OBJECT` and :c:macro:`T_OBJECT_EX` - members can be deleted. (They are set to ``NULL``). + The docstring, or NULL. + The string should be static, no copy is made of it. + Typically, it is defined using :c:macro:`PyDoc_STR`. + + By default (when :c:member:`~PyMemberDef.flags` is ``0``), members allow + both read and write access. + Use the :c:macro:`Py_READONLY` flag for read-only access. + Certain types, like :c:macro:`Py_T_STRING`, imply :c:macro:`Py_READONLY`. + Only :c:macro:`Py_T_OBJECT_EX` (and legacy :c:macro:`T_OBJECT`) members can + be deleted. .. _pymemberdef-offsets: - Heap allocated types (created using :c:func:`PyType_FromSpec` or similar), - ``PyMemberDef`` may contain definitions for the special member - ``__vectorcalloffset__``, corresponding to + For heap-allocated types (created using :c:func:`PyType_FromSpec` or similar), + ``PyMemberDef`` may contain a definition for the special member + ``"__vectorcalloffset__"``, corresponding to :c:member:`~PyTypeObject.tp_vectorcall_offset` in type objects. - These must be defined with ``T_PYSSIZET`` and ``READONLY``, for example:: + These must be defined with ``Py_T_PYSSIZET`` and ``Py_READONLY``, for example:: static PyMemberDef spam_type_members[] = { - {"__vectorcalloffset__", T_PYSSIZET, offsetof(Spam_object, vectorcall), READONLY}, + {"__vectorcalloffset__", Py_T_PYSSIZET, + offsetof(Spam_object, vectorcall), Py_READONLY}, {NULL} /* Sentinel */ }; + (You may need to ``#include `` for :c:func:`!offsetof`.) + The legacy offsets :c:member:`~PyTypeObject.tp_dictoffset` and - :c:member:`~PyTypeObject.tp_weaklistoffset` are still supported, but extensions are - strongly encouraged to use ``Py_TPFLAGS_MANAGED_DICT`` and - ``Py_TPFLAGS_MANAGED_WEAKREF`` instead. + :c:member:`~PyTypeObject.tp_weaklistoffset` can be defined similarly using + ``"__dictoffset__"`` and ``"__weaklistoffset__"`` members, but extensions + are strongly encouraged to use :c:macro:`Py_TPFLAGS_MANAGED_DICT` and + :c:macro:`Py_TPFLAGS_MANAGED_WEAKREF` instead. + + .. versionchanged:: 3.12 + ``PyMemberDef`` is always available. + Previously, it required including ``"structmember.h"``. .. c:function:: PyObject* PyMember_GetOne(const char *obj_addr, struct PyMemberDef *m) @@ -472,6 +478,10 @@ Accessing attributes of extension types attribute is described by ``PyMemberDef`` *m*. Returns ``NULL`` on error. + .. 
versionchanged:: 3.12 + + ``PyMember_GetOne`` is always available. + Previously, it required including ``"structmember.h"``. .. c:function:: int PyMember_SetOne(char *obj_addr, struct PyMemberDef *m, PyObject *o) @@ -479,29 +489,185 @@ Accessing attributes of extension types The attribute to set is described by ``PyMemberDef`` *m*. Returns ``0`` if successful and a negative value on failure. + .. versionchanged:: 3.12 + + ``PyMember_SetOne`` is always available. + Previously, it required including ``"structmember.h"``. + +.. _PyMemberDef-flags: + +Member flags +^^^^^^^^^^^^ + +The following flags can be used with :c:member:`PyMemberDef.flags`: + +.. c:macro:: Py_READONLY + + Not writable. + +.. c:macro:: Py_AUDIT_READ + + Emit an ``object.__getattr__`` :ref:`audit event ` + before reading. + +.. c:macro:: Py_RELATIVE_OFFSET + + Indicates that the :c:member:`~PyMemberDef.offset` of this ``PyMemberDef`` + entry indicates an offset from the subclass-specific data, rather than + from ``PyObject``. + + Can only be used as part of :c:member:`Py_tp_members ` + :c:type:`slot ` when creating a class using negative + :c:member:`~PyType_Spec.basicsize`. + It is mandatory in that case. + + This flag is only used in :c:type:`PyTypeSlot`. + When setting :c:member:`~PyTypeObject.tp_members` during + class creation, Python clears it and sets + :c:member:`PyMemberDef.offset` to the offset from the ``PyObject`` struct. + +.. index:: + single: READ_RESTRICTED + single: WRITE_RESTRICTED + single: RESTRICTED + +.. versionchanged:: 3.10 + + The :c:macro:`!RESTRICTED`, :c:macro:`!READ_RESTRICTED` and + :c:macro:`!WRITE_RESTRICTED` macros available with + ``#include "structmember.h"`` are deprecated. + :c:macro:`!READ_RESTRICTED` and :c:macro:`!RESTRICTED` are equivalent to + :c:macro:`Py_AUDIT_READ`; :c:macro:`!WRITE_RESTRICTED` does nothing. + +.. index:: + single: READONLY + +.. versionchanged:: 3.12 + + The :c:macro:`!READONLY` macro was renamed to :c:macro:`Py_READONLY`. + The :c:macro:`!PY_AUDIT_READ` macro was renamed with the ``Py_`` prefix. + The new names are now always available. + Previously, these required ``#include "structmember.h"``. + The header is still available and it provides the old names. + +.. _PyMemberDef-types: + +Member types +^^^^^^^^^^^^ + +:c:member:`PyMemberDef.type` can be one of the following macros corresponding +to various C types. +When the member is accessed in Python, it will be converted to the +equivalent Python type. +When it is set from Python, it will be converted back to the C type. +If that is not possible, an exception such as :exc:`TypeError` or +:exc:`ValueError` is raised. + +Unless marked (D), attributes defined this way cannot be deleted +using e.g. :keyword:`del` or :py:func:`delattr`. + +================================ ============================= ====================== +Macro name C type Python type +================================ ============================= ====================== +.. c:macro:: Py_T_BYTE :c:expr:`char` :py:class:`int` +.. c:macro:: Py_T_SHORT :c:expr:`short` :py:class:`int` +.. c:macro:: Py_T_INT :c:expr:`int` :py:class:`int` +.. c:macro:: Py_T_LONG :c:expr:`long` :py:class:`int` +.. c:macro:: Py_T_LONGLONG :c:expr:`long long` :py:class:`int` +.. c:macro:: Py_T_UBYTE :c:expr:`unsigned char` :py:class:`int` +.. c:macro:: Py_T_UINT :c:expr:`unsigned int` :py:class:`int` +.. c:macro:: Py_T_USHORT :c:expr:`unsigned short` :py:class:`int` +.. c:macro:: Py_T_ULONG :c:expr:`unsigned long` :py:class:`int` +.. 
c:macro:: Py_T_ULONGLONG :c:expr:`unsigned long long` :py:class:`int` +.. c:macro:: Py_T_PYSSIZET :c:expr:`Py_ssize_t` :py:class:`int` +.. c:macro:: Py_T_FLOAT :c:expr:`float` :py:class:`float` +.. c:macro:: Py_T_DOUBLE :c:expr:`double` :py:class:`float` +.. c:macro:: Py_T_BOOL :c:expr:`char` :py:class:`bool` + (written as 0 or 1) +.. c:macro:: Py_T_STRING :c:expr:`const char *` (*) :py:class:`str` (RO) +.. c:macro:: Py_T_STRING_INPLACE :c:expr:`const char[]` (*) :py:class:`str` (RO) +.. c:macro:: Py_T_CHAR :c:expr:`char` (0-127) :py:class:`str` (**) +.. c:macro:: Py_T_OBJECT_EX :c:expr:`PyObject *` :py:class:`object` (D) +================================ ============================= ====================== + + (*): Zero-terminated, UTF8-encoded C string. + With :c:macro:`!Py_T_STRING` the C representation is a pointer; + with :c:macro:`!Py_T_STRING_INLINE` the string is stored directly + in the structure. + + (**): String of length 1. Only ASCII is accepted. + + (RO): Implies :c:macro:`Py_READONLY`. + + (D): Can be deleted, in which case the pointer is set to ``NULL``. + Reading a ``NULL`` pointer raises :py:exc:`AttributeError`. + +.. index:: + single: T_BYTE + single: T_SHORT + single: T_INT + single: T_LONG + single: T_LONGLONG + single: T_UBYTE + single: T_USHORT + single: T_UINT + single: T_ULONG + single: T_ULONGULONG + single: T_PYSSIZET + single: T_FLOAT + single: T_DOUBLE + single: T_BOOL + single: T_CHAR + single: T_STRING + single: T_STRING_INPLACE + single: T_OBJECT_EX + single: structmember.h + +.. versionadded:: 3.12 + + In previous versions, the macros were only available with + ``#include "structmember.h"`` and were named without the ``Py_`` prefix + (e.g. as ``T_INT``). + The header is still available and contains the old names, along with + the following deprecated types: + + .. c:macro:: T_OBJECT + + Like ``Py_T_OBJECT_EX``, but ``NULL`` is converted to ``None``. + This results in surprising behavior in Python: deleting the attribute + effectively sets it to ``None``. + + .. c:macro:: T_NONE + + Always ``None``. Must be used with :c:macro:`Py_READONLY`. + +Defining Getters and Setters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. c:type:: PyGetSetDef Structure to define property-like access for a type. See also description of the :c:member:`PyTypeObject.tp_getset` slot. - +-------------+------------------+-----------------------------------+ - | Field | C Type | Meaning | - +=============+==================+===================================+ - | name | const char \* | attribute name | - +-------------+------------------+-----------------------------------+ - | get | getter | C function to get the attribute | - +-------------+------------------+-----------------------------------+ - | set | setter | optional C function to set or | - | | | delete the attribute, if omitted | - | | | the attribute is readonly | - +-------------+------------------+-----------------------------------+ - | doc | const char \* | optional docstring | - +-------------+------------------+-----------------------------------+ - | closure | void \* | optional function pointer, | - | | | providing additional data for | - | | | getter and setter | - +-------------+------------------+-----------------------------------+ + .. c:member:: const char* name + + attribute name + + .. c:member:: getter get + + C function to get the attribute. + + .. c:member:: setter set + + Optional C function to set or delete the attribute, if omitted the attribute is readonly. + + .. 
c:member:: const char* doc + + optional docstring + + .. c:member:: void* closure + + Optional function pointer, providing additional data for getter and setter. The ``get`` function takes one :c:expr:`PyObject*` parameter (the instance) and a function pointer (the associated ``closure``):: diff --git a/Doc/c-api/sys.rst b/Doc/c-api/sys.rst index 6fc8a3aff956862..e3c54b075114ff7 100644 --- a/Doc/c-api/sys.rst +++ b/Doc/c-api/sys.rst @@ -8,8 +8,9 @@ Operating System Utilities .. c:function:: PyObject* PyOS_FSPath(PyObject *path) Return the file system representation for *path*. If the object is a - :class:`str` or :class:`bytes` object, then its reference count is - incremented. If the object implements the :class:`os.PathLike` interface, + :class:`str` or :class:`bytes` object, then a new + :term:`strong reference` is returned. + If the object implements the :class:`os.PathLike` interface, then :meth:`~os.PathLike.__fspath__` is returned as long as it is a :class:`str` or :class:`bytes` object. Otherwise :exc:`TypeError` is raised and ``NULL`` is returned. @@ -97,16 +98,16 @@ Operating System Utilities .. c:function:: int PyOS_CheckStack() Return true when the interpreter runs out of stack space. This is a reliable - check, but is only available when :const:`USE_STACKCHECK` is defined (currently + check, but is only available when :c:macro:`USE_STACKCHECK` is defined (currently on certain versions of Windows using the Microsoft Visual C++ compiler). - :const:`USE_STACKCHECK` will be defined automatically; you should never + :c:macro:`USE_STACKCHECK` will be defined automatically; you should never change the definition in your own code. .. c:function:: PyOS_sighandler_t PyOS_getsig(int i) Return the current signal handler for signal *i*. This is a thin wrapper around - either :c:func:`sigaction` or :c:func:`signal`. Do not call those functions + either :c:func:`!sigaction` or :c:func:`!signal`. Do not call those functions directly! :c:type:`PyOS_sighandler_t` is a typedef alias for :c:expr:`void (\*)(int)`. @@ -114,7 +115,7 @@ Operating System Utilities .. c:function:: PyOS_sighandler_t PyOS_setsig(int i, PyOS_sighandler_t h) Set the signal handler for signal *i* to be *h*; return the old signal handler. - This is a thin wrapper around either :c:func:`sigaction` or :c:func:`signal`. Do + This is a thin wrapper around either :c:func:`!sigaction` or :c:func:`!signal`. Do not call those functions directly! :c:type:`PyOS_sighandler_t` is a typedef alias for :c:expr:`void (\*)(int)`. @@ -167,7 +168,7 @@ Operating System Utilities .. versionchanged:: 3.8 The function now uses the UTF-8 encoding on Windows if - :c:member:`PyConfig.legacy_windows_fs_encoding` is zero; + :c:member:`PyPreConfig.legacy_windows_fs_encoding` is zero; .. c:function:: char* Py_EncodeLocale(const wchar_t *text, size_t *error_pos) @@ -209,7 +210,7 @@ Operating System Utilities .. versionchanged:: 3.8 The function now uses the UTF-8 encoding on Windows if - :c:member:`PyConfig.legacy_windows_fs_encoding` is zero. + :c:member:`PyPreConfig.legacy_windows_fs_encoding` is zero. .. _systemfunctions: @@ -237,45 +238,8 @@ accessible to C code. They all work with the current interpreter thread's Reset :data:`sys.warnoptions` to an empty list. This function may be called prior to :c:func:`Py_Initialize`. -.. c:function:: void PySys_AddWarnOption(const wchar_t *s) - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.warnoptions` should be used instead, see :ref:`Python - Initialization Configuration `. 
- - Append *s* to :data:`sys.warnoptions`. This function must be called prior - to :c:func:`Py_Initialize` in order to affect the warnings filter list. - - .. deprecated:: 3.11 - -.. c:function:: void PySys_AddWarnOptionUnicode(PyObject *unicode) - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.warnoptions` should be used instead, see :ref:`Python - Initialization Configuration `. - - Append *unicode* to :data:`sys.warnoptions`. - - Note: this function is not currently usable from outside the CPython - implementation, as it must be called prior to the implicit import of - :mod:`warnings` in :c:func:`Py_Initialize` to be effective, but can't be - called until enough of the runtime has been initialized to permit the - creation of Unicode objects. - - .. deprecated:: 3.11 - -.. c:function:: void PySys_SetPath(const wchar_t *path) - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.module_search_paths` and - :c:member:`PyConfig.module_search_paths_set` should be used instead, see - :ref:`Python Initialization Configuration `. - - Set :data:`sys.path` to a list object of paths found in *path* which should - be a list of paths separated with the platform's search path delimiter - (``:`` on Unix, ``;`` on Windows). - - .. deprecated:: 3.11 + .. deprecated-removed:: 3.13 3.15 + Clear :data:`sys.warnoptions` and :data:`!warnings.filters` instead. .. c:function:: void PySys_WriteStdout(const char *format, ...) @@ -313,20 +277,6 @@ accessible to C code. They all work with the current interpreter thread's .. versionadded:: 3.2 -.. c:function:: void PySys_AddXOption(const wchar_t *s) - - This API is kept for backward compatibility: setting - :c:member:`PyConfig.xoptions` should be used instead, see :ref:`Python - Initialization Configuration `. - - Parse *s* as a set of :option:`-X` options and add them to the current - options mapping as returned by :c:func:`PySys_GetXOptions`. This function - may be called prior to :c:func:`Py_Initialize`. - - .. versionadded:: 3.2 - - .. deprecated:: 3.11 - .. c:function:: PyObject *PySys_GetXOptions() Return the current dictionary of :option:`-X` options, similarly to @@ -341,19 +291,24 @@ accessible to C code. They all work with the current interpreter thread's Raise an auditing event with any active hooks. Return zero for success and non-zero with an exception set on failure. + The *event* string argument must not be *NULL*. + If any hooks have been added, *format* and other arguments will be used to construct a tuple to pass. Apart from ``N``, the same format characters as used in :c:func:`Py_BuildValue` are available. If the built value is not - a tuple, it will be added into a single-element tuple. (The ``N`` format - option consumes a reference, but since there is no way to know whether - arguments to this function will be consumed, using it may cause reference - leaks.) + a tuple, it will be added into a single-element tuple. + + The ``N`` format option must not be used. It consumes a reference, but since + there is no way to know whether arguments to this function will be consumed, + using it may cause reference leaks. Note that ``#`` format characters should always be treated as :c:type:`Py_ssize_t`, regardless of whether ``PY_SSIZE_T_CLEAN`` was defined. :func:`sys.audit` performs the same function from Python code. + See also :c:func:`PySys_AuditTuple`. + .. versionadded:: 3.8 .. versionchanged:: 3.8.2 @@ -362,6 +317,14 @@ accessible to C code. 
They all work with the current interpreter thread's unavoidable deprecation warning was raised. +.. c:function:: int PySys_AuditTuple(const char *event, PyObject *args) + + Similar to :c:func:`PySys_Audit`, but pass arguments as a Python object. + *args* must be a :class:`tuple`. To pass no arguments, *args* can be *NULL*. + + .. versionadded:: 3.13 + + .. c:function:: int PySys_AddAuditHook(Py_AuditHookFunction hook, void *userData) Append the callable *hook* to the list of active auditing hooks. @@ -414,7 +377,7 @@ Process Control This function should only be invoked when a condition is detected that would make it dangerous to continue using the Python interpreter; e.g., when the object administration appears to be corrupted. On Unix, the standard C library - function :c:func:`abort` is called which will attempt to produce a :file:`core` + function :c:func:`!abort` is called which will attempt to produce a :file:`core` file. The ``Py_FatalError()`` function is replaced with a macro which logs diff --git a/Doc/c-api/tuple.rst b/Doc/c-api/tuple.rst index 5acddf7849aa33f..b3710560ebe7ac9 100644 --- a/Doc/c-api/tuple.rst +++ b/Doc/c-api/tuple.rst @@ -5,7 +5,7 @@ Tuple Objects ------------- -.. index:: object: tuple +.. index:: pair: object; tuple .. c:type:: PyTupleObject @@ -89,6 +89,9 @@ Tuple Objects Like :c:func:`PyTuple_SetItem`, but does no error checking, and should *only* be used to fill in brand new tuples. + Bounds checking is performed as an assertion if Python is built in + :ref:`debug mode ` or :option:`with assertions <--with-assertions>`. + .. note:: This function "steals" a reference to *o*, and, unlike @@ -111,6 +114,8 @@ Tuple Objects raises :exc:`MemoryError` or :exc:`SystemError`. +.. _struct-sequence-objects: + Struct Sequence Objects ----------------------- @@ -142,39 +147,39 @@ type. Contains the meta information of a struct sequence type to create. - +-------------------+------------------------------+--------------------------------------+ - | Field | C Type | Meaning | - +===================+==============================+======================================+ - | ``name`` | ``const char *`` | name of the struct sequence type | - +-------------------+------------------------------+--------------------------------------+ - | ``doc`` | ``const char *`` | pointer to docstring for the type | - | | | or ``NULL`` to omit | - +-------------------+------------------------------+--------------------------------------+ - | ``fields`` | ``PyStructSequence_Field *`` | pointer to ``NULL``-terminated array | - | | | with field names of the new type | - +-------------------+------------------------------+--------------------------------------+ - | ``n_in_sequence`` | ``int`` | number of fields visible to the | - | | | Python side (if used as tuple) | - +-------------------+------------------------------+--------------------------------------+ + .. c:member:: const char *name + + Name of the struct sequence type. + + .. c:member:: const char *doc + + Pointer to docstring for the type or ``NULL`` to omit. + + .. c:member:: PyStructSequence_Field *fields + + Pointer to ``NULL``-terminated array with field names of the new type. + + .. c:member:: int n_in_sequence + + Number of fields visible to the Python side (if used as tuple). .. c:type:: PyStructSequence_Field Describes a field of a struct sequence. As a struct sequence is modeled as a tuple, all fields are typed as :c:expr:`PyObject*`. 
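For illustration only (not part of this patch): a minimal sketch showing how :c:type:`PyStructSequence_Desc` and :c:type:`PyStructSequence_Field` fit together; the ``mymod.version_info`` type and its field names are hypothetical::

   static PyStructSequence_Field version_fields[] = {
       {"major", "major version number"},
       {"minor", "minor version number"},
       {NULL}  /* sentinel: ends the list of named fields */
   };

   static PyStructSequence_Desc version_desc = {
       "mymod.version_info",      /* name */
       "Version information.",    /* doc */
       version_fields,            /* fields */
       2                          /* n_in_sequence */
   };

   /* Typically called once, e.g. from the module's init function;
      returns NULL with an exception set on failure. */
   PyTypeObject *VersionType = PyStructSequence_NewType(&version_desc);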
The index in the - :attr:`fields` array of the :c:type:`PyStructSequence_Desc` determines which + :c:member:`~PyStructSequence_Desc.fields` array of + the :c:type:`PyStructSequence_Desc` determines which field of the struct sequence is described. - +-----------+------------------+-----------------------------------------+ - | Field | C Type | Meaning | - +===========+==================+=========================================+ - | ``name`` | ``const char *`` | name for the field or ``NULL`` to end | - | | | the list of named fields, set to | - | | | :c:data:`PyStructSequence_UnnamedField` | - | | | to leave unnamed | - +-----------+------------------+-----------------------------------------+ - | ``doc`` | ``const char *`` | field docstring or ``NULL`` to omit | - +-----------+------------------+-----------------------------------------+ + .. c:member:: const char *name + + Name for the field or ``NULL`` to end the list of named fields, + set to :c:data:`PyStructSequence_UnnamedField` to leave unnamed. + + .. c:member:: const char *doc + + Field docstring or ``NULL`` to omit. .. c:var:: const char * const PyStructSequence_UnnamedField @@ -194,12 +199,17 @@ type. .. c:function:: PyObject* PyStructSequence_GetItem(PyObject *p, Py_ssize_t pos) Return the object at position *pos* in the struct sequence pointed to by *p*. - No bounds checking is performed. + + Bounds checking is performed as an assertion if Python is built in + :ref:`debug mode ` or :option:`with assertions <--with-assertions>`. .. c:function:: PyObject* PyStructSequence_GET_ITEM(PyObject *p, Py_ssize_t pos) - Macro equivalent of :c:func:`PyStructSequence_GetItem`. + Alias to :c:func:`PyStructSequence_GetItem`. + + .. versionchanged:: 3.13 + Now implemented as an alias to :c:func:`PyStructSequence_GetItem`. .. c:function:: void PyStructSequence_SetItem(PyObject *p, Py_ssize_t pos, PyObject *o) @@ -208,6 +218,9 @@ type. :c:func:`PyTuple_SET_ITEM`, this should only be used to fill in brand new instances. + Bounds checking is performed as an assertion if Python is built in + :ref:`debug mode ` or :option:`with assertions <--with-assertions>`. + .. note:: This function "steals" a reference to *o*. @@ -215,9 +228,7 @@ type. .. c:function:: void PyStructSequence_SET_ITEM(PyObject *p, Py_ssize_t *pos, PyObject *o) - Similar to :c:func:`PyStructSequence_SetItem`, but implemented as a static - inlined function. - - .. note:: + Alias to :c:func:`PyStructSequence_SetItem`. - This function "steals" a reference to *o*. + .. versionchanged:: 3.13 + Now implemented as an alias to :c:func:`PyStructSequence_SetItem`. diff --git a/Doc/c-api/type.rst b/Doc/c-api/type.rst index 7b5d1fac40ed87d..5aaa8147dd3176a 100644 --- a/Doc/c-api/type.rst +++ b/Doc/c-api/type.rst @@ -5,7 +5,7 @@ Type Objects ------------ -.. index:: object: type +.. index:: pair: object; type .. c:type:: PyTypeObject @@ -42,7 +42,7 @@ Type Objects Return the :c:member:`~PyTypeObject.tp_flags` member of *type*. This function is primarily meant for use with ``Py_LIMITED_API``; the individual flag bits are guaranteed to be stable across Python releases, but access to - :c:member:`~PyTypeObject.tp_flags` itself is not part of the limited API. + :c:member:`~PyTypeObject.tp_flags` itself is not part of the :ref:`limited API `. .. versionadded:: 3.2 @@ -50,6 +50,23 @@ Type Objects The return type is now ``unsigned long`` rather than ``long``. +.. 
c:function:: PyObject* PyType_GetDict(PyTypeObject* type) + + Return the type object's internal namespace, which is otherwise only + exposed via a read-only proxy (``cls.__dict__``). This is a + replacement for accessing :c:member:`~PyTypeObject.tp_dict` directly. + The returned dictionary must be treated as read-only. + + This function is meant for specific embedding and language-binding cases, + where direct access to the dict is necessary and indirect access + (e.g. via the proxy or :c:func:`PyObject_GetAttr`) isn't adequate. + + Extension modules should continue to use ``tp_dict``, + directly or indirectly, when setting up their own types. + + .. versionadded:: 3.12 + + .. c:function:: void PyType_Modified(PyTypeObject *type) Invalidate the internal lookup cache for the type and all of its @@ -86,7 +103,7 @@ Type Objects :c:func:`PyType_AddWatcher` will be called whenever :c:func:`PyType_Modified` reports a change to *type*. (The callback may be called only once for a series of consecutive modifications to *type*, if - :c:func:`PyType_Lookup` is not called on *type* between the modifications; + :c:func:`!_PyType_Lookup` is not called on *type* between the modifications; this is an implementation detail and subject to change.) An extension should never call ``PyType_Watch`` with a *watcher_id* that was @@ -115,7 +132,7 @@ Type Objects .. c:function:: int PyType_IS_GC(PyTypeObject *o) Return true if the type object includes support for the cycle detector; this - tests the type flag :const:`Py_TPFLAGS_HAVE_GC`. + tests the type flag :c:macro:`Py_TPFLAGS_HAVE_GC`. .. c:function:: int PyType_IsSubtype(PyTypeObject *a, PyTypeObject *b) @@ -148,10 +165,10 @@ Type Objects .. note:: If some of the base classes implements the GC protocol and the provided - type does not include the :const:`Py_TPFLAGS_HAVE_GC` in its flags, then + type does not include the :c:macro:`Py_TPFLAGS_HAVE_GC` in its flags, then the GC protocol will be automatically implemented from its parents. On the contrary, if the type being created does include - :const:`Py_TPFLAGS_HAVE_GC` in its flags then it **must** implement the + :c:macro:`Py_TPFLAGS_HAVE_GC` in its flags then it **must** implement the GC protocol itself by at least implementing the :c:member:`~PyTypeObject.tp_traverse` handle. @@ -198,7 +215,7 @@ Type Objects ``Py_TYPE(self)`` may be a *subclass* of the intended class, and subclasses are not necessarily defined in the same module as their superclass. See :c:type:`PyCMethod` to get the class that defines the method. - See :c:func:`PyType_GetModuleByDef` for cases when ``PyCMethod`` cannot + See :c:func:`PyType_GetModuleByDef` for cases when :c:type:`!PyCMethod` cannot be used. .. versionadded:: 3.9 @@ -232,6 +249,15 @@ Type Objects .. versionadded:: 3.11 +.. c:function:: int PyUnstable_Type_AssignVersionTag(PyTypeObject *type) + + Attempt to assign a version tag to the given type. + + Returns 1 if the type already had a valid version tag or a new one was + assigned, or 0 if a new tag could not be assigned. + + .. versionadded:: 3.12 + Creating Heap-Allocated Types ............................. @@ -242,13 +268,18 @@ The following functions and structs are used to create .. c:function:: PyObject* PyType_FromMetaclass(PyTypeObject *metaclass, PyObject *module, PyType_Spec *spec, PyObject *bases) Create and return a :ref:`heap type ` from the *spec* - (see :const:`Py_TPFLAGS_HEAPTYPE`). + (see :c:macro:`Py_TPFLAGS_HEAPTYPE`). The metaclass *metaclass* is used to construct the resulting type object. 
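For illustration only (not part of this patch): a rough usage sketch; the ``PointObject`` struct, the ``mymod.Point`` name and the ``module`` object are hypothetical, and :c:type:`PyType_Spec` and the slot mechanism are described further below::

   typedef struct {
       PyObject_HEAD
       double x, y;
   } PointObject;

   static PyType_Slot point_slots[] = {
       {Py_tp_doc, (void *)"A simple 2D point."},
       {Py_tp_new, PyType_GenericNew},
       {0, NULL}
   };

   static PyType_Spec point_spec = {
       .name = "mymod.Point",
       .basicsize = sizeof(PointObject),
       .flags = Py_TPFLAGS_DEFAULT,
       .slots = point_slots,
   };

   /* Passing NULL as the metaclass lets it be derived from the bases. */
   PyObject *point_type = PyType_FromMetaclass(NULL, module, &point_spec, NULL);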
When *metaclass* is ``NULL``, the metaclass is derived from *bases* (or *Py_tp_base[s]* slots if *bases* is ``NULL``, see below). - Note that metaclasses that override - :c:member:`~PyTypeObject.tp_new` are not supported. + + Metaclasses that override :c:member:`~PyTypeObject.tp_new` are not + supported, except if ``tp_new`` is ``NULL``. + (For backwards compatibility, other ``PyType_From*`` functions allow + such metaclasses. They ignore ``tp_new``, which may result in incomplete + initialization. This is deprecated and in Python 3.14+ such metaclasses will + not be supported.) The *bases* argument can be used to specify base classes; it can either be only one class or a tuple of classes. @@ -296,6 +327,11 @@ The following functions and structs are used to create The function now finds and uses a metaclass corresponding to the provided base classes. Previously, only :class:`type` instances were returned. + The :c:member:`~PyTypeObject.tp_new` of the metaclass is *ignored*, which may result in incomplete initialization. + Creating classes whose metaclass overrides + :c:member:`~PyTypeObject.tp_new` is deprecated and in Python 3.14+ it + will no longer be allowed. .. c:function:: PyObject* PyType_FromSpecWithBases(PyType_Spec *spec, PyObject *bases) @@ -308,6 +344,12 @@ The following functions and structs are used to create The function now finds and uses a metaclass corresponding to the provided base classes. Previously, only :class:`type` instances were returned. + The :c:member:`~PyTypeObject.tp_new` of the metaclass is *ignored*, + which may result in incomplete initialization. + Creating classes whose metaclass overrides + :c:member:`~PyTypeObject.tp_new` is deprecated and in Python 3.14+ it + will no longer be allowed. + .. c:function:: PyObject* PyType_FromSpec(PyType_Spec *spec) Equivalent to ``PyType_FromMetaclass(NULL, NULL, spec, NULL)``. @@ -318,41 +360,94 @@ The following functions and structs are used to create base classes provided in *Py_tp_base[s]* slots. Previously, only :class:`type` instances were returned. + The :c:member:`~PyTypeObject.tp_new` of the metaclass is *ignored*, + which may result in incomplete initialization. + Creating classes whose metaclass overrides + :c:member:`~PyTypeObject.tp_new` is deprecated and in Python 3.14+ it + will no longer be allowed. + +.. raw:: html + + + + + + + + .. c:type:: PyType_Spec Structure defining a type's behavior. - .. c:member:: const char* PyType_Spec.name + .. c:member:: const char* name Name of the type, used to set :c:member:`PyTypeObject.tp_name`. - .. c:member:: int PyType_Spec.basicsize - .. c:member:: int PyType_Spec.itemsize + .. c:member:: int basicsize + + If positive, specifies the size of the instance in bytes. + It is used to set :c:member:`PyTypeObject.tp_basicsize`. + + If zero, specifies that :c:member:`~PyTypeObject.tp_basicsize` + should be inherited. + + If negative, the absolute value specifies how much space instances of the + class need *in addition* to the superclass. + Use :c:func:`PyObject_GetTypeData` to get a pointer to subclass-specific + memory reserved this way. + + .. versionchanged:: 3.12 + + Previously, this field could not be negative. + + .. c:member:: int itemsize + + Size of one element of a variable-size type, in bytes. + Used to set :c:member:`PyTypeObject.tp_itemsize`. + See ``tp_itemsize`` documentation for caveats. - Size of the instance in bytes, used to set - :c:member:`PyTypeObject.tp_basicsize` and - :c:member:`PyTypeObject.tp_itemsize`. 
+ If zero, :c:member:`~PyTypeObject.tp_itemsize` is inherited. + Extending arbitrary variable-sized classes is dangerous, + since some types use a fixed offset for variable-sized memory, + which can then overlap fixed-sized memory used by a subclass. + To help prevent mistakes, inheriting ``itemsize`` is only possible + in the following situations: - .. c:member:: int PyType_Spec.flags + - The base is not variable-sized (its + :c:member:`~PyTypeObject.tp_itemsize`). + - The requested :c:member:`PyType_Spec.basicsize` is positive, + suggesting that the memory layout of the base class is known. + - The requested :c:member:`PyType_Spec.basicsize` is zero, + suggesting that the subclass does not access the instance's memory + directly. + - With the :c:macro:`Py_TPFLAGS_ITEMS_AT_END` flag. + + .. c:member:: unsigned int flags Type flags, used to set :c:member:`PyTypeObject.tp_flags`. If the ``Py_TPFLAGS_HEAPTYPE`` flag is not set, :c:func:`PyType_FromSpecWithBases` sets it automatically. - .. c:member:: PyType_Slot *PyType_Spec.slots + .. c:member:: PyType_Slot *slots Array of :c:type:`PyType_Slot` structures. Terminated by the special slot value ``{0, NULL}``. Each slot ID should be specified at most once. +.. raw:: html + + + + + .. c:type:: PyType_Slot Structure defining optional functionality of a type, containing a slot ID and a value pointer. - .. c:member:: int PyType_Slot.slot + .. c:member:: int slot A slot ID. @@ -366,26 +461,39 @@ The following functions and structs are used to create * ``Py_nb_add`` to set :c:member:`PyNumberMethods.nb_add` * ``Py_sq_length`` to set :c:member:`PySequenceMethods.sq_length` - The following fields cannot be set at all using :c:type:`PyType_Spec` and - :c:type:`PyType_Slot`: + The following “offset†fields cannot be set using :c:type:`PyType_Slot`: + + * :c:member:`~PyTypeObject.tp_weaklistoffset` + (use :c:macro:`Py_TPFLAGS_MANAGED_WEAKREF` instead if possible) + * :c:member:`~PyTypeObject.tp_dictoffset` + (use :c:macro:`Py_TPFLAGS_MANAGED_DICT` instead if possible) + * :c:member:`~PyTypeObject.tp_vectorcall_offset` + (use ``"__vectorcalloffset__"`` in + :ref:`PyMemberDef `) + + If it is not possible to switch to a ``MANAGED`` flag (for example, + for vectorcall or to support Python older than 3.12), specify the + offset in :c:member:`Py_tp_members `. + See :ref:`PyMemberDef documentation ` + for details. + + The following fields cannot be set at all when creating a heap type: - * :c:member:`~PyTypeObject.tp_dict` - * :c:member:`~PyTypeObject.tp_mro` - * :c:member:`~PyTypeObject.tp_cache` - * :c:member:`~PyTypeObject.tp_subclasses` - * :c:member:`~PyTypeObject.tp_weaklist` * :c:member:`~PyTypeObject.tp_vectorcall` - * :c:member:`~PyTypeObject.tp_weaklistoffset` - (use :const:`Py_TPFLAGS_MANAGED_WEAKREF` instead) - * :c:member:`~PyTypeObject.tp_dictoffset` - (use :const:`Py_TPFLAGS_MANAGED_DICT` instead) - * :c:member:`~PyTypeObject.tp_vectorcall_offset` - (see :ref:`PyMemberDef `) + (use :c:member:`~PyTypeObject.tp_new` and/or + :c:member:`~PyTypeObject.tp_init`) + + * Internal fields: + :c:member:`~PyTypeObject.tp_dict`, + :c:member:`~PyTypeObject.tp_mro`, + :c:member:`~PyTypeObject.tp_cache`, + :c:member:`~PyTypeObject.tp_subclasses`, and + :c:member:`~PyTypeObject.tp_weaklist`. Setting :c:data:`Py_tp_bases` or :c:data:`Py_tp_base` may be problematic on some platforms. To avoid issues, use the *bases* argument of - :py:func:`PyType_FromSpecWithBases` instead. + :c:func:`PyType_FromSpecWithBases` instead. .. 
versionchanged:: 3.9 @@ -394,9 +502,9 @@ The following functions and structs are used to create .. versionchanged:: 3.11 :c:member:`~PyBufferProcs.bf_getbuffer` and :c:member:`~PyBufferProcs.bf_releasebuffer` are now available - under the limited API. + under the :ref:`limited API `. - .. c:member:: void *PyType_Slot.pfunc + .. c:member:: void *pfunc The desired value of the slot. In most cases, this is a pointer to a function. diff --git a/Doc/c-api/typehints.rst b/Doc/c-api/typehints.rst index 4c1957a2a1dbca0..98fe68737deb81c 100644 --- a/Doc/c-api/typehints.rst +++ b/Doc/c-api/typehints.rst @@ -35,7 +35,7 @@ two types exist -- :ref:`GenericAlias ` and ... } - .. seealso:: The data model method :meth:`__class_getitem__`. + .. seealso:: The data model method :meth:`~object.__class_getitem__`. .. versionadded:: 3.9 diff --git a/Doc/c-api/typeobj.rst b/Doc/c-api/typeobj.rst index 8ccdece3efc54c9..10c05beda7c66f0 100644 --- a/Doc/c-api/typeobj.rst +++ b/Doc/c-api/typeobj.rst @@ -147,17 +147,25 @@ Quick Reference +------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+ | :c:member:`~PyTypeObject.tp_vectorcall` | :c:type:`vectorcallfunc` | | | | | | +------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+ + | [:c:member:`~PyTypeObject.tp_watched`] | unsigned char | | | | | | + +------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+ .. [#slots] - A slot name in parentheses indicates it is (effectively) deprecated. - Names in angle brackets should be treated as read-only. - Names in square brackets are for internal use only. - "" (as a prefix) means the field is required (must be non-``NULL``). + + **()**: A slot name in parentheses indicates it is (effectively) deprecated. + + **<>**: Names in angle brackets should be initially set to ``NULL`` and + treated as read-only. + + **[]**: Names in square brackets are for internal use only. + + **** (as a prefix) means the field is required (must be non-``NULL``). + .. [#cols] Columns: - **"O"**: set on :c:type:`PyBaseObject_Type` + **"O"**: set on :c:data:`PyBaseObject_Type` - **"T"**: set on :c:type:`PyType_Type` + **"T"**: set on :c:data:`PyType_Type` **"D"**: default (if slot is set to ``NULL``) @@ -444,6 +452,7 @@ slot typedefs | | | | | | :c:type:`PyObject` * | | | | :c:type:`Py_ssize_t` | | +| | :c:type:`PyObject` * | | +-----------------------------+-----------------------------+----------------------+ | :c:type:`objobjproc` | .. line-block:: | int | | | | | @@ -476,17 +485,17 @@ PyObject Slots -------------- The type object structure extends the :c:type:`PyVarObject` structure. The -:attr:`ob_size` field is used for dynamic types (created by :func:`type_new`, +:c:member:`~PyVarObject.ob_size` field is used for dynamic types (created by :c:func:`!type_new`, usually called from a class statement). Note that :c:data:`PyType_Type` (the metatype) initializes :c:member:`~PyTypeObject.tp_itemsize`, which means that its instances (i.e. -type objects) *must* have the :attr:`ob_size` field. +type objects) *must* have the :c:member:`~PyVarObject.ob_size` field. .. c:member:: Py_ssize_t PyObject.ob_refcnt This is the type object's reference count, initialized to ``1`` by the ``PyObject_HEAD_INIT`` macro. 
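For illustration only (not part of this patch): the head of a typical statically allocated type, following the pattern used in the extension tutorial; ``FooObject`` and the ``mymod.Foo`` name are hypothetical::

   typedef struct {
       PyObject_HEAD
       int value;
   } FooObject;

   static PyTypeObject Foo_Type = {
       .ob_base = PyVarObject_HEAD_INIT(NULL, 0)
       /* PyVarObject_HEAD_INIT sets ob_refcnt (to 1), ob_type and ob_size. */
       .tp_name = "mymod.Foo",
       .tp_basicsize = sizeof(FooObject),
       .tp_flags = Py_TPFLAGS_DEFAULT,
   };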
Note that for :ref:`statically allocated type - objects `, the type's instances (objects whose :attr:`ob_type` + objects `, the type's instances (objects whose :c:member:`~PyObject.ob_type` points back to the type) do *not* count as references. But for :ref:`dynamically allocated type objects `, the instances *do* count as references. @@ -510,8 +519,8 @@ type objects) *must* have the :attr:`ob_size` field. Foo_Type.ob_type = &PyType_Type; This should be done before any instances of the type are created. - :c:func:`PyType_Ready` checks if :attr:`ob_type` is ``NULL``, and if so, - initializes it to the :attr:`ob_type` field of the base class. + :c:func:`PyType_Ready` checks if :c:member:`~PyObject.ob_type` is ``NULL``, and if so, + initializes it to the :c:member:`~PyObject.ob_type` field of the base class. :c:func:`PyType_Ready` will not change this field if it is non-zero. **Inheritance:** @@ -519,28 +528,6 @@ type objects) *must* have the :attr:`ob_size` field. This field is inherited by subtypes. -.. c:member:: PyObject* PyObject._ob_next - PyObject* PyObject._ob_prev - - These fields are only present when the macro ``Py_TRACE_REFS`` is defined - (see the :option:`configure --with-trace-refs option <--with-trace-refs>`). - - Their initialization to ``NULL`` is taken care of by the - ``PyObject_HEAD_INIT`` macro. For :ref:`statically allocated objects - `, these fields always remain ``NULL``. For :ref:`dynamically - allocated objects `, these two fields are used to link the - object into a doubly linked list of *all* live objects on the heap. - - This could be used for various debugging purposes; currently the only uses - are the :func:`sys.getobjects` function and to print the objects that are - still alive at the end of a run when the environment variable - :envvar:`PYTHONDUMPREFS` is set. - - **Inheritance:** - - These fields are not inherited by subtypes. - - PyVarObject Slots ----------------- @@ -560,8 +547,8 @@ PyTypeObject Slots Each slot has a section describing inheritance. If :c:func:`PyType_Ready` may set a value when the field is set to ``NULL`` then there will also be -a "Default" section. (Note that many fields set on :c:type:`PyBaseObject_Type` -and :c:type:`PyType_Type` effectively act as defaults.) +a "Default" section. (Note that many fields set on :c:data:`PyBaseObject_Type` +and :c:data:`PyType_Type` effectively act as defaults.) .. c:member:: const char* PyTypeObject.tp_name @@ -570,7 +557,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) name, followed by a dot, followed by the type name; for built-in types, it should be just the type name. If the module is a submodule of a package, the full package name is part of the full module name. For example, a type named - :class:`T` defined in module :mod:`M` in subpackage :mod:`Q` in package :mod:`P` + :class:`!T` defined in module :mod:`!M` in subpackage :mod:`!Q` in package :mod:`!P` should have the :c:member:`~PyTypeObject.tp_name` initializer ``"P.Q.M.T"``. For :ref:`dynamically allocated type objects `, @@ -610,20 +597,20 @@ and :c:type:`PyType_Type` effectively act as defaults.) instances have the same size, given in :c:member:`~PyTypeObject.tp_basicsize`. 
For a type with variable-length instances, the instances must have an - :attr:`ob_size` field, and the instance size is :c:member:`~PyTypeObject.tp_basicsize` plus N + :c:member:`~PyVarObject.ob_size` field, and the instance size is :c:member:`~PyTypeObject.tp_basicsize` plus N times :c:member:`~PyTypeObject.tp_itemsize`, where N is the "length" of the object. The value of - N is typically stored in the instance's :attr:`ob_size` field. There are - exceptions: for example, ints use a negative :attr:`ob_size` to indicate a + N is typically stored in the instance's :c:member:`~PyVarObject.ob_size` field. There are + exceptions: for example, ints use a negative :c:member:`~PyVarObject.ob_size` to indicate a negative number, and N is ``abs(ob_size)`` there. Also, the presence of an - :attr:`ob_size` field in the instance layout doesn't mean that the instance + :c:member:`~PyVarObject.ob_size` field in the instance layout doesn't mean that the instance structure is variable-length (for example, the structure for the list type has - fixed-length instances, yet those instances have a meaningful :attr:`ob_size` + fixed-length instances, yet those instances have a meaningful :c:member:`~PyVarObject.ob_size` field). The basic size includes the fields in the instance declared by the macro :c:macro:`PyObject_HEAD` or :c:macro:`PyObject_VAR_HEAD` (whichever is used to - declare the instance struct) and this in turn includes the :attr:`_ob_prev` and - :attr:`_ob_next` fields if they are present. This means that the only correct + declare the instance struct) and this in turn includes the :c:member:`~PyObject._ob_prev` and + :c:member:`~PyObject._ob_next` fields if they are present. This means that the only correct way to get an initializer for the :c:member:`~PyTypeObject.tp_basicsize` is to use the ``sizeof`` operator on the struct used to declare the instance layout. The basic size does not include the GC header size. @@ -660,15 +647,15 @@ and :c:type:`PyType_Type` effectively act as defaults.) memory buffers owned by the instance (using the freeing function corresponding to the allocation function used to allocate the buffer), and call the type's :c:member:`~PyTypeObject.tp_free` function. If the type is not subtypable - (doesn't have the :const:`Py_TPFLAGS_BASETYPE` flag bit set), it is + (doesn't have the :c:macro:`Py_TPFLAGS_BASETYPE` flag bit set), it is permissible to call the object deallocator directly instead of via :c:member:`~PyTypeObject.tp_free`. The object deallocator should be the one used to allocate the instance; this is normally :c:func:`PyObject_Del` if the instance was allocated - using :c:func:`PyObject_New` or :c:func:`PyObject_VarNew`, or + using :c:macro:`PyObject_New` or :c:macro:`PyObject_NewVar`, or :c:func:`PyObject_GC_Del` if the instance was allocated using - :c:func:`PyObject_GC_New` or :c:func:`PyObject_GC_NewVar`. + :c:macro:`PyObject_GC_New` or :c:macro:`PyObject_GC_NewVar`. - If the type supports garbage collection (has the :const:`Py_TPFLAGS_HAVE_GC` + If the type supports garbage collection (has the :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit set), the destructor should call :c:func:`PyObject_GC_UnTrack` before clearing any member fields. @@ -680,8 +667,9 @@ and :c:type:`PyType_Type` effectively act as defaults.) 
Py_TYPE(self)->tp_free((PyObject *)self); } - Finally, if the type is heap allocated (:const:`Py_TPFLAGS_HEAPTYPE`), the - deallocator should decrement the reference count for its type object after + Finally, if the type is heap allocated (:c:macro:`Py_TPFLAGS_HEAPTYPE`), the + deallocator should release the owned reference to its type object + (via :c:func:`Py_DECREF`) after calling the type deallocator. In order to avoid dangling pointers, the recommended way to achieve this is: @@ -707,12 +695,12 @@ and :c:type:`PyType_Type` effectively act as defaults.) a more efficient alternative of the simpler :c:member:`~PyTypeObject.tp_call`. - This field is only used if the flag :const:`Py_TPFLAGS_HAVE_VECTORCALL` + This field is only used if the flag :c:macro:`Py_TPFLAGS_HAVE_VECTORCALL` is set. If so, this must be a positive integer containing the offset in the instance of a :c:type:`vectorcallfunc` pointer. The *vectorcallfunc* pointer may be ``NULL``, in which case the instance behaves - as if :const:`Py_TPFLAGS_HAVE_VECTORCALL` was not set: calling the instance + as if :c:macro:`Py_TPFLAGS_HAVE_VECTORCALL` was not set: calling the instance falls back to :c:member:`~PyTypeObject.tp_call`. Any class that sets ``Py_TPFLAGS_HAVE_VECTORCALL`` must also set @@ -731,15 +719,15 @@ and :c:type:`PyType_Type` effectively act as defaults.) Before version 3.12, it was not recommended for :ref:`mutable heap types ` to implement the vectorcall protocol. - When a user sets :attr:`~type.__call__` in Python code, only *tp_call* is + When a user sets :attr:`~object.__call__` in Python code, only *tp_call* is updated, likely making it inconsistent with the vectorcall function. Since 3.12, setting ``__call__`` will disable vectorcall optimization - by clearing the :const:`Py_TPFLAGS_HAVE_VECTORCALL` flag. + by clearing the :c:macro:`Py_TPFLAGS_HAVE_VECTORCALL` flag. **Inheritance:** This field is always inherited. - However, the :const:`Py_TPFLAGS_HAVE_VECTORCALL` flag is not + However, the :c:macro:`Py_TPFLAGS_HAVE_VECTORCALL` flag is not always inherited. If it's not set, then the subclass won't use :ref:`vectorcall `, except when :c:func:`PyVectorcall_Call` is explicitly called. @@ -755,7 +743,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** - Group: :attr:`tp_getattr`, :attr:`tp_getattro` + Group: :c:member:`~PyTypeObject.tp_getattr`, :c:member:`~PyTypeObject.tp_getattro` This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_getattro`: a subtype inherits both :c:member:`~PyTypeObject.tp_getattr` and :c:member:`~PyTypeObject.tp_getattro` from its base type when @@ -772,7 +760,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** - Group: :attr:`tp_setattr`, :attr:`tp_setattro` + Group: :c:member:`~PyTypeObject.tp_setattr`, :c:member:`~PyTypeObject.tp_setattro` This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_setattro`: a subtype inherits both :c:member:`~PyTypeObject.tp_setattr` and :c:member:`~PyTypeObject.tp_setattro` from its base type when @@ -796,7 +784,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. c:member:: reprfunc PyTypeObject.tp_repr - .. index:: builtin: repr + .. index:: pair: built-in function; repr An optional pointer to a function that implements the built-in function :func:`repr`. @@ -861,7 +849,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. c:member:: hashfunc PyTypeObject.tp_hash - .. index:: builtin: hash + .. 
index:: pair: built-in function; hash An optional pointer to a function that implements the built-in function :func:`hash`. @@ -874,7 +862,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) normal return value; when an error occurs during the computation of the hash value, the function should set an exception and return ``-1``. - When this field is not set (*and* :attr:`tp_richcompare` is not set), + When this field is not set (*and* :c:member:`~PyTypeObject.tp_richcompare` is not set), an attempt to take the hash of the object raises :exc:`TypeError`. This is the same as setting it to :c:func:`PyObject_HashNotImplemented`. @@ -888,7 +876,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** - Group: :attr:`tp_hash`, :attr:`tp_richcompare` + Group: :c:member:`~PyTypeObject.tp_hash`, :c:member:`~PyTypeObject.tp_richcompare` This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_richcompare`: a subtype inherits both of @@ -947,7 +935,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** - Group: :attr:`tp_getattr`, :attr:`tp_getattro` + Group: :c:member:`~PyTypeObject.tp_getattr`, :c:member:`~PyTypeObject.tp_getattro` This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_getattr`: a subtype inherits both :c:member:`~PyTypeObject.tp_getattr` and :c:member:`~PyTypeObject.tp_getattro` from its base type when @@ -955,7 +943,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Default:** - :c:type:`PyBaseObject_Type` uses :c:func:`PyObject_GenericGetAttr`. + :c:data:`PyBaseObject_Type` uses :c:func:`PyObject_GenericGetAttr`. .. c:member:: setattrofunc PyTypeObject.tp_setattro @@ -973,7 +961,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** - Group: :attr:`tp_setattr`, :attr:`tp_setattro` + Group: :c:member:`~PyTypeObject.tp_setattr`, :c:member:`~PyTypeObject.tp_setattro` This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_setattr`: a subtype inherits both :c:member:`~PyTypeObject.tp_setattr` and :c:member:`~PyTypeObject.tp_setattro` from its base type when @@ -981,7 +969,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Default:** - :c:type:`PyBaseObject_Type` uses :c:func:`PyObject_GenericSetAttr`. + :c:data:`PyBaseObject_Type` uses :c:func:`PyObject_GenericSetAttr`. .. c:member:: PyBufferProcs* PyTypeObject.tp_as_buffer @@ -1013,30 +1001,32 @@ and :c:type:`PyType_Type` effectively act as defaults.) this flag bit. The flag bits that pertain to extension structures are strictly inherited if the extension structure is inherited, i.e. the base type's value of the flag bit is copied into the subtype together with a pointer to the extension - structure. The :const:`Py_TPFLAGS_HAVE_GC` flag bit is inherited together with + structure. The :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit is inherited together with the :c:member:`~PyTypeObject.tp_traverse` and :c:member:`~PyTypeObject.tp_clear` fields, i.e. if the - :const:`Py_TPFLAGS_HAVE_GC` flag bit is clear in the subtype and the + :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit is clear in the subtype and the :c:member:`~PyTypeObject.tp_traverse` and :c:member:`~PyTypeObject.tp_clear` fields in the subtype exist and have ``NULL`` values. .. XXX are most flag bits *really* inherited individually? **Default:** - :c:type:`PyBaseObject_Type` uses + :c:data:`PyBaseObject_Type` uses ``Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE``. **Bit Masks:** + .. 
c:namespace:: NULL + The following bit masks are currently defined; these can be ORed together using the ``|`` operator to form the value of the :c:member:`~PyTypeObject.tp_flags` field. The macro :c:func:`PyType_HasFeature` takes a type and a flags value, *tp* and *f*, and checks whether ``tp->tp_flags & f`` is non-zero. - .. data:: Py_TPFLAGS_HEAPTYPE + .. c:macro:: Py_TPFLAGS_HEAPTYPE This bit is set when the type object itself is allocated on the heap, for example, types created dynamically using :c:func:`PyType_FromSpec`. In this - case, the :attr:`ob_type` field of its instances is considered a reference to + case, the :c:member:`~PyObject.ob_type` field of its instances is considered a reference to the type, and the type object is INCREF'ed when a new instance is created, and DECREF'ed when an instance is destroyed (this does not apply to instances of subtypes; only the type referenced by the instance's ob_type gets INCREF'ed or @@ -1047,7 +1037,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) ??? - .. data:: Py_TPFLAGS_BASETYPE + .. c:macro:: Py_TPFLAGS_BASETYPE This bit is set when the type can be used as the base type of another type. If this bit is clear, the type cannot be subtyped (similar to a "final" class in @@ -1058,7 +1048,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) ??? - .. data:: Py_TPFLAGS_READY + .. c:macro:: Py_TPFLAGS_READY This bit is set when the type object has been fully initialized by :c:func:`PyType_Ready`. @@ -1068,7 +1058,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) ??? - .. data:: Py_TPFLAGS_READYING + .. c:macro:: Py_TPFLAGS_READYING This bit is set while :c:func:`PyType_Ready` is in the process of initializing the type object. @@ -1078,10 +1068,10 @@ and :c:type:`PyType_Type` effectively act as defaults.) ??? - .. data:: Py_TPFLAGS_HAVE_GC + .. c:macro:: Py_TPFLAGS_HAVE_GC This bit is set when the object supports garbage collection. If this bit - is set, instances must be created using :c:func:`PyObject_GC_New` and + is set, instances must be created using :c:macro:`PyObject_GC_New` and destroyed using :c:func:`PyObject_GC_Del`. More information in section :ref:`supporting-cycle-detection`. This bit also implies that the GC-related fields :c:member:`~PyTypeObject.tp_traverse` and :c:member:`~PyTypeObject.tp_clear` are present in @@ -1089,28 +1079,28 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** - Group: :const:`Py_TPFLAGS_HAVE_GC`, :attr:`tp_traverse`, :attr:`tp_clear` + Group: :c:macro:`Py_TPFLAGS_HAVE_GC`, :c:member:`~PyTypeObject.tp_traverse`, :c:member:`~PyTypeObject.tp_clear` - The :const:`Py_TPFLAGS_HAVE_GC` flag bit is inherited - together with the :attr:`tp_traverse` and :attr:`tp_clear` - fields, i.e. if the :const:`Py_TPFLAGS_HAVE_GC` flag bit is - clear in the subtype and the :attr:`tp_traverse` and - :attr:`tp_clear` fields in the subtype exist and have ``NULL`` + The :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit is inherited + together with the :c:member:`~PyTypeObject.tp_traverse` and :c:member:`~PyTypeObject.tp_clear` + fields, i.e. if the :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit is + clear in the subtype and the :c:member:`~PyTypeObject.tp_traverse` and + :c:member:`~PyTypeObject.tp_clear` fields in the subtype exist and have ``NULL`` values. - .. data:: Py_TPFLAGS_DEFAULT + .. c:macro:: Py_TPFLAGS_DEFAULT This is a bitmask of all the bits that pertain to the existence of certain fields in the type object and its extension structures. 
Currently, it includes - the following bits: :const:`Py_TPFLAGS_HAVE_STACKLESS_EXTENSION`. + the following bits: :c:macro:`Py_TPFLAGS_HAVE_STACKLESS_EXTENSION`. **Inheritance:** ??? - .. data:: Py_TPFLAGS_METHOD_DESCRIPTOR + .. c:macro:: Py_TPFLAGS_METHOD_DESCRIPTOR This bit indicates that objects behave like unbound methods. @@ -1131,17 +1121,20 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** This flag is never inherited by types without the - :const:`Py_TPFLAGS_IMMUTABLETYPE` flag set. For extension types, it is + :c:macro:`Py_TPFLAGS_IMMUTABLETYPE` flag set. For extension types, it is inherited whenever :c:member:`~PyTypeObject.tp_descr_get` is inherited. - .. data:: Py_TPFLAGS_MANAGED_DICT + .. c:macro:: Py_TPFLAGS_MANAGED_DICT - This bit indicates that instances of the class have a ``__dict___`` - attribute, and that the space for the dictionary is managed by the VM. + This bit indicates that instances of the class have a ``__dict__`` + attribute, and that the space for the dictionary is managed by the VM. - If this flag is set, :const:`Py_TPFLAGS_HAVE_GC` should also be set. + If this flag is set, :c:macro:`Py_TPFLAGS_HAVE_GC` should also be set. - .. versionadded:: 3.12 + The type traverse function must call :c:func:`PyObject_VisitManagedDict` + and its clear function must call :c:func:`PyObject_ClearManagedDict`. + + .. versionadded:: 3.12 **Inheritance:** @@ -1149,12 +1142,12 @@ and :c:type:`PyType_Type` effectively act as defaults.) :c:member:`~PyTypeObject.tp_dictoffset` field is set in a superclass. - .. data:: Py_TPFLAGS_MANAGED_WEAKREF + .. c:macro:: Py_TPFLAGS_MANAGED_WEAKREF - This bit indicates that instances of the class should be weakly - referenceable. + This bit indicates that instances of the class should be weakly + referenceable. - .. versionadded:: 3.12 + .. versionadded:: 3.12 **Inheritance:** @@ -1162,17 +1155,37 @@ and :c:type:`PyType_Type` effectively act as defaults.) :c:member:`~PyTypeObject.tp_weaklistoffset` field is set in a superclass. + .. c:macro:: Py_TPFLAGS_ITEMS_AT_END + + Only usable with variable-size types, i.e. ones with non-zero + :c:member:`~PyTypeObject.tp_itemsize`. + + Indicates that the variable-sized portion of an instance of this type is + at the end of the instance's memory area, at an offset of + ``Py_TYPE(obj)->tp_basicsize`` (which may be different in each + subclass). + + When setting this flag, be sure that all superclasses either + use this memory layout, or are not variable-sized. + Python does not check this. + + .. versionadded:: 3.12 + + **Inheritance:** + + This flag is inherited. + .. XXX Document more flags here? - .. data:: Py_TPFLAGS_LONG_SUBCLASS - .. data:: Py_TPFLAGS_LIST_SUBCLASS - .. data:: Py_TPFLAGS_TUPLE_SUBCLASS - .. data:: Py_TPFLAGS_BYTES_SUBCLASS - .. data:: Py_TPFLAGS_UNICODE_SUBCLASS - .. data:: Py_TPFLAGS_DICT_SUBCLASS - .. data:: Py_TPFLAGS_BASE_EXC_SUBCLASS - .. data:: Py_TPFLAGS_TYPE_SUBCLASS + .. c:macro:: Py_TPFLAGS_LONG_SUBCLASS + .. c:macro:: Py_TPFLAGS_LIST_SUBCLASS + .. c:macro:: Py_TPFLAGS_TUPLE_SUBCLASS + .. c:macro:: Py_TPFLAGS_BYTES_SUBCLASS + .. c:macro:: Py_TPFLAGS_UNICODE_SUBCLASS + .. c:macro:: Py_TPFLAGS_DICT_SUBCLASS + .. c:macro:: Py_TPFLAGS_BASE_EXC_SUBCLASS + .. c:macro:: Py_TPFLAGS_TYPE_SUBCLASS These flags are used by functions such as :c:func:`PyLong_Check` to quickly determine if a type is a subclass @@ -1183,7 +1196,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) will behave differently depending on what kind of check is used. - .. 
data:: Py_TPFLAGS_HAVE_FINALIZE + .. c:macro:: Py_TPFLAGS_HAVE_FINALIZE This bit is set when the :c:member:`~PyTypeObject.tp_finalize` slot is present in the type structure. @@ -1196,7 +1209,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) type structure. - .. data:: Py_TPFLAGS_HAVE_VECTORCALL + .. c:macro:: Py_TPFLAGS_HAVE_VECTORCALL This bit is set when the class implements the :ref:`vectorcall protocol `. @@ -1216,7 +1229,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) This flag can now be inherited by mutable classes. - .. data:: Py_TPFLAGS_IMMUTABLETYPE + .. c:macro:: Py_TPFLAGS_IMMUTABLETYPE This bit is set for type objects that are immutable: type attributes cannot be set nor deleted. @@ -1229,7 +1242,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. versionadded:: 3.10 - .. data:: Py_TPFLAGS_DISALLOW_INSTANTIATION + .. c:macro:: Py_TPFLAGS_DISALLOW_INSTANTIATION Disallow creating instances of the type: set :c:member:`~PyTypeObject.tp_new` to NULL and don't create the ``__new__`` @@ -1245,11 +1258,22 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** This flag is not inherited. + However, subclasses will not be instantiable unless they provide a + non-NULL :c:member:`~PyTypeObject.tp_new` (which is only possible + via the C API). + + .. note:: + + To disallow instantiating a class directly but allow instantiating + its subclasses (e.g. for an :term:`abstract base class`), + do not use this flag. + Instead, make :c:member:`~PyTypeObject.tp_new` only succeed for + subclasses. .. versionadded:: 3.10 - .. data:: Py_TPFLAGS_MAPPING + .. c:macro:: Py_TPFLAGS_MAPPING This bit indicates that instances of the class may match mapping patterns when used as the subject of a :keyword:`match` block. It is automatically @@ -1258,20 +1282,20 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. note:: - :const:`Py_TPFLAGS_MAPPING` and :const:`Py_TPFLAGS_SEQUENCE` are + :c:macro:`Py_TPFLAGS_MAPPING` and :c:macro:`Py_TPFLAGS_SEQUENCE` are mutually exclusive; it is an error to enable both flags simultaneously. **Inheritance:** This flag is inherited by types that do not already set - :const:`Py_TPFLAGS_SEQUENCE`. + :c:macro:`Py_TPFLAGS_SEQUENCE`. .. seealso:: :pep:`634` -- Structural Pattern Matching: Specification .. versionadded:: 3.10 - .. data:: Py_TPFLAGS_SEQUENCE + .. c:macro:: Py_TPFLAGS_SEQUENCE This bit indicates that instances of the class may match sequence patterns when used as the subject of a :keyword:`match` block. It is automatically @@ -1280,19 +1304,29 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. note:: - :const:`Py_TPFLAGS_MAPPING` and :const:`Py_TPFLAGS_SEQUENCE` are + :c:macro:`Py_TPFLAGS_MAPPING` and :c:macro:`Py_TPFLAGS_SEQUENCE` are mutually exclusive; it is an error to enable both flags simultaneously. **Inheritance:** This flag is inherited by types that do not already set - :const:`Py_TPFLAGS_MAPPING`. + :c:macro:`Py_TPFLAGS_MAPPING`. .. seealso:: :pep:`634` -- Structural Pattern Matching: Specification .. versionadded:: 3.10 + .. c:macro:: Py_TPFLAGS_VALID_VERSION_TAG + + Internal. Do not set or unset this flag. + To indicate that a class has changed call :c:func:`PyType_Modified` + + .. warning:: + This flag is present in header files, but is an internal feature and should + not be used. It will be removed in a future version of CPython + + .. 
c:member:: const char* PyTypeObject.tp_doc An optional pointer to a NUL-terminated C string giving the docstring for this @@ -1307,7 +1341,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. c:member:: traverseproc PyTypeObject.tp_traverse An optional pointer to a traversal function for the garbage collector. This is - only used if the :const:`Py_TPFLAGS_HAVE_GC` flag bit is set. The signature is:: + only used if the :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit is set. The signature is:: int tp_traverse(PyObject *self, visitproc visit, void *arg); @@ -1317,8 +1351,8 @@ and :c:type:`PyType_Type` effectively act as defaults.) The :c:member:`~PyTypeObject.tp_traverse` pointer is used by the garbage collector to detect reference cycles. A typical implementation of a :c:member:`~PyTypeObject.tp_traverse` function simply calls :c:func:`Py_VISIT` on each of the instance's members that are Python - objects that the instance owns. For example, this is function :c:func:`local_traverse` from the - :mod:`_thread` extension module:: + objects that the instance owns. For example, this is function :c:func:`!local_traverse` from the + :mod:`!_thread` extension module:: static int local_traverse(localobject *self, visitproc visit, void *arg) @@ -1337,6 +1371,23 @@ and :c:type:`PyType_Type` effectively act as defaults.) debugging aid you may want to visit it anyway just so the :mod:`gc` module's :func:`~gc.get_referents` function will include it. + Heap types (:c:macro:`Py_TPFLAGS_HEAPTYPE`) must visit their type with:: + + Py_VISIT(Py_TYPE(self)); + + This is only needed since Python 3.9. To support Python 3.8 and older, this + line must be conditional:: + + #if PY_VERSION_HEX >= 0x03090000 + Py_VISIT(Py_TYPE(self)); + #endif + + If the :c:macro:`Py_TPFLAGS_MANAGED_DICT` bit is set in the + :c:member:`~PyTypeObject.tp_flags` field, the traverse function must call + :c:func:`PyObject_VisitManagedDict` like this:: + + PyObject_VisitManagedDict((PyObject*)self, visit, arg); + .. warning:: When implementing :c:member:`~PyTypeObject.tp_traverse`, only the members that the instance *owns* (by having :term:`strong references @@ -1350,7 +1401,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) are allowed to be removed even if the instance is still alive). Note that :c:func:`Py_VISIT` requires the *visit* and *arg* parameters to - :c:func:`local_traverse` to have these specific names; don't name them just + :c:func:`!local_traverse` to have these specific names; don't name them just anything. Instances of :ref:`heap-allocated types ` hold a reference to @@ -1369,10 +1420,10 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** - Group: :const:`Py_TPFLAGS_HAVE_GC`, :attr:`tp_traverse`, :attr:`tp_clear` + Group: :c:macro:`Py_TPFLAGS_HAVE_GC`, :c:member:`~PyTypeObject.tp_traverse`, :c:member:`~PyTypeObject.tp_clear` This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_clear` and the - :const:`Py_TPFLAGS_HAVE_GC` flag bit: the flag bit, :c:member:`~PyTypeObject.tp_traverse`, and + :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit: the flag bit, :c:member:`~PyTypeObject.tp_traverse`, and :c:member:`~PyTypeObject.tp_clear` are all inherited from the base type if they are all zero in the subtype. @@ -1380,7 +1431,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. c:member:: inquiry PyTypeObject.tp_clear An optional pointer to a clear function for the garbage collector. This is only - used if the :const:`Py_TPFLAGS_HAVE_GC` flag bit is set. 
The signature is:: + used if the :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit is set. The signature is:: int tp_clear(PyObject *); @@ -1409,9 +1460,10 @@ and :c:type:`PyType_Type` effectively act as defaults.) } The :c:func:`Py_CLEAR` macro should be used, because clearing references is - delicate: the reference to the contained object must not be decremented until + delicate: the reference to the contained object must not be released + (via :c:func:`Py_DECREF`) until after the pointer to the contained object is set to ``NULL``. This is because - decrementing the reference count may cause the contained object to become trash, + releasing the reference may cause the contained object to become trash, triggering a chain of reclamation activity that may include invoking arbitrary Python code (due to finalizers, or weakref callbacks, associated with the contained object). If it's possible for such code to reference *self* again, @@ -1419,6 +1471,12 @@ and :c:type:`PyType_Type` effectively act as defaults.) so that *self* knows the contained object can no longer be used. The :c:func:`Py_CLEAR` macro performs the operations in a safe order. + If the :c:macro:`Py_TPFLAGS_MANAGED_DICT` bit is set in the + :c:member:`~PyTypeObject.tp_flags` field, the clear function must call + :c:func:`PyObject_ClearManagedDict` like this:: + + PyObject_ClearManagedDict((PyObject*)self); + Note that :c:member:`~PyTypeObject.tp_clear` is not *always* called before an instance is deallocated. For example, when reference counting is enough to determine that an object is no longer used, the cyclic garbage @@ -1436,10 +1494,10 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** - Group: :const:`Py_TPFLAGS_HAVE_GC`, :attr:`tp_traverse`, :attr:`tp_clear` + Group: :c:macro:`Py_TPFLAGS_HAVE_GC`, :c:member:`~PyTypeObject.tp_traverse`, :c:member:`~PyTypeObject.tp_clear` This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_traverse` and the - :const:`Py_TPFLAGS_HAVE_GC` flag bit: the flag bit, :c:member:`~PyTypeObject.tp_traverse`, and + :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit: the flag bit, :c:member:`~PyTypeObject.tp_traverse`, and :c:member:`~PyTypeObject.tp_clear` are all inherited from the base type if they are all zero in the subtype. @@ -1461,21 +1519,23 @@ and :c:type:`PyType_Type` effectively act as defaults.) The following constants are defined to be used as the third argument for :c:member:`~PyTypeObject.tp_richcompare` and for :c:func:`PyObject_RichCompare`: - +----------------+------------+ - | Constant | Comparison | - +================+============+ - | :const:`Py_LT` | ``<`` | - +----------------+------------+ - | :const:`Py_LE` | ``<=`` | - +----------------+------------+ - | :const:`Py_EQ` | ``==`` | - +----------------+------------+ - | :const:`Py_NE` | ``!=`` | - +----------------+------------+ - | :const:`Py_GT` | ``>`` | - +----------------+------------+ - | :const:`Py_GE` | ``>=`` | - +----------------+------------+ + .. c:namespace:: NULL + + +--------------------+------------+ + | Constant | Comparison | + +====================+============+ + | .. c:macro:: Py_LT | ``<`` | + +--------------------+------------+ + | .. c:macro:: Py_LE | ``<=`` | + +--------------------+------------+ + | .. c:macro:: Py_EQ | ``==`` | + +--------------------+------------+ + | .. c:macro:: Py_NE | ``!=`` | + +--------------------+------------+ + | .. c:macro:: Py_GT | ``>`` | + +--------------------+------------+ + | .. 
c:macro:: Py_GE | ``>=`` | + +--------------------+------------+ The following macro is defined to ease writing rich comparison functions: @@ -1487,7 +1547,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) they may be C ints or floats). The third argument specifies the requested operation, as for :c:func:`PyObject_RichCompare`. - The return value's reference count is properly incremented. + The returned value is a new :term:`strong reference`. On error, sets an exception and returns ``NULL`` from the function. @@ -1495,7 +1555,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Inheritance:** - Group: :attr:`tp_hash`, :attr:`tp_richcompare` + Group: :c:member:`~PyTypeObject.tp_hash`, :c:member:`~PyTypeObject.tp_richcompare` This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_hash`: a subtype inherits :c:member:`~PyTypeObject.tp_richcompare` and :c:member:`~PyTypeObject.tp_hash` when @@ -1504,16 +1564,16 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Default:** - :c:type:`PyBaseObject_Type` provides a :attr:`tp_richcompare` + :c:data:`PyBaseObject_Type` provides a :c:member:`~PyTypeObject.tp_richcompare` implementation, which may be inherited. However, if only - :attr:`tp_hash` is defined, not even the inherited function is used + :c:member:`~PyTypeObject.tp_hash` is defined, not even the inherited function is used and instances of the type will not be able to participate in any comparisons. .. c:member:: Py_ssize_t PyTypeObject.tp_weaklistoffset - While this field is still supported, :const:`Py_TPFLAGS_MANAGED_WEAKREF` + While this field is still supported, :c:macro:`Py_TPFLAGS_MANAGED_WEAKREF` should be used instead, if at all possible. If the instances of this type are weakly referenceable, this field is greater @@ -1526,7 +1586,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) Do not confuse this field with :c:member:`~PyTypeObject.tp_weaklist`; that is the list head for weak references to the type object itself. - It is an error to set both the :const:`Py_TPFLAGS_MANAGED_WEAKREF` bit and + It is an error to set both the :c:macro:`Py_TPFLAGS_MANAGED_WEAKREF` bit and :c:member:`~PyTypeObject.tp_weaklist`. **Inheritance:** @@ -1538,7 +1598,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Default:** - If the :const:`Py_TPFLAGS_MANAGED_WEAKREF` bit is set in the + If the :c:macro:`Py_TPFLAGS_MANAGED_WEAKREF` bit is set in the :c:member:`~PyTypeObject.tp_dict` field, then :c:member:`~PyTypeObject.tp_weaklistoffset` will be set to a negative value, to indicate that it is unsafe to use this field. @@ -1641,7 +1701,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) to a pointer, are valid C99 address constants. However, the unary '&' operator applied to a non-static variable - like :c:func:`PyBaseObject_Type` is not required to produce an address + like :c:data:`PyBaseObject_Type` is not required to produce an address constant. Compilers may support this (gcc does), MSVC does not. Both compilers are strictly standard conforming in this particular behavior. @@ -1667,7 +1727,19 @@ and :c:type:`PyType_Type` effectively act as defaults.) called; it may also be initialized to a dictionary containing initial attributes for the type. Once :c:func:`PyType_Ready` has initialized the type, extra attributes for the type may be added to this dictionary only if they don't - correspond to overloaded operations (like :meth:`__add__`). + correspond to overloaded operations (like :meth:`~object.__add__`). 
Once + initialization for the type has finished, this field should be + treated as read-only. + + Some types may not store their dictionary in this slot. + Use :c:func:`PyType_GetDict` to retrieve the dictionary for an arbitrary + type. + + .. versionchanged:: 3.12 + + Internals detail: For static builtin types, this is always ``NULL``. + Instead, the dict for such types is stored on ``PyInterpreterState``. + Use :c:func:`PyType_GetDict` to get the dict for an arbitrary type. **Inheritance:** @@ -1720,7 +1792,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. c:member:: Py_ssize_t PyTypeObject.tp_dictoffset - While this field is still supported, :const:`Py_TPFLAGS_MANAGED_DICT` should be + While this field is still supported, :c:macro:`Py_TPFLAGS_MANAGED_DICT` should be used instead, if at all possible. If the instances of this type have a dictionary containing instance variables, @@ -1739,7 +1811,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) dictionary, so it is may be more efficient to call :c:func:`PyObject_GetAttr` when accessing an attribute on the object. - It is an error to set both the :const:`Py_TPFLAGS_MANAGED_WEAKREF` bit and + It is an error to set both the :c:macro:`Py_TPFLAGS_MANAGED_WEAKREF` bit and :c:member:`~PyTypeObject.tp_dictoffset`. **Inheritance:** @@ -1747,15 +1819,15 @@ and :c:type:`PyType_Type` effectively act as defaults.) This field is inherited by subtypes. A subtype should not override this offset; doing so could be unsafe, if C code tries to access the dictionary at the previous offset. - To properly support inheritance, use :const:`Py_TPFLAGS_MANAGED_DICT`. + To properly support inheritance, use :c:macro:`Py_TPFLAGS_MANAGED_DICT`. **Default:** This slot has no default. For :ref:`static types `, if the - field is ``NULL`` then no :attr:`__dict__` gets created for instances. + field is ``NULL`` then no :attr:`~object.__dict__` gets created for instances. - If the :const:`Py_TPFLAGS_MANAGED_DICT` bit is set in the - :c:member:`~PyTypeObject.tp_dict` field, then + If the :c:macro:`Py_TPFLAGS_MANAGED_DICT` bit is set in the + :c:member:`~PyTypeObject.tp_flags` field, then :c:member:`~PyTypeObject.tp_dictoffset` will be set to ``-1``, to indicate that it is unsafe to use this field. @@ -1764,10 +1836,10 @@ and :c:type:`PyType_Type` effectively act as defaults.) An optional pointer to an instance initialization function. - This function corresponds to the :meth:`__init__` method of classes. Like - :meth:`__init__`, it is possible to create an instance without calling - :meth:`__init__`, and it is possible to reinitialize an instance by calling its - :meth:`__init__` method again. + This function corresponds to the :meth:`~object.__init__` method of classes. Like + :meth:`!__init__`, it is possible to create an instance without calling + :meth:`!__init__`, and it is possible to reinitialize an instance by calling its + :meth:`!__init__` method again. The function signature is:: @@ -1775,7 +1847,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) The self argument is the instance to be initialized; the *args* and *kwds* arguments represent positional and keyword arguments of the call to - :meth:`__init__`. + :meth:`~object.__init__`. The :c:member:`~PyTypeObject.tp_init` function, if not ``NULL``, is called when an instance is created normally by calling its type, after the type's :c:member:`~PyTypeObject.tp_new` function @@ -1814,7 +1886,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) 
:c:func:`PyType_GenericAlloc`, to force a standard heap allocation strategy. - For static subtypes, :c:type:`PyBaseObject_Type` uses + For static subtypes, :c:data:`PyBaseObject_Type` uses :c:func:`PyType_GenericAlloc`. That is the recommended value for all statically defined types. @@ -1841,7 +1913,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) in :c:member:`~PyTypeObject.tp_new`, while for mutable types, most initialization should be deferred to :c:member:`~PyTypeObject.tp_init`. - Set the :const:`Py_TPFLAGS_DISALLOW_INSTANTIATION` flag to disallow creating + Set the :c:macro:`Py_TPFLAGS_DISALLOW_INSTANTIATION` flag to disallow creating instances of the type in Python. **Inheritance:** @@ -1875,9 +1947,9 @@ and :c:type:`PyType_Type` effectively act as defaults.) In dynamic subtypes, this field is set to a deallocator suitable to match :c:func:`PyType_GenericAlloc` and the value of the - :const:`Py_TPFLAGS_HAVE_GC` flag bit. + :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit. - For static subtypes, :c:type:`PyBaseObject_Type` uses PyObject_Del. + For static subtypes, :c:data:`PyBaseObject_Type` uses :c:func:`PyObject_Del`. .. c:member:: inquiry PyTypeObject.tp_is_gc @@ -1886,7 +1958,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) The garbage collector needs to know whether a particular object is collectible or not. Normally, it is sufficient to look at the object's type's - :c:member:`~PyTypeObject.tp_flags` field, and check the :const:`Py_TPFLAGS_HAVE_GC` flag bit. But + :c:member:`~PyTypeObject.tp_flags` field, and check the :c:macro:`Py_TPFLAGS_HAVE_GC` flag bit. But some types have a mixture of statically and dynamically allocated instances, and the statically allocated instances are not collectible. Such types should define this function; it should return ``1`` for a collectible instance, and @@ -1905,15 +1977,26 @@ and :c:type:`PyType_Type` effectively act as defaults.) **Default:** This slot has no default. If this field is ``NULL``, - :const:`Py_TPFLAGS_HAVE_GC` is used as the functional equivalent. + :c:macro:`Py_TPFLAGS_HAVE_GC` is used as the functional equivalent. .. c:member:: PyObject* PyTypeObject.tp_bases Tuple of base types. - This is set for types created by a class statement. It should be ``NULL`` for - statically defined types. + This field should be set to ``NULL`` and treated as read-only. + Python will fill it in when the type is :c:func:`initialized `. + + For dynamically created classes, the ``Py_tp_bases`` + :c:type:`slot ` can be used instead of the *bases* argument + of :c:func:`PyType_FromSpecWithBases`. + The argument form is preferred. + + .. warning:: + + Multiple inheritance does not work well for statically defined types. + If you set ``tp_bases`` to a tuple, Python will not raise an error, + but some slots will only be inherited from the first base. **Inheritance:** @@ -1925,6 +2008,8 @@ and :c:type:`PyType_Type` effectively act as defaults.) Tuple containing the expanded set of base types, starting with the type itself and ending with :class:`object`, in Method Resolution Order. + This field should be set to ``NULL`` and treated as read-only. + Python will fill it in when the type is :c:func:`initialized `. **Inheritance:** @@ -2039,7 +2124,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. versionchanged:: 3.8 Before version 3.8 it was necessary to set the - :const:`Py_TPFLAGS_HAVE_FINALIZE` flags bit in order for this field to be + :c:macro:`Py_TPFLAGS_HAVE_FINALIZE` flags bit in order for this field to be used. 
This is no longer required. .. seealso:: "Safe object finalization" (:pep:`442`) @@ -2051,7 +2136,7 @@ and :c:type:`PyType_Type` effectively act as defaults.) In other words, it is used to implement :ref:`vectorcall ` for ``type.__call__``. If ``tp_vectorcall`` is ``NULL``, the default call implementation - using :attr:`__new__` and :attr:`__init__` is used. + using :meth:`~object.__new__` and :meth:`~object.__init__` is used. **Inheritance:** @@ -2060,6 +2145,13 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. versionadded:: 3.9 (the field exists since 3.8 but it's only used since 3.9) +.. c:member:: unsigned char PyTypeObject.tp_watched + + Internal. Do not use. + + .. versionadded:: 3.12 + + .. _static-types: Static Types @@ -2080,7 +2172,7 @@ This results in types that are limited relative to types defined in Python: include any subinterpreter-specific state. Also, since :c:type:`PyTypeObject` is only part of the :ref:`Limited API -` as an opaque struct, any extension modules using static types must be +` as an opaque struct, any extension modules using static types must be compiled for a specific Python minor version. @@ -2091,7 +2183,7 @@ Heap Types An alternative to :ref:`static types ` is *heap-allocated types*, or *heap types* for short, which correspond closely to classes created by -Python's ``class`` statement. Heap types have the :const:`Py_TPFLAGS_HEAPTYPE` +Python's ``class`` statement. Heap types have the :c:macro:`Py_TPFLAGS_HEAPTYPE` flag set. This is done by filling a :c:type:`PyType_Spec` structure and calling @@ -2171,8 +2263,8 @@ Number Object Structures .. note:: - The :c:data:`nb_reserved` field should always be ``NULL``. It - was previously called :c:data:`nb_long`, and was renamed in + The :c:member:`~PyNumberMethods.nb_reserved` field should always be ``NULL``. It + was previously called :c:member:`!nb_long`, and was renamed in Python 3.0.1. .. c:member:: binaryfunc PyNumberMethods.nb_add @@ -2243,8 +2335,8 @@ Mapping Object Structures .. c:member:: objobjargproc PyMappingMethods.mp_ass_subscript This function is used by :c:func:`PyObject_SetItem`, - :c:func:`PyObject_DelItem`, :c:func:`PyObject_SetSlice` and - :c:func:`PyObject_DelSlice`. It has the same signature as + :c:func:`PyObject_DelItem`, :c:func:`PySequence_SetSlice` and + :c:func:`PySequence_DelSlice`. It has the same signature as :c:func:`!PyObject_SetItem`, but *v* can also be set to ``NULL`` to delete an item. If this slot is ``NULL``, the object does not support item assignment and deletion. @@ -2290,9 +2382,9 @@ Sequence Object Structures This slot must be filled for the :c:func:`PySequence_Check` function to return ``1``, it can be ``NULL`` otherwise. - Negative indexes are handled as follows: if the :attr:`sq_length` slot is + Negative indexes are handled as follows: if the :c:member:`~PySequenceMethods.sq_length` slot is filled, it is called and the sequence length is used to compute a positive - index which is passed to :attr:`sq_item`. If :attr:`sq_length` is ``NULL``, + index which is passed to :c:member:`~PySequenceMethods.sq_item`. If :c:member:`!sq_length` is ``NULL``, the index is passed as is to the function. .. c:member:: ssizeobjargproc PySequenceMethods.sq_ass_item @@ -2355,7 +2447,7 @@ Buffer Object Structures Except for point (3), an implementation of this function MUST take these steps: - (1) Check if the request can be met. If not, raise :c:data:`PyExc_BufferError`, + (1) Check if the request can be met. 
If not, raise :exc:`BufferError`, set :c:expr:`view->obj` to ``NULL`` and return ``-1``. (2) Fill in the requested fields. @@ -2466,7 +2558,7 @@ Async Object Structures PyObject *am_aiter(PyObject *self); Must return an :term:`asynchronous iterator` object. - See :meth:`__anext__` for details. + See :meth:`~object.__anext__` for details. This slot may be set to ``NULL`` if an object does not implement asynchronous iteration protocol. @@ -2477,7 +2569,8 @@ Async Object Structures PyObject *am_anext(PyObject *self); - Must return an :term:`awaitable` object. See :meth:`__anext__` for details. + Must return an :term:`awaitable` object. + See :meth:`~object.__anext__` for details. This slot may be set to ``NULL``. .. c:member:: sendfunc PyAsyncMethods.am_send @@ -2502,8 +2595,8 @@ Slot Type typedefs The purpose of this function is to separate memory allocation from memory initialization. It should return a pointer to a block of memory of adequate length for the instance, suitably aligned, and initialized to zeros, but with - :attr:`ob_refcnt` set to ``1`` and :attr:`ob_type` set to the type argument. If - the type's :c:member:`~PyTypeObject.tp_itemsize` is non-zero, the object's :attr:`ob_size` field + :c:member:`~PyObject.ob_refcnt` set to ``1`` and :c:member:`~PyObject.ob_type` set to the type argument. If + the type's :c:member:`~PyTypeObject.tp_itemsize` is non-zero, the object's :c:member:`~PyVarObject.ob_size` field should be initialized to *nitems* and the length of the allocated memory block should be ``tp_basicsize + nitems*tp_itemsize``, rounded up to a multiple of ``sizeof(void*)``; otherwise, *nitems* is not used and the length of the block @@ -2594,7 +2687,7 @@ Slot Type typedefs .. c:type:: PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t) -.. c:type:: int (*ssizeobjargproc)(PyObject *, Py_ssize_t) +.. c:type:: int (*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *) .. c:type:: int (*objobjproc)(PyObject *, PyObject *) @@ -2699,7 +2792,7 @@ A type that supports weakrefs, instance dicts, and hashing:: A str subclass that cannot be subclassed and cannot be called to create instances (e.g. uses a separate factory func) using -:c:data:`Py_TPFLAGS_DISALLOW_INSTANTIATION` flag:: +:c:macro:`Py_TPFLAGS_DISALLOW_INSTANTIATION` flag:: typedef struct { PyUnicodeObject raw; diff --git a/Doc/c-api/unicode.rst b/Doc/c-api/unicode.rst index f062f14e9a75614..e654412965a727d 100644 --- a/Doc/c-api/unicode.rst +++ b/Doc/c-api/unicode.rst @@ -44,7 +44,7 @@ Python: .. c:type:: Py_UNICODE - This is a typedef of :c:expr:`wchar_t`, which is a 16-bit type or 32-bit type + This is a typedef of :c:type:`wchar_t`, which is a 16-bit type or 32-bit type depending on the platform. .. versionchanged:: 3.3 @@ -52,6 +52,8 @@ Python: whether you selected a "narrow" or "wide" Unicode version of Python at build time. + .. deprecated-removed:: 3.13 3.15 + .. c:type:: PyASCIIObject PyCompactUnicodeObject @@ -270,25 +272,16 @@ These APIs can be used for fast direct character conversions: Return the character *ch* converted to lower case. - .. deprecated:: 3.3 - This function uses simple case mappings. - .. c:function:: Py_UCS4 Py_UNICODE_TOUPPER(Py_UCS4 ch) Return the character *ch* converted to upper case. - .. deprecated:: 3.3 - This function uses simple case mappings. - .. c:function:: Py_UCS4 Py_UNICODE_TOTITLE(Py_UCS4 ch) Return the character *ch* converted to title case. - .. deprecated:: 3.3 - This function uses simple case mappings. - .. 
c:function:: int Py_UNICODE_TODECIMAL(Py_UCS4 ch) @@ -394,98 +387,149 @@ APIs: arguments, calculate the size of the resulting Python Unicode string and return a string with the values formatted into it. The variable arguments must be C types and must correspond exactly to the format characters in the *format* - ASCII-encoded string. The following format characters are allowed: - - .. % This should be exactly the same as the table in PyErr_Format. - - .. tabularcolumns:: |l|l|L| - - +-------------------+---------------------+----------------------------------+ - | Format Characters | Type | Comment | - +===================+=====================+==================================+ - | :attr:`%%` | *n/a* | The literal % character. | - +-------------------+---------------------+----------------------------------+ - | :attr:`%c` | int | A single character, | - | | | represented as a C int. | - +-------------------+---------------------+----------------------------------+ - | :attr:`%d` | int | Equivalent to | - | | | ``printf("%d")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%u` | unsigned int | Equivalent to | - | | | ``printf("%u")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%ld` | long | Equivalent to | - | | | ``printf("%ld")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%li` | long | Equivalent to | - | | | ``printf("%li")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%lu` | unsigned long | Equivalent to | - | | | ``printf("%lu")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%lld` | long long | Equivalent to | - | | | ``printf("%lld")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%lli` | long long | Equivalent to | - | | | ``printf("%lli")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%llu` | unsigned long long | Equivalent to | - | | | ``printf("%llu")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%zd` | :c:type:`\ | Equivalent to | - | | Py_ssize_t` | ``printf("%zd")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%zi` | :c:type:`\ | Equivalent to | - | | Py_ssize_t` | ``printf("%zi")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%zu` | size_t | Equivalent to | - | | | ``printf("%zu")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%i` | int | Equivalent to | - | | | ``printf("%i")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%x` | int | Equivalent to | - | | | ``printf("%x")``. [1]_ | - +-------------------+---------------------+----------------------------------+ - | :attr:`%s` | const char\* | A null-terminated C character | - | | | array. | - +-------------------+---------------------+----------------------------------+ - | :attr:`%p` | const void\* | The hex representation of a C | - | | | pointer. Mostly equivalent to | - | | | ``printf("%p")`` except that | - | | | it is guaranteed to start with | - | | | the literal ``0x`` regardless | - | | | of what the platform's | - | | | ``printf`` yields. 
| - +-------------------+---------------------+----------------------------------+ - | :attr:`%A` | PyObject\* | The result of calling | - | | | :func:`ascii`. | - +-------------------+---------------------+----------------------------------+ - | :attr:`%U` | PyObject\* | A Unicode object. | - +-------------------+---------------------+----------------------------------+ - | :attr:`%V` | PyObject\*, | A Unicode object (which may be | - | | const char\* | ``NULL``) and a null-terminated | - | | | C character array as a second | - | | | parameter (which will be used, | - | | | if the first parameter is | - | | | ``NULL``). | - +-------------------+---------------------+----------------------------------+ - | :attr:`%S` | PyObject\* | The result of calling | - | | | :c:func:`PyObject_Str`. | - +-------------------+---------------------+----------------------------------+ - | :attr:`%R` | PyObject\* | The result of calling | - | | | :c:func:`PyObject_Repr`. | - +-------------------+---------------------+----------------------------------+ + ASCII-encoded string. + + A conversion specifier contains two or more characters and has the following + components, which must occur in this order: + + #. The ``'%'`` character, which marks the start of the specifier. + + #. Conversion flags (optional), which affect the result of some conversion + types. + + #. Minimum field width (optional). + If specified as an ``'*'`` (asterisk), the actual width is given in the + next argument, which must be of type :c:expr:`int`, and the object to + convert comes after the minimum field width and optional precision. + + #. Precision (optional), given as a ``'.'`` (dot) followed by the precision. + If specified as ``'*'`` (an asterisk), the actual precision is given in + the next argument, which must be of type :c:expr:`int`, and the value to + convert comes after the precision. + + #. Length modifier (optional). + + #. Conversion type. + + The conversion flag characters are: + + .. tabularcolumns:: |l|L| + + +-------+-------------------------------------------------------------+ + | Flag | Meaning | + +=======+=============================================================+ + | ``0`` | The conversion will be zero padded for numeric values. | + +-------+-------------------------------------------------------------+ + | ``-`` | The converted value is left adjusted (overrides the ``0`` | + | | flag if both are given). | + +-------+-------------------------------------------------------------+ + + The length modifiers for following integer conversions (``d``, ``i``, + ``o``, ``u``, ``x``, or ``X``) specify the type of the argument + (:c:expr:`int` by default): + + .. 
tabularcolumns:: |l|L| + + +----------+-----------------------------------------------------+ + | Modifier | Types | + +==========+=====================================================+ + | ``l`` | :c:expr:`long` or :c:expr:`unsigned long` | + +----------+-----------------------------------------------------+ + | ``ll`` | :c:expr:`long long` or :c:expr:`unsigned long long` | + +----------+-----------------------------------------------------+ + | ``j`` | :c:type:`intmax_t` or :c:type:`uintmax_t` | + +----------+-----------------------------------------------------+ + | ``z`` | :c:type:`size_t` or :c:type:`ssize_t` | + +----------+-----------------------------------------------------+ + | ``t`` | :c:type:`ptrdiff_t` | + +----------+-----------------------------------------------------+ + + The length modifier ``l`` for following conversions ``s`` or ``V`` specify + that the type of the argument is :c:expr:`const wchar_t*`. + + The conversion specifiers are: + + .. list-table:: + :widths: auto + :header-rows: 1 + + * - Conversion Specifier + - Type + - Comment + + * - ``%`` + - *n/a* + - The literal ``%`` character. + + * - ``d``, ``i`` + - Specified by the length modifier + - The decimal representation of a signed C integer. + + * - ``u`` + - Specified by the length modifier + - The decimal representation of an unsigned C integer. + + * - ``o`` + - Specified by the length modifier + - The octal representation of an unsigned C integer. + + * - ``x`` + - Specified by the length modifier + - The hexadecimal representation of an unsigned C integer (lowercase). + + * - ``X`` + - Specified by the length modifier + - The hexadecimal representation of an unsigned C integer (uppercase). + + * - ``c`` + - :c:expr:`int` + - A single character. + + * - ``s`` + - :c:expr:`const char*` or :c:expr:`const wchar_t*` + - A null-terminated C character array. + + * - ``p`` + - :c:expr:`const void*` + - The hex representation of a C pointer. + Mostly equivalent to ``printf("%p")`` except that it is guaranteed to + start with the literal ``0x`` regardless of what the platform's + ``printf`` yields. + + * - ``A`` + - :c:expr:`PyObject*` + - The result of calling :func:`ascii`. + + * - ``U`` + - :c:expr:`PyObject*` + - A Unicode object. + + * - ``V`` + - :c:expr:`PyObject*`, :c:expr:`const char*` or :c:expr:`const wchar_t*` + - A Unicode object (which may be ``NULL``) and a null-terminated + C character array as a second parameter (which will be used, + if the first parameter is ``NULL``). + + * - ``S`` + - :c:expr:`PyObject*` + - The result of calling :c:func:`PyObject_Str`. + + * - ``R`` + - :c:expr:`PyObject*` + - The result of calling :c:func:`PyObject_Repr`. .. note:: The width formatter unit is number of characters rather than bytes. - The precision formatter unit is number of bytes for ``"%s"`` and + The precision formatter unit is number of bytes or :c:type:`wchar_t` + items (if the length modifier ``l`` is used) for ``"%s"`` and ``"%V"`` (if the ``PyObject*`` argument is ``NULL``), and a number of characters for ``"%A"``, ``"%U"``, ``"%S"``, ``"%R"`` and ``"%V"`` (if the ``PyObject*`` argument is not ``NULL``). - .. [1] For integer specifiers (d, u, ld, li, lu, lld, lli, llu, zd, zi, - zu, i, x): the 0-conversion flag has effect even when a precision is given. + .. note:: + Unlike to C :c:func:`printf` the ``0`` flag has effect even when + a precision is given for integer conversions (``d``, ``i``, ``u``, ``o``, + ``x``, or ``X``). .. versionchanged:: 3.2 Support for ``"%lld"`` and ``"%llu"`` added. 
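A minimal sketch of the formatting behaviour documented above (not part of the patch; the helper name ``make_label`` is hypothetical, and the ``-`` flag and ``*`` width/precision require CPython 3.12 or newer)::

   #include <Python.h>

   static PyObject *
   make_label(const char *name, Py_ssize_t count, int flags)
   {
       /* "%-10s" left-adjusts *name* in a 10-character field, "%zd" takes a
          Py_ssize_t, and "%04x" zero-pads the hexadecimal *flags* value. */
       return PyUnicode_FromFormat("name=%-10s count=%zd flags=0x%04x",
                                   name, count, flags);
   }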
@@ -498,6 +542,13 @@ APIs: ``"%V"``, ``"%S"``, ``"%R"`` added. .. versionchanged:: 3.12 + Support for conversion specifiers ``o`` and ``X``. + Support for length modifiers ``j`` and ``t``. + Length modifiers are now applied to all integer conversions. + Length modifier ``l`` is now applied to conversion specifiers ``s`` and ``V``. + Support for variable width and precision ``*``. + Support for flag ``-``. + An unrecognized format character now sets a :exc:`SystemError`. In previous versions it caused all the rest of the format string to be copied as-is to the result string, and any extra arguments discarded. @@ -509,6 +560,15 @@ APIs: arguments. +.. c:function:: PyObject* PyUnicode_FromObject(PyObject *obj) + + Copy an instance of a Unicode subtype to a new true Unicode object if + necessary. If *obj* is already a true Unicode object (not a subtype), + return a new :term:`strong reference` to the object. + + Objects other than Unicode or its subtypes will cause a :exc:`TypeError`. + + .. c:function:: PyObject* PyUnicode_FromEncodedObject(PyObject *obj, \ const char *encoding, const char *errors) @@ -541,7 +601,7 @@ APIs: Py_ssize_t how_many) Copy characters from one Unicode object into another. This function performs - character conversion when necessary and falls back to :c:func:`memcpy` if + character conversion when necessary and falls back to :c:func:`!memcpy` if possible. Returns ``-1`` and sets an exception on error, otherwise returns the number of copied characters. @@ -616,15 +676,6 @@ APIs: .. versionadded:: 3.3 -.. c:function:: PyObject* PyUnicode_FromObject(PyObject *obj) - - Copy an instance of a Unicode subtype to a new true Unicode object if - necessary. If *obj* is already a true Unicode object (not a subtype), - return the reference with incremented refcount. - - Objects other than Unicode or its subtypes will cause a :exc:`TypeError`. - - Locale Encoding """"""""""""""" @@ -663,7 +714,7 @@ system. .. c:function:: PyObject* PyUnicode_DecodeLocale(const char *str, const char *errors) Similar to :c:func:`PyUnicode_DecodeLocaleAndSize`, but compute the string - length using :c:func:`strlen`. + length using :c:func:`!strlen`. .. versionadded:: 3.3 @@ -788,11 +839,11 @@ conversion function: wchar_t Support """"""""""""""" -:c:expr:`wchar_t` support for platforms which support it: +:c:type:`wchar_t` support for platforms which support it: .. c:function:: PyObject* PyUnicode_FromWideChar(const wchar_t *w, Py_ssize_t size) - Create a Unicode object from the :c:expr:`wchar_t` buffer *w* of the given *size*. + Create a Unicode object from the :c:type:`wchar_t` buffer *w* of the given *size*. Passing ``-1`` as the *size* indicates that the function must itself compute the length, using wcslen. Return ``NULL`` on failure. @@ -800,9 +851,9 @@ wchar_t Support .. c:function:: Py_ssize_t PyUnicode_AsWideChar(PyObject *unicode, wchar_t *w, Py_ssize_t size) - Copy the Unicode object contents into the :c:expr:`wchar_t` buffer *w*. At most - *size* :c:expr:`wchar_t` characters are copied (excluding a possibly trailing - null termination character). Return the number of :c:expr:`wchar_t` characters + Copy the Unicode object contents into the :c:type:`wchar_t` buffer *w*. At most + *size* :c:type:`wchar_t` characters are copied (excluding a possibly trailing + null termination character). Return the number of :c:type:`wchar_t` characters copied or ``-1`` in case of an error. Note that the resulting :c:expr:`wchar_t*` string may or may not be null-terminated. 
It is the responsibility of the caller to make sure that the :c:expr:`wchar_t*` string is null-terminated in case this is @@ -816,12 +867,12 @@ wchar_t Support Convert the Unicode object to a wide character string. The output string always ends with a null character. If *size* is not ``NULL``, write the number of wide characters (excluding the trailing null termination character) into - *\*size*. Note that the resulting :c:expr:`wchar_t` string might contain + *\*size*. Note that the resulting :c:type:`wchar_t` string might contain null characters, which would cause the string to be truncated when used with most C functions. If *size* is ``NULL`` and the :c:expr:`wchar_t*` string contains null characters a :exc:`ValueError` is raised. - Returns a buffer allocated by :c:func:`PyMem_New` (use + Returns a buffer allocated by :c:macro:`PyMem_New` (use :c:func:`PyMem_Free` to free it) on success. On error, returns ``NULL`` and *\*size* is undefined. Raises a :exc:`MemoryError` if memory allocation is failed. @@ -920,8 +971,8 @@ These are the UTF-8 codec APIs: returned buffer always has an extra null byte appended (not included in *size*), regardless of whether there are any other null code points. - In the case of an error, ``NULL`` is returned with an exception set and no - *size* is stored. + On error, set an exception, set *size* to ``-1`` (if it's not NULL) and + return ``NULL``. This caches the UTF-8 representation of the string in the Unicode object, and subsequent calls will return a pointer to the same buffer. The caller is not @@ -934,7 +985,7 @@ These are the UTF-8 codec APIs: The return type is now ``const char *`` rather of ``char *``. .. versionchanged:: 3.10 - This function is a part of the :ref:`limited API `. + This function is a part of the :ref:`limited API `. .. c:function:: const char* PyUnicode_AsUTF8(PyObject *unicode) @@ -1154,9 +1205,9 @@ Character Map Codecs This codec is special in that it can be used to implement many different codecs (and this is in fact what was done to obtain most of the standard codecs -included in the :mod:`encodings` package). The codec uses mappings to encode and +included in the :mod:`!encodings` package). The codec uses mappings to encode and decode characters. The mapping objects provided must support the -:meth:`__getitem__` mapping interface; dictionaries and sequences work well. +:meth:`~object.__getitem__` mapping interface; dictionaries and sequences work well. These are the mapping codec APIs: @@ -1199,7 +1250,7 @@ The following codec API is special in that maps Unicode to Unicode. The mapping table must map Unicode ordinal integers to Unicode ordinal integers or ``None`` (causing deletion of the character). - Mapping tables need only provide the :meth:`__getitem__` interface; dictionaries + Mapping tables need only provide the :meth:`~object.__getitem__` interface; dictionaries and sequences work well. Unmapped character ordinals (ones which cause a :exc:`LookupError`) are left untouched and are copied as-is. @@ -1241,7 +1292,7 @@ the user settings on the machine running the codec. Encode the Unicode object using the specified code page and return a Python bytes object. Return ``NULL`` if an exception was raised by the codec. Use - :c:data:`CP_ACP` code page to get the MBCS encoder. + :c:macro:`!CP_ACP` code page to get the MBCS encoder. .. versionadded:: 3.3 @@ -1345,6 +1396,28 @@ They all return ``NULL`` or ``-1`` if an exception occurs. :c:func:`PyErr_Occurred` to check for errors. +.. 
c:function:: int PyUnicode_EqualToUTF8AndSize(PyObject *unicode, const char *string, Py_ssize_t size) + + Compare a Unicode object with a char buffer which is interpreted as + being UTF-8 or ASCII encoded and return true (``1``) if they are equal, + or false (``0``) otherwise. + If the Unicode object contains surrogate characters or + the C string is not valid UTF-8, false (``0``) is returned. + + This function does not raise exceptions. + + .. versionadded:: 3.13 + + +.. c:function:: int PyUnicode_EqualToUTF8(PyObject *unicode, const char *string) + + Similar to :c:func:`PyUnicode_EqualToUTF8AndSize`, but compute *string* + length using :c:func:`!strlen`. + If the Unicode object contains null characters, false (``0``) is returned. + + .. versionadded:: 3.13 + + .. c:function:: int PyUnicode_CompareWithASCIIString(PyObject *uni, const char *string) Compare a Unicode object, *uni*, with *string* and return ``-1``, ``0``, ``1`` for less @@ -1360,11 +1433,11 @@ They all return ``NULL`` or ``-1`` if an exception occurs. Rich compare two Unicode strings and return one of the following: * ``NULL`` in case an exception was raised - * :const:`Py_True` or :const:`Py_False` for successful comparisons - * :const:`Py_NotImplemented` in case the type combination is unknown + * :c:data:`Py_True` or :c:data:`Py_False` for successful comparisons + * :c:data:`Py_NotImplemented` in case the type combination is unknown - Possible values for *op* are :const:`Py_GT`, :const:`Py_GE`, :const:`Py_EQ`, - :const:`Py_NE`, :const:`Py_LT`, and :const:`Py_LE`. + Possible values for *op* are :c:macro:`Py_GT`, :c:macro:`Py_GE`, :c:macro:`Py_EQ`, + :c:macro:`Py_NE`, :c:macro:`Py_LT`, and :c:macro:`Py_LE`. .. c:function:: PyObject* PyUnicode_Format(PyObject *format, PyObject *args) @@ -1387,11 +1460,11 @@ They all return ``NULL`` or ``-1`` if an exception occurs. Intern the argument *\*string* in place. The argument must be the address of a pointer variable pointing to a Python Unicode string object. If there is an existing interned string that is the same as *\*string*, it sets *\*string* to - it (decrementing the reference count of the old string object and incrementing - the reference count of the interned string object), otherwise it leaves - *\*string* alone and interns it (incrementing its reference count). - (Clarification: even though there is a lot of talk about reference counts, think - of this function as reference-count-neutral; you own the object after the call + it (releasing the reference to the old string object and creating a new + :term:`strong reference` to the interned string object), otherwise it leaves + *\*string* alone and interns it (creating a new :term:`strong reference`). + (Clarification: even though there is a lot of talk about references, think + of this function as reference-neutral; you own the object after the call if and only if you owned it before the call.) diff --git a/Doc/c-api/utilities.rst b/Doc/c-api/utilities.rst index a805b564763c407..48ae54acebe8879 100644 --- a/Doc/c-api/utilities.rst +++ b/Doc/c-api/utilities.rst @@ -17,5 +17,7 @@ and parsing function arguments and constructing Python values from C values. marshal.rst arg.rst conversion.rst + hash.rst reflection.rst codec.rst + perfmaps.rst diff --git a/Doc/c-api/veryhigh.rst b/Doc/c-api/veryhigh.rst index 513856d8a48d70f..324518c035096b4 100644 --- a/Doc/c-api/veryhigh.rst +++ b/Doc/c-api/veryhigh.rst @@ -12,12 +12,12 @@ file or a buffer, but they will not let you interact in a more detailed way with the interpreter. 
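The ``PyUnicode_EqualToUTF8AndSize()`` and ``PyUnicode_EqualToUTF8()`` functions documented above compare a string against C data without allocating a temporary object and without ever setting an exception. A hedged sketch of the calling convention (``is_strict_mode`` is a hypothetical helper, assuming the Python 3.13 headers)::

   #include <Python.h>

   static int
   is_strict_mode(PyObject *mode)
   {
       /* Returns 1 if *mode* equals "strict"; returns 0 otherwise,
          including for surrogates or non-UTF-8 data.  No exception is set. */
       return PyUnicode_EqualToUTF8(mode, "strict");
   }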
Several of these functions accept a start symbol from the grammar as a -parameter. The available start symbols are :const:`Py_eval_input`, -:const:`Py_file_input`, and :const:`Py_single_input`. These are described +parameter. The available start symbols are :c:data:`Py_eval_input`, +:c:data:`Py_file_input`, and :c:data:`Py_single_input`. These are described following the functions which accept them as parameters. Note also that several of these functions take :c:expr:`FILE*` parameters. One -particular issue which needs to be handled carefully is that the :c:expr:`FILE` +particular issue which needs to be handled carefully is that the :c:type:`FILE` structure for different C libraries can be different and incompatible. Under Windows (at least), it is possible for dynamically linked extensions to actually use different libraries, so care should be taken that :c:expr:`FILE*` parameters @@ -167,6 +167,10 @@ the same library that the Python runtime is using. event loops, as done in the :file:`Modules/_tkinter.c` in the Python source code. + .. versionchanged:: 3.12 + This function is only called from the + :ref:`main interpreter `. + .. c:var:: char* (*PyOS_ReadlineFunctionPointer)(FILE *, FILE *, const char *) @@ -187,6 +191,10 @@ the same library that the Python runtime is using. :c:func:`PyMem_RawRealloc`, instead of being allocated by :c:func:`PyMem_Malloc` or :c:func:`PyMem_Realloc`. + .. versionchanged:: 3.12 + This function is only called from the + :ref:`main interpreter `. + .. c:function:: PyObject* PyRun_String(const char *str, int start, PyObject *globals, PyObject *locals) This is a simplified interface to :c:func:`PyRun_StringFlags` below, leaving @@ -248,8 +256,8 @@ the same library that the Python runtime is using. Parse and compile the Python source code in *str*, returning the resulting code object. The start token is given by *start*; this can be used to constrain the - code which can be compiled and should be :const:`Py_eval_input`, - :const:`Py_file_input`, or :const:`Py_single_input`. The filename specified by + code which can be compiled and should be :c:data:`Py_eval_input`, + :c:data:`Py_file_input`, or :c:data:`Py_single_input`. The filename specified by *filename* is used to construct the code object and may appear in tracebacks or :exc:`SyntaxError` exception messages. This returns ``NULL`` if the code cannot be parsed or compiled. @@ -345,7 +353,7 @@ the same library that the Python runtime is using. executed, it is passed as ``PyCompilerFlags *flags``. In this case, ``from __future__ import`` can modify *flags*. - Whenever ``PyCompilerFlags *flags`` is ``NULL``, :attr:`cf_flags` is treated as + Whenever ``PyCompilerFlags *flags`` is ``NULL``, :c:member:`~PyCompilerFlags.cf_flags` is treated as equal to ``0``, and any modification due to ``from __future__ import`` is discarded. @@ -359,7 +367,7 @@ the same library that the Python runtime is using. initialized to ``PY_MINOR_VERSION``. The field is ignored by default, it is used if and only if - ``PyCF_ONLY_AST`` flag is set in *cf_flags*. + ``PyCF_ONLY_AST`` flag is set in :c:member:`~PyCompilerFlags.cf_flags`. .. versionchanged:: 3.8 Added *cf_feature_version* field. diff --git a/Doc/c-api/weakref.rst b/Doc/c-api/weakref.rst index ace743ba01c5f57..038f54a9751fd13 100644 --- a/Doc/c-api/weakref.rst +++ b/Doc/c-api/weakref.rst @@ -11,20 +11,20 @@ simple reference object, and the second acts as a proxy for the original object as much as it can. -.. c:function:: int PyWeakref_Check(ob) +.. 
c:function:: int PyWeakref_Check(PyObject *ob) - Return true if *ob* is either a reference or proxy object. This function + Return non-zero if *ob* is either a reference or proxy object. This function always succeeds. -.. c:function:: int PyWeakref_CheckRef(ob) +.. c:function:: int PyWeakref_CheckRef(PyObject *ob) - Return true if *ob* is a reference object. This function always succeeds. + Return non-zero if *ob* is a reference object. This function always succeeds. -.. c:function:: int PyWeakref_CheckProxy(ob) +.. c:function:: int PyWeakref_CheckProxy(PyObject *ob) - Return true if *ob* is a proxy object. This function always succeeds. + Return non-zero if *ob* is a proxy object. This function always succeeds. .. c:function:: PyObject* PyWeakref_NewRef(PyObject *ob, PyObject *callback) @@ -51,10 +51,23 @@ as much as it can. ``None``, or ``NULL``, this will return ``NULL`` and raise :exc:`TypeError`. +.. c:function:: int PyWeakref_GetRef(PyObject *ref, PyObject **pobj) + + Get a :term:`strong reference` to the referenced object from a weak + reference, *ref*, into *\*pobj*. + + * On success, set *\*pobj* to a new :term:`strong reference` to the + referenced object and return 1. + * If the reference is dead, set *\*pobj* to ``NULL`` and return 0. + * On error, raise an exception and return -1. + + .. versionadded:: 3.13 + + .. c:function:: PyObject* PyWeakref_GetObject(PyObject *ref) - Return the referenced object from a weak reference, *ref*. If the referent is - no longer live, returns :const:`Py_None`. + Return a :term:`borrowed reference` to the referenced object from a weak + reference, *ref*. If the referent is no longer live, returns ``Py_None``. .. note:: @@ -63,7 +76,23 @@ as much as it can. except when it cannot be destroyed before the last usage of the borrowed reference. + .. deprecated-removed:: 3.13 3.15 + Use :c:func:`PyWeakref_GetRef` instead. + .. c:function:: PyObject* PyWeakref_GET_OBJECT(PyObject *ref) Similar to :c:func:`PyWeakref_GetObject`, but does no error checking. + + .. deprecated-removed:: 3.13 3.15 + Use :c:func:`PyWeakref_GetRef` instead. + + +.. c:function:: void PyObject_ClearWeakRefs(PyObject *object) + + This function is called by the :c:member:`~PyTypeObject.tp_dealloc` handler + to clear weak references. + + This iterates through the weak references for *object* and calls callbacks + for those references which have one. It returns when all callbacks have + been attempted. diff --git a/Doc/conf.py b/Doc/conf.py index be1c9fff51a277b..f1b411126c4e871 100644 --- a/Doc/conf.py +++ b/Doc/conf.py @@ -13,9 +13,25 @@ # General configuration # --------------------- -extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', - 'pyspecific', 'c_annotations', 'escape4chm', - 'asdl_highlight', 'peg_highlight', 'glossary_search'] +extensions = [ + 'asdl_highlight', + 'c_annotations', + 'escape4chm', + 'glossary_search', + 'peg_highlight', + 'pyspecific', + 'sphinx.ext.coverage', + 'sphinx.ext.doctest', +] + +# Skip if downstream redistributors haven't installed it +try: + import sphinxext.opengraph +except ImportError: + pass +else: + extensions.append('sphinxext.opengraph') + doctest_global_setup = ''' try: @@ -50,14 +66,183 @@ highlight_language = 'python3' # Minimum version of sphinx required -needs_sphinx = '3.2' +needs_sphinx = '4.2' +# Ignore any .rst files in the includes/ directory; +# they're embedded in pages but not rendered individually. # Ignore any .rst files in the venv/ directory. 
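The ``PyWeakref_GetRef()`` function documented above returns a :term:`strong reference` through an output parameter and signals the dead-referent and error cases through its return value. A hedged sketch of that convention (``use_referent`` is a hypothetical helper, assuming the Python 3.13 headers)::

   #include <Python.h>

   static int
   use_referent(PyObject *ref)
   {
       PyObject *obj;
       int rc = PyWeakref_GetRef(ref, &obj);
       if (rc < 0) {
           return -1;          /* error: an exception is already set */
       }
       if (rc == 0) {
           return 0;           /* referent is gone; obj is NULL */
       }
       /* rc == 1: obj holds a new strong reference that we must release */
       Py_DECREF(obj);
       return 1;
   }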
-exclude_patterns = ['venv/*', 'README.rst'] +exclude_patterns = ['includes/*.rst', 'venv/*', 'README.rst'] venvdir = os.getenv('VENVDIR') if venvdir is not None: exclude_patterns.append(venvdir + '/*') +nitpick_ignore = [ + # Standard C functions + ('c:func', 'calloc'), + ('c:func', 'dlopen'), + ('c:func', 'exec'), + ('c:func', 'fcntl'), + ('c:func', 'fork'), + ('c:func', 'free'), + ('c:func', 'gmtime'), + ('c:func', 'localtime'), + ('c:func', 'main'), + ('c:func', 'malloc'), + ('c:func', 'printf'), + ('c:func', 'realloc'), + ('c:func', 'snprintf'), + ('c:func', 'sprintf'), + ('c:func', 'stat'), + ('c:func', 'system'), + ('c:func', 'time'), + ('c:func', 'vsnprintf'), + # Standard C types + ('c:type', 'FILE'), + ('c:type', 'int64_t'), + ('c:type', 'intmax_t'), + ('c:type', 'off_t'), + ('c:type', 'ptrdiff_t'), + ('c:type', 'siginfo_t'), + ('c:type', 'size_t'), + ('c:type', 'ssize_t'), + ('c:type', 'time_t'), + ('c:type', 'uint64_t'), + ('c:type', 'uintmax_t'), + ('c:type', 'uintptr_t'), + ('c:type', 'va_list'), + ('c:type', 'wchar_t'), + ('c:type', '__int64'), + ('c:type', 'unsigned __int64'), + # Standard C structures + ('c:struct', 'in6_addr'), + ('c:struct', 'in_addr'), + ('c:struct', 'stat'), + ('c:struct', 'statvfs'), + # Standard C macros + ('c:macro', 'LLONG_MAX'), + ('c:macro', 'LLONG_MIN'), + ('c:macro', 'LONG_MAX'), + ('c:macro', 'LONG_MIN'), + # Standard C variables + ('c:data', 'errno'), + # Standard environment variables + ('envvar', 'BROWSER'), + ('envvar', 'COLUMNS'), + ('envvar', 'COMSPEC'), + ('envvar', 'DISPLAY'), + ('envvar', 'HOME'), + ('envvar', 'HOMEDRIVE'), + ('envvar', 'HOMEPATH'), + ('envvar', 'IDLESTARTUP'), + ('envvar', 'LANG'), + ('envvar', 'LANGUAGE'), + ('envvar', 'LC_ALL'), + ('envvar', 'LC_CTYPE'), + ('envvar', 'LC_COLLATE'), + ('envvar', 'LC_MESSAGES'), + ('envvar', 'LC_MONETARY'), + ('envvar', 'LC_NUMERIC'), + ('envvar', 'LC_TIME'), + ('envvar', 'LINES'), + ('envvar', 'LOGNAME'), + ('envvar', 'PAGER'), + ('envvar', 'PATH'), + ('envvar', 'PATHEXT'), + ('envvar', 'SOURCE_DATE_EPOCH'), + ('envvar', 'TEMP'), + ('envvar', 'TERM'), + ('envvar', 'TMP'), + ('envvar', 'TMPDIR'), + ('envvar', 'TZ'), + ('envvar', 'USER'), + ('envvar', 'USERNAME'), + ('envvar', 'USERPROFILE'), +] + +# Temporary undocumented names. +# In future this list must be empty. 
+nitpick_ignore += [ + # C API: Standard Python exception classes + ('c:data', 'PyExc_ArithmeticError'), + ('c:data', 'PyExc_AssertionError'), + ('c:data', 'PyExc_AttributeError'), + ('c:data', 'PyExc_BaseException'), + ('c:data', 'PyExc_BlockingIOError'), + ('c:data', 'PyExc_BrokenPipeError'), + ('c:data', 'PyExc_BufferError'), + ('c:data', 'PyExc_ChildProcessError'), + ('c:data', 'PyExc_ConnectionAbortedError'), + ('c:data', 'PyExc_ConnectionError'), + ('c:data', 'PyExc_ConnectionRefusedError'), + ('c:data', 'PyExc_ConnectionResetError'), + ('c:data', 'PyExc_EOFError'), + ('c:data', 'PyExc_Exception'), + ('c:data', 'PyExc_FileExistsError'), + ('c:data', 'PyExc_FileNotFoundError'), + ('c:data', 'PyExc_FloatingPointError'), + ('c:data', 'PyExc_GeneratorExit'), + ('c:data', 'PyExc_ImportError'), + ('c:data', 'PyExc_IndentationError'), + ('c:data', 'PyExc_IndexError'), + ('c:data', 'PyExc_InterruptedError'), + ('c:data', 'PyExc_IsADirectoryError'), + ('c:data', 'PyExc_KeyboardInterrupt'), + ('c:data', 'PyExc_KeyError'), + ('c:data', 'PyExc_LookupError'), + ('c:data', 'PyExc_MemoryError'), + ('c:data', 'PyExc_ModuleNotFoundError'), + ('c:data', 'PyExc_NameError'), + ('c:data', 'PyExc_NotADirectoryError'), + ('c:data', 'PyExc_NotImplementedError'), + ('c:data', 'PyExc_OSError'), + ('c:data', 'PyExc_OverflowError'), + ('c:data', 'PyExc_PermissionError'), + ('c:data', 'PyExc_ProcessLookupError'), + ('c:data', 'PyExc_RecursionError'), + ('c:data', 'PyExc_ReferenceError'), + ('c:data', 'PyExc_RuntimeError'), + ('c:data', 'PyExc_StopAsyncIteration'), + ('c:data', 'PyExc_StopIteration'), + ('c:data', 'PyExc_SyntaxError'), + ('c:data', 'PyExc_SystemError'), + ('c:data', 'PyExc_SystemExit'), + ('c:data', 'PyExc_TabError'), + ('c:data', 'PyExc_TimeoutError'), + ('c:data', 'PyExc_TypeError'), + ('c:data', 'PyExc_UnboundLocalError'), + ('c:data', 'PyExc_UnicodeDecodeError'), + ('c:data', 'PyExc_UnicodeEncodeError'), + ('c:data', 'PyExc_UnicodeError'), + ('c:data', 'PyExc_UnicodeTranslateError'), + ('c:data', 'PyExc_ValueError'), + ('c:data', 'PyExc_ZeroDivisionError'), + # C API: Standard Python warning classes + ('c:data', 'PyExc_BytesWarning'), + ('c:data', 'PyExc_DeprecationWarning'), + ('c:data', 'PyExc_FutureWarning'), + ('c:data', 'PyExc_ImportWarning'), + ('c:data', 'PyExc_PendingDeprecationWarning'), + ('c:data', 'PyExc_ResourceWarning'), + ('c:data', 'PyExc_RuntimeWarning'), + ('c:data', 'PyExc_SyntaxWarning'), + ('c:data', 'PyExc_UnicodeWarning'), + ('c:data', 'PyExc_UserWarning'), + ('c:data', 'PyExc_Warning'), + # Do not error nit-picky mode builds when _SubParsersAction.add_parser cannot + # be resolved, as the method is currently undocumented. For context, see + # https://github.com/python/cpython/pull/103289. + ('py:meth', '_SubParsersAction.add_parser'), +] + +# gh-106948: Copy standard C types declared in the "c:type" domain to the +# "c:identifier" domain, since "c:function" markup looks for types in the +# "c:identifier" domain. 
Use list() to not iterate on items which are being +# added +for role, name in list(nitpick_ignore): + if role == 'c:type': + nitpick_ignore.append(('c:identifier', name)) +del role, name + # Disable Docutils smartquotes for several translations smartquotes_excludes = { 'languages': ['ja', 'fr', 'zh_TW', 'zh_CN'], 'builders': ['man', 'text'], @@ -66,6 +251,11 @@ # Avoid a warning with Sphinx >= 2.0 master_doc = 'contents' +# Allow translation of index directives +gettext_additional_targets = [ + 'index', +] + # Options for HTML output # ----------------------- @@ -89,9 +279,17 @@ # Short title used e.g. for HTML tags. html_short_title = '%s Documentation' % release -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%b %d, %Y' +# Deployment preview information +# (See .readthedocs.yml and https://docs.readthedocs.io/en/stable/reference/environment-variables.html) +repository_url = os.getenv("READTHEDOCS_GIT_CLONE_URL") +html_context = { + "is_deployment_preview": os.getenv("READTHEDOCS_VERSION_TYPE") == "external", + "repository_url": repository_url.removesuffix(".git") if repository_url else None, + "pr_id": os.getenv("READTHEDOCS_VERSION") +} + +# This 'Last updated on:' timestamp is inserted at the bottom of every page. +html_last_updated_fmt = time.strftime('%b %d, %Y (%H:%M UTC)', time.gmtime()) # Path to find HTML templates. templates_path = ['tools/templates'] @@ -114,7 +312,7 @@ html_use_opensearch = 'https://docs.python.org/' + version # Additional static files. -html_static_path = ['tools/static'] +html_static_path = ['_static', 'tools/static'] # Output file base name for HTML help builder. htmlhelp_basename = 'python' + release.replace('.', '') @@ -155,8 +353,6 @@ latex_documents = [ ('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'), - ('distributing/index', 'distributing.tex', - 'Distributing Python Modules', _stdauthor, 'manual'), ('extending/index', 'extending.tex', 'Extending and Embedding Python', _stdauthor, 'manual'), ('installing/index', 'installing.tex', @@ -196,8 +392,6 @@ # match any of the following regexes (using re.match). coverage_ignore_modules = [ r'[T|t][k|K]', - r'Tix', - r'distutils.*', ] coverage_ignore_functions = [ @@ -229,8 +423,49 @@ # Options for the link checker # ---------------------------- -# Ignore certain URLs. 
-linkcheck_ignore = [r'https://bugs.python.org/(issue)?\d+'] +linkcheck_allowed_redirects = { + # bpo-NNNN -> BPO -> GH Issues + r'https://bugs.python.org/issue\?@action=redirect&bpo=\d+': r'https://github.com/python/cpython/issues/\d+', + # GH-NNNN used to refer to pull requests + r'https://github.com/python/cpython/issues/\d+': r'https://github.com/python/cpython/pull/\d+', + # :source:`something` linking files in the repository + r'https://github.com/python/cpython/tree/.*': 'https://github.com/python/cpython/blob/.*', + # Intentional HTTP use at Misc/NEWS.d/3.5.0a1.rst + r'http://www.python.org/$': 'https://www.python.org/$', + # Used in license page, keep as is + r'https://www.zope.org/': r'https://www.zope.dev/', + # Microsoft's redirects to learn.microsoft.com + r'https://msdn.microsoft.com/.*': 'https://learn.microsoft.com/.*', + r'https://docs.microsoft.com/.*': 'https://learn.microsoft.com/.*', + r'https://go.microsoft.com/fwlink/\?LinkID=\d+': 'https://learn.microsoft.com/.*', + # Language redirects + r'https://toml.io': 'https://toml.io/en/', + r'https://www.redhat.com': 'https://www.redhat.com/en', + # Other redirects + r'https://www.boost.org/libs/.+': r'https://www.boost.org/doc/libs/\d_\d+_\d/.+', + r'https://support.microsoft.com/en-us/help/\d+': 'https://support.microsoft.com/en-us/topic/.+', + r'https://perf.wiki.kernel.org$': 'https://perf.wiki.kernel.org/index.php/Main_Page', + r'https://www.sqlite.org': 'https://www.sqlite.org/index.html', + r'https://mitpress.mit.edu/sicp$': 'https://mitpress.mit.edu/9780262510875/structure-and-interpretation-of-computer-programs/', + r'https://www.python.org/psf/': 'https://www.python.org/psf-landing/', +} + +linkcheck_anchors_ignore = [ + # ignore anchors that start with a '/', e.g. Wikipedia media files: + # https://en.wikipedia.org/wiki/Walrus#/media/File:Pacific_Walrus_-_Bull_(8247646168).jpg + r'\/.*', +] + +linkcheck_ignore = [ + # The crawler gets "Anchor not found" + r'https://developer.apple.com/documentation/.+?#.*', + r'https://devguide.python.org.+?/#.*', + r'https://github.com.+?#.*', + # Robot crawlers not allowed: "403 Client Error: Forbidden" + r'https://support.enthought.com/hc/.*', + # SSLError CertificateError, even though it is valid + r'https://unix.org/version2/whatsnew/lp64_wp.html', +] # Options for extensions @@ -239,3 +474,13 @@ # Relative filename of the data files refcount_file = 'data/refcounts.dat' stable_abi_file = 'data/stable_abi.dat' + +# sphinxext-opengraph config +ogp_site_url = 'https://docs.python.org/3/' +ogp_site_name = 'Python documentation' +ogp_image = '_static/og-image.png' +ogp_custom_meta_tags = [ + '<meta property="og:image:width" content="200" />', + '<meta property="og:image:height" content="200" />', + '<meta name="theme-color" content="#3776ab" />', +] diff --git a/Doc/constraints.txt b/Doc/constraints.txt new file mode 100644 index 000000000000000..147de1271eb2b7f --- /dev/null +++ b/Doc/constraints.txt @@ -0,0 +1,24 @@ +# We have upper bounds on our transitive dependencies here +# To avoid new releases unexpectedly breaking our build. +# This file can be updated on an ad-hoc basis, +# though it will probably have to be updated +# whenever Doc/requirements.txt is updated. 
+ +# Direct dependencies of Sphinx +babel<3 +colorama<0.5 +imagesize<1.5 +Jinja2<3.2 +packaging<24 +Pygments>=2.16.1,<3 +requests<3 +snowballstemmer<3 +sphinxcontrib-applehelp<1.1 +sphinxcontrib-devhelp<1.1 +sphinxcontrib-htmlhelp<2.1 +sphinxcontrib-jsmath<1.1 +sphinxcontrib-qthelp<1.1 +sphinxcontrib-serializinghtml<1.2 + +# Direct dependencies of Jinja2 (Jinja is a dependency of Sphinx, see above) +MarkupSafe<2.2 diff --git a/Doc/contents.rst b/Doc/contents.rst index 464f93bdf85f95b..24ceacb0076b5eb 100644 --- a/Doc/contents.rst +++ b/Doc/contents.rst @@ -11,7 +11,6 @@ library/index.rst extending/index.rst c-api/index.rst - distributing/index.rst installing/index.rst howto/index.rst faq/index.rst @@ -21,10 +20,3 @@ bugs.rst copyright.rst license.rst - -.. to include legacy packaging docs in build - -.. toctree:: - :hidden: - - install/index.rst diff --git a/Doc/copyright.rst b/Doc/copyright.rst index e64a49328b47232..9b71683155eebe7 100644 --- a/Doc/copyright.rst +++ b/Doc/copyright.rst @@ -4,7 +4,7 @@ Copyright Python and this documentation is: -Copyright © 2001-2022 Python Software Foundation. All rights reserved. +Copyright © 2001-2023 Python Software Foundation. All rights reserved. Copyright © 2000 BeOpen.com. All rights reserved. diff --git a/Doc/data/refcounts.dat b/Doc/data/refcounts.dat index 51ccacf13f9e3be..ef9ac1617a284be 100644 --- a/Doc/data/refcounts.dat +++ b/Doc/data/refcounts.dat @@ -606,6 +606,9 @@ PyErr_GetExcInfo:PyObject**:ptype:+1: PyErr_GetExcInfo:PyObject**:pvalue:+1: PyErr_GetExcInfo:PyObject**:ptraceback:+1: +PyErr_GetRaisedException:PyObject*::+1: +PyErr_SetRaisedException:::: + PyErr_GivenExceptionMatches:int::: PyErr_GivenExceptionMatches:PyObject*:given:0: PyErr_GivenExceptionMatches:PyObject*:exc:0: @@ -761,8 +764,6 @@ PyErr_WarnFormat::...:: PyErr_WriteUnraisable:void::: PyErr_WriteUnraisable:PyObject*:obj:0: -PyEval_AcquireLock:void::: - PyEval_AcquireThread:void::: PyEval_AcquireThread:PyThreadState*:tstate:: @@ -780,10 +781,6 @@ PyEval_GetFuncDesc:PyObject*:func:0: PyEval_GetFuncName:const char*::: PyEval_GetFuncName:PyObject*:func:0: -PyEval_InitThreads:void::: - -PyEval_ReleaseLock:void::: - PyEval_ReleaseThread:void::: PyEval_ReleaseThread:PyThreadState*:tstate:: @@ -836,6 +833,8 @@ PyEval_EvalFrameEx:int:throwflag:: PyEval_MergeCompilerFlags:int::: PyEval_MergeCompilerFlags:PyCompilerFlags*:cf:: +PyException_GetArgs:PyObject*::+1: + PyException_GetCause:PyObject*::+1: PyException_GetCause:PyObject*:ex:0: @@ -975,6 +974,9 @@ PyCoro_New:PyFrameObject*:frame:0: PyCoro_New:PyObject*:name:0: PyCoro_New:PyObject*:qualname:0: +PyImport_AddModuleRef:PyObject*::+1: +PyImport_AddModuleRef:const char*:name:: + PyImport_AddModule:PyObject*::0:reference borrowed from sys.modules PyImport_AddModule:const char*:name:: @@ -1018,10 +1020,10 @@ PyImport_Import:PyObject*::+1: PyImport_Import:PyObject*:name:0: PyImport_ImportFrozenModule:int::: -PyImport_ImportFrozenModule:const char*::: +PyImport_ImportFrozenModule:const char*:name:: PyImport_ImportFrozenModuleObject:int::: -PyImport_ImportFrozenModuleObject:PyObject*::+1: +PyImport_ImportFrozenModuleObject:PyObject*:name:+1: PyImport_ImportModule:PyObject*::+1: PyImport_ImportModule:const char*:name:: @@ -1579,21 +1581,6 @@ PyOS_FSPath:PyObject*:path:0: PyObject_ASCII:PyObject*::+1: PyObject_ASCII:PyObject*:o:0: -PyObject_AsCharBuffer:int::: -PyObject_AsCharBuffer:PyObject*:obj:0: -PyObject_AsCharBuffer:const char**:buffer:: -PyObject_AsCharBuffer:Py_ssize_t*:buffer_len:: - -PyObject_AsReadBuffer:int::: 
-PyObject_AsReadBuffer:PyObject*:obj:0: -PyObject_AsReadBuffer:const void**:buffer:: -PyObject_AsReadBuffer:Py_ssize_t*:buffer_len:: - -PyObject_AsWriteBuffer:int::: -PyObject_AsWriteBuffer:PyObject*:obj:0: -PyObject_AsWriteBuffer:void**:buffer:: -PyObject_AsWriteBuffer:Py_ssize_t*:buffer_len:: - PyObject_Bytes:PyObject*::+1: PyObject_Bytes:PyObject*:o:0: @@ -1810,6 +1797,9 @@ PyObject_Size:PyObject*:o:0: PyObject_Str:PyObject*::+1: PyObject_Str:PyObject*:o:0: +Py_TYPE:PyObject*::0: +Py_TYPE:PyObject*:ob:0: + PyObject_Type:PyObject*::+1: PyObject_Type:PyObject*:o:0: @@ -2369,76 +2359,56 @@ PyUnicode_KIND:PyObject*:o:0: PyUnicode_MAX_CHAR_VALUE:::: PyUnicode_MAX_CHAR_VALUE:PyObject*:o:0: -PyUnicode_AS_UNICODE:Py_UNICODE*::: -PyUnicode_AS_UNICODE:PyObject*:o:0: - -PyUnicode_AS_DATA:const char*::: -PyUnicode_AS_DATA:PyObject*:o:0: - Py_UNICODE_ISALNUM:int::: -Py_UNICODE_ISALNUM:Py_UNICODE:ch:: +Py_UNICODE_ISALNUM:Py_UCS4:ch:: Py_UNICODE_ISALPHA:int::: -Py_UNICODE_ISALPHA:Py_UNICODE:ch:: +Py_UNICODE_ISALPHA:Py_UCS4:ch:: Py_UNICODE_ISSPACE:int::: -Py_UNICODE_ISSPACE:Py_UNICODE:ch:: +Py_UNICODE_ISSPACE:Py_UCS4:ch:: Py_UNICODE_ISLOWER:int::: -Py_UNICODE_ISLOWER:Py_UNICODE:ch:: +Py_UNICODE_ISLOWER:Py_UCS4:ch:: Py_UNICODE_ISUPPER:int::: -Py_UNICODE_ISUPPER:Py_UNICODE:ch:: +Py_UNICODE_ISUPPER:Py_UCS4:ch:: Py_UNICODE_ISTITLE:int::: -Py_UNICODE_ISTITLE:Py_UNICODE:ch:: +Py_UNICODE_ISTITLE:Py_UCS4:ch:: Py_UNICODE_ISLINEBREAK:int::: -Py_UNICODE_ISLINEBREAK:Py_UNICODE:ch:: +Py_UNICODE_ISLINEBREAK:Py_UCS4:ch:: Py_UNICODE_ISDECIMAL:int::: -Py_UNICODE_ISDECIMAL:Py_UNICODE:ch:: +Py_UNICODE_ISDECIMAL:Py_UCS4:ch:: Py_UNICODE_ISDIGIT:int::: -Py_UNICODE_ISDIGIT:Py_UNICODE:ch:: +Py_UNICODE_ISDIGIT:Py_UCS4:ch:: Py_UNICODE_ISNUMERIC:int::: -Py_UNICODE_ISNUMERIC:Py_UNICODE:ch:: +Py_UNICODE_ISNUMERIC:Py_UCS4:ch:: Py_UNICODE_ISPRINTABLE:int::: -Py_UNICODE_ISPRINTABLE:Py_UNICODE:ch:: +Py_UNICODE_ISPRINTABLE:Py_UCS4:ch:: -Py_UNICODE_TOLOWER:Py_UNICODE::: -Py_UNICODE_TOLOWER:Py_UNICODE:ch:: +Py_UNICODE_TOLOWER:Py_UCS4::: +Py_UNICODE_TOLOWER:Py_UCS4:ch:: -Py_UNICODE_TOUPPER:Py_UNICODE::: -Py_UNICODE_TOUPPER:Py_UNICODE:ch:: +Py_UNICODE_TOUPPER:Py_UCS4::: +Py_UNICODE_TOUPPER:Py_UCS4:ch:: -Py_UNICODE_TOTITLE:Py_UNICODE::: -Py_UNICODE_TOTITLE:Py_UNICODE:ch:: +Py_UNICODE_TOTITLE:Py_UCS4::: +Py_UNICODE_TOTITLE:Py_UCS4:ch:: Py_UNICODE_TODECIMAL:int::: -Py_UNICODE_TODECIMAL:Py_UNICODE:ch:: +Py_UNICODE_TODECIMAL:Py_UCS4:ch:: Py_UNICODE_TODIGIT:int::: -Py_UNICODE_TODIGIT:Py_UNICODE:ch:: +Py_UNICODE_TODIGIT:Py_UCS4:ch:: Py_UNICODE_TONUMERIC:double::: -Py_UNICODE_TONUMERIC:Py_UNICODE:ch:: - -PyUnicode_FromUnicode:PyObject*::+1: -PyUnicode_FromUnicode:const Py_UNICODE*:u:: -PyUnicode_FromUnicode:Py_ssize_t:size:: - -PyUnicode_AsUnicode:Py_UNICODE*::: -PyUnicode_AsUnicode:PyObject*:unicode:0: - -PyUnicode_AsUnicodeAndSize:Py_UNICODE*::: -PyUnicode_AsUnicodeAndSize:PyObject*:unicode:0: -PyUnicode_AsUnicodeAndSize:Py_ssize_t*:size:: - -PyUnicode_GetSize:Py_ssize_t::: -PyUnicode_GetSize:PyObject*:unicode:0: +Py_UNICODE_TONUMERIC:Py_UCS4:ch:: PyUnicode_FromObject:PyObject*::+1: PyUnicode_FromObject:PyObject*:obj:0: @@ -2843,6 +2813,10 @@ PyWeakref_GET_OBJECT:PyObject*:ref:0: PyWeakref_GetObject:PyObject*::0: PyWeakref_GetObject:PyObject*:ref:0: +PyWeakref_GetRef:int::: +PyWeakref_GetRef:PyObject*:ref:0: +PyWeakref_GetRef:PyObject**:pobj:+1: + PyWeakref_NewProxy:PyObject*::+1: PyWeakref_NewProxy:PyObject*:ob:0: PyWeakref_NewProxy:PyObject*:callback:0: diff --git a/Doc/data/stable_abi.dat b/Doc/data/stable_abi.dat index 
db8fc15d93d15a0..811b1bd84d24174 100644 --- a/Doc/data/stable_abi.dat +++ b/Doc/data/stable_abi.dat @@ -43,7 +43,6 @@ function,PyBytes_Size,3.2,, var,PyBytes_Type,3.2,, type,PyCFunction,3.2,, type,PyCFunctionWithKeywords,3.2,, -function,PyCFunction_Call,3.2,, function,PyCFunction_GetFlags,3.2,, function,PyCFunction_GetFunction,3.2,, function,PyCFunction_GetSelf,3.2,, @@ -112,7 +111,9 @@ function,PyDict_Copy,3.2,, function,PyDict_DelItem,3.2,, function,PyDict_DelItemString,3.2,, function,PyDict_GetItem,3.2,, +function,PyDict_GetItemRef,3.13,, function,PyDict_GetItemString,3.2,, +function,PyDict_GetItemStringRef,3.13,, function,PyDict_GetItemWithError,3.2,, function,PyDict_Items,3.2,, function,PyDict_Keys,3.2,, @@ -133,12 +134,14 @@ function,PyErr_BadInternalCall,3.2,, function,PyErr_CheckSignals,3.2,, function,PyErr_Clear,3.2,, function,PyErr_Display,3.2,, +function,PyErr_DisplayException,3.12,, function,PyErr_ExceptionMatches,3.2,, function,PyErr_Fetch,3.2,, function,PyErr_Format,3.2,, function,PyErr_FormatV,3.5,, function,PyErr_GetExcInfo,3.7,, function,PyErr_GetHandledException,3.11,, +function,PyErr_GetRaisedException,3.12,, function,PyErr_GivenExceptionMatches,3.2,, function,PyErr_NewException,3.2,, function,PyErr_NewExceptionWithDoc,3.2,, @@ -168,6 +171,7 @@ function,PyErr_SetInterrupt,3.2,, function,PyErr_SetInterruptEx,3.10,, function,PyErr_SetNone,3.2,, function,PyErr_SetObject,3.2,, +function,PyErr_SetRaisedException,3.12,, function,PyErr_SetString,3.2,, function,PyErr_SyntaxLocation,3.2,, function,PyErr_SyntaxLocationEx,3.7,, @@ -175,11 +179,7 @@ function,PyErr_WarnEx,3.2,, function,PyErr_WarnExplicit,3.2,, function,PyErr_WarnFormat,3.2,, function,PyErr_WriteUnraisable,3.2,, -function,PyEval_AcquireLock,3.2,, function,PyEval_AcquireThread,3.2,, -function,PyEval_CallFunction,3.2,, -function,PyEval_CallMethod,3.2,, -function,PyEval_CallObjectWithKeywords,3.2,, function,PyEval_EvalCode,3.2,, function,PyEval_EvalCodeEx,3.2,, function,PyEval_EvalFrame,3.2,, @@ -190,12 +190,9 @@ function,PyEval_GetFuncDesc,3.2,, function,PyEval_GetFuncName,3.2,, function,PyEval_GetGlobals,3.2,, function,PyEval_GetLocals,3.2,, -function,PyEval_InitThreads,3.2,, -function,PyEval_ReleaseLock,3.2,, function,PyEval_ReleaseThread,3.2,, function,PyEval_RestoreThread,3.2,, function,PyEval_SaveThread,3.2,, -function,PyEval_ThreadsInitialized,3.2,, var,PyExc_ArithmeticError,3.2,, var,PyExc_AssertionError,3.2,, var,PyExc_AttributeError,3.2,, @@ -266,9 +263,11 @@ var,PyExc_Warning,3.2,, var,PyExc_WindowsError,3.7,on Windows, var,PyExc_ZeroDivisionError,3.2,, function,PyExceptionClass_Name,3.8,, +function,PyException_GetArgs,3.12,, function,PyException_GetCause,3.2,, function,PyException_GetContext,3.2,, function,PyException_GetTraceback,3.2,, +function,PyException_SetArgs,3.12,, function,PyException_SetCause,3.2,, function,PyException_SetContext,3.2,, function,PyException_SetTraceback,3.2,, @@ -301,6 +300,7 @@ type,PyGetSetDef,3.2,,full-abi var,PyGetSetDescr_Type,3.2,, function,PyImport_AddModule,3.2,, function,PyImport_AddModuleObject,3.7,, +function,PyImport_AddModuleRef,3.13,, function,PyImport_AppendInittab,3.2,, function,PyImport_ExecCodeModule,3.2,, function,PyImport_ExecCodeModuleEx,3.2,, @@ -347,6 +347,7 @@ var,PyList_Type,3.2,, type,PyLongObject,3.2,,opaque var,PyLongRangeIter_Type,3.2,, function,PyLong_AsDouble,3.2,, +function,PyLong_AsInt,3.13,, function,PyLong_AsLong,3.2,, function,PyLong_AsLongAndOverflow,3.2,, function,PyLong_AsLongLong,3.2,, @@ -372,8 +373,12 @@ var,PyLong_Type,3.2,, 
var,PyMap_Type,3.2,, function,PyMapping_Check,3.2,, function,PyMapping_GetItemString,3.2,, +function,PyMapping_GetOptionalItem,3.13,, +function,PyMapping_GetOptionalItemString,3.13,, function,PyMapping_HasKey,3.2,, function,PyMapping_HasKeyString,3.2,, +function,PyMapping_HasKeyStringWithError,3.13,, +function,PyMapping_HasKeyWithError,3.13,, function,PyMapping_Items,3.2,, function,PyMapping_Keys,3.2,, function,PyMapping_Length,3.2,, @@ -383,9 +388,15 @@ function,PyMapping_Values,3.2,, function,PyMem_Calloc,3.7,, function,PyMem_Free,3.2,, function,PyMem_Malloc,3.2,, +function,PyMem_RawCalloc,3.13,, +function,PyMem_RawFree,3.13,, +function,PyMem_RawMalloc,3.13,, +function,PyMem_RawRealloc,3.13,, function,PyMem_Realloc,3.2,, type,PyMemberDef,3.2,,full-abi var,PyMemberDescr_Type,3.2,, +function,PyMember_GetOne,3.2,, +function,PyMember_SetOne,3.2,, function,PyMemoryView_FromBuffer,3.11,, function,PyMemoryView_FromMemory,3.7,, function,PyMemoryView_FromObject,3.2,, @@ -397,6 +408,7 @@ type,PyModuleDef,3.2,,full-abi type,PyModuleDef_Base,3.2,,full-abi function,PyModuleDef_Init,3.5,, var,PyModuleDef_Type,3.5,, +function,PyModule_Add,3.13,, function,PyModule_AddFunctions,3.7,, function,PyModule_AddIntConstant,3.2,, function,PyModule_AddObject,3.2,, @@ -477,10 +489,7 @@ type,PyObject,3.2,,members member,PyObject.ob_refcnt,3.2,, member,PyObject.ob_type,3.2,, function,PyObject_ASCII,3.2,, -function,PyObject_AsCharBuffer,3.2,, function,PyObject_AsFileDescriptor,3.2,, -function,PyObject_AsReadBuffer,3.2,, -function,PyObject_AsWriteBuffer,3.2,, function,PyObject_Bytes,3.2,, function,PyObject_Call,3.2,, function,PyObject_CallFunction,3.2,, @@ -491,9 +500,10 @@ function,PyObject_CallNoArgs,3.10,, function,PyObject_CallObject,3.2,, function,PyObject_Calloc,3.7,, function,PyObject_CheckBuffer,3.11,, -function,PyObject_CheckReadBuffer,3.2,, function,PyObject_ClearWeakRefs,3.2,, function,PyObject_CopyData,3.11,, +function,PyObject_DelAttr,3.13,, +function,PyObject_DelAttrString,3.13,, function,PyObject_DelItem,3.2,, function,PyObject_DelItemString,3.2,, function,PyObject_Dir,3.2,, @@ -514,8 +524,13 @@ function,PyObject_GetAttrString,3.2,, function,PyObject_GetBuffer,3.11,, function,PyObject_GetItem,3.2,, function,PyObject_GetIter,3.2,, +function,PyObject_GetOptionalAttr,3.13,, +function,PyObject_GetOptionalAttrString,3.13,, +function,PyObject_GetTypeData,3.12,, function,PyObject_HasAttr,3.2,, function,PyObject_HasAttrString,3.2,, +function,PyObject_HasAttrStringWithError,3.13,, +function,PyObject_HasAttrWithError,3.13,, function,PyObject_Hash,3.2,, function,PyObject_HashNotImplemented,3.2,, function,PyObject_Init,3.2,, @@ -591,19 +606,14 @@ function,PyStructSequence_NewType,3.2,, function,PyStructSequence_SetItem,3.2,, var,PyStructSequence_UnnamedField,3.11,, var,PySuper_Type,3.2,, -function,PySys_AddWarnOption,3.2,, -function,PySys_AddWarnOptionUnicode,3.2,, -function,PySys_AddXOption,3.7,, +function,PySys_Audit,3.13,, +function,PySys_AuditTuple,3.13,, function,PySys_FormatStderr,3.2,, function,PySys_FormatStdout,3.2,, function,PySys_GetObject,3.2,, function,PySys_GetXOptions,3.7,, -function,PySys_HasWarnOptions,3.2,, function,PySys_ResetWarnOptions,3.2,, -function,PySys_SetArgv,3.2,, -function,PySys_SetArgvEx,3.2,, function,PySys_SetObject,3.2,, -function,PySys_SetPath,3.2,, function,PySys_WriteStderr,3.2,, function,PySys_WriteStdout,3.2,, type,PyThreadState,3.2,,opaque @@ -668,6 +678,7 @@ function,PyType_GetModuleState,3.10,, function,PyType_GetName,3.11,, function,PyType_GetQualName,3.11,, 
function,PyType_GetSlot,3.4,, +function,PyType_GetTypeDataSize,3.12,, function,PyType_IsSubtype,3.2,, function,PyType_Modified,3.2,, function,PyType_Ready,3.2,, @@ -750,6 +761,8 @@ function,PyUnicode_DecodeUnicodeEscape,3.2,, function,PyUnicode_EncodeCodePage,3.7,on Windows, function,PyUnicode_EncodeFSDefault,3.2,, function,PyUnicode_EncodeLocale,3.7,, +function,PyUnicode_EqualToUTF8,3.13,, +function,PyUnicode_EqualToUTF8AndSize,3.13,, function,PyUnicode_FSConverter,3.2,, function,PyUnicode_FSDecoder,3.2,, function,PyUnicode_Find,3.2,, @@ -790,6 +803,7 @@ function,PyVectorcall_Call,3.12,, function,PyVectorcall_NARGS,3.12,, type,PyWeakReference,3.2,,opaque function,PyWeakref_GetObject,3.2,, +function,PyWeakref_GetRef,3.13,, function,PyWeakref_NewProxy,3.2,, function,PyWeakref_NewRef,3.2,, var,PyWrapperDescr_Type,3.2,, @@ -834,6 +848,7 @@ function,Py_Initialize,3.2,, function,Py_InitializeEx,3.2,, function,Py_Is,3.10,, function,Py_IsFalse,3.10,, +function,Py_IsFinalizing,3.13,, function,Py_IsInitialized,3.2,, function,Py_IsNone,3.10,, function,Py_IsTrue,3.10,, @@ -844,9 +859,6 @@ function,Py_NewInterpreter,3.2,, function,Py_NewRef,3.10,, function,Py_ReprEnter,3.2,, function,Py_ReprLeave,3.2,, -function,Py_SetPath,3.7,, -function,Py_SetProgramName,3.2,, -function,Py_SetPythonHome,3.2,, function,Py_SetRecursionLimit,3.2,, type,Py_UCS4,3.2,, macro,Py_UNBLOCK_THREADS,3.2,, diff --git a/Doc/distributing/index.rst b/Doc/distributing/index.rst index 2ae2726d4e4b92a..2430564b45d6d2a 100644 --- a/Doc/distributing/index.rst +++ b/Doc/distributing/index.rst @@ -1,179 +1,19 @@ +:orphan: + +.. This page is retained solely for existing links to /distributing/index.html. + Direct readers to the PPUG instead. + .. _distributing-index: ############################### Distributing Python Modules ############################### -:Email: distutils-sig@python.org - - -As a popular open source development project, Python has an active -supporting community of contributors and users that also make their software -available for other Python developers to use under open source license terms. - -This allows Python users to share and collaborate effectively, benefiting -from the solutions others have already created to common (and sometimes -even rare!) problems, as well as potentially contributing their own -solutions to the common pool. - -This guide covers the distribution part of the process. For a guide to -installing other Python projects, refer to the -:ref:`installation guide <installing-index>`. - .. note:: - For corporate and other institutional users, be aware that many - organisations have their own policies around using and contributing to - open source software. Please take such policies into account when making - use of the distribution and installation tools provided with Python. - - -Key terms -========= - -* the `Python Package Index <https://pypi.org>`__ is a public - repository of open source licensed packages made available for use by - other Python users -* the `Python Packaging Authority - <https://www.pypa.io/>`__ are the group of - developers and documentation authors responsible for the maintenance and - evolution of the standard packaging tools and the associated metadata and - file format standards. They maintain a variety of tools, documentation - and issue trackers on both `GitHub <https://github.com/pypa>`__ and - `Bitbucket <https://bitbucket.org/pypa/>`__. -* ``distutils`` is the original build and distribution system first added - to the Python standard library in 1998. 
While direct use of ``distutils`` - is being phased out, it still laid the foundation for the current packaging - and distribution infrastructure, and it not only remains part of the - standard library, but its name lives on in other ways (such as the name - of the mailing list used to coordinate Python packaging standards - development). -* `setuptools`_ is a (largely) drop-in replacement for ``distutils`` first - published in 2004. Its most notable addition over the unmodified - ``distutils`` tools was the ability to declare dependencies on other - packages. It is currently recommended as a more regularly updated - alternative to ``distutils`` that offers consistent support for more - recent packaging standards across a wide range of Python versions. -* `wheel`_ (in this context) is a project that adds the ``bdist_wheel`` - command to ``distutils``/`setuptools`_. This produces a cross platform - binary packaging format (called "wheels" or "wheel files" and defined in - :pep:`427`) that allows Python libraries, even those including binary - extensions, to be installed on a system without needing to be built - locally. - -.. _setuptools: https://setuptools.readthedocs.io/en/latest/ -.. _wheel: https://wheel.readthedocs.io/ - -Open source licensing and collaboration -======================================= - -In most parts of the world, software is automatically covered by copyright. -This means that other developers require explicit permission to copy, use, -modify and redistribute the software. - -Open source licensing is a way of explicitly granting such permission in a -relatively consistent way, allowing developers to share and collaborate -efficiently by making common solutions to various problems freely available. -This leaves many developers free to spend more time focusing on the problems -that are relatively unique to their specific situation. - -The distribution tools provided with Python are designed to make it -reasonably straightforward for developers to make their own contributions -back to that common pool of software if they choose to do so. - -The same distribution tools can also be used to distribute software within -an organisation, regardless of whether that software is published as open -source software or not. - - -Installing the tools -==================== - -The standard library does not include build tools that support modern -Python packaging standards, as the core development team has found that it -is important to have standard tools that work consistently, even on older -versions of Python. - -The currently recommended build and distribution tools can be installed -by invoking the ``pip`` module at the command line:: - - python -m pip install setuptools wheel twine - -.. note:: - - For POSIX users (including macOS and Linux users), these instructions - assume the use of a :term:`virtual environment`. - - For Windows users, these instructions assume that the option to - adjust the system PATH environment variable was selected when installing - Python. - -The Python Packaging User Guide includes more details on the `currently -recommended tools`_. - -.. _currently recommended tools: https://packaging.python.org/guides/tool-recommendations/#packaging-tool-recommendations - -.. index:: - single: Python Package Index (PyPI) - single: PyPI; (see Python Package Index (PyPI)) - -.. 
_publishing-python-packages: - -Reading the Python Packaging User Guide -======================================= - -The Python Packaging User Guide covers the various key steps and elements -involved in creating and publishing a project: - -* `Project structure`_ -* `Building and packaging the project`_ -* `Uploading the project to the Python Package Index`_ -* `The .pypirc file`_ - -.. _Project structure: \ - https://packaging.python.org/tutorials/packaging-projects/#packaging-python-projects -.. _Building and packaging the project: \ - https://packaging.python.org/tutorials/packaging-projects/#creating-the-package-files -.. _Uploading the project to the Python Package Index: \ - https://packaging.python.org/tutorials/packaging-projects/#uploading-the-distribution-archives -.. _The .pypirc file: \ - https://packaging.python.org/specifications/pypirc/ - - -How do I...? -============ - -These are quick answers or links for some common tasks. - -... choose a name for my project? ---------------------------------- - -This isn't an easy topic, but here are a few tips: - -* check the Python Package Index to see if the name is already in use -* check popular hosting sites like GitHub, Bitbucket, etc to see if there - is already a project with that name -* check what comes up in a web search for the name you're considering -* avoid particularly common words, especially ones with multiple meanings, - as they can make it difficult for users to find your software when - searching for it - - -... create and distribute binary extensions? --------------------------------------------- - -This is actually quite a complex topic, with a variety of alternatives -available depending on exactly what you're aiming to achieve. See the -Python Packaging User Guide for more information and recommendations. - -.. seealso:: - - `Python Packaging User Guide: Binary Extensions - <https://packaging.python.org/guides/packaging-binary-extensions/>`__ - -.. other topics: + Information and guidance on distributing Python modules and packages + has been moved to the `Python Packaging User Guide`_, + and the tutorial on `packaging Python projects`_. - Once the Development & Deployment part of PPUG is fleshed out, some of - those sections should be linked from new questions here (most notably, - we should have a question about avoiding depending on PyPI that links to - https://packaging.python.org/en/latest/mirrors/) + .. _Python Packaging User Guide: https://packaging.python.org/ + .. _packaging Python projects: https://packaging.python.org/en/latest/tutorials/packaging-projects/ diff --git a/Doc/extending/building.rst b/Doc/extending/building.rst index 880bb33ee56718b..ddde567f6f3efa5 100644 --- a/Doc/extending/building.rst +++ b/Doc/extending/building.rst @@ -45,6 +45,7 @@ See the *"Multiple modules in one library"* section in :pep:`489` for details. .. highlight:: c +.. _install-index: .. _setuptools-index: Building C and C++ Extensions with setuptools diff --git a/Doc/extending/embedding.rst b/Doc/extending/embedding.rst index e64db3733440383..20397dc5add5db6 100644 --- a/Doc/extending/embedding.rst +++ b/Doc/extending/embedding.rst @@ -59,24 +59,43 @@ perform some operation on a file. 
:: int main(int argc, char *argv[]) { - wchar_t *program = Py_DecodeLocale(argv[0], NULL); - if (program == NULL) { - fprintf(stderr, "Fatal error: cannot decode argv[0]\n"); - exit(1); + PyStatus status; + PyConfig config; + PyConfig_InitPythonConfig(&config); + + /* optional but recommended */ + status = PyConfig_SetBytesString(&config, &config.program_name, argv[0]); + if (PyStatus_Exception(status)) { + goto exception; } - Py_SetProgramName(program); /* optional but recommended */ - Py_Initialize(); + + status = Py_InitializeFromConfig(&config); + if (PyStatus_Exception(status)) { + goto exception; + } + PyConfig_Clear(&config); + PyRun_SimpleString("from time import time,ctime\n" "print('Today is', ctime(time()))\n"); if (Py_FinalizeEx() < 0) { exit(120); } - PyMem_RawFree(program); return 0; + + exception: + PyConfig_Clear(&config); + Py_ExitStatusException(status); } -The :c:func:`Py_SetProgramName` function should be called before -:c:func:`Py_Initialize` to inform the interpreter about paths to Python run-time +.. note:: + + ``#define PY_SSIZE_T_CLEAN`` was used to indicate that ``Py_ssize_t`` should be + used in some APIs instead of ``int``. + It is not necessary since Python 3.13, but we keep it here for backward compatibility. + See :ref:`arg-parsing-string-and-buffers` for a description of this macro. + +Setting :c:member:`PyConfig.program_name` should be called before +:c:func:`Py_InitializeFromConfig` to inform the interpreter about paths to Python run-time libraries. Next, the Python interpreter is initialized with :c:func:`Py_Initialize`, followed by the execution of a hard-coded Python script that prints the date and time. Afterwards, the :c:func:`Py_FinalizeEx` call shuts @@ -250,7 +269,7 @@ following two statements before the call to :c:func:`Py_Initialize`:: PyImport_AppendInittab("emb", &PyInit_emb); These two lines initialize the ``numargs`` variable, and make the -:func:`emb.numargs` function accessible to the embedded Python interpreter. +:func:`!emb.numargs` function accessible to the embedded Python interpreter. With these extensions, the Python script can do things like .. code-block:: python diff --git a/Doc/extending/extending.rst b/Doc/extending/extending.rst index d9bf4fd6c7ae0e3..1ee7f28b2ba2206 100644 --- a/Doc/extending/extending.rst +++ b/Doc/extending/extending.rst @@ -69,8 +69,10 @@ the module and a copyright notice if you like). headers on some systems, you *must* include :file:`Python.h` before any standard headers are included. - It is recommended to always define ``PY_SSIZE_T_CLEAN`` before including - ``Python.h``. See :ref:`parsetuple` for a description of this macro. + ``#define PY_SSIZE_T_CLEAN`` was used to indicate that ``Py_ssize_t`` should be + used in some APIs instead of ``int``. + It is not necessary since Python 3.13, but we keep it here for backward compatibility. + See :ref:`arg-parsing-string-and-buffers` for a description of this macro. All user-visible symbols defined by :file:`Python.h` have a prefix of ``Py`` or ``PY``, except those defined in standard header files. For convenience, and @@ -195,7 +197,7 @@ The choice of which exception to raise is entirely yours. There are predeclared C objects corresponding to all built-in Python exceptions, such as :c:data:`PyExc_ZeroDivisionError`, which you can use directly. Of course, you should choose exceptions wisely --- don't use :c:data:`PyExc_TypeError` to mean -that a file couldn't be opened (that should probably be :c:data:`PyExc_IOError`). 
+that a file couldn't be opened (that should probably be :c:data:`PyExc_OSError`). If something's wrong with the argument list, the :c:func:`PyArg_ParseTuple` function usually raises :c:data:`PyExc_TypeError`. If you have an argument whose value must be in a particular range or must satisfy other conditions, @@ -206,7 +208,7 @@ usually declare a static object variable at the beginning of your file:: static PyObject *SpamError; -and initialize it in your module's initialization function (:c:func:`PyInit_spam`) +and initialize it in your module's initialization function (:c:func:`!PyInit_spam`) with an exception object:: PyMODINIT_FUNC @@ -219,9 +221,7 @@ with an exception object:: return NULL; SpamError = PyErr_NewException("spam.error", NULL, NULL); - Py_XINCREF(SpamError); - if (PyModule_AddObject(m, "error", SpamError) < 0) { - Py_XDECREF(SpamError); + if (PyModule_AddObjectRef(m, "error", SpamError) < 0) { Py_CLEAR(SpamError); Py_DECREF(m); return NULL; @@ -230,22 +230,22 @@ with an exception object:: return m; } -Note that the Python name for the exception object is :exc:`spam.error`. The +Note that the Python name for the exception object is :exc:`!spam.error`. The :c:func:`PyErr_NewException` function may create a class with the base class being :exc:`Exception` (unless another class is passed in instead of ``NULL``), described in :ref:`bltin-exceptions`. -Note also that the :c:data:`SpamError` variable retains a reference to the newly +Note also that the :c:data:`!SpamError` variable retains a reference to the newly created exception class; this is intentional! Since the exception could be removed from the module by external code, an owned reference to the class is -needed to ensure that it will not be discarded, causing :c:data:`SpamError` to +needed to ensure that it will not be discarded, causing :c:data:`!SpamError` to become a dangling pointer. Should it become a dangling pointer, C code which raises the exception could cause a core dump or other unintended side effects. -We discuss the use of ``PyMODINIT_FUNC`` as a function return type later in this +We discuss the use of :c:macro:`PyMODINIT_FUNC` as a function return type later in this sample. -The :exc:`spam.error` exception can be raised in your extension module using a +The :exc:`!spam.error` exception can be raised in your extension module using a call to :c:func:`PyErr_SetString` as shown below:: static PyObject * @@ -279,9 +279,9 @@ statement:: It returns ``NULL`` (the error indicator for functions returning object pointers) if an error is detected in the argument list, relying on the exception set by :c:func:`PyArg_ParseTuple`. Otherwise the string value of the argument has been -copied to the local variable :c:data:`command`. This is a pointer assignment and +copied to the local variable :c:data:`!command`. This is a pointer assignment and you are not supposed to modify the string to which it points (so in Standard C, -the variable :c:data:`command` should properly be declared as ``const char +the variable :c:data:`!command` should properly be declared as ``const char *command``). The next statement is a call to the Unix function :c:func:`system`, passing it @@ -289,7 +289,7 @@ the string we just got from :c:func:`PyArg_ParseTuple`:: sts = system(command); -Our :func:`spam.system` function must return the value of :c:data:`sts` as a +Our :func:`!spam.system` function must return the value of :c:data:`!sts` as a Python object. This is done using the function :c:func:`PyLong_FromLong`. 
:: return PyLong_FromLong(sts); @@ -315,7 +315,7 @@ contexts, as we have seen. The Module's Method Table and Initialization Function ===================================================== -I promised to show how :c:func:`spam_system` is called from Python programs. +I promised to show how :c:func:`!spam_system` is called from Python programs. First, we need to list its name and address in a "method table":: static PyMethodDef SpamMethods[] = { @@ -335,7 +335,7 @@ When using only ``METH_VARARGS``, the function should expect the Python-level parameters to be passed in as a tuple acceptable for parsing via :c:func:`PyArg_ParseTuple`; more information on this function is provided below. -The :const:`METH_KEYWORDS` bit may be set in the third field if keyword +The :c:macro:`METH_KEYWORDS` bit may be set in the third field if keyword arguments should be passed to the function. In this case, the C function should accept a third ``PyObject *`` parameter which will be a dictionary of keywords. Use :c:func:`PyArg_ParseTupleAndKeywords` to parse the arguments to such a @@ -354,7 +354,7 @@ The method table must be referenced in the module definition structure:: This structure, in turn, must be passed to the interpreter in the module's initialization function. The initialization function must be named -:c:func:`PyInit_name`, where *name* is the name of the module, and should be the +:c:func:`!PyInit_name`, where *name* is the name of the module, and should be the only non-\ ``static`` item defined in the module file:: PyMODINIT_FUNC @@ -363,12 +363,12 @@ only non-\ ``static`` item defined in the module file:: return PyModule_Create(&spammodule); } -Note that PyMODINIT_FUNC declares the function as ``PyObject *`` return type, +Note that :c:macro:`PyMODINIT_FUNC` declares the function as ``PyObject *`` return type, declares any special linkage declarations required by the platform, and for C++ declares the function as ``extern "C"``. -When the Python program imports module :mod:`spam` for the first time, -:c:func:`PyInit_spam` is called. (See below for comments about embedding Python.) +When the Python program imports module :mod:`!spam` for the first time, +:c:func:`!PyInit_spam` is called. (See below for comments about embedding Python.) It calls :c:func:`PyModule_Create`, which returns a module object, and inserts built-in function objects into the newly created module based upon the table (an array of :c:type:`PyMethodDef` structures) found in the module definition. @@ -378,19 +378,20 @@ certain errors, or return ``NULL`` if the module could not be initialized satisfactorily. The init function must return the module object to its caller, so that it then gets inserted into ``sys.modules``. -When embedding Python, the :c:func:`PyInit_spam` function is not called +When embedding Python, the :c:func:`!PyInit_spam` function is not called automatically unless there's an entry in the :c:data:`PyImport_Inittab` table. 
To add the module to the initialization table, use :c:func:`PyImport_AppendInittab`, optionally followed by an import of the module:: + #define PY_SSIZE_T_CLEAN + #include <Python.h> + int main(int argc, char *argv[]) { - wchar_t *program = Py_DecodeLocale(argv[0], NULL); - if (program == NULL) { - fprintf(stderr, "Fatal error: cannot decode argv[0]\n"); - exit(1); - } + PyStatus status; + PyConfig config; + PyConfig_InitPythonConfig(&config); /* Add a built-in module, before Py_Initialize */ if (PyImport_AppendInittab("spam", PyInit_spam) == -1) { @@ -399,11 +400,18 @@ optionally followed by an import of the module:: } /* Pass argv[0] to the Python interpreter */ - Py_SetProgramName(program); + status = PyConfig_SetBytesString(&config, &config.program_name, argv[0]); + if (PyStatus_Exception(status)) { + goto exception; + } /* Initialize the Python interpreter. Required. If this step fails, it will be a fatal error. */ - Py_Initialize(); + status = Py_InitializeFromConfig(&config); + if (PyStatus_Exception(status)) { + goto exception; + } + PyConfig_Clear(&config); /* Optionally import the module; alternatively, import can be deferred until the embedded script @@ -414,10 +422,13 @@ optionally followed by an import of the module:: fprintf(stderr, "Error: could not import module 'spam'\n"); } - ... + // ... use Python C API here ... - PyMem_RawFree(program); return 0; + + exception: + PyConfig_Clear(&config); + Py_ExitStatusException(status); } .. note:: @@ -527,7 +538,7 @@ be part of a module definition:: } This function must be registered with the interpreter using the -:const:`METH_VARARGS` flag; this is described in section :ref:`methodtable`. The +:c:macro:`METH_VARARGS` flag; this is described in section :ref:`methodtable`. The :c:func:`PyArg_ParseTuple` function and its arguments are documented in section :ref:`parsetuple`. @@ -649,7 +660,7 @@ Note that any Python object references which are provided to the caller are Some example calls:: - #define PY_SSIZE_T_CLEAN /* Make "s#" use Py_ssize_t rather than int. */ + #define PY_SSIZE_T_CLEAN #include <Python.h> :: @@ -745,7 +756,7 @@ it returns false and raises an appropriate exception. Here is an example module which uses keywords, based on an example by Geoff Philbrick (philbrick@hks.com):: - #define PY_SSIZE_T_CLEAN /* Make "s#" use Py_ssize_t rather than int. */ + #define PY_SSIZE_T_CLEAN #include <Python.h> static PyObject * @@ -1030,13 +1041,13 @@ Let's follow the control flow into :c:func:`PyList_SetItem`. The list owns references to all its items, so when item 1 is replaced, it has to dispose of the original item 1. Now let's suppose the original item 1 was an instance of a user-defined class, and let's further suppose that the class defined a -:meth:`__del__` method. If this class instance has a reference count of 1, -disposing of it will call its :meth:`__del__` method. +:meth:`!__del__` method. If this class instance has a reference count of 1, +disposing of it will call its :meth:`!__del__` method. -Since it is written in Python, the :meth:`__del__` method can execute arbitrary +Since it is written in Python, the :meth:`!__del__` method can execute arbitrary Python code. Could it perhaps do something to invalidate the reference to -``item`` in :c:func:`bug`? You bet! Assuming that the list passed into -:c:func:`bug` is accessible to the :meth:`__del__` method, it could execute a +``item`` in :c:func:`!bug`? You bet! 
Assuming that the list passed into +:c:func:`!bug` is accessible to the :meth:`!__del__` method, it could execute a statement to the effect of ``del list[0]``, and assuming this was the last reference to that object, it would free the memory associated with it, thereby invalidating ``item``. @@ -1057,7 +1068,7 @@ increment the reference count. The correct version of the function reads:: This is a true story. An older version of Python contained variants of this bug and someone spent a considerable amount of time in a C debugger to figure out -why his :meth:`__del__` methods would fail... +why his :meth:`!__del__` methods would fail... The second case of problems with a borrowed reference is a variant involving threads. Normally, multiple threads in the Python interpreter can't get in each @@ -1208,14 +1219,14 @@ file corresponding to the module provides a macro that takes care of importing the module and retrieving its C API pointers; client modules only have to call this macro before accessing the C API. -The exporting module is a modification of the :mod:`spam` module from section -:ref:`extending-simpleexample`. The function :func:`spam.system` does not call +The exporting module is a modification of the :mod:`!spam` module from section +:ref:`extending-simpleexample`. The function :func:`!spam.system` does not call the C library function :c:func:`system` directly, but a function -:c:func:`PySpam_System`, which would of course do something more complicated in +:c:func:`!PySpam_System`, which would of course do something more complicated in reality (such as adding "spam" to every command). This function -:c:func:`PySpam_System` is also exported to other extension modules. +:c:func:`!PySpam_System` is also exported to other extension modules. -The function :c:func:`PySpam_System` is a plain C function, declared +The function :c:func:`!PySpam_System` is a plain C function, declared ``static`` like everything else:: static int @@ -1224,7 +1235,7 @@ The function :c:func:`PySpam_System` is a plain C function, declared return system(command); } -The function :c:func:`spam_system` is modified in a trivial way:: +The function :c:func:`!spam_system` is modified in a trivial way:: static PyObject * spam_system(PyObject *self, PyObject *args) @@ -1268,8 +1279,7 @@ function must take care of initializing the C API pointer array:: /* Create a Capsule containing the API pointer array's address */ c_api_object = PyCapsule_New((void *)PySpam_API, "spam._C_API", NULL); - if (PyModule_AddObject(m, "_C_API", c_api_object) < 0) { - Py_XDECREF(c_api_object); + if (PyModule_Add(m, "_C_API", c_api_object) < 0) { Py_DECREF(m); return NULL; } @@ -1278,7 +1288,7 @@ function must take care of initializing the C API pointer array:: } Note that ``PySpam_API`` is declared ``static``; otherwise the pointer -array would disappear when :func:`PyInit_spam` terminates! +array would disappear when :c:func:`!PyInit_spam` terminates! 
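The hunks above repeatedly replace :c:func:`PyModule_AddObject` with :c:func:`PyModule_AddObjectRef` or :c:func:`PyModule_Add`. A minimal sketch of how the three helpers differ in reference ownership, assuming a hypothetical module named ``example`` (the names and constants below are illustrative and are not taken from the patch)::

   #define PY_SSIZE_T_CLEAN
   #include <Python.h>

   /* Illustrative module definition; "example" is a made-up name. */
   static struct PyModuleDef examplemodule = {
       PyModuleDef_HEAD_INIT,
       "example",   /* m_name */
       NULL,        /* m_doc */
       -1,          /* m_size */
       NULL         /* m_methods */
   };

   PyMODINIT_FUNC
   PyInit_example(void)
   {
       PyObject *m = PyModule_Create(&examplemodule);
       if (m == NULL) {
           return NULL;
       }

       /* PyModule_AddObjectRef() (3.10+) never steals the reference: the
          caller keeps ownership of "answer" and releases it the same way on
          every path. */
       PyObject *answer = PyLong_FromLong(42);
       if (answer == NULL) {
           Py_DECREF(m);
           return NULL;
       }
       if (PyModule_AddObjectRef(m, "answer", answer) < 0) {
           Py_DECREF(answer);
           Py_DECREF(m);
           return NULL;
       }
       Py_DECREF(answer);

       /* PyModule_Add() (3.13+) always steals the reference, even on failure,
          so the result of a constructor can be passed directly with no extra
          Py_DECREF.  The older PyModule_AddObject() stole it only on success,
          which is the error-prone pattern the patch removes. */
       if (PyModule_Add(m, "answer_plus_one", PyLong_FromLong(43)) < 0) {
           Py_DECREF(m);
           return NULL;
       }

       return m;
   }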
The bulk of the work is in the header file :file:`spammodule.h`, which looks like this:: @@ -1332,8 +1342,8 @@ like this:: #endif /* !defined(Py_SPAMMODULE_H) */ All that a client module must do in order to have access to the function -:c:func:`PySpam_System` is to call the function (or rather macro) -:c:func:`import_spam` in its initialization function:: +:c:func:`!PySpam_System` is to call the function (or rather macro) +:c:func:`!import_spam` in its initialization function:: PyMODINIT_FUNC PyInit_client(void) diff --git a/Doc/extending/newtypes.rst b/Doc/extending/newtypes.rst index 3de849ade788887..7a92b3257c6cd31 100644 --- a/Doc/extending/newtypes.rst +++ b/Doc/extending/newtypes.rst @@ -149,7 +149,7 @@ done. This can be done using the :c:func:`PyErr_Fetch` and .. index:: single: string; object representation - builtin: repr + pair: built-in function; repr Object Presentation ------------------- @@ -168,7 +168,7 @@ representation of the instance for which it is called. Here is a simple example:: static PyObject * - newdatatype_repr(newdatatypeobject * obj) + newdatatype_repr(newdatatypeobject *obj) { return PyUnicode_FromFormat("Repr-ified_newdatatype{{size:%d}}", obj->obj_UnderlyingDatatypePtr->size); @@ -188,7 +188,7 @@ used instead. Here is a simple example:: static PyObject * - newdatatype_str(newdatatypeobject * obj) + newdatatype_str(newdatatypeobject *obj) { return PyUnicode_FromFormat("Stringified_newdatatype{{size:%d}}", obj->obj_UnderlyingDatatypePtr->size); @@ -270,7 +270,7 @@ structure:: One entry should be defined for each method provided by the type; no entries are needed for methods inherited from a base type. One additional entry is needed at the end; it is a sentinel that marks the end of the array. The -:attr:`ml_name` field of the sentinel must be ``NULL``. +:c:member:`~PyMethodDef.ml_name` field of the sentinel must be ``NULL``. The second table is used to define attributes which map directly to data stored in the instance. A variety of primitive C types are supported, and access may @@ -286,44 +286,19 @@ be read-only or read-write. The structures in the table are defined as:: For each entry in the table, a :term:`descriptor` will be constructed and added to the type which will be able to extract a value from the instance structure. The -:attr:`type` field should contain one of the type codes defined in the -:file:`structmember.h` header; the value will be used to determine how to -convert Python values to and from C values. The :attr:`flags` field is used to -store flags which control how the attribute can be accessed. - -The following flag constants are defined in :file:`structmember.h`; they may be -combined using bitwise-OR. - -+---------------------------+----------------------------------------------+ -| Constant | Meaning | -+===========================+==============================================+ -| :const:`READONLY` | Never writable. | -+---------------------------+----------------------------------------------+ -| :const:`PY_AUDIT_READ` | Emit an ``object.__getattr__`` | -| | :ref:`audit events <audit-events>` before | -| | reading. | -+---------------------------+----------------------------------------------+ - -.. versionchanged:: 3.10 - :const:`RESTRICTED`, :const:`READ_RESTRICTED` and :const:`WRITE_RESTRICTED` - are deprecated. However, :const:`READ_RESTRICTED` is an alias for - :const:`PY_AUDIT_READ`, so fields that specify either :const:`RESTRICTED` - or :const:`READ_RESTRICTED` will also raise an audit event. - -.. 
index:: - single: READONLY - single: READ_RESTRICTED - single: WRITE_RESTRICTED - single: RESTRICTED - single: PY_AUDIT_READ +:c:member:`~PyMemberDef.type` field should contain a type code like :c:macro:`Py_T_INT` or +:c:macro:`Py_T_DOUBLE`; the value will be used to determine how to +convert Python values to and from C values. The :c:member:`~PyMemberDef.flags` field is used to +store flags which control how the attribute can be accessed: you can set it to +:c:macro:`Py_READONLY` to prevent Python code from setting it. An interesting advantage of using the :c:member:`~PyTypeObject.tp_members` table to build descriptors that are used at runtime is that any attribute defined this way can have an associated doc string simply by providing the text in the table. An application can use the introspection API to retrieve the descriptor from the -class object, and get the doc string using its :attr:`__doc__` attribute. +class object, and get the doc string using its :attr:`!__doc__` attribute. -As with the :c:member:`~PyTypeObject.tp_methods` table, a sentinel entry with a :attr:`name` value +As with the :c:member:`~PyTypeObject.tp_methods` table, a sentinel entry with a :c:member:`~PyMethodDef.ml_name` value of ``NULL`` is required. .. XXX Descriptors need to be explained in more detail somewhere, but not here. @@ -348,7 +323,7 @@ called, so that if you do need to extend their functionality, you'll understand what needs to be done. The :c:member:`~PyTypeObject.tp_getattr` handler is called when the object requires an attribute -look-up. It is called in the same situations where the :meth:`__getattr__` +look-up. It is called in the same situations where the :meth:`~object.__getattr__` method of a class would be called. Here is an example:: @@ -362,13 +337,13 @@ Here is an example:: } PyErr_Format(PyExc_AttributeError, - "'%.50s' object has no attribute '%.400s'", - tp->tp_name, name); + "'%.100s' object has no attribute '%.400s'", + Py_TYPE(obj)->tp_name, name); return NULL; } -The :c:member:`~PyTypeObject.tp_setattr` handler is called when the :meth:`__setattr__` or -:meth:`__delattr__` method of a class instance would be called. When an +The :c:member:`~PyTypeObject.tp_setattr` handler is called when the :meth:`~object.__setattr__` or +:meth:`~object.__delattr__` method of a class instance would be called. When an attribute should be deleted, the third parameter will be ``NULL``. Here is an example that simply raises an exception; if this were really all you wanted, the :c:member:`~PyTypeObject.tp_setattr` handler should be set to ``NULL``. :: @@ -389,7 +364,7 @@ Object Comparison The :c:member:`~PyTypeObject.tp_richcompare` handler is called when comparisons are needed. It is analogous to the :ref:`rich comparison methods <richcmpfuncs>`, like -:meth:`__lt__`, and also called by :c:func:`PyObject_RichCompare` and +:meth:`!__lt__`, and also called by :c:func:`PyObject_RichCompare` and :c:func:`PyObject_RichCompareBool`. 
This function is called with two Python objects and the operator as arguments, @@ -404,7 +379,7 @@ Here is a sample implementation, for a datatype that is considered equal if the size of an internal pointer is equal:: static PyObject * - newdatatype_richcmp(PyObject *obj1, PyObject *obj2, int op) + newdatatype_richcmp(newdatatypeobject *obj1, newdatatypeobject *obj2, int op) { PyObject *result; int c, size1, size2; @@ -503,7 +478,7 @@ This function takes three arguments: Here is a toy ``tp_call`` implementation:: static PyObject * - newdatatype_call(newdatatypeobject *self, PyObject *args, PyObject *kwds) + newdatatype_call(newdatatypeobject *obj, PyObject *args, PyObject *kwds) { PyObject *result; const char *arg1; @@ -530,7 +505,7 @@ These functions provide support for the iterator protocol. Both handlers take exactly one parameter, the instance for which they are being called, and return a new reference. In the case of an error, they should set an exception and return ``NULL``. :c:member:`~PyTypeObject.tp_iter` corresponds -to the Python :meth:`__iter__` method, while :c:member:`~PyTypeObject.tp_iternext` +to the Python :meth:`~object.__iter__` method, while :c:member:`~PyTypeObject.tp_iternext` corresponds to the Python :meth:`~iterator.__next__` method. Any :term:`iterable` object must implement the :c:member:`~PyTypeObject.tp_iter` diff --git a/Doc/extending/newtypes_tutorial.rst b/Doc/extending/newtypes_tutorial.rst index 5d4a3f06dd54026..7eba9759119b3bf 100644 --- a/Doc/extending/newtypes_tutorial.rst +++ b/Doc/extending/newtypes_tutorial.rst @@ -36,8 +36,8 @@ So, if you want to define a new extension type, you need to create a new type object. This sort of thing can only be explained by example, so here's a minimal, but -complete, module that defines a new type named :class:`Custom` inside a C -extension module :mod:`custom`: +complete, module that defines a new type named :class:`!Custom` inside a C +extension module :mod:`!custom`: .. note:: What we're showing here is the traditional way of defining *static* @@ -45,17 +45,17 @@ extension module :mod:`custom`: allows defining heap-allocated extension types using the :c:func:`PyType_FromSpec` function, which isn't covered in this tutorial. -.. literalinclude:: ../includes/custom.c +.. literalinclude:: ../includes/newtypes/custom.c Now that's quite a bit to take in at once, but hopefully bits will seem familiar from the previous chapter. This file defines three things: -#. What a :class:`Custom` **object** contains: this is the ``CustomObject`` - struct, which is allocated once for each :class:`Custom` instance. -#. How the :class:`Custom` **type** behaves: this is the ``CustomType`` struct, +#. What a :class:`!Custom` **object** contains: this is the ``CustomObject`` + struct, which is allocated once for each :class:`!Custom` instance. +#. How the :class:`!Custom` **type** behaves: this is the ``CustomType`` struct, which defines a set of flags and function pointers that the interpreter inspects when specific operations are requested. -#. How to initialize the :mod:`custom` module: this is the ``PyInit_custom`` +#. How to initialize the :mod:`!custom` module: this is the ``PyInit_custom`` function and the associated ``custommodule`` struct. The first bit is:: @@ -88,7 +88,7 @@ standard Python floats:: The second bit is the definition of the type object. 
:: static PyTypeObject CustomType = { - PyVarObject_HEAD_INIT(NULL, 0) + .ob_base = PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "custom.Custom", .tp_doc = PyDoc_STR("Custom objects"), .tp_basicsize = sizeof(CustomObject), @@ -109,7 +109,7 @@ common practice to not specify them explicitly unless you need them. We're going to pick it apart, one field at a time:: - PyVarObject_HEAD_INIT(NULL, 0) + .ob_base = PyVarObject_HEAD_INIT(NULL, 0) This line is mandatory boilerplate to initialize the ``ob_base`` field mentioned above. :: @@ -127,8 +127,8 @@ our objects and in some error messages, for example: TypeError: can only concatenate str (not "custom.Custom") to str Note that the name is a dotted name that includes both the module name and the -name of the type within the module. The module in this case is :mod:`custom` and -the type is :class:`Custom`, so we set the type name to :class:`custom.Custom`. +name of the type within the module. The module in this case is :mod:`!custom` and +the type is :class:`!Custom`, so we set the type name to :class:`!custom.Custom`. Using the real dotted import path is important to make your type compatible with the :mod:`pydoc` and :mod:`pickle` modules. :: @@ -136,7 +136,7 @@ with the :mod:`pydoc` and :mod:`pickle` modules. :: .tp_itemsize = 0, This is so that Python knows how much memory to allocate when creating -new :class:`Custom` instances. :c:member:`~PyTypeObject.tp_itemsize` is +new :class:`!Custom` instances. :c:member:`~PyTypeObject.tp_itemsize` is only used for variable-sized objects and should otherwise be zero. .. note:: @@ -145,13 +145,13 @@ only used for variable-sized objects and should otherwise be zero. :c:member:`~PyTypeObject.tp_basicsize` as its base type, you may have problems with multiple inheritance. A Python subclass of your type will have to list your type first in its :attr:`~class.__bases__`, or else it will not be able to call your type's - :meth:`__new__` method without getting an error. You can avoid this problem by + :meth:`~object.__new__` method without getting an error. You can avoid this problem by ensuring that your type has a larger value for :c:member:`~PyTypeObject.tp_basicsize` than its base type does. Most of the time, this will be true anyway, because either your base type will be :class:`object`, or else you will be adding data members to your base type, and therefore increasing its size. -We set the class flags to :const:`Py_TPFLAGS_DEFAULT`. :: +We set the class flags to :c:macro:`Py_TPFLAGS_DEFAULT`. :: .tp_flags = Py_TPFLAGS_DEFAULT, @@ -164,31 +164,29 @@ We provide a doc string for the type in :c:member:`~PyTypeObject.tp_doc`. :: .tp_doc = PyDoc_STR("Custom objects"), To enable object creation, we have to provide a :c:member:`~PyTypeObject.tp_new` -handler. This is the equivalent of the Python method :meth:`__new__`, but +handler. This is the equivalent of the Python method :meth:`~object.__new__`, but has to be specified explicitly. In this case, we can just use the default implementation provided by the API function :c:func:`PyType_GenericNew`. 
:: .tp_new = PyType_GenericNew, Everything else in the file should be familiar, except for some code in -:c:func:`PyInit_custom`:: +:c:func:`!PyInit_custom`:: if (PyType_Ready(&CustomType) < 0) return; -This initializes the :class:`Custom` type, filling in a number of members -to the appropriate default values, including :attr:`ob_type` that we initially +This initializes the :class:`!Custom` type, filling in a number of members +to the appropriate default values, including :c:member:`~PyObject.ob_type` that we initially set to ``NULL``. :: - Py_INCREF(&CustomType); - if (PyModule_AddObject(m, "Custom", (PyObject *) &CustomType) < 0) { - Py_DECREF(&CustomType); + if (PyModule_AddObjectRef(m, "Custom", (PyObject *) &CustomType) < 0) { Py_DECREF(m); return NULL; } This adds the type to the module dictionary. This allows us to create -:class:`Custom` instances by calling the :class:`Custom` class: +:class:`!Custom` instances by calling the :class:`!Custom` class: .. code-block:: pycon @@ -196,57 +194,46 @@ This adds the type to the module dictionary. This allows us to create >>> mycustom = custom.Custom() That's it! All that remains is to build it; put the above code in a file called -:file:`custom.c` and: +:file:`custom.c`, + +.. literalinclude:: ../includes/newtypes/pyproject.toml + +in a file called :file:`pyproject.toml`, and .. code-block:: python - from distutils.core import setup, Extension - setup(name="custom", version="1.0", - ext_modules=[Extension("custom", ["custom.c"])]) + from setuptools import Extension, setup + setup(ext_modules=[Extension("custom", ["custom.c"])]) in a file called :file:`setup.py`; then typing .. code-block:: shell-session - $ python setup.py build + $ python -m pip install . -at a shell should produce a file :file:`custom.so` in a subdirectory; move to -that directory and fire up Python --- you should be able to ``import custom`` and -play around with Custom objects. +in a shell should produce a file :file:`custom.so` in a subdirectory +and install it; now fire up Python --- you should be able to ``import custom`` +and play around with ``Custom`` objects. That wasn't so hard, was it? Of course, the current Custom type is pretty uninteresting. It has no data and doesn't do anything. It can't even be subclassed. -.. note:: - While this documentation showcases the standard :mod:`distutils` module - for building C extensions, it is recommended in real-world use cases to - use the newer and better-maintained ``setuptools`` library. Documentation - on how to do this is out of scope for this document and can be found in - the `Python Packaging User's Guide <https://packaging.python.org/tutorials/distributing-packages/>`_. - Adding data and methods to the Basic example ============================================ Let's extend the basic example to add some data and methods. Let's also make -the type usable as a base class. We'll create a new module, :mod:`custom2` that +the type usable as a base class. We'll create a new module, :mod:`!custom2` that adds these capabilities: -.. literalinclude:: ../includes/custom2.c +.. literalinclude:: ../includes/newtypes/custom2.c This version of the module has a number of changes. -We've added an extra include:: - - #include <structmember.h> - -This include provides declarations that we use to handle attributes, as -described a bit later. - -The :class:`Custom` type now has three data attributes in its C struct, +The :class:`!Custom` type now has three data attributes in its C struct, *first*, *last*, and *number*. 
The *first* and *last* variables are Python strings containing first and last names. The *number* attribute is a C integer. @@ -279,7 +266,7 @@ This method first clears the reference counts of the two Python attributes. ``NULL`` (which might happen here if ``tp_new`` failed midway). It then calls the :c:member:`~PyTypeObject.tp_free` member of the object's type (computed by ``Py_TYPE(self)``) to free the object's memory. Note that -the object's type might not be :class:`CustomType`, because the object may +the object's type might not be :class:`!CustomType`, because the object may be an instance of a subclass. .. note:: @@ -318,10 +305,10 @@ and install it in the :c:member:`~PyTypeObject.tp_new` member:: .tp_new = Custom_new, The ``tp_new`` handler is responsible for creating (as opposed to initializing) -objects of the type. It is exposed in Python as the :meth:`__new__` method. +objects of the type. It is exposed in Python as the :meth:`~object.__new__` method. It is not required to define a ``tp_new`` member, and indeed many extension types will simply reuse :c:func:`PyType_GenericNew` as done in the first -version of the ``Custom`` type above. In this case, we use the ``tp_new`` +version of the :class:`!Custom` type above. In this case, we use the ``tp_new`` handler to initialize the ``first`` and ``last`` attributes to non-``NULL`` default values. @@ -352,7 +339,7 @@ result against ``NULL`` before proceeding. .. note:: If you are creating a co-operative :c:member:`~PyTypeObject.tp_new` (one - that calls a base type's :c:member:`~PyTypeObject.tp_new` or :meth:`__new__`), + that calls a base type's :c:member:`~PyTypeObject.tp_new` or :meth:`~object.__new__`), you must *not* try to determine what method to call using method resolution order at runtime. Always statically determine what type you are going to call, and call its :c:member:`~PyTypeObject.tp_new` directly, or via @@ -395,14 +382,14 @@ by filling the :c:member:`~PyTypeObject.tp_init` slot. :: .tp_init = (initproc) Custom_init, The :c:member:`~PyTypeObject.tp_init` slot is exposed in Python as the -:meth:`__init__` method. It is used to initialize an object after it's +:meth:`~object.__init__` method. It is used to initialize an object after it's created. Initializers always accept positional and keyword arguments, and they should return either ``0`` on success or ``-1`` on error. Unlike the ``tp_new`` handler, there is no guarantee that ``tp_init`` is called at all (for example, the :mod:`pickle` module by default -doesn't call :meth:`__init__` on unpickled instances). It can also be -called multiple times. Anyone can call the :meth:`__init__` method on +doesn't call :meth:`~object.__init__` on unpickled instances). It can also be +called multiple times. Anyone can call the :meth:`!__init__` method on our objects. For this reason, we have to be extra careful when assigning the new attribute values. We might be tempted, for example to assign the ``first`` member like this:: @@ -436,11 +423,11 @@ We want to expose our instance variables as attributes. There are a number of ways to do that. 
The simplest way is to define member definitions:: static PyMemberDef Custom_members[] = { - {"first", T_OBJECT_EX, offsetof(CustomObject, first), 0, + {"first", Py_T_OBJECT_EX, offsetof(CustomObject, first), 0, "first name"}, - {"last", T_OBJECT_EX, offsetof(CustomObject, last), 0, + {"last", Py_T_OBJECT_EX, offsetof(CustomObject, last), 0, "last name"}, - {"number", T_INT, offsetof(CustomObject, number), 0, + {"number", Py_T_INT, offsetof(CustomObject, number), 0, "custom number"}, {NULL} /* Sentinel */ }; @@ -460,7 +447,7 @@ Further, the attributes can be deleted, setting the C pointers to ``NULL``. Eve though we can make sure the members are initialized to non-``NULL`` values, the members can be set to ``NULL`` if the attributes are deleted. -We define a single method, :meth:`Custom.name()`, that outputs the objects name as the +We define a single method, :meth:`!Custom.name()`, that outputs the objects name as the concatenation of the first and last names. :: static PyObject * @@ -477,8 +464,8 @@ concatenation of the first and last names. :: return PyUnicode_FromFormat("%S %S", self->first, self->last); } -The method is implemented as a C function that takes a :class:`Custom` (or -:class:`Custom` subclass) instance as the first argument. Methods always take an +The method is implemented as a C function that takes a :class:`!Custom` (or +:class:`!Custom` subclass) instance as the first argument. Methods always take an instance as the first argument. Methods often take positional and keyword arguments as well, but in this case we don't take any and don't need to accept a positional argument tuple or keyword argument dictionary. This method is @@ -489,8 +476,8 @@ equivalent to the Python method: def name(self): return "%s %s" % (self.first, self.last) -Note that we have to check for the possibility that our :attr:`first` and -:attr:`last` members are ``NULL``. This is because they can be deleted, in which +Note that we have to check for the possibility that our :attr:`!first` and +:attr:`!last` members are ``NULL``. This is because they can be deleted, in which case they are set to ``NULL``. It would be better to prevent deletion of these attributes and to restrict the attribute values to be strings. We'll see how to do that in the next section. @@ -505,7 +492,7 @@ definitions:: {NULL} /* Sentinel */ }; -(note that we used the :const:`METH_NOARGS` flag to indicate that the method +(note that we used the :c:macro:`METH_NOARGS` flag to indicate that the method is expecting no arguments other than *self*) and assign it to the :c:member:`~PyTypeObject.tp_methods` slot:: @@ -515,41 +502,45 @@ and assign it to the :c:member:`~PyTypeObject.tp_methods` slot:: Finally, we'll make our type usable as a base class for subclassing. We've written our methods carefully so far so that they don't make any assumptions about the type of the object being created or used, so all we need to do is -to add the :const:`Py_TPFLAGS_BASETYPE` to our class flag definition:: +to add the :c:macro:`Py_TPFLAGS_BASETYPE` to our class flag definition:: .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, -We rename :c:func:`PyInit_custom` to :c:func:`PyInit_custom2`, update the +We rename :c:func:`!PyInit_custom` to :c:func:`!PyInit_custom2`, update the module name in the :c:type:`PyModuleDef` struct, and update the full class name in the :c:type:`PyTypeObject` struct. -Finally, we update our :file:`setup.py` file to build the new module: +Finally, we update our :file:`setup.py` file to include the new module, .. 
code-block:: python - from distutils.core import setup, Extension - setup(name="custom", version="1.0", - ext_modules=[ - Extension("custom", ["custom.c"]), - Extension("custom2", ["custom2.c"]), - ]) + from setuptools import Extension, setup + setup(ext_modules=[ + Extension("custom", ["custom.c"]), + Extension("custom2", ["custom2.c"]), + ]) + +and then we re-install so that we can ``import custom2``: + +.. code-block:: shell-session + $ python -m pip install . Providing finer control over data attributes ============================================ -In this section, we'll provide finer control over how the :attr:`first` and -:attr:`last` attributes are set in the :class:`Custom` example. In the previous -version of our module, the instance variables :attr:`first` and :attr:`last` +In this section, we'll provide finer control over how the :attr:`!first` and +:attr:`!last` attributes are set in the :class:`!Custom` example. In the previous +version of our module, the instance variables :attr:`!first` and :attr:`!last` could be set to non-string values or even deleted. We want to make sure that these attributes always contain strings. -.. literalinclude:: ../includes/custom3.c +.. literalinclude:: ../includes/newtypes/custom3.c -To provide greater control, over the :attr:`first` and :attr:`last` attributes, +To provide greater control, over the :attr:`!first` and :attr:`!last` attributes, we'll use custom getter and setter functions. Here are the functions for -getting and setting the :attr:`first` attribute:: +getting and setting the :attr:`!first` attribute:: static PyObject * Custom_getfirst(CustomObject *self, void *closure) @@ -578,13 +569,13 @@ getting and setting the :attr:`first` attribute:: return 0; } -The getter function is passed a :class:`Custom` object and a "closure", which is +The getter function is passed a :class:`!Custom` object and a "closure", which is a void pointer. In this case, the closure is ignored. (The closure supports an advanced usage in which definition data is passed to the getter and setter. This could, for example, be used to allow a single set of getter and setter functions that decide the attribute to get or set based on data in the closure.) -The setter function is passed the :class:`Custom` object, the new value, and the +The setter function is passed the :class:`!Custom` object, the new value, and the closure. The new value may be ``NULL``, in which case the attribute is being deleted. In our setter, we raise an error if the attribute is deleted or if its new value is not a string. @@ -609,7 +600,7 @@ above. In this case, we aren't using a closure, so we just pass ``NULL``. We also remove the member definitions for these attributes:: static PyMemberDef Custom_members[] = { - {"number", T_INT, offsetof(CustomObject, number), 0, + {"number", Py_T_INT, offsetof(CustomObject, number), 0, "custom number"}, {NULL} /* Sentinel */ }; @@ -673,11 +664,11 @@ still has a reference from itself. Its reference count doesn't drop to zero. Fortunately, Python's cyclic garbage collector will eventually figure out that the list is garbage and free it. -In the second version of the :class:`Custom` example, we allowed any kind of -object to be stored in the :attr:`first` or :attr:`last` attributes [#]_. +In the second version of the :class:`!Custom` example, we allowed any kind of +object to be stored in the :attr:`!first` or :attr:`!last` attributes [#]_. 
Besides, in the second and third versions, we allowed subclassing -:class:`Custom`, and subclasses may add arbitrary attributes. For any of -those two reasons, :class:`Custom` objects can participate in cycles: +:class:`!Custom`, and subclasses may add arbitrary attributes. For any of +those two reasons, :class:`!Custom` objects can participate in cycles: .. code-block:: pycon @@ -687,11 +678,11 @@ those two reasons, :class:`Custom` objects can participate in cycles: >>> n = Derived() >>> n.some_attribute = n -To allow a :class:`Custom` instance participating in a reference cycle to -be properly detected and collected by the cyclic GC, our :class:`Custom` type +To allow a :class:`!Custom` instance participating in a reference cycle to +be properly detected and collected by the cyclic GC, our :class:`!Custom` type needs to fill two additional slots and to enable a flag that enables these slots: -.. literalinclude:: ../includes/custom4.c +.. literalinclude:: ../includes/newtypes/custom4.c First, the traversal method lets the cyclic GC know about subobjects that could @@ -715,8 +706,8 @@ participate in cycles:: } For each subobject that can participate in cycles, we need to call the -:c:func:`visit` function, which is passed to the traversal method. The -:c:func:`visit` function takes as arguments the subobject and the extra argument +:c:func:`!visit` function, which is passed to the traversal method. The +:c:func:`!visit` function takes as arguments the subobject and the extra argument *arg* passed to the traversal method. It returns an integer value that must be returned if it is non-zero. @@ -781,7 +772,7 @@ and ``Custom_clear``:: Py_TYPE(self)->tp_free((PyObject *) self); } -Finally, we add the :const:`Py_TPFLAGS_HAVE_GC` flag to the class flags:: +Finally, we add the :c:macro:`Py_TPFLAGS_HAVE_GC` flag to the class flags:: .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, @@ -798,9 +789,9 @@ types. It is easiest to inherit from the built in types, since an extension can easily use the :c:type:`PyTypeObject` it needs. It can be difficult to share these :c:type:`PyTypeObject` structures between extension modules. -In this example we will create a :class:`SubList` type that inherits from the +In this example we will create a :class:`!SubList` type that inherits from the built-in :class:`list` type. The new type will be completely compatible with -regular lists, but will have an additional :meth:`increment` method that +regular lists, but will have an additional :meth:`!increment` method that increases an internal counter: .. code-block:: pycon @@ -815,10 +806,10 @@ increases an internal counter: >>> print(s.increment()) 2 -.. literalinclude:: ../includes/sublist.c +.. literalinclude:: ../includes/newtypes/sublist.c -As you can see, the source code closely resembles the :class:`Custom` examples in +As you can see, the source code closely resembles the :class:`!Custom` examples in previous sections. We will break down the main differences between them. :: typedef struct { @@ -830,7 +821,7 @@ The primary difference for derived type objects is that the base type's object structure must be the first value. The base type will already include the :c:func:`PyObject_HEAD` at the beginning of its structure. 
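The paragraph above notes that a derived type must embed the base type's object struct as its first member. A minimal sketch of such a layout, assuming a hypothetical list subclass with one extra counter field (names are illustrative and not taken from the patch)::

   #define PY_SSIZE_T_CLEAN
   #include <Python.h>

   /* Illustrative layout only: the base type's struct comes first, and it
      already begins with PyObject_HEAD, so a MyListObject* can be safely
      viewed as a PyListObject* or a PyObject*. */
   typedef struct {
       PyListObject list;   /* base type's object struct, first member */
       int counter;         /* subclass-specific state */
   } MyListObject;

   static Py_ssize_t
   MyList_weighted_len(MyListObject *self)
   {
       /* The same pointer works with the base type's API... */
       Py_ssize_t n = PyList_GET_SIZE((PyObject *) self);
       /* ...and still reaches the extra field added by the subclass. */
       return n + self->counter;
   }

Because the memory layouts line up this way, the base type's slots (such as the ``PyList_Type.tp_init`` call shown further down) can be invoked on the subclass instance without any conversion.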
-When a Python object is a :class:`SubList` instance, its ``PyObject *`` pointer +When a Python object is a :class:`!SubList` instance, its ``PyObject *`` pointer can be safely cast to both ``PyListObject *`` and ``SubListObject *``:: static int @@ -842,7 +833,7 @@ can be safely cast to both ``PyListObject *`` and ``SubListObject *``:: return 0; } -We see above how to call through to the :attr:`__init__` method of the base +We see above how to call through to the :meth:`~object.__init__` method of the base type. This pattern is important when writing a type with custom @@ -869,9 +860,7 @@ function:: if (m == NULL) return NULL; - Py_INCREF(&SubListType); - if (PyModule_AddObject(m, "SubList", (PyObject *) &SubListType) < 0) { - Py_DECREF(&SubListType); + if (PyModule_AddObjectRef(m, "SubList", (PyObject *) &SubListType) < 0) { Py_DECREF(m); return NULL; } @@ -886,7 +875,7 @@ slot with :c:func:`PyType_GenericNew` -- the allocation function from the base type will be inherited. After that, calling :c:func:`PyType_Ready` and adding the type object to the -module is the same as with the basic :class:`Custom` examples. +module is the same as with the basic :class:`!Custom` examples. .. rubric:: Footnotes diff --git a/Doc/extending/windows.rst b/Doc/extending/windows.rst index 1129b0968bc4e60..e366d6cb9f79e3b 100644 --- a/Doc/extending/windows.rst +++ b/Doc/extending/windows.rst @@ -132,4 +132,4 @@ modules (including Python) to be able to see your identifiers, you have to say Developer Studio will throw in a lot of import libraries that you do not really need, adding about 100K to your executable. To get rid of them, use the Project Settings dialog, Link tab, to specify *ignore default libraries*. Add the -correct :file:`msvcrtxx.lib` to the list of libraries. +correct :file:`msvcrt{xx}.lib` to the list of libraries. diff --git a/Doc/faq/design.rst b/Doc/faq/design.rst index 11d01374dc1e791..ae02c443e5938bf 100644 --- a/Doc/faq/design.rst +++ b/Doc/faq/design.rst @@ -584,9 +584,9 @@ exhaustive test suites that exercise every line of code in a module. An appropriate testing discipline can help build large complex applications in Python as well as having interface specifications would. In fact, it can be better because an interface specification cannot test certain properties of a -program. For example, the :meth:`list.append` method is expected to add new elements +program. For example, the :meth:`!list.append` method is expected to add new elements to the end of some internal list; an interface specification cannot test that -your :meth:`list.append` implementation will actually do this correctly, but it's +your :meth:`!list.append` implementation will actually do this correctly, but it's trivial to check this property in a test suite. Writing test suites is very helpful, and you might want to design your code to diff --git a/Doc/faq/extending.rst b/Doc/faq/extending.rst index 07282639e4f9b4b..2a8b976925d042c 100644 --- a/Doc/faq/extending.rst +++ b/Doc/faq/extending.rst @@ -42,7 +42,7 @@ on what you're trying to do. .. XXX make sure these all work `Cython <https://cython.org>`_ and its relative `Pyrex -<https://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/>`_ are compilers +<https://www.csse.canterbury.ac.nz/greg.ewing/python/Pyrex/>`_ are compilers that accept a slightly modified form of Python and generate the corresponding C code. Cython and Pyrex make it possible to write an extension without having to learn Python's C API. 
@@ -81,13 +81,13 @@ How do I extract C values from a Python object? That depends on the object's type. If it's a tuple, :c:func:`PyTuple_Size` returns its length and :c:func:`PyTuple_GetItem` returns the item at a specified -index. Lists have similar functions, :c:func:`PyListSize` and +index. Lists have similar functions, :c:func:`PyList_Size` and :c:func:`PyList_GetItem`. For bytes, :c:func:`PyBytes_Size` returns its length and :c:func:`PyBytes_AsStringAndSize` provides a pointer to its value and its length. Note that Python bytes objects may contain null bytes so C's -:c:func:`strlen` should not be used. +:c:func:`!strlen` should not be used. To test the type of an object, first make sure it isn't ``NULL``, and then use :c:func:`PyBytes_Check`, :c:func:`PyTuple_Check`, :c:func:`PyList_Check`, etc. diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst index 489bca76432d859..8727332594bda69 100644 --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -54,8 +54,8 @@ commercial use, to sell copies of Python in source or binary form (modified or unmodified), or to sell products that incorporate Python in some form. We would still like to know about all commercial use of Python, of course. -See `the PSF license page <https://www.python.org/psf/license/>`_ to find further -explanations and a link to the full text of the license. +See `the license page <https://docs.python.org/3/license.html>`_ to find further +explanations and the full text of the PSF License. The Python logo is trademarked, and in certain cases permission is required to use it. Consult `the Trademark Usage Policy @@ -135,7 +135,7 @@ Python versions are numbered "A.B.C" or "A.B": See :pep:`6` for more information about bugfix releases. -Not all releases are bugfix releases. In the run-up to a new major release, a +Not all releases are bugfix releases. In the run-up to a new feature release, a series of development releases are made, denoted as alpha, beta, or release candidate. Alphas are early releases in which interfaces aren't yet finalized; it's not unexpected to see an interface change between two alpha releases. @@ -215,7 +215,7 @@ every day, and Usenet readers are often more able to cope with this volume. Announcements of new software releases and events can be found in comp.lang.python.announce, a low-traffic moderated list that receives about five postings per day. It's available as `the python-announce mailing list -<https://mail.python.org/mailman/listinfo/python-announce-list>`_. +<https://mail.python.org/mailman3/lists/python-announce-list.python.org/>`_. More info about other mailing lists and newsgroups can be found at https://www.python.org/community/lists/. @@ -248,8 +248,8 @@ Are there any published articles about Python that I can reference? It's probably best to cite your favorite book about Python. -The very first article about Python was written in 1991 and is now quite -outdated. +The `very first article <https://ir.cwi.nl/pub/18204>`_ about Python was +written in 1991 and is now quite outdated. Guido van Rossum and Jelke de Boer, "Interactively Testing Remote Servers Using the Python Programming Language", CWI Quarterly, Volume 4, Issue 4 @@ -297,9 +297,9 @@ How stable is Python? Very stable. New, stable releases have been coming out roughly every 6 to 18 months since 1991, and this seems likely to continue. As of version 3.9, -Python will have a major new release every 12 months (:pep:`602`). +Python will have a new feature release every 12 months (:pep:`602`). 
-The developers issue "bugfix" releases of older versions, so the stability of +The developers issue bugfix releases of older versions, so the stability of existing releases gradually improves. Bugfix releases, indicated by a third component of the version number (e.g. 3.5.3, 3.6.2), are managed for stability; only fixes for known problems are included in a bugfix release, and it's @@ -352,7 +352,7 @@ titled "Python X.Y Release Schedule", where X.Y is a version that hasn't been publicly released yet. New development is discussed on `the python-dev mailing list -<https://mail.python.org/mailman/listinfo/python-dev/>`_. +<https://mail.python.org/mailman3/lists/python-dev.python.org/>`_. Is it reasonable to propose incompatible changes to Python? diff --git a/Doc/faq/gui.rst b/Doc/faq/gui.rst index 023ffdf0db510a6..cfa60feceb31b77 100644 --- a/Doc/faq/gui.rst +++ b/Doc/faq/gui.rst @@ -43,18 +43,11 @@ applications, the applications will not be truly stand-alone, as the application will still need the Tcl and Tk libraries. One solution is to ship the application with the Tcl and Tk libraries, and point -to them at run-time using the :envvar:`TCL_LIBRARY` and :envvar:`TK_LIBRARY` +to them at run-time using the :envvar:`!TCL_LIBRARY` and :envvar:`!TK_LIBRARY` environment variables. -To get truly stand-alone applications, the Tcl scripts that form the library -have to be integrated into the application as well. One tool supporting that is -SAM (stand-alone modules), which is part of the Tix distribution -(https://tix.sourceforge.net/). - -Build Tix with SAM enabled, perform the appropriate call to -:c:func:`Tclsam_init`, etc. inside Python's -:file:`Modules/tkappinit.c`, and link with libtclsam and libtksam (you -might include the Tix libraries as well). +Various third-party freeze libraries such as py2exe and cx_Freeze have +handling for Tkinter applications built-in. Can I have Tk events handled while waiting for I/O? @@ -62,7 +55,7 @@ Can I have Tk events handled while waiting for I/O? On platforms other than Windows, yes, and you don't even need threads! But you'll have to restructure your I/O -code a bit. Tk has the equivalent of Xt's :c:func:`XtAddInput()` call, which allows you +code a bit. Tk has the equivalent of Xt's :c:func:`!XtAddInput` call, which allows you to register a callback function which will be called from the Tk mainloop when I/O is possible on a file descriptor. See :ref:`tkinter-file-handlers`. @@ -70,8 +63,9 @@ I/O is possible on a file descriptor. See :ref:`tkinter-file-handlers`. I can't get key bindings to work in Tkinter: why? ------------------------------------------------- -An often-heard complaint is that event handlers bound to events with the -:meth:`bind` method don't get handled even when the appropriate key is pressed. +An often-heard complaint is that event handlers :ref:`bound <bindings-and-events>` +to events with the :meth:`!bind` method +don't get handled even when the appropriate key is pressed. The most common cause is that the widget to which the binding applies doesn't have "keyboard focus". Check out the Tk documentation for the focus command. diff --git a/Doc/faq/library.rst b/Doc/faq/library.rst index a9cde456575020f..476a43d9c288f18 100644 --- a/Doc/faq/library.rst +++ b/Doc/faq/library.rst @@ -111,7 +111,7 @@ Is there an equivalent to C's onexit() in Python? ------------------------------------------------- The :mod:`atexit` module provides a register function that is similar to C's -:c:func:`onexit`. +:c:func:`!onexit`. 
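For illustration, a minimal sketch of the Python equivalent (the handler name is
just an example)::

    import atexit

    def cleanup():
        # Runs once at normal interpreter shutdown, much like a C onexit() handler.
        print("shutting down")

    atexit.register(cleanup)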
Why don't my signal handlers work? @@ -397,7 +397,7 @@ These aren't:: D[x] = D[x] + 1 Operations that replace other objects may invoke those other objects' -:meth:`__del__` method when their reference count reaches zero, and that can +:meth:`~object.__del__` method when their reference count reaches zero, and that can affect things. This is especially true for the mass updates to dictionaries and lists. When in doubt, use a mutex! @@ -566,7 +566,7 @@ use ``p.read(n)``. Note on a bug in popen2: unless your program calls ``wait()`` or ``waitpid()``, finished child processes are never removed, and eventually calls to popen2 will fail because of a limit on the number of child - processes. Calling :func:`os.waitpid` with the :data:`os.WNOHANG` option can + processes. Calling :func:`os.waitpid` with the :const:`os.WNOHANG` option can prevent this; a good place to insert such a call would be before calling ``popen2`` again. @@ -669,41 +669,6 @@ and client-side web systems. A summary of available frameworks is maintained by Paul Boddie at https://wiki.python.org/moin/WebProgramming\ . -Cameron Laird maintains a useful set of pages about Python web technologies at -https://web.archive.org/web/20210224183619/http://phaseit.net/claird/comp.lang.python/web_python. - - -How can I mimic CGI form submission (METHOD=POST)? --------------------------------------------------- - -I would like to retrieve web pages that are the result of POSTing a form. Is -there existing code that would let me do this easily? - -Yes. Here's a simple example that uses :mod:`urllib.request`:: - - #!/usr/local/bin/python - - import urllib.request - - # build the query string - qs = "First=Josephine&MI=Q&Last=Public" - - # connect and send the server a path - req = urllib.request.urlopen('http://www.some-server.out-there' - '/cgi-bin/some-cgi-script', data=qs) - with req: - msg, hdrs = req.read(), req.info() - -Note that in general for percent-encoded POST operations, query strings must be -quoted using :func:`urllib.parse.urlencode`. For example, to send -``name=Guy Steele, Jr.``:: - - >>> import urllib.parse - >>> urllib.parse.urlencode({'name': 'Guy Steele, Jr.'}) - 'name=Guy+Steele%2C+Jr.' - -.. seealso:: :ref:`urllib-howto` for extensive examples. - What module should I use to help with generating HTML? ------------------------------------------------------ @@ -765,14 +730,17 @@ The :mod:`select` module is commonly used to help with asynchronous I/O on sockets. To prevent the TCP connect from blocking, you can set the socket to non-blocking -mode. Then when you do the :meth:`socket.connect`, you will either connect immediately +mode. Then when you do the :meth:`~socket.socket.connect`, +you will either connect immediately (unlikely) or get an exception that contains the error number as ``.errno``. ``errno.EINPROGRESS`` indicates that the connection is in progress, but hasn't finished yet. Different OSes will return different values, so you're going to have to check what's returned on your system. -You can use the :meth:`socket.connect_ex` method to avoid creating an exception. It will -just return the errno value. To poll, you can call :meth:`socket.connect_ex` again later +You can use the :meth:`~socket.socket.connect_ex` method +to avoid creating an exception. +It will just return the errno value. +To poll, you can call :meth:`~socket.socket.connect_ex` again later -- ``0`` or ``errno.EISCONN`` indicate that you're connected -- or you can pass this socket to :meth:`select.select` to check if it's writable. 
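A rough sketch of the approach described above (the host, port and timeout are
placeholders)::

    import errno
    import select
    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setblocking(False)

    err = s.connect_ex(('www.example.com', 80))  # returns an errno value instead of raising
    # errno.EINPROGRESS means the connect is still in progress; as noted above,
    # other systems may report a different value here.
    if err not in (0, errno.EINPROGRESS):
        raise OSError(err, 'connect failed')

    # Wait for the socket to become writable, i.e. for the connection attempt to finish.
    _, writable, _ = select.select([], [s], [], 5.0)
    if writable and s.connect_ex(('www.example.com', 80)) in (0, errno.EISCONN):
        print('connected')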
@@ -780,7 +748,7 @@ socket to :meth:`select.select` to check if it's writable. The :mod:`asyncio` module provides a general purpose single-threaded and concurrent asynchronous library, which can be used for writing non-blocking network code. - The third-party `Twisted <https://twistedmatrix.com/trac/>`_ library is + The third-party `Twisted <https://twisted.org/>`_ library is a popular and feature-rich alternative. diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst index f3c5b0f76494aae..0a88c5f6384f2be 100644 --- a/Doc/faq/programming.rst +++ b/Doc/faq/programming.rst @@ -61,7 +61,7 @@ Yes. `Pyflakes <https://github.com/PyCQA/pyflakes>`_ do basic checking that will help you catch bugs sooner. -Static type checkers such as `Mypy <http://mypy-lang.org/>`_, +Static type checkers such as `Mypy <https://mypy-lang.org/>`_, `Pyre <https://pyre-check.org/>`_, and `Pytype <https://github.com/google/pytype>`_ can check type hints in Python source code. @@ -113,6 +113,8 @@ Yes. The coding style required for standard library modules is documented as Core Language ============= +.. _faq-unboundlocalerror: + Why am I getting an UnboundLocalError when the variable has a value? -------------------------------------------------------------------- @@ -452,7 +454,7 @@ There are two factors that produce this result: (the list), and both ``x`` and ``y`` refer to it. 2) Lists are :term:`mutable`, which means that you can change their content. -After the call to :meth:`~list.append`, the content of the mutable object has +After the call to :meth:`!append`, the content of the mutable object has changed from ``[]`` to ``[10]``. Since both the variables refer to the same object, using either name accesses the modified value ``[10]``. @@ -922,12 +924,12 @@ module:: 'Hello, there!' >>> import array - >>> a = array.array('u', s) + >>> a = array.array('w', s) >>> print(a) - array('u', 'Hello, world') + array('w', 'Hello, world') >>> a[0] = 'y' >>> print(a) - array('u', 'yello, world') + array('w', 'yello, world') >>> a.tounicode() 'yello, world' @@ -1024,6 +1026,46 @@ What does 'UnicodeDecodeError' or 'UnicodeEncodeError' error mean? See the :ref:`unicode-howto`. +.. _faq-programming-raw-string-backslash: + +Can I end a raw string with an odd number of backslashes? +--------------------------------------------------------- + +A raw string ending with an odd number of backslashes will escape the string's quote:: + + >>> r'C:\this\will\not\work\' + File "<stdin>", line 1 + r'C:\this\will\not\work\' + ^ + SyntaxError: unterminated string literal (detected at line 1) + +There are several workarounds for this. One is to use regular strings and double +the backslashes:: + + >>> 'C:\\this\\will\\work\\' + 'C:\\this\\will\\work\\' + +Another is to concatenate a regular string containing an escaped backslash to the +raw string:: + + >>> r'C:\this\will\work' '\\' + 'C:\\this\\will\\work\\' + +It is also possible to use :func:`os.path.join` to append a backslash on Windows:: + + >>> os.path.join(r'C:\this\will\work', '') + 'C:\\this\\will\\work\\' + +Note that while a backslash will "escape" a quote for the purposes of +determining where the raw string ends, no escaping occurs when interpreting the +value of the raw string. That is, the backslash remains present in the value of +the raw string:: + + >>> r'backslash\'preserved' + "backslash\\'preserved" + +Also see the specification in the :ref:`language reference <strings>`. 
+ Performance =========== @@ -1279,13 +1321,25 @@ Or, you can use an extension that provides a matrix datatype; `NumPy <https://numpy.org/>`_ is the best known. -How do I apply a method to a sequence of objects? -------------------------------------------------- +How do I apply a method or function to a sequence of objects? +------------------------------------------------------------- -Use a list comprehension:: +To call a method or function and accumulate the return values is a list, +a :term:`list comprehension` is an elegant solution:: result = [obj.method() for obj in mylist] + result = [function(obj) for obj in mylist] + +To just run the method or function without saving the return values, +a plain :keyword:`for` loop will suffice:: + + for obj in mylist: + obj.method() + + for obj in mylist: + function(obj) + .. _faq-augmented-assignment-tuple-error: Why does a_tuple[i] += ['item'] raise an exception when the addition works? @@ -1343,7 +1397,7 @@ To see why this happens, you need to know that (a) if an object implements an :meth:`~object.__iadd__` magic method, it gets called when the ``+=`` augmented assignment is executed, and its return value is what gets used in the assignment statement; -and (b) for lists, :meth:`!__iadd__` is equivalent to calling :meth:`~list.extend` on the list +and (b) for lists, :meth:`!__iadd__` is equivalent to calling :meth:`!extend` on the list and returning the list. That's why we say that for lists, ``+=`` is a "shorthand" for :meth:`!list.extend`:: @@ -1849,7 +1903,7 @@ identity tests. This prevents the code from being confused by objects such as ``float('NaN')`` that are not equal to themselves. For example, here is the implementation of -:meth:`collections.abc.Sequence.__contains__`:: +:meth:`!collections.abc.Sequence.__contains__`:: def __contains__(self, value): for v in self: @@ -1925,7 +1979,7 @@ method result will be released right away. The disadvantage is that if instances accumulate, so too will the accumulated method results. They can grow without bound. -The *lru_cache* approach works with methods that have hashable +The *lru_cache* approach works with methods that have :term:`hashable` arguments. It creates a reference to the instance unless special efforts are made to pass in weak references. diff --git a/Doc/faq/windows.rst b/Doc/faq/windows.rst index 7768aafd979685b..c0c92fdbbc84d12 100644 --- a/Doc/faq/windows.rst +++ b/Doc/faq/windows.rst @@ -167,7 +167,7 @@ How can I embed Python into a Windows application? Embedding the Python interpreter in a Windows app can be summarized as follows: -1. Do _not_ build Python into your .exe file directly. On Windows, Python must +1. Do **not** build Python into your .exe file directly. On Windows, Python must be a DLL to handle importing modules that are themselves DLL's. (This is the first key undocumented fact.) Instead, link to :file:`python{NN}.dll`; it is typically installed in ``C:\Windows\System``. *NN* is the Python version, a @@ -191,7 +191,7 @@ Embedding the Python interpreter in a Windows app can be summarized as follows: 2. If you use SWIG, it is easy to create a Python "extension module" that will make the app's data and methods available to Python. SWIG will handle just about all the grungy details for you. The result is C code that you link - *into* your .exe file (!) You do _not_ have to create a DLL file, and this + *into* your .exe file (!) You do **not** have to create a DLL file, and this also simplifies linking. 3. 
SWIG will create an init function (a C function) whose name depends on the @@ -218,10 +218,10 @@ Embedding the Python interpreter in a Windows app can be summarized as follows: 5. There are two problems with Python's C API which will become apparent if you use a compiler other than MSVC, the compiler used to build pythonNN.dll. - Problem 1: The so-called "Very High Level" functions that take FILE * + Problem 1: The so-called "Very High Level" functions that take ``FILE *`` arguments will not work in a multi-compiler environment because each - compiler's notion of a struct FILE will be different. From an implementation - standpoint these are very _low_ level functions. + compiler's notion of a ``struct FILE`` will be different. From an implementation + standpoint these are very low level functions. Problem 2: SWIG generates the following code when generating wrappers to void functions: diff --git a/Doc/glossary.rst b/Doc/glossary.rst index 3d74d550dc345a5..6b517b95f970138 100644 --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -22,15 +22,6 @@ Glossary * The :const:`Ellipsis` built-in constant. - 2to3 - A tool that tries to convert Python 2.x code to Python 3.x code by - handling most of the incompatibilities which can be detected by parsing the - source and traversing the parse tree. - - 2to3 is available in the standard library as :mod:`lib2to3`; a standalone - entry point is provided as :file:`Tools/scripts/2to3`. See - :ref:`2to3-reference`. - abstract base class Abstract base classes complement :term:`duck-typing` by providing a way to define interfaces when other techniques like @@ -92,8 +83,8 @@ Glossary asynchronous context manager An object which controls the environment seen in an - :keyword:`async with` statement by defining :meth:`__aenter__` and - :meth:`__aexit__` methods. Introduced by :pep:`492`. + :keyword:`async with` statement by defining :meth:`~object.__aenter__` and + :meth:`~object.__aexit__` methods. Introduced by :pep:`492`. asynchronous generator A function which returns an :term:`asynchronous generator iterator`. It @@ -113,26 +104,26 @@ Glossary An object created by a :term:`asynchronous generator` function. This is an :term:`asynchronous iterator` which when called using the - :meth:`__anext__` method returns an awaitable object which will execute + :meth:`~object.__anext__` method returns an awaitable object which will execute the body of the asynchronous generator function until the next :keyword:`yield` expression. Each :keyword:`yield` temporarily suspends processing, remembering the location execution state (including local variables and pending try-statements). When the *asynchronous generator iterator* effectively - resumes with another awaitable returned by :meth:`__anext__`, it + resumes with another awaitable returned by :meth:`~object.__anext__`, it picks up where it left off. See :pep:`492` and :pep:`525`. asynchronous iterable An object, that can be used in an :keyword:`async for` statement. Must return an :term:`asynchronous iterator` from its - :meth:`__aiter__` method. Introduced by :pep:`492`. + :meth:`~object.__aiter__` method. Introduced by :pep:`492`. asynchronous iterator - An object that implements the :meth:`__aiter__` and :meth:`__anext__` - methods. ``__anext__`` must return an :term:`awaitable` object. + An object that implements the :meth:`~object.__aiter__` and :meth:`~object.__anext__` + methods. :meth:`~object.__anext__` must return an :term:`awaitable` object. 
:keyword:`async for` resolves the awaitables returned by an asynchronous - iterator's :meth:`__anext__` method until it raises a + iterator's :meth:`~object.__anext__` method until it raises a :exc:`StopAsyncIteration` exception. Introduced by :pep:`492`. attribute @@ -149,7 +140,7 @@ Glossary awaitable An object that can be used in an :keyword:`await` expression. Can be - a :term:`coroutine` or an object with an :meth:`__await__` method. + a :term:`coroutine` or an object with an :meth:`~object.__await__` method. See also :pep:`492`. BDFL @@ -168,8 +159,9 @@ Glossary :class:`str` objects. borrowed reference - In Python's C API, a borrowed reference is a reference to an object. - It does not modify the object reference count. It becomes a dangling + In Python's C API, a borrowed reference is a reference to an object, + where the code using the object does not own the reference. + It becomes a dangling pointer if the object is destroyed. For example, a garbage collection can remove the last :term:`strong reference` to the object and so destroy it. @@ -214,7 +206,7 @@ Glossary A callable is an object that can be called, possibly with a set of arguments (see :term:`argument`), with the following syntax:: - callable(argument1, argument2, ...) + callable(argument1, argument2, argumentN) A :term:`function`, and by extension a :term:`method`, is a callable. An instance of a class that implements the :meth:`~object.__call__` @@ -247,7 +239,7 @@ Glossary context manager An object which controls the environment seen in a :keyword:`with` - statement by defining :meth:`__enter__` and :meth:`__exit__` methods. + statement by defining :meth:`~object.__enter__` and :meth:`~object.__exit__` methods. See :pep:`343`. context variable @@ -587,6 +579,16 @@ Glossary :ref:`idle` is a basic editor and interpreter environment which ships with the standard distribution of Python. + immortal + If an object is immortal, its reference count is never modified, and + therefore it is never deallocated. + + Built-in strings and singletons are immortal objects. For example, + :const:`True` and :const:`None` singletons are immmortal. + + See `PEP 683 – Immortal Objects, Using a Fixed Refcount + <https://peps.python.org/pep-0683/>`_ for more information. + immutable An object with a fixed value. Immutable objects include numbers, strings and tuples. Such an object cannot be altered. A new object has to @@ -644,7 +646,7 @@ Glossary iterables include all sequence types (such as :class:`list`, :class:`str`, and :class:`tuple`) and some non-sequence types like :class:`dict`, :term:`file objects <file object>`, and objects of any classes you define - with an :meth:`__iter__` method or with a :meth:`__getitem__` method + with an :meth:`__iter__` method or with a :meth:`~object.__getitem__` method that implements :term:`sequence` semantics. Iterables can be @@ -1063,7 +1065,9 @@ Glossary reference count The number of references to an object. When the reference count of an - object drops to zero, it is deallocated. Reference counting is + object drops to zero, it is deallocated. Some objects are + :term:`immortal` and have reference counts that are never modified, and + therefore the objects are never deallocated. Reference counting is generally not visible to Python code, but it is a key element of the :term:`CPython` implementation. 
Programmers can call the :func:`sys.getrefcount` function to return the @@ -1084,17 +1088,17 @@ Glossary sequence An :term:`iterable` which supports efficient element access using integer - indices via the :meth:`__getitem__` special method and defines a + indices via the :meth:`~object.__getitem__` special method and defines a :meth:`__len__` method that returns the length of the sequence. Some built-in sequence types are :class:`list`, :class:`str`, :class:`tuple`, and :class:`bytes`. Note that :class:`dict` also - supports :meth:`__getitem__` and :meth:`__len__`, but is considered a + supports :meth:`~object.__getitem__` and :meth:`__len__`, but is considered a mapping rather than a sequence because the lookups use arbitrary :term:`immutable` keys rather than integers. The :class:`collections.abc.Sequence` abstract base class defines a much richer interface that goes beyond just - :meth:`__getitem__` and :meth:`__len__`, adding :meth:`count`, + :meth:`~object.__getitem__` and :meth:`__len__`, adding :meth:`count`, :meth:`index`, :meth:`__contains__`, and :meth:`__reversed__`. Types that implement this expanded interface can be registered explicitly using @@ -1116,6 +1120,21 @@ Glossary when several are given, such as in ``variable_name[1:3:5]``. The bracket (subscript) notation uses :class:`slice` objects internally. + soft deprecated + A soft deprecation can be used when using an API which should no longer + be used to write new code, but it remains safe to continue using it in + existing code. The API remains documented and tested, but will not be + developed further (no enhancement). + + The main difference between a "soft" and a (regular) "hard" deprecation + is that the soft deprecation does not imply scheduling the removal of the + deprecated API. + + Another difference is that a soft deprecation does not issue a warning. + + See `PEP 387: Soft Deprecation + <https://peps.python.org/pep-0387/#soft-deprecation>`_. + special method .. index:: pair: special; method @@ -1129,10 +1148,17 @@ Glossary an :term:`expression` or one of several constructs with a keyword, such as :keyword:`if`, :keyword:`while` or :keyword:`for`. + static type checker + An external tool that reads Python code and analyzes it, looking for + issues such as incorrect types. See also :term:`type hints <type hint>` + and the :mod:`typing` module. + strong reference In Python's C API, a strong reference is a reference to an object - which increments the object's reference count when it is created and - decrements the object's reference count when it is deleted. + which is owned by the code holding the reference. The strong + reference is taken by calling :c:func:`Py_INCREF` when the + reference is created and released with :c:func:`Py_DECREF` + when the reference is deleted. The :c:func:`Py_NewRef` function can be used to create a strong reference to an object. Usually, the :c:func:`Py_DECREF` function must be called on @@ -1203,8 +1229,8 @@ Glossary attribute, or a function parameter or return value. Type hints are optional and are not enforced by Python but - they are useful to static type analysis tools, and aid IDEs with code - completion and refactoring. + they are useful to :term:`static type checkers <static type checker>`. + They can also aid IDEs with code completion and refactoring. 
Type hints of global variables, class attributes, and functions, but not local variables, can be accessed using diff --git a/Doc/howto/annotations.rst b/Doc/howto/annotations.rst index 2bc2f2d4c839e2d..1134686c947d668 100644 --- a/Doc/howto/annotations.rst +++ b/Doc/howto/annotations.rst @@ -32,195 +32,201 @@ Annotations Best Practices Accessing The Annotations Dict Of An Object In Python 3.10 And Newer ==================================================================== - Python 3.10 adds a new function to the standard library: - :func:`inspect.get_annotations`. In Python versions 3.10 - and newer, calling this function is the best practice for - accessing the annotations dict of any object that supports - annotations. This function can also "un-stringize" - stringized annotations for you. - - If for some reason :func:`inspect.get_annotations` isn't - viable for your use case, you may access the - ``__annotations__`` data member manually. Best practice - for this changed in Python 3.10 as well: as of Python 3.10, - ``o.__annotations__`` is guaranteed to *always* work - on Python functions, classes, and modules. If you're - certain the object you're examining is one of these three - *specific* objects, you may simply use ``o.__annotations__`` - to get at the object's annotations dict. - - However, other types of callables--for example, - callables created by :func:`functools.partial`--may - not have an ``__annotations__`` attribute defined. When - accessing the ``__annotations__`` of a possibly unknown - object, best practice in Python versions 3.10 and - newer is to call :func:`getattr` with three arguments, - for example ``getattr(o, '__annotations__', None)``. +Python 3.10 adds a new function to the standard library: +:func:`inspect.get_annotations`. In Python versions 3.10 +and newer, calling this function is the best practice for +accessing the annotations dict of any object that supports +annotations. This function can also "un-stringize" +stringized annotations for you. + +If for some reason :func:`inspect.get_annotations` isn't +viable for your use case, you may access the +``__annotations__`` data member manually. Best practice +for this changed in Python 3.10 as well: as of Python 3.10, +``o.__annotations__`` is guaranteed to *always* work +on Python functions, classes, and modules. If you're +certain the object you're examining is one of these three +*specific* objects, you may simply use ``o.__annotations__`` +to get at the object's annotations dict. + +However, other types of callables--for example, +callables created by :func:`functools.partial`--may +not have an ``__annotations__`` attribute defined. When +accessing the ``__annotations__`` of a possibly unknown +object, best practice in Python versions 3.10 and +newer is to call :func:`getattr` with three arguments, +for example ``getattr(o, '__annotations__', None)``. + +Before Python 3.10, accessing ``__annotations__`` on a class that +defines no annotations but that has a parent class with +annotations would return the parent's ``__annotations__``. +In Python 3.10 and newer, the child class's annotations +will be an empty dict instead. Accessing The Annotations Dict Of An Object In Python 3.9 And Older =================================================================== - In Python 3.9 and older, accessing the annotations dict - of an object is much more complicated than in newer versions. - The problem is a design flaw in these older versions of Python, - specifically to do with class annotations. 
+In Python 3.9 and older, accessing the annotations dict +of an object is much more complicated than in newer versions. +The problem is a design flaw in these older versions of Python, +specifically to do with class annotations. - Best practice for accessing the annotations dict of other - objects--functions, other callables, and modules--is the same - as best practice for 3.10, assuming you aren't calling - :func:`inspect.get_annotations`: you should use three-argument - :func:`getattr` to access the object's ``__annotations__`` - attribute. +Best practice for accessing the annotations dict of other +objects--functions, other callables, and modules--is the same +as best practice for 3.10, assuming you aren't calling +:func:`inspect.get_annotations`: you should use three-argument +:func:`getattr` to access the object's ``__annotations__`` +attribute. - Unfortunately, this isn't best practice for classes. The problem - is that, since ``__annotations__`` is optional on classes, and - because classes can inherit attributes from their base classes, - accessing the ``__annotations__`` attribute of a class may - inadvertently return the annotations dict of a *base class.* - As an example:: +Unfortunately, this isn't best practice for classes. The problem +is that, since ``__annotations__`` is optional on classes, and +because classes can inherit attributes from their base classes, +accessing the ``__annotations__`` attribute of a class may +inadvertently return the annotations dict of a *base class.* +As an example:: - class Base: - a: int = 3 - b: str = 'abc' + class Base: + a: int = 3 + b: str = 'abc' - class Derived(Base): - pass + class Derived(Base): + pass - print(Derived.__annotations__) + print(Derived.__annotations__) - This will print the annotations dict from ``Base``, not - ``Derived``. +This will print the annotations dict from ``Base``, not +``Derived``. - Your code will have to have a separate code path if the object - you're examining is a class (``isinstance(o, type)``). - In that case, best practice relies on an implementation detail - of Python 3.9 and before: if a class has annotations defined, - they are stored in the class's ``__dict__`` dictionary. Since - the class may or may not have annotations defined, best practice - is to call the ``get`` method on the class dict. +Your code will have to have a separate code path if the object +you're examining is a class (``isinstance(o, type)``). +In that case, best practice relies on an implementation detail +of Python 3.9 and before: if a class has annotations defined, +they are stored in the class's ``__dict__`` dictionary. Since +the class may or may not have annotations defined, best practice +is to call the ``get`` method on the class dict. - To put it all together, here is some sample code that safely - accesses the ``__annotations__`` attribute on an arbitrary - object in Python 3.9 and before:: +To put it all together, here is some sample code that safely +accesses the ``__annotations__`` attribute on an arbitrary +object in Python 3.9 and before:: - if isinstance(o, type): - ann = o.__dict__.get('__annotations__', None) - else: - ann = getattr(o, '__annotations__', None) + if isinstance(o, type): + ann = o.__dict__.get('__annotations__', None) + else: + ann = getattr(o, '__annotations__', None) - After running this code, ``ann`` should be either a - dictionary or ``None``. You're encouraged to double-check - the type of ``ann`` using :func:`isinstance` before further - examination. 
+After running this code, ``ann`` should be either a +dictionary or ``None``. You're encouraged to double-check +the type of ``ann`` using :func:`isinstance` before further +examination. - Note that some exotic or malformed type objects may not have - a ``__dict__`` attribute, so for extra safety you may also wish - to use :func:`getattr` to access ``__dict__``. +Note that some exotic or malformed type objects may not have +a ``__dict__`` attribute, so for extra safety you may also wish +to use :func:`getattr` to access ``__dict__``. Manually Un-Stringizing Stringized Annotations ============================================== - In situations where some annotations may be "stringized", - and you wish to evaluate those strings to produce the - Python values they represent, it really is best to - call :func:`inspect.get_annotations` to do this work - for you. - - If you're using Python 3.9 or older, or if for some reason - you can't use :func:`inspect.get_annotations`, you'll need - to duplicate its logic. You're encouraged to examine the - implementation of :func:`inspect.get_annotations` in the - current Python version and follow a similar approach. - - In a nutshell, if you wish to evaluate a stringized annotation - on an arbitrary object ``o``: - - * If ``o`` is a module, use ``o.__dict__`` as the - ``globals`` when calling :func:`eval`. - * If ``o`` is a class, use ``sys.modules[o.__module__].__dict__`` - as the ``globals``, and ``dict(vars(o))`` as the ``locals``, - when calling :func:`eval`. - * If ``o`` is a wrapped callable using :func:`functools.update_wrapper`, - :func:`functools.wraps`, or :func:`functools.partial`, iteratively - unwrap it by accessing either ``o.__wrapped__`` or ``o.func`` as - appropriate, until you have found the root unwrapped function. - * If ``o`` is a callable (but not a class), use - ``o.__globals__`` as the globals when calling :func:`eval`. - - However, not all string values used as annotations can - be successfully turned into Python values by :func:`eval`. - String values could theoretically contain any valid string, - and in practice there are valid use cases for type hints that - require annotating with string values that specifically - *can't* be evaluated. For example: - - * :pep:`604` union types using ``|``, before support for this - was added to Python 3.10. - * Definitions that aren't needed at runtime, only imported - when :const:`typing.TYPE_CHECKING` is true. - - If :func:`eval` attempts to evaluate such values, it will - fail and raise an exception. So, when designing a library - API that works with annotations, it's recommended to only - attempt to evaluate string values when explicitly requested - to by the caller. +In situations where some annotations may be "stringized", +and you wish to evaluate those strings to produce the +Python values they represent, it really is best to +call :func:`inspect.get_annotations` to do this work +for you. + +If you're using Python 3.9 or older, or if for some reason +you can't use :func:`inspect.get_annotations`, you'll need +to duplicate its logic. You're encouraged to examine the +implementation of :func:`inspect.get_annotations` in the +current Python version and follow a similar approach. + +In a nutshell, if you wish to evaluate a stringized annotation +on an arbitrary object ``o``: + +* If ``o`` is a module, use ``o.__dict__`` as the + ``globals`` when calling :func:`eval`. 
+* If ``o`` is a class, use ``sys.modules[o.__module__].__dict__`` + as the ``globals``, and ``dict(vars(o))`` as the ``locals``, + when calling :func:`eval`. +* If ``o`` is a wrapped callable using :func:`functools.update_wrapper`, + :func:`functools.wraps`, or :func:`functools.partial`, iteratively + unwrap it by accessing either ``o.__wrapped__`` or ``o.func`` as + appropriate, until you have found the root unwrapped function. +* If ``o`` is a callable (but not a class), use + ``o.__globals__`` as the globals when calling :func:`eval`. + +However, not all string values used as annotations can +be successfully turned into Python values by :func:`eval`. +String values could theoretically contain any valid string, +and in practice there are valid use cases for type hints that +require annotating with string values that specifically +*can't* be evaluated. For example: + +* :pep:`604` union types using ``|``, before support for this + was added to Python 3.10. +* Definitions that aren't needed at runtime, only imported + when :const:`typing.TYPE_CHECKING` is true. + +If :func:`eval` attempts to evaluate such values, it will +fail and raise an exception. So, when designing a library +API that works with annotations, it's recommended to only +attempt to evaluate string values when explicitly requested +to by the caller. Best Practices For ``__annotations__`` In Any Python Version ============================================================ - * You should avoid assigning to the ``__annotations__`` member - of objects directly. Let Python manage setting ``__annotations__``. +* You should avoid assigning to the ``__annotations__`` member + of objects directly. Let Python manage setting ``__annotations__``. - * If you do assign directly to the ``__annotations__`` member - of an object, you should always set it to a ``dict`` object. +* If you do assign directly to the ``__annotations__`` member + of an object, you should always set it to a ``dict`` object. - * If you directly access the ``__annotations__`` member - of an object, you should ensure that it's a - dictionary before attempting to examine its contents. +* If you directly access the ``__annotations__`` member + of an object, you should ensure that it's a + dictionary before attempting to examine its contents. - * You should avoid modifying ``__annotations__`` dicts. +* You should avoid modifying ``__annotations__`` dicts. - * You should avoid deleting the ``__annotations__`` attribute - of an object. +* You should avoid deleting the ``__annotations__`` attribute + of an object. ``__annotations__`` Quirks ========================== - In all versions of Python 3, function - objects lazy-create an annotations dict if no annotations - are defined on that object. You can delete the ``__annotations__`` - attribute using ``del fn.__annotations__``, but if you then - access ``fn.__annotations__`` the object will create a new empty dict - that it will store and return as its annotations. Deleting the - annotations on a function before it has lazily created its annotations - dict will throw an ``AttributeError``; using ``del fn.__annotations__`` - twice in a row is guaranteed to always throw an ``AttributeError``. - - Everything in the above paragraph also applies to class and module - objects in Python 3.10 and newer. - - In all versions of Python 3, you can set ``__annotations__`` - on a function object to ``None``. 
However, subsequently - accessing the annotations on that object using ``fn.__annotations__`` - will lazy-create an empty dictionary as per the first paragraph of - this section. This is *not* true of modules and classes, in any Python - version; those objects permit setting ``__annotations__`` to any - Python value, and will retain whatever value is set. - - If Python stringizes your annotations for you - (using ``from __future__ import annotations``), and you - specify a string as an annotation, the string will - itself be quoted. In effect the annotation is quoted - *twice.* For example:: - - from __future__ import annotations - def foo(a: "str"): pass - - print(foo.__annotations__) - - This prints ``{'a': "'str'"}``. This shouldn't really be considered - a "quirk"; it's mentioned here simply because it might be surprising. +In all versions of Python 3, function +objects lazy-create an annotations dict if no annotations +are defined on that object. You can delete the ``__annotations__`` +attribute using ``del fn.__annotations__``, but if you then +access ``fn.__annotations__`` the object will create a new empty dict +that it will store and return as its annotations. Deleting the +annotations on a function before it has lazily created its annotations +dict will throw an ``AttributeError``; using ``del fn.__annotations__`` +twice in a row is guaranteed to always throw an ``AttributeError``. + +Everything in the above paragraph also applies to class and module +objects in Python 3.10 and newer. + +In all versions of Python 3, you can set ``__annotations__`` +on a function object to ``None``. However, subsequently +accessing the annotations on that object using ``fn.__annotations__`` +will lazy-create an empty dictionary as per the first paragraph of +this section. This is *not* true of modules and classes, in any Python +version; those objects permit setting ``__annotations__`` to any +Python value, and will retain whatever value is set. + +If Python stringizes your annotations for you +(using ``from __future__ import annotations``), and you +specify a string as an annotation, the string will +itself be quoted. In effect the annotation is quoted +*twice.* For example:: + + from __future__ import annotations + def foo(a: "str"): pass + + print(foo.__annotations__) + +This prints ``{'a': "'str'"}``. This shouldn't really be considered +a "quirk"; it's mentioned here simply because it might be surprising. diff --git a/Doc/howto/argparse.rst b/Doc/howto/argparse.rst index f4d08e75d946422..ae5bab90bf81315 100644 --- a/Doc/howto/argparse.rst +++ b/Doc/howto/argparse.rst @@ -1,10 +1,12 @@ +.. _argparse-tutorial: + ***************** Argparse Tutorial ***************** :author: Tshepang Mbambo -.. _argparse-tutorial: +.. currentmodule:: argparse This tutorial is intended to be a gentle introduction to :mod:`argparse`, the recommended command-line parsing module in the Python standard library. @@ -12,7 +14,7 @@ recommended command-line parsing module in the Python standard library. .. note:: There are two other modules that fulfill the same task, namely - :mod:`getopt` (an equivalent for :c:func:`getopt` from the C + :mod:`getopt` (an equivalent for ``getopt()`` from the C language) and the deprecated :mod:`optparse`. Note also that :mod:`argparse` is based on :mod:`optparse`, and therefore very similar in terms of usage. @@ -79,16 +81,16 @@ Following is a result of running the code: .. 
code-block:: shell-session - $ python3 prog.py - $ python3 prog.py --help + $ python prog.py + $ python prog.py --help usage: prog.py [-h] options: -h, --help show this help message and exit - $ python3 prog.py --verbose + $ python prog.py --verbose usage: prog.py [-h] prog.py: error: unrecognized arguments: --verbose - $ python3 prog.py foo + $ python prog.py foo usage: prog.py [-h] prog.py: error: unrecognized arguments: foo @@ -121,10 +123,10 @@ And running the code: .. code-block:: shell-session - $ python3 prog.py + $ python prog.py usage: prog.py [-h] echo prog.py: error: the following arguments are required: echo - $ python3 prog.py --help + $ python prog.py --help usage: prog.py [-h] echo positional arguments: @@ -132,18 +134,18 @@ And running the code: options: -h, --help show this help message and exit - $ python3 prog.py foo + $ python prog.py foo foo Here is what's happening: -* We've added the :meth:`add_argument` method, which is what we use to specify +* We've added the :meth:`~ArgumentParser.add_argument` method, which is what we use to specify which command-line options the program is willing to accept. In this case, I've named it ``echo`` so that it's in line with its function. * Calling our program now requires us to specify an option. -* The :meth:`parse_args` method actually returns some data from the +* The :meth:`~ArgumentParser.parse_args` method actually returns some data from the options specified, in this case, ``echo``. * The variable is some form of 'magic' that :mod:`argparse` performs for free @@ -166,7 +168,7 @@ And we get: .. code-block:: shell-session - $ python3 prog.py -h + $ python prog.py -h usage: prog.py [-h] echo positional arguments: @@ -187,7 +189,7 @@ Following is a result of running the code: .. code-block:: shell-session - $ python3 prog.py 4 + $ python prog.py 4 Traceback (most recent call last): File "prog.py", line 5, in <module> print(args.square**2) @@ -208,9 +210,9 @@ Following is a result of running the code: .. code-block:: shell-session - $ python3 prog.py 4 + $ python prog.py 4 16 - $ python3 prog.py four + $ python prog.py four usage: prog.py [-h] square prog.py: error: argument square: invalid int value: 'four' @@ -235,17 +237,17 @@ And the output: .. code-block:: shell-session - $ python3 prog.py --verbosity 1 + $ python prog.py --verbosity 1 verbosity turned on - $ python3 prog.py - $ python3 prog.py --help + $ python prog.py + $ python prog.py --help usage: prog.py [-h] [--verbosity VERBOSITY] options: -h, --help show this help message and exit --verbosity VERBOSITY increase output verbosity - $ python3 prog.py --verbosity + $ python prog.py --verbosity usage: prog.py [-h] [--verbosity VERBOSITY] prog.py: error: argument --verbosity: expected one argument @@ -256,7 +258,7 @@ Here is what is happening: * To show that the option is actually optional, there is no error when running the program without it. Note that by default, if an optional argument isn't - used, the relevant variable, in this case :attr:`args.verbosity`, is + used, the relevant variable, in this case ``args.verbosity``, is given ``None`` as a value, which is the reason it fails the truth test of the :keyword:`if` statement. @@ -281,12 +283,12 @@ And the output: .. 
code-block:: shell-session - $ python3 prog.py --verbose + $ python prog.py --verbose verbosity turned on - $ python3 prog.py --verbose 1 + $ python prog.py --verbose 1 usage: prog.py [-h] [--verbose] prog.py: error: unrecognized arguments: 1 - $ python3 prog.py --help + $ python prog.py --help usage: prog.py [-h] [--verbose] options: @@ -299,7 +301,7 @@ Here is what is happening: We even changed the name of the option to match that idea. Note that we now specify a new keyword, ``action``, and give it the value ``"store_true"``. This means that, if the option is specified, - assign the value ``True`` to :data:`args.verbose`. + assign the value ``True`` to ``args.verbose``. Not specifying it implies ``False``. * It complains when you specify a value, in true spirit of what flags @@ -327,9 +329,9 @@ And here goes: .. code-block:: shell-session - $ python3 prog.py -v + $ python prog.py -v verbosity turned on - $ python3 prog.py --help + $ python prog.py --help usage: prog.py [-h] [-v] options: @@ -361,14 +363,14 @@ And now the output: .. code-block:: shell-session - $ python3 prog.py + $ python prog.py usage: prog.py [-h] [-v] square prog.py: error: the following arguments are required: square - $ python3 prog.py 4 + $ python prog.py 4 16 - $ python3 prog.py 4 --verbose + $ python prog.py 4 --verbose the square of 4 equals 16 - $ python3 prog.py --verbose 4 + $ python prog.py --verbose 4 the square of 4 equals 16 * We've brought back a positional argument, hence the complaint. @@ -397,16 +399,16 @@ And the output: .. code-block:: shell-session - $ python3 prog.py 4 + $ python prog.py 4 16 - $ python3 prog.py 4 -v + $ python prog.py 4 -v usage: prog.py [-h] [-v VERBOSITY] square prog.py: error: argument -v/--verbosity: expected one argument - $ python3 prog.py 4 -v 1 + $ python prog.py 4 -v 1 4^2 == 16 - $ python3 prog.py 4 -v 2 + $ python prog.py 4 -v 2 the square of 4 equals 16 - $ python3 prog.py 4 -v 3 + $ python prog.py 4 -v 3 16 These all look good except the last one, which exposes a bug in our program. @@ -431,10 +433,10 @@ And the output: .. code-block:: shell-session - $ python3 prog.py 4 -v 3 + $ python prog.py 4 -v 3 usage: prog.py [-h] [-v {0,1,2}] square prog.py: error: argument -v/--verbosity: invalid choice: 3 (choose from 0, 1, 2) - $ python3 prog.py 4 -h + $ python prog.py 4 -h usage: prog.py [-h] [-v {0,1,2}] square positional arguments: @@ -473,18 +475,18 @@ to count the number of occurrences of specific options. .. code-block:: shell-session - $ python3 prog.py 4 + $ python prog.py 4 16 - $ python3 prog.py 4 -v + $ python prog.py 4 -v 4^2 == 16 - $ python3 prog.py 4 -vv + $ python prog.py 4 -vv the square of 4 equals 16 - $ python3 prog.py 4 --verbosity --verbosity + $ python prog.py 4 --verbosity --verbosity the square of 4 equals 16 - $ python3 prog.py 4 -v 1 + $ python prog.py 4 -v 1 usage: prog.py [-h] [-v] square prog.py: error: unrecognized arguments: 1 - $ python3 prog.py 4 -h + $ python prog.py 4 -h usage: prog.py [-h] [-v] square positional arguments: @@ -493,7 +495,7 @@ to count the number of occurrences of specific options. options: -h, --help show this help message and exit -v, --verbosity increase output verbosity - $ python3 prog.py 4 -vvv + $ python prog.py 4 -vvv 16 * Yes, it's now more of a flag (similar to ``action="store_true"``) in the @@ -540,11 +542,11 @@ And this is what it gives: .. 
code-block:: shell-session - $ python3 prog.py 4 -vvv + $ python prog.py 4 -vvv the square of 4 equals 16 - $ python3 prog.py 4 -vvvv + $ python prog.py 4 -vvvv the square of 4 equals 16 - $ python3 prog.py 4 + $ python prog.py 4 Traceback (most recent call last): File "prog.py", line 11, in <module> if args.verbosity >= 2: @@ -584,7 +586,7 @@ And: .. code-block:: shell-session - $ python3 prog.py 4 + $ python prog.py 4 16 You can go quite far just with what we've learned so far, @@ -617,10 +619,10 @@ Output: .. code-block:: shell-session - $ python3 prog.py + $ python prog.py usage: prog.py [-h] [-v] x y prog.py: error: the following arguments are required: x, y - $ python3 prog.py -h + $ python prog.py -h usage: prog.py [-h] [-v] x y positional arguments: @@ -630,7 +632,7 @@ Output: options: -h, --help show this help message and exit -v, --verbosity - $ python3 prog.py 4 2 -v + $ python prog.py 4 2 -v 4^2 == 16 @@ -655,11 +657,11 @@ Output: .. code-block:: shell-session - $ python3 prog.py 4 2 + $ python prog.py 4 2 16 - $ python3 prog.py 4 2 -v + $ python prog.py 4 2 -v 4^2 == 16 - $ python3 prog.py 4 2 -vv + $ python prog.py 4 2 -vv Running 'prog.py' 4^2 == 16 @@ -698,7 +700,7 @@ Conflicting options So far, we have been working with two methods of an :class:`argparse.ArgumentParser` instance. Let's introduce a third one, -:meth:`add_mutually_exclusive_group`. It allows for us to specify options that +:meth:`~ArgumentParser.add_mutually_exclusive_group`. It allows for us to specify options that conflict with each other. Let's also change the rest of the program so that the new functionality makes more sense: we'll introduce the ``--quiet`` option, @@ -727,16 +729,16 @@ demonstration. Anyways, here's the output: .. code-block:: shell-session - $ python3 prog.py 4 2 + $ python prog.py 4 2 4^2 == 16 - $ python3 prog.py 4 2 -q + $ python prog.py 4 2 -q 16 - $ python3 prog.py 4 2 -v + $ python prog.py 4 2 -v 4 to the power 2 equals 16 - $ python3 prog.py 4 2 -vq + $ python prog.py 4 2 -vq usage: prog.py [-h] [-v | -q] x y prog.py: error: argument -q/--quiet: not allowed with argument -v/--verbose - $ python3 prog.py 4 2 -v --quiet + $ python prog.py 4 2 -v --quiet usage: prog.py [-h] [-v | -q] x y prog.py: error: argument -q/--quiet: not allowed with argument -v/--verbose @@ -771,7 +773,34 @@ but not both at the same time: .. code-block:: shell-session - $ python3 prog.py --help + $ python prog.py --help + usage: prog.py [-h] [-v | -q] x y + + calculate X to the power of Y + + positional arguments: + x the base + y the exponent + + options: + -h, --help show this help message and exit + -v, --verbose + -q, --quiet + + +How to translate the argparse output +==================================== + +The output of the :mod:`argparse` module such as its help text and error +messages are all made translatable using the :mod:`gettext` module. This +allows applications to easily localize messages produced by +:mod:`argparse`. See also :ref:`i18n-howto`. + +For instance, in this :mod:`argparse` output: + +.. code-block:: shell-session + + $ python prog.py --help usage: prog.py [-h] [-v | -q] x y calculate X to the power of Y @@ -785,6 +814,32 @@ but not both at the same time: -v, --verbose -q, --quiet +The strings ``usage:``, ``positional arguments:``, ``options:`` and +``show this help message and exit`` are all translatable. + +In order to translate these strings, they must first be extracted +into a ``.po`` file. For example, using `Babel <https://babel.pocoo.org/>`__, +run this command: + +.. 
code-block:: shell-session + + $ pybabel extract -o messages.po /usr/lib/python3.12/argparse.py + +This command will extract all translatable strings from the :mod:`argparse` +module and output them into a file named ``messages.po``. This command assumes +that your Python installation is in ``/usr/lib``. + +You can find out the location of the :mod:`argparse` module on your system +using this script:: + + import argparse + print(argparse.__file__) + +Once the messages in the ``.po`` file are translated and the translations are +installed using :mod:`gettext`, :mod:`argparse` will be able to display the +translated messages. + +To translate your own strings in the :mod:`argparse` output, use :mod:`gettext`. Conclusion ========== diff --git a/Doc/howto/clinic.rst b/Doc/howto/clinic.rst index a97f1d23f53f318..060977246149cfa 100644 --- a/Doc/howto/clinic.rst +++ b/Doc/howto/clinic.rst @@ -1,1804 +1,14 @@ -.. highlight:: c +:orphan: -.. _howto-clinic: +.. This page is retained solely for existing links to /howto/clinic.html. + Direct readers to the devguide. ********************** Argument Clinic How-To ********************** -:author: Larry Hastings +.. note:: -.. topic:: Abstract - - Argument Clinic is a preprocessor for CPython C files. - Its purpose is to automate all the boilerplate involved - with writing argument parsing code for "builtins". - This document shows you how to convert your first C - function to work with Argument Clinic, and then introduces - some advanced topics on Argument Clinic usage. - - Currently Argument Clinic is considered internal-only - for CPython. Its use is not supported for files outside - CPython, and no guarantees are made regarding backwards - compatibility for future versions. In other words: if you - maintain an external C extension for CPython, you're welcome - to experiment with Argument Clinic in your own code. But the - version of Argument Clinic that ships with the next version - of CPython *could* be totally incompatible and break all your code. - -The Goals Of Argument Clinic -============================ - -Argument Clinic's primary goal -is to take over responsibility for all argument parsing code -inside CPython. This means that, when you convert a function -to work with Argument Clinic, that function should no longer -do any of its own argument parsing—the code generated by -Argument Clinic should be a "black box" to you, where CPython -calls in at the top, and your code gets called at the bottom, -with ``PyObject *args`` (and maybe ``PyObject *kwargs``) -magically converted into the C variables and types you need. - -In order for Argument Clinic to accomplish its primary goal, -it must be easy to use. Currently, working with CPython's -argument parsing library is a chore, requiring maintaining -redundant information in a surprising number of places. -When you use Argument Clinic, you don't have to repeat yourself. - -Obviously, no one would want to use Argument Clinic unless -it's solving their problem—and without creating new problems of -its own. -So it's paramount that Argument Clinic generate correct code. -It'd be nice if the code was faster, too, but at the very least -it should not introduce a major speed regression. (Eventually Argument -Clinic *should* make a major speedup possible—we could -rewrite its code generator to produce tailor-made argument -parsing code, rather than calling the general-purpose CPython -argument parsing library. That would make for the fastest -argument parsing possible!) 
- -Additionally, Argument Clinic must be flexible enough to -work with any approach to argument parsing. Python has -some functions with some very strange parsing behaviors; -Argument Clinic's goal is to support all of them. - -Finally, the original motivation for Argument Clinic was -to provide introspection "signatures" for CPython builtins. -It used to be, the introspection query functions would throw -an exception if you passed in a builtin. With Argument -Clinic, that's a thing of the past! - -One idea you should keep in mind, as you work with -Argument Clinic: the more information you give it, the -better job it'll be able to do. -Argument Clinic is admittedly relatively simple right -now. But as it evolves it will get more sophisticated, -and it should be able to do many interesting and smart -things with all the information you give it. - - -Basic Concepts And Usage -======================== - -Argument Clinic ships with CPython; you'll find it in ``Tools/clinic/clinic.py``. -If you run that script, specifying a C file as an argument: - -.. code-block:: shell-session - - $ python3 Tools/clinic/clinic.py foo.c - -Argument Clinic will scan over the file looking for lines that -look exactly like this: - -.. code-block:: none - - /*[clinic input] - -When it finds one, it reads everything up to a line that looks -exactly like this: - -.. code-block:: none - - [clinic start generated code]*/ - -Everything in between these two lines is input for Argument Clinic. -All of these lines, including the beginning and ending comment -lines, are collectively called an Argument Clinic "block". - -When Argument Clinic parses one of these blocks, it -generates output. This output is rewritten into the C file -immediately after the block, followed by a comment containing a checksum. -The Argument Clinic block now looks like this: - -.. code-block:: none - - /*[clinic input] - ... clinic input goes here ... - [clinic start generated code]*/ - ... clinic output goes here ... - /*[clinic end generated code: checksum=...]*/ - -If you run Argument Clinic on the same file a second time, Argument Clinic -will discard the old output and write out the new output with a fresh checksum -line. However, if the input hasn't changed, the output won't change either. - -You should never modify the output portion of an Argument Clinic block. Instead, -change the input until it produces the output you want. (That's the purpose of the -checksum—to detect if someone changed the output, as these edits would be lost -the next time Argument Clinic writes out fresh output.) - -For the sake of clarity, here's the terminology we'll use with Argument Clinic: - -* The first line of the comment (``/*[clinic input]``) is the *start line*. -* The last line of the initial comment (``[clinic start generated code]*/``) is the *end line*. -* The last line (``/*[clinic end generated code: checksum=...]*/``) is the *checksum line*. -* In between the start line and the end line is the *input*. -* In between the end line and the checksum line is the *output*. -* All the text collectively, from the start line to the checksum line inclusively, - is the *block*. (A block that hasn't been successfully processed by Argument - Clinic yet doesn't have output or a checksum line, but it's still considered - a block.) - - -Converting Your First Function -============================== - -The best way to get a sense of how Argument Clinic works is to -convert a function to work with it. 
Here, then, are the bare -minimum steps you'd need to follow to convert a function to -work with Argument Clinic. Note that for code you plan to -check in to CPython, you really should take the conversion farther, -using some of the advanced concepts you'll see later on in -the document (like "return converters" and "self converters"). -But we'll keep it simple for this walkthrough so you can learn. - -Let's dive in! - -0. Make sure you're working with a freshly updated checkout - of the CPython trunk. - -1. Find a Python builtin that calls either :c:func:`PyArg_ParseTuple` - or :c:func:`PyArg_ParseTupleAndKeywords`, and hasn't been converted - to work with Argument Clinic yet. - For my example I'm using ``_pickle.Pickler.dump()``. - -2. If the call to the ``PyArg_Parse`` function uses any of the - following format units: - - .. code-block:: none - - O& - O! - es - es# - et - et# - - or if it has multiple calls to :c:func:`PyArg_ParseTuple`, - you should choose a different function. Argument Clinic *does* - support all of these scenarios. But these are advanced - topics—let's do something simpler for your first function. - - Also, if the function has multiple calls to :c:func:`PyArg_ParseTuple` - or :c:func:`PyArg_ParseTupleAndKeywords` where it supports different - types for the same argument, or if the function uses something besides - PyArg_Parse functions to parse its arguments, it probably - isn't suitable for conversion to Argument Clinic. Argument Clinic - doesn't support generic functions or polymorphic parameters. - -3. Add the following boilerplate above the function, creating our block:: - - /*[clinic input] - [clinic start generated code]*/ - -4. Cut the docstring and paste it in between the ``[clinic]`` lines, - removing all the junk that makes it a properly quoted C string. - When you're done you should have just the text, based at the left - margin, with no line wider than 80 characters. - (Argument Clinic will preserve indents inside the docstring.) - - If the old docstring had a first line that looked like a function - signature, throw that line away. (The docstring doesn't need it - anymore—when you use ``help()`` on your builtin in the future, - the first line will be built automatically based on the function's - signature.) - - Sample:: - - /*[clinic input] - Write a pickled representation of obj to the open file. - [clinic start generated code]*/ - -5. If your docstring doesn't have a "summary" line, Argument Clinic will - complain. So let's make sure it has one. The "summary" line should - be a paragraph consisting of a single 80-column line - at the beginning of the docstring. - - (Our example docstring consists solely of a summary line, so the sample - code doesn't have to change for this step.) - -6. Above the docstring, enter the name of the function, followed - by a blank line. This should be the Python name of the function, - and should be the full dotted path - to the function—it should start with the name of the module, - include any sub-modules, and if the function is a method on - a class it should include the class name too. - - Sample:: - - /*[clinic input] - _pickle.Pickler.dump - - Write a pickled representation of obj to the open file. - [clinic start generated code]*/ - -7. If this is the first time that module or class has been used with Argument - Clinic in this C file, - you must declare the module and/or class. 
Proper Argument Clinic hygiene - prefers declaring these in a separate block somewhere near the - top of the C file, in the same way that include files and statics go at - the top. (In our sample code we'll just show the two blocks next to - each other.) - - The name of the class and module should be the same as the one - seen by Python. Check the name defined in the :c:type:`PyModuleDef` - or :c:type:`PyTypeObject` as appropriate. - - When you declare a class, you must also specify two aspects of its type - in C: the type declaration you'd use for a pointer to an instance of - this class, and a pointer to the :c:type:`PyTypeObject` for this class. - - Sample:: - - /*[clinic input] - module _pickle - class _pickle.Pickler "PicklerObject *" "&Pickler_Type" - [clinic start generated code]*/ - - /*[clinic input] - _pickle.Pickler.dump - - Write a pickled representation of obj to the open file. - [clinic start generated code]*/ - - - - -8. Declare each of the parameters to the function. Each parameter - should get its own line. All the parameter lines should be - indented from the function name and the docstring. - - The general form of these parameter lines is as follows: - - .. code-block:: none - - name_of_parameter: converter - - If the parameter has a default value, add that after the - converter: - - .. code-block:: none - - name_of_parameter: converter = default_value - - Argument Clinic's support for "default values" is quite sophisticated; - please see :ref:`the section below on default values <default_values>` - for more information. - - Add a blank line below the parameters. - - What's a "converter"? It establishes both the type - of the variable used in C, and the method to convert the Python - value into a C value at runtime. - For now you're going to use what's called a "legacy converter"—a - convenience syntax intended to make porting old code into Argument - Clinic easier. - - For each parameter, copy the "format unit" for that - parameter from the ``PyArg_Parse()`` format argument and - specify *that* as its converter, as a quoted - string. ("format unit" is the formal name for the one-to-three - character substring of the ``format`` parameter that tells - the argument parsing function what the type of the variable - is and how to convert it. For more on format units please - see :ref:`arg-parsing`.) - - For multicharacter format units like ``z#``, use the - entire two-or-three character string. - - Sample:: - - /*[clinic input] - module _pickle - class _pickle.Pickler "PicklerObject *" "&Pickler_Type" - [clinic start generated code]*/ - - /*[clinic input] - _pickle.Pickler.dump - - obj: 'O' - - Write a pickled representation of obj to the open file. - [clinic start generated code]*/ - -9. If your function has ``|`` in the format string, meaning some - parameters have default values, you can ignore it. Argument - Clinic infers which parameters are optional based on whether - or not they have default values. - - If your function has ``$`` in the format string, meaning it - takes keyword-only arguments, specify ``*`` on a line by - itself before the first keyword-only argument, indented the - same as the parameter lines. - - (``_pickle.Pickler.dump`` has neither, so our sample is unchanged.) - - -10. If the existing C function calls :c:func:`PyArg_ParseTuple` - (as opposed to :c:func:`PyArg_ParseTupleAndKeywords`), then all its - arguments are positional-only. 
- - To mark all parameters as positional-only in Argument Clinic, - add a ``/`` on a line by itself after the last parameter, - indented the same as the parameter lines. - - Currently this is all-or-nothing; either all parameters are - positional-only, or none of them are. (In the future Argument - Clinic may relax this restriction.) - - Sample:: - - /*[clinic input] - module _pickle - class _pickle.Pickler "PicklerObject *" "&Pickler_Type" - [clinic start generated code]*/ - - /*[clinic input] - _pickle.Pickler.dump - - obj: 'O' - / - - Write a pickled representation of obj to the open file. - [clinic start generated code]*/ - -11. It's helpful to write a per-parameter docstring for each parameter. - But per-parameter docstrings are optional; you can skip this step - if you prefer. - - Here's how to add a per-parameter docstring. The first line - of the per-parameter docstring must be indented further than the - parameter definition. The left margin of this first line establishes - the left margin for the whole per-parameter docstring; all the text - you write will be outdented by this amount. You can write as much - text as you like, across multiple lines if you wish. - - Sample:: - - /*[clinic input] - module _pickle - class _pickle.Pickler "PicklerObject *" "&Pickler_Type" - [clinic start generated code]*/ - - /*[clinic input] - _pickle.Pickler.dump - - obj: 'O' - The object to be pickled. - / - - Write a pickled representation of obj to the open file. - [clinic start generated code]*/ - -12. Save and close the file, then run ``Tools/clinic/clinic.py`` on - it. With luck everything worked---your block now has output, and - a ``.c.h`` file has been generated! Reopen the file in your - text editor to see:: - - /*[clinic input] - _pickle.Pickler.dump - - obj: 'O' - The object to be pickled. - / - - Write a pickled representation of obj to the open file. - [clinic start generated code]*/ - - static PyObject * - _pickle_Pickler_dump(PicklerObject *self, PyObject *obj) - /*[clinic end generated code: output=87ecad1261e02ac7 input=552eb1c0f52260d9]*/ - - Obviously, if Argument Clinic didn't produce any output, it's because - it found an error in your input. Keep fixing your errors and retrying - until Argument Clinic processes your file without complaint. - - For readability, most of the glue code has been generated to a ``.c.h`` - file. You'll need to include that in your original ``.c`` file, - typically right after the clinic module block:: - - #include "clinic/_pickle.c.h" - -13. Double-check that the argument-parsing code Argument Clinic generated - looks basically the same as the existing code. - - First, ensure both places use the same argument-parsing function. - The existing code must call either - :c:func:`PyArg_ParseTuple` or :c:func:`PyArg_ParseTupleAndKeywords`; - ensure that the code generated by Argument Clinic calls the - *exact* same function. - - Second, the format string passed in to :c:func:`PyArg_ParseTuple` or - :c:func:`PyArg_ParseTupleAndKeywords` should be *exactly* the same - as the hand-written one in the existing function, up to the colon - or semi-colon. - - (Argument Clinic always generates its format strings - with a ``:`` followed by the name of the function. If the - existing code's format string ends with ``;``, to provide - usage help, this change is harmless—don't worry about it.) 
- - Third, for parameters whose format units require two arguments - (like a length variable, or an encoding string, or a pointer - to a conversion function), ensure that the second argument is - *exactly* the same between the two invocations. - - Fourth, inside the output portion of the block you'll find a preprocessor - macro defining the appropriate static :c:type:`PyMethodDef` structure for - this builtin:: - - #define __PICKLE_PICKLER_DUMP_METHODDEF \ - {"dump", (PyCFunction)__pickle_Pickler_dump, METH_O, __pickle_Pickler_dump__doc__}, - - This static structure should be *exactly* the same as the existing static - :c:type:`PyMethodDef` structure for this builtin. - - If any of these items differ in *any way*, - adjust your Argument Clinic function specification and rerun - ``Tools/clinic/clinic.py`` until they *are* the same. - - -14. Notice that the last line of its output is the declaration - of your "impl" function. This is where the builtin's implementation goes. - Delete the existing prototype of the function you're modifying, but leave - the opening curly brace. Now delete its argument parsing code and the - declarations of all the variables it dumps the arguments into. - Notice how the Python arguments are now arguments to this impl function; - if the implementation used different names for these variables, fix it. - - Let's reiterate, just because it's kind of weird. Your code should now - look like this:: - - static return_type - your_function_impl(...) - /*[clinic end generated code: checksum=...]*/ - { - ... - - Argument Clinic generated the checksum line and the function prototype just - above it. You should write the opening (and closing) curly braces for the - function, and the implementation inside. - - Sample:: - - /*[clinic input] - module _pickle - class _pickle.Pickler "PicklerObject *" "&Pickler_Type" - [clinic start generated code]*/ - /*[clinic end generated code: checksum=da39a3ee5e6b4b0d3255bfef95601890afd80709]*/ - - /*[clinic input] - _pickle.Pickler.dump - - obj: 'O' - The object to be pickled. - / - - Write a pickled representation of obj to the open file. - [clinic start generated code]*/ - - PyDoc_STRVAR(__pickle_Pickler_dump__doc__, - "Write a pickled representation of obj to the open file.\n" - "\n" - ... - static PyObject * - _pickle_Pickler_dump_impl(PicklerObject *self, PyObject *obj) - /*[clinic end generated code: checksum=3bd30745bf206a48f8b576a1da3d90f55a0a4187]*/ - { - /* Check whether the Pickler was initialized correctly (issue3664). - Developers often forget to call __init__() in their subclasses, which - would trigger a segfault without this check. */ - if (self->write == NULL) { - PyErr_Format(PicklingError, - "Pickler.__init__() was not called by %s.__init__()", - Py_TYPE(self)->tp_name); - return NULL; - } - - if (_Pickler_ClearBuffer(self) < 0) - return NULL; - - ... - -15. Remember the macro with the :c:type:`PyMethodDef` structure for this - function? Find the existing :c:type:`PyMethodDef` structure for this - function and replace it with a reference to the macro. (If the builtin - is at module scope, this will probably be very near the end of the file; - if the builtin is a class method, this will probably be below but relatively - near to the implementation.) - - Note that the body of the macro contains a trailing comma. So when you - replace the existing static :c:type:`PyMethodDef` structure with the macro, - *don't* add a comma to the end. 
- - Sample:: - - static struct PyMethodDef Pickler_methods[] = { - __PICKLE_PICKLER_DUMP_METHODDEF - __PICKLE_PICKLER_CLEAR_MEMO_METHODDEF - {NULL, NULL} /* sentinel */ - }; - - -16. Argument Clinic may generate new instances of ``_Py_ID``. For example:: - - &_Py_ID(new_unique_py_id) - - If it does, you'll have to run ``Tools/scripts/generate_global_objects.py`` - to regenerate the list of precompiled identifiers at this point. - - -17. Compile, then run the relevant portions of the regression-test suite. - This change should not introduce any new compile-time warnings or errors, - and there should be no externally visible change to Python's behavior. - - Well, except for one difference: ``inspect.signature()`` run on your function - should now provide a valid signature! - - Congratulations, you've ported your first function to work with Argument Clinic! - -Advanced Topics -=============== - -Now that you've had some experience working with Argument Clinic, it's time -for some advanced topics. - - -Symbolic default values ------------------------ - -The default value you provide for a parameter can't be any arbitrary -expression. Currently the following are explicitly supported: - -* Numeric constants (integer and float) -* String constants -* ``True``, ``False``, and ``None`` -* Simple symbolic constants like ``sys.maxsize``, which must - start with the name of the module - -(In the future, this may need to get even more elaborate, -to allow full expressions like ``CONSTANT - 1``.) - - -Renaming the C functions and variables generated by Argument Clinic -------------------------------------------------------------------- - -Argument Clinic automatically names the functions it generates for you. -Occasionally this may cause a problem, if the generated name collides with -the name of an existing C function. There's an easy solution: override the names -used for the C functions. Just add the keyword ``"as"`` -to your function declaration line, followed by the function name you wish to use. -Argument Clinic will use that function name for the base (generated) function, -then add ``"_impl"`` to the end and use that for the name of the impl function. - -For example, if we wanted to rename the C function names generated for -``pickle.Pickler.dump``, it'd look like this:: - - /*[clinic input] - pickle.Pickler.dump as pickler_dumper - - ... - -The base function would now be named ``pickler_dumper()``, -and the impl function would now be named ``pickler_dumper_impl()``. - - -Similarly, you may have a problem where you want to give a parameter -a specific Python name, but that name may be inconvenient in C. Argument -Clinic allows you to give a parameter different names in Python and in C, -using the same ``"as"`` syntax:: - - /*[clinic input] - pickle.Pickler.dump - - obj: object - file as file_obj: object - protocol: object = NULL - * - fix_imports: bool = True - -Here, the name used in Python (in the signature and the ``keywords`` -array) would be ``file``, but the C variable would be named ``file_obj``. - -You can use this to rename the ``self`` parameter too! - - -Converting functions using PyArg_UnpackTuple --------------------------------------------- - -To convert a function parsing its arguments with :c:func:`PyArg_UnpackTuple`, -simply write out all the arguments, specifying each as an ``object``. You -may specify the ``type`` argument to cast the type as appropriate. All -arguments should be marked positional-only (add a ``/`` on a line by itself -after the last argument). 
- -Currently the generated code will use :c:func:`PyArg_ParseTuple`, but this -will change soon. - -Optional Groups ---------------- - -Some legacy functions have a tricky approach to parsing their arguments: -they count the number of positional arguments, then use a ``switch`` statement -to call one of several different :c:func:`PyArg_ParseTuple` calls depending on -how many positional arguments there are. (These functions cannot accept -keyword-only arguments.) This approach was used to simulate optional -arguments back before :c:func:`PyArg_ParseTupleAndKeywords` was created. - -While functions using this approach can often be converted to -use :c:func:`PyArg_ParseTupleAndKeywords`, optional arguments, and default values, -it's not always possible. Some of these legacy functions have -behaviors :c:func:`PyArg_ParseTupleAndKeywords` doesn't directly support. -The most obvious example is the builtin function ``range()``, which has -an optional argument on the *left* side of its required argument! -Another example is ``curses.window.addch()``, which has a group of two -arguments that must always be specified together. (The arguments are -called ``x`` and ``y``; if you call the function passing in ``x``, -you must also pass in ``y``—and if you don't pass in ``x`` you may not -pass in ``y`` either.) - -In any case, the goal of Argument Clinic is to support argument parsing -for all existing CPython builtins without changing their semantics. -Therefore Argument Clinic supports -this alternate approach to parsing, using what are called *optional groups*. -Optional groups are groups of arguments that must all be passed in together. -They can be to the left or the right of the required arguments. They -can *only* be used with positional-only parameters. - -.. note:: Optional groups are *only* intended for use when converting - functions that make multiple calls to :c:func:`PyArg_ParseTuple`! - Functions that use *any* other approach for parsing arguments - should *almost never* be converted to Argument Clinic using - optional groups. Functions using optional groups currently - cannot have accurate signatures in Python, because Python just - doesn't understand the concept. Please avoid using optional - groups wherever possible. - -To specify an optional group, add a ``[`` on a line by itself before -the parameters you wish to group together, and a ``]`` on a line by itself -after these parameters. As an example, here's how ``curses.window.addch`` -uses optional groups to make the first two parameters and the last -parameter optional:: - - /*[clinic input] - - curses.window.addch - - [ - x: int - X-coordinate. - y: int - Y-coordinate. - ] - - ch: object - Character to add. - - [ - attr: long - Attributes for the character. - ] - / - - ... - - -Notes: - -* For every optional group, one additional parameter will be passed into the - impl function representing the group. The parameter will be an int named - ``group_{direction}_{number}``, - where ``{direction}`` is either ``right`` or ``left`` depending on whether the group - is before or after the required parameters, and ``{number}`` is a monotonically - increasing number (starting at 1) indicating how far away the group is from - the required parameters. When the impl is called, this parameter will be set - to zero if this group was unused, and set to non-zero if this group was used. - (By used or unused, I mean whether or not the parameters received arguments - in this invocation.) 
- -* If there are no required arguments, the optional groups will behave - as if they're to the right of the required arguments. - -* In the case of ambiguity, the argument parsing code - favors parameters on the left (before the required parameters). - -* Optional groups can only contain positional-only parameters. - -* Optional groups are *only* intended for legacy code. Please do not - use optional groups for new code. - - -Using real Argument Clinic converters, instead of "legacy converters" ---------------------------------------------------------------------- - -To save time, and to minimize how much you need to learn -to achieve your first port to Argument Clinic, the walkthrough above tells -you to use "legacy converters". "Legacy converters" are a convenience, -designed explicitly to make porting existing code to Argument Clinic -easier. And to be clear, their use is acceptable when porting code for -Python 3.4. - -However, in the long term we probably want all our blocks to -use Argument Clinic's real syntax for converters. Why? A couple -reasons: - -* The proper converters are far easier to read and clearer in their intent. -* There are some format units that are unsupported as "legacy converters", - because they require arguments, and the legacy converter syntax doesn't - support specifying arguments. -* In the future we may have a new argument parsing library that isn't - restricted to what :c:func:`PyArg_ParseTuple` supports; this flexibility - won't be available to parameters using legacy converters. - -Therefore, if you don't mind a little extra effort, please use the normal -converters instead of legacy converters. - -In a nutshell, the syntax for Argument Clinic (non-legacy) converters -looks like a Python function call. However, if there are no explicit -arguments to the function (all functions take their default values), -you may omit the parentheses. Thus ``bool`` and ``bool()`` are exactly -the same converters. - -All arguments to Argument Clinic converters are keyword-only. -All Argument Clinic converters accept the following arguments: - - ``c_default`` - The default value for this parameter when defined in C. - Specifically, this will be the initializer for the variable declared - in the "parse function". See :ref:`the section on default values <default_values>` - for how to use this. - Specified as a string. - - ``annotation`` - The annotation value for this parameter. Not currently supported, - because :pep:`8` mandates that the Python library may not use - annotations. - -In addition, some converters accept additional arguments. Here is a list -of these arguments, along with their meanings: - - ``accept`` - A set of Python types (and possibly pseudo-types); - this restricts the allowable Python argument to values of these types. - (This is not a general-purpose facility; as a rule it only supports - specific lists of types as shown in the legacy converter table.) - - To accept ``None``, add ``NoneType`` to this set. - - ``bitwise`` - Only supported for unsigned integers. The native integer value of this - Python argument will be written to the parameter without any range checking, - even for negative values. - - ``converter`` - Only supported by the ``object`` converter. Specifies the name of a - :ref:`C "converter function" <o_ampersand>` - to use to convert this object to a native type. - - ``encoding`` - Only supported for strings. Specifies the encoding to use when converting - this string from a Python str (Unicode) value into a C ``char *`` value. 
- - - ``subclass_of`` - Only supported for the ``object`` converter. Requires that the Python - value be a subclass of a Python type, as expressed in C. - - ``type`` - Only supported for the ``object`` and ``self`` converters. Specifies - the C type that will be used to declare the variable. Default value is - ``"PyObject *"``. - - ``zeroes`` - Only supported for strings. If true, embedded NUL bytes (``'\\0'``) are - permitted inside the value. The length of the string will be passed in - to the impl function, just after the string parameter, as a parameter named - ``<parameter_name>_length``. - -Please note, not every possible combination of arguments will work. -Usually these arguments are implemented by specific ``PyArg_ParseTuple`` -*format units*, with specific behavior. For example, currently you cannot -call ``unsigned_short`` without also specifying ``bitwise=True``. -Although it's perfectly reasonable to think this would work, these semantics don't -map to any existing format unit. So Argument Clinic doesn't support it. (Or, at -least, not yet.) - -Below is a table showing the mapping of legacy converters into real -Argument Clinic converters. On the left is the legacy converter, -on the right is the text you'd replace it with. - -========= ================================================================================= -``'B'`` ``unsigned_char(bitwise=True)`` -``'b'`` ``unsigned_char`` -``'c'`` ``char`` -``'C'`` ``int(accept={str})`` -``'d'`` ``double`` -``'D'`` ``Py_complex`` -``'es'`` ``str(encoding='name_of_encoding')`` -``'es#'`` ``str(encoding='name_of_encoding', zeroes=True)`` -``'et'`` ``str(encoding='name_of_encoding', accept={bytes, bytearray, str})`` -``'et#'`` ``str(encoding='name_of_encoding', accept={bytes, bytearray, str}, zeroes=True)`` -``'f'`` ``float`` -``'h'`` ``short`` -``'H'`` ``unsigned_short(bitwise=True)`` -``'i'`` ``int`` -``'I'`` ``unsigned_int(bitwise=True)`` -``'k'`` ``unsigned_long(bitwise=True)`` -``'K'`` ``unsigned_long_long(bitwise=True)`` -``'l'`` ``long`` -``'L'`` ``long long`` -``'n'`` ``Py_ssize_t`` -``'O'`` ``object`` -``'O!'`` ``object(subclass_of='&PySomething_Type')`` -``'O&'`` ``object(converter='name_of_c_function')`` -``'p'`` ``bool`` -``'S'`` ``PyBytesObject`` -``'s'`` ``str`` -``'s#'`` ``str(zeroes=True)`` -``'s*'`` ``Py_buffer(accept={buffer, str})`` -``'U'`` ``unicode`` -``'u'`` ``wchar_t`` -``'u#'`` ``wchar_t(zeroes=True)`` -``'w*'`` ``Py_buffer(accept={rwbuffer})`` -``'Y'`` ``PyByteArrayObject`` -``'y'`` ``str(accept={bytes})`` -``'y#'`` ``str(accept={robuffer}, zeroes=True)`` -``'y*'`` ``Py_buffer`` -``'Z'`` ``wchar_t(accept={str, NoneType})`` -``'Z#'`` ``wchar_t(accept={str, NoneType}, zeroes=True)`` -``'z'`` ``str(accept={str, NoneType})`` -``'z#'`` ``str(accept={str, NoneType}, zeroes=True)`` -``'z*'`` ``Py_buffer(accept={buffer, str, NoneType})`` -========= ================================================================================= - -As an example, here's our sample ``pickle.Pickler.dump`` using the proper -converter:: - - /*[clinic input] - pickle.Pickler.dump - - obj: object - The object to be pickled. - / - - Write a pickled representation of obj to the open file. - [clinic start generated code]*/ - -One advantage of real converters is that they're more flexible than legacy -converters. For example, the ``unsigned_int`` converter (and all the -``unsigned_`` converters) can be specified without ``bitwise=True``. Their -default behavior performs range checking on the value, and they won't accept -negative numbers. 
You just can't do that with a legacy converter! - -Argument Clinic will show you all the converters it has -available. For each converter it'll show you all the parameters -it accepts, along with the default value for each parameter. -Just run ``Tools/clinic/clinic.py --converters`` to see the full list. - -Py_buffer ---------- - -When using the ``Py_buffer`` converter -(or the ``'s*'``, ``'w*'``, ``'*y'``, or ``'z*'`` legacy converters), -you *must* not call :c:func:`PyBuffer_Release` on the provided buffer. -Argument Clinic generates code that does it for you (in the parsing function). - - - -Advanced converters -------------------- - -Remember those format units you skipped for your first -time because they were advanced? Here's how to handle those too. - -The trick is, all those format units take arguments—either -conversion functions, or types, or strings specifying an encoding. -(But "legacy converters" don't support arguments. That's why we -skipped them for your first function.) The argument you specified -to the format unit is now an argument to the converter; this -argument is either ``converter`` (for ``O&``), ``subclass_of`` (for ``O!``), -or ``encoding`` (for all the format units that start with ``e``). - -When using ``subclass_of``, you may also want to use the other -custom argument for ``object()``: ``type``, which lets you set the type -actually used for the parameter. For example, if you want to ensure -that the object is a subclass of ``PyUnicode_Type``, you probably want -to use the converter ``object(type='PyUnicodeObject *', subclass_of='&PyUnicode_Type')``. - -One possible problem with using Argument Clinic: it takes away some possible -flexibility for the format units starting with ``e``. When writing a -``PyArg_Parse`` call by hand, you could theoretically decide at runtime what -encoding string to pass in to :c:func:`PyArg_ParseTuple`. But now this string must -be hard-coded at Argument-Clinic-preprocessing-time. This limitation is deliberate; -it made supporting this format unit much easier, and may allow for future optimizations. -This restriction doesn't seem unreasonable; CPython itself always passes in static -hard-coded encoding strings for parameters whose format units start with ``e``. - - -.. _default_values: - -Parameter default values ------------------------- - -Default values for parameters can be any of a number of values. -At their simplest, they can be string, int, or float literals: - -.. code-block:: none - - foo: str = "abc" - bar: int = 123 - bat: float = 45.6 - -They can also use any of Python's built-in constants: - -.. code-block:: none - - yep: bool = True - nope: bool = False - nada: object = None - -There's also special support for a default value of ``NULL``, and -for simple expressions, documented in the following sections. - - -The ``NULL`` default value --------------------------- - -For string and object parameters, you can set them to ``None`` to indicate -that there's no default. However, that means the C variable will be -initialized to ``Py_None``. For convenience's sakes, there's a special -value called ``NULL`` for just this reason: from Python's perspective it -behaves like a default value of ``None``, but the C variable is initialized -with ``NULL``. - -Expressions specified as default values ---------------------------------------- - -The default value for a parameter can be more than just a literal value. -It can be an entire expression, using math operators and looking up attributes -on objects. 
However, this support isn't exactly simple, because of some -non-obvious semantics. - -Consider the following example: - -.. code-block:: none - - foo: Py_ssize_t = sys.maxsize - 1 - -``sys.maxsize`` can have different values on different platforms. Therefore -Argument Clinic can't simply evaluate that expression locally and hard-code it -in C. So it stores the default in such a way that it will get evaluated at -runtime, when the user asks for the function's signature. - -What namespace is available when the expression is evaluated? It's evaluated -in the context of the module the builtin came from. So, if your module has an -attribute called "``max_widgets``", you may simply use it: - -.. code-block:: none - - foo: Py_ssize_t = max_widgets - -If the symbol isn't found in the current module, it fails over to looking in -``sys.modules``. That's how it can find ``sys.maxsize`` for example. (Since you -don't know in advance what modules the user will load into their interpreter, -it's best to restrict yourself to modules that are preloaded by Python itself.) - -Evaluating default values only at runtime means Argument Clinic can't compute -the correct equivalent C default value. So you need to tell it explicitly. -When you use an expression, you must also specify the equivalent expression -in C, using the ``c_default`` parameter to the converter: - -.. code-block:: none - - foo: Py_ssize_t(c_default="PY_SSIZE_T_MAX - 1") = sys.maxsize - 1 - -Another complication: Argument Clinic can't know in advance whether or not the -expression you supply is valid. It parses it to make sure it looks legal, but -it can't *actually* know. You must be very careful when using expressions to -specify values that are guaranteed to be valid at runtime! - -Finally, because expressions must be representable as static C values, there -are many restrictions on legal expressions. Here's a list of Python features -you're not permitted to use: - -* Function calls. -* Inline if statements (``3 if foo else 5``). -* Automatic sequence unpacking (``*[1, 2, 3]``). -* List/set/dict comprehensions and generator expressions. -* Tuple/list/set/dict literals. - - - -Using a return converter ------------------------- - -By default the impl function Argument Clinic generates for you returns ``PyObject *``. -But your C function often computes some C type, then converts it into the ``PyObject *`` -at the last moment. Argument Clinic handles converting your inputs from Python types -into native C types—why not have it convert your return value from a native C type -into a Python type too? - -That's what a "return converter" does. It changes your impl function to return -some C type, then adds code to the generated (non-impl) function to handle converting -that value into the appropriate ``PyObject *``. - -The syntax for return converters is similar to that of parameter converters. -You specify the return converter like it was a return annotation on the -function itself. Return converters behave much the same as parameter converters; -they take arguments, the arguments are all keyword-only, and if you're not changing -any of the default arguments you can omit the parentheses. - -(If you use both ``"as"`` *and* a return converter for your function, -the ``"as"`` should come before the return converter.) - -There's one additional complication when using return converters: how do you -indicate an error has occurred? Normally, a function returns a valid (non-``NULL``) -pointer for success, and ``NULL`` for failure. 
But if you use an integer return converter, -all integers are valid. How can Argument Clinic detect an error? Its solution: each return -converter implicitly looks for a special value that indicates an error. If you return -that value, and an error has been set (``PyErr_Occurred()`` returns a true -value), then the generated code will propagate the error. Otherwise it will -encode the value you return like normal. - -Currently Argument Clinic supports only a few return converters: - -.. code-block:: none - - bool - int - unsigned int - long - unsigned int - size_t - Py_ssize_t - float - double - DecodeFSDefault - -None of these take parameters. For the first three, return -1 to indicate -error. For ``DecodeFSDefault``, the return type is ``const char *``; return a ``NULL`` -pointer to indicate an error. - -To see all the return converters Argument Clinic supports, along with -their parameters (if any), -just run ``Tools/clinic/clinic.py --converters`` for the full list. - - -Cloning existing functions --------------------------- - -If you have a number of functions that look similar, you may be able to -use Clinic's "clone" feature. When you clone an existing function, -you reuse: - -* its parameters, including - - * their names, - - * their converters, with all parameters, - - * their default values, - - * their per-parameter docstrings, - - * their *kind* (whether they're positional only, - positional or keyword, or keyword only), and - -* its return converter. - -The only thing not copied from the original function is its docstring; -the syntax allows you to specify a new docstring. - -Here's the syntax for cloning a function:: - - /*[clinic input] - module.class.new_function [as c_basename] = module.class.existing_function - - Docstring for new_function goes here. - [clinic start generated code]*/ - -(The functions can be in different modules or classes. I wrote -``module.class`` in the sample just to illustrate that you must -use the full path to *both* functions.) - -Sorry, there's no syntax for partially cloning a function, or cloning a function -then modifying it. Cloning is an all-or nothing proposition. - -Also, the function you are cloning from must have been previously defined -in the current file. - -Calling Python code -------------------- - -The rest of the advanced topics require you to write Python code -which lives inside your C file and modifies Argument Clinic's -runtime state. This is simple: you simply define a Python block. - -A Python block uses different delimiter lines than an Argument -Clinic function block. It looks like this:: - - /*[python input] - # python code goes here - [python start generated code]*/ - -All the code inside the Python block is executed at the -time it's parsed. All text written to stdout inside the block -is redirected into the "output" after the block. - -As an example, here's a Python block that adds a static integer -variable to the C code:: - - /*[python input] - print('static int __ignored_unused_variable__ = 0;') - [python start generated code]*/ - static int __ignored_unused_variable__ = 0; - /*[python checksum:...]*/ - - -Using a "self converter" ------------------------- - -Argument Clinic automatically adds a "self" parameter for you -using a default converter. It automatically sets the ``type`` -of this parameter to the "pointer to an instance" you specified -when you declared the type. However, you can override -Argument Clinic's converter and specify one yourself. 
-Just add your own ``self`` parameter as the first parameter in a -block, and ensure that its converter is an instance of -``self_converter`` or a subclass thereof. - -What's the point? This lets you override the type of ``self``, -or give it a different default name. - -How do you specify the custom type you want to cast ``self`` to? -If you only have one or two functions with the same type for ``self``, -you can directly use Argument Clinic's existing ``self`` converter, -passing in the type you want to use as the ``type`` parameter:: - - /*[clinic input] - - _pickle.Pickler.dump - - self: self(type="PicklerObject *") - obj: object - / - - Write a pickled representation of the given object to the open file. - [clinic start generated code]*/ - -On the other hand, if you have a lot of functions that will use the same -type for ``self``, it's best to create your own converter, subclassing -``self_converter`` but overwriting the ``type`` member:: - - /*[python input] - class PicklerObject_converter(self_converter): - type = "PicklerObject *" - [python start generated code]*/ - - /*[clinic input] - - _pickle.Pickler.dump - - self: PicklerObject - obj: object - / - - Write a pickled representation of the given object to the open file. - [clinic start generated code]*/ - - -Using a "defining class" converter ----------------------------------- - -Argument Clinic facilitates gaining access to the defining class of a method. -This is useful for :ref:`heap type <heap-types>` methods that need to fetch -module level state. Use :c:func:`PyType_FromModuleAndSpec` to associate a new -heap type with a module. You can now use :c:func:`PyType_GetModuleState` on -the defining class to fetch the module state, for example from a module method. - -Example from ``Modules/zlibmodule.c``. First, ``defining_class`` is added to -the clinic input:: - - /*[clinic input] - zlib.Compress.compress - - cls: defining_class - data: Py_buffer - Binary data to be compressed. - / - - -After running the Argument Clinic tool, the following function signature is -generated:: - - /*[clinic start generated code]*/ - static PyObject * - zlib_Compress_compress_impl(compobject *self, PyTypeObject *cls, - Py_buffer *data) - /*[clinic end generated code: output=6731b3f0ff357ca6 input=04d00f65ab01d260]*/ - - -The following code can now use ``PyType_GetModuleState(cls)`` to fetch the -module state:: - - zlibstate *state = PyType_GetModuleState(cls); - - -Each method may only have one argument using this converter, and it must appear -after ``self``, or, if ``self`` is not used, as the first argument. The argument -will be of type ``PyTypeObject *``. The argument will not appear in the -``__text_signature__``. - -The ``defining_class`` converter is not compatible with ``__init__`` and ``__new__`` -methods, which cannot use the ``METH_METHOD`` convention. - -It is not possible to use ``defining_class`` with slot methods. In order to -fetch the module state from such methods, use :c:func:`PyType_GetModuleByDef` -to look up the module and then :c:func:`PyModule_GetState` to fetch the module -state. Example from the ``setattro`` slot method in -``Modules/_threadmodule.c``:: - - static int - local_setattro(localobject *self, PyObject *name, PyObject *v) - { - PyObject *module = PyType_GetModuleByDef(Py_TYPE(self), &thread_module); - thread_module_state *state = get_thread_state(module); - ... - } - - -See also :pep:`573`. - - -Writing a custom converter --------------------------- - -As we hinted at in the previous section... 
you can write your own converters! -A converter is simply a Python class that inherits from ``CConverter``. -The main purpose of a custom converter is if you have a parameter using -the ``O&`` format unit—parsing this parameter means calling -a :c:func:`PyArg_ParseTuple` "converter function". - -Your converter class should be named ``*something*_converter``. -If the name follows this convention, then your converter class -will be automatically registered with Argument Clinic; its name -will be the name of your class with the ``_converter`` suffix -stripped off. (This is accomplished with a metaclass.) - -You shouldn't subclass ``CConverter.__init__``. Instead, you should -write a ``converter_init()`` function. ``converter_init()`` -always accepts a ``self`` parameter; after that, all additional -parameters *must* be keyword-only. Any arguments passed in to -the converter in Argument Clinic will be passed along to your -``converter_init()``. - -There are some additional members of ``CConverter`` you may wish -to specify in your subclass. Here's the current list: - -``type`` - The C type to use for this variable. - ``type`` should be a Python string specifying the type, e.g. ``int``. - If this is a pointer type, the type string should end with ``' *'``. - -``default`` - The Python default value for this parameter, as a Python value. - Or the magic value ``unspecified`` if there is no default. - -``py_default`` - ``default`` as it should appear in Python code, - as a string. - Or ``None`` if there is no default. - -``c_default`` - ``default`` as it should appear in C code, - as a string. - Or ``None`` if there is no default. - -``c_ignored_default`` - The default value used to initialize the C variable when - there is no default, but not specifying a default may - result in an "uninitialized variable" warning. This can - easily happen when using option groups—although - properly written code will never actually use this value, - the variable does get passed in to the impl, and the - C compiler will complain about the "use" of the - uninitialized value. This value should always be a - non-empty string. - -``converter`` - The name of the C converter function, as a string. - -``impl_by_reference`` - A boolean value. If true, - Argument Clinic will add a ``&`` in front of the name of - the variable when passing it into the impl function. - -``parse_by_reference`` - A boolean value. If true, - Argument Clinic will add a ``&`` in front of the name of - the variable when passing it into :c:func:`PyArg_ParseTuple`. - - -Here's the simplest example of a custom converter, from ``Modules/zlibmodule.c``:: - - /*[python input] - - class ssize_t_converter(CConverter): - type = 'Py_ssize_t' - converter = 'ssize_t_converter' - - [python start generated code]*/ - /*[python end generated code: output=da39a3ee5e6b4b0d input=35521e4e733823c7]*/ - -This block adds a converter to Argument Clinic named ``ssize_t``. Parameters -declared as ``ssize_t`` will be declared as type :c:type:`Py_ssize_t`, and will -be parsed by the ``'O&'`` format unit, which will call the -``ssize_t_converter`` converter function. ``ssize_t`` variables -automatically support default values. - -More sophisticated custom converters can insert custom C code to -handle initialization and cleanup. -You can see more examples of custom converters in the CPython -source tree; grep the C files for the string ``CConverter``. 
- -Writing a custom return converter ---------------------------------- - -Writing a custom return converter is much like writing -a custom converter. Except it's somewhat simpler, because return -converters are themselves much simpler. - -Return converters must subclass ``CReturnConverter``. -There are no examples yet of custom return converters, -because they are not widely used yet. If you wish to -write your own return converter, please read ``Tools/clinic/clinic.py``, -specifically the implementation of ``CReturnConverter`` and -all its subclasses. - -METH_O and METH_NOARGS ----------------------------------------------- - -To convert a function using ``METH_O``, make sure the function's -single argument is using the ``object`` converter, and mark the -arguments as positional-only:: - - /*[clinic input] - meth_o_sample - - argument: object - / - [clinic start generated code]*/ - - -To convert a function using ``METH_NOARGS``, just don't specify -any arguments. - -You can still use a self converter, a return converter, and specify -a ``type`` argument to the object converter for ``METH_O``. - -tp_new and tp_init functions ----------------------------------------------- - -You can convert ``tp_new`` and ``tp_init`` functions. Just name -them ``__new__`` or ``__init__`` as appropriate. Notes: - -* The function name generated for ``__new__`` doesn't end in ``__new__`` - like it would by default. It's just the name of the class, converted - into a valid C identifier. - -* No ``PyMethodDef`` ``#define`` is generated for these functions. - -* ``__init__`` functions return ``int``, not ``PyObject *``. - -* Use the docstring as the class docstring. - -* Although ``__new__`` and ``__init__`` functions must always - accept both the ``args`` and ``kwargs`` objects, when converting - you may specify any signature for these functions that you like. - (If your function doesn't support keywords, the parsing function - generated will throw an exception if it receives any.) - -Changing and redirecting Clinic's output ----------------------------------------- - -It can be inconvenient to have Clinic's output interspersed with -your conventional hand-edited C code. Luckily, Clinic is configurable: -you can buffer up its output for printing later (or earlier!), or write -its output to a separate file. You can also add a prefix or suffix to -every line of Clinic's generated output. - -While changing Clinic's output in this manner can be a boon to readability, -it may result in Clinic code using types before they are defined, or -your code attempting to use Clinic-generated code before it is defined. -These problems can be easily solved by rearranging the declarations in your file, -or moving where Clinic's generated code goes. (This is why the default behavior -of Clinic is to output everything into the current block; while many people -consider this hampers readability, it will never require rearranging your -code to fix definition-before-use problems.) - -Let's start with defining some terminology: - -*field* - A field, in this context, is a subsection of Clinic's output. - For example, the ``#define`` for the ``PyMethodDef`` structure - is a field, called ``methoddef_define``. Clinic has seven - different fields it can output per function definition: - - .. 
code-block:: none - - docstring_prototype - docstring_definition - methoddef_define - impl_prototype - parser_prototype - parser_definition - impl_definition - - All the names are of the form ``"<a>_<b>"``, - where ``"<a>"`` is the semantic object represented (the parsing function, - the impl function, the docstring, or the methoddef structure) and ``"<b>"`` - represents what kind of statement the field is. Field names that end in - ``"_prototype"`` - represent forward declarations of that thing, without the actual body/data - of the thing; field names that end in ``"_definition"`` represent the actual - definition of the thing, with the body/data of the thing. (``"methoddef"`` - is special, it's the only one that ends with ``"_define"``, representing that - it's a preprocessor #define.) - -*destination* - A destination is a place Clinic can write output to. There are - five built-in destinations: - - ``block`` - The default destination: printed in the output section of - the current Clinic block. - - ``buffer`` - A text buffer where you can save text for later. Text sent - here is appended to the end of any existing text. It's an - error to have any text left in the buffer when Clinic finishes - processing a file. - - ``file`` - A separate "clinic file" that will be created automatically by Clinic. - The filename chosen for the file is ``{basename}.clinic{extension}``, - where ``basename`` and ``extension`` were assigned the output - from ``os.path.splitext()`` run on the current file. (Example: - the ``file`` destination for ``_pickle.c`` would be written to - ``_pickle.clinic.c``.) - - **Important: When using a** ``file`` **destination, you** - *must check in* **the generated file!** - - ``two-pass`` - A buffer like ``buffer``. However, a two-pass buffer can only - be dumped once, and it prints out all text sent to it during - all processing, even from Clinic blocks *after* the dumping point. - - ``suppress`` - The text is suppressed—thrown away. - - -Clinic defines five new directives that let you reconfigure its output. - -The first new directive is ``dump``: - -.. code-block:: none - - dump <destination> - -This dumps the current contents of the named destination into the output of -the current block, and empties it. This only works with ``buffer`` and -``two-pass`` destinations. - -The second new directive is ``output``. The most basic form of ``output`` -is like this: - -.. code-block:: none - - output <field> <destination> - -This tells Clinic to output *field* to *destination*. ``output`` also -supports a special meta-destination, called ``everything``, which tells -Clinic to output *all* fields to that *destination*. - -``output`` has a number of other functions: - -.. code-block:: none - - output push - output pop - output preset <preset> - - -``output push`` and ``output pop`` allow you to push and pop -configurations on an internal configuration stack, so that you -can temporarily modify the output configuration, then easily restore -the previous configuration. Simply push before your change to save -the current configuration, then pop when you wish to restore the -previous configuration. - -``output preset`` sets Clinic's output to one of several built-in -preset configurations, as follows: - - ``block`` - Clinic's original starting configuration. Writes everything - immediately after the input block. - - Suppress the ``parser_prototype`` - and ``docstring_prototype``, write everything else to ``block``. 
- - ``file`` - Designed to write everything to the "clinic file" that it can. - You then ``#include`` this file near the top of your file. - You may need to rearrange your file to make this work, though - usually this just means creating forward declarations for various - ``typedef`` and ``PyTypeObject`` definitions. - - Suppress the ``parser_prototype`` - and ``docstring_prototype``, write the ``impl_definition`` to - ``block``, and write everything else to ``file``. - - The default filename is ``"{dirname}/clinic/{basename}.h"``. - - ``buffer`` - Save up most of the output from Clinic, to be written into - your file near the end. For Python files implementing modules - or builtin types, it's recommended that you dump the buffer - just above the static structures for your module or - builtin type; these are normally very near the end. Using - ``buffer`` may require even more editing than ``file``, if - your file has static ``PyMethodDef`` arrays defined in the - middle of the file. - - Suppress the ``parser_prototype``, ``impl_prototype``, - and ``docstring_prototype``, write the ``impl_definition`` to - ``block``, and write everything else to ``file``. - - ``two-pass`` - Similar to the ``buffer`` preset, but writes forward declarations to - the ``two-pass`` buffer, and definitions to the ``buffer``. - This is similar to the ``buffer`` preset, but may require - less editing than ``buffer``. Dump the ``two-pass`` buffer - near the top of your file, and dump the ``buffer`` near - the end just like you would when using the ``buffer`` preset. - - Suppresses the ``impl_prototype``, write the ``impl_definition`` - to ``block``, write ``docstring_prototype``, ``methoddef_define``, - and ``parser_prototype`` to ``two-pass``, write everything else - to ``buffer``. - - ``partial-buffer`` - Similar to the ``buffer`` preset, but writes more things to ``block``, - only writing the really big chunks of generated code to ``buffer``. - This avoids the definition-before-use problem of ``buffer`` completely, - at the small cost of having slightly more stuff in the block's output. - Dump the ``buffer`` near the end, just like you would when using - the ``buffer`` preset. - - Suppresses the ``impl_prototype``, write the ``docstring_definition`` - and ``parser_definition`` to ``buffer``, write everything else to ``block``. - -The third new directive is ``destination``: - -.. code-block:: none - - destination <name> <command> [...] - -This performs an operation on the destination named ``name``. - -There are two defined subcommands: ``new`` and ``clear``. - -The ``new`` subcommand works like this: - -.. code-block:: none - - destination <name> new <type> - -This creates a new destination with name ``<name>`` and type ``<type>``. - -There are five destination types: - - ``suppress`` - Throws the text away. - - ``block`` - Writes the text to the current block. This is what Clinic - originally did. - - ``buffer`` - A simple text buffer, like the "buffer" builtin destination above. - - ``file`` - A text file. The file destination takes an extra argument, - a template to use for building the filename, like so: - - destination <name> new <type> <file_template> - - The template can use three strings internally that will be replaced - by bits of the filename: - - {path} - The full path to the file, including directory and full filename. - {dirname} - The name of the directory the file is in. - {basename} - Just the name of the file, not including the directory. 
- {basename_root} - Basename with the extension clipped off - (everything up to but not including the last '.'). - {basename_extension} - The last '.' and everything after it. If the basename - does not contain a period, this will be the empty string. - - If there are no periods in the filename, {basename} and {filename} - are the same, and {extension} is empty. "{basename}{extension}" - is always exactly the same as "{filename}"." - - ``two-pass`` - A two-pass buffer, like the "two-pass" builtin destination above. - - -The ``clear`` subcommand works like this: - -.. code-block:: none - - destination <name> clear - -It removes all the accumulated text up to this point in the destination. -(I don't know what you'd need this for, but I thought maybe it'd be -useful while someone's experimenting.) - -The fourth new directive is ``set``: - -.. code-block:: none - - set line_prefix "string" - set line_suffix "string" - -``set`` lets you set two internal variables in Clinic. -``line_prefix`` is a string that will be prepended to every line of Clinic's output; -``line_suffix`` is a string that will be appended to every line of Clinic's output. - -Both of these support two format strings: - - ``{block comment start}`` - Turns into the string ``/*``, the start-comment text sequence for C files. - - ``{block comment end}`` - Turns into the string ``*/``, the end-comment text sequence for C files. - -The final new directive is one you shouldn't need to use directly, -called ``preserve``: - -.. code-block:: none - - preserve - -This tells Clinic that the current contents of the output should be kept, unmodified. -This is used internally by Clinic when dumping output into ``file`` files; wrapping -it in a Clinic block lets Clinic use its existing checksum functionality to ensure -the file was not modified by hand before it gets overwritten. - - -The #ifdef trick ----------------------------------------------- - -If you're converting a function that isn't available on all platforms, -there's a trick you can use to make life a little easier. The existing -code probably looks like this:: - - #ifdef HAVE_FUNCTIONNAME - static module_functionname(...) - { - ... - } - #endif /* HAVE_FUNCTIONNAME */ - -And then in the ``PyMethodDef`` structure at the bottom the existing code -will have: - -.. code-block:: none - - #ifdef HAVE_FUNCTIONNAME - {'functionname', ... }, - #endif /* HAVE_FUNCTIONNAME */ - -In this scenario, you should enclose the body of your impl function inside the ``#ifdef``, -like so:: - - #ifdef HAVE_FUNCTIONNAME - /*[clinic input] - module.functionname - ... - [clinic start generated code]*/ - static module_functionname(...) - { - ... - } - #endif /* HAVE_FUNCTIONNAME */ - -Then, remove those three lines from the ``PyMethodDef`` structure, -replacing them with the macro Argument Clinic generated: - -.. code-block:: none - - MODULE_FUNCTIONNAME_METHODDEF - -(You can find the real name for this macro inside the generated code. -Or you can calculate it yourself: it's the name of your function as defined -on the first line of your block, but with periods changed to underscores, -uppercased, and ``"_METHODDEF"`` added to the end.) - -Perhaps you're wondering: what if ``HAVE_FUNCTIONNAME`` isn't defined? -The ``MODULE_FUNCTIONNAME_METHODDEF`` macro won't be defined either! - -Here's where Argument Clinic gets very clever. It actually detects that the -Argument Clinic block might be deactivated by the ``#ifdef``. 
When that -happens, it generates a little extra code that looks like this:: - - #ifndef MODULE_FUNCTIONNAME_METHODDEF - #define MODULE_FUNCTIONNAME_METHODDEF - #endif /* !defined(MODULE_FUNCTIONNAME_METHODDEF) */ - -That means the macro always works. If the function is defined, this turns -into the correct structure, including the trailing comma. If the function is -undefined, this turns into nothing. - -However, this causes one ticklish problem: where should Argument Clinic put this -extra code when using the "block" output preset? It can't go in the output block, -because that could be deactivated by the ``#ifdef``. (That's the whole point!) - -In this situation, Argument Clinic writes the extra code to the "buffer" destination. -This may mean that you get a complaint from Argument Clinic: - -.. code-block:: none - - Warning in file "Modules/posixmodule.c" on line 12357: - Destination buffer 'buffer' not empty at end of file, emptying. - -When this happens, just open your file, find the ``dump buffer`` block that -Argument Clinic added to your file (it'll be at the very bottom), then -move it above the ``PyMethodDef`` structure where that macro is used. - - - -Using Argument Clinic in Python files -------------------------------------- - -It's actually possible to use Argument Clinic to preprocess Python files. -There's no point to using Argument Clinic blocks, of course, as the output -wouldn't make any sense to the Python interpreter. But using Argument Clinic -to run Python blocks lets you use Python as a Python preprocessor! - -Since Python comments are different from C comments, Argument Clinic -blocks embedded in Python files look slightly different. They look like this: - -.. code-block:: python3 - - #/*[python input] - #print("def foo(): pass") - #[python start generated code]*/ - def foo(): pass - #/*[python checksum:...]*/ + The Argument Clinic How-TO has been moved to the `Python Developer's Guide + <https://devguide.python.org/development-tools/clinic/>`__. diff --git a/Doc/howto/curses.rst b/Doc/howto/curses.rst index 83d80471ffc8ee0..4828e2fa29bd247 100644 --- a/Doc/howto/curses.rst +++ b/Doc/howto/curses.rst @@ -4,6 +4,8 @@ Curses Programming with Python ********************************** +.. currentmodule:: curses + :Author: A.M. Kuchling, Eric S. Raymond :Release: 2.04 @@ -65,7 +67,7 @@ The Python module is a fairly simple wrapper over the C functions provided by curses; if you're already familiar with curses programming in C, it's really easy to transfer that knowledge to Python. The biggest difference is that the Python interface makes things simpler by merging different C functions such as -:c:func:`addstr`, :c:func:`mvaddstr`, and :c:func:`mvwaddstr` into a single +:c:func:`!addstr`, :c:func:`!mvaddstr`, and :c:func:`!mvwaddstr` into a single :meth:`~curses.window.addstr` method. You'll see this covered in more detail later. @@ -82,7 +84,7 @@ Before doing anything, curses must be initialized. This is done by calling the :func:`~curses.initscr` function, which will determine the terminal type, send any required setup codes to the terminal, and create various internal data structures. If successful, -:func:`initscr` returns a window object representing the entire +:func:`!initscr` returns a window object representing the entire screen; this is usually called ``stdscr`` after the name of the corresponding C variable. 
:: @@ -151,8 +153,8 @@ importing the :func:`curses.wrapper` function and using it like this:: The :func:`~curses.wrapper` function takes a callable object and does the initializations described above, also initializing colors if color -support is present. :func:`wrapper` then runs your provided callable. -Once the callable returns, :func:`wrapper` will restore the original +support is present. :func:`!wrapper` then runs your provided callable. +Once the callable returns, :func:`!wrapper` will restore the original state of the terminal. The callable is called inside a :keyword:`try`...\ :keyword:`except` that catches exceptions, restores the state of the terminal, and then re-raises the exception. Therefore @@ -200,7 +202,7 @@ This is because curses was originally written with slow 300-baud terminal connections in mind; with these terminals, minimizing the time required to redraw the screen was very important. Instead curses accumulates changes to the screen and displays them in the most -efficient manner when you call :meth:`refresh`. For example, if your +efficient manner when you call :meth:`!refresh`. For example, if your program displays some text in a window and then clears the window, there's no need to send the original text because they're never visible. @@ -210,7 +212,7 @@ really complicate programming with curses much. Most programs go into a flurry of activity, and then pause waiting for a keypress or some other action on the part of the user. All you have to do is to be sure that the screen has been redrawn before pausing to wait for user input, by first calling -``stdscr.refresh()`` or the :meth:`refresh` method of some other relevant +:meth:`!stdscr.refresh` or the :meth:`!refresh` method of some other relevant window. A pad is a special case of a window; it can be larger than the actual display @@ -234,7 +236,7 @@ displayed. :: # : filled with pad content. pad.refresh( 0,0, 5,5, 20,75) -The :meth:`refresh` call displays a section of the pad in the rectangle +The :meth:`!refresh` call displays a section of the pad in the rectangle extending from coordinate (5,5) to coordinate (20,75) on the screen; the upper left corner of the displayed section is coordinate (0,0) on the pad. Beyond that difference, pads are exactly like ordinary windows and support the same @@ -242,7 +244,7 @@ methods. If you have multiple windows and pads on screen there is a more efficient way to update the screen and prevent annoying screen flicker -as each part of the screen gets updated. :meth:`refresh` actually +as each part of the screen gets updated. :meth:`!refresh` actually does two things: 1) Calls the :meth:`~curses.window.noutrefresh` method of each window @@ -251,8 +253,8 @@ does two things: 2) Calls the function :func:`~curses.doupdate` function to change the physical screen to match the desired state recorded in the data structure. -Instead you can call :meth:`noutrefresh` on a number of windows to -update the data structure, and then call :func:`doupdate` to update +Instead you can call :meth:`!noutrefresh` on a number of windows to +update the data structure, and then call :func:`!doupdate` to update the screen. @@ -261,11 +263,11 @@ Displaying Text From a C programmer's point of view, curses may sometimes look like a twisty maze of functions, all subtly different. For example, -:c:func:`addstr` displays a string at the current cursor location in -the ``stdscr`` window, while :c:func:`mvaddstr` moves to a given y,x -coordinate first before displaying the string. 
:c:func:`waddstr` is just -like :c:func:`addstr`, but allows specifying a window to use instead of -using ``stdscr`` by default. :c:func:`mvwaddstr` allows specifying both +:c:func:`!addstr` displays a string at the current cursor location in +the ``stdscr`` window, while :c:func:`!mvaddstr` moves to a given y,x +coordinate first before displaying the string. :c:func:`!waddstr` is just +like :c:func:`!addstr`, but allows specifying a window to use instead of +using ``stdscr`` by default. :c:func:`!mvwaddstr` allows specifying both a window and a coordinate. Fortunately the Python interface hides all these details. ``stdscr`` @@ -298,7 +300,7 @@ the next subsection. The :meth:`~curses.window.addstr` method takes a Python string or bytestring as the value to be displayed. The contents of bytestrings are sent to the terminal as-is. Strings are encoded to bytes using -the value of the window's :attr:`encoding` attribute; this defaults to +the value of the window's :attr:`~window.encoding` attribute; this defaults to the default system encoding as returned by :func:`locale.getencoding`. The :meth:`~curses.window.addch` methods take a character, which can be @@ -444,15 +446,15 @@ There are two methods for getting input from a window: It's possible to not wait for the user using the :meth:`~curses.window.nodelay` window method. After ``nodelay(True)``, -:meth:`getch` and :meth:`getkey` for the window become -non-blocking. To signal that no input is ready, :meth:`getch` returns -``curses.ERR`` (a value of -1) and :meth:`getkey` raises an exception. +:meth:`!getch` and :meth:`!getkey` for the window become +non-blocking. To signal that no input is ready, :meth:`!getch` returns +``curses.ERR`` (a value of -1) and :meth:`!getkey` raises an exception. There's also a :func:`~curses.halfdelay` function, which can be used to (in -effect) set a timer on each :meth:`getch`; if no input becomes +effect) set a timer on each :meth:`!getch`; if no input becomes available within a specified delay (measured in tenths of a second), curses raises an exception. -The :meth:`getch` method returns an integer; if it's between 0 and 255, it +The :meth:`!getch` method returns an integer; if it's between 0 and 255, it represents the ASCII code of the key pressed. Values greater than 255 are special keys such as Page Up, Home, or the cursor keys. You can compare the value returned to constants such as :const:`curses.KEY_PPAGE`, @@ -525,7 +527,7 @@ If you're in doubt about the detailed behavior of the curses functions, consult the manual pages for your curses implementation, whether it's ncurses or a proprietary Unix vendor's. The manual pages will document any quirks, and provide complete lists of all the -functions, attributes, and :const:`ACS_\*` characters available to +functions, attributes, and :ref:`ACS_\* <curses-acs-codes>` characters available to you. Because the curses API is so large, some functions aren't supported in diff --git a/Doc/howto/descriptor.rst b/Doc/howto/descriptor.rst index 74710d9b3fc2edc..f732aaea729d401 100644 --- a/Doc/howto/descriptor.rst +++ b/Doc/howto/descriptor.rst @@ -521,11 +521,11 @@ everyday Python programs. Descriptor protocol ------------------- -``descr.__get__(self, obj, type=None) -> value`` +``descr.__get__(self, obj, type=None)`` -``descr.__set__(self, obj, value) -> None`` +``descr.__set__(self, obj, value)`` -``descr.__delete__(self, obj) -> None`` +``descr.__delete__(self, obj)`` That is all there is to it. 
Define any of these methods and an object is considered a descriptor and can override default behavior upon being looked up @@ -779,8 +779,8 @@ by a search through the class's :term:`method resolution order`. If a descriptor is found, it is invoked with ``desc.__get__(None, A)``. -The full C implementation can be found in :c:func:`type_getattro()` and -:c:func:`_PyType_Lookup()` in :source:`Objects/typeobject.c`. +The full C implementation can be found in :c:func:`!type_getattro` and +:c:func:`!_PyType_Lookup` in :source:`Objects/typeobject.c`. Invocation from super @@ -794,7 +794,7 @@ for the base class ``B`` immediately following ``A`` and then returns ``B.__dict__['m'].__get__(obj, A)``. If not a descriptor, ``m`` is returned unchanged. -The full C implementation can be found in :c:func:`super_getattro()` in +The full C implementation can be found in :c:func:`!super_getattro` in :source:`Objects/typeobject.c`. A pure Python equivalent can be found in `Guido's Tutorial <https://www.python.org/download/releases/2.2.3/descrintro/#cooperation>`_. @@ -836,8 +836,8 @@ and if they define :meth:`__set_name__`, that method is called with two arguments. The *owner* is the class where the descriptor is used, and the *name* is the class variable the descriptor was assigned to. -The implementation details are in :c:func:`type_new()` and -:c:func:`set_names()` in :source:`Objects/typeobject.c`. +The implementation details are in :c:func:`!type_new` and +:c:func:`!set_names` in :source:`Objects/typeobject.c`. Since the update logic is in :meth:`type.__new__`, notifications only take place at the time of class creation. If descriptors are added to the class @@ -943,6 +943,10 @@ it can be updated: >>> Movie('Star Wars').director 'J.J. Abrams' +.. testcleanup:: + + conn.close() + Pure Python Equivalents ^^^^^^^^^^^^^^^^^^^^^^^ @@ -1009,17 +1013,23 @@ here is a pure Python equivalent: if obj is None: return self if self.fget is None: - raise AttributeError(f"property '{self._name}' has no getter") + raise AttributeError( + f'property {self._name!r} of {type(obj).__name__!r} object has no getter' + ) return self.fget(obj) def __set__(self, obj, value): if self.fset is None: - raise AttributeError(f"property '{self._name}' has no setter") + raise AttributeError( + f'property {self._name!r} of {type(obj).__name__!r} object has no setter' + ) self.fset(obj, value) def __delete__(self, obj): if self.fdel is None: - raise AttributeError(f"property '{self._name}' has no deleter") + raise AttributeError( + f'property {self._name!r} of {type(obj).__name__!r} object has no deleter' + ) self.fdel(obj) def getter(self, fget): @@ -1050,6 +1060,11 @@ here is a pure Python equivalent: def delx(self): del self.__x x = Property(getx, setx, delx, "I'm the 'x' property.") + no_getter = Property(None, setx, delx, "I'm the 'x' property.") + no_setter = Property(getx, None, delx, "I'm the 'x' property.") + no_deleter = Property(getx, setx, None, "I'm the 'x' property.") + no_doc = Property(getx, setx, delx, None) + # Now do it again but use the decorator style @@ -1088,6 +1103,32 @@ here is a pure Python equivalent: >>> hasattr(ccc, 'x') False + >>> cc = CC() + >>> cc.x = 33 + >>> try: + ... cc.no_getter + ... except AttributeError as e: + ... e.args[0] + ... + "property 'no_getter' of 'CC' object has no getter" + + >>> try: + ... cc.no_setter = 33 + ... except AttributeError as e: + ... e.args[0] + ... + "property 'no_setter' of 'CC' object has no setter" + + >>> try: + ... del cc.no_deleter + ... 
except AttributeError as e: + ... e.args[0] + ... + "property 'no_deleter' of 'CC' object has no deleter" + + >>> CC.no_doc.__doc__ is None + True + The :func:`property` builtin helps whenever a user interface has granted attribute access and then subsequent changes require the intervention of a method. @@ -1141,6 +1182,16 @@ roughly equivalent to: obj = self.__self__ return func(obj, *args, **kwargs) + def __getattribute__(self, name): + "Emulate method_getset() in Objects/classobject.c" + if name == '__doc__': + return self.__func__.__doc__ + return object.__getattribute__(self, name) + + def __getattr__(self, name): + "Emulate method_getattro() in Objects/classobject.c" + return getattr(self.__func__, name) + To support automatic creation of methods, functions include the :meth:`__get__` method for binding methods during attribute access. This means that functions are non-data descriptors that return bound methods @@ -1273,11 +1324,14 @@ Using the non-data descriptor protocol, a pure Python version of .. testcode:: + import functools + class StaticMethod: "Emulate PyStaticMethod_Type() in Objects/funcobject.c" def __init__(self, f): self.f = f + functools.update_wrapper(self, f) def __get__(self, obj, objtype=None): return self.f @@ -1285,13 +1339,19 @@ Using the non-data descriptor protocol, a pure Python version of def __call__(self, *args, **kwds): return self.f(*args, **kwds) +The :func:`functools.update_wrapper` call adds a ``__wrapped__`` attribute +that refers to the underlying function. Also it carries forward +the attributes necessary to make the wrapper look like the wrapped +function: ``__name__``, ``__qualname__``, ``__doc__``, and ``__annotations__``. + .. testcode:: :hide: class E_sim: @StaticMethod - def f(x): - return x * 10 + def f(x: int) -> str: + "Simple function example" + return "!" * x wrapped_ord = StaticMethod(ord) @@ -1299,11 +1359,51 @@ Using the non-data descriptor protocol, a pure Python version of :hide: >>> E_sim.f(3) - 30 + '!!!' >>> E_sim().f(3) - 30 + '!!!' + + >>> sm = vars(E_sim)['f'] + >>> type(sm).__name__ + 'StaticMethod' + >>> f = E_sim.f + >>> type(f).__name__ + 'function' + >>> sm.__name__ + 'f' + >>> f.__name__ + 'f' + >>> sm.__qualname__ + 'E_sim.f' + >>> f.__qualname__ + 'E_sim.f' + >>> sm.__doc__ + 'Simple function example' + >>> f.__doc__ + 'Simple function example' + >>> sm.__annotations__ + {'x': <class 'int'>, 'return': <class 'str'>} + >>> f.__annotations__ + {'x': <class 'int'>, 'return': <class 'str'>} + >>> sm.__module__ == f.__module__ + True + >>> sm(3) + '!!!' + >>> f(3) + '!!!' + >>> wrapped_ord('A') 65 + >>> wrapped_ord.__module__ == ord.__module__ + True + >>> wrapped_ord.__wrapped__ == ord + True + >>> wrapped_ord.__name__ == ord.__name__ + True + >>> wrapped_ord.__qualname__ == ord.__qualname__ + True + >>> wrapped_ord.__doc__ == ord.__doc__ + True Class methods @@ -1359,19 +1459,18 @@ Using the non-data descriptor protocol, a pure Python version of .. testcode:: + import functools + class ClassMethod: "Emulate PyClassMethod_Type() in Objects/funcobject.c" def __init__(self, f): self.f = f + functools.update_wrapper(self, f) def __get__(self, obj, cls=None): if cls is None: cls = type(obj) - if hasattr(type(self.f), '__get__'): - # This code path was added in Python 3.9 - # and was deprecated in Python 3.11. - return self.f.__get__(cls, cls) return MethodType(self.f, cls) .. 
testcode:: @@ -1380,48 +1479,51 @@ Using the non-data descriptor protocol, a pure Python version of # Verify the emulation works class T: @ClassMethod - def cm(cls, x, y): - return (cls, x, y) - - @ClassMethod - @property - def __doc__(cls): - return f'A doc for {cls.__name__!r}' + def cm(cls, x: int, y: str) -> tuple[str, int, str]: + "Class method that returns a tuple" + return (cls.__name__, x, y) .. doctest:: :hide: >>> T.cm(11, 22) - (<class 'T'>, 11, 22) + ('T', 11, 22) # Also call it from an instance >>> t = T() >>> t.cm(11, 22) - (<class 'T'>, 11, 22) - - # Check the alternate path for chained descriptors - >>> T.__doc__ - "A doc for 'T'" - - -The code path for ``hasattr(type(self.f), '__get__')`` was added in -Python 3.9 and makes it possible for :func:`classmethod` to support -chained decorators. For example, a classmethod and property could be -chained together. In Python 3.11, this functionality was deprecated. - -.. testcode:: - - class G: - @classmethod - @property - def __doc__(cls): - return f'A doc for {cls.__name__!r}' - -.. doctest:: - - >>> G.__doc__ - "A doc for 'G'" + ('T', 11, 22) + + # Verify that T uses our emulation + >>> type(vars(T)['cm']).__name__ + 'ClassMethod' + + # Verify that update_wrapper() correctly copied attributes + >>> T.cm.__name__ + 'cm' + >>> T.cm.__qualname__ + 'T.cm' + >>> T.cm.__doc__ + 'Class method that returns a tuple' + >>> T.cm.__annotations__ + {'x': <class 'int'>, 'y': <class 'str'>, 'return': tuple[str, int, str]} + + # Verify that __wrapped__ was added and works correctly + >>> f = vars(T)['cm'].__wrapped__ + >>> type(f).__name__ + 'function' + >>> f.__name__ + 'cm' + >>> f(T, 11, 22) + ('T', 11, 22) + + +The :func:`functools.update_wrapper` call in ``ClassMethod`` adds a +``__wrapped__`` attribute that refers to the underlying function. Also +it carries forward the attributes necessary to make the wrapper look +like the wrapped function: ``__name__``, ``__qualname__``, ``__doc__``, +and ``__annotations__``. Member objects and __slots__ diff --git a/Doc/howto/enum.rst b/Doc/howto/enum.rst index 03f05657997cfda..a136c76303c8ef7 100644 --- a/Doc/howto/enum.rst +++ b/Doc/howto/enum.rst @@ -36,8 +36,10 @@ inherits from :class:`Enum` itself. .. note:: Case of Enum Members - Because Enums are used to represent constants we recommend using - UPPER_CASE names for members, and will be using that style in our examples. + Because Enums are used to represent constants, and to help avoid issues + with name clashes between mixin-class methods/attributes and enum names, + we strongly recommend using UPPER_CASE names for members, and will be using + that style in our examples. Depending on the nature of the enum a member's value may or may not be important, but either way that value can be used to get the corresponding @@ -158,6 +160,7 @@ And a function to display the chores for a given day:: ... for chore, days in chores.items(): ... if day in days: ... print(chore) + ... >>> show_chores(chores_for_ethan, Weekday.SATURDAY) answer SO questions @@ -173,6 +176,7 @@ yourself some work and use :func:`auto()` for the values:: ... FRIDAY = auto() ... SATURDAY = auto() ... SUNDAY = auto() + ... WEEKEND = SATURDAY | SUNDAY .. _enum-advanced-tutorial: @@ -282,6 +286,7 @@ The values are chosen by :func:`_generate_next_value_`, which can be overridden:: >>> class AutoName(Enum): + ... @staticmethod ... def _generate_next_value_(name, start, count, last_values): ... return name ... 
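The ``_generate_next_value_`` hook changed in the hunk above receives the member name plus bookkeeping arguments and returns the value that :func:`auto` should use. A minimal sketch of the hook in action, assuming the Python 3.12 behaviour documented here; the ``AutoLower`` class and its members are purely illustrative:

.. code-block:: python

    from enum import Enum, auto

    class AutoLower(Enum):
        # The hook must be defined before any members that use auto().
        @staticmethod
        def _generate_next_value_(name, start, count, last_values):
            # Derive each member's value from its own name.
            return name.lower()

        NORTH = auto()
        SOUTH = auto()

    print(list(AutoLower))
    # [<AutoLower.NORTH: 'north'>, <AutoLower.SOUTH: 'south'>]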
@@ -305,6 +310,10 @@ Iterating over the members of an enum does not provide the aliases:: >>> list(Shape) [<Shape.SQUARE: 2>, <Shape.DIAMOND: 1>, <Shape.CIRCLE: 3>] + >>> list(Weekday) + [<Weekday.MONDAY: 1>, <Weekday.TUESDAY: 2>, <Weekday.WEDNESDAY: 4>, <Weekday.THURSDAY: 8>, <Weekday.FRIDAY: 16>, <Weekday.SATURDAY: 32>, <Weekday.SUNDAY: 64>] + +Note that the aliases ``Shape.ALIAS_FOR_SQUARE`` and ``Weekday.WEEKEND`` aren't shown. The special attribute ``__members__`` is a read-only ordered mapping of names to members. It includes all names defined in the enumeration, including the @@ -324,6 +333,11 @@ the enumeration members. For example, finding all the aliases:: >>> [name for name, member in Shape.__members__.items() if member.name != name] ['ALIAS_FOR_SQUARE'] +.. note:: + + Aliases for flags include values with multiple flags set, such as ``3``, + and no flags set, i.e. ``0``. + Comparisons ----------- @@ -361,6 +375,11 @@ below):: >>> Color.BLUE == 2 False +.. warning:: + + It is possible to reload modules -- if a reloaded module contains + enums, they will be recreated, and the new members may not + compare identical/equal to the original members. Allowed members and attributes of enumerations ---------------------------------------------- @@ -407,10 +426,17 @@ enumeration, with the exception of special methods (:meth:`__str__`, :meth:`__add__`, etc.), descriptors (methods are also descriptors), and variable names listed in :attr:`_ignore_`. -Note: if your enumeration defines :meth:`__new__` and/or :meth:`__init__` then +Note: if your enumeration defines :meth:`__new__` and/or :meth:`__init__`, any value(s) given to the enum member will be passed into those methods. See `Planet`_ for an example. +.. note:: + + The :meth:`__new__` method, if defined, is used during creation of the Enum + members; it is then replaced by Enum's :meth:`__new__` which is used after + class creation for lookup of existing members. See :ref:`new-vs-init` for + more details. + Restricted Enum subclassing --------------------------- @@ -449,6 +475,36 @@ sense to allow sharing some common behavior between a group of enumerations. (See `OrderedEnum`_ for an example.) +.. _enum-dataclass-support: + +Dataclass support +----------------- + +When inheriting from a :class:`~dataclasses.dataclass`, +the :meth:`~Enum.__repr__` omits the inherited class' name. For example:: + + >>> from dataclasses import dataclass, field + >>> @dataclass + ... class CreatureDataMixin: + ... size: str + ... legs: int + ... tail: bool = field(repr=False, default=True) + ... + >>> class Creature(CreatureDataMixin, Enum): + ... BEETLE = 'small', 6 + ... DOG = 'medium', 4 + ... + >>> Creature.DOG + <Creature.DOG: size='medium', legs=4> + +Use the :func:`!dataclass` argument ``repr=False`` +to use the standard :func:`repr`. + +.. versionchanged:: 3.12 + Only the dataclass fields are shown in the value area, not the dataclass' + name. + + Pickling -------- @@ -469,7 +525,17 @@ from that module. nested in other classes. It is possible to modify how enum members are pickled/unpickled by defining -:meth:`__reduce_ex__` in the enumeration class. +:meth:`__reduce_ex__` in the enumeration class. The default method is by-value, +but enums with complicated values may want to use by-name:: + + >>> import enum + >>> class MyEnum(enum.Enum): + ... __reduce_ex__ = enum.pickle_by_enum_name + +.. note:: + + Using by-name for flags is not recommended, as unnamed aliases will + not unpickle. 
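For the by-name pickling shown just above, here is a minimal sketch of the round trip, assuming Python 3.12+ (where :func:`~enum.pickle_by_enum_name` is available); the ``Settings`` enum is purely illustrative:

.. code-block:: python

    import enum
    import pickle

    class Settings(enum.Enum):
        # Pickle members by name rather than by value.
        __reduce_ex__ = enum.pickle_by_enum_name
        LENIENT = ('utf-8', 'replace')
        STRICT = ('utf-8', 'strict')

    restored = pickle.loads(pickle.dumps(Settings.LENIENT))
    assert restored is Settings.LENIENT  # the name survives the round trip

The default by-value behaviour would instead store the value tuple and look the member up again by value when unpickling, which is why enums with complicated values may prefer by-name.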
Functional API @@ -541,9 +607,9 @@ The complete signature is:: start=1, ) -:value: What the new enum class will record as its name. +* *value*: What the new enum class will record as its name. -:names: The enum members. This can be a whitespace- or comma-separated string +* *names*: The enum members. This can be a whitespace- or comma-separated string (values will start at 1 unless otherwise specified):: 'RED GREEN BLUE' | 'RED,GREEN,BLUE' | 'RED, GREEN, BLUE' @@ -560,13 +626,13 @@ The complete signature is:: {'CHARTREUSE': 7, 'SEA_GREEN': 11, 'ROSEMARY': 42} -:module: name of module where new enum class can be found. +* *module*: name of module where new enum class can be found. -:qualname: where in module new enum class can be found. +* *qualname*: where in module new enum class can be found. -:type: type to mix in to new enum class. +* *type*: type to mix in to new enum class. -:start: number to start counting at if only names are passed in. +* *start*: number to start counting at if only names are passed in. .. versionchanged:: 3.5 The *start* parameter was added. @@ -677,6 +743,7 @@ It is also possible to name the combinations:: ... W = 2 ... X = 1 ... RWX = 7 + ... >>> Perm.RWX <Perm.RWX: 7> >>> ~Perm.RWX @@ -705,7 +772,7 @@ be combined with them (but may lose :class:`IntFlag` membership:: >>> Perm.X | 4 <Perm.R|X: 5> - >>> Perm.X | 8 + >>> Perm.X + 8 9 .. note:: @@ -751,7 +818,7 @@ flags being set, the boolean evaluation is :data:`False`:: False Individual flags should have values that are powers of two (1, 2, 4, 8, ...), -while combinations of flags won't:: +while combinations of flags will not:: >>> class Color(Flag): ... RED = auto() @@ -822,19 +889,23 @@ Some rules: 4. When another data type is mixed in, the :attr:`value` attribute is *not the same* as the enum member itself, although it is equivalent and will compare equal. -5. %-style formatting: ``%s`` and ``%r`` call the :class:`Enum` class's +5. A ``data type`` is a mixin that defines :meth:`__new__`, or a + :class:`~dataclasses.dataclass` +6. %-style formatting: ``%s`` and ``%r`` call the :class:`Enum` class's :meth:`__str__` and :meth:`__repr__` respectively; other codes (such as ``%i`` or ``%h`` for IntEnum) treat the enum member as its mixed-in type. -6. :ref:`Formatted string literals <f-strings>`, :meth:`str.format`, +7. :ref:`Formatted string literals <f-strings>`, :meth:`str.format`, and :func:`format` will use the enum's :meth:`__str__` method. .. note:: Because :class:`IntEnum`, :class:`IntFlag`, and :class:`StrEnum` are designed to be drop-in replacements for existing constants, their - :meth:`__str__` method has been reset to their data types + :meth:`__str__` method has been reset to their data types' :meth:`__str__` method. +.. _new-vs-init: + When to use :meth:`__new__` vs. :meth:`__init__` ------------------------------------------------ @@ -867,6 +938,11 @@ want one of them to be the value:: >>> print(Coordinate(3)) Coordinate.VY +.. warning:: + + *Do not* call ``super().__new__()``, as the lookup-only ``__new__`` is the one + that is found; instead, use the data type directly. + Finer Points ^^^^^^^^^^^^ @@ -945,12 +1021,13 @@ but remain normal attributes. """""""""""""""""""" Enum members are instances of their enum class, and are normally accessed as -``EnumClass.member``. In Python versions starting with ``3.5`` you could access -members from other members -- this practice is discouraged, is deprecated -in ``3.12``, and will be removed in ``3.14``. +``EnumClass.member``. 
In certain situations, such as writing custom enum +behavior, being able to access one member directly from another is useful, +and is supported; however, in order to avoid name clashes between member names +and attributes/methods from mixed-in classes, upper-case names are strongly +recommended. .. versionchanged:: 3.5 -.. versionchanged:: 3.12 Creating members that are mixed with other data types @@ -1081,13 +1158,14 @@ the following are true: There is a new boundary mechanism that controls how out-of-range / invalid bits are handled: ``STRICT``, ``CONFORM``, ``EJECT``, and ``KEEP``: - * STRICT --> raises an exception when presented with invalid values - * CONFORM --> discards any invalid bits - * EJECT --> lose Flag status and become a normal int with the given value - * KEEP --> keep the extra bits - - keeps Flag status and extra bits - - extra bits do not show up in iteration - - extra bits do show up in repr() and str() +* STRICT --> raises an exception when presented with invalid values +* CONFORM --> discards any invalid bits +* EJECT --> lose Flag status and become a normal int with the given value +* KEEP --> keep the extra bits + + - keeps Flag status and extra bits + - extra bits do not show up in iteration + - extra bits do show up in repr() and str() The default for Flag is ``STRICT``, the default for ``IntFlag`` is ``EJECT``, and the default for ``_convert_`` is ``KEEP`` (see ``ssl.Options`` for an @@ -1096,8 +1174,8 @@ example of when ``KEEP`` is needed). .. _enum-class-differences: -How are Enums different? ------------------------- +How are Enums and Flags different? +---------------------------------- Enums have a custom metaclass that affects many aspects of both derived :class:`Enum` classes and their instances (members). @@ -1114,6 +1192,13 @@ responsible for ensuring that various other methods on the final :class:`Enum` class are correct (such as :meth:`__new__`, :meth:`__getnewargs__`, :meth:`__str__` and :meth:`__repr__`). +Flag Classes +^^^^^^^^^^^^ + +Flags have an expanded view of aliasing: to be canonical, the value of a flag +needs to be a power-of-two value, and not a duplicate name. So, in addition to the +:class:`Enum` definition of alias, a flag with no value (a.k.a. ``0``) or with more than one +power-of-two value (e.g. ``3``) is considered an alias. Enum Members (aka instances) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1123,9 +1208,35 @@ The most interesting thing about enum members is that they are singletons. and then puts a custom :meth:`__new__` in place to ensure that no new ones are ever instantiated by returning only the existing member instances. +Flag Members +^^^^^^^^^^^^ + +Flag members can be iterated over just like the :class:`Flag` class, and only the +canonical members will be returned. For example:: + + >>> list(Color) + [<Color.RED: 1>, <Color.GREEN: 2>, <Color.BLUE: 4>] + +(Note that ``BLACK``, ``PURPLE``, and ``WHITE`` do not show up.) + +Inverting a flag member returns the corresponding positive value, +rather than a negative value --- for example:: + + >>> ~Color.RED + <Color.GREEN|BLUE: 6> + +Flag members have a length corresponding to the number of power-of-two values +they contain. For example:: + + >>> len(Color.PURPLE) + 2 + .. 
_enum-cookbook: +Enum Cookbook +------------- + While :class:`Enum`, :class:`IntEnum`, :class:`StrEnum`, :class:`Flag`, and :class:`IntFlag` are expected to cover the majority of use-cases, they cannot @@ -1259,6 +1370,13 @@ to handle any extra arguments:: members; it is then replaced by Enum's :meth:`__new__` which is used after class creation for lookup of existing members. +.. warning:: + + *Do not* call ``super().__new__()``, as the lookup-only ``__new__`` is the one + that is found; instead, use the data type directly -- e.g.:: + + obj = int.__new__(cls, value) + OrderedEnum ^^^^^^^^^^^ @@ -1299,7 +1417,7 @@ enumerations):: DuplicateFreeEnum ^^^^^^^^^^^^^^^^^ -Raises an error if a duplicate member name is found instead of creating an +Raises an error if a duplicate member value is found instead of creating an alias:: >>> class DuplicateFreeEnum(Enum): @@ -1319,8 +1437,9 @@ alias:: ... GRENE = 2 ... Traceback (most recent call last): - ... + ... ValueError: aliases not allowed in DuplicateFreeEnum: 'GRENE' --> 'GREEN' + Error calling __set_name__ on '_proto_member' instance 'GRENE' in 'Color' .. note:: diff --git a/Doc/howto/functional.rst b/Doc/howto/functional.rst index 38a651b0f964a6b..b0f9d22d74f0e31 100644 --- a/Doc/howto/functional.rst +++ b/Doc/howto/functional.rst @@ -1072,8 +1072,8 @@ write the obvious :keyword:`for` loop:: A related function is :func:`itertools.accumulate(iterable, func=operator.add) <itertools.accumulate>`. It performs the same calculation, but instead of -returning only the final result, :func:`accumulate` returns an iterator that -also yields each partial result:: +returning only the final result, :func:`~itertools.accumulate` returns an iterator +that also yields each partial result:: itertools.accumulate([1, 2, 3, 4, 5]) => 1, 3, 6, 10, 15 @@ -1208,8 +1208,8 @@ General ------- **Structure and Interpretation of Computer Programs**, by Harold Abelson and -Gerald Jay Sussman with Julie Sussman. Full text at -https://mitpress.mit.edu/sicp/. In this classic textbook of computer science, +Gerald Jay Sussman with Julie Sussman. The book can be found at +https://mitpress.mit.edu/sicp. In this classic textbook of computer science, chapters 2 and 3 discuss the use of sequences and streams to organize the data flow inside a program. The book uses Scheme for its examples, but many of the design approaches described in these chapters are applicable to functional-style diff --git a/Doc/howto/index.rst b/Doc/howto/index.rst index f521276a5a83c54..a835bb5f13bd1c9 100644 --- a/Doc/howto/index.rst +++ b/Doc/howto/index.rst @@ -28,9 +28,9 @@ Currently, the HOWTOs are: urllib2.rst argparse.rst ipaddress.rst - clinic.rst instrumentation.rst perf_profiling.rst annotations.rst isolating-extensions.rst + timerfd.rst diff --git a/Doc/howto/instrumentation.rst b/Doc/howto/instrumentation.rst index 4ce15c69dac90be..9c99fcecce1fcbd 100644 --- a/Doc/howto/instrumentation.rst +++ b/Doc/howto/instrumentation.rst @@ -13,9 +13,9 @@ DTrace and SystemTap are monitoring tools, each providing a way to inspect what the processes on a computer system are doing. 
They both use domain-specific languages allowing a user to write scripts which: - - filter which processes are to be observed - - gather data from the processes of interest - - generate reports on the data +- filter which processes are to be observed +- gather data from the processes of interest +- generate reports on the data As of Python 3.6, CPython can be built with embedded "markers", also known as "probes", that can be observed by a DTrace or SystemTap script, @@ -246,11 +246,9 @@ The output looks like this: where the columns are: - - time in microseconds since start of script - - - name of executable - - - PID of process +- time in microseconds since start of script +- name of executable +- PID of process and the remainder indicates the call/return hierarchy as the script executes. @@ -292,11 +290,11 @@ Available static markers .. object:: function__return(str filename, str funcname, int lineno) - This marker is the converse of :c:func:`function__entry`, and indicates that + This marker is the converse of :c:func:`!function__entry`, and indicates that execution of a Python function has ended (either via ``return``, or via an exception). It is only triggered for pure-Python (bytecode) functions. - The arguments are the same as for :c:func:`function__entry` + The arguments are the same as for :c:func:`!function__entry` .. object:: line(str filename, str funcname, int lineno) @@ -304,7 +302,7 @@ Available static markers the equivalent of line-by-line tracing with a Python profiler. It is not triggered within C functions. - The arguments are the same as for :c:func:`function__entry`. + The arguments are the same as for :c:func:`!function__entry`. .. object:: gc__start(int generation) diff --git a/Doc/howto/isolating-extensions.rst b/Doc/howto/isolating-extensions.rst index 2eddb582da7c248..835c0afad7c6c82 100644 --- a/Doc/howto/isolating-extensions.rst +++ b/Doc/howto/isolating-extensions.rst @@ -1,5 +1,7 @@ .. highlight:: c +.. _isolating-extensions-howto: + *************************** Isolating Extension Modules *************************** @@ -62,7 +64,7 @@ Enter Per-Module State Instead of focusing on per-interpreter state, Python's C API is evolving to better support the more granular *per-module* state. -This means that C-level data is be attached to a *module object*. +This means that C-level data should be attached to a *module object*. Each interpreter creates its own module object, keeping the data separate. For testing the isolation, multiple module objects corresponding to a single extension can even be loaded in a single interpreter. @@ -298,10 +300,10 @@ Watch out for the following two points in particular (but note that this is not a comprehensive list): * Unlike static types, heap type objects are mutable by default. - Use the :c:data:`Py_TPFLAGS_IMMUTABLETYPE` flag to prevent mutability. + Use the :c:macro:`Py_TPFLAGS_IMMUTABLETYPE` flag to prevent mutability. * Heap types inherit :c:member:`~PyTypeObject.tp_new` by default, so it may become possible to instantiate them from Python code. - You can prevent this with the :c:data:`Py_TPFLAGS_DISALLOW_INSTANTIATION` flag. + You can prevent this with the :c:macro:`Py_TPFLAGS_DISALLOW_INSTANTIATION` flag. Defining Heap Types @@ -333,16 +335,48 @@ To avoid memory leaks, instances of heap types must implement the garbage collection protocol. That is, heap types should: -- Have the :c:data:`Py_TPFLAGS_HAVE_GC` flag. +- Have the :c:macro:`Py_TPFLAGS_HAVE_GC` flag. 
- Define a traverse function using ``Py_tp_traverse``, which visits the type (e.g. using :c:expr:`Py_VISIT(Py_TYPE(self))`). -Please refer to the :ref:`the documentation <type-structs>` of -:c:data:`Py_TPFLAGS_HAVE_GC` and :c:member:`~PyTypeObject.tp_traverse` +Please refer to the the documentation of +:c:macro:`Py_TPFLAGS_HAVE_GC` and :c:member:`~PyTypeObject.tp_traverse` for additional considerations. -If your traverse function delegates to the ``tp_traverse`` of its base class -(or another type), ensure that ``Py_TYPE(self)`` is visited only once. +The API for defining heap types grew organically, leaving it +somewhat awkward to use in its current state. +The following sections will guide you through common issues. + + +``tp_traverse`` in Python 3.8 and lower +....................................... + +The requirement to visit the type from ``tp_traverse`` was added in Python 3.9. +If you support Python 3.8 and lower, the traverse function must *not* +visit the type, so it must be more complicated:: + + static int my_traverse(PyObject *self, visitproc visit, void *arg) + { + if (Py_Version >= 0x03090000) { + Py_VISIT(Py_TYPE(self)); + } + return 0; + } + +Unfortunately, :c:data:`Py_Version` was only added in Python 3.11. +As a replacement, use: + +* :c:macro:`PY_VERSION_HEX`, if not using the stable ABI, or +* :py:data:`sys.version_info` (via :c:func:`PySys_GetObject` and + :c:func:`PyArg_ParseTuple`). + + +Delegating ``tp_traverse`` +.......................... + +If your traverse function delegates to the :c:member:`~PyTypeObject.tp_traverse` +of its base class (or another type), ensure that ``Py_TYPE(self)`` is visited +only once. Note that only heap type are expected to visit the type in ``tp_traverse``. For example, if your traverse function includes:: @@ -354,11 +388,70 @@ For example, if your traverse function includes:: if (base->tp_flags & Py_TPFLAGS_HEAPTYPE) { // a heap type's tp_traverse already visited Py_TYPE(self) } else { - Py_VISIT(Py_TYPE(self)); + if (Py_Version >= 0x03090000) { + Py_VISIT(Py_TYPE(self)); + } } -It is not necessary to handle the type's reference count in ``tp_new`` -and ``tp_clear``. +It is not necessary to handle the type's reference count in +:c:member:`~PyTypeObject.tp_new` and :c:member:`~PyTypeObject.tp_clear`. + + +Defining ``tp_dealloc`` +....................... + +If your type has a custom :c:member:`~PyTypeObject.tp_dealloc` function, +it needs to: + +- call :c:func:`PyObject_GC_UnTrack` before any fields are invalidated, and +- decrement the reference count of the type. + +To keep the type valid while ``tp_free`` is called, the type's refcount needs +to be decremented *after* the instance is deallocated. For example:: + + static void my_dealloc(PyObject *self) + { + PyObject_GC_UnTrack(self); + ... + PyTypeObject *type = Py_TYPE(self); + type->tp_free(self); + Py_DECREF(type); + } + +The default ``tp_dealloc`` function does this, so +if your type does *not* override +``tp_dealloc`` you don't need to add it. + + +Not overriding ``tp_free`` +.......................... + +The :c:member:`~PyTypeObject.tp_free` slot of a heap type must be set to +:c:func:`PyObject_GC_Del`. +This is the default; do not override it. + + +Avoiding ``PyObject_New`` +......................... + +GC-tracked objects need to be allocated using GC-aware functions. + +If you use use :c:func:`PyObject_New` or :c:func:`PyObject_NewVar`: + +- Get and call type's :c:member:`~PyTypeObject.tp_alloc` slot, if possible. 
+ That is, replace ``TYPE *o = PyObject_New(TYPE, typeobj)`` with:: + + TYPE *o = typeobj->tp_alloc(typeobj, 0); + + Replace ``o = PyObject_NewVar(TYPE, typeobj, size)`` with the same, + but use size instead of the 0. + +- If the above is not possible (e.g. inside a custom ``tp_alloc``), + call :c:func:`PyObject_GC_New` or :c:func:`PyObject_GC_NewVar`:: + + TYPE *o = PyObject_GC_New(TYPE, typeobj); + + TYPE *o = PyObject_GC_NewVar(TYPE, typeobj, size); Module State Access from Classes @@ -372,7 +465,7 @@ To save a some tedious error-handling boilerplate code, you can combine these two steps with :c:func:`PyType_GetModuleState`, resulting in:: my_struct *state = (my_struct*)PyType_GetModuleState(type); - if (state === NULL) { + if (state == NULL) { return NULL; } @@ -411,7 +504,7 @@ that subclass, which may be defined in different module than yours. pass For a method to get its "defining class", it must use the -:data:`METH_METHOD | METH_FASTCALL | METH_KEYWORDS` +:ref:`METH_METHOD | METH_FASTCALL | METH_KEYWORDS <METH_METHOD-METH_FASTCALL-METH_KEYWORDS>` :c:type:`calling convention <PyMethodDef>` and the corresponding :c:type:`PyCMethod` signature:: @@ -435,7 +528,7 @@ For example:: PyObject *kwnames) { my_struct *state = (my_struct*)PyType_GetModuleState(defining_class); - if (state === NULL) { + if (state == NULL) { return NULL; } ... // rest of logic @@ -461,13 +554,13 @@ Module State Access from Slot Methods, Getters and Setters .. After adding to limited API: - If you use the :ref:`limited API <stable>, + If you use the :ref:`limited API <limited-c-api>`, you must update ``Py_LIMITED_API`` to ``0x030b0000``, losing ABI compatibility with earlier versions. Slot methods—the fast C equivalents for special methods, such as :c:member:`~PyNumberMethods.nb_add` for :py:attr:`~object.__add__` or -:c:member:`~PyType.tp_new` for initialization—have a very simple API that +:c:member:`~PyTypeObject.tp_new` for initialization—have a very simple API that doesn't allow passing in the defining class, unlike with :c:type:`PyCMethod`. The same goes for getters and setters defined with :c:type:`PyGetSetDef`. @@ -479,18 +572,18 @@ to get the state:: PyObject *module = PyType_GetModuleByDef(Py_TYPE(self), &module_def); my_struct *state = (my_struct*)PyModule_GetState(module); - if (state === NULL) { + if (state == NULL) { return NULL; } -``PyType_GetModuleByDef`` works by searching the +:c:func:`!PyType_GetModuleByDef` works by searching the :term:`method resolution order` (i.e. all superclasses) for the first superclass that has a corresponding module. .. note:: In very exotic cases (inheritance chains spanning multiple modules - created from the same definition), ``PyType_GetModuleByDef`` might not + created from the same definition), :c:func:`!PyType_GetModuleByDef` might not return the module of the true defining class. However, it will always return a module with the same definition, ensuring a compatible C memory layout. diff --git a/Doc/howto/logging-cookbook.rst b/Doc/howto/logging-cookbook.rst index bf6f54a841a7b9d..588f5a0a53ded0a 100644 --- a/Doc/howto/logging-cookbook.rst +++ b/Doc/howto/logging-cookbook.rst @@ -307,7 +307,7 @@ Suppose you configure logging with the following JSON: "class": "logging.StreamHandler", "level": "INFO", "formatter": "simple", - "stream": "ext://sys.stdout", + "stream": "ext://sys.stdout" }, "stderr": { "class": "logging.StreamHandler", @@ -340,10 +340,12 @@ adding a ``filters`` section parallel to ``formatters`` and ``handlers``: .. 
code-block:: json - "filters": { - "warnings_and_below": { - "()" : "__main__.filter_maker", - "level": "WARNING" + { + "filters": { + "warnings_and_below": { + "()" : "__main__.filter_maker", + "level": "WARNING" + } } } @@ -351,12 +353,14 @@ and changing the section on the ``stdout`` handler to add it: .. code-block:: json - "stdout": { - "class": "logging.StreamHandler", - "level": "INFO", - "formatter": "simple", - "stream": "ext://sys.stdout", - "filters": ["warnings_and_below"] + { + "stdout": { + "class": "logging.StreamHandler", + "level": "INFO", + "formatter": "simple", + "stream": "ext://sys.stdout", + "filters": ["warnings_and_below"] + } } A filter is just a function, so we can define the ``filter_maker`` (a factory @@ -757,7 +761,7 @@ printed on the console; on the server side, you should see something like: Note that there are some security issues with pickle in some scenarios. If these affect you, you can use an alternative serialization scheme by overriding -the :meth:`~handlers.SocketHandler.makePickle` method and implementing your +the :meth:`~SocketHandler.makePickle` method and implementing your alternative there, as well as adapting the above script to use your alternative serialization. @@ -831,6 +835,8 @@ To test these files, do the following in a POSIX environment: You may need to tweak the configuration files in the unlikely event that the configured ports clash with something else in your test environment. +.. currentmodule:: logging + .. _context-info: Adding contextual information to your logging output @@ -1131,7 +1137,7 @@ each request is handled by a thread: 'context can be used to ' 'populate logs') aa = ap.add_argument - aa('--count', '-c', default=100, help='How many requests to simulate') + aa('--count', '-c', type=int, default=100, help='How many requests to simulate') options = ap.parse_args() # Create the dummy webapps and put them in a list which we can use to select @@ -1542,7 +1548,7 @@ Sometimes you want to let a log file grow to a certain size, then open a new file and log to that. You may want to keep a certain number of these files, and when that many files have been created, rotate the files so that the number of files and the size of the files both remain bounded. For this usage pattern, the -logging package provides a :class:`~handlers.RotatingFileHandler`:: +logging package provides a :class:`RotatingFileHandler`:: import glob import logging @@ -1590,6 +1596,8 @@ and each time it reaches the size limit it is renamed with the suffix Obviously this example sets the log length much too small as an extreme example. You would want to set *maxBytes* to an appropriate value. +.. currentmodule:: logging + .. _format-styles: Use of alternative formatting styles @@ -1720,7 +1728,7 @@ when (and if) the logged message is actually about to be output to a log by a handler. So the only slightly unusual thing which might trip you up is that the parentheses go around the format string and the arguments, not just the format string. That's because the __ notation is just syntax sugar for a constructor -call to one of the XXXMessage classes. +call to one of the :samp:`{XXX}Message` classes. If you prefer, you can use a :class:`LoggerAdapter` to achieve a similar effect to the above, as in the following example:: @@ -1836,6 +1844,7 @@ However, it should be borne in mind that each link in the chain adds run-time overhead to all logging operations, and the technique should only be used when the use of a :class:`Filter` does not provide the desired result. +.. 
currentmodule:: logging.handlers .. _zeromq-handlers: @@ -1913,6 +1922,8 @@ of queues, for example a ZeroMQ 'subscribe' socket. Here's an example:: :ref:`A more advanced logging tutorial <logging-advanced-tutorial>` +.. currentmodule:: logging + An example dictionary-based configuration ----------------------------------------- @@ -1982,26 +1993,47 @@ Using a rotator and namer to customize log rotation processing -------------------------------------------------------------- An example of how you can define a namer and rotator is given in the following -snippet, which shows zlib-based compression of the log file:: +runnable script, which shows gzip compression of the log file:: + + import gzip + import logging + import logging.handlers + import os + import shutil def namer(name): return name + ".gz" def rotator(source, dest): - with open(source, "rb") as sf: - data = sf.read() - compressed = zlib.compress(data, 9) - with open(dest, "wb") as df: - df.write(compressed) + with open(source, 'rb') as f_in: + with gzip.open(dest, 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) os.remove(source) - rh = logging.handlers.RotatingFileHandler(...) + + rh = logging.handlers.RotatingFileHandler('rotated.log', maxBytes=128, backupCount=5) rh.rotator = rotator rh.namer = namer -These are not "true" .gz files, as they are bare compressed data, with no -"container" such as you’d find in an actual gzip file. This snippet is just -for illustration purposes. + root = logging.getLogger() + root.setLevel(logging.INFO) + root.addHandler(rh) + f = logging.Formatter('%(asctime)s %(message)s') + rh.setFormatter(f) + for i in range(1000): + root.info(f'Message no. {i + 1}') + +After running this, you will see six new files, five of which are compressed: + +.. code-block:: shell-session + + $ ls rotated.log* + rotated.log rotated.log.2.gz rotated.log.4.gz + rotated.log.1.gz rotated.log.3.gz rotated.log.5.gz + $ zcat rotated.log.1.gz + 2023-01-20 02:28:17,767 Message no. 996 + 2023-01-20 02:28:17,767 Message no. 997 + 2023-01-20 02:28:17,767 Message no. 998 A more elaborate multiprocessing example ---------------------------------------- @@ -2517,7 +2549,7 @@ should be logged, or the ``extra`` keyword parameter to indicate additional contextual information to be added to the log). So you cannot directly make logging calls using :meth:`str.format` or :class:`string.Template` syntax, because internally the logging package uses %-formatting to merge the format -string and the variable arguments. There would no changing this while preserving +string and the variable arguments. There would be no changing this while preserving backward compatibility, since all logging calls which are out there in existing code will be using %-format strings. @@ -2612,7 +2644,7 @@ when (and if) the logged message is actually about to be output to a log by a handler. So the only slightly unusual thing which might trip you up is that the parentheses go around the format string and the arguments, not just the format string. That’s because the __ notation is just syntax sugar for a constructor -call to one of the ``XXXMessage`` classes shown above. +call to one of the :samp:`{XXX}Message` classes shown above. .. _filters-dictconfig: @@ -3607,7 +3639,7 @@ refer to the comments in the code snippet for more detailed information. 
Logging to syslog with RFC5424 support -------------------------------------- -Although :rfc:`5424` dates from 2009, most syslog servers are configured by detault to +Although :rfc:`5424` dates from 2009, most syslog servers are configured by default to use the older :rfc:`3164`, which hails from 2001. When ``logging`` was added to Python in 2003, it supported the earlier (and only existing) protocol at the time. Since RFC5424 came out, as there has not been widespread deployment of it in syslog @@ -3798,7 +3830,7 @@ then running the script results in WARNING:demo:division by zero As you can see, this output isn't ideal. That's because the underlying code -which writes to ``sys.stderr`` makes mutiple writes, each of which results in a +which writes to ``sys.stderr`` makes multiple writes, each of which results in a separate logged line (for example, the last three lines above). To get around this problem, you need to buffer things and only output log lines when newlines are seen. Let's use a slghtly better implementation of ``LoggerWriter``: @@ -3893,8 +3925,8 @@ that in other languages such as Java and C#, loggers are often static class attributes. However, this pattern doesn't make sense in Python, where the module (and not the class) is the unit of software decomposition. -Adding handlers other than :class:`NullHandler` to a logger in a library -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Adding handlers other than :class:`~logging.NullHandler` to a logger in a library +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Configuring logging by adding handlers, formatters and filters is the responsibility of the application developer, not the library developer. If you diff --git a/Doc/howto/logging.rst b/Doc/howto/logging.rst index 145449b2dfbd9fc..7330cf675baa36d 100644 --- a/Doc/howto/logging.rst +++ b/Doc/howto/logging.rst @@ -418,6 +418,7 @@ The flow of log event information in loggers and handlers is illustrated in the following diagram. .. image:: logging_flow.png + :class: invert-in-dark-mode Loggers ^^^^^^^ @@ -978,7 +979,7 @@ provided: #. :class:`NullHandler` instances do nothing with error messages. They are used by library developers who want to use logging, but want to avoid the 'No - handlers could be found for logger XXX' message which can be displayed if + handlers could be found for logger *XXX*' message which can be displayed if the library user has not configured logging. See :ref:`library-config` for more information. diff --git a/Doc/howto/perf_profiling.rst b/Doc/howto/perf_profiling.rst index ad2eb7b4d58aa57..bb1c00e0aa51d5f 100644 --- a/Doc/howto/perf_profiling.rst +++ b/Doc/howto/perf_profiling.rst @@ -15,21 +15,21 @@ information about the performance of your application. that aid with the analysis of the data that it produces. The main problem with using the ``perf`` profiler with Python applications is that -``perf`` only allows to get information about native symbols, this is, the names of -the functions and procedures written in C. This means that the names and file names -of the Python functions in your code will not appear in the output of the ``perf``. +``perf`` only gets information about native symbols, that is, the names of +functions and procedures written in C. This means that the names and file names +of Python functions in your code will not appear in the output of ``perf``. 
Since Python 3.12, the interpreter can run in a special mode that allows Python functions to appear in the output of the ``perf`` profiler. When this mode is enabled, the interpreter will interpose a small piece of code compiled on the fly before the execution of every Python function and it will teach ``perf`` the relationship between this piece of code and the associated Python function using -`perf map files`_. +:doc:`perf map files <../c-api/perfmaps>`. .. note:: - Support for the ``perf`` profiler is only currently available for Linux on - selected architectures. Check the output of the configure build step or + Support for the ``perf`` profiler is currently only available for Linux on + select architectures. Check the output of the ``configure`` build step or check the output of ``python -m sysconfig | grep HAVE_PERF_TRAMPOLINE`` to see if your system is supported. @@ -52,11 +52,11 @@ For example, consider the following script: if __name__ == "__main__": baz(1000000) -We can run ``perf`` to sample CPU stack traces at 9999 Hertz:: +We can run ``perf`` to sample CPU stack traces at 9999 hertz:: $ perf record -F 9999 -g -o perf.data python my_script.py -Then we can use ``perf`` report to analyze the data: +Then we can use ``perf report`` to analyze the data: .. code-block:: shell-session @@ -97,7 +97,7 @@ Then we can use ``perf`` report to analyze the data: | | | | | |--2.97%--_PyObject_Malloc ... -As you can see here, the Python functions are not shown in the output, only ``_Py_Eval_EvalFrameDefault`` appears +As you can see, the Python functions are not shown in the output, only ``_PyEval_EvalFrameDefault`` (the function that evaluates the Python bytecode) shows up. Unfortunately that's not very useful because all Python functions use the same C function to evaluate bytecode so we cannot know which Python function corresponds to which bytecode-evaluating function. @@ -151,7 +151,7 @@ Instead, if we run the same experiment with ``perf`` support enabled we get: How to enable ``perf`` profiling support ---------------------------------------- -``perf`` profiling support can either be enabled from the start using +``perf`` profiling support can be enabled either from the start using the environment variable :envvar:`PYTHONPERFSUPPORT` or the :option:`-X perf <-X>` option, or dynamically using :func:`sys.activate_stack_trampoline` and @@ -162,8 +162,7 @@ the :option:`!-X` option takes precedence over the environment variable. Example, using the environment variable:: - $ PYTHONPERFSUPPORT=1 - $ python script.py + $ PYTHONPERFSUPPORT=1 python script.py $ perf report -g -i perf.data Example, using the :option:`!-X` option:: @@ -192,7 +191,7 @@ Example, using the :mod:`sys` APIs in file :file:`example.py`: How to obtain the best results ------------------------------ -For the best results, Python should be compiled with +For best results, Python should be compiled with ``CFLAGS="-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer"`` as this allows profilers to unwind using only the frame pointer and not on DWARF debug information. This is because as the code that is interposed to allow ``perf`` @@ -206,5 +205,3 @@ You can check if your system has been compiled with this flag by running:: If you don't see any output it means that your interpreter has not been compiled with frame pointers and therefore it may not be able to show Python functions in the output of ``perf``. - -.. 
_perf map files: https://github.com/torvalds/linux/blob/0513e464f9007b70b96740271a948ca5ab6e7dd7/tools/perf/Documentation/jit-interface.txt diff --git a/Doc/howto/pyporting.rst b/Doc/howto/pyporting.rst index add1c11be534e3b..501b16d82d4d6fc 100644 --- a/Doc/howto/pyporting.rst +++ b/Doc/howto/pyporting.rst @@ -1,49 +1,48 @@ .. _pyporting-howto: -********************************* -Porting Python 2 Code to Python 3 -********************************* +************************************* +How to port Python 2 Code to Python 3 +************************************* :author: Brett Cannon .. topic:: Abstract - With Python 3 being the future of Python while Python 2 is still in active - use, it is good to have your project available for both major releases of - Python. This guide is meant to help you figure out how best to support both - Python 2 & 3 simultaneously. + Python 2 reached its official end-of-life at the start of 2020. This means + that no new bug reports, fixes, or changes will be made to Python 2 - it's + no longer supported. + + This guide is intended to provide you with a path to Python 3 for your + code, that includes compatibility with Python 2 as a first step. If you are looking to port an extension module instead of pure Python code, please see :ref:`cporting-howto`. - If you would like to read one core Python developer's take on why Python 3 - came into existence, you can read Nick Coghlan's `Python 3 Q & A`_ or - Brett Cannon's `Why Python 3 exists`_. - + The archived python-porting_ mailing list may contain some useful guidance. - For help with porting, you can view the archived python-porting_ mailing list. The Short Explanation ===================== -To make your project be single-source Python 2/3 compatible, the basic steps +To achieve Python 2/3 compatibility in a single code base, the basic steps are: #. Only worry about supporting Python 2.7 #. Make sure you have good test coverage (coverage.py_ can help; ``python -m pip install coverage``) -#. Learn the differences between Python 2 & 3 +#. Learn the differences between Python 2 and 3 #. Use Futurize_ (or Modernize_) to update your code (e.g. ``python -m pip install future``) #. Use Pylint_ to help make sure you don't regress on your Python 3 support (``python -m pip install pylint``) #. Use caniusepython3_ to find out which of your dependencies are blocking your use of Python 3 (``python -m pip install caniusepython3``) #. Once your dependencies are no longer blocking you, use continuous integration - to make sure you stay compatible with Python 2 & 3 (tox_ can help test + to make sure you stay compatible with Python 2 and 3 (tox_ can help test against multiple versions of Python; ``python -m pip install tox``) -#. Consider using optional static type checking to make sure your type usage - works in both Python 2 & 3 (e.g. use mypy_ to check your typing under both - Python 2 & Python 3; ``python -m pip install mypy``). +#. Consider using optional :term:`static type checking <static type checker>` + to make sure your type usage + works in both Python 2 and 3 (e.g. use mypy_ to check your typing under both + Python 2 and Python 3; ``python -m pip install mypy``). .. note:: @@ -55,43 +54,30 @@ are: Details ======= -A key point about supporting Python 2 & 3 simultaneously is that you can start -**today**! Even if your dependencies are not supporting Python 3 yet that does -not mean you can't modernize your code **now** to support Python 3. 
Most changes -required to support Python 3 lead to cleaner code using newer practices even in -Python 2 code. +Even if other factors - say, dependencies over which you have no control - +still require you to support Python 2, that does not prevent you taking the +step of including Python 3 support. -Another key point is that modernizing your Python 2 code to also support -Python 3 is largely automated for you. While you might have to make some API -decisions thanks to Python 3 clarifying text data versus binary data, the -lower-level work is now mostly done for you and thus can at least benefit from -the automated changes immediately. +Most changes required to support Python 3 lead to cleaner code using newer +practices even in Python 2 code. -Keep those key points in mind while you read on about the details of porting -your code to support Python 2 & 3 simultaneously. +Different versions of Python 2 +------------------------------ -Drop support for Python 2.6 and older -------------------------------------- +Ideally, your code should be compatible with Python 2.7, which was the +last supported version of Python 2. -While you can make Python 2.5 work with Python 3, it is **much** easier if you -only have to work with Python 2.7. If dropping Python 2.5 is not an -option then the six_ project can help you support Python 2.5 & 3 simultaneously -(``python -m pip install six``). Do realize, though, that nearly all the projects listed -in this HOWTO will not be available to you. +Some of the tools mentioned in this guide will not work with Python 2.6. -If you are able to skip Python 2.5 and older, then the required changes -to your code should continue to look and feel like idiomatic Python code. At -worst you will have to use a function instead of a method in some instances or -have to import a function instead of using a built-in one, but otherwise the -overall transformation should not feel foreign to you. +If absolutely necessary, the six_ project can help you support Python 2.5 and +3 simultaneously. Do realize, though, that nearly all the projects listed in +this guide will not be available to you. -But you should aim for only supporting Python 2.7. Python 2.6 is no longer -freely supported and thus is not receiving bugfixes. This means **you** will have -to work around any issues you come across with Python 2.6. There are also some -tools mentioned in this HOWTO which do not support Python 2.6 (e.g., Pylint_), -and this will become more commonplace as time goes on. It will simply be easier -for you if you only support the versions of Python that you have to support. +If you are able to skip Python 2.5 and older, the required changes to your +code will be minimal. At worst you will have to use a function instead of a +method in some instances or have to import a function instead of using a +built-in one. Make sure you specify the proper version support in your ``setup.py`` file @@ -118,62 +104,57 @@ coverage). If you don't already have a tool to measure test coverage then coverage.py_ is recommended. -Learn the differences between Python 2 & 3 -------------------------------------------- +Be aware of the differences between Python 2 and 3 +-------------------------------------------------- Once you have your code well-tested you are ready to begin porting your code to Python 3! But to fully understand how your code is going to change and what you want to look out for while you code, you will want to learn what changes -Python 3 makes in terms of Python 2. 
Typically the two best ways of doing that
-is reading the :ref:`"What's New" <whatsnew-index>` doc for each release of Python 3 and the
-`Porting to Python 3`_ book (which is free online). There is also a handy
-`cheat sheet`_ from the Python-Future project.
+Python 3 makes in terms of Python 2.
+
+Some resources for understanding the differences and their implications for your
+code:
+
+* the :ref:`"What's New" <whatsnew-index>` doc for each release of Python 3
+* the `Porting to Python 3`_ book (which is free online)
+* the handy `cheat sheet`_ from the Python-Future project.
Update your code
----------------
-Once you feel like you know what is different in Python 3 compared to Python 2,
-it's time to update your code! You have a choice between two tools in porting
-your code automatically: Futurize_ and Modernize_. Which tool you choose will
-depend on how much like Python 3 you want your code to be. Futurize_ does its
-best to make Python 3 idioms and practices exist in Python 2, e.g. backporting
-the ``bytes`` type from Python 3 so that you have semantic parity between the
-major versions of Python. Modernize_,
-on the other hand, is more conservative and targets a Python 2/3 subset of
-Python, directly relying on six_ to help provide compatibility. As Python 3 is
-the future, it might be best to consider Futurize to begin adjusting to any new
-practices that Python 3 introduces which you are not accustomed to yet.
-
-Regardless of which tool you choose, they will update your code to run under
-Python 3 while staying compatible with the version of Python 2 you started with.
-Depending on how conservative you want to be, you may want to run the tool over
-your test suite first and visually inspect the diff to make sure the
-transformation is accurate. After you have transformed your test suite and
-verified that all the tests still pass as expected, then you can transform your
-application code knowing that any tests which fail is a translation failure.
+There are tools available that can port your code automatically.
+
+Futurize_ does its best to make Python 3 idioms and practices exist in Python
+2, e.g. backporting the ``bytes`` type from Python 3 so that you have
+semantic parity between the major versions of Python. This is the better
+approach for most cases.
+
+Modernize_, on the other hand, is more conservative and targets a Python 2/3
+subset of Python, directly relying on six_ to help provide compatibility.
+
+A good approach is to run the tool over your test suite first and visually
+inspect the diff to make sure the transformation is accurate. After you have
+transformed your test suite and verified that all the tests still pass as
+expected, then you can transform your application code knowing that any test
+which fails is a translation failure.
Unfortunately the tools can't automate everything to make your code work under
-Python 3 and so there are a handful of things you will need to update manually
-to get full Python 3 support (which of these steps are necessary vary between
-the tools). Read the documentation for the tool you choose to use to see what it
-fixes by default and what it can do optionally to know what will (not) be fixed
-for you and what you may have to fix on your own (e.g. using ``io.open()`` over
-the built-in ``open()`` function is off by default in Modernize). Luckily,
-though, there are only a couple of things to watch out for which can be
-considered large issues that may be hard to debug if not watched for.
+Python 3, and you will also need to read the tools' documentation in case some +options you need are turned off by default. +Key issues to be aware of and check for: Division ++++++++ -In Python 3, ``5 / 2 == 2.5`` and not ``2``; all division between ``int`` values -result in a ``float``. This change has actually been planned since Python 2.2 -which was released in 2002. Since then users have been encouraged to add -``from __future__ import division`` to any and all files which use the ``/`` and -``//`` operators or to be running the interpreter with the ``-Q`` flag. If you -have not been doing this then you will need to go through your code and do two -things: +In Python 3, ``5 / 2 == 2.5`` and not ``2`` as it was in Python 2; all +division between ``int`` values result in a ``float``. This change has +actually been planned since Python 2.2 which was released in 2002. Since then +users have been encouraged to add ``from __future__ import division`` to any +and all files which use the ``/`` and ``//`` operators or to be running the +interpreter with the ``-Q`` flag. If you have not been doing this then you +will need to go through your code and do two things: #. Add ``from __future__ import division`` to your files #. Update any division operator as necessary to either use ``//`` to use floor @@ -197,30 +178,29 @@ specific type. This complicated the situation especially for anyone supporting multiple languages as APIs wouldn't bother explicitly supporting ``unicode`` when they claimed text data support. -To make the distinction between text and binary data clearer and more -pronounced, Python 3 did what most languages created in the age of the internet -have done and made text and binary data distinct types that cannot blindly be -mixed together (Python predates widespread access to the internet). For any code -that deals only with text or only binary data, this separation doesn't pose an -issue. But for code that has to deal with both, it does mean you might have to -now care about when you are using text compared to binary data, which is why -this cannot be entirely automated. - -To start, you will need to decide which APIs take text and which take binary -(it is **highly** recommended you don't design APIs that can take both due to -the difficulty of keeping the code working; as stated earlier it is difficult to -do well). In Python 2 this means making sure the APIs that take text can work -with ``unicode`` and those that work with binary data work with the -``bytes`` type from Python 3 (which is a subset of ``str`` in Python 2 and acts -as an alias for ``bytes`` type in Python 2). Usually the biggest issue is -realizing which methods exist on which types in Python 2 & 3 simultaneously -(for text that's ``unicode`` in Python 2 and ``str`` in Python 3, for binary -that's ``str``/``bytes`` in Python 2 and ``bytes`` in Python 3). The following -table lists the **unique** methods of each data type across Python 2 & 3 -(e.g., the ``decode()`` method is usable on the equivalent binary data type in -either Python 2 or 3, but it can't be used by the textual data type consistently -between Python 2 and 3 because ``str`` in Python 3 doesn't have the method). Do -note that as of Python 3.5 the ``__mod__`` method was added to the bytes type. +Python 3 made text and binary data distinct types that cannot simply be mixed +together. For any code that deals only with text or only binary data, this +separation doesn't pose an issue. 
But for code that has to deal with both, it +does mean you might have to now care about when you are using text compared +to binary data, which is why this cannot be entirely automated. + +Decide which APIs take text and which take binary (it is **highly** recommended +you don't design APIs that can take both due to the difficulty of keeping the +code working; as stated earlier it is difficult to do well). In Python 2 this +means making sure the APIs that take text can work with ``unicode`` and those +that work with binary data work with the ``bytes`` type from Python 3 +(which is a subset of ``str`` in Python 2 and acts as an alias for ``bytes`` +type in Python 2). Usually the biggest issue is realizing which methods exist +on which types in Python 2 and 3 simultaneously (for text that's ``unicode`` +in Python 2 and ``str`` in Python 3, for binary that's ``str``/``bytes`` in +Python 2 and ``bytes`` in Python 3). + +The following table lists the **unique** methods of each data type across +Python 2 and 3 (e.g., the ``decode()`` method is usable on the equivalent binary +data type in either Python 2 or 3, but it can't be used by the textual data +type consistently between Python 2 and 3 because ``str`` in Python 3 doesn't +have the method). Do note that as of Python 3.5 the ``__mod__`` method was +added to the bytes type. ======================== ===================== **Text data** **Binary data** @@ -246,12 +226,11 @@ having to keep track of what type of data you are working with. The next issue is making sure you know whether the string literals in your code represent text or binary data. You should add a ``b`` prefix to any literal that presents binary data. For text you should add a ``u`` prefix to -the text literal. (there is a :mod:`__future__` import to force all unspecified +the text literal. (There is a :mod:`__future__` import to force all unspecified literals to be Unicode, but usage has shown it isn't as effective as adding a ``b`` or ``u`` prefix to all literals explicitly) -As part of this dichotomy you also need to be careful about opening files. -Unless you have been working on Windows, there is a chance you have not always +You also need to be careful about opening files. Possibly you have not always bothered to add the ``b`` mode when opening a binary file (e.g., ``rb`` for binary reading). Under Python 3, binary files and text files are clearly distinct and mutually incompatible; see the :mod:`io` module for details. @@ -265,7 +244,7 @@ outdated practice of using :func:`codecs.open` as that's only necessary for keeping compatibility with Python 2.5. The constructors of both ``str`` and ``bytes`` have different semantics for the -same arguments between Python 2 & 3. Passing an integer to ``bytes`` in Python 2 +same arguments between Python 2 and 3. Passing an integer to ``bytes`` in Python 2 will give you the string representation of the integer: ``bytes(3) == '3'``. But in Python 3, an integer argument to ``bytes`` will give you a bytes object as long as the integer specified, filled with null bytes: @@ -400,7 +379,7 @@ Use continuous integration to stay compatible --------------------------------------------- Once you are able to fully run under Python 3 you will want to make sure your -code always works under both Python 2 & 3. Probably the best tool for running +code always works under both Python 2 and 3. Probably the best tool for running your tests under multiple Python interpreters is tox_. 
You can then integrate tox with your continuous integration system so that you never accidentally break Python 2 or 3 support. @@ -413,16 +392,11 @@ separation of text/binary data handling or indexing on bytes you wouldn't easily find the mistake. This flag will raise an exception when these kinds of comparisons occur, making the mistake much easier to track down. -And that's mostly it! At this point your code base is compatible with both -Python 2 and 3 simultaneously. Your testing will also be set up so that you -don't accidentally break Python 2 or 3 compatibility regardless of which version -you typically run your tests under while developing. - Consider using optional static type checking -------------------------------------------- -Another way to help port your code is to use a static type checker like +Another way to help port your code is to use a :term:`static type checker` like mypy_ or pytype_ on your code. These tools can be used to analyze your code as if it's being run under Python 2, then you can run the tool a second time as if your code is running under Python 3. By running a static type checker twice like @@ -438,7 +412,7 @@ to make sure everything functions as expected in both versions of Python. .. _Futurize: https://python-future.org/automatic_conversion.html .. _importlib2: https://pypi.org/project/importlib2 .. _Modernize: https://python-modernize.readthedocs.io/ -.. _mypy: http://mypy-lang.org/ +.. _mypy: https://mypy-lang.org/ .. _Porting to Python 3: http://python3porting.com/ .. _Pylint: https://pypi.org/project/pylint diff --git a/Doc/howto/regex.rst b/Doc/howto/regex.rst index 655df59e27b641e..5e2f9a9d1837fe0 100644 --- a/Doc/howto/regex.rst +++ b/Doc/howto/regex.rst @@ -245,6 +245,9 @@ You can omit either *m* or *n*; in that case, a reasonable value is assumed for the missing value. Omitting *m* is interpreted as a lower limit of 0, while omitting *n* results in an upper bound of infinity. +The simplest case ``{m}`` matches the preceding item exactly *m* times. +For example, ``a/{2}b`` will only match ``'a//b'``. + Readers of a reductionist bent may notice that the three other quantifiers can all be expressed using this notation. ``{0,}`` is the same as ``*``, ``{1,}`` is equivalent to ``+``, and ``{0,1}`` is the same as ``?``. It's better to use @@ -518,6 +521,8 @@ cache. Compilation Flags ----------------- +.. currentmodule:: re + Compilation flags let you modify some aspects of how regular expressions work. Flags are available in the :mod:`re` module under two names, a long name such as :const:`IGNORECASE` and a short, one-letter form such as :const:`I`. (If you're diff --git a/Doc/howto/sorting.rst b/Doc/howto/sorting.rst index decce12bf3faf6b..38dd09f0a721d2f 100644 --- a/Doc/howto/sorting.rst +++ b/Doc/howto/sorting.rst @@ -273,7 +273,7 @@ Odds and Ends * The sort routines use ``<`` when making comparisons between two objects. So, it is easy to add a standard sort order to a class by - defining an :meth:`__lt__` method: + defining an :meth:`~object.__lt__` method: .. doctest:: @@ -281,8 +281,8 @@ Odds and Ends >>> sorted(student_objects) [('dave', 'B', 10), ('jane', 'B', 12), ('john', 'A', 15)] - However, note that ``<`` can fall back to using :meth:`__gt__` if - :meth:`__lt__` is not implemented (see :func:`object.__lt__`). + However, note that ``<`` can fall back to using :meth:`~object.__gt__` if + :meth:`~object.__lt__` is not implemented (see :func:`object.__lt__`). * Key functions need not depend directly on the objects being sorted. 
A key function can also access external resources. For instance, if the student grades
diff --git a/Doc/howto/timerfd.rst b/Doc/howto/timerfd.rst
new file mode 100644
index 000000000000000..98f0294f9d082d3
--- /dev/null
+++ b/Doc/howto/timerfd.rst
@@ -0,0 +1,230 @@
+.. _timerfd-howto:
+
+*****************************
+ timer file descriptor HOWTO
+*****************************
+
+:Release: 1.13
+
+This HOWTO discusses Python's support for the Linux timer file descriptor.
+
+
+Examples
+========
+
+The following example shows how to use a timer file descriptor
+to execute a function twice a second:
+
+.. code-block:: python
+
+    # Practical scripts should really use a non-blocking timer;
+    # we use a blocking timer here for simplicity.
+    import os, time
+
+    # Create the timer file descriptor
+    fd = os.timerfd_create(time.CLOCK_REALTIME)
+
+    # Start the timer in 1 second, with an interval of half a second
+    os.timerfd_settime(fd, initial=1, interval=0.5)
+
+    try:
+        # Process timer events four times.
+        for _ in range(4):
+            # read() will block until the timer expires
+            _ = os.read(fd, 8)
+            print("Timer expired")
+    finally:
+        # Remember to close the timer file descriptor!
+        os.close(fd)
+
+To avoid the precision loss caused by the :class:`float` type,
+timer file descriptors allow specifying initial expiration and interval
+in integer nanoseconds with ``_ns`` variants of the functions.
+
+This example shows how :func:`~select.epoll` can be used with timer file
+descriptors to wait until the file descriptor is ready for reading:
+
+.. code-block:: python
+
+    import os, time, select, socket, sys
+
+    # Create an epoll object
+    ep = select.epoll()
+
+    # In this example, use loopback address to send "stop" command to the server.
+    #
+    # $ telnet 127.0.0.1 1234
+    # Trying 127.0.0.1...
+    # Connected to 127.0.0.1.
+    # Escape character is '^]'.
+    # stop
+    # Connection closed by foreign host.
+    #
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.bind(("127.0.0.1", 1234))
+    sock.setblocking(False)
+    sock.listen(1)
+    ep.register(sock, select.EPOLLIN)
+
+    # Create timer file descriptors in non-blocking mode.
+    num = 3
+    fds = []
+    for _ in range(num):
+        fd = os.timerfd_create(time.CLOCK_REALTIME, flags=os.TFD_NONBLOCK)
+        fds.append(fd)
+        # Register the timer file descriptor for read events
+        ep.register(fd, select.EPOLLIN)
+
+    # Start the timer with os.timerfd_settime_ns() in nanoseconds.
+    # Timer 1 fires every 0.25 seconds; timer 2 every 0.5 seconds; etc
+    for i, fd in enumerate(fds, start=1):
+        one_sec_in_nsec = 10**9
+        i = i * one_sec_in_nsec
+        os.timerfd_settime_ns(fd, initial=i//4, interval=i//4)
+
+    timeout = 3
+    try:
+        conn = None
+        is_active = True
+        while is_active:
+            # Wait for the timer to expire for 3 seconds.
+            # epoll.poll() returns a list of (fd, event) pairs.
+            # fd is a file descriptor.
+            # sock and conn[=returned value of socket.accept()] are socket objects, not file descriptors.
+            # So use sock.fileno() and conn.fileno() to get the file descriptors.
+            events = ep.poll(timeout)
+
+            # If more than one timer file descriptor is ready for reading at once,
+            # epoll.poll() returns a list of (fd, event) pairs.
+            #
+            # In this example's settings,
+            # 1st timer fires every 0.25 seconds in 0.25 seconds. (0.25, 0.5, 0.75, 1.0, ...)
+            # 2nd timer every 0.5 seconds in 0.5 seconds. (0.5, 1.0, 1.5, 2.0, ...)
+            # 3rd timer every 0.75 seconds in 0.75 seconds. (0.75, 1.5, 2.25, 3.0, ...)
+            #
+            # In 0.25 seconds, only 1st timer fires.
+            # In 0.5 seconds, 1st timer and 2nd timer fire at once.
+            # In 0.75 seconds, 1st timer and 3rd timer fire at once.
+            # In 1.5 seconds, 1st timer, 2nd timer and 3rd timer fire at once.
+            #
+            # If a timer file descriptor is signaled more than once since
+            # the last os.read() call, os.read() returns the number of expirations
+            # as bytes in host byte order.
+            print(f"Signaled events={events}")
+            for fd, event in events:
+                if event & select.EPOLLIN:
+                    if fd == sock.fileno():
+                        # Check if there is a connection request.
+                        print(f"Accepting connection {fd}")
+                        conn, addr = sock.accept()
+                        conn.setblocking(False)
+                        print(f"Accepted connection {conn} from {addr}")
+                        ep.register(conn, select.EPOLLIN)
+                    elif conn and fd == conn.fileno():
+                        # Check if there is data to read.
+                        print(f"Reading data {fd}")
+                        data = conn.recv(1024)
+                        if data:
+                            # You should catch UnicodeDecodeError exception for safety.
+                            cmd = data.decode()
+                            if cmd.startswith("stop"):
+                                print(f"Stopping server")
+                                is_active = False
+                            else:
+                                print(f"Unknown command: {cmd}")
+                        else:
+                            # No more data, close connection
+                            print(f"Closing connection {fd}")
+                            ep.unregister(conn)
+                            conn.close()
+                            conn = None
+                    elif fd in fds:
+                        print(f"Reading timer {fd}")
+                        count = int.from_bytes(os.read(fd, 8), byteorder=sys.byteorder)
+                        print(f"Timer {fds.index(fd) + 1} expired {count} times")
+                    else:
+                        print(f"Unknown file descriptor {fd}")
+    finally:
+        for fd in fds:
+            ep.unregister(fd)
+            os.close(fd)
+        ep.close()
+
+This example shows how :func:`~select.select` can be used with timer file
+descriptors to wait until the file descriptor is ready for reading:
+
+.. code-block:: python
+
+    import os, time, select, socket, sys
+
+    # In this example, use loopback address to send "stop" command to the server.
+    #
+    # $ telnet 127.0.0.1 1234
+    # Trying 127.0.0.1...
+    # Connected to 127.0.0.1.
+    # Escape character is '^]'.
+    # stop
+    # Connection closed by foreign host.
+    #
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.bind(("127.0.0.1", 1234))
+    sock.setblocking(False)
+    sock.listen(1)
+
+    # Create timer file descriptors in non-blocking mode.
+    num = 3
+    fds = [os.timerfd_create(time.CLOCK_REALTIME, flags=os.TFD_NONBLOCK)
+           for _ in range(num)]
+    select_fds = fds + [sock]
+
+    # Start the timers with os.timerfd_settime() in seconds.
+    # Timer 1 fires every 0.25 seconds; timer 2 every 0.5 seconds; etc
+    for i, fd in enumerate(fds, start=1):
+        os.timerfd_settime(fd, initial=i/4, interval=i/4)
+
+    timeout = 3
+    try:
+        conn = None
+        is_active = True
+        while is_active:
+            # Wait for the timer to expire for 3 seconds.
+            # select.select() returns a list of file descriptors or objects.
+            rfd, wfd, xfd = select.select(select_fds, select_fds, select_fds, timeout)
+            for fd in rfd:
+                if fd == sock:
+                    # Check if there is a connection request.
+                    print(f"Accepting connection {fd}")
+                    conn, addr = sock.accept()
+                    conn.setblocking(False)
+                    print(f"Accepted connection {conn} from {addr}")
+                    select_fds.append(conn)
+                elif conn and fd == conn:
+                    # Check if there is data to read.
+                    print(f"Reading data {fd}")
+                    data = conn.recv(1024)
+                    if data:
+                        # You should catch UnicodeDecodeError exception for safety.
+ cmd = data.decode() + if cmd.startswith("stop"): + print(f"Stopping server") + is_active = False + else: + print(f"Unknown command: {cmd}") + else: + # No more data, close connection + print(f"Closing connection {fd}") + select_fds.remove(conn) + conn.close() + conn = None + elif fd in fds: + print(f"Reading timer {fd}") + count = int.from_bytes(os.read(fd, 8), byteorder=sys.byteorder) + print(f"Timer {fds.index(fd) + 1} expired {count} times") + else: + print(f"Unknown file descriptor {fd}") + finally: + for fd in fds: + os.close(fd) + sock.close() + sock = None + diff --git a/Doc/howto/unicode.rst b/Doc/howto/unicode.rst index ca09aee72bf879d..254fe7293553535 100644 --- a/Doc/howto/unicode.rst +++ b/Doc/howto/unicode.rst @@ -424,8 +424,8 @@ lowercase letters 'ss'. A second tool is the :mod:`unicodedata` module's :func:`~unicodedata.normalize` function that converts strings to one -of several normal forms, where letters followed by a combining -character are replaced with single characters. :func:`normalize` can +of several normal forms, where letters followed by a combining character are +replaced with single characters. :func:`~unicodedata.normalize` can be used to perform string comparisons that won't falsely report inequality if two strings use combining characters differently: @@ -449,7 +449,7 @@ When run, this outputs: .. code-block:: shell-session - $ python3 compare-strs.py + $ python compare-strs.py length of first string= 1 length of second string= 2 True @@ -474,8 +474,8 @@ The Unicode Standard also specifies how to do caseless comparisons:: print(compare_caseless(single_char, multiple_chars)) -This will print ``True``. (Why is :func:`NFD` invoked twice? Because -there are a few characters that make :meth:`casefold` return a +This will print ``True``. (Why is :func:`!NFD` invoked twice? Because +there are a few characters that make :meth:`~str.casefold` return a non-normalized string, so the result needs to be normalized again. See section 3.13 of the Unicode Standard for a discussion and an example.) diff --git a/Doc/howto/urllib2.rst b/Doc/howto/urllib2.rst index 69af3c3a85c5d6c..570435d48866d3b 100644 --- a/Doc/howto/urllib2.rst +++ b/Doc/howto/urllib2.rst @@ -6,13 +6,6 @@ :Author: `Michael Foord <https://agileabstractions.com/>`_ -.. note:: - - There is a French translation of an earlier revision of this - HOWTO, available at `urllib2 - Le Manuel manquant - <https://web.archive.org/web/20200910051922/http://www.voidspace.org.uk/python/articles/urllib2_francais.shtml>`_. - - Introduction ============ @@ -86,7 +79,7 @@ response:: import urllib.request - req = urllib.request.Request('http://www.voidspace.org.uk') + req = urllib.request.Request('http://python.org/') with urllib.request.urlopen(req) as response: the_page = response.read() @@ -201,11 +194,11 @@ which comes after we have a look at what happens when things go wrong. Handling Exceptions =================== -*urlopen* raises :exc:`URLError` when it cannot handle a response (though as +*urlopen* raises :exc:`~urllib.error.URLError` when it cannot handle a response (though as usual with Python APIs, built-in exceptions such as :exc:`ValueError`, :exc:`TypeError` etc. may also be raised). -:exc:`HTTPError` is the subclass of :exc:`URLError` raised in the specific case of +:exc:`~urllib.error.HTTPError` is the subclass of :exc:`~urllib.error.URLError` raised in the specific case of HTTP URLs. The exception classes are exported from the :mod:`urllib.error` module. 
@@ -236,12 +229,12 @@ the status code indicates that the server is unable to fulfil the request. The default handlers will handle some of these responses for you (for example, if the response is a "redirection" that requests the client fetch the document from a different URL, urllib will handle that for you). For those it can't handle, -urlopen will raise an :exc:`HTTPError`. Typical errors include '404' (page not +urlopen will raise an :exc:`~urllib.error.HTTPError`. Typical errors include '404' (page not found), '403' (request forbidden), and '401' (authentication required). See section 10 of :rfc:`2616` for a reference on all the HTTP error codes. -The :exc:`HTTPError` instance raised will have an integer 'code' attribute, which +The :exc:`~urllib.error.HTTPError` instance raised will have an integer 'code' attribute, which corresponds to the error sent by the server. Error Codes @@ -324,7 +317,7 @@ dictionary is reproduced here for convenience :: } When an error is raised the server responds by returning an HTTP error code -*and* an error page. You can use the :exc:`HTTPError` instance as a response on the +*and* an error page. You can use the :exc:`~urllib.error.HTTPError` instance as a response on the page returned. This means that as well as the code attribute, it also has read, geturl, and info, methods as returned by the ``urllib.response`` module:: @@ -345,7 +338,7 @@ geturl, and info, methods as returned by the ``urllib.response`` module:: Wrapping it Up -------------- -So if you want to be prepared for :exc:`HTTPError` *or* :exc:`URLError` there are two +So if you want to be prepared for :exc:`~urllib.error.HTTPError` *or* :exc:`~urllib.error.URLError` there are two basic approaches. I prefer the second approach. Number 1 @@ -372,7 +365,7 @@ Number 1 .. note:: The ``except HTTPError`` *must* come first, otherwise ``except URLError`` - will *also* catch an :exc:`HTTPError`. + will *also* catch an :exc:`~urllib.error.HTTPError`. Number 2 ~~~~~~~~ @@ -398,7 +391,7 @@ Number 2 info and geturl =============== -The response returned by urlopen (or the :exc:`HTTPError` instance) has two +The response returned by urlopen (or the :exc:`~urllib.error.HTTPError` instance) has two useful methods :meth:`info` and :meth:`geturl` and is defined in the module :mod:`urllib.response`.. @@ -458,7 +451,7 @@ To illustrate creating and installing a handler we will use the ``HTTPBasicAuthHandler``. For a more detailed discussion of this subject -- including an explanation of how Basic Authentication works - see the `Basic Authentication Tutorial -<http://www.voidspace.org.uk/python/articles/authentication.shtml>`_. +<https://web.archive.org/web/20201215133350/http://www.voidspace.org.uk/python/articles/authentication.shtml>`__. When authentication is required, the server sends a header (as well as the 401 error code) requesting authentication. This specifies the authentication scheme diff --git a/Doc/includes/email-alternative.py b/Doc/includes/email-alternative.py index df7ca6f3faa332c..26b302b495c7acc 100644 --- a/Doc/includes/email-alternative.py +++ b/Doc/includes/email-alternative.py @@ -8,14 +8,14 @@ # Create the base text message. msg = EmailMessage() -msg['Subject'] = "Ayons asperges pour le déjeuner" +msg['Subject'] = "Pourquoi pas des asperges pour ce midi ?" msg['From'] = Address("Pepé Le Pew", "pepe", "example.com") msg['To'] = (Address("Penelope Pussycat", "penelope", "example.com"), Address("Fabrette Pussycat", "fabrette", "example.com")) msg.set_content("""\ Salut! 
-Cela ressemble à un excellent recipie[1] déjeuner. +Cette recette [1] sera sûrement un très bon repas. [1] http://www.yummly.com/recipe/Roasted-Asparagus-Epicurious-203718 @@ -31,10 +31,10 @@ <head></head> <body> <p>Salut!</p> - <p>Cela ressemble à un excellent + <p>Cette <a href="http://www.yummly.com/recipe/Roasted-Asparagus-Epicurious-203718"> - recipie - </a> déjeuner. + recette + </a> sera sûrement un très bon repas. </p> <img src="cid:{asparagus_cid}" /> </body> diff --git a/Doc/includes/custom.c b/Doc/includes/newtypes/custom.c similarity index 80% rename from Doc/includes/custom.c rename to Doc/includes/newtypes/custom.c index 26ca754964733d0..5253f879360210e 100644 --- a/Doc/includes/custom.c +++ b/Doc/includes/newtypes/custom.c @@ -7,7 +7,7 @@ typedef struct { } CustomObject; static PyTypeObject CustomType = { - PyVarObject_HEAD_INIT(NULL, 0) + .ob_base = PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "custom.Custom", .tp_doc = PyDoc_STR("Custom objects"), .tp_basicsize = sizeof(CustomObject), @@ -17,7 +17,7 @@ static PyTypeObject CustomType = { }; static PyModuleDef custommodule = { - PyModuleDef_HEAD_INIT, + .m_base = PyModuleDef_HEAD_INIT, .m_name = "custom", .m_doc = "Example module that creates an extension type.", .m_size = -1, @@ -34,9 +34,7 @@ PyInit_custom(void) if (m == NULL) return NULL; - Py_INCREF(&CustomType); - if (PyModule_AddObject(m, "Custom", (PyObject *) &CustomType) < 0) { - Py_DECREF(&CustomType); + if (PyModule_AddObjectRef(m, "Custom", (PyObject *) &CustomType) < 0) { Py_DECREF(m); return NULL; } diff --git a/Doc/includes/custom2.c b/Doc/includes/newtypes/custom2.c similarity index 81% rename from Doc/includes/custom2.c rename to Doc/includes/newtypes/custom2.c index 2a3c59f8f04c3d9..a0222b1795209b7 100644 --- a/Doc/includes/custom2.c +++ b/Doc/includes/newtypes/custom2.c @@ -1,6 +1,6 @@ #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <stddef.h> /* for offsetof() */ typedef struct { PyObject_HEAD @@ -42,7 +42,7 @@ static int Custom_init(CustomObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"first", "last", "number", NULL}; - PyObject *first = NULL, *last = NULL, *tmp; + PyObject *first = NULL, *last = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOi", kwlist, &first, &last, @@ -50,26 +50,20 @@ Custom_init(CustomObject *self, PyObject *args, PyObject *kwds) return -1; if (first) { - tmp = self->first; - Py_INCREF(first); - self->first = first; - Py_XDECREF(tmp); + Py_XSETREF(self->first, Py_NewRef(first)); } if (last) { - tmp = self->last; - Py_INCREF(last); - self->last = last; - Py_XDECREF(tmp); + Py_XSETREF(self->last, Py_NewRef(last)); } return 0; } static PyMemberDef Custom_members[] = { - {"first", T_OBJECT_EX, offsetof(CustomObject, first), 0, + {"first", Py_T_OBJECT_EX, offsetof(CustomObject, first), 0, "first name"}, - {"last", T_OBJECT_EX, offsetof(CustomObject, last), 0, + {"last", Py_T_OBJECT_EX, offsetof(CustomObject, last), 0, "last name"}, - {"number", T_INT, offsetof(CustomObject, number), 0, + {"number", Py_T_INT, offsetof(CustomObject, number), 0, "custom number"}, {NULL} /* Sentinel */ }; @@ -96,7 +90,7 @@ static PyMethodDef Custom_methods[] = { }; static PyTypeObject CustomType = { - PyVarObject_HEAD_INIT(NULL, 0) + .ob_base = PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "custom2.Custom", .tp_doc = PyDoc_STR("Custom objects"), .tp_basicsize = sizeof(CustomObject), @@ -110,7 +104,7 @@ static PyTypeObject CustomType = { }; static PyModuleDef custommodule = { - 
PyModuleDef_HEAD_INIT, + .m_base =PyModuleDef_HEAD_INIT, .m_name = "custom2", .m_doc = "Example module that creates an extension type.", .m_size = -1, @@ -127,9 +121,7 @@ PyInit_custom2(void) if (m == NULL) return NULL; - Py_INCREF(&CustomType); - if (PyModule_AddObject(m, "Custom", (PyObject *) &CustomType) < 0) { - Py_DECREF(&CustomType); + if (PyModule_AddObjectRef(m, "Custom", (PyObject *) &CustomType) < 0) { Py_DECREF(m); return NULL; } diff --git a/Doc/includes/custom3.c b/Doc/includes/newtypes/custom3.c similarity index 81% rename from Doc/includes/custom3.c rename to Doc/includes/newtypes/custom3.c index 5a47530f0a6b0dc..4aeebe0a7507d10 100644 --- a/Doc/includes/custom3.c +++ b/Doc/includes/newtypes/custom3.c @@ -1,6 +1,6 @@ #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <stddef.h> /* for offsetof() */ typedef struct { PyObject_HEAD @@ -42,7 +42,7 @@ static int Custom_init(CustomObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"first", "last", "number", NULL}; - PyObject *first = NULL, *last = NULL, *tmp; + PyObject *first = NULL, *last = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|UUi", kwlist, &first, &last, @@ -50,22 +50,16 @@ Custom_init(CustomObject *self, PyObject *args, PyObject *kwds) return -1; if (first) { - tmp = self->first; - Py_INCREF(first); - self->first = first; - Py_DECREF(tmp); + Py_SETREF(self->first, Py_NewRef(first)); } if (last) { - tmp = self->last; - Py_INCREF(last); - self->last = last; - Py_DECREF(tmp); + Py_SETREF(self->last, Py_NewRef(last)); } return 0; } static PyMemberDef Custom_members[] = { - {"number", T_INT, offsetof(CustomObject, number), 0, + {"number", Py_T_INT, offsetof(CustomObject, number), 0, "custom number"}, {NULL} /* Sentinel */ }; @@ -73,14 +67,12 @@ static PyMemberDef Custom_members[] = { static PyObject * Custom_getfirst(CustomObject *self, void *closure) { - Py_INCREF(self->first); - return self->first; + return Py_NewRef(self->first); } static int Custom_setfirst(CustomObject *self, PyObject *value, void *closure) { - PyObject *tmp; if (value == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete the first attribute"); return -1; @@ -90,24 +82,19 @@ Custom_setfirst(CustomObject *self, PyObject *value, void *closure) "The first attribute value must be a string"); return -1; } - tmp = self->first; - Py_INCREF(value); - self->first = value; - Py_DECREF(tmp); + Py_SETREF(self->first, Py_NewRef(value)); return 0; } static PyObject * Custom_getlast(CustomObject *self, void *closure) { - Py_INCREF(self->last); - return self->last; + return Py_NewRef(self->last); } static int Custom_setlast(CustomObject *self, PyObject *value, void *closure) { - PyObject *tmp; if (value == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete the last attribute"); return -1; @@ -117,10 +104,7 @@ Custom_setlast(CustomObject *self, PyObject *value, void *closure) "The last attribute value must be a string"); return -1; } - tmp = self->last; - Py_INCREF(value); - self->last = value; - Py_DECREF(tmp); + Py_SETREF(self->last, Py_NewRef(value)); return 0; } @@ -146,7 +130,7 @@ static PyMethodDef Custom_methods[] = { }; static PyTypeObject CustomType = { - PyVarObject_HEAD_INIT(NULL, 0) + .ob_base = PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "custom3.Custom", .tp_doc = PyDoc_STR("Custom objects"), .tp_basicsize = sizeof(CustomObject), @@ -161,7 +145,7 @@ static PyTypeObject CustomType = { }; static PyModuleDef custommodule = { - PyModuleDef_HEAD_INIT, + .m_base = PyModuleDef_HEAD_INIT, 
.m_name = "custom3", .m_doc = "Example module that creates an extension type.", .m_size = -1, @@ -178,9 +162,7 @@ PyInit_custom3(void) if (m == NULL) return NULL; - Py_INCREF(&CustomType); - if (PyModule_AddObject(m, "Custom", (PyObject *) &CustomType) < 0) { - Py_DECREF(&CustomType); + if (PyModule_AddObjectRef(m, "Custom", (PyObject *) &CustomType) < 0) { Py_DECREF(m); return NULL; } diff --git a/Doc/includes/custom4.c b/Doc/includes/newtypes/custom4.c similarity index 84% rename from Doc/includes/custom4.c rename to Doc/includes/newtypes/custom4.c index c7ee55578488ed4..3998918f68301e9 100644 --- a/Doc/includes/custom4.c +++ b/Doc/includes/newtypes/custom4.c @@ -1,6 +1,6 @@ #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <stddef.h> /* for offsetof() */ typedef struct { PyObject_HEAD @@ -58,7 +58,7 @@ static int Custom_init(CustomObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"first", "last", "number", NULL}; - PyObject *first = NULL, *last = NULL, *tmp; + PyObject *first = NULL, *last = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|UUi", kwlist, &first, &last, @@ -66,22 +66,16 @@ Custom_init(CustomObject *self, PyObject *args, PyObject *kwds) return -1; if (first) { - tmp = self->first; - Py_INCREF(first); - self->first = first; - Py_DECREF(tmp); + Py_SETREF(self->first, Py_NewRef(first)); } if (last) { - tmp = self->last; - Py_INCREF(last); - self->last = last; - Py_DECREF(tmp); + Py_SETREF(self->last, Py_NewRef(last)); } return 0; } static PyMemberDef Custom_members[] = { - {"number", T_INT, offsetof(CustomObject, number), 0, + {"number", Py_T_INT, offsetof(CustomObject, number), 0, "custom number"}, {NULL} /* Sentinel */ }; @@ -89,8 +83,7 @@ static PyMemberDef Custom_members[] = { static PyObject * Custom_getfirst(CustomObject *self, void *closure) { - Py_INCREF(self->first); - return self->first; + return Py_NewRef(self->first); } static int @@ -105,17 +98,14 @@ Custom_setfirst(CustomObject *self, PyObject *value, void *closure) "The first attribute value must be a string"); return -1; } - Py_INCREF(value); - Py_CLEAR(self->first); - self->first = value; + Py_XSETREF(self->first, Py_NewRef(value)); return 0; } static PyObject * Custom_getlast(CustomObject *self, void *closure) { - Py_INCREF(self->last); - return self->last; + return Py_NewRef(self->last); } static int @@ -130,9 +120,7 @@ Custom_setlast(CustomObject *self, PyObject *value, void *closure) "The last attribute value must be a string"); return -1; } - Py_INCREF(value); - Py_CLEAR(self->last); - self->last = value; + Py_XSETREF(self->last, Py_NewRef(value)); return 0; } @@ -158,7 +146,7 @@ static PyMethodDef Custom_methods[] = { }; static PyTypeObject CustomType = { - PyVarObject_HEAD_INIT(NULL, 0) + .ob_base = PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "custom4.Custom", .tp_doc = PyDoc_STR("Custom objects"), .tp_basicsize = sizeof(CustomObject), @@ -175,7 +163,7 @@ static PyTypeObject CustomType = { }; static PyModuleDef custommodule = { - PyModuleDef_HEAD_INIT, + .m_base = PyModuleDef_HEAD_INIT, .m_name = "custom4", .m_doc = "Example module that creates an extension type.", .m_size = -1, @@ -192,9 +180,7 @@ PyInit_custom4(void) if (m == NULL) return NULL; - Py_INCREF(&CustomType); - if (PyModule_AddObject(m, "Custom", (PyObject *) &CustomType) < 0) { - Py_DECREF(&CustomType); + if (PyModule_AddObjectRef(m, "Custom", (PyObject *) &CustomType) < 0) { Py_DECREF(m); return NULL; } diff --git a/Doc/includes/newtypes/pyproject.toml 
b/Doc/includes/newtypes/pyproject.toml new file mode 100644 index 000000000000000..ea7937a3171473a --- /dev/null +++ b/Doc/includes/newtypes/pyproject.toml @@ -0,0 +1,7 @@ +[build-system] +requires = ["setuptools"] +build-backend = "setuptools.build_meta" + +[project] +name = "custom" +version = "1" diff --git a/Doc/includes/newtypes/setup.py b/Doc/includes/newtypes/setup.py new file mode 100644 index 000000000000000..67f83673bcc657d --- /dev/null +++ b/Doc/includes/newtypes/setup.py @@ -0,0 +1,8 @@ +from setuptools import Extension, setup +setup(ext_modules=[ + Extension("custom", ["custom.c"]), + Extension("custom2", ["custom2.c"]), + Extension("custom3", ["custom3.c"]), + Extension("custom4", ["custom4.c"]), + Extension("sublist", ["sublist.c"]), +]) diff --git a/Doc/includes/sublist.c b/Doc/includes/newtypes/sublist.c similarity index 91% rename from Doc/includes/sublist.c rename to Doc/includes/newtypes/sublist.c index b36dadf07eae877..d8aba463f30ba2a 100644 --- a/Doc/includes/sublist.c +++ b/Doc/includes/newtypes/sublist.c @@ -58,9 +58,7 @@ PyInit_sublist(void) if (m == NULL) return NULL; - Py_INCREF(&SubListType); - if (PyModule_AddObject(m, "SubList", (PyObject *) &SubListType) < 0) { - Py_DECREF(&SubListType); + if (PyModule_AddObjectRef(m, "SubList", (PyObject *) &SubListType) < 0) { Py_DECREF(m); return NULL; } diff --git a/Doc/includes/test.py b/Doc/includes/newtypes/test.py similarity index 94% rename from Doc/includes/test.py rename to Doc/includes/newtypes/test.py index 09ebe3fec0bdbe3..55a5cf9f68b94ac 100644 --- a/Doc/includes/test.py +++ b/Doc/includes/newtypes/test.py @@ -187,13 +187,6 @@ >>> gc.enable() """ -import os -import sys -from distutils.util import get_platform -PLAT_SPEC = "%s-%d.%d" % (get_platform(), *sys.version_info[:2]) -src = os.path.join("build", "lib.%s" % PLAT_SPEC) -sys.path.append(src) - if __name__ == "__main__": import doctest, __main__ doctest.testmod(__main__) diff --git a/Doc/includes/setup.py b/Doc/includes/setup.py deleted file mode 100644 index a38a39de3e7c86c..000000000000000 --- a/Doc/includes/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup, Extension -setup(name="noddy", version="1.0", - ext_modules=[ - Extension("noddy", ["noddy.c"]), - Extension("noddy2", ["noddy2.c"]), - Extension("noddy3", ["noddy3.c"]), - Extension("noddy4", ["noddy4.c"]), - Extension("shoddy", ["shoddy.c"]), - ]) diff --git a/Doc/includes/turtle-star.py b/Doc/includes/turtle-star.py deleted file mode 100644 index 1a5db761b32385d..000000000000000 --- a/Doc/includes/turtle-star.py +++ /dev/null @@ -1,10 +0,0 @@ -from turtle import * -color('red', 'yellow') -begin_fill() -while True: - forward(200) - left(170) - if abs(pos()) < 1: - break -end_fill() -done() diff --git a/Doc/includes/typestruct.h b/Doc/includes/typestruct.h index 02f8ccfe4438a5a..ec939c28831c33c 100644 --- a/Doc/includes/typestruct.h +++ b/Doc/includes/typestruct.h @@ -80,4 +80,7 @@ typedef struct _typeobject { destructor tp_finalize; vectorcallfunc tp_vectorcall; + + /* bitset of which type-watchers care about this type */ + unsigned char tp_watched; } PyTypeObject; diff --git a/Doc/install/index.rst b/Doc/install/index.rst deleted file mode 100644 index ab581d785ef7f04..000000000000000 --- a/Doc/install/index.rst +++ /dev/null @@ -1,1081 +0,0 @@ -.. highlight:: none - -.. _install-index: - -******************************************** - Installing Python Modules (Legacy version) -******************************************** - -:Author: Greg Ward - -.. 
TODO: Fill in XXX comments - -.. note:: - - The entire ``distutils`` package has been deprecated and will be - removed in Python 3.12. This documentation is retained as a - reference only, and will be removed with the package. See the - :ref:`What's New <distutils-deprecated>` entry for more information. - -.. seealso:: - - :ref:`installing-index` - The up to date module installation documentation. For regular Python - usage, you almost certainly want that document rather than this one. - -.. note:: - - This document is being retained solely until the ``setuptools`` documentation - at https://setuptools.readthedocs.io/en/latest/setuptools.html - independently covers all of the relevant information currently included here. - -.. note:: - - This guide only covers the basic tools for building and distributing - extensions that are provided as part of this version of Python. Third party - tools offer easier to use and more secure alternatives. Refer to the `quick - recommendations section <https://packaging.python.org/guides/tool-recommendations/>`__ - in the Python Packaging User Guide for more information. - - -.. _inst-intro: - - -Introduction -============ - -In Python 2.0, the ``distutils`` API was first added to the standard library. -This provided Linux distro maintainers with a standard way of converting -Python projects into Linux distro packages, and system administrators with a -standard way of installing them directly onto target systems. - -In the many years since Python 2.0 was released, tightly coupling the build -system and package installer to the language runtime release cycle has turned -out to be problematic, and it is now recommended that projects use the -``pip`` package installer and the ``setuptools`` build system, rather than -using ``distutils`` directly. - -See :ref:`installing-index` and :ref:`distributing-index` for more details. - -This legacy documentation is being retained only until we're confident that the -``setuptools`` documentation covers everything needed. - -.. _inst-new-standard: - -Distutils based source distributions ------------------------------------- - -If you download a module source distribution, you can tell pretty quickly if it -was packaged and distributed in the standard way, i.e. using the Distutils. -First, the distribution's name and version number will be featured prominently -in the name of the downloaded archive, e.g. :file:`foo-1.0.tar.gz` or -:file:`widget-0.9.7.zip`. Next, the archive will unpack into a similarly named -directory: :file:`foo-1.0` or :file:`widget-0.9.7`. Additionally, the -distribution will contain a setup script :file:`setup.py`, and a file named -:file:`README.txt` or possibly just :file:`README`, which should explain that -building and installing the module distribution is a simple matter of running -one command from a terminal:: - - python setup.py install - -For Windows, this command should be run from a command prompt window -(:menuselection:`Start --> Accessories`):: - - setup.py install - -If all these things are true, then you already know how to build and install the -modules you've just downloaded: Run the command above. Unless you need to -install things in a non-standard way or customize the build process, you don't -really need this manual. Or rather, the above command is everything you need to -get out of this manual. - - -.. 
_inst-standard-install: - -Standard Build and Install -========================== - -As described in section :ref:`inst-new-standard`, building and installing a module -distribution using the Distutils is usually one simple command to run from a -terminal:: - - python setup.py install - - -.. _inst-platform-variations: - -Platform variations -------------------- - -You should always run the setup command from the distribution root directory, -i.e. the top-level subdirectory that the module source distribution unpacks -into. For example, if you've just downloaded a module source distribution -:file:`foo-1.0.tar.gz` onto a Unix system, the normal thing to do is:: - - gunzip -c foo-1.0.tar.gz | tar xf - # unpacks into directory foo-1.0 - cd foo-1.0 - python setup.py install - -On Windows, you'd probably download :file:`foo-1.0.zip`. If you downloaded the -archive file to :file:`C:\\Temp`, then it would unpack into -:file:`C:\\Temp\\foo-1.0`; you can use either an archive manipulator with a -graphical user interface (such as WinZip) or a command-line tool (such as -:program:`unzip` or :program:`pkunzip`) to unpack the archive. Then, open a -command prompt window and run:: - - cd c:\Temp\foo-1.0 - python setup.py install - - -.. _inst-splitting-up: - -Splitting the job up --------------------- - -Running ``setup.py install`` builds and installs all modules in one run. If you -prefer to work incrementally---especially useful if you want to customize the -build process, or if things are going wrong---you can use the setup script to do -one thing at a time. This is particularly helpful when the build and install -will be done by different users---for example, you might want to build a module -distribution and hand it off to a system administrator for installation (or do -it yourself, with super-user privileges). - -For example, you can build everything in one step, and then install everything -in a second step, by invoking the setup script twice:: - - python setup.py build - python setup.py install - -If you do this, you will notice that running the :command:`install` command -first runs the :command:`build` command, which---in this case---quickly notices -that it has nothing to do, since everything in the :file:`build` directory is -up-to-date. - -You may not need this ability to break things down often if all you do is -install modules downloaded off the 'net, but it's very handy for more advanced -tasks. If you get into distributing your own Python modules and extensions, -you'll run lots of individual Distutils commands on their own. - - -.. _inst-how-build-works: - -How building works ------------------- - -As implied above, the :command:`build` command is responsible for putting the -files to install into a *build directory*. By default, this is :file:`build` -under the distribution root; if you're excessively concerned with speed, or want -to keep the source tree pristine, you can change the build directory with the -:option:`!--build-base` option. For example:: - - python setup.py build --build-base=/path/to/pybuild/foo-1.0 - -(Or you could do this permanently with a directive in your system or personal -Distutils configuration file; see section :ref:`inst-config-files`.) Normally, this -isn't necessary. - -The default layout for the build tree is as follows:: - - --- build/ --- lib/ - or - --- build/ --- lib.<plat>/ - temp.<plat>/ - -where ``<plat>`` expands to a brief description of the current OS/hardware -platform and Python version. 
The first form, with just a :file:`lib` directory, -is used for "pure module distributions"---that is, module distributions that -include only pure Python modules. If a module distribution contains any -extensions (modules written in C/C++), then the second form, with two ``<plat>`` -directories, is used. In that case, the :file:`temp.{plat}` directory holds -temporary files generated by the compile/link process that don't actually get -installed. In either case, the :file:`lib` (or :file:`lib.{plat}`) directory -contains all Python modules (pure Python and extensions) that will be installed. - -In the future, more directories will be added to handle Python scripts, -documentation, binary executables, and whatever else is needed to handle the job -of installing Python modules and applications. - - -.. _inst-how-install-works: - -How installation works ----------------------- - -After the :command:`build` command runs (whether you run it explicitly, or the -:command:`install` command does it for you), the work of the :command:`install` -command is relatively simple: all it has to do is copy everything under -:file:`build/lib` (or :file:`build/lib.{plat}`) to your chosen installation -directory. - -If you don't choose an installation directory---i.e., if you just run ``setup.py -install``\ ---then the :command:`install` command installs to the standard -location for third-party Python modules. This location varies by platform and -by how you built/installed Python itself. On Unix (and macOS, which is also -Unix-based), it also depends on whether the module distribution being installed -is pure Python or contains extensions ("non-pure"): - -.. tabularcolumns:: |l|l|l|l| - -+-----------------+-----------------------------------------------------+--------------------------------------------------+-------+ -| Platform | Standard installation location | Default value | Notes | -+=================+=====================================================+==================================================+=======+ -| Unix (pure) | :file:`{prefix}/lib/python{X.Y}/site-packages` | :file:`/usr/local/lib/python{X.Y}/site-packages` | \(1) | -+-----------------+-----------------------------------------------------+--------------------------------------------------+-------+ -| Unix (non-pure) | :file:`{exec-prefix}/lib/python{X.Y}/site-packages` | :file:`/usr/local/lib/python{X.Y}/site-packages` | \(1) | -+-----------------+-----------------------------------------------------+--------------------------------------------------+-------+ -| Windows | :file:`{prefix}\\Lib\\site-packages` | :file:`C:\\Python{XY}\\Lib\\site-packages` | \(2) | -+-----------------+-----------------------------------------------------+--------------------------------------------------+-------+ - -Notes: - -(1) - Most Linux distributions include Python as a standard part of the system, so - :file:`{prefix}` and :file:`{exec-prefix}` are usually both :file:`/usr` on - Linux. If you build Python yourself on Linux (or any Unix-like system), the - default :file:`{prefix}` and :file:`{exec-prefix}` are :file:`/usr/local`. - -(2) - The default installation directory on Windows was :file:`C:\\Program - Files\\Python` under Python 1.6a1, 1.5.2, and earlier. - -:file:`{prefix}` and :file:`{exec-prefix}` stand for the directories that Python -is installed to, and where it finds its libraries at run-time. They are always -the same under Windows, and very often the same under Unix and macOS. 
You -can find out what your Python installation uses for :file:`{prefix}` and -:file:`{exec-prefix}` by running Python in interactive mode and typing a few -simple commands. Under Unix, just type ``python`` at the shell prompt. Under -Windows, choose :menuselection:`Start --> Programs --> Python X.Y --> -Python (command line)`. Once the interpreter is started, you type Python code -at the prompt. For example, on my Linux system, I type the three Python -statements shown below, and get the output as shown, to find out my -:file:`{prefix}` and :file:`{exec-prefix}`: - -.. code-block:: pycon - - Python 2.4 (#26, Aug 7 2004, 17:19:02) - Type "help", "copyright", "credits" or "license" for more information. - >>> import sys - >>> sys.prefix - '/usr' - >>> sys.exec_prefix - '/usr' - -A few other placeholders are used in this document: :file:`{X.Y}` stands for the -version of Python, for example ``3.2``; :file:`{abiflags}` will be replaced by -the value of :data:`sys.abiflags` or the empty string for platforms which don't -define ABI flags; :file:`{distname}` will be replaced by the name of the module -distribution being installed. Dots and capitalization are important in the -paths; for example, a value that uses ``python3.2`` on UNIX will typically use -``Python32`` on Windows. - -If you don't want to install modules to the standard location, or if you don't -have permission to write there, then you need to read about alternate -installations in section :ref:`inst-alt-install`. If you want to customize your -installation directories more heavily, see section :ref:`inst-custom-install` on -custom installations. - - -.. _inst-alt-install: - -Alternate Installation -====================== - -Often, it is necessary or desirable to install modules to a location other than -the standard location for third-party Python modules. For example, on a Unix -system you might not have permission to write to the standard third-party module -directory. Or you might wish to try out a module before making it a standard -part of your local Python installation. This is especially true when upgrading -a distribution already present: you want to make sure your existing base of -scripts still works with the new version before actually upgrading. - -The Distutils :command:`install` command is designed to make installing module -distributions to an alternate location simple and painless. The basic idea is -that you supply a base directory for the installation, and the -:command:`install` command picks a set of directories (called an *installation -scheme*) under this base directory in which to install files. The details -differ across platforms, so read whichever of the following sections applies to -you. - -Note that the various alternate installation schemes are mutually exclusive: you -can pass ``--user``, or ``--home``, or ``--prefix`` and ``--exec-prefix``, or -``--install-base`` and ``--install-platbase``, but you can't mix from these -groups. - - -.. _inst-alt-install-user: - -Alternate installation: the user scheme ---------------------------------------- - -This scheme is designed to be the most convenient solution for users that don't -have write permission to the global site-packages directory or don't want to -install into it. It is enabled with a simple option:: - - python setup.py install --user - -Files will be installed into subdirectories of :data:`site.USER_BASE` (written -as :file:`{userbase}` hereafter). 
This scheme installs pure Python modules and -extension modules in the same location (also known as :data:`site.USER_SITE`). -Here are the values for UNIX, including macOS: - -=============== =========================================================== -Type of file Installation directory -=============== =========================================================== -modules :file:`{userbase}/lib/python{X.Y}/site-packages` -scripts :file:`{userbase}/bin` -data :file:`{userbase}` -C headers :file:`{userbase}/include/python{X.Y}{abiflags}/{distname}` -=============== =========================================================== - -And here are the values used on Windows: - -=============== =========================================================== -Type of file Installation directory -=============== =========================================================== -modules :file:`{userbase}\\Python{XY}\\site-packages` -scripts :file:`{userbase}\\Python{XY}\\Scripts` -data :file:`{userbase}` -C headers :file:`{userbase}\\Python{XY}\\Include\\{distname}` -=============== =========================================================== - -The advantage of using this scheme compared to the other ones described below is -that the user site-packages directory is under normal conditions always included -in :data:`sys.path` (see :mod:`site` for more information), which means that -there is no additional step to perform after running the :file:`setup.py` script -to finalize the installation. - -The :command:`build_ext` command also has a ``--user`` option to add -:file:`{userbase}/include` to the compiler search path for header files and -:file:`{userbase}/lib` to the compiler search path for libraries as well as to -the runtime search path for shared C libraries (rpath). - - -.. _inst-alt-install-home: - -Alternate installation: the home scheme ---------------------------------------- - -The idea behind the "home scheme" is that you build and maintain a personal -stash of Python modules. This scheme's name is derived from the idea of a -"home" directory on Unix, since it's not unusual for a Unix user to make their -home directory have a layout similar to :file:`/usr/` or :file:`/usr/local/`. -This scheme can be used by anyone, regardless of the operating system they -are installing for. - -Installing a new module distribution is as simple as :: - - python setup.py install --home=<dir> - -where you can supply any directory you like for the :option:`!--home` option. On -Unix, lazy typists can just type a tilde (``~``); the :command:`install` command -will expand this to your home directory:: - - python setup.py install --home=~ - -To make Python find the distributions installed with this scheme, you may have -to :ref:`modify Python's search path <inst-search-path>` or edit -:mod:`sitecustomize` (see :mod:`site`) to call :func:`site.addsitedir` or edit -:data:`sys.path`. - -The :option:`!--home` option defines the installation base directory. Files are -installed to the following directories under the installation base as follows: - -=============== =========================================================== -Type of file Installation directory -=============== =========================================================== -modules :file:`{home}/lib/python` -scripts :file:`{home}/bin` -data :file:`{home}` -C headers :file:`{home}/include/python/{distname}` -=============== =========================================================== - -(Mentally replace slashes with backslashes if you're on Windows.) - - -.. 
_inst-alt-install-prefix-unix: - -Alternate installation: Unix (the prefix scheme) ------------------------------------------------- - -The "prefix scheme" is useful when you wish to use one Python installation to -perform the build/install (i.e., to run the setup script), but install modules -into the third-party module directory of a different Python installation (or -something that looks like a different Python installation). If this sounds a -trifle unusual, it is---that's why the user and home schemes come before. However, -there are at least two known cases where the prefix scheme will be useful. - -First, consider that many Linux distributions put Python in :file:`/usr`, rather -than the more traditional :file:`/usr/local`. This is entirely appropriate, -since in those cases Python is part of "the system" rather than a local add-on. -However, if you are installing Python modules from source, you probably want -them to go in :file:`/usr/local/lib/python2.{X}` rather than -:file:`/usr/lib/python2.{X}`. This can be done with :: - - /usr/bin/python setup.py install --prefix=/usr/local - -Another possibility is a network filesystem where the name used to write to a -remote directory is different from the name used to read it: for example, the -Python interpreter accessed as :file:`/usr/local/bin/python` might search for -modules in :file:`/usr/local/lib/python2.{X}`, but those modules would have to -be installed to, say, :file:`/mnt/{@server}/export/lib/python2.{X}`. This could -be done with :: - - /usr/local/bin/python setup.py install --prefix=/mnt/@server/export - -In either case, the :option:`!--prefix` option defines the installation base, and -the :option:`!--exec-prefix` option defines the platform-specific installation -base, which is used for platform-specific files. (Currently, this just means -non-pure module distributions, but could be expanded to C libraries, binary -executables, etc.) If :option:`!--exec-prefix` is not supplied, it defaults to -:option:`!--prefix`. Files are installed as follows: - -================= ========================================================== -Type of file Installation directory -================= ========================================================== -Python modules :file:`{prefix}/lib/python{X.Y}/site-packages` -extension modules :file:`{exec-prefix}/lib/python{X.Y}/site-packages` -scripts :file:`{prefix}/bin` -data :file:`{prefix}` -C headers :file:`{prefix}/include/python{X.Y}{abiflags}/{distname}` -================= ========================================================== - -There is no requirement that :option:`!--prefix` or :option:`!--exec-prefix` -actually point to an alternate Python installation; if the directories listed -above do not already exist, they are created at installation time. - -Incidentally, the real reason the prefix scheme is important is simply that a -standard Unix installation uses the prefix scheme, but with :option:`!--prefix` -and :option:`!--exec-prefix` supplied by Python itself as ``sys.prefix`` and -``sys.exec_prefix``. Thus, you might think you'll never use the prefix scheme, -but every time you run ``python setup.py install`` without any other options, -you're using it. - -Note that installing extensions to an alternate Python installation has no -effect on how those extensions are built: in particular, the Python header files -(:file:`Python.h` and friends) installed with the Python interpreter used to run -the setup script will be used in compiling extensions. 
It is your -responsibility to ensure that the interpreter used to run extensions installed -in this way is compatible with the interpreter used to build them. The best way -to do this is to ensure that the two interpreters are the same version of Python -(possibly different builds, or possibly copies of the same build). (Of course, -if your :option:`!--prefix` and :option:`!--exec-prefix` don't even point to an -alternate Python installation, this is immaterial.) - - -.. _inst-alt-install-prefix-windows: - -Alternate installation: Windows (the prefix scheme) ---------------------------------------------------- - -Windows has no concept of a user's home directory, and since the standard Python -installation under Windows is simpler than under Unix, the :option:`!--prefix` -option has traditionally been used to install additional packages in separate -locations on Windows. :: - - python setup.py install --prefix="\Temp\Python" - -to install modules to the :file:`\\Temp\\Python` directory on the current drive. - -The installation base is defined by the :option:`!--prefix` option; the -:option:`!--exec-prefix` option is not supported under Windows, which means that -pure Python modules and extension modules are installed into the same location. -Files are installed as follows: - -=============== ========================================================== -Type of file Installation directory -=============== ========================================================== -modules :file:`{prefix}\\Lib\\site-packages` -scripts :file:`{prefix}\\Scripts` -data :file:`{prefix}` -C headers :file:`{prefix}\\Include\\{distname}` -=============== ========================================================== - - -.. _inst-custom-install: - -Custom Installation -=================== - -Sometimes, the alternate installation schemes described in section -:ref:`inst-alt-install` just don't do what you want. You might want to tweak just -one or two directories while keeping everything under the same base directory, -or you might want to completely redefine the installation scheme. In either -case, you're creating a *custom installation scheme*. - -To create a custom installation scheme, you start with one of the alternate -schemes and override some of the installation directories used for the various -types of files, using these options: - -====================== ======================= -Type of file Override option -====================== ======================= -Python modules ``--install-purelib`` -extension modules ``--install-platlib`` -all modules ``--install-lib`` -scripts ``--install-scripts`` -data ``--install-data`` -C headers ``--install-headers`` -====================== ======================= - -These override options can be relative, absolute, -or explicitly defined in terms of one of the installation base directories. -(There are two installation base directories, and they are normally the -same---they only differ when you use the Unix "prefix scheme" and supply -different ``--prefix`` and ``--exec-prefix`` options; using ``--install-lib`` -will override values computed or given for ``--install-purelib`` and -``--install-platlib``, and is recommended for schemes that don't make a -difference between Python and extension modules.) - -For example, say you're installing a module distribution to your home directory -under Unix---but you want scripts to go in :file:`~/scripts` rather than -:file:`~/bin`. 
As you might expect, you can override this directory with the -:option:`!--install-scripts` option; in this case, it makes most sense to supply -a relative path, which will be interpreted relative to the installation base -directory (your home directory, in this case):: - - python setup.py install --home=~ --install-scripts=scripts - -Another Unix example: suppose your Python installation was built and installed -with a prefix of :file:`/usr/local/python`, so under a standard installation -scripts will wind up in :file:`/usr/local/python/bin`. If you want them in -:file:`/usr/local/bin` instead, you would supply this absolute directory for the -:option:`!--install-scripts` option:: - - python setup.py install --install-scripts=/usr/local/bin - -(This performs an installation using the "prefix scheme", where the prefix is -whatever your Python interpreter was installed with--- :file:`/usr/local/python` -in this case.) - -If you maintain Python on Windows, you might want third-party modules to live in -a subdirectory of :file:`{prefix}`, rather than right in :file:`{prefix}` -itself. This is almost as easy as customizing the script installation -directory---you just have to remember that there are two types of modules -to worry about, Python and extension modules, which can conveniently both be -controlled by one option:: - - python setup.py install --install-lib=Site - -The specified installation directory is relative to :file:`{prefix}`. Of -course, you also have to ensure that this directory is in Python's module -search path, such as by putting a :file:`.pth` file in a site directory (see -:mod:`site`). See section :ref:`inst-search-path` to find out how to modify -Python's search path. - -If you want to define an entire installation scheme, you just have to supply all -of the installation directory options. The recommended way to do this is to -supply relative paths; for example, if you want to maintain all Python -module-related files under :file:`python` in your home directory, and you want a -separate directory for each platform that you use your home directory from, you -might define the following installation scheme:: - - python setup.py install --home=~ \ - --install-purelib=python/lib \ - --install-platlib=python/lib.$PLAT \ - --install-scripts=python/scripts \ - --install-data=python/data - -or, equivalently, :: - - python setup.py install --home=~/python \ - --install-purelib=lib \ - --install-platlib='lib.$PLAT' \ - --install-scripts=scripts \ - --install-data=data - -``$PLAT`` is not (necessarily) an environment variable---it will be expanded by -the Distutils as it parses your command line options, just as it does when -parsing your configuration file(s). - -Obviously, specifying the entire installation scheme every time you install a -new module distribution would be very tedious. Thus, you can put these options -into your Distutils config file (see section :ref:`inst-config-files`): - -.. code-block:: ini - - [install] - install-base=$HOME - install-purelib=python/lib - install-platlib=python/lib.$PLAT - install-scripts=python/scripts - install-data=python/data - -or, equivalently, - -.. code-block:: ini - - [install] - install-base=$HOME/python - install-purelib=lib - install-platlib=lib.$PLAT - install-scripts=scripts - install-data=data - -Note that these two are *not* equivalent if you supply a different installation -base directory when you run the setup script. 
For example, :: - - python setup.py install --install-base=/tmp - -would install pure modules to :file:`/tmp/python/lib` in the first case, and -to :file:`/tmp/lib` in the second case. (For the second case, you probably -want to supply an installation base of :file:`/tmp/python`.) - -You probably noticed the use of ``$HOME`` and ``$PLAT`` in the sample -configuration file input. These are Distutils configuration variables, which -bear a strong resemblance to environment variables. In fact, you can use -environment variables in config files on platforms that have such a notion but -the Distutils additionally define a few extra variables that may not be in your -environment, such as ``$PLAT``. (And of course, on systems that don't have -environment variables, such as Mac OS 9, the configuration variables supplied by -the Distutils are the only ones you can use.) See section :ref:`inst-config-files` -for details. - -.. note:: When a :ref:`virtual environment <venv-def>` is activated, any options - that change the installation path will be ignored from all distutils configuration - files to prevent inadvertently installing projects outside of the virtual - environment. - -.. XXX need some Windows examples---when would custom installation schemes be - needed on those platforms? - - -.. XXX Move this to Doc/using - -.. _inst-search-path: - -Modifying Python's Search Path ------------------------------- - -When the Python interpreter executes an :keyword:`import` statement, it searches -for both Python code and extension modules along a search path. A default value -for the path is configured into the Python binary when the interpreter is built. -You can determine the path by importing the :mod:`sys` module and printing the -value of ``sys.path``. :: - - $ python - Python 2.2 (#11, Oct 3 2002, 13:31:27) - [GCC 2.96 20000731 (Red Hat Linux 7.3 2.96-112)] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import sys - >>> sys.path - ['', '/usr/local/lib/python2.3', '/usr/local/lib/python2.3/plat-linux2', - '/usr/local/lib/python2.3/lib-tk', '/usr/local/lib/python2.3/lib-dynload', - '/usr/local/lib/python2.3/site-packages'] - >>> - -The null string in ``sys.path`` represents the current working directory. - -The expected convention for locally installed packages is to put them in the -:file:`{...}/site-packages/` directory, but you may want to install Python -modules into some arbitrary directory. For example, your site may have a -convention of keeping all software related to the web server under :file:`/www`. -Add-on Python modules might then belong in :file:`/www/python`, and in order to -import them, this directory must be added to ``sys.path``. There are several -different ways to add the directory. - -The most convenient way is to add a path configuration file to a directory -that's already on Python's path, usually to the :file:`.../site-packages/` -directory. Path configuration files have an extension of :file:`.pth`, and each -line must contain a single path that will be appended to ``sys.path``. (Because -the new paths are appended to ``sys.path``, modules in the added directories -will not override standard modules. This means you can't use this mechanism for -installing fixed versions of standard modules.) - -Paths can be absolute or relative, in which case they're relative to the -directory containing the :file:`.pth` file. See the documentation of -the :mod:`site` module for more information. 
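As a concrete sketch of the :file:`.pth` approach just described (illustrative only: :file:`/www/python` is the hypothetical add-on directory used in this section, and the file name :file:`www-python.pth` is arbitrary), such a file can be created from Python itself:

.. code-block:: python

   import os
   import site

   # The user site-packages directory is normally already on sys.path.
   site_dir = site.getusersitepackages()
   os.makedirs(site_dir, exist_ok=True)

   # One path per line; each existing path is appended to sys.path the next
   # time the interpreter starts.  "/www/python" is the hypothetical
   # directory from the example above.
   with open(os.path.join(site_dir, "www-python.pth"), "w") as f:
       f.write("/www/python\n")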
- -A slightly less convenient way is to edit the :file:`site.py` file in Python's -standard library, and modify ``sys.path``. :file:`site.py` is automatically -imported when the Python interpreter is executed, unless the :option:`-S` switch -is supplied to suppress this behaviour. So you could simply edit -:file:`site.py` and add two lines to it: - -.. code-block:: python - - import sys - sys.path.append('/www/python/') - -However, if you reinstall the same major version of Python (perhaps when -upgrading from 2.2 to 2.2.2, for example) :file:`site.py` will be overwritten by -the stock version. You'd have to remember that it was modified and save a copy -before doing the installation. - -There are two environment variables that can modify ``sys.path``. -:envvar:`PYTHONHOME` sets an alternate value for the prefix of the Python -installation. For example, if :envvar:`PYTHONHOME` is set to ``/www/python``, -the search path will be set to ``['', '/www/python/lib/pythonX.Y/', -'/www/python/lib/pythonX.Y/plat-linux2', ...]``. - -The :envvar:`PYTHONPATH` variable can be set to a list of paths that will be -added to the beginning of ``sys.path``. For example, if :envvar:`PYTHONPATH` is -set to ``/www/python:/opt/py``, the search path will begin with -``['/www/python', '/opt/py']``. (Note that directories must exist in order to -be added to ``sys.path``; the :mod:`site` module removes paths that don't -exist.) - -Finally, ``sys.path`` is just a regular Python list, so any Python application -can modify it by adding or removing entries. - - -.. _inst-config-files: - -Distutils Configuration Files -============================= - -As mentioned above, you can use Distutils configuration files to record personal -or site preferences for any Distutils options. That is, any option to any -command can be stored in one of two or three (depending on your platform) -configuration files, which will be consulted before the command-line is parsed. -This means that configuration files will override default values, and the -command-line will in turn override configuration files. Furthermore, if -multiple configuration files apply, values from "earlier" files are overridden -by "later" files. - - -.. _inst-config-filenames: - -Location and names of config files ----------------------------------- - -The names and locations of the configuration files vary slightly across -platforms. 
On Unix and macOS, the three configuration files (in the order -they are processed) are: - -+--------------+----------------------------------------------------------+-------+ -| Type of file | Location and filename | Notes | -+==============+==========================================================+=======+ -| system | :file:`{prefix}/lib/python{ver}/distutils/distutils.cfg` | \(1) | -+--------------+----------------------------------------------------------+-------+ -| personal | :file:`$HOME/.pydistutils.cfg` | \(2) | -+--------------+----------------------------------------------------------+-------+ -| local | :file:`setup.cfg` | \(3) | -+--------------+----------------------------------------------------------+-------+ - -And on Windows, the configuration files are: - -+--------------+-------------------------------------------------+-------+ -| Type of file | Location and filename | Notes | -+==============+=================================================+=======+ -| system | :file:`{prefix}\\Lib\\distutils\\distutils.cfg` | \(4) | -+--------------+-------------------------------------------------+-------+ -| personal | :file:`%HOME%\\pydistutils.cfg` | \(5) | -+--------------+-------------------------------------------------+-------+ -| local | :file:`setup.cfg` | \(3) | -+--------------+-------------------------------------------------+-------+ - -On all platforms, the "personal" file can be temporarily disabled by -passing the ``--no-user-cfg`` option. - -Notes: - -(1) - Strictly speaking, the system-wide configuration file lives in the directory - where the Distutils are installed; under Python 1.6 and later on Unix, this is - as shown. For Python 1.5.2, the Distutils will normally be installed to - :file:`{prefix}/lib/python1.5/site-packages/distutils`, so the system - configuration file should be put there under Python 1.5.2. - -(2) - On Unix, if the :envvar:`HOME` environment variable is not defined, the user's - home directory will be determined with the :func:`getpwuid` function from the - standard :mod:`pwd` module. This is done by the :func:`os.path.expanduser` - function used by Distutils. - -(3) - I.e., in the current directory (usually the location of the setup script). - -(4) - (See also note (1).) Under Python 1.6 and later, Python's default "installation - prefix" is :file:`C:\\Python`, so the system configuration file is normally - :file:`C:\\Python\\Lib\\distutils\\distutils.cfg`. Under Python 1.5.2, the - default prefix was :file:`C:\\Program Files\\Python`, and the Distutils were not - part of the standard library---so the system configuration file would be - :file:`C:\\Program Files\\Python\\distutils\\distutils.cfg` in a standard Python - 1.5.2 installation under Windows. - -(5) - On Windows, if the :envvar:`HOME` environment variable is not defined, - :envvar:`USERPROFILE` then :envvar:`HOMEDRIVE` and :envvar:`HOMEPATH` will - be tried. This is done by the :func:`os.path.expanduser` function used - by Distutils. - - -.. _inst-config-syntax: - -Syntax of config files ----------------------- - -The Distutils configuration files all have the same syntax. The config files -are grouped into sections. There is one section for each Distutils command, -plus a ``global`` section for global options that affect every command. Each -section consists of one option per line, specified as ``option=value``. - -For example, the following is a complete config file that just forces all -commands to run quietly by default: - -.. 
code-block:: ini - - [global] - verbose=0 - -If this is installed as the system config file, it will affect all processing of -any Python module distribution by any user on the current system. If it is -installed as your personal config file (on systems that support them), it will -affect only module distributions processed by you. And if it is used as the -:file:`setup.cfg` for a particular module distribution, it affects only that -distribution. - -You could override the default "build base" directory and make the -:command:`build\*` commands always forcibly rebuild all files with the -following: - -.. code-block:: ini - - [build] - build-base=blib - force=1 - -which corresponds to the command-line arguments :: - - python setup.py build --build-base=blib --force - -except that including the :command:`build` command on the command-line means -that command will be run. Including a particular command in config files has no -such implication; it only means that if the command is run, the options in the -config file will apply. (Or if other commands that derive values from it are -run, they will use the values in the config file.) - -You can find out the complete list of options for any command using the -:option:`!--help` option, e.g.:: - - python setup.py build --help - -and you can find out the complete list of global options by using -:option:`!--help` without a command:: - - python setup.py --help - -See also the "Reference" section of the "Distributing Python Modules" manual. - - -.. _inst-building-ext: - -Building Extensions: Tips and Tricks -==================================== - -Whenever possible, the Distutils try to use the configuration information made -available by the Python interpreter used to run the :file:`setup.py` script. -For example, the same compiler and linker flags used to compile Python will also -be used for compiling extensions. Usually this will work well, but in -complicated situations this might be inappropriate. This section discusses how -to override the usual Distutils behaviour. - - -.. _inst-tweak-flags: - -Tweaking compiler/linker flags ------------------------------- - -Compiling a Python extension written in C or C++ will sometimes require -specifying custom flags for the compiler and linker in order to use a particular -library or produce a special kind of object code. This is especially true if the -extension hasn't been tested on your platform, or if you're trying to -cross-compile Python. - -In the most general case, the extension author might have foreseen that -compiling the extensions would be complicated, and provided a :file:`Setup` file -for you to edit. This will likely only be done if the module distribution -contains many separate extension modules, or if they often require elaborate -sets of compiler flags in order to work. - -A :file:`Setup` file, if present, is parsed in order to get a list of extensions -to build. Each line in a :file:`Setup` describes a single module. Lines have -the following structure:: - - module ... [sourcefile ...] [cpparg ...] [library ...] - - -Let's examine each of the fields in turn. - -* *module* is the name of the extension module to be built, and should be a - valid Python identifier. You can't just change this in order to rename a module - (edits to the source code would also be needed), so this should be left alone. - -* *sourcefile* is anything that's likely to be a source code file, at least - judging by the filename. 
Filenames ending in :file:`.c` are assumed to be - written in C, filenames ending in :file:`.C`, :file:`.cc`, and :file:`.c++` are - assumed to be C++, and filenames ending in :file:`.m` or :file:`.mm` are assumed - to be in Objective C. - -* *cpparg* is an argument for the C preprocessor, and is anything starting with - :option:`!-I`, :option:`!-D`, :option:`!-U` or :option:`!-C`. - -* *library* is anything ending in :file:`.a` or beginning with :option:`!-l` or - :option:`!-L`. - -If a particular platform requires a special library on your platform, you can -add it by editing the :file:`Setup` file and running ``python setup.py build``. -For example, if the module defined by the line :: - - foo foomodule.c - -must be linked with the math library :file:`libm.a` on your platform, simply add -:option:`!-lm` to the line:: - - foo foomodule.c -lm - -Arbitrary switches intended for the compiler or the linker can be supplied with -the :option:`!-Xcompiler` *arg* and :option:`!-Xlinker` *arg* options:: - - foo foomodule.c -Xcompiler -o32 -Xlinker -shared -lm - -The next option after :option:`!-Xcompiler` and :option:`!-Xlinker` will be -appended to the proper command line, so in the above example the compiler will -be passed the :option:`!-o32` option, and the linker will be passed -:option:`!-shared`. If a compiler option requires an argument, you'll have to -supply multiple :option:`!-Xcompiler` options; for example, to pass ``-x c++`` -the :file:`Setup` file would have to contain ``-Xcompiler -x -Xcompiler c++``. - -Compiler flags can also be supplied through setting the :envvar:`CFLAGS` -environment variable. If set, the contents of :envvar:`CFLAGS` will be added to -the compiler flags specified in the :file:`Setup` file. - - -.. _inst-non-ms-compilers: - -Using non-Microsoft compilers on Windows ----------------------------------------- - -.. sectionauthor:: Rene Liebscher <R.Liebscher@gmx.de> - - - -Borland/CodeGear C++ -^^^^^^^^^^^^^^^^^^^^ - -This subsection describes the necessary steps to use Distutils with the Borland -C++ compiler version 5.5. First you have to know that Borland's object file -format (OMF) is different from the format used by the Python version you can -download from the Python or ActiveState web site. (Python is built with -Microsoft Visual C++, which uses COFF as the object file format.) For this -reason you have to convert Python's library :file:`python25.lib` into the -Borland format. You can do this as follows: - -.. Should we mention that users have to create cfg-files for the compiler? -.. see also http://community.borland.com/article/0,1410,21205,00.html - -:: - - coff2omf python25.lib python25_bcpp.lib - -The :file:`coff2omf` program comes with the Borland compiler. The file -:file:`python25.lib` is in the :file:`Libs` directory of your Python -installation. If your extension uses other libraries (zlib, ...) you have to -convert them too. - -The converted files have to reside in the same directories as the normal -libraries. - -How does Distutils manage to use these libraries with their changed names? If -the extension needs a library (eg. :file:`foo`) Distutils checks first if it -finds a library with suffix :file:`_bcpp` (eg. :file:`foo_bcpp.lib`) and then -uses this library. In the case it doesn't find such a special library it uses -the default name (:file:`foo.lib`.) 
[#]_ - -To let Distutils compile your extension with Borland C++ you now have to type:: - - python setup.py build --compiler=bcpp - -If you want to use the Borland C++ compiler as the default, you could specify -this in your personal or system-wide configuration file for Distutils (see -section :ref:`inst-config-files`.) - - -.. seealso:: - - `C++Builder Compiler <https://www.embarcadero.com/products>`_ - Information about the free C++ compiler from Borland, including links to the - download pages. - - `Creating Python Extensions Using Borland's Free Compiler <http://www.cyberus.ca/~g_will/pyExtenDL.shtml>`_ - Document describing how to use Borland's free command-line C++ compiler to build - Python. - - -GNU C / Cygwin / MinGW -^^^^^^^^^^^^^^^^^^^^^^ - -This section describes the necessary steps to use Distutils with the GNU C/C++ -compilers in their Cygwin and MinGW distributions. [#]_ For a Python interpreter -that was built with Cygwin, everything should work without any of these -following steps. - -Not all extensions can be built with MinGW or Cygwin, but many can. Extensions -most likely to not work are those that use C++ or depend on Microsoft Visual C -extensions. - -To let Distutils compile your extension with Cygwin you have to type:: - - python setup.py build --compiler=cygwin - -and for Cygwin in no-cygwin mode [#]_ or for MinGW type:: - - python setup.py build --compiler=mingw32 - -If you want to use any of these options/compilers as default, you should -consider writing it in your personal or system-wide configuration file for -Distutils (see section :ref:`inst-config-files`.) - -Older Versions of Python and MinGW -"""""""""""""""""""""""""""""""""" -The following instructions only apply if you're using a version of Python -older than 2.4.1 with a MinGW older than 3.0.0 (with -binutils-2.13.90-20030111-1). - -These compilers require some special libraries. This task is more complex than -for Borland's C++, because there is no program to convert the library. First -you have to create a list of symbols which the Python DLL exports. (You can find -a good program for this task at -https://sourceforge.net/projects/mingw/files/MinGW/Extension/pexports/). - -.. I don't understand what the next line means. --amk -.. (inclusive the references on data structures.) - -:: - - pexports python25.dll >python25.def - -The location of an installed :file:`python25.dll` will depend on the -installation options and the version and language of Windows. In a "just for -me" installation, it will appear in the root of the installation directory. In -a shared installation, it will be located in the system directory. - -Then you can create an import library for gcc from this information. :: - - /cygwin/bin/dlltool --dllname python25.dll --def python25.def --output-lib libpython25.a - -The resulting library has to be placed in the same directory as -:file:`python25.lib`. (Should be the :file:`libs` directory under your Python -installation directory.) - -If your extension uses other libraries (zlib,...) you might have to convert -them too. The converted files have to reside in the same directories as the -normal libraries do. - - -.. seealso:: - - `Building Python modules on MS Windows platform with MinGW <https://old.zope.dev/Members/als/tips/win32_mingw_modules>`_ - Information about building the required libraries for the MinGW environment. - - -.. rubric:: Footnotes - -.. [#] This also means you could replace all existing COFF-libraries with OMF-libraries - of the same name. - -.. 
[#] Check https://www.sourceware.org/cygwin/ for more information - -.. [#] Then you have no POSIX emulation available, but you also don't need - :file:`cygwin1.dll`. diff --git a/Doc/installing/index.rst b/Doc/installing/index.rst index e158bf1c4c0c7f5..a46c1caefe4d8ac 100644 --- a/Doc/installing/index.rst +++ b/Doc/installing/index.rst @@ -19,7 +19,9 @@ solutions to the common pool. This guide covers the installation part of the process. For a guide to creating and sharing your own Python projects, refer to the -:ref:`distribution guide <distributing-index>`. +`Python packaging user guide`_. + +.. _Python Packaging User Guide: https://packaging.python.org/en/latest/tutorials/packaging-projects/ .. note:: @@ -52,8 +54,7 @@ Key terms developers and documentation authors responsible for the maintenance and evolution of the standard packaging tools and the associated metadata and file format standards. They maintain a variety of tools, documentation, - and issue trackers on both `GitHub <https://github.com/pypa>`__ and - `Bitbucket <https://bitbucket.org/pypa/>`__. + and issue trackers on `GitHub <https://github.com/pypa>`__. * ``distutils`` is the original build and distribution system first added to the Python standard library in 1998. While direct use of ``distutils`` is being phased out, it still laid the foundation for the current packaging diff --git a/Doc/library/2to3.rst b/Doc/library/2to3.rst deleted file mode 100644 index d85ad94e9b7fe47..000000000000000 --- a/Doc/library/2to3.rst +++ /dev/null @@ -1,489 +0,0 @@ -.. _2to3-reference: - -2to3 --- Automated Python 2 to 3 code translation -================================================= - -.. sectionauthor:: Benjamin Peterson <benjamin@python.org> - -2to3 is a Python program that reads Python 2.x source code and applies a series -of *fixers* to transform it into valid Python 3.x code. The standard library -contains a rich set of fixers that will handle almost all code. 2to3 supporting -library :mod:`lib2to3` is, however, a flexible and generic library, so it is -possible to write your own fixers for 2to3. - -.. deprecated-removed:: 3.11 3.13 - The ``lib2to3`` module was marked pending for deprecation in Python 3.9 - (raising :exc:`PendingDeprecationWarning` on import) and fully deprecated - in Python 3.11 (raising :exc:`DeprecationWarning`). The ``2to3`` tool is - part of that. It will be removed in Python 3.13. - -.. _2to3-using: - -Using 2to3 ----------- - -2to3 will usually be installed with the Python interpreter as a script. It is -also located in the :file:`Tools/scripts` directory of the Python root. - -2to3's basic arguments are a list of files or directories to transform. The -directories are recursively traversed for Python sources. - -Here is a sample Python 2.x source file, :file:`example.py`:: - - def greet(name): - print "Hello, {0}!".format(name) - print "What's your name?" - name = raw_input() - greet(name) - -It can be converted to Python 3.x code via 2to3 on the command line: - -.. code-block:: shell-session - - $ 2to3 example.py - -A diff against the original source file is printed. 2to3 can also write the -needed modifications right back to the source file. (A backup of the original -file is made unless :option:`!-n` is also given.) Writing the changes back is -enabled with the :option:`!-w` flag: - -.. 
code-block:: shell-session - - $ 2to3 -w example.py - -After transformation, :file:`example.py` looks like this:: - - def greet(name): - print("Hello, {0}!".format(name)) - print("What's your name?") - name = input() - greet(name) - -Comments and exact indentation are preserved throughout the translation process. - -By default, 2to3 runs a set of :ref:`predefined fixers <2to3-fixers>`. The -:option:`!-l` flag lists all available fixers. An explicit set of fixers to run -can be given with :option:`!-f`. Likewise the :option:`!-x` explicitly disables a -fixer. The following example runs only the ``imports`` and ``has_key`` fixers: - -.. code-block:: shell-session - - $ 2to3 -f imports -f has_key example.py - -This command runs every fixer except the ``apply`` fixer: - -.. code-block:: shell-session - - $ 2to3 -x apply example.py - -Some fixers are *explicit*, meaning they aren't run by default and must be -listed on the command line to be run. Here, in addition to the default fixers, -the ``idioms`` fixer is run: - -.. code-block:: shell-session - - $ 2to3 -f all -f idioms example.py - -Notice how passing ``all`` enables all default fixers. - -Sometimes 2to3 will find a place in your source code that needs to be changed, -but 2to3 cannot fix automatically. In this case, 2to3 will print a warning -beneath the diff for a file. You should address the warning in order to have -compliant 3.x code. - -2to3 can also refactor doctests. To enable this mode, use the :option:`!-d` -flag. Note that *only* doctests will be refactored. This also doesn't require -the module to be valid Python. For example, doctest like examples in a reST -document could also be refactored with this option. - -The :option:`!-v` option enables output of more information on the translation -process. - -Since some print statements can be parsed as function calls or statements, 2to3 -cannot always read files containing the print function. When 2to3 detects the -presence of the ``from __future__ import print_function`` compiler directive, it -modifies its internal grammar to interpret :func:`print` as a function. This -change can also be enabled manually with the :option:`!-p` flag. Use -:option:`!-p` to run fixers on code that already has had its print statements -converted. Also :option:`!-e` can be used to make :func:`exec` a function. - -The :option:`!-o` or :option:`!--output-dir` option allows specification of an -alternate directory for processed output files to be written to. The -:option:`!-n` flag is required when using this as backup files do not make sense -when not overwriting the input files. - -.. versionadded:: 3.2.3 - The :option:`!-o` option was added. - -The :option:`!-W` or :option:`!--write-unchanged-files` flag tells 2to3 to always -write output files even if no changes were required to the file. This is most -useful with :option:`!-o` so that an entire Python source tree is copied with -translation from one directory to another. -This option implies the :option:`!-w` flag as it would not make sense otherwise. - -.. versionadded:: 3.2.3 - The :option:`!-W` flag was added. - -The :option:`!--add-suffix` option specifies a string to append to all output -filenames. The :option:`!-n` flag is required when specifying this as backups -are not necessary when writing to different filenames. Example: - -.. code-block:: shell-session - - $ 2to3 -n -W --add-suffix=3 example.py - -Will cause a converted file named ``example.py3`` to be written. - -.. versionadded:: 3.2.3 - The :option:`!--add-suffix` option was added. 
- -To translate an entire project from one directory tree to another use: - -.. code-block:: shell-session - - $ 2to3 --output-dir=python3-version/mycode -W -n python2-version/mycode - - -.. _2to3-fixers: - -Fixers ------- - -Each step of transforming code is encapsulated in a fixer. The command ``2to3 --l`` lists them. As :ref:`documented above <2to3-using>`, each can be turned on -and off individually. They are described here in more detail. - - -.. 2to3fixer:: apply - - Removes usage of :func:`apply`. For example ``apply(function, *args, - **kwargs)`` is converted to ``function(*args, **kwargs)``. - -.. 2to3fixer:: asserts - - Replaces deprecated :mod:`unittest` method names with the correct ones. - - ================================ ========================================== - From To - ================================ ========================================== - ``failUnlessEqual(a, b)`` :meth:`assertEqual(a, b) - <unittest.TestCase.assertEqual>` - ``assertEquals(a, b)`` :meth:`assertEqual(a, b) - <unittest.TestCase.assertEqual>` - ``failIfEqual(a, b)`` :meth:`assertNotEqual(a, b) - <unittest.TestCase.assertNotEqual>` - ``assertNotEquals(a, b)`` :meth:`assertNotEqual(a, b) - <unittest.TestCase.assertNotEqual>` - ``failUnless(a)`` :meth:`assertTrue(a) - <unittest.TestCase.assertTrue>` - ``assert_(a)`` :meth:`assertTrue(a) - <unittest.TestCase.assertTrue>` - ``failIf(a)`` :meth:`assertFalse(a) - <unittest.TestCase.assertFalse>` - ``failUnlessRaises(exc, cal)`` :meth:`assertRaises(exc, cal) - <unittest.TestCase.assertRaises>` - ``failUnlessAlmostEqual(a, b)`` :meth:`assertAlmostEqual(a, b) - <unittest.TestCase.assertAlmostEqual>` - ``assertAlmostEquals(a, b)`` :meth:`assertAlmostEqual(a, b) - <unittest.TestCase.assertAlmostEqual>` - ``failIfAlmostEqual(a, b)`` :meth:`assertNotAlmostEqual(a, b) - <unittest.TestCase.assertNotAlmostEqual>` - ``assertNotAlmostEquals(a, b)`` :meth:`assertNotAlmostEqual(a, b) - <unittest.TestCase.assertNotAlmostEqual>` - ================================ ========================================== - -.. 2to3fixer:: basestring - - Converts :class:`basestring` to :class:`str`. - -.. 2to3fixer:: buffer - - Converts :class:`buffer` to :class:`memoryview`. This fixer is optional - because the :class:`memoryview` API is similar but not exactly the same as - that of :class:`buffer`. - -.. 2to3fixer:: dict - - Fixes dictionary iteration methods. :meth:`dict.iteritems` is converted to - :meth:`dict.items`, :meth:`dict.iterkeys` to :meth:`dict.keys`, and - :meth:`dict.itervalues` to :meth:`dict.values`. Similarly, - :meth:`dict.viewitems`, :meth:`dict.viewkeys` and :meth:`dict.viewvalues` are - converted respectively to :meth:`dict.items`, :meth:`dict.keys` and - :meth:`dict.values`. It also wraps existing usages of :meth:`dict.items`, - :meth:`dict.keys`, and :meth:`dict.values` in a call to :class:`list`. - -.. 2to3fixer:: except - - Converts ``except X, T`` to ``except X as T``. - -.. 2to3fixer:: exec - - Converts the ``exec`` statement to the :func:`exec` function. - -.. 2to3fixer:: execfile - - Removes usage of :func:`execfile`. The argument to :func:`execfile` is - wrapped in calls to :func:`open`, :func:`compile`, and :func:`exec`. - -.. 2to3fixer:: exitfunc - - Changes assignment of :attr:`sys.exitfunc` to use of the :mod:`atexit` - module. - -.. 2to3fixer:: filter - - Wraps :func:`filter` usage in a :class:`list` call. - -.. 2to3fixer:: funcattrs - - Fixes function attributes that have been renamed. 
For example, - ``my_function.func_closure`` is converted to ``my_function.__closure__``. - -.. 2to3fixer:: future - - Removes ``from __future__ import new_feature`` statements. - -.. 2to3fixer:: getcwdu - - Renames :func:`os.getcwdu` to :func:`os.getcwd`. - -.. 2to3fixer:: has_key - - Changes ``dict.has_key(key)`` to ``key in dict``. - -.. 2to3fixer:: idioms - - This optional fixer performs several transformations that make Python code - more idiomatic. Type comparisons like ``type(x) is SomeClass`` and - ``type(x) == SomeClass`` are converted to ``isinstance(x, SomeClass)``. - ``while 1`` becomes ``while True``. This fixer also tries to make use of - :func:`sorted` in appropriate places. For example, this block :: - - L = list(some_iterable) - L.sort() - - is changed to :: - - L = sorted(some_iterable) - -.. 2to3fixer:: import - - Detects sibling imports and converts them to relative imports. - -.. 2to3fixer:: imports - - Handles module renames in the standard library. - -.. 2to3fixer:: imports2 - - Handles other modules renames in the standard library. It is separate from - the :2to3fixer:`imports` fixer only because of technical limitations. - -.. 2to3fixer:: input - - Converts ``input(prompt)`` to ``eval(input(prompt))``. - -.. 2to3fixer:: intern - - Converts :func:`intern` to :func:`sys.intern`. - -.. 2to3fixer:: isinstance - - Fixes duplicate types in the second argument of :func:`isinstance`. For - example, ``isinstance(x, (int, int))`` is converted to ``isinstance(x, - int)`` and ``isinstance(x, (int, float, int))`` is converted to - ``isinstance(x, (int, float))``. - -.. 2to3fixer:: itertools_imports - - Removes imports of :func:`itertools.ifilter`, :func:`itertools.izip`, and - :func:`itertools.imap`. Imports of :func:`itertools.ifilterfalse` are also - changed to :func:`itertools.filterfalse`. - -.. 2to3fixer:: itertools - - Changes usage of :func:`itertools.ifilter`, :func:`itertools.izip`, and - :func:`itertools.imap` to their built-in equivalents. - :func:`itertools.ifilterfalse` is changed to :func:`itertools.filterfalse`. - -.. 2to3fixer:: long - - Renames :class:`long` to :class:`int`. - -.. 2to3fixer:: map - - Wraps :func:`map` in a :class:`list` call. It also changes ``map(None, x)`` - to ``list(x)``. Using ``from future_builtins import map`` disables this - fixer. - -.. 2to3fixer:: metaclass - - Converts the old metaclass syntax (``__metaclass__ = Meta`` in the class - body) to the new (``class X(metaclass=Meta)``). - -.. 2to3fixer:: methodattrs - - Fixes old method attribute names. For example, ``meth.im_func`` is converted - to ``meth.__func__``. - -.. 2to3fixer:: ne - - Converts the old not-equal syntax, ``<>``, to ``!=``. - -.. 2to3fixer:: next - - Converts the use of iterator's :meth:`~iterator.next` methods to the - :func:`next` function. It also renames :meth:`next` methods to - :meth:`~iterator.__next__`. - -.. 2to3fixer:: nonzero - - Renames definitions of methods called :meth:`__nonzero__` - to :meth:`~object.__bool__`. - -.. 2to3fixer:: numliterals - - Converts octal literals into the new syntax. - -.. 2to3fixer:: operator - - Converts calls to various functions in the :mod:`operator` module to other, - but equivalent, function calls. When needed, the appropriate ``import`` - statements are added, e.g. ``import collections.abc``. 
The following mapping - are made: - - ================================== ============================================= - From To - ================================== ============================================= - ``operator.isCallable(obj)`` ``callable(obj)`` - ``operator.sequenceIncludes(obj)`` ``operator.contains(obj)`` - ``operator.isSequenceType(obj)`` ``isinstance(obj, collections.abc.Sequence)`` - ``operator.isMappingType(obj)`` ``isinstance(obj, collections.abc.Mapping)`` - ``operator.isNumberType(obj)`` ``isinstance(obj, numbers.Number)`` - ``operator.repeat(obj, n)`` ``operator.mul(obj, n)`` - ``operator.irepeat(obj, n)`` ``operator.imul(obj, n)`` - ================================== ============================================= - -.. 2to3fixer:: paren - - Add extra parenthesis where they are required in list comprehensions. For - example, ``[x for x in 1, 2]`` becomes ``[x for x in (1, 2)]``. - -.. 2to3fixer:: print - - Converts the ``print`` statement to the :func:`print` function. - -.. 2to3fixer:: raise - - Converts ``raise E, V`` to ``raise E(V)``, and ``raise E, V, T`` to ``raise - E(V).with_traceback(T)``. If ``E`` is a tuple, the translation will be - incorrect because substituting tuples for exceptions has been removed in 3.0. - -.. 2to3fixer:: raw_input - - Converts :func:`raw_input` to :func:`input`. - -.. 2to3fixer:: reduce - - Handles the move of :func:`reduce` to :func:`functools.reduce`. - -.. 2to3fixer:: reload - - Converts :func:`reload` to :func:`importlib.reload`. - -.. 2to3fixer:: renames - - Changes :data:`sys.maxint` to :data:`sys.maxsize`. - -.. 2to3fixer:: repr - - Replaces backtick repr with the :func:`repr` function. - -.. 2to3fixer:: set_literal - - Replaces use of the :class:`set` constructor with set literals. This fixer - is optional. - -.. 2to3fixer:: standarderror - - Renames :exc:`StandardError` to :exc:`Exception`. - -.. 2to3fixer:: sys_exc - - Changes the deprecated :data:`sys.exc_value`, :data:`sys.exc_type`, - :data:`sys.exc_traceback` to use :func:`sys.exc_info`. - -.. 2to3fixer:: throw - - Fixes the API change in generator's :meth:`throw` method. - -.. 2to3fixer:: tuple_params - - Removes implicit tuple parameter unpacking. This fixer inserts temporary - variables. - -.. 2to3fixer:: types - - Fixes code broken from the removal of some members in the :mod:`types` - module. - -.. 2to3fixer:: unicode - - Renames :class:`unicode` to :class:`str`. - -.. 2to3fixer:: urllib - - Handles the rename of :mod:`urllib` and :mod:`urllib2` to the :mod:`urllib` - package. - -.. 2to3fixer:: ws_comma - - Removes excess whitespace from comma separated items. This fixer is - optional. - -.. 2to3fixer:: xrange - - Renames :func:`xrange` to :func:`range` and wraps existing :func:`range` - calls with :class:`list`. - -.. 2to3fixer:: xreadlines - - Changes ``for x in file.xreadlines()`` to ``for x in file``. - -.. 2to3fixer:: zip - - Wraps :func:`zip` usage in a :class:`list` call. This is disabled when - ``from future_builtins import zip`` appears. - - -:mod:`lib2to3` --- 2to3's library ---------------------------------- - -.. module:: lib2to3 - :synopsis: The 2to3 library - -.. moduleauthor:: Guido van Rossum -.. moduleauthor:: Collin Winter -.. moduleauthor:: Benjamin Peterson <benjamin@python.org> - -**Source code:** :source:`Lib/lib2to3/` - --------------- - -.. deprecated-removed:: 3.11 3.13 - Python 3.9 switched to a PEG parser (see :pep:`617`) while lib2to3 is - using a less flexible LL(1) parser. 
Python 3.10 includes new language - syntax that is not parsable by lib2to3's LL(1) parser (see :pep:`634`). - The ``lib2to3`` module was marked pending for deprecation in Python 3.9 - (raising :exc:`PendingDeprecationWarning` on import) and fully deprecated - in Python 3.11 (raising :exc:`DeprecationWarning`). - It will be removed from the standard library in Python 3.13. - Consider third-party alternatives such as `LibCST`_ or `parso`_. - -.. note:: - - The :mod:`lib2to3` API should be considered unstable and may change - drastically in the future. - -.. _LibCST: https://libcst.readthedocs.io/ -.. _parso: https://parso.readthedocs.io/ diff --git a/Doc/library/__future__.rst b/Doc/library/__future__.rst index 8bd23daee73977b..d261e4a4f338a56 100644 --- a/Doc/library/__future__.rst +++ b/Doc/library/__future__.rst @@ -22,42 +22,48 @@ can be inspected programmatically via importing :mod:`__future__` and examining its contents. -Each statement in :file:`__future__.py` is of the form:: +.. _future-classes: - FeatureName = _Feature(OptionalRelease, MandatoryRelease, - CompilerFlag) +.. class:: _Feature + Each statement in :file:`__future__.py` is of the form:: -where, normally, *OptionalRelease* is less than *MandatoryRelease*, and both are -5-tuples of the same form as :data:`sys.version_info`:: + FeatureName = _Feature(OptionalRelease, MandatoryRelease, + CompilerFlag) - (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int - PY_MINOR_VERSION, # the 1; an int - PY_MICRO_VERSION, # the 0; an int - PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string - PY_RELEASE_SERIAL # the 3; an int - ) + where, normally, *OptionalRelease* is less than *MandatoryRelease*, and both are + 5-tuples of the same form as :data:`sys.version_info`:: -*OptionalRelease* records the first release in which the feature was accepted. + (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int + PY_MINOR_VERSION, # the 1; an int + PY_MICRO_VERSION, # the 0; an int + PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string + PY_RELEASE_SERIAL # the 3; an int + ) -In the case of a *MandatoryRelease* that has not yet occurred, -*MandatoryRelease* predicts the release in which the feature will become part of -the language. +.. method:: _Feature.getOptionalRelease() -Else *MandatoryRelease* records when the feature became part of the language; in -releases at or after that, modules no longer need a future statement to use the -feature in question, but may continue to use such imports. + *OptionalRelease* records the first release in which the feature was accepted. -*MandatoryRelease* may also be ``None``, meaning that a planned feature got -dropped. +.. method:: _Feature.getMandatoryRelease() -Instances of class :class:`_Feature` have two corresponding methods, -:meth:`getOptionalRelease` and :meth:`getMandatoryRelease`. + In the case of a *MandatoryRelease* that has not yet occurred, + *MandatoryRelease* predicts the release in which the feature will become part of + the language. -*CompilerFlag* is the (bitfield) flag that should be passed in the fourth -argument to the built-in function :func:`compile` to enable the feature in -dynamically compiled code. This flag is stored in the :attr:`compiler_flag` -attribute on :class:`_Feature` instances. + Else *MandatoryRelease* records when the feature became part of the language; in + releases at or after that, modules no longer need a future statement to use the + feature in question, but may continue to use such imports. 
+ + *MandatoryRelease* may also be ``None``, meaning that a planned feature got + dropped or that it is not yet decided. + +.. attribute:: _Feature.compiler_flag + + *CompilerFlag* is the (bitfield) flag that should be passed in the fourth + argument to the built-in function :func:`compile` to enable the feature in + dynamically compiled code. This flag is stored in the :attr:`_Feature.compiler_flag` + attribute on :class:`_Feature` instances. No feature description will ever be deleted from :mod:`__future__`. Since its introduction in Python 2.1 the following features have found their way into the diff --git a/Doc/library/__main__.rst b/Doc/library/__main__.rst index d0a65e76b842375..c999253f781b107 100644 --- a/Doc/library/__main__.rst +++ b/Doc/library/__main__.rst @@ -54,45 +54,45 @@ The top-level code environment can be: * the scope of an interactive prompt:: - >>> __name__ - '__main__' + >>> __name__ + '__main__' * the Python module passed to the Python interpreter as a file argument: - .. code-block:: shell-session + .. code-block:: shell-session - $ python3 helloworld.py - Hello, world! + $ python helloworld.py + Hello, world! * the Python module or package passed to the Python interpreter with the :option:`-m` argument: - .. code-block:: shell-session + .. code-block:: shell-session - $ python3 -m tarfile - usage: tarfile.py [-h] [-v] (...) + $ python -m tarfile + usage: tarfile.py [-h] [-v] (...) * Python code read by the Python interpreter from standard input: - .. code-block:: shell-session + .. code-block:: shell-session - $ echo "import this" | python3 - The Zen of Python, by Tim Peters + $ echo "import this" | python + The Zen of Python, by Tim Peters - Beautiful is better than ugly. - Explicit is better than implicit. - ... + Beautiful is better than ugly. + Explicit is better than implicit. + ... * Python code passed to the Python interpreter with the :option:`-c` argument: - .. code-block:: shell-session + .. code-block:: shell-session - $ python3 -c "import this" - The Zen of Python, by Tim Peters + $ python -c "import this" + The Zen of Python, by Tim Peters - Beautiful is better than ugly. - Explicit is better than implicit. - ... + Beautiful is better than ugly. + Explicit is better than implicit. + ... In each of these situations, the top-level module's ``__name__`` is set to ``'__main__'``. @@ -102,9 +102,9 @@ top-level environment by checking its own ``__name__``, which allows a common idiom for conditionally executing code when the module is not initialized from an import statement:: - if __name__ == '__main__': - # Execute when the module is not initialized from an import statement. - ... + if __name__ == '__main__': + # Execute when the module is not initialized from an import statement. + ... .. seealso:: @@ -124,7 +124,7 @@ This is where using the ``if __name__ == '__main__'`` code block comes in handy. Code within this block won't run unless the module is executed in the top-level environment. -Putting as few statements as possible in the block below ``if __name___ == +Putting as few statements as possible in the block below ``if __name__ == '__main__'`` can improve code clarity and correctness. Most often, a function named ``main`` encapsulates the program's primary behavior:: @@ -178,7 +178,7 @@ that your function will return some value acceptable as an input to returned if your function does not have a return statement). By proactively following this convention ourselves, our module will have the -same behavior when run directly (i.e. 
``python3 echo.py``) as it will have if +same behavior when run directly (i.e. ``python echo.py``) as it will have if we later package it as a console script entry-point in a pip-installable package. @@ -215,7 +215,7 @@ directly from the command line using the :option:`-m` flag. For example: .. code-block:: shell-session - $ python3 -m bandclass + $ python -m bandclass This command will cause ``__main__.py`` to run. How you utilize this mechanism will depend on the nature of the package you are writing, but in this @@ -227,7 +227,7 @@ students:: import sys from .student import search_students - student_name = sys.argv[2] if len(sys.argv) >= 2 else '' + student_name = sys.argv[1] if len(sys.argv) >= 2 else '' print(f'Found student: {search_students(student_name)}') Note that ``from .student import search_students`` is an example of a relative @@ -238,9 +238,9 @@ package. For more details, see :ref:`intra-package-references` in the Idiomatic Usage ^^^^^^^^^^^^^^^ -The contents of ``__main__.py`` typically isn't fenced with -``if __name__ == '__main__'`` blocks. Instead, those files are kept short, -functions to execute from other modules. Those other modules can then be +The content of ``__main__.py`` typically isn't fenced with an +``if __name__ == '__main__'`` block. Instead, those files are kept +short and import functions to execute from other modules. Those other modules can then be easily unit-tested and are properly reusable. If used, an ``if __name__ == '__main__'`` block will still work as expected @@ -259,7 +259,7 @@ one mentioned below are preferred. See :mod:`venv` for an example of a package with a minimal ``__main__.py`` in the standard library. It doesn't contain a ``if __name__ == '__main__'`` - block. You can invoke it with ``python3 -m venv [directory]``. + block. You can invoke it with ``python -m venv [directory]``. See :mod:`runpy` for more details on the :option:`-m` flag to the interpreter executable. @@ -320,7 +320,7 @@ Now, if we started our program, the result would look like this: .. code-block:: shell-session - $ python3 start.py + $ python start.py Define the variable `my_name`! The exit code of the program would be 1, indicating an error. Uncommenting the @@ -329,19 +329,19 @@ status code 0, indicating success: .. code-block:: shell-session - $ python3 start.py + $ python start.py Dinsdale found in file /path/to/start.py Note that importing ``__main__`` doesn't cause any issues with unintentionally running top-level code meant for script use which is put in the ``if __name__ == "__main__"`` block of the ``start`` module. Why does this work? -Python inserts an empty ``__main__`` module in :attr:`sys.modules` at +Python inserts an empty ``__main__`` module in :data:`sys.modules` at interpreter startup, and populates it by running top-level code. In our example this is the ``start`` module which runs line by line and imports ``namely``. In turn, ``namely`` imports ``__main__`` (which is really ``start``). That's an import cycle! Fortunately, since the partially populated ``__main__`` -module is present in :attr:`sys.modules`, Python passes that to ``namely``. +module is present in :data:`sys.modules`, Python passes that to ``namely``. See :ref:`Special considerations for __main__ <import-dunder-main>` in the import system's reference for details on how this works. 
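The ``main()`` pattern discussed above can be sketched minimally as follows;
the ``echo``-style behaviour and module layout are illustrative assumptions,
not part of this patch::

   import sys

   def main() -> int:
       # Echo the command-line arguments back to the user.
       print(' '.join(sys.argv[1:]))
       # The return value is handed to sys.exit() below, so 0 signals success.
       return 0

   if __name__ == '__main__':
       sys.exit(main())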
diff --git a/Doc/library/_thread.rst b/Doc/library/_thread.rst index 9df9e7914e093b1..297f50a46e06928 100644 --- a/Doc/library/_thread.rst +++ b/Doc/library/_thread.rst @@ -57,6 +57,8 @@ This module defines the following constants and functions: When the function raises a :exc:`SystemExit` exception, it is silently ignored. + .. audit-event:: _thread.start_new_thread function,args,kwargs start_new_thread + .. versionchanged:: 3.8 :func:`sys.unraisablehook` is now used to handle unhandled exceptions. @@ -68,10 +70,10 @@ This module defines the following constants and functions: there is no guarantee that the interruption will happen immediately. If given, *signum* is the number of the signal to simulate. - If *signum* is not given, :data:`signal.SIGINT` is simulated. + If *signum* is not given, :const:`signal.SIGINT` is simulated. If the given signal isn't handled by Python (it was set to - :data:`signal.SIG_DFL` or :data:`signal.SIG_IGN`), this function does + :const:`signal.SIG_DFL` or :const:`signal.SIG_IGN`), this function does nothing. .. versionchanged:: 3.10 @@ -118,10 +120,13 @@ This module defines the following constants and functions: Its value may be used to uniquely identify this particular thread system-wide (until the thread terminates, after which the value may be recycled by the OS). - .. availability:: Windows, FreeBSD, Linux, macOS, OpenBSD, NetBSD, AIX, DragonFlyBSD. + .. availability:: Windows, FreeBSD, Linux, macOS, OpenBSD, NetBSD, AIX, DragonFlyBSD, GNU/kFreeBSD. .. versionadded:: 3.8 + .. versionchanged:: 3.13 + Added support for GNU/kFreeBSD. + .. function:: stack_size([size]) @@ -148,8 +153,8 @@ This module defines the following constants and functions: .. data:: TIMEOUT_MAX The maximum value allowed for the *timeout* parameter of - :meth:`Lock.acquire`. Specifying a timeout greater than this value will - raise an :exc:`OverflowError`. + :meth:`Lock.acquire <threading.Lock.acquire>`. Specifying a timeout greater + than this value will raise an :exc:`OverflowError`. .. versionadded:: 3.2 @@ -206,7 +211,7 @@ In addition to these methods, lock objects can also be used via the **Caveats:** - .. index:: module: signal +.. index:: pair: module; signal * Threads interact strangely with interrupts: the :exc:`KeyboardInterrupt` exception will be received by an arbitrary thread. (When the :mod:`signal` @@ -215,8 +220,9 @@ In addition to these methods, lock objects can also be used via the * Calling :func:`sys.exit` or raising the :exc:`SystemExit` exception is equivalent to calling :func:`_thread.exit`. -* It is not possible to interrupt the :meth:`acquire` method on a lock --- the - :exc:`KeyboardInterrupt` exception will happen after the lock has been acquired. +* It is not possible to interrupt the :meth:`~threading.Lock.acquire` method on + a lock --- the :exc:`KeyboardInterrupt` exception will happen after the lock + has been acquired. * When the main thread exits, it is system defined whether the other threads survive. On most systems, they are killed without executing diff --git a/Doc/library/abc.rst b/Doc/library/abc.rst index 3b74622e7ff46c8..fb4f9da169c5abf 100644 --- a/Doc/library/abc.rst +++ b/Doc/library/abc.rst @@ -21,7 +21,7 @@ The :mod:`collections` module has some concrete classes that derive from ABCs; these can, of course, be further derived. In addition, the :mod:`collections.abc` submodule has some ABCs that can be used to test whether a class or instance provides a particular interface, for example, if it is -hashable or if it is a mapping. 
+:term:`hashable` or if it is a mapping. This module provides the metaclass :class:`ABCMeta` for defining ABCs and @@ -154,7 +154,7 @@ a helper class :class:`ABC` to alternatively define ABCs through inheritance: Finally, the last line makes ``Foo`` a virtual subclass of ``MyIterable``, even though it does not define an :meth:`~iterator.__iter__` method (it uses the old-style iterable protocol, defined in terms of :meth:`__len__` and - :meth:`__getitem__`). Note that this will not make ``get_iterator`` + :meth:`~object.__getitem__`). Note that this will not make ``get_iterator`` available as a method of ``Foo``, so it is provided separately. diff --git a/Doc/library/aifc.rst b/Doc/library/aifc.rst deleted file mode 100644 index 9f20a30193fa70d..000000000000000 --- a/Doc/library/aifc.rst +++ /dev/null @@ -1,247 +0,0 @@ -:mod:`aifc` --- Read and write AIFF and AIFC files -================================================== - -.. module:: aifc - :synopsis: Read and write audio files in AIFF or AIFC format. - :deprecated: - -**Source code:** :source:`Lib/aifc.py` - -.. index:: - single: Audio Interchange File Format - single: AIFF - single: AIFF-C - - -.. deprecated-removed:: 3.11 3.13 - The :mod:`aifc` module is deprecated - (see :pep:`PEP 594 <594#aifc>` for details). - --------------- - -This module provides support for reading and writing AIFF and AIFF-C files. -AIFF is Audio Interchange File Format, a format for storing digital audio -samples in a file. AIFF-C is a newer version of the format that includes the -ability to compress the audio data. - -Audio files have a number of parameters that describe the audio data. The -sampling rate or frame rate is the number of times per second the sound is -sampled. The number of channels indicate if the audio is mono, stereo, or -quadro. Each frame consists of one sample per channel. The sample size is the -size in bytes of each sample. Thus a frame consists of -``nchannels * samplesize`` bytes, and a second's worth of audio consists of -``nchannels * samplesize * framerate`` bytes. - -For example, CD quality audio has a sample size of two bytes (16 bits), uses two -channels (stereo) and has a frame rate of 44,100 frames/second. This gives a -frame size of 4 bytes (2\*2), and a second's worth occupies 2\*2\*44100 bytes -(176,400 bytes). - -Module :mod:`aifc` defines the following function: - - -.. function:: open(file, mode=None) - - Open an AIFF or AIFF-C file and return an object instance with methods that are - described below. The argument *file* is either a string naming a file or a - :term:`file object`. *mode* must be ``'r'`` or ``'rb'`` when the file must be - opened for reading, or ``'w'`` or ``'wb'`` when the file must be opened for writing. - If omitted, ``file.mode`` is used if it exists, otherwise ``'rb'`` is used. When - used for writing, the file object should be seekable, unless you know ahead of - time how many samples you are going to write in total and use - :meth:`writeframesraw` and :meth:`setnframes`. - The :func:`.open` function may be used in a :keyword:`with` statement. When - the :keyword:`!with` block completes, the :meth:`~aifc.close` method is called. - - .. versionchanged:: 3.4 - Support for the :keyword:`with` statement was added. - -Objects returned by :func:`.open` when a file is opened for reading have the -following methods: - - -.. method:: aifc.getnchannels() - - Return the number of audio channels (1 for mono, 2 for stereo). - - -.. 
method:: aifc.getsampwidth() - - Return the size in bytes of individual samples. - - -.. method:: aifc.getframerate() - - Return the sampling rate (number of audio frames per second). - - -.. method:: aifc.getnframes() - - Return the number of audio frames in the file. - - -.. method:: aifc.getcomptype() - - Return a bytes array of length 4 describing the type of compression - used in the audio file. For AIFF files, the returned value is - ``b'NONE'``. - - -.. method:: aifc.getcompname() - - Return a bytes array convertible to a human-readable description - of the type of compression used in the audio file. For AIFF files, - the returned value is ``b'not compressed'``. - - -.. method:: aifc.getparams() - - Returns a :func:`~collections.namedtuple` ``(nchannels, sampwidth, - framerate, nframes, comptype, compname)``, equivalent to output of the - :meth:`get\*` methods. - - -.. method:: aifc.getmarkers() - - Return a list of markers in the audio file. A marker consists of a tuple of - three elements. The first is the mark ID (an integer), the second is the mark - position in frames from the beginning of the data (an integer), the third is the - name of the mark (a string). - - -.. method:: aifc.getmark(id) - - Return the tuple as described in :meth:`getmarkers` for the mark with the given - *id*. - - -.. method:: aifc.readframes(nframes) - - Read and return the next *nframes* frames from the audio file. The returned - data is a string containing for each frame the uncompressed samples of all - channels. - - -.. method:: aifc.rewind() - - Rewind the read pointer. The next :meth:`readframes` will start from the - beginning. - - -.. method:: aifc.setpos(pos) - - Seek to the specified frame number. - - -.. method:: aifc.tell() - - Return the current frame number. - - -.. method:: aifc.close() - - Close the AIFF file. After calling this method, the object can no longer be - used. - -Objects returned by :func:`.open` when a file is opened for writing have all the -above methods, except for :meth:`readframes` and :meth:`setpos`. In addition -the following methods exist. The :meth:`get\*` methods can only be called after -the corresponding :meth:`set\*` methods have been called. Before the first -:meth:`writeframes` or :meth:`writeframesraw`, all parameters except for the -number of frames must be filled in. - - -.. method:: aifc.aiff() - - Create an AIFF file. The default is that an AIFF-C file is created, unless the - name of the file ends in ``'.aiff'`` in which case the default is an AIFF file. - - -.. method:: aifc.aifc() - - Create an AIFF-C file. The default is that an AIFF-C file is created, unless - the name of the file ends in ``'.aiff'`` in which case the default is an AIFF - file. - - -.. method:: aifc.setnchannels(nchannels) - - Specify the number of channels in the audio file. - - -.. method:: aifc.setsampwidth(width) - - Specify the size in bytes of audio samples. - - -.. method:: aifc.setframerate(rate) - - Specify the sampling frequency in frames per second. - - -.. method:: aifc.setnframes(nframes) - - Specify the number of frames that are to be written to the audio file. If this - parameter is not set, or not set correctly, the file needs to support seeking. - - -.. method:: aifc.setcomptype(type, name) - - .. index:: - single: u-LAW - single: A-LAW - single: G.722 - - Specify the compression type. If not specified, the audio data will - not be compressed. In AIFF files, compression is not possible. 
- The name parameter should be a human-readable description of the - compression type as a bytes array, the type parameter should be a - bytes array of length 4. Currently the following compression types - are supported: ``b'NONE'``, ``b'ULAW'``, ``b'ALAW'``, ``b'G722'``. - - -.. method:: aifc.setparams(nchannels, sampwidth, framerate, comptype, compname) - - Set all the above parameters at once. The argument is a tuple consisting of the - various parameters. This means that it is possible to use the result of a - :meth:`getparams` call as argument to :meth:`setparams`. - - -.. method:: aifc.setmark(id, pos, name) - - Add a mark with the given id (larger than 0), and the given name at the given - position. This method can be called at any time before :meth:`close`. - - -.. method:: aifc.tell() - :noindex: - - Return the current write position in the output file. Useful in combination - with :meth:`setmark`. - - -.. method:: aifc.writeframes(data) - - Write data to the output file. This method can only be called after the audio - file parameters have been set. - - .. versionchanged:: 3.4 - Any :term:`bytes-like object` is now accepted. - - -.. method:: aifc.writeframesraw(data) - - Like :meth:`writeframes`, except that the header of the audio file is not - updated. - - .. versionchanged:: 3.4 - Any :term:`bytes-like object` is now accepted. - - -.. method:: aifc.close() - :noindex: - - Close the AIFF file. The header of the file is updated to reflect the actual - size of the audio data. After calling this method, the object can no longer be - used. - diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst index c55d94421e5b14e..fbffa71d2007359 100644 --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -31,12 +31,12 @@ Core Functionality The :mod:`argparse` module's support for command-line interfaces is built around an instance of :class:`argparse.ArgumentParser`. It is a container for -argument specifications and has options that apply the parser as whole:: +argument specifications and has options that apply to the parser as whole:: parser = argparse.ArgumentParser( - prog = 'ProgramName', - description = 'What the program does', - epilog = 'Text at the bottom of help') + prog='ProgramName', + description='What the program does', + epilog='Text at the bottom of help') The :meth:`ArgumentParser.add_argument` method attaches individual argument specifications to the parser. 
It supports positional arguments, options that @@ -57,20 +57,20 @@ the extracted data in a :class:`argparse.Namespace` object:: Quick Links for add_argument() ------------------------------ -====================== =========================================================== ========================================================================================================================== -Name Description Values -====================== =========================================================== ========================================================================================================================== -action_ Specify how an argument should be handled ``'store'``, ``'store_const'``, ``'store_true'``, ``'append'``, ``'append_const'``, ``'count'``, ``'help'``, ``'version'`` -choices_ Limit values to a specific set of choices ``['foo', 'bar']``, ``range(1, 10)``, or :class:`~collections.abc.Container` instance -const_ Store a constant value -default_ Default value used when an argument is not provided Defaults to ``None`` -dest_ Specify the attribute name used in the result namespace -help_ Help message for an argument -metavar_ Alternate display name for the argument as shown in help -nargs_ Number of times the argument can be used :class:`int`, ``'?'``, ``'*'``, ``'+'``, or ``argparse.REMAINDER`` -required_ Indicate whether an argument is required or optional ``True`` or ``False`` -type_ Automatically convert an argument to the given type :class:`int`, :class:`float`, ``argparse.FileType('w')``, or callable function -====================== =========================================================== ========================================================================================================================== +============================ =========================================================== ========================================================================================================================== +Name Description Values +============================ =========================================================== ========================================================================================================================== +action_ Specify how an argument should be handled ``'store'``, ``'store_const'``, ``'store_true'``, ``'append'``, ``'append_const'``, ``'count'``, ``'help'``, ``'version'`` +choices_ Limit values to a specific set of choices ``['foo', 'bar']``, ``range(1, 10)``, or :class:`~collections.abc.Container` instance +const_ Store a constant value +default_ Default value used when an argument is not provided Defaults to ``None`` +dest_ Specify the attribute name used in the result namespace +help_ Help message for an argument +metavar_ Alternate display name for the argument as shown in help +nargs_ Number of times the argument can be used :class:`int`, ``'?'``, ``'*'``, or ``'+'`` +required_ Indicate whether an argument is required or optional ``True`` or ``False`` +:ref:`type <argparse-type>` Automatically convert an argument to the given type :class:`int`, :class:`float`, ``argparse.FileType('w')``, or callable function +============================ =========================================================== ========================================================================================================================== Example @@ -565,6 +565,7 @@ arguments they contain. For example:: >>> with open('args.txt', 'w', encoding=sys.getfilesystemencoding()) as fp: ... fp.write('-f\nbar') + ... 
>>> parser = argparse.ArgumentParser(fromfile_prefix_chars='@') >>> parser.add_argument('-f') >>> parser.parse_args(['-f', 'foo', '@args.txt']) @@ -584,7 +585,7 @@ arguments will never be treated as file references. .. versionchanged:: 3.12 :class:`ArgumentParser` changed encoding and errors to read arguments files - from default (e.g. :func:`locale.getpreferredencoding(False)` and + from default (e.g. :func:`locale.getpreferredencoding(False) <locale.getpreferredencoding>` and ``"strict"``) to :term:`filesystem encoding and error handler`. Arguments file should be encoded in UTF-8 instead of ANSI Codepage on Windows. @@ -764,7 +765,7 @@ The add_argument() method * type_ - The type to which the command-line argument should be converted. - * choices_ - A container of the allowable values for the argument. + * choices_ - A sequence of the allowable values for the argument. * required_ - Whether or not the command-line option may be omitted (optionals only). @@ -1131,7 +1132,7 @@ command-line argument was not present:: Namespace(foo='1') -.. _type: +.. _argparse-type: type ^^^^ @@ -1190,7 +1191,7 @@ done downstream after the arguments are parsed. For example, JSON or YAML conversions have complex error cases that require better reporting than can be given by the ``type`` keyword. A :exc:`~json.JSONDecodeError` would not be well formatted and a -:exc:`FileNotFound` exception would not be handled at all. +:exc:`FileNotFoundError` exception would not be handled at all. Even :class:`~argparse.FileType` has its limitations for use with the ``type`` keyword. If one argument uses *FileType* and then a subsequent argument fails, @@ -1208,7 +1209,7 @@ choices ^^^^^^^ Some command-line arguments should be selected from a restricted set of values. -These can be handled by passing a container object as the *choices* keyword +These can be handled by passing a sequence object as the *choices* keyword argument to :meth:`~ArgumentParser.add_argument`. When the command line is parsed, argument values will be checked, and an error message will be displayed if the argument was not one of the acceptable values:: @@ -1222,9 +1223,9 @@ if the argument was not one of the acceptable values:: game.py: error: argument move: invalid choice: 'fire' (choose from 'rock', 'paper', 'scissors') -Note that inclusion in the *choices* container is checked after any type_ +Note that inclusion in the *choices* sequence is checked after any type_ conversions have been performed, so the type of the objects in the *choices* -container should match the type_ specified:: +sequence should match the type_ specified:: >>> parser = argparse.ArgumentParser(prog='doors.py') >>> parser.add_argument('door', type=int, choices=range(1, 4)) @@ -1234,8 +1235,8 @@ container should match the type_ specified:: usage: doors.py [-h] {1,2,3} doors.py: error: argument door: invalid choice: 4 (choose from 1, 2, 3) -Any container can be passed as the *choices* value, so :class:`list` objects, -:class:`set` objects, and custom containers are all supported. +Any sequence can be passed as the *choices* value, so :class:`list` objects, +:class:`tuple` objects, and custom sequences are all supported. Use of :class:`enum.Enum` is not recommended because it is difficult to control its appearance in usage, help, and error messages. @@ -1444,7 +1445,7 @@ Action classes Action classes implement the Action API, a callable which returns a callable which processes arguments from the command-line. 
Any object which follows this API may be passed as the ``action`` parameter to -:meth:`add_argument`. +:meth:`~ArgumentParser.add_argument`. .. class:: Action(option_strings, dest, nargs=None, const=None, default=None, \ type=None, choices=None, required=False, help=None, \ @@ -1722,7 +1723,7 @@ Sub-commands :class:`ArgumentParser` supports the creation of such sub-commands with the :meth:`add_subparsers` method. The :meth:`add_subparsers` method is normally called with no arguments and returns a special action object. This object - has a single method, :meth:`~ArgumentParser.add_parser`, which takes a + has a single method, :meth:`~_SubParsersAction.add_parser`, which takes a command name and any :class:`ArgumentParser` constructor arguments, and returns an :class:`ArgumentParser` object that can be modified as usual. @@ -1788,7 +1789,7 @@ Sub-commands for that particular parser will be printed. The help message will not include parent parser or sibling parser messages. (A help message for each subparser command, however, can be given by supplying the ``help=`` argument - to :meth:`add_parser` as above.) + to :meth:`~_SubParsersAction.add_parser` as above.) :: @@ -1866,7 +1867,7 @@ Sub-commands ... >>> # create the top-level parser >>> parser = argparse.ArgumentParser() - >>> subparsers = parser.add_subparsers() + >>> subparsers = parser.add_subparsers(required=True) >>> >>> # create the parser for the "foo" command >>> parser_foo = subparsers.add_parser('foo') @@ -1945,7 +1946,7 @@ Argument groups .. method:: ArgumentParser.add_argument_group(title=None, description=None) By default, :class:`ArgumentParser` groups command-line arguments into - "positional arguments" and "optional arguments" when displaying help + "positional arguments" and "options" when displaying help messages. When there is a better conceptual grouping of arguments than this default one, appropriate groups can be created using the :meth:`add_argument_group` method:: @@ -2156,7 +2157,7 @@ the populated namespace and the list of remaining argument strings. .. warning:: :ref:`Prefix matching <prefix-matching>` rules apply to - :meth:`parse_known_args`. The parser may consume an option even if it's just + :meth:`~ArgumentParser.parse_known_args`. The parser may consume an option even if it's just a prefix of one of its known options, instead of leaving it in the remaining arguments list. @@ -2217,7 +2218,7 @@ support this parsing style. These parsers do not support all the argparse features, and will raise exceptions if unsupported features are used. In particular, subparsers, -``argparse.REMAINDER``, and mutually exclusive groups that include both +and mutually exclusive groups that include both optionals and positionals are not supported. The following example shows the difference between @@ -2294,3 +2295,17 @@ A partial upgrade path from :mod:`optparse` to :mod:`argparse`: * Replace the OptionParser constructor ``version`` argument with a call to ``parser.add_argument('--version', action='version', version='<the version>')``. + +Exceptions +---------- + +.. exception:: ArgumentError + + An error from creating or using an argument (optional or positional). + + The string value of this exception is the message, augmented with + information about the argument that caused it. + +.. exception:: ArgumentTypeError + + Raised when something goes wrong converting a command line string to a type. 
diff --git a/Doc/library/array.rst b/Doc/library/array.rst index 95f1eaf401b0526..ad622627724217a 100644 --- a/Doc/library/array.rst +++ b/Doc/library/array.rst @@ -24,6 +24,8 @@ defined: +-----------+--------------------+-------------------+-----------------------+-------+ | ``'u'`` | wchar_t | Unicode character | 2 | \(1) | +-----------+--------------------+-------------------+-----------------------+-------+ +| ``'w'`` | Py_UCS4 | Unicode character | 4 | | ++-----------+--------------------+-------------------+-----------------------+-------+ | ``'h'`` | signed short | int | 2 | | +-----------+--------------------+-------------------+-----------------------+-------+ | ``'H'`` | unsigned short | int | 2 | | @@ -51,16 +53,17 @@ Notes: It can be 16 bits or 32 bits depending on the platform. .. versionchanged:: 3.9 - ``array('u')`` now uses ``wchar_t`` as C type instead of deprecated + ``array('u')`` now uses :c:type:`wchar_t` as C type instead of deprecated ``Py_UNICODE``. This change doesn't affect its behavior because - ``Py_UNICODE`` is alias of ``wchar_t`` since Python 3.3. + ``Py_UNICODE`` is alias of :c:type:`wchar_t` since Python 3.3. - .. deprecated-removed:: 3.3 4.0 + .. deprecated-removed:: 3.3 3.16 + Please migrate to ``'w'`` typecode. The actual representation of values is determined by the machine architecture (strictly speaking, by the C implementation). The actual size can be accessed -through the :attr:`itemsize` attribute. +through the :attr:`array.itemsize` attribute. The module defines the following item: @@ -85,173 +88,173 @@ The module defines the following type: to add initial items to the array. Otherwise, the iterable initializer is passed to the :meth:`extend` method. - .. audit-event:: array.__new__ typecode,initializer array.array + Array objects support the ordinary sequence operations of indexing, slicing, + concatenation, and multiplication. When using slice assignment, the assigned + value must be an array object with the same type code; in all other cases, + :exc:`TypeError` is raised. Array objects also implement the buffer interface, + and may be used wherever :term:`bytes-like objects <bytes-like object>` are supported. + .. audit-event:: array.__new__ typecode,initializer array.array -Array objects support the ordinary sequence operations of indexing, slicing, -concatenation, and multiplication. When using slice assignment, the assigned -value must be an array object with the same type code; in all other cases, -:exc:`TypeError` is raised. Array objects also implement the buffer interface, -and may be used wherever :term:`bytes-like objects <bytes-like object>` are supported. -The following data items and methods are also supported: + .. attribute:: typecode -.. attribute:: array.typecode + The typecode character used to create the array. - The typecode character used to create the array. + .. attribute:: itemsize -.. attribute:: array.itemsize + The length in bytes of one array item in the internal representation. - The length in bytes of one array item in the internal representation. + .. method:: append(x) -.. method:: array.append(x) + Append a new item with value *x* to the end of the array. - Append a new item with value *x* to the end of the array. + .. method:: buffer_info() -.. method:: array.buffer_info() + Return a tuple ``(address, length)`` giving the current memory address and the + length in elements of the buffer used to hold array's contents. 
The size of the + memory buffer in bytes can be computed as ``array.buffer_info()[1] * + array.itemsize``. This is occasionally useful when working with low-level (and + inherently unsafe) I/O interfaces that require memory addresses, such as certain + :c:func:`!ioctl` operations. The returned numbers are valid as long as the array + exists and no length-changing operations are applied to it. - Return a tuple ``(address, length)`` giving the current memory address and the - length in elements of the buffer used to hold array's contents. The size of the - memory buffer in bytes can be computed as ``array.buffer_info()[1] * - array.itemsize``. This is occasionally useful when working with low-level (and - inherently unsafe) I/O interfaces that require memory addresses, such as certain - :c:func:`ioctl` operations. The returned numbers are valid as long as the array - exists and no length-changing operations are applied to it. + .. note:: - .. note:: + When using array objects from code written in C or C++ (the only way to + effectively make use of this information), it makes more sense to use the buffer + interface supported by array objects. This method is maintained for backward + compatibility and should be avoided in new code. The buffer interface is + documented in :ref:`bufferobjects`. - When using array objects from code written in C or C++ (the only way to - effectively make use of this information), it makes more sense to use the buffer - interface supported by array objects. This method is maintained for backward - compatibility and should be avoided in new code. The buffer interface is - documented in :ref:`bufferobjects`. + .. method:: byteswap() -.. method:: array.byteswap() + "Byteswap" all items of the array. This is only supported for values which are + 1, 2, 4, or 8 bytes in size; for other types of values, :exc:`RuntimeError` is + raised. It is useful when reading data from a file written on a machine with a + different byte order. - "Byteswap" all items of the array. This is only supported for values which are - 1, 2, 4, or 8 bytes in size; for other types of values, :exc:`RuntimeError` is - raised. It is useful when reading data from a file written on a machine with a - different byte order. + .. method:: count(x) -.. method:: array.count(x) + Return the number of occurrences of *x* in the array. - Return the number of occurrences of *x* in the array. + .. method:: extend(iterable) -.. method:: array.extend(iterable) + Append items from *iterable* to the end of the array. If *iterable* is another + array, it must have *exactly* the same type code; if not, :exc:`TypeError` will + be raised. If *iterable* is not an array, it must be iterable and its elements + must be the right type to be appended to the array. - Append items from *iterable* to the end of the array. If *iterable* is another - array, it must have *exactly* the same type code; if not, :exc:`TypeError` will - be raised. If *iterable* is not an array, it must be iterable and its elements - must be the right type to be appended to the array. + .. method:: frombytes(s) -.. method:: array.frombytes(s) + Appends items from the string, interpreting the string as an array of machine + values (as if it had been read from a file using the :meth:`fromfile` method). - Appends items from the string, interpreting the string as an array of machine - values (as if it had been read from a file using the :meth:`fromfile` method). + .. versionadded:: 3.2 + :meth:`!fromstring` is renamed to :meth:`frombytes` for clarity. - .. 
versionadded:: 3.2 - :meth:`fromstring` is renamed to :meth:`frombytes` for clarity. + .. method:: fromfile(f, n) -.. method:: array.fromfile(f, n) + Read *n* items (as machine values) from the :term:`file object` *f* and append + them to the end of the array. If less than *n* items are available, + :exc:`EOFError` is raised, but the items that were available are still + inserted into the array. - Read *n* items (as machine values) from the :term:`file object` *f* and append - them to the end of the array. If less than *n* items are available, - :exc:`EOFError` is raised, but the items that were available are still - inserted into the array. + .. method:: fromlist(list) -.. method:: array.fromlist(list) + Append items from the list. This is equivalent to ``for x in list: + a.append(x)`` except that if there is a type error, the array is unchanged. - Append items from the list. This is equivalent to ``for x in list: - a.append(x)`` except that if there is a type error, the array is unchanged. + .. method:: fromunicode(s) -.. method:: array.fromunicode(s) + Extends this array with data from the given unicode string. + The array must have type code ``'u'`` or ``'w'``; otherwise a :exc:`ValueError` is raised. + Use ``array.frombytes(unicodestring.encode(enc))`` to append Unicode data to an + array of some other type. - Extends this array with data from the given unicode string. The array must - be a type ``'u'`` array; otherwise a :exc:`ValueError` is raised. Use - ``array.frombytes(unicodestring.encode(enc))`` to append Unicode data to an - array of some other type. + .. method:: index(x[, start[, stop]]) -.. method:: array.index(x[, start[, stop]]) + Return the smallest *i* such that *i* is the index of the first occurrence of + *x* in the array. The optional arguments *start* and *stop* can be + specified to search for *x* within a subsection of the array. Raise + :exc:`ValueError` if *x* is not found. - Return the smallest *i* such that *i* is the index of the first occurrence of - *x* in the array. The optional arguments *start* and *stop* can be - specified to search for *x* within a subsection of the array. Raise - :exc:`ValueError` if *x* is not found. + .. versionchanged:: 3.10 + Added optional *start* and *stop* parameters. - .. versionchanged:: 3.10 - Added optional *start* and *stop* parameters. -.. method:: array.insert(i, x) + .. method:: insert(i, x) - Insert a new item with value *x* in the array before position *i*. Negative - values are treated as being relative to the end of the array. + Insert a new item with value *x* in the array before position *i*. Negative + values are treated as being relative to the end of the array. -.. method:: array.pop([i]) + .. method:: pop([i]) - Removes the item with the index *i* from the array and returns it. The optional - argument defaults to ``-1``, so that by default the last item is removed and - returned. + Removes the item with the index *i* from the array and returns it. The optional + argument defaults to ``-1``, so that by default the last item is removed and + returned. -.. method:: array.remove(x) + .. method:: remove(x) - Remove the first occurrence of *x* from the array. + Remove the first occurrence of *x* from the array. -.. method:: array.reverse() + .. method:: reverse() - Reverse the order of the items in the array. + Reverse the order of the items in the array. -.. method:: array.tobytes() + .. 
method:: tobytes() - Convert the array to an array of machine values and return the bytes - representation (the same sequence of bytes that would be written to a file by - the :meth:`tofile` method.) + Convert the array to an array of machine values and return the bytes + representation (the same sequence of bytes that would be written to a file by + the :meth:`tofile` method.) - .. versionadded:: 3.2 - :meth:`tostring` is renamed to :meth:`tobytes` for clarity. + .. versionadded:: 3.2 + :meth:`!tostring` is renamed to :meth:`tobytes` for clarity. -.. method:: array.tofile(f) + .. method:: tofile(f) - Write all items (as machine values) to the :term:`file object` *f*. + Write all items (as machine values) to the :term:`file object` *f*. -.. method:: array.tolist() + .. method:: tolist() - Convert the array to an ordinary list with the same items. + Convert the array to an ordinary list with the same items. -.. method:: array.tounicode() + .. method:: tounicode() - Convert the array to a unicode string. The array must be a type ``'u'`` array; - otherwise a :exc:`ValueError` is raised. Use ``array.tobytes().decode(enc)`` to - obtain a unicode string from an array of some other type. + Convert the array to a unicode string. The array must have a type ``'u'`` or ``'w'``; + otherwise a :exc:`ValueError` is raised. Use ``array.tobytes().decode(enc)`` to + obtain a unicode string from an array of some other type. When an array object is printed or converted to a string, it is represented as ``array(typecode, initializer)``. The *initializer* is omitted if the array is -empty, otherwise it is a string if the *typecode* is ``'u'``, otherwise it is a -list of numbers. The string is guaranteed to be able to be converted back to an +empty, otherwise it is a string if the *typecode* is ``'u'`` or ``'w'``, +otherwise it is a list of numbers. +The string is guaranteed to be able to be converted back to an array with the same type and value using :func:`eval`, so long as the :class:`~array.array` class has been imported using ``from array import array``. Examples:: array('l') - array('u', 'hello \u2641') + array('w', 'hello \u2641') array('l', [1, 2, 3, 4, 5]) array('d', [1.0, 2.0, 3.14]) @@ -261,10 +264,6 @@ Examples:: Module :mod:`struct` Packing and unpacking of heterogeneous binary data. - Module :mod:`xdrlib` - Packing and unpacking of External Data Representation (XDR) data as used in some - remote procedure call systems. - `NumPy <https://numpy.org/>`_ The NumPy package defines another array type. diff --git a/Doc/library/ast.rst b/Doc/library/ast.rst index 0811b3fa0e7842c..4ebbe0e5471c88c 100644 --- a/Doc/library/ast.rst +++ b/Doc/library/ast.rst @@ -146,6 +146,102 @@ Node classes Snakes <https://greentreesnakes.readthedocs.io/en/latest/>`__ project and all its contributors. + +.. _ast-root-nodes: + +Root nodes +^^^^^^^^^^ + +.. class:: Module(body, type_ignores) + + A Python module, as with :ref:`file input <file-input>`. + Node type generated by :func:`ast.parse` in the default ``"exec"`` *mode*. + + *body* is a :class:`list` of the module's :ref:`ast-statements`. + + *type_ignores* is a :class:`list` of the module's type ignore comments; + see :func:`ast.parse` for more details. + + .. doctest:: + + >>> print(ast.dump(ast.parse('x = 1'), indent=4)) + Module( + body=[ + Assign( + targets=[ + Name(id='x', ctx=Store())], + value=Constant(value=1))], + type_ignores=[]) + + +.. class:: Expression(body) + + A single Python :ref:`expression input <expression-input>`. 
+ Node type generated by :func:`ast.parse` when *mode* is ``"eval"``. + + *body* is a single node, + one of the :ref:`expression types <ast-expressions>`. + + .. doctest:: + + >>> print(ast.dump(ast.parse('123', mode='eval'), indent=4)) + Expression( + body=Constant(value=123)) + + +.. class:: Interactive(body) + + A single :ref:`interactive input <interactive>`, like in :ref:`tut-interac`. + Node type generated by :func:`ast.parse` when *mode* is ``"single"``. + + *body* is a :class:`list` of :ref:`statement nodes <ast-statements>`. + + .. doctest:: + + >>> print(ast.dump(ast.parse('x = 1; y = 2', mode='single'), indent=4)) + Interactive( + body=[ + Assign( + targets=[ + Name(id='x', ctx=Store())], + value=Constant(value=1)), + Assign( + targets=[ + Name(id='y', ctx=Store())], + value=Constant(value=2))]) + + +.. class:: FunctionType(argtypes, returns) + + A representation of an old-style type comments for functions, + as Python versions prior to 3.5 didn't support :pep:`484` annotations. + Node type generated by :func:`ast.parse` when *mode* is ``"func_type"``. + + Such type comments would look like this:: + + def sum_two_number(a, b): + # type: (int, int) -> int + return a + b + + *argtypes* is a :class:`list` of :ref:`expression nodes <ast-expressions>`. + + *returns* is a single :ref:`expression node <ast-expressions>`. + + .. doctest:: + + >>> print(ast.dump(ast.parse('(int, str) -> List[int]', mode='func_type'), indent=4)) + FunctionType( + argtypes=[ + Name(id='int', ctx=Load()), + Name(id='str', ctx=Load())], + returns=Subscript( + value=Name(id='List', ctx=Load()), + slice=Name(id='int', ctx=Load()), + ctx=Load())) + + .. versionadded:: 3.8 + + Literals ^^^^^^^^ @@ -344,6 +440,8 @@ Variables type_ignores=[]) +.. _ast-expressions: + Expressions ^^^^^^^^^^^ @@ -481,17 +579,17 @@ Expressions Comparison operator tokens. -.. class:: Call(func, args, keywords, starargs, kwargs) +.. class:: Call(func, args, keywords) A function call. ``func`` is the function, which will often be a :class:`Name` or :class:`Attribute` object. Of the arguments: * ``args`` holds a list of the arguments passed by position. - * ``keywords`` holds a list of :class:`keyword` objects representing + * ``keywords`` holds a list of :class:`.keyword` objects representing arguments passed by keyword. When creating a ``Call`` node, ``args`` and ``keywords`` are required, but - they can be empty lists. ``starargs`` and ``kwargs`` are optional. + they can be empty lists. .. doctest:: @@ -552,10 +650,10 @@ Expressions .. class:: NamedExpr(target, value) - A named expression. This AST node is produced by the assignment expressions - operator (also known as the walrus operator). As opposed to the :class:`Assign` - node in which the first argument can be multiple nodes, in this case both - ``target`` and ``value`` must be single nodes. + A named expression. This AST node is produced by the assignment expressions + operator (also known as the walrus operator). As opposed to the :class:`Assign` + node in which the first argument can be multiple nodes, in this case both + ``target`` and ``value`` must be single nodes. .. doctest:: @@ -565,6 +663,7 @@ Expressions target=Name(id='x', ctx=Store()), value=Constant(value=4))) + .. versionadded:: 3.8 Subscripting ~~~~~~~~~~~~ @@ -735,6 +834,9 @@ Comprehensions ifs=[], is_async=1)])) + +.. _ast-statements: + Statements ^^^^^^^^^^ @@ -917,6 +1019,26 @@ Statements type_ignores=[]) +.. 
class:: TypeAlias(name, type_params, value) + + A :ref:`type alias <type-aliases>` created through the :keyword:`type` + statement. ``name`` is the name of the alias, ``type_params`` is a list of + :ref:`type parameters <ast-type-params>`, and ``value`` is the value of the + type alias. + + .. doctest:: + + >>> print(ast.dump(ast.parse('type Alias = int'), indent=4)) + Module( + body=[ + TypeAlias( + name=Name(id='Alias', ctx=Store()), + type_params=[], + value=Name(id='int', ctx=Load()))], + type_ignores=[]) + + .. versionadded:: 3.12 + Other statements which are only applicable inside functions or loops are described in other sections. @@ -1198,6 +1320,7 @@ Control flow finalbody=[])], type_ignores=[]) + .. versionadded:: 3.11 .. class:: ExceptHandler(type, name, body) @@ -1287,6 +1410,8 @@ Pattern matching that is being matched against the cases) and ``cases`` contains an iterable of :class:`match_case` nodes with the different cases. + .. versionadded:: 3.10 + .. class:: match_case(pattern, guard, body) A single case pattern in a ``match`` statement. ``pattern`` contains the @@ -1338,6 +1463,8 @@ Pattern matching value=Constant(value=Ellipsis))])])], type_ignores=[]) + .. versionadded:: 3.10 + .. class:: MatchValue(value) A match literal or value pattern that compares by equality. ``value`` is @@ -1365,6 +1492,8 @@ Pattern matching value=Constant(value=Ellipsis))])])], type_ignores=[]) + .. versionadded:: 3.10 + .. class:: MatchSingleton(value) A match literal pattern that compares by identity. ``value`` is the @@ -1390,6 +1519,8 @@ Pattern matching value=Constant(value=Ellipsis))])])], type_ignores=[]) + .. versionadded:: 3.10 + .. class:: MatchSequence(patterns) A match sequence pattern. ``patterns`` contains the patterns to be matched @@ -1421,6 +1552,8 @@ Pattern matching value=Constant(value=Ellipsis))])])], type_ignores=[]) + .. versionadded:: 3.10 + .. class:: MatchStar(name) Matches the rest of the sequence in a variable length match sequence pattern. @@ -1461,6 +1594,8 @@ Pattern matching value=Constant(value=Ellipsis))])])], type_ignores=[]) + .. versionadded:: 3.10 + .. class:: MatchMapping(keys, patterns, rest) A match mapping pattern. ``keys`` is a sequence of expression nodes. @@ -1507,6 +1642,8 @@ Pattern matching value=Constant(value=Ellipsis))])])], type_ignores=[]) + .. versionadded:: 3.10 + .. class:: MatchClass(cls, patterns, kwd_attrs, kwd_patterns) A match class pattern. ``cls`` is an expression giving the nominal class to @@ -1571,6 +1708,8 @@ Pattern matching value=Constant(value=Ellipsis))])])], type_ignores=[]) + .. versionadded:: 3.10 + .. class:: MatchAs(pattern, name) A match "as-pattern", capture pattern or wildcard pattern. ``pattern`` @@ -1612,6 +1751,8 @@ Pattern matching value=Constant(value=Ellipsis))])])], type_ignores=[]) + .. versionadded:: 3.10 + .. class:: MatchOr(patterns) A match "or-pattern". An or-pattern matches each of its subpatterns in turn @@ -1644,11 +1785,96 @@ Pattern matching value=Constant(value=Ellipsis))])])], type_ignores=[]) + .. versionadded:: 3.10 + +.. _ast-type-params: + +Type parameters +^^^^^^^^^^^^^^^ + +:ref:`Type parameters <type-params>` can exist on classes, functions, and type +aliases. + +.. class:: TypeVar(name, bound) + + A :class:`typing.TypeVar`. ``name`` is the name of the type variable. + ``bound`` is the bound or constraints, if any. If ``bound`` is a :class:`Tuple`, + it represents constraints; otherwise it represents the bound. + + .. 
doctest:: + + >>> print(ast.dump(ast.parse("type Alias[T: int] = list[T]"), indent=4)) + Module( + body=[ + TypeAlias( + name=Name(id='Alias', ctx=Store()), + type_params=[ + TypeVar( + name='T', + bound=Name(id='int', ctx=Load()))], + value=Subscript( + value=Name(id='list', ctx=Load()), + slice=Name(id='T', ctx=Load()), + ctx=Load()))], + type_ignores=[]) + + .. versionadded:: 3.12 + +.. class:: ParamSpec(name) + + A :class:`typing.ParamSpec`. ``name`` is the name of the parameter specification. + + .. doctest:: + + >>> print(ast.dump(ast.parse("type Alias[**P] = Callable[P, int]"), indent=4)) + Module( + body=[ + TypeAlias( + name=Name(id='Alias', ctx=Store()), + type_params=[ + ParamSpec(name='P')], + value=Subscript( + value=Name(id='Callable', ctx=Load()), + slice=Tuple( + elts=[ + Name(id='P', ctx=Load()), + Name(id='int', ctx=Load())], + ctx=Load()), + ctx=Load()))], + type_ignores=[]) + + .. versionadded:: 3.12 + +.. class:: TypeVarTuple(name) + + A :class:`typing.TypeVarTuple`. ``name`` is the name of the type variable tuple. + + .. doctest:: + + >>> print(ast.dump(ast.parse("type Alias[*Ts] = tuple[*Ts]"), indent=4)) + Module( + body=[ + TypeAlias( + name=Name(id='Alias', ctx=Store()), + type_params=[ + TypeVarTuple(name='Ts')], + value=Subscript( + value=Name(id='tuple', ctx=Load()), + slice=Tuple( + elts=[ + Starred( + value=Name(id='Ts', ctx=Load()), + ctx=Load())], + ctx=Load()), + ctx=Load()))], + type_ignores=[]) + + .. versionadded:: 3.12 Function and class definitions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. class:: FunctionDef(name, args, body, decorator_list, returns, type_comment) +.. class:: FunctionDef(name, args, body, decorator_list, returns, type_comment, type_params) A function definition. @@ -1658,11 +1884,15 @@ Function and class definitions * ``decorator_list`` is the list of decorators to be applied, stored outermost first (i.e. the first in the list will be applied last). * ``returns`` is the return annotation. + * ``type_params`` is a list of :ref:`type parameters <ast-type-params>`. .. attribute:: type_comment ``type_comment`` is an optional string with the type annotation as a comment. + .. versionchanged:: 3.12 + Added ``type_params``. + .. class:: Lambda(args, body) @@ -1748,7 +1978,8 @@ Function and class definitions decorator_list=[ Name(id='decorator1', ctx=Load()), Name(id='decorator2', ctx=Load())], - returns=Constant(value='return annotation'))], + returns=Constant(value='return annotation'), + type_params=[])], type_ignores=[]) @@ -1819,21 +2050,19 @@ Function and class definitions type_ignores=[]) -.. class:: ClassDef(name, bases, keywords, starargs, kwargs, body, decorator_list) +.. class:: ClassDef(name, bases, keywords, body, decorator_list, type_params) A class definition. * ``name`` is a raw string for the class name * ``bases`` is a list of nodes for explicitly specified base classes. - * ``keywords`` is a list of :class:`keyword` nodes, principally for 'metaclass'. + * ``keywords`` is a list of :class:`.keyword` nodes, principally for 'metaclass'. Other keywords will be passed to the metaclass, as per `PEP-3115 <https://peps.python.org/pep-3115/>`_. - * ``starargs`` and ``kwargs`` are each a single node, as in a function call. - starargs will be expanded to join the list of base classes, and kwargs will - be passed to the metaclass. * ``body`` is a list of nodes representing the code within the class definition. * ``decorator_list`` is a list of nodes, as in :class:`FunctionDef`. 
+ * ``type_params`` is a list of :ref:`type parameters <ast-type-params>`. .. doctest:: @@ -1858,17 +2087,24 @@ Function and class definitions Pass()], decorator_list=[ Name(id='decorator1', ctx=Load()), - Name(id='decorator2', ctx=Load())])], + Name(id='decorator2', ctx=Load())], + type_params=[])], type_ignores=[]) + .. versionchanged:: 3.12 + Added ``type_params``. + Async and await ^^^^^^^^^^^^^^^ -.. class:: AsyncFunctionDef(name, args, body, decorator_list, returns, type_comment) +.. class:: AsyncFunctionDef(name, args, body, decorator_list, returns, type_comment, type_params) An ``async def`` function definition. Has the same fields as :class:`FunctionDef`. + .. versionchanged:: 3.12 + Added ``type_params``. + .. class:: Await(value) @@ -1898,7 +2134,8 @@ Async and await func=Name(id='other_func', ctx=Load()), args=[], keywords=[])))], - decorator_list=[])], + decorator_list=[], + type_params=[])], type_ignores=[]) @@ -1923,10 +2160,12 @@ Async and await Apart from the node classes, the :mod:`ast` module defines these utility functions and classes for traversing abstract syntax trees: -.. function:: parse(source, filename='<unknown>', mode='exec', *, type_comments=False, feature_version=None) +.. function:: parse(source, filename='<unknown>', mode='exec', *, type_comments=False, feature_version=None, optimize=-1) Parse the source into an AST node. Equivalent to ``compile(source, - filename, mode, ast.PyCF_ONLY_AST)``. + filename, mode, flags=FLAGS_VALUE, optimize=optimize)``, + where ``FLAGS_VALUE`` is ``ast.PyCF_ONLY_AST`` if ``optimize <= 0`` + and ``ast.PyCF_OPTIMIZED_AST`` otherwise. If ``type_comments=True`` is given, the parser is modified to check and return type comments as specified by :pep:`484` and :pep:`526`. @@ -1947,7 +2186,7 @@ and classes for traversing abstract syntax trees: Currently ``major`` must equal to ``3``. For example, setting ``feature_version=(3, 4)`` will allow the use of ``async`` and ``await`` as variable names. The lowest supported version is - ``(3, 4)``; the highest is ``sys.version_info[0:2]``. + ``(3, 7)``; the highest is ``sys.version_info[0:2]``. If source contains a null character ('\0'), :exc:`ValueError` is raised. @@ -1970,6 +2209,10 @@ and classes for traversing abstract syntax trees: .. versionchanged:: 3.8 Added ``type_comments``, ``mode='func_type'`` and ``feature_version``. + .. versionchanged:: 3.13 + The minimum supported version for feature_version is now (3,7) + The ``optimize`` argument was added. + .. function:: unparse(ast_obj) @@ -2240,26 +2483,26 @@ The following options are accepted: .. program:: ast -.. cmdoption:: -h, --help +.. option:: -h, --help Show the help message and exit. -.. cmdoption:: -m <mode> - --mode <mode> +.. option:: -m <mode> + --mode <mode> Specify what kind of code must be compiled, like the *mode* argument in :func:`parse`. -.. cmdoption:: --no-type-comments +.. option:: --no-type-comments Don't parse type comments. -.. cmdoption:: -a, --include-attributes +.. option:: -a, --include-attributes Include attributes such as line numbers and column offsets. -.. cmdoption:: -i <indent> - --indent <indent> +.. option:: -i <indent> + --indent <indent> Indentation of nodes in AST (number of spaces). 
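As a minimal sketch (assuming Python 3.12 or later, where the generic syntax and the ``ast.TypeVar`` node documented above are available), the new ``type_params`` field can be inspected directly on a parsed tree::

    import ast

    source = "def first[T](items: list[T]) -> T:\n    return items[0]"
    tree = ast.parse(source)
    func = tree.body[0]

    # FunctionDef now carries a type_params list; here it holds one TypeVar.
    print(type(func).__name__)                   # FunctionDef
    print([tp.name for tp in func.type_params])  # ['T']

The same field appears on :class:`ClassDef`, :class:`AsyncFunctionDef` and :class:`TypeAlias`, and is an empty list when no type parameters are present.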
diff --git a/Doc/library/asynchat.rst b/Doc/library/asynchat.rst deleted file mode 100644 index 32e04ad6d195333..000000000000000 --- a/Doc/library/asynchat.rst +++ /dev/null @@ -1,217 +0,0 @@ -:mod:`asynchat` --- Asynchronous socket command/response handler -================================================================ - -.. module:: asynchat - :synopsis: Support for asynchronous command/response protocols. - :deprecated: - -.. moduleauthor:: Sam Rushing <rushing@nightmare.com> -.. sectionauthor:: Steve Holden <sholden@holdenweb.com> - -**Source code:** :source:`Lib/asynchat.py` - -.. deprecated-removed:: 3.6 3.12 - The :mod:`asynchat` module is deprecated - (see :pep:`PEP 594 <594#asynchat>` for details). - Please use :mod:`asyncio` instead. - --------------- - -.. note:: - - This module exists for backwards compatibility only. For new code we - recommend using :mod:`asyncio`. - -This module builds on the :mod:`asyncore` infrastructure, simplifying -asynchronous clients and servers and making it easier to handle protocols -whose elements are terminated by arbitrary strings, or are of variable length. -:mod:`asynchat` defines the abstract class :class:`async_chat` that you -subclass, providing implementations of the :meth:`collect_incoming_data` and -:meth:`found_terminator` methods. It uses the same asynchronous loop as -:mod:`asyncore`, and the two types of channel, :class:`asyncore.dispatcher` -and :class:`asynchat.async_chat`, can freely be mixed in the channel map. -Typically an :class:`asyncore.dispatcher` server channel generates new -:class:`asynchat.async_chat` channel objects as it receives incoming -connection requests. - -.. include:: ../includes/wasm-notavail.rst - -.. class:: async_chat() - - This class is an abstract subclass of :class:`asyncore.dispatcher`. To make - practical use of the code you must subclass :class:`async_chat`, providing - meaningful :meth:`collect_incoming_data` and :meth:`found_terminator` - methods. - The :class:`asyncore.dispatcher` methods can be used, although not all make - sense in a message/response context. - - Like :class:`asyncore.dispatcher`, :class:`async_chat` defines a set of - events that are generated by an analysis of socket conditions after a - :c:func:`select` call. Once the polling loop has been started the - :class:`async_chat` object's methods are called by the event-processing - framework with no action on the part of the programmer. - - Two class attributes can be modified, to improve performance, or possibly - even to conserve memory. - - - .. data:: ac_in_buffer_size - - The asynchronous input buffer size (default ``4096``). - - - .. data:: ac_out_buffer_size - - The asynchronous output buffer size (default ``4096``). - - Unlike :class:`asyncore.dispatcher`, :class:`async_chat` allows you to - define a :abbr:`FIFO (first-in, first-out)` queue of *producers*. A producer need - have only one method, :meth:`more`, which should return data to be - transmitted on the channel. - The producer indicates exhaustion (*i.e.* that it contains no more data) by - having its :meth:`more` method return the empty bytes object. At this point - the :class:`async_chat` object removes the producer from the queue and starts - using the next producer, if any. When the producer queue is empty the - :meth:`handle_write` method does nothing. You use the channel object's - :meth:`set_terminator` method to describe how to recognize the end of, or - an important breakpoint in, an incoming transmission from the remote - endpoint. 
- - To build a functioning :class:`async_chat` subclass your input methods - :meth:`collect_incoming_data` and :meth:`found_terminator` must handle the - data that the channel receives asynchronously. The methods are described - below. - - -.. method:: async_chat.close_when_done() - - Pushes a ``None`` on to the producer queue. When this producer is popped off - the queue it causes the channel to be closed. - - -.. method:: async_chat.collect_incoming_data(data) - - Called with *data* holding an arbitrary amount of received data. The - default method, which must be overridden, raises a - :exc:`NotImplementedError` exception. - - -.. method:: async_chat.discard_buffers() - - In emergencies this method will discard any data held in the input and/or - output buffers and the producer queue. - - -.. method:: async_chat.found_terminator() - - Called when the incoming data stream matches the termination condition set - by :meth:`set_terminator`. The default method, which must be overridden, - raises a :exc:`NotImplementedError` exception. The buffered input data - should be available via an instance attribute. - - -.. method:: async_chat.get_terminator() - - Returns the current terminator for the channel. - - -.. method:: async_chat.push(data) - - Pushes data on to the channel's queue to ensure its transmission. - This is all you need to do to have the channel write the data out to the - network, although it is possible to use your own producers in more complex - schemes to implement encryption and chunking, for example. - - -.. method:: async_chat.push_with_producer(producer) - - Takes a producer object and adds it to the producer queue associated with - the channel. When all currently pushed producers have been exhausted the - channel will consume this producer's data by calling its :meth:`more` - method and send the data to the remote endpoint. - - -.. method:: async_chat.set_terminator(term) - - Sets the terminating condition to be recognized on the channel. ``term`` - may be any of three types of value, corresponding to three different ways - to handle incoming protocol data. - - +-----------+---------------------------------------------+ - | term | Description | - +===========+=============================================+ - | *string* | Will call :meth:`found_terminator` when the | - | | string is found in the input stream | - +-----------+---------------------------------------------+ - | *integer* | Will call :meth:`found_terminator` when the | - | | indicated number of characters have been | - | | received | - +-----------+---------------------------------------------+ - | ``None`` | The channel continues to collect data | - | | forever | - +-----------+---------------------------------------------+ - - Note that any data following the terminator will be available for reading - by the channel after :meth:`found_terminator` is called. - - -.. _asynchat-example: - -asynchat Example ----------------- - -The following partial example shows how HTTP requests can be read with -:class:`async_chat`. A web server might create an -:class:`http_request_handler` object for each incoming client connection. -Notice that initially the channel terminator is set to match the blank line at -the end of the HTTP headers, and a flag indicates that the headers are being -read. 
- -Once the headers have been read, if the request is of type POST (indicating -that further data are present in the input stream) then the -``Content-Length:`` header is used to set a numeric terminator to read the -right amount of data from the channel. - -The :meth:`handle_request` method is called once all relevant input has been -marshalled, after setting the channel terminator to ``None`` to ensure that -any extraneous data sent by the web client are ignored. :: - - - import asynchat - - class http_request_handler(asynchat.async_chat): - - def __init__(self, sock, addr, sessions, log): - asynchat.async_chat.__init__(self, sock=sock) - self.addr = addr - self.sessions = sessions - self.ibuffer = [] - self.obuffer = b"" - self.set_terminator(b"\r\n\r\n") - self.reading_headers = True - self.handling = False - self.cgi_data = None - self.log = log - - def collect_incoming_data(self, data): - """Buffer the data""" - self.ibuffer.append(data) - - def found_terminator(self): - if self.reading_headers: - self.reading_headers = False - self.parse_headers(b"".join(self.ibuffer)) - self.ibuffer = [] - if self.op.upper() == b"POST": - clen = self.headers.getheader("content-length") - self.set_terminator(int(clen)) - else: - self.handling = True - self.set_terminator(None) - self.handle_request() - elif not self.handling: - self.set_terminator(None) # browsers sometimes over-send - self.cgi_data = parse(self.headers, b"".join(self.ibuffer)) - self.handling = True - self.ibuffer = [] - self.handle_request() diff --git a/Doc/library/asyncio-dev.rst b/Doc/library/asyncio-dev.rst index 921a394a59fec7c..a9c3a0183bb72d2 100644 --- a/Doc/library/asyncio-dev.rst +++ b/Doc/library/asyncio-dev.rst @@ -34,7 +34,7 @@ There are several ways to enable asyncio debug mode: In addition to enabling the debug mode, consider also: * setting the log level of the :ref:`asyncio logger <asyncio-logger>` to - :py:data:`logging.DEBUG`, for example the following snippet of code + :py:const:`logging.DEBUG`, for example the following snippet of code can be run at startup of the application:: logging.basicConfig(level=logging.DEBUG) @@ -99,7 +99,7 @@ To schedule a coroutine object from a different OS thread, the # Wait for the result: result = future.result() -To handle signals and to execute subprocesses, the event loop must be +To handle signals the event loop must be run in the main thread. The :meth:`loop.run_in_executor` method can be used with a @@ -142,7 +142,7 @@ Logging asyncio uses the :mod:`logging` module and all logging is performed via the ``"asyncio"`` logger. -The default log level is :py:data:`logging.INFO`, which can be easily +The default log level is :py:const:`logging.INFO`, which can be easily adjusted:: logging.getLogger("asyncio").setLevel(logging.WARNING) diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst index d0a1ed2b99e55d1..ea1d146f06cf2bd 100644 --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -33,7 +33,8 @@ an event loop: Return the running event loop in the current OS thread. - If there is no running event loop a :exc:`RuntimeError` is raised. + Raise a :exc:`RuntimeError` if there is no running event loop. + This function can only be called from a coroutine or a callback. .. versionadded:: 3.7 @@ -42,27 +43,29 @@ an event loop: Get the current event loop. 
- If there is no current event loop set in the current OS thread, - the OS thread is main, and :func:`set_event_loop` has not yet - been called, asyncio will create a new event loop and set it as the - current one. + When called from a coroutine or a callback (e.g. scheduled with + call_soon or similar API), this function will always return the + running event loop. + + If there is no running event loop set, the function will return + the result of the ``get_event_loop_policy().get_event_loop()`` call. Because this function has rather complex behavior (especially when custom event loop policies are in use), using the :func:`get_running_loop` function is preferred to :func:`get_event_loop` in coroutines and callbacks. - Consider also using the :func:`asyncio.run` function instead of using - lower level functions to manually create and close an event loop. + As noted above, consider using the higher-level :func:`asyncio.run` function, + instead of using these lower level functions to manually create and close an + event loop. - .. deprecated:: 3.10 - Deprecation warning is emitted if there is no running event loop. - In future Python releases, this function will be an alias of - :func:`get_running_loop`. + .. deprecated:: 3.12 + Deprecation warning is emitted if there is no current event loop. + In some future Python release this will become an error. .. function:: set_event_loop(loop) - Set *loop* as a current event loop for the current OS thread. + Set *loop* as the current event loop for the current OS thread. .. function:: new_event_loop() @@ -183,19 +186,24 @@ Running and stopping the loop .. coroutinemethod:: loop.shutdown_default_executor(timeout=None) Schedule the closure of the default executor and wait for it to join all of - the threads in the :class:`ThreadPoolExecutor`. After calling this method, a - :exc:`RuntimeError` will be raised if :meth:`loop.run_in_executor` is called - while using the default executor. + the threads in the :class:`~concurrent.futures.ThreadPoolExecutor`. + Once this method has been called, + using the default executor with :meth:`loop.run_in_executor` + will raise a :exc:`RuntimeError`. - The *timeout* parameter specifies the amount of time the executor will - be given to finish joining. The default value is ``None``, which means the - executor will be given an unlimited amount of time. + The *timeout* parameter specifies the amount of time + (in :class:`float` seconds) the executor will be given to finish joining. + With the default, ``None``, + the executor is allowed an unlimited amount of time. - If the timeout duration is reached, a warning is emitted and executor is - terminated without waiting for its threads to finish joining. + If the *timeout* is reached, a :exc:`RuntimeWarning` is emitted + and the default executor is terminated + without waiting for its threads to finish joining. - Note that there is no need to call this function when - :func:`asyncio.run` is used. + .. note:: + + Do not call this method when using :func:`asyncio.run`, + as the latter handles default executor shutdown automatically. .. versionadded:: 3.9 @@ -210,22 +218,23 @@ Scheduling callbacks Schedule the *callback* :term:`callback` to be called with *args* arguments at the next iteration of the event loop. + Return an instance of :class:`asyncio.Handle`, + which can be used later to cancel the callback. + Callbacks are called in the order in which they are registered. Each callback will be called exactly once. 
- An optional keyword-only *context* argument allows specifying a + The optional keyword-only *context* argument specifies a custom :class:`contextvars.Context` for the *callback* to run in. - The current context is used when no *context* is provided. - - An instance of :class:`asyncio.Handle` is returned, which can be - used later to cancel the callback. + Callbacks use the current context when no *context* is provided. - This method is not thread-safe. + Unlike :meth:`call_soon_threadsafe`, this method is not thread-safe. .. method:: loop.call_soon_threadsafe(callback, *args, context=None) - A thread-safe variant of :meth:`call_soon`. Must be used to - schedule callbacks *from another thread*. + A thread-safe variant of :meth:`call_soon`. When scheduling callbacks from + another thread, this function *must* be used, since :meth:`call_soon` is not + thread-safe. Raises :exc:`RuntimeError` if called on a loop that's been closed. This can happen on a secondary thread when the main application is @@ -234,9 +243,9 @@ Scheduling callbacks See the :ref:`concurrency and multithreading <asyncio-multithreading>` section of the documentation. -.. versionchanged:: 3.7 - The *context* keyword-only parameter was added. See :pep:`567` - for more details. + .. versionchanged:: 3.7 + The *context* keyword-only parameter was added. See :pep:`567` + for more details. .. _asyncio-pass-keywords: @@ -394,11 +403,11 @@ Opening network connections Open a streaming transport connection to a given address specified by *host* and *port*. - The socket family can be either :py:data:`~socket.AF_INET` or - :py:data:`~socket.AF_INET6` depending on *host* (or the *family* + The socket family can be either :py:const:`~socket.AF_INET` or + :py:const:`~socket.AF_INET6` depending on *host* (or the *family* argument, if provided). - The socket type will be :py:data:`~socket.SOCK_STREAM`. + The socket type will be :py:const:`~socket.SOCK_STREAM`. *protocol_factory* must be a callable returning an :ref:`asyncio protocol <asyncio-protocol>` implementation. @@ -500,7 +509,7 @@ Opening network connections .. versionchanged:: 3.6 - The socket option :py:data:`~socket.TCP_NODELAY` is set by default + The socket option :ref:`socket.TCP_NODELAY <socket-unix-constants>` is set by default for all TCP connections. .. versionchanged:: 3.7 @@ -515,12 +524,12 @@ Opening network connections When a server's IPv4 path and protocol are working, but the server's IPv6 path and protocol are not working, a dual-stack client application experiences significant connection delay compared to an - IPv4-only client. This is undesirable because it causes the dual- - stack client to have a worse user experience. This document + IPv4-only client. This is undesirable because it causes the + dual-stack client to have a worse user experience. This document specifies requirements for algorithms that reduce this user-visible delay and provides an algorithm. - For more information: https://tools.ietf.org/html/rfc6555 + For more information: https://datatracker.ietf.org/doc/html/rfc6555 .. versionchanged:: 3.11 @@ -543,11 +552,11 @@ Opening network connections Create a datagram connection. - The socket family can be either :py:data:`~socket.AF_INET`, - :py:data:`~socket.AF_INET6`, or :py:data:`~socket.AF_UNIX`, + The socket family can be either :py:const:`~socket.AF_INET`, + :py:const:`~socket.AF_INET6`, or :py:const:`~socket.AF_UNIX`, depending on *host* (or the *family* argument, if provided). - The socket type will be :py:data:`~socket.SOCK_DGRAM`. 
+ The socket type will be :py:const:`~socket.SOCK_DGRAM`. *protocol_factory* must be a callable returning a :ref:`protocol <asyncio-protocol>` implementation. @@ -572,7 +581,7 @@ Opening network connections * *reuse_port* tells the kernel to allow this endpoint to be bound to the same port as other existing endpoints are bound to, so long as they all set this flag when being created. This option is not supported on Windows - and some Unixes. If the :py:data:`~socket.SO_REUSEPORT` constant is not + and some Unixes. If the :ref:`socket.SO_REUSEPORT <socket-unix-constants>` constant is not defined then this capability is unsupported. * *allow_broadcast* tells the kernel to allow this endpoint to send @@ -598,7 +607,8 @@ Opening network connections .. versionchanged:: 3.8.1 The *reuse_address* parameter is no longer supported, as using - :py:data:`~sockets.SO_REUSEADDR` poses a significant security concern for + :ref:`socket.SO_REUSEADDR <socket-unix-constants>` + poses a significant security concern for UDP. Explicitly passing ``reuse_address=True`` will raise an exception. When multiple processes with differing UIDs assign sockets to an @@ -607,7 +617,8 @@ Opening network connections For supported platforms, *reuse_port* can be used as a replacement for similar functionality. With *reuse_port*, - :py:data:`~sockets.SO_REUSEPORT` is used instead, which specifically + :ref:`socket.SO_REUSEPORT <socket-unix-constants>` + is used instead, which specifically prevents processes with differing UIDs from assigning sockets to the same socket address. @@ -625,8 +636,8 @@ Opening network connections Create a Unix connection. - The socket family will be :py:data:`~socket.AF_UNIX`; socket - type will be :py:data:`~socket.SOCK_STREAM`. + The socket family will be :py:const:`~socket.AF_UNIX`; socket + type will be :py:const:`~socket.SOCK_STREAM`. A tuple of ``(transport, protocol)`` is returned on success. @@ -652,6 +663,8 @@ Opening network connections Creating network servers ^^^^^^^^^^^^^^^^^^^^^^^^ +.. _loop_create_server: + .. coroutinemethod:: loop.create_server(protocol_factory, \ host=None, port=None, *, \ family=socket.AF_UNSPEC, \ @@ -662,7 +675,7 @@ Creating network servers ssl_shutdown_timeout=None, \ start_serving=True) - Create a TCP server (socket type :data:`~socket.SOCK_STREAM`) listening + Create a TCP server (socket type :const:`~socket.SOCK_STREAM`) listening on *port* of the *host* address. Returns a :class:`Server` object. @@ -690,10 +703,10 @@ Creating network servers be selected (note that if *host* resolves to multiple network interfaces, a different random port will be selected for each interface). - * *family* can be set to either :data:`socket.AF_INET` or - :data:`~socket.AF_INET6` to force the socket to use IPv4 or IPv6. + * *family* can be set to either :const:`socket.AF_INET` or + :const:`~socket.AF_INET6` to force the socket to use IPv4 or IPv6. If not set, the *family* will be determined from host name - (defaults to :data:`~socket.AF_UNSPEC`). + (defaults to :const:`~socket.AF_UNSPEC`). * *flags* is a bitmask for :meth:`getaddrinfo`. @@ -747,7 +760,7 @@ Creating network servers .. versionchanged:: 3.6 Added *ssl_handshake_timeout* and *start_serving* parameters. - The socket option :py:data:`~socket.TCP_NODELAY` is set by default + The socket option :ref:`socket.TCP_NODELAY <socket-unix-constants>` is set by default for all TCP connections. .. 
versionchanged:: 3.11 @@ -765,16 +778,20 @@ Creating network servers *, sock=None, backlog=100, ssl=None, \ ssl_handshake_timeout=None, \ ssl_shutdown_timeout=None, \ - start_serving=True) + start_serving=True, cleanup_socket=True) Similar to :meth:`loop.create_server` but works with the - :py:data:`~socket.AF_UNIX` socket family. + :py:const:`~socket.AF_UNIX` socket family. *path* is the name of a Unix domain socket, and is required, unless a *sock* argument is provided. Abstract Unix sockets, :class:`str`, :class:`bytes`, and :class:`~pathlib.Path` paths are supported. + If *cleanup_socket* is True then the Unix socket will automatically + be removed from the filesystem when the server is closed, unless the + socket has been replaced after the server has been created. + See the documentation of the :meth:`loop.create_server` method for information about arguments to this method. @@ -789,6 +806,10 @@ Creating network servers Added the *ssl_shutdown_timeout* parameter. + .. versionchanged:: 3.13 + + Added the *cleanup_socket* parameter. + .. coroutinemethod:: loop.connect_accepted_socket(protocol_factory, \ sock, *, ssl=None, ssl_handshake_timeout=None, \ @@ -886,6 +907,9 @@ TLS Upgrade object only because the coder caches *protocol*-side data and sporadically exchanges extra TLS session packets with *transport*. + In some situations (e.g. when the passed transport is already closing) this + may return ``None``. + Parameters: * *transport* and *protocol* instances that methods like @@ -927,7 +951,8 @@ Watching file descriptors .. method:: loop.remove_reader(fd) - Stop monitoring the *fd* file descriptor for read availability. + Stop monitoring the *fd* file descriptor for read availability. Returns + ``True`` if *fd* was previously being monitored for reads. .. method:: loop.add_writer(fd, callback, *args) @@ -940,7 +965,8 @@ Watching file descriptors .. method:: loop.remove_writer(fd) - Stop monitoring the *fd* file descriptor for write availability. + Stop monitoring the *fd* file descriptor for write availability. Returns + ``True`` if *fd* was previously being monitored for writes. See also :ref:`Platform Support <asyncio-platform-support>` section for some limitations of these methods. @@ -1177,6 +1203,8 @@ Working with pipes Unix signals ^^^^^^^^^^^^ +.. _loop_add_signal_handler: + .. method:: loop.add_signal_handler(signum, callback, *args) Set *callback* as the handler for the *signum* signal. @@ -1377,6 +1405,14 @@ Enabling debug mode The new :ref:`Python Development Mode <devmode>` can now also be used to enable the debug mode. +.. attribute:: loop.slow_callback_duration + + This attribute can be used to set the + minimum execution duration in seconds that is considered "slow". + When debug mode is enabled, "slow" callbacks are logged. + + Default value is 100 milliseconds. + .. seealso:: The :ref:`debug mode of asyncio <asyncio-debug-mode>`. @@ -1397,6 +1433,8 @@ async/await code consider using the high-level :ref:`Subprocess Support on Windows <asyncio-windows-subprocess>` for details. +.. _loop_subprocess_exec: + .. 
coroutinemethod:: loop.subprocess_exec(protocol_factory, *args, \ stdin=subprocess.PIPE, stdout=subprocess.PIPE, \ stderr=subprocess.PIPE, **kwargs) @@ -1427,9 +1465,8 @@ async/await code consider using the high-level * *stdin* can be any of these: - * a file-like object representing a pipe to be connected to the - subprocess's standard input stream using - :meth:`~loop.connect_write_pipe` + * a file-like object + * an existing file descriptor (a positive integer), for example those created with :meth:`os.pipe()` * the :const:`subprocess.PIPE` constant (default) which will create a new pipe and connect it, * the value ``None`` which will make the subprocess inherit the file @@ -1439,9 +1476,7 @@ async/await code consider using the high-level * *stdout* can be any of these: - * a file-like object representing a pipe to be connected to the - subprocess's standard output stream using - :meth:`~loop.connect_write_pipe` + * a file-like object * the :const:`subprocess.PIPE` constant (default) which will create a new pipe and connect it, * the value ``None`` which will make the subprocess inherit the file @@ -1451,9 +1486,7 @@ async/await code consider using the high-level * *stderr* can be any of these: - * a file-like object representing a pipe to be connected to the - subprocess's standard error stream using - :meth:`~loop.connect_write_pipe` + * a file-like object * the :const:`subprocess.PIPE` constant (default) which will create a new pipe and connect it, * the value ``None`` which will make the subprocess inherit the file @@ -1472,6 +1505,11 @@ async/await code consider using the high-level as text. :func:`bytes.decode` can be used to convert the bytes returned from the stream to text. + If a file-like object passed as *stdin*, *stdout* or *stderr* represents a + pipe, then the other side of this pipe should be registered with + :meth:`~loop.connect_write_pipe` or :meth:`~loop.connect_read_pipe` for use + with the event loop. + See the constructor of the :class:`subprocess.Popen` class for documentation on other arguments. @@ -1560,7 +1598,7 @@ Server objects are created by :meth:`loop.create_server`, :meth:`loop.create_unix_server`, :func:`start_server`, and :func:`start_unix_server` functions. -Do not instantiate the class directly. +Do not instantiate the :class:`Server` class directly. .. class:: Server @@ -1580,6 +1618,9 @@ Do not instantiate the class directly. .. versionchanged:: 3.7 Server object is an asynchronous context manager since Python 3.7. + .. versionchanged:: 3.11 + This class was exposed publicly as ``asyncio.Server`` in Python 3.9.11, 3.10.3 and 3.11. + .. method:: close() Stop serving: close listening sockets and set the :attr:`sockets` @@ -1588,8 +1629,9 @@ Do not instantiate the class directly. The sockets that represent existing incoming client connections are left open. - The server is closed asynchronously, use the :meth:`wait_closed` - coroutine to wait until the server is closed. + The server is closed asynchronously; use the :meth:`wait_closed` + coroutine to wait until the server is closed (and no more + connections are active). .. method:: get_loop() @@ -1647,11 +1689,13 @@ Do not instantiate the class directly. .. coroutinemethod:: wait_closed() - Wait until the :meth:`close` method completes. + Wait until the :meth:`close` method completes and all active + connections have finished. .. attribute:: sockets - List of :class:`socket.socket` objects the server is listening on. 
+ List of socket-like objects, ``asyncio.trsock.TransportSocket``, which + the server is listening on. .. versionchanged:: 3.7 Prior to Python 3.7 ``Server.sockets`` used to return an @@ -1668,13 +1712,13 @@ Event Loop Implementations asyncio ships with two different event loop implementations: :class:`SelectorEventLoop` and :class:`ProactorEventLoop`. -By default asyncio is configured to use :class:`SelectorEventLoop` -on Unix and :class:`ProactorEventLoop` on Windows. +By default asyncio is configured to use :class:`EventLoop`. .. class:: SelectorEventLoop - An event loop based on the :mod:`selectors` module. + A subclass of :class:`AbstractEventLoop` based on the + :mod:`selectors` module. Uses the most efficient *selector* available for the given platform. It is also possible to manually configure the @@ -1696,7 +1740,7 @@ on Unix and :class:`ProactorEventLoop` on Windows. .. class:: ProactorEventLoop - An event loop for Windows that uses "I/O Completion Ports" (IOCP). + A subclass of :class:`AbstractEventLoop` for Windows that uses "I/O Completion Ports" (IOCP). .. availability:: Windows. @@ -1705,6 +1749,14 @@ on Unix and :class:`ProactorEventLoop` on Windows. `MSDN documentation on I/O Completion Ports <https://docs.microsoft.com/en-ca/windows/desktop/FileIO/i-o-completion-ports>`_. +.. class:: EventLoop + + An alias to the most efficient available subclass of :class:`AbstractEventLoop` for the given + platform. + + It is an alias to :class:`SelectorEventLoop` on Unix and :class:`ProactorEventLoop` on Windows. + + .. versionadded:: 3.13 .. class:: AbstractEventLoop @@ -1854,7 +1906,7 @@ Set signal handlers for SIGINT and SIGTERM (This ``signals`` example only works on Unix.) -Register handlers for signals :py:data:`SIGINT` and :py:data:`SIGTERM` +Register handlers for signals :const:`~signal.SIGINT` and :const:`~signal.SIGTERM` using the :meth:`loop.add_signal_handler` method:: import asyncio diff --git a/Doc/library/asyncio-exceptions.rst b/Doc/library/asyncio-exceptions.rst index 9250f01b8a08953..7ad9103ca3fdfc8 100644 --- a/Doc/library/asyncio-exceptions.rst +++ b/Doc/library/asyncio-exceptions.rst @@ -31,7 +31,7 @@ Exceptions .. versionchanged:: 3.8 - :exc:`CancelledError` is now a subclass of :class:`BaseException`. + :exc:`CancelledError` is now a subclass of :class:`BaseException` rather than :class:`Exception`. .. exception:: InvalidStateError diff --git a/Doc/library/asyncio-extending.rst b/Doc/library/asyncio-extending.rst index 8ffd356f2d1cc3b..e7b293f484f8de6 100644 --- a/Doc/library/asyncio-extending.rst +++ b/Doc/library/asyncio-extending.rst @@ -69,7 +69,7 @@ Task lifetime support ===================== A third party task implementation should call the following functions to keep a task -visible by :func:`asyncio.get_tasks` and :func:`asyncio.current_task`: +visible by :func:`asyncio.all_tasks` and :func:`asyncio.current_task`: .. function:: _register_task(task) diff --git a/Doc/library/asyncio-future.rst b/Doc/library/asyncio-future.rst index 70cec9b2f90248d..893ae5518f757d8 100644 --- a/Doc/library/asyncio-future.rst +++ b/Doc/library/asyncio-future.rst @@ -276,4 +276,4 @@ the Future has a result:: :func:`concurrent.futures.as_completed` functions. - :meth:`asyncio.Future.cancel` accepts an optional ``msg`` argument, - but :func:`concurrent.futures.cancel` does not. + but :meth:`concurrent.futures.Future.cancel` does not. 
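A minimal sketch of the ``loop.slow_callback_duration`` attribute documented in the event loop section above; the 50 ms threshold is an arbitrary illustrative value, and the warning is only emitted while debug mode is enabled::

    import asyncio
    import time

    async def main():
        loop = asyncio.get_running_loop()
        # Treat any callback or task step longer than 50 ms as "slow".
        loop.slow_callback_duration = 0.05
        time.sleep(0.1)          # deliberately blocks the event loop
        await asyncio.sleep(0)

    # Debug mode must be enabled for "slow callback" warnings to be logged.
    asyncio.run(main(), debug=True)

Lowering the threshold during development makes it easier to spot blocking calls that should be moved to :meth:`loop.run_in_executor`.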
diff --git a/Doc/library/asyncio-llapi-index.rst b/Doc/library/asyncio-llapi-index.rst index b7ad888a7b67ab5..67136ba69ec875b 100644 --- a/Doc/library/asyncio-llapi-index.rst +++ b/Doc/library/asyncio-llapi-index.rst @@ -19,7 +19,7 @@ Obtaining the Event Loop - The **preferred** function to get the running event loop. * - :func:`asyncio.get_event_loop` - - Get an event loop instance (current or via the policy). + - Get an event loop instance (running or current via the current policy). * - :func:`asyncio.set_event_loop` - Set the event loop as current via the current policy. @@ -484,19 +484,19 @@ Protocol classes can implement the following **callback methods**: :widths: 50 50 :class: full-width-table - * - ``callback`` :meth:`pipe_data_received() - <SubprocessProtocol.pipe_data_received>` + * - ``callback`` :meth:`~SubprocessProtocol.pipe_data_received` - Called when the child process writes data into its *stdout* or *stderr* pipe. - * - ``callback`` :meth:`pipe_connection_lost() - <SubprocessProtocol.pipe_connection_lost>` + * - ``callback`` :meth:`~SubprocessProtocol.pipe_connection_lost` - Called when one of the pipes communicating with the child process is closed. * - ``callback`` :meth:`process_exited() <SubprocessProtocol.process_exited>` - - Called when the child process has exited. + - Called when the child process has exited. It can be called before + :meth:`~SubprocessProtocol.pipe_data_received` and + :meth:`~SubprocessProtocol.pipe_connection_lost` methods. Event Loop Policies diff --git a/Doc/library/asyncio-platforms.rst b/Doc/library/asyncio-platforms.rst index 50ad8a2ab70324a..19ec726c1be0602 100644 --- a/Doc/library/asyncio-platforms.rst +++ b/Doc/library/asyncio-platforms.rst @@ -37,7 +37,7 @@ All event loops on Windows do not support the following methods: * :meth:`loop.create_unix_connection` and :meth:`loop.create_unix_server` are not supported. - The :data:`socket.AF_UNIX` socket family is specific to Unix. + The :const:`socket.AF_UNIX` socket family is specific to Unix. * :meth:`loop.add_signal_handler` and :meth:`loop.remove_signal_handler` are not supported. diff --git a/Doc/library/asyncio-policy.rst b/Doc/library/asyncio-policy.rst index 052378ef32743b5..0d7821e608ec983 100644 --- a/Doc/library/asyncio-policy.rst +++ b/Doc/library/asyncio-policy.rst @@ -116,6 +116,12 @@ asyncio ships with the following built-in policies: On Windows, :class:`ProactorEventLoop` is now used by default. + .. deprecated:: 3.12 + The :meth:`get_event_loop` method of the default asyncio policy now emits + a :exc:`DeprecationWarning` if there is no current event loop set and it + decides to create one. + In some future Python release this will become an error. + .. class:: WindowsSelectorEventLoopPolicy @@ -222,6 +228,9 @@ implementation used by the asyncio event loop: This method has to be called to ensure that underlying resources are cleaned-up. + .. deprecated:: 3.12 + + .. class:: ThreadedChildWatcher This implementation starts a new waiting thread for every subprocess spawn. diff --git a/Doc/library/asyncio-protocol.rst b/Doc/library/asyncio-protocol.rst index 7bc906eaafc1f26..3f734f544afe216 100644 --- a/Doc/library/asyncio-protocol.rst +++ b/Doc/library/asyncio-protocol.rst @@ -708,6 +708,9 @@ factories passed to the :meth:`loop.subprocess_exec` and Called when the child process has exited. + It can be called before :meth:`~SubprocessProtocol.pipe_data_received` and + :meth:`~SubprocessProtocol.pipe_connection_lost` methods. 
+ Examples ======== @@ -746,7 +749,7 @@ received data, and close the connection:: loop = asyncio.get_running_loop() server = await loop.create_server( - lambda: EchoServerProtocol(), + EchoServerProtocol, '127.0.0.1', 8888) async with server: @@ -850,7 +853,7 @@ method, sends back received data:: # One protocol instance will be created to serve all # client requests. transport, protocol = await loop.create_datagram_endpoint( - lambda: EchoServerProtocol(), + EchoServerProtocol, local_addr=('127.0.0.1', 9999)) try: @@ -1003,12 +1006,26 @@ The subprocess is created by the :meth:`loop.subprocess_exec` method:: def __init__(self, exit_future): self.exit_future = exit_future self.output = bytearray() + self.pipe_closed = False + self.exited = False + + def pipe_connection_lost(self, fd, exc): + self.pipe_closed = True + self.check_for_exit() def pipe_data_received(self, fd, data): self.output.extend(data) def process_exited(self): - self.exit_future.set_result(True) + self.exited = True + # process_exited() method can be called before + # pipe_connection_lost() method: wait until both methods are + # called. + self.check_for_exit() + + def check_for_exit(self): + if self.pipe_closed and self.exited: + self.exit_future.set_result(True) async def get_date(): # Get a reference to the event loop as we plan to use diff --git a/Doc/library/asyncio-runner.rst b/Doc/library/asyncio-runner.rst index c43d664eba717f2..ec170dfde9e9aa2 100644 --- a/Doc/library/asyncio-runner.rst +++ b/Doc/library/asyncio-runner.rst @@ -22,7 +22,7 @@ to simplify async code usage for common wide-spread scenarios. Running an asyncio Program ========================== -.. function:: run(coro, *, debug=None) +.. function:: run(coro, *, debug=None, loop_factory=None) Execute the :term:`coroutine` *coro* and return the result. @@ -37,9 +37,13 @@ Running an asyncio Program debug mode explicitly. ``None`` is used to respect the global :ref:`asyncio-debug-mode` settings. - This function always creates a new event loop and closes it at - the end. It should be used as a main entry point for asyncio - programs, and should ideally only be called once. + If *loop_factory* is not ``None``, it is used to create a new event loop; + otherwise :func:`asyncio.new_event_loop` is used. The loop is closed at the end. + This function should be used as a main entry point for asyncio programs, + and should ideally only be called once. It is recommended to use + *loop_factory* to configure the event loop instead of policies. + Passing :class:`asyncio.EventLoop` allows running asyncio without the + policy system. The executor is given a timeout duration of 5 minutes to shutdown. If the executor hasn't finished within that duration, a warning is @@ -62,6 +66,10 @@ Running an asyncio Program *debug* is ``None`` by default to respect the global debug mode settings. + .. versionchanged:: 3.12 + + Added *loop_factory* parameter. + Runner context manager ====================== diff --git a/Doc/library/asyncio-stream.rst b/Doc/library/asyncio-stream.rst index d87e3c042c99770..0736e783bbc8c8e 100644 --- a/Doc/library/asyncio-stream.rst +++ b/Doc/library/asyncio-stream.rst @@ -52,6 +52,7 @@ and work with streams: limit=None, ssl=None, family=0, proto=0, \ flags=0, sock=None, local_addr=None, \ server_hostname=None, ssl_handshake_timeout=None, \ + ssl_shutdown_timeout=None, \ happy_eyeballs_delay=None, interleave=None) Establish a network connection and return a pair of @@ -82,6 +83,9 @@ and work with streams: .. 
versionchanged:: 3.10 Removed the *loop* parameter. + .. versionchanged:: 3.11 + Added the *ssl_shutdown_timeout* parameter. + .. coroutinefunction:: start_server(client_connected_cb, host=None, \ port=None, *, limit=None, \ @@ -89,7 +93,7 @@ and work with streams: flags=socket.AI_PASSIVE, sock=None, \ backlog=100, ssl=None, reuse_address=None, \ reuse_port=None, ssl_handshake_timeout=None, \ - start_serving=True) + ssl_shutdown_timeout=None, start_serving=True) Start a socket server. @@ -121,12 +125,15 @@ and work with streams: .. versionchanged:: 3.10 Removed the *loop* parameter. + .. versionchanged:: 3.11 + Added the *ssl_shutdown_timeout* parameter. + .. rubric:: Unix Sockets .. coroutinefunction:: open_unix_connection(path=None, *, limit=None, \ ssl=None, sock=None, server_hostname=None, \ - ssl_handshake_timeout=None) + ssl_handshake_timeout=None, ssl_shutdown_timeout=None) Establish a Unix socket connection and return a pair of ``(reader, writer)``. @@ -150,10 +157,14 @@ and work with streams: .. versionchanged:: 3.10 Removed the *loop* parameter. + .. versionchanged:: 3.11 + Added the *ssl_shutdown_timeout* parameter. + .. coroutinefunction:: start_unix_server(client_connected_cb, path=None, \ *, limit=None, sock=None, backlog=100, ssl=None, \ - ssl_handshake_timeout=None, start_serving=True) + ssl_handshake_timeout=None, \ + ssl_shutdown_timeout=None, start_serving=True) Start a Unix socket server. @@ -176,6 +187,9 @@ and work with streams: .. versionchanged:: 3.10 Removed the *loop* parameter. + .. versionchanged:: 3.11 + Added the *ssl_shutdown_timeout* parameter. + StreamReader ============ @@ -190,14 +204,26 @@ StreamReader directly; use :func:`open_connection` and :func:`start_server` instead. + .. method:: feed_eof() + + Acknowledge the EOF. + .. coroutinemethod:: read(n=-1) - Read up to *n* bytes. If *n* is not provided, or set to ``-1``, - read until EOF and return all read bytes. + Read up to *n* bytes from the stream. + If *n* is not provided or set to ``-1``, + read until EOF, then return all read :class:`bytes`. If EOF was received and the internal buffer is empty, return an empty ``bytes`` object. + If *n* is ``0``, return an empty ``bytes`` object immediately. + + If *n* is positive, return at most *n* available ``bytes`` + as soon as at least 1 byte is available in the internal buffer. + If EOF is received before any byte is read, return an empty + ``bytes`` object. + .. coroutinemethod:: readline() Read one line, where "line" is a sequence of bytes @@ -281,7 +307,8 @@ StreamWriter The method closes the stream and the underlying socket. - The method should be used along with the ``wait_closed()`` method:: + The method should be used, though not mandatory, + along with the ``wait_closed()`` method:: stream.close() await stream.wait_closed() @@ -321,7 +348,7 @@ StreamWriter returns immediately. .. coroutinemethod:: start_tls(sslcontext, \*, server_hostname=None, \ - ssl_handshake_timeout=None) + ssl_handshake_timeout=None, ssl_shutdown_timeout=None) Upgrade an existing stream-based connection to TLS. @@ -336,8 +363,16 @@ StreamWriter handshake to complete before aborting the connection. ``60.0`` seconds if ``None`` (default). + * *ssl_shutdown_timeout* is the time in seconds to wait for the SSL shutdown + to complete before aborting the connection. ``30.0`` seconds if ``None`` + (default). + .. versionadded:: 3.11 + .. versionchanged:: 3.12 + Added the *ssl_shutdown_timeout* parameter. + + .. 
method:: is_closing() Return ``True`` if the stream is closed or in the process of @@ -350,7 +385,8 @@ StreamWriter Wait until the stream is closed. Should be called after :meth:`close` to wait until the underlying - connection is closed. + connection is closed, ensuring that all data has been flushed + before e.g. exiting the program. .. versionadded:: 3.7 @@ -380,6 +416,7 @@ TCP echo client using the :func:`asyncio.open_connection` function:: print('Close the connection') writer.close() + await writer.wait_closed() asyncio.run(tcp_echo_client('Hello World!')) @@ -412,6 +449,7 @@ TCP echo server using the :func:`asyncio.start_server` function:: print("Close the connection") writer.close() + await writer.wait_closed() async def main(): server = await asyncio.start_server( @@ -468,6 +506,7 @@ Simple example querying HTTP headers of the URL passed on the command line:: # Ignore the body, close the socket writer.close() + await writer.wait_closed() url = sys.argv[1] asyncio.run(print_http_headers(url)) @@ -513,6 +552,7 @@ Coroutine waiting until a socket receives data using the # Got data, we are done: close the socket print("Received:", data.decode()) writer.close() + await writer.wait_closed() # Close the second socket wsock.close() diff --git a/Doc/library/asyncio-subprocess.rst b/Doc/library/asyncio-subprocess.rst index 28d0b21e8180b64..bf35b1cb798aee1 100644 --- a/Doc/library/asyncio-subprocess.rst +++ b/Doc/library/asyncio-subprocess.rst @@ -68,7 +68,7 @@ Creating Subprocesses The *limit* argument sets the buffer limit for :class:`StreamReader` wrappers for :attr:`Process.stdout` and :attr:`Process.stderr` - (if :attr:`subprocess.PIPE` is passed to *stdout* and *stderr* arguments). + (if :const:`subprocess.PIPE` is passed to *stdout* and *stderr* arguments). Return a :class:`~asyncio.subprocess.Process` instance. @@ -86,7 +86,7 @@ Creating Subprocesses The *limit* argument sets the buffer limit for :class:`StreamReader` wrappers for :attr:`Process.stdout` and :attr:`Process.stderr` - (if :attr:`subprocess.PIPE` is passed to *stdout* and *stderr* arguments). + (if :const:`subprocess.PIPE` is passed to *stdout* and *stderr* arguments). Return a :class:`~asyncio.subprocess.Process` instance. @@ -175,7 +175,7 @@ their completion. * the :meth:`~asyncio.subprocess.Process.communicate` and :meth:`~asyncio.subprocess.Process.wait` methods don't have a - *timeout* parameter: use the :func:`wait_for` function; + *timeout* parameter: use the :func:`~asyncio.wait_for` function; * the :meth:`Process.wait() <asyncio.subprocess.Process.wait>` method is asynchronous, whereas :meth:`subprocess.Popen.wait` method @@ -207,8 +207,9 @@ their completion. Interact with process: 1. send data to *stdin* (if *input* is not ``None``); - 2. read data from *stdout* and *stderr*, until EOF is reached; - 3. wait for process to terminate. + 2. closes *stdin*; + 3. read data from *stdout* and *stderr*, until EOF is reached; + 4. wait for process to terminate. The optional *input* argument is the data (:class:`bytes` object) that will be sent to the child process. @@ -229,6 +230,10 @@ their completion. Note, that the data read is buffered in memory, so do not use this method if the data size is large or unlimited. + .. versionchanged:: 3.12 + + *stdin* gets closed when `input=None` too. + .. method:: send_signal(signal) Sends the signal *signal* to the child process. @@ -244,7 +249,7 @@ their completion. Stop the child process. 
- On POSIX systems this method sends :py:data:`signal.SIGTERM` to the + On POSIX systems this method sends :py:const:`signal.SIGTERM` to the child process. On Windows the Win32 API function :c:func:`TerminateProcess` is diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst index fb6d23fda03e41d..797065c8ccf8941 100644 --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -18,6 +18,10 @@ and Tasks. Coroutines ========== +**Source code:** :source:`Lib/asyncio/coroutines.py` + +---------------------------------------------------- + :term:`Coroutines <coroutine>` declared with the async/await syntax is the preferred way of writing asyncio applications. For example, the following snippet of code prints "hello", waits 1 second, @@ -117,7 +121,7 @@ To actually run a coroutine, asyncio provides the following mechanisms: print(f"started at {time.strftime('%X')}") - # The wait is implicit when the context manager exits. + # The await is implicit when the context manager exits. print(f"finished at {time.strftime('%X')}") @@ -230,6 +234,10 @@ is :meth:`loop.run_in_executor`. Creating Tasks ============== +**Source code:** :source:`Lib/asyncio/tasks.py` + +----------------------------------------------- + .. function:: create_task(coro, *, name=None, context=None) Wrap the *coro* :ref:`coroutine <coroutine>` into a :class:`Task` @@ -248,8 +256,9 @@ Creating Tasks .. note:: - :meth:`asyncio.TaskGroup.create_task` is a newer alternative - that allows for convenient waiting for a group of related tasks. + :meth:`asyncio.TaskGroup.create_task` is a new alternative + leveraging structural concurrency; it allows for waiting + for a group of related tasks with strong safety guarantees. .. important:: @@ -292,13 +301,17 @@ in the task at the next opportunity. It is recommended that coroutines use ``try/finally`` blocks to robustly perform clean-up logic. In case :exc:`asyncio.CancelledError` is explicitly caught, it should generally be propagated when -clean-up is complete. Most code can safely ignore :exc:`asyncio.CancelledError`. +clean-up is complete. :exc:`asyncio.CancelledError` directly subclasses +:exc:`BaseException` so most code will not need to be aware of it. The asyncio components that enable structured concurrency, like :class:`asyncio.TaskGroup` and :func:`asyncio.timeout`, are implemented using cancellation internally and might misbehave if a coroutine swallows :exc:`asyncio.CancelledError`. Similarly, user code -should not call :meth:`uncancel <asyncio.Task.uncancel>`. +should not generally call :meth:`uncancel <asyncio.Task.uncancel>`. +However, in cases when suppressing :exc:`asyncio.CancelledError` is +truly desired, it is necessary to also call ``uncancel()`` to completely +remove the cancellation state. .. _taskgroups: @@ -328,7 +341,7 @@ Example:: async with asyncio.TaskGroup() as tg: task1 = tg.create_task(some_coro(...)) task2 = tg.create_task(another_coro(...)) - print("Both tasks have completed now.") + print(f"Both tasks have completed now: {task1.result()}, {task2.result()}") The ``async with`` statement will wait for all tasks in the group to finish. While waiting, new tasks may still be added to the group @@ -413,6 +426,9 @@ Sleeping .. versionchanged:: 3.10 Removed the *loop* parameter. + .. versionchanged:: 3.13 + Raises :exc:`ValueError` if *delay* is :data:`~math.nan`. + Running Tasks Concurrently ========================== @@ -447,8 +463,12 @@ Running Tasks Concurrently Tasks/Futures to be cancelled. .. 
note:: - A more modern way to create and run tasks concurrently and - wait for their completion is :class:`asyncio.TaskGroup`. + A new alternative to create and run tasks concurrently and + wait for their completion is :class:`asyncio.TaskGroup`. *TaskGroup* + provides stronger safety guarantees than *gather* for scheduling a nesting of subtasks: + if a task (or a subtask, a task scheduled by a task) + raises an exception, *TaskGroup* will, while *gather* will not, + cancel the remaining scheduled tasks). .. _asyncio_example_gather: @@ -510,6 +530,51 @@ Running Tasks Concurrently and there is no running event loop. +.. _eager-task-factory: + +Eager Task Factory +================== + +.. function:: eager_task_factory(loop, coro, *, name=None, context=None) + + A task factory for eager task execution. + + When using this factory (via :meth:`loop.set_task_factory(asyncio.eager_task_factory) <loop.set_task_factory>`), + coroutines begin execution synchronously during :class:`Task` construction. + Tasks are only scheduled on the event loop if they block. + This can be a performance improvement as the overhead of loop scheduling + is avoided for coroutines that complete synchronously. + + A common example where this is beneficial is coroutines which employ + caching or memoization to avoid actual I/O when possible. + + .. note:: + + Immediate execution of the coroutine is a semantic change. + If the coroutine returns or raises, the task is never scheduled + to the event loop. If the coroutine execution blocks, the task is + scheduled to the event loop. This change may introduce behavior + changes to existing applications. For example, + the application's task execution order is likely to change. + + .. versionadded:: 3.12 + +.. function:: create_eager_task_factory(custom_task_constructor) + + Create an eager task factory, similar to :func:`eager_task_factory`, + using the provided *custom_task_constructor* when creating a new task instead + of the default :class:`Task`. + + *custom_task_constructor* must be a *callable* with the signature matching + the signature of :class:`Task.__init__ <Task>`. + The callable must return a :class:`asyncio.Task`-compatible object. + + This function returns a *callable* intended to be used as a task factory of an + event loop via :meth:`loop.set_task_factory(factory) <loop.set_task_factory>`). + + .. versionadded:: 3.12 + + Shielding From Cancellation =========================== @@ -566,9 +631,9 @@ Shielding From Cancellation Timeouts ======== -.. coroutinefunction:: timeout(delay) +.. function:: timeout(delay) - An :ref:`asynchronous context manager <async-context-managers>` + Return an :ref:`asynchronous context manager <async-context-managers>` that can be used to limit the amount of time spent waiting on something. @@ -589,16 +654,16 @@ Timeouts If ``long_running_task`` takes more than 10 seconds to complete, the context manager will cancel the current task and handle the resulting :exc:`asyncio.CancelledError` internally, transforming it - into an :exc:`asyncio.TimeoutError` which can be caught and handled. + into a :exc:`TimeoutError` which can be caught and handled. .. note:: The :func:`asyncio.timeout` context manager is what transforms - the :exc:`asyncio.CancelledError` into an :exc:`asyncio.TimeoutError`, - which means the :exc:`asyncio.TimeoutError` can only be caught + the :exc:`asyncio.CancelledError` into a :exc:`TimeoutError`, + which means the :exc:`TimeoutError` can only be caught *outside* of the context manager. 
- Example of catching :exc:`asyncio.TimeoutError`:: + Example of catching :exc:`TimeoutError`:: async def main(): try: @@ -612,32 +677,26 @@ Timeouts The context manager produced by :func:`asyncio.timeout` can be rescheduled to a different deadline and inspected. - .. class:: Timeout() + .. class:: Timeout(when) An :ref:`asynchronous context manager <async-context-managers>` - that limits time spent inside of it. + for cancelling overdue coroutines. + + ``when`` should be an absolute time at which the context should time out, + as measured by the event loop's clock: - .. versionadded:: 3.11 + - If ``when`` is ``None``, the timeout will never trigger. + - If ``when < loop.time()``, the timeout will trigger on the next + iteration of the event loop. .. method:: when() -> float | None Return the current deadline, or ``None`` if the current deadline is not set. - The deadline is a float, consistent with the time returned by - :meth:`loop.time`. - .. method:: reschedule(when: float | None) - Change the time the timeout will trigger. - - If *when* is ``None``, any current deadline will be removed, and the - context manager will wait indefinitely. - - If *when* is a float, it is set as the new deadline. - - if *when* is in the past, the timeout will trigger on the next - iteration of the event loop. + Reschedule the timeout. .. method:: expired() -> bool @@ -658,14 +717,14 @@ Timeouts except TimeoutError: pass - if cm.expired: + if cm.expired(): print("Looks like we haven't finished on time.") Timeout context managers can be safely nested. .. versionadded:: 3.11 -.. coroutinefunction:: timeout_at(when) +.. function:: timeout_at(when) Similar to :func:`asyncio.timeout`, except *when* is the absolute time to stop waiting, or ``None``. @@ -708,9 +767,6 @@ Timeouts If the wait is cancelled, the future *aw* is also cancelled. - .. versionchanged:: 3.10 - Removed the *loop* parameter. - .. _asyncio_example_waitfor: Example:: @@ -741,6 +797,9 @@ Timeouts .. versionchanged:: 3.10 Removed the *loop* parameter. + .. versionchanged:: 3.11 + Raises :exc:`TimeoutError` instead of :exc:`asyncio.TimeoutError`. + Waiting Primitives ================== @@ -796,6 +855,10 @@ Waiting Primitives .. versionchanged:: 3.11 Passing coroutine objects to ``wait()`` directly is forbidden. + .. versionchanged:: 3.12 + Added support for generators yielding tasks. + + .. function:: as_completed(aws, *, timeout=None) Run :ref:`awaitable objects <asyncio-awaitables>` in the *aws* @@ -806,9 +869,6 @@ Waiting Primitives Raises :exc:`TimeoutError` if the timeout occurs before all Futures are done. - .. versionchanged:: 3.10 - Removed the *loop* parameter. - Example:: for coro in as_completed(aws): @@ -822,6 +882,9 @@ Waiting Primitives Deprecation warning is emitted if not all awaitable objects in the *aws* iterable are Future-like objects and there is no running event loop. + .. versionchanged:: 3.12 + Added support for generators yielding tasks. + Running in Threads ================== @@ -953,10 +1016,17 @@ Introspection .. versionadded:: 3.7 +.. function:: iscoroutine(obj) + + Return ``True`` if *obj* is a coroutine object. + + .. versionadded:: 3.4 + + Task Object =========== -.. class:: Task(coro, *, loop=None, name=None) +.. class:: Task(coro, *, loop=None, name=None, context=None, eager_start=False) A :class:`Future-like <Future>` object that runs a Python :ref:`coroutine <coroutine>`. Not thread-safe. @@ -991,9 +1061,17 @@ Task Object APIs except :meth:`Future.set_result` and :meth:`Future.set_exception`. 
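Referring back to the :class:`Timeout` context manager documented above, its ``when()``, ``reschedule()`` and ``expired()`` methods can be combined as in this sketch (assuming Python 3.11+; the delays are arbitrary)::

    import asyncio

    async def main():
        try:
            async with asyncio.timeout(None) as cm:    # no deadline yet
                print(cm.when())                       # None
                loop = asyncio.get_running_loop()
                cm.reschedule(loop.time() + 0.5)       # absolute deadline
                await asyncio.sleep(10)                # cancelled at the deadline
        except TimeoutError:
            print("timed out, expired =", cm.expired())   # True

    asyncio.run(main())

As noted earlier, the :exc:`TimeoutError` can only be caught *outside* the ``async with`` block.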
- Tasks support the :mod:`contextvars` module. When a Task - is created it copies the current context and later runs its - coroutine in the copied context. + An optional keyword-only *context* argument allows specifying a + custom :class:`contextvars.Context` for the *coro* to run in. + If no *context* is provided, the Task copies the current context + and later runs its coroutine in the copied context. + + An optional keyword-only *eager_start* argument allows eagerly starting + the execution of the :class:`asyncio.Task` at task creation time. + If set to ``True`` and the event loop is running, the task will start + executing the coroutine immediately, until the first time the coroutine + blocks. If the coroutine returns or raises without blocking, the task + will be finished eagerly and will skip scheduling to the event loop. .. versionchanged:: 3.7 Added support for the :mod:`contextvars` module. @@ -1005,6 +1083,12 @@ Task Object Deprecation warning is emitted if *loop* is not specified and there is no running event loop. + .. versionchanged:: 3.11 + Added the *context* parameter. + + .. versionchanged:: 3.12 + Added the *eager_start* parameter. + .. method:: done() Return ``True`` if the Task is *done*. @@ -1089,14 +1173,23 @@ Task Object The *limit* argument is passed to :meth:`get_stack` directly. The *file* argument is an I/O stream to which the output - is written; by default output is written to :data:`sys.stderr`. + is written; by default output is written to :data:`sys.stdout`. .. method:: get_coro() Return the coroutine object wrapped by the :class:`Task`. + .. note:: + + This will return ``None`` for Tasks which have already + completed eagerly. See the :ref:`Eager Task Factory <eager-task-factory>`. + .. versionadded:: 3.8 + .. versionchanged:: 3.12 + + Newly added eager task execution means result may be ``None``. + .. method:: get_context() Return the :class:`contextvars.Context` object @@ -1139,7 +1232,9 @@ Task Object Therefore, unlike :meth:`Future.cancel`, :meth:`Task.cancel` does not guarantee that the Task will be cancelled, although suppressing cancellation completely is not common and is actively - discouraged. + discouraged. Should the coroutine nevertheless decide to suppress + the cancellation, it needs to call :meth:`Task.uncancel` in addition + to catching the exception. .. versionchanged:: 3.9 Added the *msg* parameter. @@ -1229,6 +1324,10 @@ Task Object with :meth:`uncancel`. :class:`TaskGroup` context managers use :func:`uncancel` in a similar fashion. + If end-user code is, for some reason, suppresing cancellation by + catching :exc:`CancelledError`, it needs to call this method to remove + the cancellation state. + .. 
method:: cancelling() Return the number of pending cancellation requests to this Task, i.e., diff --git a/Doc/library/asyncio.rst b/Doc/library/asyncio.rst index b71006e32b2b896..5f33c6813e74c0b 100644 --- a/Doc/library/asyncio.rst +++ b/Doc/library/asyncio.rst @@ -46,9 +46,9 @@ Additionally, there are **low-level** APIs for *library and framework developers* to: * create and manage :ref:`event loops <asyncio-event-loop>`, which - provide asynchronous APIs for :meth:`networking <loop.create_server>`, - running :meth:`subprocesses <loop.subprocess_exec>`, - handling :meth:`OS signals <loop.add_signal_handler>`, etc; + provide asynchronous APIs for :ref:`networking <loop_create_server>`, + running :ref:`subprocesses <loop_subprocess_exec>`, + handling :ref:`OS signals <loop_add_signal_handler>`, etc; * implement efficient protocols using :ref:`transports <asyncio-transports-protocols>`; @@ -56,6 +56,20 @@ Additionally, there are **low-level** APIs for * :ref:`bridge <asyncio-futures>` callback-based libraries and code with async/await syntax. +.. _asyncio-cli: + +You can experiment with an ``asyncio`` concurrent context in the REPL: + +.. code-block:: pycon + + $ python -m asyncio + asyncio REPL ... + Use "await" directly instead of "asyncio.run()". + Type "help", "copyright", "credits" or "license" for more information. + >>> import asyncio + >>> await asyncio.sleep(10, result='hello') + 'hello' + .. include:: ../includes/wasm-notavail.rst .. We use the "rubric" directive here to avoid creating diff --git a/Doc/library/asyncore.rst b/Doc/library/asyncore.rst deleted file mode 100644 index a3a4e90d05277e8..000000000000000 --- a/Doc/library/asyncore.rst +++ /dev/null @@ -1,365 +0,0 @@ -:mod:`asyncore` --- Asynchronous socket handler -=============================================== - -.. module:: asyncore - :synopsis: A base class for developing asynchronous socket handling - services. - :deprecated: - -.. moduleauthor:: Sam Rushing <rushing@nightmare.com> -.. sectionauthor:: Christopher Petrilli <petrilli@amber.org> -.. sectionauthor:: Steve Holden <sholden@holdenweb.com> -.. heavily adapted from original documentation by Sam Rushing - -**Source code:** :source:`Lib/asyncore.py` - -.. deprecated-removed:: 3.6 3.12 - The :mod:`asyncore` module is deprecated - (see :pep:`PEP 594 <594#asyncore>` for details). - Please use :mod:`asyncio` instead. - --------------- - -.. note:: - - This module exists for backwards compatibility only. For new code we - recommend using :mod:`asyncio`. - -This module provides the basic infrastructure for writing asynchronous socket -service clients and servers. - -.. include:: ../includes/wasm-notavail.rst - -There are only two ways to have a program on a single processor do "more than -one thing at a time." Multi-threaded programming is the simplest and most -popular way to do it, but there is another very different technique, that lets -you have nearly all the advantages of multi-threading, without actually using -multiple threads. It's really only practical if your program is largely I/O -bound. If your program is processor bound, then pre-emptive scheduled threads -are probably what you really need. Network servers are rarely processor -bound, however. - -If your operating system supports the :c:func:`select` system call in its I/O -library (and nearly all do), then you can use it to juggle multiple -communication channels at once; doing other work while your I/O is taking -place in the "background." 
Although this strategy can seem strange and -complex, especially at first, it is in many ways easier to understand and -control than multi-threaded programming. The :mod:`asyncore` module solves -many of the difficult problems for you, making the task of building -sophisticated high-performance network servers and clients a snap. For -"conversational" applications and protocols the companion :mod:`asynchat` -module is invaluable. - -The basic idea behind both modules is to create one or more network -*channels*, instances of class :class:`asyncore.dispatcher` and -:class:`asynchat.async_chat`. Creating the channels adds them to a global -map, used by the :func:`loop` function if you do not provide it with your own -*map*. - -Once the initial channel(s) is(are) created, calling the :func:`loop` function -activates channel service, which continues until the last channel (including -any that have been added to the map during asynchronous service) is closed. - - -.. function:: loop([timeout[, use_poll[, map[,count]]]]) - - Enter a polling loop that terminates after count passes or all open - channels have been closed. All arguments are optional. The *count* - parameter defaults to ``None``, resulting in the loop terminating only when all - channels have been closed. The *timeout* argument sets the timeout - parameter for the appropriate :func:`~select.select` or :func:`~select.poll` - call, measured in seconds; the default is 30 seconds. The *use_poll* - parameter, if true, indicates that :func:`~select.poll` should be used in - preference to :func:`~select.select` (the default is ``False``). - - The *map* parameter is a dictionary whose items are the channels to watch. - As channels are closed they are deleted from their map. If *map* is - omitted, a global map is used. Channels (instances of - :class:`asyncore.dispatcher`, :class:`asynchat.async_chat` and subclasses - thereof) can freely be mixed in the map. - - -.. class:: dispatcher() - - The :class:`dispatcher` class is a thin wrapper around a low-level socket - object. To make it more useful, it has a few methods for event-handling - which are called from the asynchronous loop. Otherwise, it can be treated - as a normal non-blocking socket object. - - The firing of low-level events at certain times or in certain connection - states tells the asynchronous loop that certain higher-level events have - taken place. For example, if we have asked for a socket to connect to - another host, we know that the connection has been made when the socket - becomes writable for the first time (at this point you know that you may - write to it with the expectation of success). 
The implied higher-level - events are: - - +----------------------+----------------------------------------+ - | Event | Description | - +======================+========================================+ - | ``handle_connect()`` | Implied by the first read or write | - | | event | - +----------------------+----------------------------------------+ - | ``handle_close()`` | Implied by a read event with no data | - | | available | - +----------------------+----------------------------------------+ - | ``handle_accepted()``| Implied by a read event on a listening | - | | socket | - +----------------------+----------------------------------------+ - - During asynchronous processing, each mapped channel's :meth:`readable` and - :meth:`writable` methods are used to determine whether the channel's socket - should be added to the list of channels :c:func:`select`\ ed or - :c:func:`poll`\ ed for read and write events. - - Thus, the set of channel events is larger than the basic socket events. The - full set of methods that can be overridden in your subclass follows: - - - .. method:: handle_read() - - Called when the asynchronous loop detects that a :meth:`read` call on the - channel's socket will succeed. - - - .. method:: handle_write() - - Called when the asynchronous loop detects that a writable socket can be - written. Often this method will implement the necessary buffering for - performance. For example:: - - def handle_write(self): - sent = self.send(self.buffer) - self.buffer = self.buffer[sent:] - - - .. method:: handle_expt() - - Called when there is out of band (OOB) data for a socket connection. This - will almost never happen, as OOB is tenuously supported and rarely used. - - - .. method:: handle_connect() - - Called when the active opener's socket actually makes a connection. Might - send a "welcome" banner, or initiate a protocol negotiation with the - remote endpoint, for example. - - - .. method:: handle_close() - - Called when the socket is closed. - - - .. method:: handle_error() - - Called when an exception is raised and not otherwise handled. The default - version prints a condensed traceback. - - - .. method:: handle_accept() - - Called on listening channels (passive openers) when a connection can be - established with a new remote endpoint that has issued a :meth:`connect` - call for the local endpoint. Deprecated in version 3.2; use - :meth:`handle_accepted` instead. - - .. deprecated:: 3.2 - - - .. method:: handle_accepted(sock, addr) - - Called on listening channels (passive openers) when a connection has been - established with a new remote endpoint that has issued a :meth:`connect` - call for the local endpoint. *sock* is a *new* socket object usable to - send and receive data on the connection, and *addr* is the address - bound to the socket on the other end of the connection. - - .. versionadded:: 3.2 - - - .. method:: readable() - - Called each time around the asynchronous loop to determine whether a - channel's socket should be added to the list on which read events can - occur. The default method simply returns ``True``, indicating that by - default, all channels will be interested in read events. - - - .. method:: writable() - - Called each time around the asynchronous loop to determine whether a - channel's socket should be added to the list on which write events can - occur. The default method simply returns ``True``, indicating that by - default, all channels will be interested in write events. 
- - - In addition, each channel delegates or extends many of the socket methods. - Most of these are nearly identical to their socket partners. - - - .. method:: create_socket(family=socket.AF_INET, type=socket.SOCK_STREAM) - - This is identical to the creation of a normal socket, and will use the - same options for creation. Refer to the :mod:`socket` documentation for - information on creating sockets. - - .. versionchanged:: 3.3 - *family* and *type* arguments can be omitted. - - - .. method:: connect(address) - - As with the normal socket object, *address* is a tuple with the first - element the host to connect to, and the second the port number. - - - .. method:: send(data) - - Send *data* to the remote end-point of the socket. - - - .. method:: recv(buffer_size) - - Read at most *buffer_size* bytes from the socket's remote end-point. An - empty bytes object implies that the channel has been closed from the - other end. - - Note that :meth:`recv` may raise :exc:`BlockingIOError` , even though - :func:`select.select` or :func:`select.poll` has reported the socket - ready for reading. - - - .. method:: listen(backlog) - - Listen for connections made to the socket. The *backlog* argument - specifies the maximum number of queued connections and should be at least - 1; the maximum value is system-dependent (usually 5). - - - .. method:: bind(address) - - Bind the socket to *address*. The socket must not already be bound. (The - format of *address* depends on the address family --- refer to the - :mod:`socket` documentation for more information.) To mark - the socket as re-usable (setting the :const:`SO_REUSEADDR` option), call - the :class:`dispatcher` object's :meth:`set_reuse_addr` method. - - - .. method:: accept() - - Accept a connection. The socket must be bound to an address and listening - for connections. The return value can be either ``None`` or a pair - ``(conn, address)`` where *conn* is a *new* socket object usable to send - and receive data on the connection, and *address* is the address bound to - the socket on the other end of the connection. - When ``None`` is returned it means the connection didn't take place, in - which case the server should just ignore this event and keep listening - for further incoming connections. - - - .. method:: close() - - Close the socket. All future operations on the socket object will fail. - The remote end-point will receive no more data (after queued data is - flushed). Sockets are automatically closed when they are - garbage-collected. - - -.. class:: dispatcher_with_send() - - A :class:`dispatcher` subclass which adds simple buffered output capability, - useful for simple clients. For more sophisticated usage use - :class:`asynchat.async_chat`. - -.. class:: file_dispatcher() - - A file_dispatcher takes a file descriptor or :term:`file object` along - with an optional map argument and wraps it for use with the :c:func:`poll` - or :c:func:`loop` functions. If provided a file object or anything with a - :c:func:`fileno` method, that method will be called and passed to the - :class:`file_wrapper` constructor. - - .. availability:: Unix. - -.. class:: file_wrapper() - - A file_wrapper takes an integer file descriptor and calls :func:`os.dup` to - duplicate the handle so that the original handle may be closed independently - of the file_wrapper. This class implements sufficient methods to emulate a - socket for use by the :class:`file_dispatcher` class. - - .. availability:: Unix. - - -.. 
_asyncore-example-1: - -asyncore Example basic HTTP client ----------------------------------- - -Here is a very basic HTTP client that uses the :class:`dispatcher` class to -implement its socket handling:: - - import asyncore - - class HTTPClient(asyncore.dispatcher): - - def __init__(self, host, path): - asyncore.dispatcher.__init__(self) - self.create_socket() - self.connect( (host, 80) ) - self.buffer = bytes('GET %s HTTP/1.0\r\nHost: %s\r\n\r\n' % - (path, host), 'ascii') - - def handle_connect(self): - pass - - def handle_close(self): - self.close() - - def handle_read(self): - print(self.recv(8192)) - - def writable(self): - return (len(self.buffer) > 0) - - def handle_write(self): - sent = self.send(self.buffer) - self.buffer = self.buffer[sent:] - - - client = HTTPClient('www.python.org', '/') - asyncore.loop() - -.. _asyncore-example-2: - -asyncore Example basic echo server ----------------------------------- - -Here is a basic echo server that uses the :class:`dispatcher` class to accept -connections and dispatches the incoming connections to a handler:: - - import asyncore - - class EchoHandler(asyncore.dispatcher_with_send): - - def handle_read(self): - data = self.recv(8192) - if data: - self.send(data) - - class EchoServer(asyncore.dispatcher): - - def __init__(self, host, port): - asyncore.dispatcher.__init__(self) - self.create_socket() - self.set_reuse_addr() - self.bind((host, port)) - self.listen(5) - - def handle_accepted(self, sock, addr): - print('Incoming connection from %s' % repr(addr)) - handler = EchoHandler(sock) - - server = EchoServer('localhost', 8080) - asyncore.loop() diff --git a/Doc/library/atexit.rst b/Doc/library/atexit.rst index f7f038107d11fe5..3dbef69580d9b30 100644 --- a/Doc/library/atexit.rst +++ b/Doc/library/atexit.rst @@ -20,6 +20,9 @@ at interpreter termination time they will be run in the order ``C``, ``B``, program is killed by a signal not handled by Python, when a Python fatal internal error is detected, or when :func:`os._exit` is called. +**Note:** The effect of registering or unregistering functions from within +a cleanup function is undefined. + .. versionchanged:: 3.7 When used with C-API subinterpreters, registered functions are local to the interpreter they were registered in. @@ -45,6 +48,16 @@ internal error is detected, or when :func:`os._exit` is called. This function returns *func*, which makes it possible to use it as a decorator. + .. warning:: + Starting new threads or calling :func:`os.fork` from a registered + function can lead to race condition between the main Python + runtime thread freeing thread states while internal :mod:`threading` + routines or the new process try to use that state. This can lead to + crashes rather than clean shutdown. + + .. versionchanged:: 3.12 + Attempts to start a new thread or :func:`os.fork` a new process + in a registered function now leads to :exc:`RuntimeError`. .. function:: unregister(func) diff --git a/Doc/library/audioop.rst b/Doc/library/audioop.rst deleted file mode 100644 index 1f96575d08f5b19..000000000000000 --- a/Doc/library/audioop.rst +++ /dev/null @@ -1,287 +0,0 @@ -:mod:`audioop` --- Manipulate raw audio data -============================================ - -.. module:: audioop - :synopsis: Manipulate raw audio data. - :deprecated: - -.. deprecated-removed:: 3.11 3.13 - The :mod:`audioop` module is deprecated - (see :pep:`PEP 594 <594#audioop>` for details). - --------------- - -The :mod:`audioop` module contains some useful operations on sound fragments. 
-It operates on sound fragments consisting of signed integer samples 8, 16, 24 -or 32 bits wide, stored in :term:`bytes-like objects <bytes-like object>`. All scalar items are -integers, unless specified otherwise. - -.. versionchanged:: 3.4 - Support for 24-bit samples was added. - All functions now accept any :term:`bytes-like object`. - String input now results in an immediate error. - -.. index:: - single: Intel/DVI ADPCM - single: ADPCM, Intel/DVI - single: a-LAW - single: u-LAW - -This module provides support for a-LAW, u-LAW and Intel/DVI ADPCM encodings. - -.. This para is mostly here to provide an excuse for the index entries... - -A few of the more complicated operations only take 16-bit samples, otherwise the -sample size (in bytes) is always a parameter of the operation. - -The module defines the following variables and functions: - - -.. exception:: error - - This exception is raised on all errors, such as unknown number of bytes per - sample, etc. - - -.. function:: add(fragment1, fragment2, width) - - Return a fragment which is the addition of the two samples passed as parameters. - *width* is the sample width in bytes, either ``1``, ``2``, ``3`` or ``4``. Both - fragments should have the same length. Samples are truncated in case of overflow. - - -.. function:: adpcm2lin(adpcmfragment, width, state) - - Decode an Intel/DVI ADPCM coded fragment to a linear fragment. See the - description of :func:`lin2adpcm` for details on ADPCM coding. Return a tuple - ``(sample, newstate)`` where the sample has the width specified in *width*. - - -.. function:: alaw2lin(fragment, width) - - Convert sound fragments in a-LAW encoding to linearly encoded sound fragments. - a-LAW encoding always uses 8 bits samples, so *width* refers only to the sample - width of the output fragment here. - - -.. function:: avg(fragment, width) - - Return the average over all samples in the fragment. - - -.. function:: avgpp(fragment, width) - - Return the average peak-peak value over all samples in the fragment. No - filtering is done, so the usefulness of this routine is questionable. - - -.. function:: bias(fragment, width, bias) - - Return a fragment that is the original fragment with a bias added to each - sample. Samples wrap around in case of overflow. - - -.. function:: byteswap(fragment, width) - - "Byteswap" all samples in a fragment and returns the modified fragment. - Converts big-endian samples to little-endian and vice versa. - - .. versionadded:: 3.4 - - -.. function:: cross(fragment, width) - - Return the number of zero crossings in the fragment passed as an argument. - - -.. function:: findfactor(fragment, reference) - - Return a factor *F* such that ``rms(add(fragment, mul(reference, -F)))`` is - minimal, i.e., return the factor with which you should multiply *reference* to - make it match as well as possible to *fragment*. The fragments should both - contain 2-byte samples. - - The time taken by this routine is proportional to ``len(fragment)``. - - -.. function:: findfit(fragment, reference) - - Try to match *reference* as well as possible to a portion of *fragment* (which - should be the longer fragment). This is (conceptually) done by taking slices - out of *fragment*, using :func:`findfactor` to compute the best match, and - minimizing the result. The fragments should both contain 2-byte samples. 
- Return a tuple ``(offset, factor)`` where *offset* is the (integer) offset into - *fragment* where the optimal match started and *factor* is the (floating-point) - factor as per :func:`findfactor`. - - -.. function:: findmax(fragment, length) - - Search *fragment* for a slice of length *length* samples (not bytes!) with - maximum energy, i.e., return *i* for which ``rms(fragment[i*2:(i+length)*2])`` - is maximal. The fragments should both contain 2-byte samples. - - The routine takes time proportional to ``len(fragment)``. - - -.. function:: getsample(fragment, width, index) - - Return the value of sample *index* from the fragment. - - -.. function:: lin2adpcm(fragment, width, state) - - Convert samples to 4 bit Intel/DVI ADPCM encoding. ADPCM coding is an adaptive - coding scheme, whereby each 4 bit number is the difference between one sample - and the next, divided by a (varying) step. The Intel/DVI ADPCM algorithm has - been selected for use by the IMA, so it may well become a standard. - - *state* is a tuple containing the state of the coder. The coder returns a tuple - ``(adpcmfrag, newstate)``, and the *newstate* should be passed to the next call - of :func:`lin2adpcm`. In the initial call, ``None`` can be passed as the state. - *adpcmfrag* is the ADPCM coded fragment packed 2 4-bit values per byte. - - -.. function:: lin2alaw(fragment, width) - - Convert samples in the audio fragment to a-LAW encoding and return this as a - bytes object. a-LAW is an audio encoding format whereby you get a dynamic - range of about 13 bits using only 8 bit samples. It is used by the Sun audio - hardware, among others. - - -.. function:: lin2lin(fragment, width, newwidth) - - Convert samples between 1-, 2-, 3- and 4-byte formats. - - .. note:: - - In some audio formats, such as .WAV files, 16, 24 and 32 bit samples are - signed, but 8 bit samples are unsigned. So when converting to 8 bit wide - samples for these formats, you need to also add 128 to the result:: - - new_frames = audioop.lin2lin(frames, old_width, 1) - new_frames = audioop.bias(new_frames, 1, 128) - - The same, in reverse, has to be applied when converting from 8 to 16, 24 - or 32 bit width samples. - - -.. function:: lin2ulaw(fragment, width) - - Convert samples in the audio fragment to u-LAW encoding and return this as a - bytes object. u-LAW is an audio encoding format whereby you get a dynamic - range of about 14 bits using only 8 bit samples. It is used by the Sun audio - hardware, among others. - - -.. function:: max(fragment, width) - - Return the maximum of the *absolute value* of all samples in a fragment. - - -.. function:: maxpp(fragment, width) - - Return the maximum peak-peak value in the sound fragment. - - -.. function:: minmax(fragment, width) - - Return a tuple consisting of the minimum and maximum values of all samples in - the sound fragment. - - -.. function:: mul(fragment, width, factor) - - Return a fragment that has all samples in the original fragment multiplied by - the floating-point value *factor*. Samples are truncated in case of overflow. - - -.. function:: ratecv(fragment, width, nchannels, inrate, outrate, state[, weightA[, weightB]]) - - Convert the frame rate of the input fragment. - - *state* is a tuple containing the state of the converter. The converter returns - a tuple ``(newfragment, newstate)``, and *newstate* should be passed to the next - call of :func:`ratecv`. The initial call should pass ``None`` as the state. 
- - The *weightA* and *weightB* arguments are parameters for a simple digital filter - and default to ``1`` and ``0`` respectively. - - -.. function:: reverse(fragment, width) - - Reverse the samples in a fragment and returns the modified fragment. - - -.. function:: rms(fragment, width) - - Return the root-mean-square of the fragment, i.e. ``sqrt(sum(S_i^2)/n)``. - - This is a measure of the power in an audio signal. - - -.. function:: tomono(fragment, width, lfactor, rfactor) - - Convert a stereo fragment to a mono fragment. The left channel is multiplied by - *lfactor* and the right channel by *rfactor* before adding the two channels to - give a mono signal. - - -.. function:: tostereo(fragment, width, lfactor, rfactor) - - Generate a stereo fragment from a mono fragment. Each pair of samples in the - stereo fragment are computed from the mono sample, whereby left channel samples - are multiplied by *lfactor* and right channel samples by *rfactor*. - - -.. function:: ulaw2lin(fragment, width) - - Convert sound fragments in u-LAW encoding to linearly encoded sound fragments. - u-LAW encoding always uses 8 bits samples, so *width* refers only to the sample - width of the output fragment here. - -Note that operations such as :func:`.mul` or :func:`.max` make no distinction -between mono and stereo fragments, i.e. all samples are treated equal. If this -is a problem the stereo fragment should be split into two mono fragments first -and recombined later. Here is an example of how to do that:: - - def mul_stereo(sample, width, lfactor, rfactor): - lsample = audioop.tomono(sample, width, 1, 0) - rsample = audioop.tomono(sample, width, 0, 1) - lsample = audioop.mul(lsample, width, lfactor) - rsample = audioop.mul(rsample, width, rfactor) - lsample = audioop.tostereo(lsample, width, 1, 0) - rsample = audioop.tostereo(rsample, width, 0, 1) - return audioop.add(lsample, rsample, width) - -If you use the ADPCM coder to build network packets and you want your protocol -to be stateless (i.e. to be able to tolerate packet loss) you should not only -transmit the data but also the state. Note that you should send the *initial* -state (the one you passed to :func:`lin2adpcm`) along to the decoder, not the -final state (as returned by the coder). If you want to use -:class:`struct.Struct` to store the state in binary you can code the first -element (the predicted value) in 16 bits and the second (the delta index) in 8. - -The ADPCM coders have never been tried against other ADPCM coders, only against -themselves. It could well be that I misinterpreted the standards in which case -they will not be interoperable with the respective standards. - -The :func:`find\*` routines might look a bit funny at first sight. They are -primarily meant to do echo cancellation. 
A reasonably fast way to do this is to -pick the most energetic piece of the output sample, locate that in the input -sample and subtract the whole output sample from the input sample:: - - def echocancel(outputdata, inputdata): - pos = audioop.findmax(outputdata, 800) # one tenth second - out_test = outputdata[pos*2:] - in_test = inputdata[pos*2:] - ipos, factor = audioop.findfit(in_test, out_test) - # Optional (for better cancellation): - # factor = audioop.findfactor(in_test[ipos*2:ipos*2+len(out_test)], - # out_test) - prefill = '\0'*(pos+ipos)*2 - postfill = '\0'*(len(inputdata)-len(prefill)-len(outputdata)) - outputdata = prefill + audioop.mul(outputdata, 2, -factor) + postfill - return audioop.add(inputdata, outputdata, 2) - diff --git a/Doc/library/base64.rst b/Doc/library/base64.rst index a02ba739146aaf3..d5b6af8c1928eff 100644 --- a/Doc/library/base64.rst +++ b/Doc/library/base64.rst @@ -53,11 +53,13 @@ The modern interface provides: Encode the :term:`bytes-like object` *s* using Base64 and return the encoded :class:`bytes`. - Optional *altchars* must be a :term:`bytes-like object` of at least - length 2 (additional characters are ignored) which specifies an alternative - alphabet for the ``+`` and ``/`` characters. This allows an application to e.g. - generate URL or filesystem safe Base64 strings. The default is ``None``, for - which the standard Base64 alphabet is used. + Optional *altchars* must be a :term:`bytes-like object` of length 2 which + specifies an alternative alphabet for the ``+`` and ``/`` characters. + This allows an application to e.g. generate URL or filesystem safe Base64 + strings. The default is ``None``, for which the standard Base64 alphabet is used. + + May assert or raise a :exc:`ValueError` if the length of *altchars* is not 2. Raises a + :exc:`TypeError` if *altchars* is not a :term:`bytes-like object`. .. function:: b64decode(s, altchars=None, validate=False) @@ -65,9 +67,9 @@ The modern interface provides: Decode the Base64 encoded :term:`bytes-like object` or ASCII string *s* and return the decoded :class:`bytes`. - Optional *altchars* must be a :term:`bytes-like object` or ASCII string of - at least length 2 (additional characters are ignored) which specifies the - alternative alphabet used instead of the ``+`` and ``/`` characters. + Optional *altchars* must be a :term:`bytes-like object` or ASCII string + of length 2 which specifies the alternative alphabet used instead of the + ``+`` and ``/`` characters. A :exc:`binascii.Error` exception is raised if *s* is incorrectly padded. @@ -80,6 +82,7 @@ The modern interface provides: For more information about the strict base64 check, see :func:`binascii.a2b_base64` + May assert or raise a :exc:`ValueError` if the length of *altchars* is not 2. .. function:: standard_b64encode(s) diff --git a/Doc/library/binascii.rst b/Doc/library/binascii.rst index 5a0815faa38eac2..39fabb59bb19844 100644 --- a/Doc/library/binascii.rst +++ b/Doc/library/binascii.rst @@ -6,14 +6,13 @@ representations. .. index:: - module: uu - module: base64 + pair: module; base64 -------------- The :mod:`binascii` module contains a number of methods to convert between binary and various ASCII-encoded binary representations. Normally, you will not -use these functions directly but use wrapper modules like :mod:`uu` or +use these functions directly but use wrapper modules like :mod:`base64` instead. The :mod:`binascii` module contains low-level functions written in C for greater speed that are used by the higher-level modules. 
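To make the tightened *altchars* contract in the :mod:`base64` changes above concrete, a short round-trip sketch (the input bytes are chosen so that the alternative characters actually appear in the output)::

    >>> import base64
    >>> base64.b64encode(b"\xfb\xff", altchars=b"-_")    # URL- and filesystem-safe
    b'-_8='
    >>> base64.b64decode(b"-_8=", altchars=b"-_")
    b'\xfb\xff'

Per the updated documentation, an *altchars* value whose length is not exactly 2 is now rejected (with an assertion or :exc:`ValueError`) rather than having the extra characters silently ignored.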
@@ -58,10 +57,11 @@ The :mod:`binascii` module defines the following functions: data will raise :exc:`binascii.Error`. Valid base64: - * Conforms to :rfc:`3548`. - * Contains only characters from the base64 alphabet. - * Contains no excess data after padding (including excess padding, newlines, etc.). - * Does not start with a padding. + + * Conforms to :rfc:`3548`. + * Contains only characters from the base64 alphabet. + * Contains no excess data after padding (including excess padding, newlines, etc.). + * Does not start with a padding. .. versionchanged:: 3.11 Added the *strict_mode* parameter. @@ -179,8 +179,5 @@ The :mod:`binascii` module defines the following functions: Support for RFC compliant base64-style encoding in base 16, 32, 64, and 85. - Module :mod:`uu` - Support for UU encoding used on Unix. - Module :mod:`quopri` Support for quoted-printable encoding used in MIME email messages. diff --git a/Doc/library/bisect.rst b/Doc/library/bisect.rst index b85564f17866e0b..8022c596f0af97c 100644 --- a/Doc/library/bisect.rst +++ b/Doc/library/bisect.rst @@ -24,6 +24,8 @@ method to determine whether a value has been found. Instead, the functions only call the :meth:`__lt__` method and will return an insertion point between values in an array. +.. _bisect functions: + The following functions are provided: @@ -55,7 +57,7 @@ The following functions are provided: .. function:: bisect_right(a, x, lo=0, hi=len(a), *, key=None) bisect(a, x, lo=0, hi=len(a), *, key=None) - Similar to :func:`bisect_left`, but returns an insertion point which comes + Similar to :py:func:`~bisect.bisect_left`, but returns an insertion point which comes after (to the right of) any existing entries of *x* in *a*. The returned insertion point *ip* partitions the array *a* into two slices @@ -70,7 +72,7 @@ The following functions are provided: Insert *x* in *a* in sorted order. - This function first runs :func:`bisect_left` to locate an insertion point. + This function first runs :py:func:`~bisect.bisect_left` to locate an insertion point. Next, it runs the :meth:`insert` method on *a* to insert *x* at the appropriate position to maintain sort order. @@ -87,10 +89,10 @@ The following functions are provided: .. function:: insort_right(a, x, lo=0, hi=len(a), *, key=None) insort(a, x, lo=0, hi=len(a), *, key=None) - Similar to :func:`insort_left`, but inserting *x* in *a* after any existing + Similar to :py:func:`~bisect.insort_left`, but inserting *x* in *a* after any existing entries of *x*. - This function first runs :func:`bisect_right` to locate an insertion point. + This function first runs :py:func:`~bisect.bisect_right` to locate an insertion point. Next, it runs the :meth:`insert` method on *a* to insert *x* at the appropriate position to maintain sort order. @@ -120,7 +122,7 @@ thoughts in mind: they are used. Consequently, if the search functions are used in a loop, the key function may be called again and again on the same array elements. If the key function isn't fast, consider wrapping it with - :func:`functools.cache` to avoid duplicate computations. Alternatively, + :py:func:`functools.cache` to avoid duplicate computations. Alternatively, consider searching an array of precomputed keys to locate the insertion point (as shown in the examples section below). 
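The :func:`functools.cache` advice in the :mod:`bisect` notes above can be illustrated with a small sketch; ``slow_key`` and the sample data are invented for the example::

    import bisect
    from functools import cache

    @cache
    def slow_key(word):
        # Stand-in for an expensive key function; caching means repeated
        # bisect calls do not recompute it for the same elements.
        return (len(word), word.lower())

    words = sorted(["pear", "Fig", "apple", "plum"], key=slow_key)
    # The key function is applied to list elements, not to the needle,
    # so pass the precomputed key value as the search item.
    i = bisect.bisect_left(words, slow_key("kiwi"), key=slow_key)
    words.insert(i, "kiwi")

Precomputing a separate array of keys, as the bisect examples section suggests, avoids the repeated key calls entirely and is often the better choice for large lists.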
@@ -140,7 +142,7 @@ thoughts in mind: Searching Sorted Lists ---------------------- -The above :func:`bisect` functions are useful for finding insertion points but +The above `bisect functions`_ are useful for finding insertion points but can be tricky or awkward to use for common searching tasks. The following five functions show how to transform them into the standard lookups for sorted lists:: @@ -186,8 +188,8 @@ Examples .. _bisect-example: -The :func:`bisect` function can be useful for numeric table lookups. This -example uses :func:`bisect` to look up a letter grade for an exam score (say) +The :py:func:`~bisect.bisect` function can be useful for numeric table lookups. This +example uses :py:func:`~bisect.bisect` to look up a letter grade for an exam score (say) based on a set of ordered numeric breakpoints: 90 and up is an 'A', 80 to 89 is a 'B', and so on:: @@ -198,8 +200,8 @@ a 'B', and so on:: >>> [grade(score) for score in [33, 99, 77, 70, 89, 90, 100]] ['F', 'A', 'C', 'C', 'B', 'A', 'A'] -The :func:`bisect` and :func:`insort` functions also work with lists of -tuples. The *key* argument can serve to extract the field used for ordering +The :py:func:`~bisect.bisect` and :py:func:`~bisect.insort` functions also work with +lists of tuples. The *key* argument can serve to extract the field used for ordering records in a table:: >>> from collections import namedtuple @@ -210,10 +212,10 @@ records in a table:: >>> Movie = namedtuple('Movie', ('name', 'released', 'director')) >>> movies = [ - ... Movie('Jaws', 1975, 'Speilberg'), + ... Movie('Jaws', 1975, 'Spielberg'), ... Movie('Titanic', 1997, 'Cameron'), ... Movie('The Birds', 1963, 'Hitchcock'), - ... Movie('Aliens', 1986, 'Scott') + ... Movie('Aliens', 1986, 'Cameron') ... ] >>> # Find the first movie released after 1960 @@ -228,8 +230,8 @@ records in a table:: >>> pprint(movies) [Movie(name='The Birds', released=1963, director='Hitchcock'), Movie(name='Love Story', released=1970, director='Hiller'), - Movie(name='Jaws', released=1975, director='Speilberg'), - Movie(name='Aliens', released=1986, director='Scott'), + Movie(name='Jaws', released=1975, director='Spielberg'), + Movie(name='Aliens', released=1986, director='Cameron'), Movie(name='Titanic', released=1997, director='Cameron')] If the key function is expensive, it is possible to avoid repeated function diff --git a/Doc/library/bz2.rst b/Doc/library/bz2.rst index ae5a1598f84b44e..6a95a4a6e8d1832 100644 --- a/Doc/library/bz2.rst +++ b/Doc/library/bz2.rst @@ -87,10 +87,11 @@ The :mod:`bz2` module contains: compressed streams. :class:`BZ2File` provides all of the members specified by the - :class:`io.BufferedIOBase`, except for :meth:`detach` and :meth:`truncate`. + :class:`io.BufferedIOBase`, except for :meth:`~io.BufferedIOBase.detach` + and :meth:`~io.IOBase.truncate`. Iteration and the :keyword:`with` statement are supported. - :class:`BZ2File` also provides the following method: + :class:`BZ2File` also provides the following methods: .. method:: peek([n]) @@ -105,14 +106,52 @@ The :mod:`bz2` module contains: .. versionadded:: 3.3 + .. method:: fileno() + + Return the file descriptor for the underlying file. + + .. versionadded:: 3.3 + + .. method:: readable() + + Return whether the file was opened for reading. + + .. versionadded:: 3.3 + + .. method:: seekable() + + Return whether the file supports seeking. + + .. versionadded:: 3.3 + + .. method:: writable() + + Return whether the file was opened for writing. + + .. versionadded:: 3.3 + + .. 
method:: read1(size=-1) + + Read up to *size* uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns ``b''`` if the file is at EOF. + + .. versionadded:: 3.3 + + .. method:: readinto(b) + + Read bytes into *b*. + + Returns the number of bytes read (0 for EOF). + + .. versionadded:: 3.3 + .. versionchanged:: 3.1 Support for the :keyword:`with` statement was added. - .. versionchanged:: 3.3 - The :meth:`fileno`, :meth:`readable`, :meth:`seekable`, :meth:`writable`, - :meth:`read1` and :meth:`readinto` methods were added. - .. versionchanged:: 3.3 Support was added for *filename* being a :term:`file object` instead of an actual filename. @@ -320,9 +359,11 @@ Writing and reading a bzip2-compressed file in binary mode: >>> with bz2.open("myfile.bz2", "wb") as f: ... # Write compressed data to file ... unused = f.write(data) + ... >>> with bz2.open("myfile.bz2", "rb") as f: ... # Decompress data from file ... content = f.read() + ... >>> content == data # Check equality to original object after round-trip True diff --git a/Doc/library/calendar.rst b/Doc/library/calendar.rst index 66f59f0e2ced275..157a7537f97dc60 100644 --- a/Doc/library/calendar.rst +++ b/Doc/library/calendar.rst @@ -394,6 +394,29 @@ The :mod:`calendar` module exports the following data attributes: An array that represents the abbreviated days of the week in the current locale. +.. data:: MONDAY + TUESDAY + WEDNESDAY + THURSDAY + FRIDAY + SATURDAY + SUNDAY + + Aliases for the days of the week, + where ``MONDAY`` is ``0`` and ``SUNDAY`` is ``6``. + + .. versionadded:: 3.12 + + +.. class:: Day + + Enumeration defining days of the week as integer constants. + The members of this enumeration are exported to the module scope as + :data:`MONDAY` through :data:`SUNDAY`. + + .. versionadded:: 3.12 + + .. data:: month_name An array that represents the months of the year in the current locale. This @@ -407,15 +430,56 @@ The :mod:`calendar` module exports the following data attributes: locale. This follows normal convention of January being month number 1, so it has a length of 13 and ``month_abbr[0]`` is the empty string. -.. data:: MONDAY - TUESDAY - WEDNESDAY - THURSDAY - FRIDAY - SATURDAY - SUNDAY - Aliases for day numbers, where ``MONDAY`` is ``0`` and ``SUNDAY`` is ``6``. +.. data:: JANUARY + FEBRUARY + MARCH + APRIL + MAY + JUNE + JULY + AUGUST + SEPTEMBER + OCTOBER + NOVEMBER + DECEMBER + + Aliases for the months of the year, + where ``JANUARY`` is ``1`` and ``DECEMBER`` is ``12``. + + .. versionadded:: 3.12 + + +.. class:: Month + + Enumeration defining months of the year as integer constants. + The members of this enumeration are exported to the module scope as + :data:`JANUARY` through :data:`DECEMBER`. + + .. versionadded:: 3.12 + + +The :mod:`calendar` module defines the following exceptions: + +.. exception:: IllegalMonthError(month) + + A subclass of :exc:`ValueError`, + raised when the given month number is outside of the range 1-12 (inclusive). + + .. attribute:: month + + The invalid month number. + + +.. exception:: IllegalWeekdayError(weekday) + + A subclass of :exc:`ValueError`, + raised when the given weekday number is outside of the range 0-6 (inclusive). + + .. attribute:: weekday + + The invalid weekday number. + .. seealso:: @@ -425,3 +489,146 @@ The :mod:`calendar` module exports the following data attributes: Module :mod:`time` Low-level time related functions. + + +.. 
_calendar-cli: + +Command-Line Usage +------------------ + +.. versionadded:: 2.5 + +The :mod:`calendar` module can be executed as a script from the command line +to interactively print a calendar. + +.. code-block:: shell + + python -m calendar [-h] [-L LOCALE] [-e ENCODING] [-t {text,html}] + [-w WIDTH] [-l LINES] [-s SPACING] [-m MONTHS] [-c CSS] + [year] [month] + + +For example, to print a calendar for the year 2000: + +.. code-block:: console + + $ python -m calendar 2000 + 2000 + + January February March + Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su + 1 2 1 2 3 4 5 6 1 2 3 4 5 + 3 4 5 6 7 8 9 7 8 9 10 11 12 13 6 7 8 9 10 11 12 + 10 11 12 13 14 15 16 14 15 16 17 18 19 20 13 14 15 16 17 18 19 + 17 18 19 20 21 22 23 21 22 23 24 25 26 27 20 21 22 23 24 25 26 + 24 25 26 27 28 29 30 28 29 27 28 29 30 31 + 31 + + April May June + Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su + 1 2 1 2 3 4 5 6 7 1 2 3 4 + 3 4 5 6 7 8 9 8 9 10 11 12 13 14 5 6 7 8 9 10 11 + 10 11 12 13 14 15 16 15 16 17 18 19 20 21 12 13 14 15 16 17 18 + 17 18 19 20 21 22 23 22 23 24 25 26 27 28 19 20 21 22 23 24 25 + 24 25 26 27 28 29 30 29 30 31 26 27 28 29 30 + + July August September + Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su + 1 2 1 2 3 4 5 6 1 2 3 + 3 4 5 6 7 8 9 7 8 9 10 11 12 13 4 5 6 7 8 9 10 + 10 11 12 13 14 15 16 14 15 16 17 18 19 20 11 12 13 14 15 16 17 + 17 18 19 20 21 22 23 21 22 23 24 25 26 27 18 19 20 21 22 23 24 + 24 25 26 27 28 29 30 28 29 30 31 25 26 27 28 29 30 + 31 + + October November December + Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su + 1 1 2 3 4 5 1 2 3 + 2 3 4 5 6 7 8 6 7 8 9 10 11 12 4 5 6 7 8 9 10 + 9 10 11 12 13 14 15 13 14 15 16 17 18 19 11 12 13 14 15 16 17 + 16 17 18 19 20 21 22 20 21 22 23 24 25 26 18 19 20 21 22 23 24 + 23 24 25 26 27 28 29 27 28 29 30 25 26 27 28 29 30 31 + 30 31 + + +The following options are accepted: + +.. program:: calendar + + +.. option:: --help, -h + + Show the help message and exit. + + +.. option:: --locale LOCALE, -L LOCALE + + The locale to use for month and weekday names. + Defaults to English. + + +.. option:: --encoding ENCODING, -e ENCODING + + The encoding to use for output. + :option:`--encoding` is required if :option:`--locale` is set. + + +.. option:: --type {text,html}, -t {text,html} + + Print the calendar to the terminal as text, + or as an HTML document. + + +.. option:: year + + The year to print the calendar for. + Must be a number between 1 and 9999. + Defaults to the current year. + + +.. option:: month + + The month of the specified :option:`year` to print the calendar for. + Must be a number between 1 and 12, + and may only be used in text mode. + Defaults to printing a calendar for the full year. + + +*Text-mode options:* + +.. option:: --width WIDTH, -w WIDTH + + The width of the date column in terminal columns. + The date is printed centred in the column. + Any value lower than 2 is ignored. + Defaults to 2. + + +.. option:: --lines LINES, -l LINES + + The number of lines for each week in terminal rows. + The date is printed top-aligned. + Any value lower than 1 is ignored. + Defaults to 1. + + +.. option:: --spacing SPACING, -s SPACING + + The space between months in columns. + Any value lower than 2 is ignored. + Defaults to 6. + + +.. option:: --months MONTHS, -m MONTHS + + The number of months printed per row. + Defaults to 3. + + +*HTML-mode options:* + +.. option:: --css CSS, -c CSS + + The path of a CSS stylesheet to use for the calendar. 
+ This must either be relative to the generated HTML, + or an absolute HTTP or ``file:///`` URL. diff --git a/Doc/library/cgi.rst b/Doc/library/cgi.rst deleted file mode 100644 index 295a601a7bf197f..000000000000000 --- a/Doc/library/cgi.rst +++ /dev/null @@ -1,564 +0,0 @@ -:mod:`cgi` --- Common Gateway Interface support -=============================================== - -.. module:: cgi - :synopsis: Helpers for running Python scripts via the Common Gateway Interface. - :deprecated: - -**Source code:** :source:`Lib/cgi.py` - -.. index:: - pair: WWW; server - pair: CGI; protocol - pair: HTTP; protocol - pair: MIME; headers - single: URL - single: Common Gateway Interface - -.. deprecated-removed:: 3.11 3.13 - The :mod:`cgi` module is deprecated - (see :pep:`PEP 594 <594#cgi>` for details and alternatives). - - The :class:`FieldStorage` class can typically be replaced with - :func:`urllib.parse.parse_qsl` for ``GET`` and ``HEAD`` requests, - and the :mod:`email.message` module or - `multipart <https://pypi.org/project/multipart/>`_ for ``POST`` and ``PUT``. - Most :ref:`utility functions <functions-in-cgi-module>` have replacements. - --------------- - -Support module for Common Gateway Interface (CGI) scripts. - -This module defines a number of utilities for use by CGI scripts written in -Python. - -The global variable ``maxlen`` can be set to an integer indicating the maximum -size of a POST request. POST requests larger than this size will result in a -:exc:`ValueError` being raised during parsing. The default value of this -variable is ``0``, meaning the request size is unlimited. - -.. include:: ../includes/wasm-notavail.rst - -Introduction ------------- - -.. _cgi-intro: - -A CGI script is invoked by an HTTP server, usually to process user input -submitted through an HTML ``<FORM>`` or ``<ISINDEX>`` element. - -Most often, CGI scripts live in the server's special :file:`cgi-bin` directory. -The HTTP server places all sorts of information about the request (such as the -client's hostname, the requested URL, the query string, and lots of other -goodies) in the script's shell environment, executes the script, and sends the -script's output back to the client. - -The script's input is connected to the client too, and sometimes the form data -is read this way; at other times the form data is passed via the "query string" -part of the URL. This module is intended to take care of the different cases -and provide a simpler interface to the Python script. It also provides a number -of utilities that help in debugging scripts, and the latest addition is support -for file uploads from a form (if your browser supports it). - -The output of a CGI script should consist of two sections, separated by a blank -line. The first section contains a number of headers, telling the client what -kind of data is following. Python code to generate a minimal header section -looks like this:: - - print("Content-Type: text/html") # HTML is following - print() # blank line, end of headers - -The second section is usually HTML, which allows the client software to display -nicely formatted text with header, in-line images, etc. Here's Python code that -prints a simple piece of HTML:: - - print("<TITLE>CGI script output") - print("
<H1>This is my first CGI script</H1>
") - print("Hello, world!") - - -.. _using-the-cgi-module: - -Using the cgi module --------------------- - -Begin by writing ``import cgi``. - -When you write a new script, consider adding these lines:: - - import cgitb - cgitb.enable() - -This activates a special exception handler that will display detailed reports in -the web browser if any errors occur. If you'd rather not show the guts of your -program to users of your script, you can have the reports saved to files -instead, with code like this:: - - import cgitb - cgitb.enable(display=0, logdir="/path/to/logdir") - -It's very helpful to use this feature during script development. The reports -produced by :mod:`cgitb` provide information that can save you a lot of time in -tracking down bugs. You can always remove the ``cgitb`` line later when you -have tested your script and are confident that it works correctly. - -To get at submitted form data, use the :class:`FieldStorage` class. If the form -contains non-ASCII characters, use the *encoding* keyword parameter set to the -value of the encoding defined for the document. It is usually contained in the -META tag in the HEAD section of the HTML document or by the -:mailheader:`Content-Type` header. This reads the form contents from the -standard input or the environment (depending on the value of various -environment variables set according to the CGI standard). Since it may consume -standard input, it should be instantiated only once. - -The :class:`FieldStorage` instance can be indexed like a Python dictionary. -It allows membership testing with the :keyword:`in` operator, and also supports -the standard dictionary method :meth:`~dict.keys` and the built-in function -:func:`len`. Form fields containing empty strings are ignored and do not appear -in the dictionary; to keep such values, provide a true value for the optional -*keep_blank_values* keyword parameter when creating the :class:`FieldStorage` -instance. - -For instance, the following code (which assumes that the -:mailheader:`Content-Type` header and blank line have already been printed) -checks that the fields ``name`` and ``addr`` are both set to a non-empty -string:: - - form = cgi.FieldStorage() - if "name" not in form or "addr" not in form: - print("
<H1>Error</H1>") - print("Please fill in the name and addr fields.") - return - print("<p>name:", form["name"].value) - print("<p>
addr:", form["addr"].value) - ...further form processing here... - -Here the fields, accessed through ``form[key]``, are themselves instances of -:class:`FieldStorage` (or :class:`MiniFieldStorage`, depending on the form -encoding). The :attr:`~FieldStorage.value` attribute of the instance yields -the string value of the field. The :meth:`~FieldStorage.getvalue` method -returns this string value directly; it also accepts an optional second argument -as a default to return if the requested key is not present. - -If the submitted form data contains more than one field with the same name, the -object retrieved by ``form[key]`` is not a :class:`FieldStorage` or -:class:`MiniFieldStorage` instance but a list of such instances. Similarly, in -this situation, ``form.getvalue(key)`` would return a list of strings. If you -expect this possibility (when your HTML form contains multiple fields with the -same name), use the :meth:`~FieldStorage.getlist` method, which always returns -a list of values (so that you do not need to special-case the single item -case). For example, this code concatenates any number of username fields, -separated by commas:: - - value = form.getlist("username") - usernames = ",".join(value) - -If a field represents an uploaded file, accessing the value via the -:attr:`~FieldStorage.value` attribute or the :meth:`~FieldStorage.getvalue` -method reads the entire file in memory as bytes. This may not be what you -want. You can test for an uploaded file by testing either the -:attr:`~FieldStorage.filename` attribute or the :attr:`~FieldStorage.file` -attribute. You can then read the data from the :attr:`!file` -attribute before it is automatically closed as part of the garbage collection of -the :class:`FieldStorage` instance -(the :func:`~io.RawIOBase.read` and :func:`~io.IOBase.readline` methods will -return bytes):: - - fileitem = form["userfile"] - if fileitem.file: - # It's an uploaded file; count lines - linecount = 0 - while True: - line = fileitem.file.readline() - if not line: break - linecount = linecount + 1 - -:class:`FieldStorage` objects also support being used in a :keyword:`with` -statement, which will automatically close them when done. - -If an error is encountered when obtaining the contents of an uploaded file -(for example, when the user interrupts the form submission by clicking on -a Back or Cancel button) the :attr:`~FieldStorage.done` attribute of the -object for the field will be set to the value -1. - -The file upload draft standard entertains the possibility of uploading multiple -files from one field (using a recursive :mimetype:`multipart/\*` encoding). -When this occurs, the item will be a dictionary-like :class:`FieldStorage` item. -This can be determined by testing its :attr:`!type` attribute, which should be -:mimetype:`multipart/form-data` (or perhaps another MIME type matching -:mimetype:`multipart/\*`). In this case, it can be iterated over recursively -just like the top-level form object. - -When a form is submitted in the "old" format (as the query string or as a single -data part of type :mimetype:`application/x-www-form-urlencoded`), the items will -actually be instances of the class :class:`MiniFieldStorage`. In this case, the -:attr:`!list`, :attr:`!file`, and :attr:`filename` attributes are always ``None``. - -A form submitted via POST that also has a query string will contain both -:class:`FieldStorage` and :class:`MiniFieldStorage` items. - -.. 
versionchanged:: 3.4 - The :attr:`~FieldStorage.file` attribute is automatically closed upon the - garbage collection of the creating :class:`FieldStorage` instance. - -.. versionchanged:: 3.5 - Added support for the context management protocol to the - :class:`FieldStorage` class. - - -Higher Level Interface ----------------------- - -The previous section explains how to read CGI form data using the -:class:`FieldStorage` class. This section describes a higher level interface -which was added to this class to allow one to do it in a more readable and -intuitive way. The interface doesn't make the techniques described in previous -sections obsolete --- they are still useful to process file uploads efficiently, -for example. - -.. XXX: Is this true ? - -The interface consists of two simple methods. Using the methods you can process -form data in a generic way, without the need to worry whether only one or more -values were posted under one name. - -In the previous section, you learned to write following code anytime you -expected a user to post more than one value under one name:: - - item = form.getvalue("item") - if isinstance(item, list): - # The user is requesting more than one item. - else: - # The user is requesting only one item. - -This situation is common for example when a form contains a group of multiple -checkboxes with the same name:: - - - - -In most situations, however, there's only one form control with a particular -name in a form and then you expect and need only one value associated with this -name. So you write a script containing for example this code:: - - user = form.getvalue("user").upper() - -The problem with the code is that you should never expect that a client will -provide valid input to your scripts. For example, if a curious user appends -another ``user=foo`` pair to the query string, then the script would crash, -because in this situation the ``getvalue("user")`` method call returns a list -instead of a string. Calling the :meth:`~str.upper` method on a list is not valid -(since lists do not have a method of this name) and results in an -:exc:`AttributeError` exception. - -Therefore, the appropriate way to read form data values was to always use the -code which checks whether the obtained value is a single value or a list of -values. That's annoying and leads to less readable scripts. - -A more convenient approach is to use the methods :meth:`~FieldStorage.getfirst` -and :meth:`~FieldStorage.getlist` provided by this higher level interface. - - -.. method:: FieldStorage.getfirst(name, default=None) - - This method always returns only one value associated with form field *name*. - The method returns only the first value in case that more values were posted - under such name. Please note that the order in which the values are received - may vary from browser to browser and should not be counted on. [#]_ If no such - form field or value exists then the method returns the value specified by the - optional parameter *default*. This parameter defaults to ``None`` if not - specified. - - -.. method:: FieldStorage.getlist(name) - - This method always returns a list of values associated with form field *name*. - The method returns an empty list if no such form field or value exists for - *name*. It returns a list consisting of one item if only one such value exists. - -Using these methods you can write nice compact code:: - - import cgi - form = cgi.FieldStorage() - user = form.getfirst("user", "").upper() # This way it's safe. 
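    # An illustrative aside, not part of the original cgi documentation being
    # removed here: getfirst() and getlist() can be exercised without a web
    # server by handing FieldStorage a synthetic CGI environment.  The field
    # names "user" and "item" are just the ones used in this section.
    import cgi   # deprecated in 3.11, removed in 3.13

    demo_env = {"REQUEST_METHOD": "GET",
                "QUERY_STRING": "user=alice&item=pen&item=ink"}
    demo_form = cgi.FieldStorage(environ=demo_env)
    assert demo_form.getfirst("user", "").upper() == "ALICE"
    assert demo_form.getfirst("missing", "n/a") == "n/a"
    assert demo_form.getlist("item") == ["pen", "ink"]
    assert demo_form.getlist("missing") == []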
- for item in form.getlist("item"): - do_something(item) - - -.. _functions-in-cgi-module: - -Functions ---------- - -These are useful if you want more control, or if you want to employ some of the -algorithms implemented in this module in other circumstances. - - -.. function:: parse(fp=None, environ=os.environ, keep_blank_values=False, strict_parsing=False, separator="&") - - Parse a query in the environment or from a file (the file defaults to - ``sys.stdin``). The *keep_blank_values*, *strict_parsing* and *separator* parameters are - passed to :func:`urllib.parse.parse_qs` unchanged. - - .. deprecated-removed:: 3.11 3.13 - This function, like the rest of the :mod:`cgi` module, is deprecated. - It can be replaced by calling :func:`urllib.parse.parse_qs` directly - on the desired query string (except for ``multipart/form-data`` input, - which can be handled as described for :func:`parse_multipart`). - - -.. function:: parse_multipart(fp, pdict, encoding="utf-8", errors="replace", separator="&") - - Parse input of type :mimetype:`multipart/form-data` (for file uploads). - Arguments are *fp* for the input file, *pdict* for a dictionary containing - other parameters in the :mailheader:`Content-Type` header, and *encoding*, - the request encoding. - - Returns a dictionary just like :func:`urllib.parse.parse_qs`: keys are the - field names, each value is a list of values for that field. For non-file - fields, the value is a list of strings. - - This is easy to use but not much good if you are expecting megabytes to be - uploaded --- in that case, use the :class:`FieldStorage` class instead - which is much more flexible. - - .. versionchanged:: 3.7 - Added the *encoding* and *errors* parameters. For non-file fields, the - value is now a list of strings, not bytes. - - .. versionchanged:: 3.10 - Added the *separator* parameter. - - .. deprecated-removed:: 3.11 3.13 - This function, like the rest of the :mod:`cgi` module, is deprecated. - It can be replaced with the functionality in the :mod:`email` package - (e.g. :class:`email.message.EmailMessage`/:class:`email.message.Message`) - which implements the same MIME RFCs, or with the - `multipart `__ PyPI project. - - -.. function:: parse_header(string) - - Parse a MIME header (such as :mailheader:`Content-Type`) into a main value and a - dictionary of parameters. - - .. deprecated-removed:: 3.11 3.13 - This function, like the rest of the :mod:`cgi` module, is deprecated. - It can be replaced with the functionality in the :mod:`email` package, - which implements the same MIME RFCs. - - For example, with :class:`email.message.EmailMessage`:: - - from email.message import EmailMessage - msg = EmailMessage() - msg['content-type'] = 'application/json; charset="utf8"' - main, params = msg.get_content_type(), msg['content-type'].params - - -.. function:: test() - - Robust test CGI script, usable as main program. Writes minimal HTTP headers and - formats all information provided to the script in HTML format. - - -.. function:: print_environ() - - Format the shell environment in HTML. - - -.. function:: print_form(form) - - Format a form in HTML. - - -.. function:: print_directory() - - Format the current directory in HTML. - - -.. function:: print_environ_usage() - - Print a list of useful (used by CGI) environment variables in HTML. - - -.. _cgi-security: - -Caring about security ---------------------- - -.. 
index:: pair: CGI; security - -There's one important rule: if you invoke an external program (via -:func:`os.system`, :func:`os.popen` or other functions with similar -functionality), make very sure you don't pass arbitrary strings received from -the client to the shell. This is a well-known security hole whereby clever -hackers anywhere on the web can exploit a gullible CGI script to invoke -arbitrary shell commands. Even parts of the URL or field names cannot be -trusted, since the request doesn't have to come from your form! - -To be on the safe side, if you must pass a string gotten from a form to a shell -command, you should make sure the string contains only alphanumeric characters, -dashes, underscores, and periods. - - -Installing your CGI script on a Unix system -------------------------------------------- - -Read the documentation for your HTTP server and check with your local system -administrator to find the directory where CGI scripts should be installed; -usually this is in a directory :file:`cgi-bin` in the server tree. - -Make sure that your script is readable and executable by "others"; the Unix file -mode should be ``0o755`` octal (use ``chmod 0755 filename``). Make sure that the -first line of the script contains ``#!`` starting in column 1 followed by the -pathname of the Python interpreter, for instance:: - - #!/usr/local/bin/python - -Make sure the Python interpreter exists and is executable by "others". - -Make sure that any files your script needs to read or write are readable or -writable, respectively, by "others" --- their mode should be ``0o644`` for -readable and ``0o666`` for writable. This is because, for security reasons, the -HTTP server executes your script as user "nobody", without any special -privileges. It can only read (write, execute) files that everybody can read -(write, execute). The current directory at execution time is also different (it -is usually the server's cgi-bin directory) and the set of environment variables -is also different from what you get when you log in. In particular, don't count -on the shell's search path for executables (:envvar:`PATH`) or the Python module -search path (:envvar:`PYTHONPATH`) to be set to anything interesting. - -If you need to load modules from a directory which is not on Python's default -module search path, you can change the path in your script, before importing -other modules. For example:: - - import sys - sys.path.insert(0, "/usr/home/joe/lib/python") - sys.path.insert(0, "/usr/local/lib/python") - -(This way, the directory inserted last will be searched first!) - -Instructions for non-Unix systems will vary; check your HTTP server's -documentation (it will usually have a section on CGI scripts). - - -Testing your CGI script ------------------------ - -Unfortunately, a CGI script will generally not run when you try it from the -command line, and a script that works perfectly from the command line may fail -mysteriously when run from the server. There's one reason why you should still -test your script from the command line: if it contains a syntax error, the -Python interpreter won't execute it at all, and the HTTP server will most likely -send a cryptic error to the client. - -Assuming your script has no syntax errors, yet it does not work, you have no -choice but to read the next section. - - -Debugging CGI scripts ---------------------- - -.. 
index:: pair: CGI; debugging - -First of all, check for trivial installation errors --- reading the section -above on installing your CGI script carefully can save you a lot of time. If -you wonder whether you have understood the installation procedure correctly, try -installing a copy of this module file (:file:`cgi.py`) as a CGI script. When -invoked as a script, the file will dump its environment and the contents of the -form in HTML format. Give it the right mode etc., and send it a request. If it's -installed in the standard :file:`cgi-bin` directory, it should be possible to -send it a request by entering a URL into your browser of the form: - -.. code-block:: none - - http://yourhostname/cgi-bin/cgi.py?name=Joe+Blow&addr=At+Home - -If this gives an error of type 404, the server cannot find the script -- perhaps -you need to install it in a different directory. If it gives another error, -there's an installation problem that you should fix before trying to go any -further. If you get a nicely formatted listing of the environment and form -content (in this example, the fields should be listed as "addr" with value "At -Home" and "name" with value "Joe Blow"), the :file:`cgi.py` script has been -installed correctly. If you follow the same procedure for your own script, you -should now be able to debug it. - -The next step could be to call the :mod:`cgi` module's :func:`test` function -from your script: replace its main code with the single statement :: - - cgi.test() - -This should produce the same results as those gotten from installing the -:file:`cgi.py` file itself. - -When an ordinary Python script raises an unhandled exception (for whatever -reason: of a typo in a module name, a file that can't be opened, etc.), the -Python interpreter prints a nice traceback and exits. While the Python -interpreter will still do this when your CGI script raises an exception, most -likely the traceback will end up in one of the HTTP server's log files, or be -discarded altogether. - -Fortunately, once you have managed to get your script to execute *some* code, -you can easily send tracebacks to the web browser using the :mod:`cgitb` module. -If you haven't done so already, just add the lines:: - - import cgitb - cgitb.enable() - -to the top of your script. Then try running it again; when a problem occurs, -you should see a detailed report that will likely make apparent the cause of the -crash. - -If you suspect that there may be a problem in importing the :mod:`cgitb` module, -you can use an even more robust approach (which only uses built-in modules):: - - import sys - sys.stderr = sys.stdout - print("Content-Type: text/plain") - print() - ...your code here... - -This relies on the Python interpreter to print the traceback. The content type -of the output is set to plain text, which disables all HTML processing. If your -script works, the raw HTML will be displayed by your client. If it raises an -exception, most likely after the first two lines have been printed, a traceback -will be displayed. Because no HTML interpretation is going on, the traceback -will be readable. - - -Common problems and solutions ------------------------------ - -* Most HTTP servers buffer the output from CGI scripts until the script is - completed. This means that it is not possible to display a progress report on - the client's display while the script is running. - -* Check the installation instructions above. - -* Check the HTTP server's log files. (``tail -f logfile`` in a separate window - may be useful!) 
- -* Always check a script for syntax errors first, by doing something like - ``python script.py``. - -* If your script does not have any syntax errors, try adding ``import cgitb; - cgitb.enable()`` to the top of the script. - -* When invoking external programs, make sure they can be found. Usually, this - means using absolute path names --- :envvar:`PATH` is usually not set to a very - useful value in a CGI script. - -* When reading or writing external files, make sure they can be read or written - by the userid under which your CGI script will be running: this is typically the - userid under which the web server is running, or some explicitly specified - userid for a web server's ``suexec`` feature. - -* Don't try to give a CGI script a set-uid mode. This doesn't work on most - systems, and is a security liability as well. - -.. rubric:: Footnotes - -.. [#] Note that some recent versions of the HTML specification do state what - order the field values should be supplied in, but knowing whether a request - was received from a conforming browser, or even from a browser at all, is - tedious and error-prone. diff --git a/Doc/library/cgitb.rst b/Doc/library/cgitb.rst deleted file mode 100644 index 7f00bcd55c1e53e..000000000000000 --- a/Doc/library/cgitb.rst +++ /dev/null @@ -1,89 +0,0 @@ -:mod:`cgitb` --- Traceback manager for CGI scripts -================================================== - -.. module:: cgitb - :synopsis: Configurable traceback handler for CGI scripts. - :deprecated: - -.. moduleauthor:: Ka-Ping Yee -.. sectionauthor:: Fred L. Drake, Jr. - -**Source code:** :source:`Lib/cgitb.py` - -.. index:: - single: CGI; exceptions - single: CGI; tracebacks - single: exceptions; in CGI scripts - single: tracebacks; in CGI scripts - -.. deprecated-removed:: 3.11 3.13 - The :mod:`cgitb` module is deprecated - (see :pep:`PEP 594 <594#cgitb>` for details). - --------------- - -The :mod:`cgitb` module provides a special exception handler for Python scripts. -(Its name is a bit misleading. It was originally designed to display extensive -traceback information in HTML for CGI scripts. It was later generalized to also -display this information in plain text.) After this module is activated, if an -uncaught exception occurs, a detailed, formatted report will be displayed. The -report includes a traceback showing excerpts of the source code for each level, -as well as the values of the arguments and local variables to currently running -functions, to help you debug the problem. Optionally, you can save this -information to a file instead of sending it to the browser. - -To enable this feature, simply add this to the top of your CGI script:: - - import cgitb - cgitb.enable() - -The options to the :func:`enable` function control whether the report is -displayed in the browser and whether the report is logged to a file for later -analysis. - - -.. function:: enable(display=1, logdir=None, context=5, format="html") - - .. index:: single: excepthook() (in module sys) - - This function causes the :mod:`cgitb` module to take over the interpreter's - default handling for exceptions by setting the value of :attr:`sys.excepthook`. - - The optional argument *display* defaults to ``1`` and can be set to ``0`` to - suppress sending the traceback to the browser. If the argument *logdir* is - present, the traceback reports are written to files. The value of *logdir* - should be a directory where these files will be placed. 
The optional argument - *context* is the number of lines of context to display around the current line - of source code in the traceback; this defaults to ``5``. If the optional - argument *format* is ``"html"``, the output is formatted as HTML. Any other - value forces plain text output. The default value is ``"html"``. - - -.. function:: text(info, context=5) - - This function handles the exception described by *info* (a 3-tuple containing - the result of :func:`sys.exc_info`), formatting its traceback as text and - returning the result as a string. The optional argument *context* is the - number of lines of context to display around the current line of source code - in the traceback; this defaults to ``5``. - - -.. function:: html(info, context=5) - - This function handles the exception described by *info* (a 3-tuple containing - the result of :func:`sys.exc_info`), formatting its traceback as HTML and - returning the result as a string. The optional argument *context* is the - number of lines of context to display around the current line of source code - in the traceback; this defaults to ``5``. - - -.. function:: handler(info=None) - - This function handles an exception using the default settings (that is, show a - report in the browser, but don't log to a file). This can be used when you've - caught an exception and want to report it using :mod:`cgitb`. The optional - *info* argument should be a 3-tuple containing an exception type, exception - value, and traceback object, exactly like the tuple returned by - :func:`sys.exc_info`. If the *info* argument is not supplied, the current - exception is obtained from :func:`sys.exc_info`. - diff --git a/Doc/library/chunk.rst b/Doc/library/chunk.rst deleted file mode 100644 index 3b88e55b1478824..000000000000000 --- a/Doc/library/chunk.rst +++ /dev/null @@ -1,142 +0,0 @@ -:mod:`chunk` --- Read IFF chunked data -====================================== - -.. module:: chunk - :synopsis: Module to read IFF chunks. - :deprecated: - -.. moduleauthor:: Sjoerd Mullender -.. sectionauthor:: Sjoerd Mullender - -**Source code:** :source:`Lib/chunk.py` - -.. index:: - single: Audio Interchange File Format - single: AIFF - single: AIFF-C - single: Real Media File Format - single: RMFF - -.. deprecated-removed:: 3.11 3.13 - The :mod:`chunk` module is deprecated - (see :pep:`PEP 594 <594#chunk>` for details). - --------------- - -This module provides an interface for reading files that use EA IFF 85 chunks. -[#]_ This format is used in at least the Audio Interchange File Format -(AIFF/AIFF-C) and the Real Media File Format (RMFF). The WAVE audio file format -is closely related and can also be read using this module. - -A chunk has the following structure: - -+---------+--------+-------------------------------+ -| Offset | Length | Contents | -+=========+========+===============================+ -| 0 | 4 | Chunk ID | -+---------+--------+-------------------------------+ -| 4 | 4 | Size of chunk in big-endian | -| | | byte order, not including the | -| | | header | -+---------+--------+-------------------------------+ -| 8 | *n* | Data bytes, where *n* is the | -| | | size given in the preceding | -| | | field | -+---------+--------+-------------------------------+ -| 8 + *n* | 0 or 1 | Pad byte needed if *n* is odd | -| | | and chunk alignment is used | -+---------+--------+-------------------------------+ - -The ID is a 4-byte string which identifies the type of chunk. 
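(An illustrative aside, not part of the removed chunk documentation: the 8-byte
header laid out in the table above can also be unpacked by hand with
:mod:`struct`; a minimal sketch, assuming a big-endian stream and a made-up
helper name)::

    import struct

    def read_chunk_header(stream):
        """Return (chunk_id, data_size) for the next EA IFF 85 chunk."""
        header = stream.read(8)
        if len(header) < 8:
            raise EOFError("no complete chunk header left")
        # 4-byte ID, then a 32-bit big-endian size that excludes the header.
        chunk_id, data_size = struct.unpack(">4sI", header)
        return chunk_id, data_size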
- -The size field (a 32-bit value, encoded using big-endian byte order) gives the -size of the chunk data, not including the 8-byte header. - -Usually an IFF-type file consists of one or more chunks. The proposed usage of -the :class:`Chunk` class defined here is to instantiate an instance at the start -of each chunk and read from the instance until it reaches the end, after which a -new instance can be instantiated. At the end of the file, creating a new -instance will fail with an :exc:`EOFError` exception. - - -.. class:: Chunk(file, align=True, bigendian=True, inclheader=False) - - Class which represents a chunk. The *file* argument is expected to be a - file-like object. An instance of this class is specifically allowed. The - only method that is needed is :meth:`~io.IOBase.read`. If the methods - :meth:`~io.IOBase.seek` and :meth:`~io.IOBase.tell` are present and don't - raise an exception, they are also used. - If these methods are present and raise an exception, they are expected to not - have altered the object. If the optional argument *align* is true, chunks - are assumed to be aligned on 2-byte boundaries. If *align* is false, no - alignment is assumed. The default value is true. If the optional argument - *bigendian* is false, the chunk size is assumed to be in little-endian order. - This is needed for WAVE audio files. The default value is true. If the - optional argument *inclheader* is true, the size given in the chunk header - includes the size of the header. The default value is false. - - A :class:`Chunk` object supports the following methods: - - - .. method:: getname() - - Returns the name (ID) of the chunk. This is the first 4 bytes of the - chunk. - - - .. method:: getsize() - - Returns the size of the chunk. - - - .. method:: close() - - Close and skip to the end of the chunk. This does not close the - underlying file. - - The remaining methods will raise :exc:`OSError` if called after the - :meth:`close` method has been called. Before Python 3.3, they used to - raise :exc:`IOError`, now an alias of :exc:`OSError`. - - - .. method:: isatty() - - Returns ``False``. - - - .. method:: seek(pos, whence=0) - - Set the chunk's current position. The *whence* argument is optional and - defaults to ``0`` (absolute file positioning); other values are ``1`` - (seek relative to the current position) and ``2`` (seek relative to the - file's end). There is no return value. If the underlying file does not - allow seek, only forward seeks are allowed. - - - .. method:: tell() - - Return the current position into the chunk. - - - .. method:: read(size=-1) - - Read at most *size* bytes from the chunk (less if the read hits the end of - the chunk before obtaining *size* bytes). If the *size* argument is - negative or omitted, read all data until the end of the chunk. An empty - bytes object is returned when the end of the chunk is encountered - immediately. - - - .. method:: skip() - - Skip to the end of the chunk. All further calls to :meth:`read` for the - chunk will return ``b''``. If you are not interested in the contents of - the chunk, this method should be called so that the file points to the - start of the next chunk. - - -.. rubric:: Footnotes - -.. [#] "EA IFF 85" Standard for Interchange Format Files, Jerry Morrison, Electronic - Arts, January 1985. 
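(An illustrative aside, not part of the removed chunk documentation: a minimal
sketch of the usage pattern described above -- keep instantiating
:class:`Chunk` until :exc:`EOFError`.  It assumes a well-formed RIFF/WAVE file,
whose chunk sizes are little-endian, and the helper name is made up)::

    import chunk   # deprecated in 3.11, removed in 3.13

    def list_wave_chunks(path):
        """Yield (chunk_id, size) for each sub-chunk of a WAVE file."""
        with open(path, "rb") as f:
            riff = chunk.Chunk(f, bigendian=False)     # outer RIFF container
            if riff.getname() != b"RIFF" or riff.read(4) != b"WAVE":
                raise ValueError("not a WAVE file")
            while True:
                try:
                    # A Chunk instance is itself an acceptable *file* argument.
                    sub = chunk.Chunk(riff, bigendian=False)
                except EOFError:
                    break                               # no chunks left
                yield sub.getname(), sub.getsize()
                sub.skip()                              # move to the next chunk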
- diff --git a/Doc/library/cmath.rst b/Doc/library/cmath.rst index 28cd96b0e12da9c..fdac51d9603cebe 100644 --- a/Doc/library/cmath.rst +++ b/Doc/library/cmath.rst @@ -9,17 +9,33 @@ This module provides access to mathematical functions for complex numbers. The functions in this module accept integers, floating-point numbers or complex numbers as arguments. They will also accept any Python object that has either a -:meth:`__complex__` or a :meth:`__float__` method: these methods are used to +:meth:`~object.__complex__` or a :meth:`~object.__float__` method: these methods are used to convert the object to a complex or floating-point number, respectively, and the function is then applied to the result of the conversion. .. note:: - On platforms with hardware and system-level support for signed - zeros, functions involving branch cuts are continuous on *both* - sides of the branch cut: the sign of the zero distinguishes one - side of the branch cut from the other. On platforms that do not - support signed zeros the continuity is as specified below. + For functions involving branch cuts, we have the problem of deciding how to + define those functions on the cut itself. Following Kahan's "Branch cuts for + complex elementary functions" paper, as well as Annex G of C99 and later C + standards, we use the sign of zero to distinguish one side of the branch cut + from the other: for a branch cut along (a portion of) the real axis we look + at the sign of the imaginary part, while for a branch cut along the + imaginary axis we look at the sign of the real part. + + For example, the :func:`cmath.sqrt` function has a branch cut along the + negative real axis. An argument of ``complex(-2.0, -0.0)`` is treated as + though it lies *below* the branch cut, and so gives a result on the negative + imaginary axis:: + + >>> cmath.sqrt(complex(-2.0, -0.0)) + -1.4142135623730951j + + But an argument of ``complex(-2.0, 0.0)`` is treated as though it lies above + the branch cut:: + + >>> cmath.sqrt(complex(-2.0, 0.0)) + 1.4142135623730951j Conversions to and from polar coordinates @@ -44,14 +60,11 @@ rectangular coordinates to polar coordinates and back. .. function:: phase(x) - Return the phase of *x* (also known as the *argument* of *x*), as a - float. ``phase(x)`` is equivalent to ``math.atan2(x.imag, - x.real)``. The result lies in the range [-\ *π*, *π*], and the branch - cut for this operation lies along the negative real axis, - continuous from above. On systems with support for signed zeros - (which includes most systems in current use), this means that the - sign of the result is the same as the sign of ``x.imag``, even when - ``x.imag`` is zero:: + Return the phase of *x* (also known as the *argument* of *x*), as a float. + ``phase(x)`` is equivalent to ``math.atan2(x.imag, x.real)``. The result + lies in the range [-\ *π*, *π*], and the branch cut for this operation lies + along the negative real axis. The sign of the result is the same as the + sign of ``x.imag``, even when ``x.imag`` is zero:: >>> phase(complex(-1.0, 0.0)) 3.141592653589793 @@ -92,8 +105,8 @@ Power and logarithmic functions .. function:: log(x[, base]) Returns the logarithm of *x* to the given *base*. If the *base* is not - specified, returns the natural logarithm of *x*. There is one branch cut, from 0 - along the negative real axis to -∞, continuous from above. + specified, returns the natural logarithm of *x*. There is one branch cut, + from 0 along the negative real axis to -∞. .. 
function:: log10(x) @@ -112,9 +125,9 @@ Trigonometric functions .. function:: acos(x) - Return the arc cosine of *x*. There are two branch cuts: One extends right from - 1 along the real axis to ∞, continuous from below. The other extends left from - -1 along the real axis to -∞, continuous from above. + Return the arc cosine of *x*. There are two branch cuts: One extends right + from 1 along the real axis to ∞. The other extends left from -1 along the + real axis to -∞. .. function:: asin(x) @@ -125,9 +138,8 @@ Trigonometric functions .. function:: atan(x) Return the arc tangent of *x*. There are two branch cuts: One extends from - ``1j`` along the imaginary axis to ``∞j``, continuous from the right. The - other extends from ``-1j`` along the imaginary axis to ``-∞j``, continuous - from the left. + ``1j`` along the imaginary axis to ``∞j``. The other extends from ``-1j`` + along the imaginary axis to ``-∞j``. .. function:: cos(x) @@ -151,23 +163,21 @@ Hyperbolic functions .. function:: acosh(x) Return the inverse hyperbolic cosine of *x*. There is one branch cut, - extending left from 1 along the real axis to -∞, continuous from above. + extending left from 1 along the real axis to -∞. .. function:: asinh(x) Return the inverse hyperbolic sine of *x*. There are two branch cuts: - One extends from ``1j`` along the imaginary axis to ``∞j``, - continuous from the right. The other extends from ``-1j`` along - the imaginary axis to ``-∞j``, continuous from the left. + One extends from ``1j`` along the imaginary axis to ``∞j``. The other + extends from ``-1j`` along the imaginary axis to ``-∞j``. .. function:: atanh(x) Return the inverse hyperbolic tangent of *x*. There are two branch cuts: One - extends from ``1`` along the real axis to ``∞``, continuous from below. The - other extends from ``-1`` along the real axis to ``-∞``, continuous from - above. + extends from ``1`` along the real axis to ``∞``. The other extends from + ``-1`` along the real axis to ``-∞``. .. function:: cosh(x) @@ -291,7 +301,7 @@ Constants .. versionadded:: 3.6 -.. index:: module: math +.. index:: pair: module; math Note that the selection of functions is similar, but not identical, to that in module :mod:`math`. The reason for having two modules is that some users aren't diff --git a/Doc/library/cmdline.rst b/Doc/library/cmdline.rst new file mode 100644 index 000000000000000..b2379befeffcbab --- /dev/null +++ b/Doc/library/cmdline.rst @@ -0,0 +1,57 @@ +++++++++++++++++++++++++++++++++++++ +Modules command-line interface (CLI) +++++++++++++++++++++++++++++++++++++ + +The following modules have a command-line interface. 
+ +* :ref:`ast ` +* :ref:`asyncio ` +* :mod:`base64` +* :ref:`calendar ` +* :mod:`code` +* :ref:`compileall ` +* :mod:`cProfile`: see :ref:`profile ` +* :ref:`difflib ` +* :ref:`dis ` +* :mod:`doctest` +* :mod:`!encodings.rot_13` +* :mod:`ensurepip` +* :mod:`filecmp` +* :mod:`fileinput` +* :mod:`ftplib` +* :ref:`gzip ` +* :ref:`http.server ` +* :mod:`!idlelib` +* :ref:`inspect ` +* :ref:`json.tool ` +* :mod:`mimetypes` +* :mod:`pdb` +* :mod:`pickle` +* :ref:`pickletools ` +* :mod:`platform` +* :mod:`poplib` +* :ref:`profile ` +* :mod:`pstats` +* :ref:`py_compile ` +* :mod:`pyclbr` +* :mod:`pydoc` +* :mod:`quopri` +* :mod:`runpy` +* :ref:`site ` +* :ref:`sqlite3 ` +* :ref:`sysconfig ` +* :mod:`tabnanny` +* :ref:`tarfile ` +* :mod:`!this` +* :ref:`timeit ` +* :ref:`tokenize ` +* :ref:`trace ` +* :mod:`turtledemo` +* :ref:`unittest ` +* :ref:`uuid ` +* :mod:`venv` +* :mod:`webbrowser` +* :ref:`zipapp ` +* :ref:`zipfile ` + +See also the :ref:`Python command-line interface `. diff --git a/Doc/library/code.rst b/Doc/library/code.rst index 538e5afc7822aa8..091840781bd235b 100644 --- a/Doc/library/code.rst +++ b/Doc/library/code.rst @@ -23,20 +23,25 @@ build applications which provide an interactive interpreter prompt. ``'__doc__'`` set to ``None``. -.. class:: InteractiveConsole(locals=None, filename="") +.. class:: InteractiveConsole(locals=None, filename="", local_exit=False) Closely emulate the behavior of the interactive Python interpreter. This class builds on :class:`InteractiveInterpreter` and adds prompting using the familiar - ``sys.ps1`` and ``sys.ps2``, and input buffering. + ``sys.ps1`` and ``sys.ps2``, and input buffering. If *local_exit* is True, + ``exit()`` and ``quit()`` in the console will not raise :exc:`SystemExit`, but + instead return to the calling code. + .. versionchanged:: 3.13 + Added *local_exit* parameter. -.. function:: interact(banner=None, readfunc=None, local=None, exitmsg=None) +.. function:: interact(banner=None, readfunc=None, local=None, exitmsg=None, local_exit=False) Convenience function to run a read-eval-print loop. This creates a new instance of :class:`InteractiveConsole` and sets *readfunc* to be used as the :meth:`InteractiveConsole.raw_input` method, if provided. If *local* is provided, it is passed to the :class:`InteractiveConsole` constructor for - use as the default namespace for the interpreter loop. The :meth:`interact` + use as the default namespace for the interpreter loop. If *local_exit* is provided, + it is passed to the :class:`InteractiveConsole` constructor. The :meth:`interact` method of the instance is then run with *banner* and *exitmsg* passed as the banner and exit message to use, if provided. The console object is discarded after use. @@ -44,6 +49,8 @@ build applications which provide an interactive interpreter prompt. .. versionchanged:: 3.6 Added *exitmsg* parameter. + .. versionchanged:: 3.13 + Added *local_exit* parameter. .. function:: compile_command(source, filename="", symbol="single") @@ -163,12 +170,12 @@ interpreter objects as well as the following additions. Push a line of source text to the interpreter. The line should not have a trailing newline; it may have internal newlines. The line is appended to a - buffer and the interpreter's :meth:`runsource` method is called with the + buffer and the interpreter's :meth:`~InteractiveInterpreter.runsource` method is called with the concatenated contents of the buffer as source. 
If this indicates that the command was executed or invalid, the buffer is reset; otherwise, the command is incomplete, and the buffer is left as it was after the line was appended. The return value is ``True`` if more input is required, ``False`` if the line was - dealt with in some way (this is the same as :meth:`runsource`). + dealt with in some way (this is the same as :meth:`!runsource`). .. method:: InteractiveConsole.resetbuffer() diff --git a/Doc/library/codecs.rst b/Doc/library/codecs.rst index 8225236350d22ef..4617624686b1f3b 100644 --- a/Doc/library/codecs.rst +++ b/Doc/library/codecs.rst @@ -345,9 +345,10 @@ The following error handlers can be used with all Python +-------------------------+-----------------------------------------------+ | ``'backslashreplace'`` | Replace with backslashed escape sequences. | | | On encoding, use hexadecimal form of Unicode | -| | code point with formats ``\xhh`` ``\uxxxx`` | -| | ``\Uxxxxxxxx``. On decoding, use hexadecimal | -| | form of byte value with format ``\xhh``. | +| | code point with formats :samp:`\\x{hh}` | +| | :samp:`\\u{xxxx}` :samp:`\\U{xxxxxxxx}`. | +| | On decoding, use hexadecimal form of byte | +| | value with format :samp:`\\x{hh}`. | | | Implemented in | | | :func:`backslashreplace_errors`. | +-------------------------+-----------------------------------------------+ @@ -373,8 +374,9 @@ The following error handlers are only applicable to encoding (within +=========================+===============================================+ | ``'xmlcharrefreplace'`` | Replace with XML/HTML numeric character | | | reference, which is a decimal form of Unicode | -| | code point with format ``&#num;`` Implemented | -| | in :func:`xmlcharrefreplace_errors`. | +| | code point with format :samp:`&#{num};`. | +| | Implemented in | +| | :func:`xmlcharrefreplace_errors`. | +-------------------------+-----------------------------------------------+ | ``'namereplace'`` | Replace with ``\N{...}`` escape sequences, | | | what appears in the braces is the Name | @@ -478,8 +480,9 @@ functions: Malformed data is replaced by a backslashed escape sequence. On encoding, use the hexadecimal form of Unicode code point with formats - ``\xhh`` ``\uxxxx`` ``\Uxxxxxxxx``. On decoding, use the hexadecimal form of - byte value with format ``\xhh``. + :samp:`\\x{hh}` :samp:`\\u{xxxx}` :samp:`\\U{xxxxxxxx}`. + On decoding, use the hexadecimal form of + byte value with format :samp:`\\x{hh}`. .. versionchanged:: 3.5 Works with decoding and translating. @@ -492,7 +495,7 @@ functions: The unencodable character is replaced by an appropriate XML/HTML numeric character reference, which is a decimal form of Unicode code point with - format ``&#num;`` . + format :samp:`&#{num};` . .. function:: namereplace_errors(exception) @@ -517,44 +520,46 @@ The base :class:`Codec` class defines these methods which also define the function interfaces of the stateless encoder and decoder: -.. method:: Codec.encode(input, errors='strict') +.. class:: Codec + + .. method:: encode(input, errors='strict') - Encodes the object *input* and returns a tuple (output object, length consumed). - For instance, :term:`text encoding` converts - a string object to a bytes object using a particular - character set encoding (e.g., ``cp1252`` or ``iso-8859-1``). + Encodes the object *input* and returns a tuple (output object, length consumed). 
+ For instance, :term:`text encoding` converts + a string object to a bytes object using a particular + character set encoding (e.g., ``cp1252`` or ``iso-8859-1``). - The *errors* argument defines the error handling to apply. - It defaults to ``'strict'`` handling. + The *errors* argument defines the error handling to apply. + It defaults to ``'strict'`` handling. - The method may not store state in the :class:`Codec` instance. Use - :class:`StreamWriter` for codecs which have to keep state in order to make - encoding efficient. + The method may not store state in the :class:`Codec` instance. Use + :class:`StreamWriter` for codecs which have to keep state in order to make + encoding efficient. - The encoder must be able to handle zero length input and return an empty object - of the output object type in this situation. + The encoder must be able to handle zero length input and return an empty object + of the output object type in this situation. -.. method:: Codec.decode(input, errors='strict') + .. method:: decode(input, errors='strict') - Decodes the object *input* and returns a tuple (output object, length - consumed). For instance, for a :term:`text encoding`, decoding converts - a bytes object encoded using a particular - character set encoding to a string object. + Decodes the object *input* and returns a tuple (output object, length + consumed). For instance, for a :term:`text encoding`, decoding converts + a bytes object encoded using a particular + character set encoding to a string object. - For text encodings and bytes-to-bytes codecs, - *input* must be a bytes object or one which provides the read-only - buffer interface -- for example, buffer objects and memory mapped files. + For text encodings and bytes-to-bytes codecs, + *input* must be a bytes object or one which provides the read-only + buffer interface -- for example, buffer objects and memory mapped files. - The *errors* argument defines the error handling to apply. - It defaults to ``'strict'`` handling. + The *errors* argument defines the error handling to apply. + It defaults to ``'strict'`` handling. - The method may not store state in the :class:`Codec` instance. Use - :class:`StreamReader` for codecs which have to keep state in order to make - decoding efficient. + The method may not store state in the :class:`Codec` instance. Use + :class:`StreamReader` for codecs which have to keep state in order to make + decoding efficient. - The decoder must be able to handle zero length input and return an empty object - of the output object type in this situation. + The decoder must be able to handle zero length input and return an empty object + of the output object type in this situation. Incremental Encoding and Decoding @@ -702,7 +707,7 @@ Stream Encoding and Decoding The :class:`StreamWriter` and :class:`StreamReader` classes provide generic working interfaces which can be used to implement new encoding submodules very -easily. See :mod:`encodings.utf_8` for an example of how this is done. +easily. See :mod:`!encodings.utf_8` for an example of how this is done. .. _stream-writer-objects: @@ -892,9 +897,10 @@ The design is such that one can use the factory functions returned by the .. 
class:: StreamRecoder(stream, encode, decode, Reader, Writer, errors='strict') Creates a :class:`StreamRecoder` instance which implements a two-way conversion: - *encode* and *decode* work on the frontend — the data visible to - code calling :meth:`read` and :meth:`write`, while *Reader* and *Writer* - work on the backend — the data in *stream*. + *encode* and *decode* work on the frontend — the data visible to + code calling :meth:`~StreamReader.read` and :meth:`~StreamWriter.write`, + while *Reader* and *Writer* + work on the backend — the data in *stream*. You can use these objects to do transparent transcodings, e.g., from Latin-1 to UTF-8 and back. @@ -1346,9 +1352,10 @@ encodings. | | | supported. | +--------------------+---------+---------------------------+ | raw_unicode_escape | | Latin-1 encoding with | -| | | ``\uXXXX`` and | -| | | ``\UXXXXXXXX`` for other | -| | | code points. Existing | +| | | :samp:`\\u{XXXX}` and | +| | | :samp:`\\U{XXXXXXXX}` | +| | | for other code points. | +| | | Existing | | | | backslashes are not | | | | escaped in any way. | | | | It is used in the Python | @@ -1413,8 +1420,8 @@ to :class:`bytes` mappings. They are not supported by :meth:`bytes.decode` | | quotedprintable, | quoted printable. | ``quotetabs=True`` / | | | quoted_printable | | :meth:`quopri.decode` | +----------------------+------------------+------------------------------+------------------------------+ -| uu_codec | uu | Convert the operand using | :meth:`uu.encode` / | -| | | uuencode. | :meth:`uu.decode` | +| uu_codec | uu | Convert the operand using | | +| | | uuencode. | | +----------------------+------------------+------------------------------+------------------------------+ | zlib_codec | zip, zlib | Compress the operand using | :meth:`zlib.compress` / | | | | gzip. | :meth:`zlib.decompress` | diff --git a/Doc/library/codeop.rst b/Doc/library/codeop.rst index c66b9d3ec0a26d4..55606e1c5f09ac2 100644 --- a/Doc/library/codeop.rst +++ b/Doc/library/codeop.rst @@ -19,10 +19,10 @@ module instead. There are two parts to this job: -#. Being able to tell if a line of input completes a Python statement: in +#. Being able to tell if a line of input completes a Python statement: in short, telling whether to print '``>>>``' or '``...``' next. -#. Remembering which future statements the user has entered, so subsequent +#. Remembering which future statements the user has entered, so subsequent input can be compiled with these in effect. The :mod:`codeop` module provides a way of doing each of these things, and a way @@ -33,9 +33,9 @@ To do just the former: .. function:: compile_command(source, filename="", symbol="single") Tries to compile *source*, which should be a string of Python code and return a - code object if *source* is valid Python code. In that case, the filename + code object if *source* is valid Python code. In that case, the filename attribute of the code object will be *filename*, which defaults to - ``''``. Returns ``None`` if *source* is *not* valid Python code, but is a + ``''``. Returns ``None`` if *source* is *not* valid Python code, but is a prefix of valid Python code. If there is a problem with *source*, an exception will be raised. @@ -43,9 +43,9 @@ To do just the former: :exc:`OverflowError` or :exc:`ValueError` if there is an invalid literal. 
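(An illustrative aside, not part of the patch: the return value of
:func:`compile_command` alone tells an interactive loop which prompt to show
next, as described above)::

    from codeop import compile_command

    for source in ("print('hi')", "if True:"):
        # A code object means the statement is complete; ``None`` means the
        # input is a valid prefix and more lines are expected.
        prompt = ">>>" if compile_command(source) is not None else "..."
        print(f"{source!r:<15} next prompt: {prompt}")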
The *symbol* argument determines whether *source* is compiled as a statement - (``'single'``, the default), as a sequence of statements (``'exec'``) or + (``'single'``, the default), as a sequence of :term:`statement` (``'exec'``) or as an :term:`expression` (``'eval'``). Any other value will - cause :exc:`ValueError` to be raised. + cause :exc:`ValueError` to be raised. .. note:: @@ -58,7 +58,7 @@ To do just the former: .. class:: Compile() - Instances of this class have :meth:`__call__` methods identical in signature to + Instances of this class have :meth:`~object.__call__` methods identical in signature to the built-in function :func:`compile`, but with the difference that if the instance compiles program text containing a :mod:`__future__` statement, the instance 'remembers' and compiles all subsequent program texts with the @@ -67,7 +67,7 @@ To do just the former: .. class:: CommandCompiler() - Instances of this class have :meth:`__call__` methods identical in signature to + Instances of this class have :meth:`~object.__call__` methods identical in signature to :func:`compile_command`; the difference is that if the instance compiles program - text containing a ``__future__`` statement, the instance 'remembers' and + text containing a :mod:`__future__` statement, the instance 'remembers' and compiles all subsequent program texts with the statement in force. diff --git a/Doc/library/collections.abc.rst b/Doc/library/collections.abc.rst index 132b0ce7192ac17..edc078953290d7a 100644 --- a/Doc/library/collections.abc.rst +++ b/Doc/library/collections.abc.rst @@ -22,7 +22,7 @@ This module provides :term:`abstract base classes ` that can be used to test whether a class provides a particular interface; for -example, whether it is hashable or whether it is a mapping. +example, whether it is :term:`hashable` or whether it is a mapping. An :func:`issubclass` or :func:`isinstance` test for an interface works in one of three ways. @@ -177,6 +177,7 @@ ABC Inherits from Abstract Methods Mi :class:`AsyncIterable` [1]_ ``__aiter__`` :class:`AsyncIterator` [1]_ :class:`AsyncIterable` ``__anext__`` ``__aiter__`` :class:`AsyncGenerator` [1]_ :class:`AsyncIterator` ``asend``, ``athrow`` ``aclose``, ``__aiter__``, ``__anext__`` +:class:`Buffer` [1]_ ``__buffer__`` ============================== ====================== ======================= ==================================================== @@ -191,7 +192,7 @@ ABC Inherits from Abstract Methods Mi .. [2] Checking ``isinstance(obj, Iterable)`` detects classes that are registered as :class:`Iterable` or that have an :meth:`__iter__` method, but it does not detect classes that iterate with the - :meth:`__getitem__` method. The only reliable way to determine + :meth:`~object.__getitem__` method. The only reliable way to determine whether an object is :term:`iterable` is to call ``iter(obj)``. @@ -221,7 +222,7 @@ Collections Abstract Base Classes -- Detailed Descriptions Checking ``isinstance(obj, Iterable)`` detects classes that are registered as :class:`Iterable` or that have an :meth:`__iter__` method, but it does - not detect classes that iterate with the :meth:`__getitem__` method. + not detect classes that iterate with the :meth:`~object.__getitem__` method. The only reliable way to determine whether an object is :term:`iterable` is to call ``iter(obj)``. 
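(An illustrative aside, not part of the patch, for the note above: a class that
iterates only through :meth:`~object.__getitem__` is missed by the
``isinstance(obj, Iterable)`` check but still works with :func:`iter`; the
class is made up for the example)::

    from collections.abc import Iterable

    class Squares:
        # Legacy iteration protocol: indexing from 0 until IndexError.
        def __getitem__(self, index):
            if index >= 5:
                raise IndexError(index)
            return index * index

    s = Squares()
    print(isinstance(s, Iterable))   # False -- Squares has no __iter__
    print(list(iter(s)))             # [0, 1, 4, 9, 16] -- iter() still works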
@@ -261,8 +262,8 @@ Collections Abstract Base Classes -- Detailed Descriptions Implementation note: Some of the mixin methods, such as :meth:`__iter__`, :meth:`__reversed__` and :meth:`index`, make - repeated calls to the underlying :meth:`__getitem__` method. - Consequently, if :meth:`__getitem__` is implemented with constant + repeated calls to the underlying :meth:`~object.__getitem__` method. + Consequently, if :meth:`~object.__getitem__` is implemented with constant access speed, the mixin methods will have linear performance; however, if the underlying method is linear (as it would be with a linked list), the mixins will have quadratic performance and will @@ -272,6 +273,12 @@ Collections Abstract Base Classes -- Detailed Descriptions The index() method added support for *stop* and *start* arguments. + .. deprecated-removed:: 3.12 3.14 + The :class:`ByteString` ABC has been deprecated. + For use in typing, prefer a union, like ``bytes | bytearray``, or + :class:`collections.abc.Buffer`. + For use as an ABC, prefer :class:`Sequence` or :class:`collections.abc.Buffer`. + .. class:: Set MutableSet @@ -346,6 +353,13 @@ Collections Abstract Base Classes -- Detailed Descriptions .. versionadded:: 3.6 +.. class:: Buffer + + ABC for classes that provide the :meth:`~object.__buffer__` method, + implementing the :ref:`buffer protocol `. See :pep:`688`. + + .. versionadded:: 3.12 + Examples and Recipes -------------------- @@ -406,7 +420,7 @@ Notes on using :class:`Set` and :class:`MutableSet` as a mixin: (3) The :class:`Set` mixin provides a :meth:`_hash` method to compute a hash value for the set; however, :meth:`__hash__` is not defined because not all sets - are hashable or immutable. To add set hashability using mixins, + are :term:`hashable` or immutable. To add set hashability using mixins, inherit from both :meth:`Set` and :meth:`Hashable`, then define ``__hash__ = Set._hash``. diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst index 53b4b69f84b7bf4..17dd6da7479e504 100644 --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -25,7 +25,7 @@ Python's general purpose built-in containers, :class:`dict`, :class:`list`, :func:`namedtuple` factory function for creating tuple subclasses with named fields :class:`deque` list-like container with fast appends and pops on either end :class:`ChainMap` dict-like class for creating a single view of multiple mappings -:class:`Counter` dict subclass for counting hashable objects +:class:`Counter` dict subclass for counting :term:`hashable` objects :class:`OrderedDict` dict subclass that remembers the order entries were added :class:`defaultdict` dict subclass that calls a factory function to supply missing values :class:`UserDict` wrapper around dictionary objects for easier dict subclassing @@ -120,26 +120,26 @@ The class can be used to simulate nested scopes and is useful in templating. .. seealso:: - * The `MultiContext class - `_ - in the Enthought `CodeTools package - `_ has options to support - writing to any mapping in the chain. + * The `MultiContext class + `_ + in the Enthought `CodeTools package + `_ has options to support + writing to any mapping in the chain. - * Django's `Context class - `_ - for templating is a read-only chain of mappings. It also features - pushing and popping of contexts similar to the - :meth:`~collections.ChainMap.new_child` method and the - :attr:`~collections.ChainMap.parents` property. + * Django's `Context class + `_ + for templating is a read-only chain of mappings. 
It also features + pushing and popping of contexts similar to the + :meth:`~collections.ChainMap.new_child` method and the + :attr:`~collections.ChainMap.parents` property. - * The `Nested Contexts recipe - `_ has options to control - whether writes and other mutations apply only to the first mapping or to - any mapping in the chain. + * The `Nested Contexts recipe + `_ has options to control + whether writes and other mutations apply only to the first mapping or to + any mapping in the chain. - * A `greatly simplified read-only version of Chainmap - `_. + * A `greatly simplified read-only version of Chainmap + `_. :class:`ChainMap` Examples and Recipes @@ -229,6 +229,7 @@ For example:: >>> cnt = Counter() >>> for word in ['red', 'blue', 'red', 'green', 'blue', 'blue']: ... cnt[word] += 1 + ... >>> cnt Counter({'blue': 3, 'red': 2, 'green': 1}) @@ -241,7 +242,7 @@ For example:: .. class:: Counter([iterable-or-mapping]) - A :class:`Counter` is a :class:`dict` subclass for counting hashable objects. + A :class:`Counter` is a :class:`dict` subclass for counting :term:`hashable` objects. It is a collection where elements are stored as dictionary keys and their counts are stored as dictionary values. Counts are allowed to be any integer value including zero or negative counts. The :class:`Counter` @@ -357,7 +358,7 @@ Common patterns for working with :class:`Counter` objects:: list(c) # list unique elements set(c) # convert to a set dict(c) # convert to a regular dictionary - c.items() # convert to a list of (elem, cnt) pairs + c.items() # access the (elem, cnt) pairs Counter(dict(list_of_pairs)) # convert from a list of (elem, cnt) pairs c.most_common()[:-n-1:-1] # n least common elements +c # remove zero and negative counts @@ -428,22 +429,22 @@ or subtracting from an empty counter. .. seealso:: - * `Bag class `_ - in Smalltalk. + * `Bag class `_ + in Smalltalk. - * Wikipedia entry for `Multisets `_. + * Wikipedia entry for `Multisets `_. - * `C++ multisets `_ - tutorial with examples. + * `C++ multisets `_ + tutorial with examples. - * For mathematical operations on multisets and their use cases, see - *Knuth, Donald. The Art of Computer Programming Volume II, - Section 4.6.3, Exercise 19*. + * For mathematical operations on multisets and their use cases, see + *Knuth, Donald. The Art of Computer Programming Volume II, + Section 4.6.3, Exercise 19*. - * To enumerate all distinct multisets of a given size over a given set of - elements, see :func:`itertools.combinations_with_replacement`:: + * To enumerate all distinct multisets of a given size over a given set of + elements, see :func:`itertools.combinations_with_replacement`:: - map(Counter, combinations_with_replacement('ABC', 2)) # --> AA AB AC BB BC CC + map(Counter, combinations_with_replacement('ABC', 2)) # --> AA AB AC BB BC CC :class:`deque` objects @@ -742,12 +743,12 @@ stack manipulations such as ``dup``, ``drop``, ``swap``, ``over``, ``pick``, If calling :attr:`default_factory` raises an exception this exception is propagated unchanged. - This method is called by the :meth:`__getitem__` method of the + This method is called by the :meth:`~object.__getitem__` method of the :class:`dict` class when the requested key is not found; whatever it - returns or raises is then returned or raised by :meth:`__getitem__`. + returns or raises is then returned or raised by :meth:`~object.__getitem__`. Note that :meth:`__missing__` is *not* called for any operations besides - :meth:`__getitem__`. 
This means that :meth:`get` will, like normal + :meth:`~object.__getitem__`. This means that :meth:`get` will, like normal dictionaries, return ``None`` as a default rather than using :attr:`default_factory`. @@ -818,6 +819,7 @@ zero): >>> def constant_factory(value): ... return lambda: value + ... >>> d = defaultdict(constant_factory('')) >>> d.update(name='John', action='ran') >>> '%(name)s %(action)s to %(object)s' % d @@ -977,6 +979,8 @@ field names, the method and attribute names start with an underscore. >>> for partnum, record in inventory.items(): ... inventory[partnum] = record._replace(price=newprices[partnum], timestamp=time.now()) + Named tuples are also supported by generic function :func:`copy.replace`. + .. attribute:: somenamedtuple._fields Tuple of strings listing the field names. Useful for introspection @@ -1058,20 +1062,20 @@ fields: .. seealso:: - * See :class:`typing.NamedTuple` for a way to add type hints for named - tuples. It also provides an elegant notation using the :keyword:`class` - keyword:: + * See :class:`typing.NamedTuple` for a way to add type hints for named + tuples. It also provides an elegant notation using the :keyword:`class` + keyword:: - class Component(NamedTuple): - part_number: int - weight: float - description: Optional[str] = None + class Component(NamedTuple): + part_number: int + weight: float + description: Optional[str] = None - * See :meth:`types.SimpleNamespace` for a mutable namespace based on an - underlying dictionary instead of a tuple. + * See :meth:`types.SimpleNamespace` for a mutable namespace based on an + underlying dictionary instead of a tuple. - * The :mod:`dataclasses` module provides a decorator and functions for - automatically adding generated special methods to user-defined classes. + * The :mod:`dataclasses` module provides a decorator and functions for + automatically adding generated special methods to user-defined classes. :class:`OrderedDict` objects @@ -1222,7 +1226,7 @@ variants of :func:`functools.lru_cache`: result = self.func(*args) self.cache[args] = time(), result if len(self.cache) > self.maxsize: - self.cache.popitem(0) + self.cache.popitem(last=False) return result @@ -1254,12 +1258,12 @@ variants of :func:`functools.lru_cache`: if self.requests[args] <= self.cache_after: self.requests.move_to_end(args) if len(self.requests) > self.maxrequests: - self.requests.popitem(0) + self.requests.popitem(last=False) else: self.requests.pop(args, None) self.cache[args] = result if len(self.cache) > self.maxsize: - self.cache.popitem(0) + self.cache.popitem(last=False) return result .. doctest:: diff --git a/Doc/library/compileall.rst b/Doc/library/compileall.rst index 7af46cf3200878d..df1eefab839cc16 100644 --- a/Doc/library/compileall.rst +++ b/Doc/library/compileall.rst @@ -16,6 +16,8 @@ have write permission to the library directories. .. include:: ../includes/wasm-notavail.rst +.. _compileall-cli: + Command-line use ---------------- @@ -24,28 +26,28 @@ compile Python sources. .. program:: compileall -.. cmdoption:: directory ... - file ... +.. option:: directory ... + file ... Positional arguments are files to compile or directories that contain source files, traversed recursively. If no argument is given, behave as if - the command line was ``-l ``. + the command line was :samp:`-l {}`. -.. cmdoption:: -l +.. option:: -l Do not recurse into subdirectories, only compile source code files directly contained in the named or implied directories. -.. cmdoption:: -f +.. 
option:: -f Force rebuild even if timestamps are up-to-date. -.. cmdoption:: -q +.. option:: -q Do not print the list of files compiled. If passed once, error messages will still be printed. If passed twice (``-qq``), all output is suppressed. -.. cmdoption:: -d destdir +.. option:: -d destdir Directory prepended to the path to each file being compiled. This will appear in compilation time tracebacks, and is also compiled in to the @@ -53,45 +55,45 @@ compile Python sources. cases where the source file does not exist at the time the byte-code file is executed. -.. cmdoption:: -s strip_prefix -.. cmdoption:: -p prepend_prefix +.. option:: -s strip_prefix +.. option:: -p prepend_prefix Remove (``-s``) or append (``-p``) the given prefix of paths recorded in the ``.pyc`` files. Cannot be combined with ``-d``. -.. cmdoption:: -x regex +.. option:: -x regex regex is used to search the full path to each file considered for compilation, and if the regex produces a match, the file is skipped. -.. cmdoption:: -i list +.. option:: -i list Read the file ``list`` and add each line that it contains to the list of files and directories to compile. If ``list`` is ``-``, read lines from ``stdin``. -.. cmdoption:: -b +.. option:: -b Write the byte-code files to their legacy locations and names, which may overwrite byte-code files created by another version of Python. The default is to write files to their :pep:`3147` locations and names, which allows byte-code files from multiple versions of Python to coexist. -.. cmdoption:: -r +.. option:: -r Control the maximum recursion level for subdirectories. If this is given, then ``-l`` option will not be taken into account. :program:`python -m compileall -r 0` is equivalent to :program:`python -m compileall -l`. -.. cmdoption:: -j N +.. option:: -j N Use *N* workers to compile the files within the given directory. - If ``0`` is used, then the result of :func:`os.cpu_count()` + If ``0`` is used, then the result of :func:`os.process_cpu_count()` will be used. -.. cmdoption:: --invalidation-mode [timestamp|checked-hash|unchecked-hash] +.. option:: --invalidation-mode [timestamp|checked-hash|unchecked-hash] Control how the generated byte-code files are invalidated at runtime. The ``timestamp`` value, means that ``.pyc`` files with the source timestamp @@ -104,17 +106,17 @@ compile Python sources. variable is not set, and ``checked-hash`` if the ``SOURCE_DATE_EPOCH`` environment variable is set. -.. cmdoption:: -o level +.. option:: -o level Compile with the given optimization level. May be used multiple times to compile for multiple levels at a time (for example, ``compileall -o 1 -o 2``). -.. cmdoption:: -e dir +.. option:: -e dir Ignore symlinks pointing outside the given directory. -.. cmdoption:: --hardlink-dupes +.. option:: --hardlink-dupes If two ``.pyc`` files with different optimization level have the same content, use hard links to consolidate duplicate files. @@ -141,9 +143,9 @@ There is no command-line option to control the optimization level used by the :func:`compile` function, because the Python interpreter itself already provides the option: :program:`python -O -m compileall`. -Similarly, the :func:`compile` function respects the :attr:`sys.pycache_prefix` +Similarly, the :func:`compile` function respects the :data:`sys.pycache_prefix` setting. 
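A minimal sketch of pre-compiling a source tree under an explicit cache prefix; the ``myproject`` directory and ``/tmp/pycache`` path are hypothetical, and the same prefix can also be supplied through the :envvar:`PYTHONPYCACHEPREFIX` environment variable for both the compile step and the later runs::

    # Hedged sketch: pre-compile a tree so the .pyc files land under a
    # custom cache prefix (directory names are made up for illustration).
    import sys
    import compileall

    sys.pycache_prefix = "/tmp/pycache"        # must match the runtime setting
    compileall.compile_dir("myproject", quiet=1)
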
The generated bytecode cache will only be useful if :func:`compile` is -run with the same :attr:`sys.pycache_prefix` (if any) that will be used at +run with the same :data:`sys.pycache_prefix` (if any) that will be used at runtime. Public functions @@ -199,7 +201,7 @@ Public functions The *stripdir*, *prependdir* and *limit_sl_dest* arguments correspond to the ``-s``, ``-p`` and ``-e`` options described above. - They may be specified as ``str``, ``bytes`` or :py:class:`os.PathLike`. + They may be specified as ``str`` or :py:class:`os.PathLike`. If *hardlink_dupes* is true and two ``.pyc`` files with different optimization level have the same content, use hard links to consolidate duplicate files. @@ -269,7 +271,7 @@ Public functions The *stripdir*, *prependdir* and *limit_sl_dest* arguments correspond to the ``-s``, ``-p`` and ``-e`` options described above. - They may be specified as ``str``, ``bytes`` or :py:class:`os.PathLike`. + They may be specified as ``str`` or :py:class:`os.PathLike`. If *hardlink_dupes* is true and two ``.pyc`` files with different optimization level have the same content, use hard links to consolidate duplicate files. diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst index 8106cc235e5a3c0..deefb8606ead844 100644 --- a/Doc/library/concurrent.futures.rst +++ b/Doc/library/concurrent.futures.rst @@ -29,83 +29,83 @@ Executor Objects An abstract class that provides methods to execute calls asynchronously. It should not be used directly, but through its concrete subclasses. - .. method:: submit(fn, /, *args, **kwargs) + .. method:: submit(fn, /, *args, **kwargs) - Schedules the callable, *fn*, to be executed as ``fn(*args, **kwargs)`` - and returns a :class:`Future` object representing the execution of the - callable. :: + Schedules the callable, *fn*, to be executed as ``fn(*args, **kwargs)`` + and returns a :class:`Future` object representing the execution of the + callable. :: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(pow, 323, 1235) - print(future.result()) + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(pow, 323, 1235) + print(future.result()) - .. method:: map(func, *iterables, timeout=None, chunksize=1) + .. method:: map(func, *iterables, timeout=None, chunksize=1) - Similar to :func:`map(func, *iterables) ` except: + Similar to :func:`map(func, *iterables) ` except: - * the *iterables* are collected immediately rather than lazily; + * the *iterables* are collected immediately rather than lazily; - * *func* is executed asynchronously and several calls to - *func* may be made concurrently. + * *func* is executed asynchronously and several calls to + *func* may be made concurrently. - The returned iterator raises a :exc:`TimeoutError` - if :meth:`~iterator.__next__` is called and the result isn't available - after *timeout* seconds from the original call to :meth:`Executor.map`. - *timeout* can be an int or a float. If *timeout* is not specified or - ``None``, there is no limit to the wait time. + The returned iterator raises a :exc:`TimeoutError` + if :meth:`~iterator.__next__` is called and the result isn't available + after *timeout* seconds from the original call to :meth:`Executor.map`. + *timeout* can be an int or a float. If *timeout* is not specified or + ``None``, there is no limit to the wait time. - If a *func* call raises an exception, then that exception will be - raised when its value is retrieved from the iterator. 
+ If a *func* call raises an exception, then that exception will be + raised when its value is retrieved from the iterator. - When using :class:`ProcessPoolExecutor`, this method chops *iterables* - into a number of chunks which it submits to the pool as separate - tasks. The (approximate) size of these chunks can be specified by - setting *chunksize* to a positive integer. For very long iterables, - using a large value for *chunksize* can significantly improve - performance compared to the default size of 1. With - :class:`ThreadPoolExecutor`, *chunksize* has no effect. + When using :class:`ProcessPoolExecutor`, this method chops *iterables* + into a number of chunks which it submits to the pool as separate + tasks. The (approximate) size of these chunks can be specified by + setting *chunksize* to a positive integer. For very long iterables, + using a large value for *chunksize* can significantly improve + performance compared to the default size of 1. With + :class:`ThreadPoolExecutor`, *chunksize* has no effect. - .. versionchanged:: 3.5 - Added the *chunksize* argument. + .. versionchanged:: 3.5 + Added the *chunksize* argument. - .. method:: shutdown(wait=True, *, cancel_futures=False) + .. method:: shutdown(wait=True, *, cancel_futures=False) - Signal the executor that it should free any resources that it is using - when the currently pending futures are done executing. Calls to - :meth:`Executor.submit` and :meth:`Executor.map` made after shutdown will - raise :exc:`RuntimeError`. + Signal the executor that it should free any resources that it is using + when the currently pending futures are done executing. Calls to + :meth:`Executor.submit` and :meth:`Executor.map` made after shutdown will + raise :exc:`RuntimeError`. - If *wait* is ``True`` then this method will not return until all the - pending futures are done executing and the resources associated with the - executor have been freed. If *wait* is ``False`` then this method will - return immediately and the resources associated with the executor will be - freed when all pending futures are done executing. Regardless of the - value of *wait*, the entire Python program will not exit until all - pending futures are done executing. + If *wait* is ``True`` then this method will not return until all the + pending futures are done executing and the resources associated with the + executor have been freed. If *wait* is ``False`` then this method will + return immediately and the resources associated with the executor will be + freed when all pending futures are done executing. Regardless of the + value of *wait*, the entire Python program will not exit until all + pending futures are done executing. - If *cancel_futures* is ``True``, this method will cancel all pending - futures that the executor has not started running. Any futures that - are completed or running won't be cancelled, regardless of the value - of *cancel_futures*. + If *cancel_futures* is ``True``, this method will cancel all pending + futures that the executor has not started running. Any futures that + are completed or running won't be cancelled, regardless of the value + of *cancel_futures*. - If both *cancel_futures* and *wait* are ``True``, all futures that the - executor has started running will be completed prior to this method - returning. The remaining futures are cancelled. + If both *cancel_futures* and *wait* are ``True``, all futures that the + executor has started running will be completed prior to this method + returning. 
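As a hedged illustration of that interaction (the ``task`` function, worker count, and number of submissions below are made up), futures that are already running are allowed to finish while queued ones are dropped::

    import time
    from concurrent.futures import ThreadPoolExecutor

    def task(n):
        time.sleep(0.5)
        return n

    executor = ThreadPoolExecutor(max_workers=2)
    futures = [executor.submit(task, i) for i in range(10)]
    executor.shutdown(wait=True, cancel_futures=True)

    # Every future has settled once shutdown() returns; typically only the
    # first few completed and the rest report cancelled().
    print(sum(f.done() for f in futures))        # 10
    print(sum(f.cancelled() for f in futures))   # most of the queued ones
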
The remaining futures are cancelled. - You can avoid having to call this method explicitly if you use the - :keyword:`with` statement, which will shutdown the :class:`Executor` - (waiting as if :meth:`Executor.shutdown` were called with *wait* set to - ``True``):: + You can avoid having to call this method explicitly if you use the + :keyword:`with` statement, which will shutdown the :class:`Executor` + (waiting as if :meth:`Executor.shutdown` were called with *wait* set to + ``True``):: - import shutil - with ThreadPoolExecutor(max_workers=4) as e: - e.submit(shutil.copy, 'src1.txt', 'dest1.txt') - e.submit(shutil.copy, 'src2.txt', 'dest2.txt') - e.submit(shutil.copy, 'src3.txt', 'dest3.txt') - e.submit(shutil.copy, 'src4.txt', 'dest4.txt') + import shutil + with ThreadPoolExecutor(max_workers=4) as e: + e.submit(shutil.copy, 'src1.txt', 'dest1.txt') + e.submit(shutil.copy, 'src2.txt', 'dest2.txt') + e.submit(shutil.copy, 'src3.txt', 'dest3.txt') + e.submit(shutil.copy, 'src4.txt', 'dest4.txt') - .. versionchanged:: 3.9 - Added *cancel_futures*. + .. versionchanged:: 3.9 + Added *cancel_futures*. ThreadPoolExecutor @@ -188,6 +188,10 @@ And:: ThreadPoolExecutor now reuses idle worker threads before starting *max_workers* worker threads too. + .. versionchanged:: 3.13 + Default value of *max_workers* is changed to + ``min(32, (os.process_cpu_count() or 1) + 4)``. + .. _threadpoolexecutor-example: @@ -202,7 +206,7 @@ ThreadPoolExecutor Example 'http://www.cnn.com/', 'http://europe.wsj.com/', 'http://www.bbc.co.uk/', - 'http://some-made-up-domain.com/'] + 'http://nonexistant-subdomain.python.org/'] # Retrieve a single page and report the URL and contents def load_url(url, timeout): @@ -243,16 +247,17 @@ to a :class:`ProcessPoolExecutor` will result in deadlock. An :class:`Executor` subclass that executes calls asynchronously using a pool of at most *max_workers* processes. If *max_workers* is ``None`` or not - given, it will default to the number of processors on the machine. + given, it will default to :func:`os.process_cpu_count`. If *max_workers* is less than or equal to ``0``, then a :exc:`ValueError` will be raised. On Windows, *max_workers* must be less than or equal to ``61``. If it is not then :exc:`ValueError` will be raised. If *max_workers* is ``None``, then the default chosen will be at most ``61``, even if more processors are available. - *mp_context* can be a multiprocessing context or None. It will be used to - launch the workers. If *mp_context* is ``None`` or not given, the default - multiprocessing context is used. + *mp_context* can be a :mod:`multiprocessing` context or ``None``. It will be + used to launch the workers. If *mp_context* is ``None`` or not given, the + default :mod:`multiprocessing` context is used. + See :ref:`multiprocessing-start-methods`. *initializer* is an optional callable that is called at the start of each worker process; *initargs* is a tuple of arguments passed to the @@ -280,10 +285,29 @@ to a :class:`ProcessPoolExecutor` will result in deadlock. Added the *initializer* and *initargs* arguments. + .. note:: + The default :mod:`multiprocessing` start method + (see :ref:`multiprocessing-start-methods`) will change away from + *fork* in Python 3.14. Code that requires *fork* be used for their + :class:`ProcessPoolExecutor` should explicitly specify that by + passing a ``mp_context=multiprocessing.get_context("fork")`` + parameter. + .. 
versionchanged:: 3.11 The *max_tasks_per_child* argument was added to allow users to control the lifetime of workers in the pool. + .. versionchanged:: 3.12 + On POSIX systems, if your application has multiple threads and the + :mod:`multiprocessing` context uses the ``"fork"`` start method: + The :func:`os.fork` function called internally to spawn workers may raise a + :exc:`DeprecationWarning`. Pass a *mp_context* configured to use a + different start method. See the :func:`os.fork` documentation for + further explanation. + + .. versionchanged:: 3.13 + *max_workers* uses :func:`os.process_cpu_count` by default, instead of + :func:`os.cpu_count`. .. _processpoolexecutor-example: @@ -337,117 +361,117 @@ The :class:`Future` class encapsulates the asynchronous execution of a callable. instances are created by :meth:`Executor.submit` and should not be created directly except for testing. - .. method:: cancel() + .. method:: cancel() - Attempt to cancel the call. If the call is currently being executed or - finished running and cannot be cancelled then the method will return - ``False``, otherwise the call will be cancelled and the method will - return ``True``. + Attempt to cancel the call. If the call is currently being executed or + finished running and cannot be cancelled then the method will return + ``False``, otherwise the call will be cancelled and the method will + return ``True``. - .. method:: cancelled() + .. method:: cancelled() - Return ``True`` if the call was successfully cancelled. + Return ``True`` if the call was successfully cancelled. - .. method:: running() + .. method:: running() - Return ``True`` if the call is currently being executed and cannot be - cancelled. + Return ``True`` if the call is currently being executed and cannot be + cancelled. - .. method:: done() + .. method:: done() - Return ``True`` if the call was successfully cancelled or finished - running. + Return ``True`` if the call was successfully cancelled or finished + running. - .. method:: result(timeout=None) + .. method:: result(timeout=None) - Return the value returned by the call. If the call hasn't yet completed - then this method will wait up to *timeout* seconds. If the call hasn't - completed in *timeout* seconds, then a - :exc:`TimeoutError` will be raised. *timeout* can be - an int or float. If *timeout* is not specified or ``None``, there is no - limit to the wait time. + Return the value returned by the call. If the call hasn't yet completed + then this method will wait up to *timeout* seconds. If the call hasn't + completed in *timeout* seconds, then a + :exc:`TimeoutError` will be raised. *timeout* can be + an int or float. If *timeout* is not specified or ``None``, there is no + limit to the wait time. - If the future is cancelled before completing then :exc:`.CancelledError` - will be raised. + If the future is cancelled before completing then :exc:`.CancelledError` + will be raised. - If the call raised an exception, this method will raise the same exception. + If the call raised an exception, this method will raise the same exception. - .. method:: exception(timeout=None) + .. method:: exception(timeout=None) - Return the exception raised by the call. If the call hasn't yet - completed then this method will wait up to *timeout* seconds. If the - call hasn't completed in *timeout* seconds, then a - :exc:`TimeoutError` will be raised. *timeout* can be - an int or float. If *timeout* is not specified or ``None``, there is no - limit to the wait time. 
+ Return the exception raised by the call. If the call hasn't yet + completed then this method will wait up to *timeout* seconds. If the + call hasn't completed in *timeout* seconds, then a + :exc:`TimeoutError` will be raised. *timeout* can be + an int or float. If *timeout* is not specified or ``None``, there is no + limit to the wait time. - If the future is cancelled before completing then :exc:`.CancelledError` - will be raised. + If the future is cancelled before completing then :exc:`.CancelledError` + will be raised. - If the call completed without raising, ``None`` is returned. + If the call completed without raising, ``None`` is returned. - .. method:: add_done_callback(fn) + .. method:: add_done_callback(fn) - Attaches the callable *fn* to the future. *fn* will be called, with the - future as its only argument, when the future is cancelled or finishes - running. + Attaches the callable *fn* to the future. *fn* will be called, with the + future as its only argument, when the future is cancelled or finishes + running. - Added callables are called in the order that they were added and are - always called in a thread belonging to the process that added them. If - the callable raises an :exc:`Exception` subclass, it will be logged and - ignored. If the callable raises a :exc:`BaseException` subclass, the - behavior is undefined. + Added callables are called in the order that they were added and are + always called in a thread belonging to the process that added them. If + the callable raises an :exc:`Exception` subclass, it will be logged and + ignored. If the callable raises a :exc:`BaseException` subclass, the + behavior is undefined. - If the future has already completed or been cancelled, *fn* will be - called immediately. + If the future has already completed or been cancelled, *fn* will be + called immediately. The following :class:`Future` methods are meant for use in unit tests and :class:`Executor` implementations. - .. method:: set_running_or_notify_cancel() + .. method:: set_running_or_notify_cancel() - This method should only be called by :class:`Executor` implementations - before executing the work associated with the :class:`Future` and by unit - tests. + This method should only be called by :class:`Executor` implementations + before executing the work associated with the :class:`Future` and by unit + tests. - If the method returns ``False`` then the :class:`Future` was cancelled, - i.e. :meth:`Future.cancel` was called and returned ``True``. Any threads - waiting on the :class:`Future` completing (i.e. through - :func:`as_completed` or :func:`wait`) will be woken up. + If the method returns ``False`` then the :class:`Future` was cancelled, + i.e. :meth:`Future.cancel` was called and returned ``True``. Any threads + waiting on the :class:`Future` completing (i.e. through + :func:`as_completed` or :func:`wait`) will be woken up. - If the method returns ``True`` then the :class:`Future` was not cancelled - and has been put in the running state, i.e. calls to - :meth:`Future.running` will return ``True``. + If the method returns ``True`` then the :class:`Future` was not cancelled + and has been put in the running state, i.e. calls to + :meth:`Future.running` will return ``True``. - This method can only be called once and cannot be called after - :meth:`Future.set_result` or :meth:`Future.set_exception` have been - called. + This method can only be called once and cannot be called after + :meth:`Future.set_result` or :meth:`Future.set_exception` have been + called. - .. 
method:: set_result(result) + .. method:: set_result(result) - Sets the result of the work associated with the :class:`Future` to - *result*. + Sets the result of the work associated with the :class:`Future` to + *result*. - This method should only be used by :class:`Executor` implementations and - unit tests. + This method should only be used by :class:`Executor` implementations and + unit tests. - .. versionchanged:: 3.8 - This method raises - :exc:`concurrent.futures.InvalidStateError` if the :class:`Future` is - already done. + .. versionchanged:: 3.8 + This method raises + :exc:`concurrent.futures.InvalidStateError` if the :class:`Future` is + already done. - .. method:: set_exception(exception) + .. method:: set_exception(exception) - Sets the result of the work associated with the :class:`Future` to the - :class:`Exception` *exception*. + Sets the result of the work associated with the :class:`Future` to the + :class:`Exception` *exception*. - This method should only be used by :class:`Executor` implementations and - unit tests. + This method should only be used by :class:`Executor` implementations and + unit tests. - .. versionchanged:: 3.8 - This method raises - :exc:`concurrent.futures.InvalidStateError` if the :class:`Future` is - already done. + .. versionchanged:: 3.8 + This method raises + :exc:`concurrent.futures.InvalidStateError` if the :class:`Future` is + already done. Module Functions ---------------- diff --git a/Doc/library/concurrent.rst b/Doc/library/concurrent.rst index 2eba5365125805d..8caea78bbb57e82 100644 --- a/Doc/library/concurrent.rst +++ b/Doc/library/concurrent.rst @@ -1,5 +1,5 @@ -The :mod:`concurrent` package -============================= +The :mod:`!concurrent` package +============================== Currently, there is only one module in this package: diff --git a/Doc/library/configparser.rst b/Doc/library/configparser.rst index a925a3dd4fb9c26..bb282428c5fffc1 100644 --- a/Doc/library/configparser.rst +++ b/Doc/library/configparser.rst @@ -69,10 +69,10 @@ Let's take a very basic configuration file that looks like this: CompressionLevel = 9 ForwardX11 = yes - [bitbucket.org] + [forge.example] User = hg - [topsecret.server.com] + [topsecret.server.example] Port = 50022 ForwardX11 = no @@ -89,10 +89,10 @@ creating the above configuration file programmatically. >>> config['DEFAULT'] = {'ServerAliveInterval': '45', ... 'Compression': 'yes', ... 'CompressionLevel': '9'} - >>> config['bitbucket.org'] = {} - >>> config['bitbucket.org']['User'] = 'hg' - >>> config['topsecret.server.com'] = {} - >>> topsecret = config['topsecret.server.com'] + >>> config['forge.example'] = {} + >>> config['forge.example']['User'] = 'hg' + >>> config['topsecret.server.example'] = {} + >>> topsecret = config['topsecret.server.example'] >>> topsecret['Port'] = '50022' # mutates the parser >>> topsecret['ForwardX11'] = 'no' # same here >>> config['DEFAULT']['ForwardX11'] = 'yes' @@ -115,28 +115,28 @@ back and explore the data it holds. 
>>> config.read('example.ini') ['example.ini'] >>> config.sections() - ['bitbucket.org', 'topsecret.server.com'] - >>> 'bitbucket.org' in config + ['forge.example', 'topsecret.server.example'] + >>> 'forge.example' in config True - >>> 'bytebong.com' in config + >>> 'python.org' in config False - >>> config['bitbucket.org']['User'] + >>> config['forge.example']['User'] 'hg' >>> config['DEFAULT']['Compression'] 'yes' - >>> topsecret = config['topsecret.server.com'] + >>> topsecret = config['topsecret.server.example'] >>> topsecret['ForwardX11'] 'no' >>> topsecret['Port'] '50022' - >>> for key in config['bitbucket.org']: # doctest: +SKIP + >>> for key in config['forge.example']: # doctest: +SKIP ... print(key) user compressionlevel serveraliveinterval compression forwardx11 - >>> config['bitbucket.org']['ForwardX11'] + >>> config['forge.example']['ForwardX11'] 'yes' As we can see above, the API is pretty straightforward. The only bit of magic @@ -154,15 +154,15 @@ configuration while the previously existing keys are retained. >>> another_config = configparser.ConfigParser() >>> another_config.read('example.ini') ['example.ini'] - >>> another_config['topsecret.server.com']['Port'] + >>> another_config['topsecret.server.example']['Port'] '50022' - >>> another_config.read_string("[topsecret.server.com]\nPort=48484") - >>> another_config['topsecret.server.com']['Port'] + >>> another_config.read_string("[topsecret.server.example]\nPort=48484") + >>> another_config['topsecret.server.example']['Port'] '48484' - >>> another_config.read_dict({"topsecret.server.com": {"Port": 21212}}) - >>> another_config['topsecret.server.com']['Port'] + >>> another_config.read_dict({"topsecret.server.example": {"Port": 21212}}) + >>> another_config['topsecret.server.example']['Port'] '21212' - >>> another_config['topsecret.server.com']['ForwardX11'] + >>> another_config['topsecret.server.example']['ForwardX11'] 'no' This behaviour is equivalent to a :meth:`ConfigParser.read` call with several @@ -195,9 +195,9 @@ recognizes Boolean values from ``'yes'``/``'no'``, ``'on'``/``'off'``, >>> topsecret.getboolean('ForwardX11') False - >>> config['bitbucket.org'].getboolean('ForwardX11') + >>> config['forge.example'].getboolean('ForwardX11') True - >>> config.getboolean('bitbucket.org', 'Compression') + >>> config.getboolean('forge.example', 'Compression') True Apart from :meth:`~ConfigParser.getboolean`, config parsers also @@ -224,7 +224,7 @@ provide fallback values: Please note that default values have precedence over fallback values. For instance, in our example the ``'CompressionLevel'`` key was specified only in the ``'DEFAULT'`` section. If we try to get it from -the section ``'topsecret.server.com'``, we will always get the default, +the section ``'topsecret.server.example'``, we will always get the default, even if we specify a fallback: .. doctest:: @@ -239,7 +239,7 @@ the ``fallback`` keyword-only argument: .. doctest:: - >>> config.get('bitbucket.org', 'monster', + >>> config.get('forge.example', 'monster', ... fallback='No such things as monsters') 'No such things as monsters' @@ -935,8 +935,10 @@ ConfigParser Objects When *default_section* is given, it specifies the name for the special section holding default values for other sections and interpolation purposes - (normally named ``"DEFAULT"``). This value can be retrieved and changed on - runtime using the ``default_section`` instance attribute. + (normally named ``"DEFAULT"``). 
This value can be retrieved and changed at + runtime using the ``default_section`` instance attribute. This won't + re-evaluate an already parsed config file, but will be used when writing + parsed settings to a new config file. Interpolation behaviour may be customized by providing a custom handler through the *interpolation* argument. ``None`` can be used to turn off diff --git a/Doc/library/constants.rst b/Doc/library/constants.rst index 38dd552a0363acc..401dc9a320c5e06 100644 --- a/Doc/library/constants.rst +++ b/Doc/library/constants.rst @@ -22,16 +22,16 @@ A small number of constants live in the built-in namespace. They are: An object frequently used to represent the absence of a value, as when default arguments are not passed to a function. Assignments to ``None`` are illegal and raise a :exc:`SyntaxError`. - ``None`` is the sole instance of the :data:`NoneType` type. + ``None`` is the sole instance of the :data:`~types.NoneType` type. .. data:: NotImplemented A special value which should be returned by the binary special methods - (e.g. :meth:`__eq__`, :meth:`__lt__`, :meth:`__add__`, :meth:`__rsub__`, + (e.g. :meth:`~object.__eq__`, :meth:`~object.__lt__`, :meth:`~object.__add__`, :meth:`~object.__rsub__`, etc.) to indicate that the operation is not implemented with respect to the other type; may be returned by the in-place binary special methods - (e.g. :meth:`__imul__`, :meth:`__iand__`, etc.) for the same purpose. + (e.g. :meth:`~object.__imul__`, :meth:`~object.__iand__`, etc.) for the same purpose. It should not be evaluated in a boolean context. ``NotImplemented`` is the sole instance of the :data:`types.NotImplementedType` type. diff --git a/Doc/library/contextlib.rst b/Doc/library/contextlib.rst index 1b55868c3aa62ff..f6ebbfacfba509f 100644 --- a/Doc/library/contextlib.rst +++ b/Doc/library/contextlib.rst @@ -45,7 +45,7 @@ Functions and classes provided: This function is a :term:`decorator` that can be used to define a factory function for :keyword:`with` statement context managers, without needing to - create a class or separate :meth:`__enter__` and :meth:`__exit__` methods. + create a class or separate :meth:`~object.__enter__` and :meth:`~object.__exit__` methods. While many objects natively support use in with statements, sometimes a resource needs to be managed that isn't a context manager in its own right, @@ -304,8 +304,15 @@ Functions and classes provided: This context manager is :ref:`reentrant `. + If the code within the :keyword:`!with` block raises a + :exc:`BaseExceptionGroup`, suppressed exceptions are removed from the + group. If any exceptions in the group are not suppressed, a group containing them is re-raised. + .. versionadded:: 3.4 + .. versionchanged:: 3.12 + ``suppress`` now supports suppressing exceptions raised as + part of an :exc:`BaseExceptionGroup`. .. function:: redirect_stdout(new_target) @@ -508,7 +515,7 @@ Functions and classes provided: # the with statement, even if attempts to open files later # in the list raise an exception - The :meth:`__enter__` method returns the :class:`ExitStack` instance, and + The :meth:`~object.__enter__` method returns the :class:`ExitStack` instance, and performs no additional operations. Each instance maintains a stack of registered callbacks that are called in @@ -536,9 +543,9 @@ Functions and classes provided: .. method:: enter_context(cm) - Enters a new context manager and adds its :meth:`__exit__` method to + Enters a new context manager and adds its :meth:`~object.__exit__` method to the callback stack. 
The return value is the result of the context - manager's own :meth:`__enter__` method. + manager's own :meth:`~object.__enter__` method. These context managers may suppress exceptions just as they normally would if used directly as part of a :keyword:`with` statement. @@ -549,18 +556,18 @@ Functions and classes provided: .. method:: push(exit) - Adds a context manager's :meth:`__exit__` method to the callback stack. + Adds a context manager's :meth:`~object.__exit__` method to the callback stack. As ``__enter__`` is *not* invoked, this method can be used to cover - part of an :meth:`__enter__` implementation with a context manager's own - :meth:`__exit__` method. + part of an :meth:`~object.__enter__` implementation with a context manager's own + :meth:`~object.__exit__` method. If passed an object that is not a context manager, this method assumes it is a callback with the same signature as a context manager's - :meth:`__exit__` method and adds it directly to the callback stack. + :meth:`~object.__exit__` method and adds it directly to the callback stack. By returning true values, these callbacks can suppress exceptions the - same way context manager :meth:`__exit__` methods can. + same way context manager :meth:`~object.__exit__` methods can. The passed in object is returned from the function, allowing this method to be used as a function decorator. @@ -707,7 +714,7 @@ Cleaning up in an ``__enter__`` implementation As noted in the documentation of :meth:`ExitStack.push`, this method can be useful in cleaning up an already allocated resource if later -steps in the :meth:`__enter__` implementation fail. +steps in the :meth:`~object.__enter__` implementation fail. Here's an example of doing this for a context manager that accepts resource acquisition and release functions, along with an optional validation function, @@ -864,7 +871,7 @@ And also as a function decorator:: Note that there is one additional limitation when using context managers as function decorators: there's no way to access the return value of -:meth:`__enter__`. If that value is needed, then it is still necessary to use +:meth:`~object.__enter__`. If that value is needed, then it is still necessary to use an explicit ``with`` statement. .. seealso:: diff --git a/Doc/library/contextvars.rst b/Doc/library/contextvars.rst index 08a7c7d74eab97a..0ac2f3d85749b7c 100644 --- a/Doc/library/contextvars.rst +++ b/Doc/library/contextvars.rst @@ -144,6 +144,11 @@ Manual Context Management To get a copy of the current context use the :func:`~contextvars.copy_context` function. + Every thread will have a different top-level :class:`~contextvars.Context` + object. This means that a :class:`ContextVar` object behaves in a similar + fashion to :func:`threading.local()` when values are assigned in different + threads. + Context implements the :class:`collections.abc.Mapping` interface. .. method:: run(callable, *args, **kwargs) diff --git a/Doc/library/copy.rst b/Doc/library/copy.rst index a8bc2fa55ea8c31..74333b2e934814d 100644 --- a/Doc/library/copy.rst +++ b/Doc/library/copy.rst @@ -17,14 +17,22 @@ operations (explained below). Interface summary: -.. function:: copy(x) +.. function:: copy(obj) - Return a shallow copy of *x*. + Return a shallow copy of *obj*. -.. function:: deepcopy(x[, memo]) +.. function:: deepcopy(obj[, memo]) - Return a deep copy of *x*. + Return a deep copy of *obj*. + + +.. function:: replace(obj, /, **changes) + + Creates a new object of the same type as *obj*, replacing fields with values + from *changes*. + + .. 
versionadded:: 3.13 .. exception:: Error @@ -68,7 +76,7 @@ Shallow copies of dictionaries can be made using :meth:`dict.copy`, and of lists by assigning a slice of the entire list, for example, ``copied_list = original_list[:]``. -.. index:: module: pickle +.. index:: pair: module; pickle Classes can use the same interfaces to control copying that they use to control pickling. See the description of module :mod:`pickle` for information on these @@ -79,14 +87,40 @@ pickle functions from the :mod:`copyreg` module. single: __copy__() (copy protocol) single: __deepcopy__() (copy protocol) +.. currentmodule:: None + In order for a class to define its own copy implementation, it can define -special methods :meth:`__copy__` and :meth:`__deepcopy__`. The former is called -to implement the shallow copy operation; no additional arguments are passed. -The latter is called to implement the deep copy operation; it is passed one -argument, the ``memo`` dictionary. If the :meth:`__deepcopy__` implementation needs -to make a deep copy of a component, it should call the :func:`deepcopy` function -with the component as first argument and the memo dictionary as second argument. -The memo dictionary should be treated as an opaque object. +special methods :meth:`~object.__copy__` and :meth:`~object.__deepcopy__`. + +.. method:: object.__copy__(self) + :noindexentry: + + Called to implement the shallow copy operation; + no additional arguments are passed. + +.. method:: object.__deepcopy__(self, memo) + :noindexentry: + + Called to implement the deep copy operation; it is passed one + argument, the *memo* dictionary. If the ``__deepcopy__`` implementation needs + to make a deep copy of a component, it should call the :func:`~copy.deepcopy` function + with the component as first argument and the *memo* dictionary as second argument. + The *memo* dictionary should be treated as an opaque object. + + +.. index:: + single: __replace__() (replace protocol) + +Function :func:`!copy.replace` is more limited +than :func:`~copy.copy` and :func:`~copy.deepcopy`, +and only supports named tuples created by :func:`~collections.namedtuple`, +:mod:`dataclasses`, and other classes which define method :meth:`~object.__replace__`. + +.. method:: object.__replace__(self, /, **changes) + :noindexentry: + + This method should create a new object of the same type, + replacing fields with values from *changes*. .. seealso:: diff --git a/Doc/library/copyreg.rst b/Doc/library/copyreg.rst index 866b180f4bc3b82..2a28c043f807233 100644 --- a/Doc/library/copyreg.rst +++ b/Doc/library/copyreg.rst @@ -7,8 +7,8 @@ **Source code:** :source:`Lib/copyreg.py` .. index:: - module: pickle - module: copy + pair: module; pickle + pair: module; copy -------------- @@ -28,8 +28,8 @@ Such constructors may be factory functions or class instances. .. function:: pickle(type, function, constructor_ob=None) Declares that *function* should be used as a "reduction" function for objects - of type *type*. *function* should return either a string or a tuple - containing two or three elements. See the :attr:`~pickle.Pickler.dispatch_table` + of type *type*. *function* must return either a string or a tuple + containing between two and six elements. See the :attr:`~pickle.Pickler.dispatch_table` for more details on the interface of *function*. 
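A hedged sketch of registering such a reduction function; the ``Point`` class and ``pickle_point`` helper are invented for illustration and return the simplest allowed form, a ``(callable, args)`` pair::

    import copyreg
    import pickle

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def pickle_point(p):
        # pickle will call Point(p.x, p.y) to rebuild the object.
        return Point, (p.x, p.y)

    copyreg.pickle(Point, pickle_point)

    blob = pickle.dumps(Point(1, 2))
    print(pickle.loads(blob).x)   # 1
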
The *constructor_ob* parameter is a legacy feature and is now ignored, but if diff --git a/Doc/library/crypt.rst b/Doc/library/crypt.rst deleted file mode 100644 index 740084b40c5ac96..000000000000000 --- a/Doc/library/crypt.rst +++ /dev/null @@ -1,182 +0,0 @@ -:mod:`crypt` --- Function to check Unix passwords -================================================= - -.. module:: crypt - :platform: Unix - :synopsis: The crypt() function used to check Unix passwords. - :deprecated: - -.. moduleauthor:: Steven D. Majewski -.. sectionauthor:: Steven D. Majewski -.. sectionauthor:: Peter Funk - -**Source code:** :source:`Lib/crypt.py` - -.. index:: - single: crypt(3) - pair: cipher; DES - -.. deprecated-removed:: 3.11 3.13 - The :mod:`crypt` module is deprecated - (see :pep:`PEP 594 <594#crypt>` for details and alternatives). - The :mod:`hashlib` module is a potential replacement for certain use cases. - --------------- - -This module implements an interface to the :manpage:`crypt(3)` routine, which is -a one-way hash function based upon a modified DES algorithm; see the Unix man -page for further details. Possible uses include storing hashed passwords -so you can check passwords without storing the actual password, or attempting -to crack Unix passwords with a dictionary. - -.. index:: single: crypt(3) - -Notice that the behavior of this module depends on the actual implementation of -the :manpage:`crypt(3)` routine in the running system. Therefore, any -extensions available on the current implementation will also be available on -this module. - -.. availability:: Unix, not VxWorks. - -.. include:: ../includes/wasm-notavail.rst - -Hashing Methods ---------------- - -.. versionadded:: 3.3 - -The :mod:`crypt` module defines the list of hashing methods (not all methods -are available on all platforms): - -.. data:: METHOD_SHA512 - - A Modular Crypt Format method with 16 character salt and 86 character - hash based on the SHA-512 hash function. This is the strongest method. - -.. data:: METHOD_SHA256 - - Another Modular Crypt Format method with 16 character salt and 43 - character hash based on the SHA-256 hash function. - -.. data:: METHOD_BLOWFISH - - Another Modular Crypt Format method with 22 character salt and 31 - character hash based on the Blowfish cipher. - - .. versionadded:: 3.7 - -.. data:: METHOD_MD5 - - Another Modular Crypt Format method with 8 character salt and 22 - character hash based on the MD5 hash function. - -.. data:: METHOD_CRYPT - - The traditional method with a 2 character salt and 13 characters of - hash. This is the weakest method. - - -Module Attributes ------------------ - -.. versionadded:: 3.3 - -.. attribute:: methods - - A list of available password hashing algorithms, as - ``crypt.METHOD_*`` objects. This list is sorted from strongest to - weakest. - - -Module Functions ----------------- - -The :mod:`crypt` module defines the following functions: - -.. function:: crypt(word, salt=None) - - *word* will usually be a user's password as typed at a prompt or in a graphical - interface. The optional *salt* is either a string as returned from - :func:`mksalt`, one of the ``crypt.METHOD_*`` values (though not all - may be available on all platforms), or a full encrypted password - including salt, as returned by this function. If *salt* is not - provided, the strongest method available in :attr:`methods` will be used. 
- - Checking a password is usually done by passing the plain-text password - as *word* and the full results of a previous :func:`crypt` call, - which should be the same as the results of this call. - - *salt* (either a random 2 or 16 character string, possibly prefixed with - ``$digit$`` to indicate the method) which will be used to perturb the - encryption algorithm. The characters in *salt* must be in the set - ``[./a-zA-Z0-9]``, with the exception of Modular Crypt Format which - prefixes a ``$digit$``. - - Returns the hashed password as a string, which will be composed of - characters from the same alphabet as the salt. - - .. index:: single: crypt(3) - - Since a few :manpage:`crypt(3)` extensions allow different values, with - different sizes in the *salt*, it is recommended to use the full crypted - password as salt when checking for a password. - - .. versionchanged:: 3.3 - Accept ``crypt.METHOD_*`` values in addition to strings for *salt*. - - -.. function:: mksalt(method=None, *, rounds=None) - - Return a randomly generated salt of the specified method. If no - *method* is given, the strongest method available in :attr:`methods` is - used. - - The return value is a string suitable for passing as the *salt* argument - to :func:`crypt`. - - *rounds* specifies the number of rounds for ``METHOD_SHA256``, - ``METHOD_SHA512`` and ``METHOD_BLOWFISH``. - For ``METHOD_SHA256`` and ``METHOD_SHA512`` it must be an integer between - ``1000`` and ``999_999_999``, the default is ``5000``. For - ``METHOD_BLOWFISH`` it must be a power of two between ``16`` (2\ :sup:`4`) - and ``2_147_483_648`` (2\ :sup:`31`), the default is ``4096`` - (2\ :sup:`12`). - - .. versionadded:: 3.3 - - .. versionchanged:: 3.7 - Added the *rounds* parameter. - - -Examples --------- - -A simple example illustrating typical use (a constant-time comparison -operation is needed to limit exposure to timing attacks. -:func:`hmac.compare_digest` is suitable for this purpose):: - - import pwd - import crypt - import getpass - from hmac import compare_digest as compare_hash - - def login(): - username = input('Python login: ') - cryptedpasswd = pwd.getpwnam(username)[1] - if cryptedpasswd: - if cryptedpasswd == 'x' or cryptedpasswd == '*': - raise ValueError('no support for shadow passwords') - cleartext = getpass.getpass() - return compare_hash(crypt.crypt(cleartext, cryptedpasswd), cryptedpasswd) - else: - return True - -To generate a hash of a password using the strongest available method and -check it against the original:: - - import crypt - from hmac import compare_digest as compare_hash - - hashed = crypt.crypt(plaintext) - if not compare_hash(hashed, crypt.crypt(plaintext, hashed)): - raise ValueError("hashed version doesn't validate against original") diff --git a/Doc/library/crypto.rst b/Doc/library/crypto.rst index ae45549a6d8941d..5a3b7a807213f4e 100644 --- a/Doc/library/crypto.rst +++ b/Doc/library/crypto.rst @@ -8,7 +8,6 @@ Cryptographic Services The modules described in this chapter implement various algorithms of a cryptographic nature. They are available at the discretion of the installation. -On Unix systems, the :mod:`crypt` module may also be available. 
Here's an overview: diff --git a/Doc/library/csv.rst b/Doc/library/csv.rst index 41f11505aa10309..aba398b8ee1e54d 100644 --- a/Doc/library/csv.rst +++ b/Doc/library/csv.rst @@ -288,9 +288,9 @@ The :mod:`csv` module defines the following classes: Inspecting each column, one of two key criteria will be considered to estimate if the sample contains a header: - - the second through n-th rows contain numeric values - - the second through n-th rows contain strings where at least one value's - length differs from that of the putative header of that column. + - the second through n-th rows contain numeric values + - the second through n-th rows contain strings where at least one value's + length differs from that of the putative header of that column. Twenty rows after the first row are sampled; if more than half of columns + rows meet the criteria, :const:`True` is returned. @@ -327,7 +327,7 @@ The :mod:`csv` module defines the following constants: Instructs :class:`writer` objects to quote all non-numeric fields. - Instructs the reader to convert all non-quoted fields to type *float*. + Instructs :class:`reader` objects to convert all non-quoted fields to type *float*. .. data:: QUOTE_NONE @@ -337,7 +337,25 @@ The :mod:`csv` module defines the following constants: character. If *escapechar* is not set, the writer will raise :exc:`Error` if any characters that require escaping are encountered. - Instructs :class:`reader` to perform no special processing of quote characters. + Instructs :class:`reader` objects to perform no special processing of quote characters. + +.. data:: QUOTE_NOTNULL + + Instructs :class:`writer` objects to quote all fields which are not + ``None``. This is similar to :data:`QUOTE_ALL`, except that if a + field value is ``None`` an empty (unquoted) string is written. + + Instructs :class:`reader` objects to interpret an empty (unquoted) field as None and + to otherwise behave as :data:`QUOTE_ALL`. + +.. data:: QUOTE_STRINGS + + Instructs :class:`writer` objects to always place quotes around fields + which are strings. This is similar to :data:`QUOTE_NONNUMERIC`, except that if a + field value is ``None`` an empty (unquoted) string is written. + + Instructs :class:`reader` objects to interpret an empty (unquoted) string as ``None`` and + to otherwise behave as :data:`QUOTE_NONNUMERIC`. The :mod:`csv` module defines the following exception: @@ -458,7 +476,7 @@ Reader objects have the following public attributes: DictReader objects have the following public attribute: -.. attribute:: csvreader.fieldnames +.. attribute:: DictReader.fieldnames If not passed as a parameter when creating the object, this attribute is initialized upon first access or when the first record is read from the diff --git a/Doc/library/ctypes.rst b/Doc/library/ctypes.rst index 0351ec970be0b8f..ef3a9a0f5898af4 100644 --- a/Doc/library/ctypes.rst +++ b/Doc/library/ctypes.rst @@ -6,6 +6,8 @@ .. moduleauthor:: Thomas Heller +**Source code:** :source:`Lib/ctypes` + -------------- :mod:`ctypes` is a foreign function library for Python. It provides C compatible @@ -39,7 +41,7 @@ You load libraries by accessing them as attributes of these objects. *cdll* loads libraries which export functions using the standard ``cdecl`` calling convention, while *windll* libraries call functions using the ``stdcall`` calling convention. *oledll* also uses the ``stdcall`` calling convention, and -assumes the functions return a Windows :c:type:`HRESULT` error code. 
The error +assumes the functions return a Windows :c:type:`!HRESULT` error code. The error code is used to automatically raise an :class:`OSError` exception when the function call fails. @@ -70,8 +72,9 @@ Windows appends the usual ``.dll`` file suffix automatically. On Linux, it is required to specify the filename *including* the extension to load a library, so attribute access can not be used to load libraries. Either the -:meth:`LoadLibrary` method of the dll loaders should be used, or you should load -the library by creating an instance of CDLL by calling the constructor:: +:meth:`~LibraryLoader.LoadLibrary` method of the dll loaders should be used, +or you should load the library by creating an instance of CDLL by calling +the constructor:: >>> cdll.LoadLibrary("libc.so.6") # doctest: +LINUX @@ -218,7 +221,7 @@ Fundamental data types +----------------------+------------------------------------------+----------------------------+ | :class:`c_char` | :c:expr:`char` | 1-character bytes object | +----------------------+------------------------------------------+----------------------------+ -| :class:`c_wchar` | :c:expr:`wchar_t` | 1-character string | +| :class:`c_wchar` | :c:type:`wchar_t` | 1-character string | +----------------------+------------------------------------------+----------------------------+ | :class:`c_byte` | :c:expr:`char` | int | +----------------------+------------------------------------------+----------------------------+ @@ -241,9 +244,9 @@ Fundamental data types | :class:`c_ulonglong` | :c:expr:`unsigned __int64` or | int | | | :c:expr:`unsigned long long` | | +----------------------+------------------------------------------+----------------------------+ -| :class:`c_size_t` | :c:expr:`size_t` | int | +| :class:`c_size_t` | :c:type:`size_t` | int | +----------------------+------------------------------------------+----------------------------+ -| :class:`c_ssize_t` | :c:expr:`ssize_t` or | int | +| :class:`c_ssize_t` | :c:type:`ssize_t` or | int | | | :c:expr:`Py_ssize_t` | | +----------------------+------------------------------------------+----------------------------+ | :class:`c_time_t` | :c:type:`time_t` | int | @@ -331,9 +334,9 @@ property:: 10 b'Hi\x00lo\x00\x00\x00\x00\x00' >>> -The :func:`create_string_buffer` function replaces the old :func:`c_buffer` +The :func:`create_string_buffer` function replaces the old :func:`!c_buffer` function (which is still available as an alias). To create a mutable memory -block containing unicode characters of the C type :c:expr:`wchar_t`, use the +block containing unicode characters of the C type :c:type:`wchar_t`, use the :func:`create_unicode_buffer` function. @@ -359,7 +362,7 @@ from within *IDLE* or *PythonWin*:: >>> printf(b"%f bottles of beer\n", 42.5) Traceback (most recent call last): File "", line 1, in - ArgumentError: argument 2: exceptions.TypeError: Don't know how to convert parameter 2 + ctypes.ArgumentError: argument 2: TypeError: Don't know how to convert parameter 2 >>> As has been mentioned before, all Python types except integers, strings, and @@ -371,6 +374,26 @@ that they can be converted to the required C data type:: 31 >>> +.. _ctypes-calling-variadic-functions: + +Calling variadic functions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +On a lot of platforms calling variadic functions through ctypes is exactly the same +as calling functions with a fixed number of parameters. 
On some platforms, and in +particular ARM64 for Apple Platforms, the calling convention for variadic functions +is different than that for regular functions. + +On those platforms it is required to specify the :attr:`~_FuncPtr.argtypes` +attribute for the regular, non-variadic, function arguments: + +.. code-block:: python3 + + libc.printf.argtypes = [ctypes.c_char_p] + +Because specifying the attribute does not inhibit portability it is advised to always +specify :attr:`~_FuncPtr.argtypes` for all variadic functions. + .. _ctypes-calling-functions-with-own-custom-data-types: @@ -378,9 +401,10 @@ Calling functions with your own custom data types ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can also customize :mod:`ctypes` argument conversion to allow instances of -your own classes be used as function arguments. :mod:`ctypes` looks for an -:attr:`_as_parameter_` attribute and uses this as the function argument. Of -course, it must be one of integer, string, or bytes:: +your own classes be used as function arguments. :mod:`ctypes` looks for an +:attr:`!_as_parameter_` attribute and uses this as the function argument. The +attribute must be an integer, string, bytes, a :mod:`ctypes` instance, or an +object with an :attr:`!_as_parameter_` attribute:: >>> class Bottles: ... def __init__(self, number): @@ -392,7 +416,7 @@ course, it must be one of integer, string, or bytes:: 19 >>> -If you don't want to store the instance's data in the :attr:`_as_parameter_` +If you don't want to store the instance's data in the :attr:`!_as_parameter_` instance variable, you could define a :class:`property` which makes the attribute available on request. @@ -403,9 +427,9 @@ Specifying the required argument types (function prototypes) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It is possible to specify the required argument types of functions exported from -DLLs by setting the :attr:`argtypes` attribute. +DLLs by setting the :attr:`~_FuncPtr.argtypes` attribute. -:attr:`argtypes` must be a sequence of C data types (the ``printf`` function is +:attr:`~_FuncPtr.argtypes` must be a sequence of C data types (the :func:`!printf` function is probably not a good example here, because it takes a variable number and different types of parameters depending on the format string, on the other hand this is quite handy to experiment with this feature):: @@ -422,21 +446,21 @@ prototype for a C function), and tries to convert the arguments to valid types:: >>> printf(b"%d %d %d", 1, 2, 3) Traceback (most recent call last): File "", line 1, in - ArgumentError: argument 2: exceptions.TypeError: wrong type + ctypes.ArgumentError: argument 2: TypeError: 'int' object cannot be interpreted as ctypes.c_char_p >>> printf(b"%s %d %f\n", b"X", 2, 3) X 2 3.000000 13 >>> If you have defined your own classes which you pass to function calls, you have -to implement a :meth:`from_param` class method for them to be able to use them -in the :attr:`argtypes` sequence. The :meth:`from_param` class method receives +to implement a :meth:`~_CData.from_param` class method for them to be able to use them +in the :attr:`~_FuncPtr.argtypes` sequence. 
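A hedged sketch of such a class (the ``PercentValue`` name and the use of ``libm.sqrt`` are purely illustrative; the exact protocol for :meth:`!from_param` is described next)::

    import ctypes
    import ctypes.util

    class PercentValue:
        def __init__(self, value):
            self.value = value

        @classmethod
        def from_param(cls, obj):
            # Validate, then return something ctypes already knows how to pass.
            if not 0.0 <= obj.value <= 100.0:
                raise ValueError("percentage out of range")
            return ctypes.c_double(obj.value)

    libm = ctypes.CDLL(ctypes.util.find_library("m"))
    libm.sqrt.restype = ctypes.c_double
    libm.sqrt.argtypes = [PercentValue]
    print(libm.sqrt(PercentValue(81.0)))   # 9.0
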
The :meth:`~_CData.from_param` class method receives the Python object passed to the function call, it should do a typecheck or whatever is needed to make sure this object is acceptable, and then return the -object itself, its :attr:`_as_parameter_` attribute, or whatever you want to +object itself, its :attr:`!_as_parameter_` attribute, or whatever you want to pass as the C function argument in this case. Again, the result should be an integer, string, bytes, a :mod:`ctypes` instance, or an object with an -:attr:`_as_parameter_` attribute. +:attr:`!_as_parameter_` attribute. .. _ctypes-return-types: @@ -444,17 +468,25 @@ integer, string, bytes, a :mod:`ctypes` instance, or an object with an Return types ^^^^^^^^^^^^ +.. testsetup:: + + from ctypes import CDLL, c_char, c_char_p + from ctypes.util import find_library + libc = CDLL(find_library('c')) + strchr = libc.strchr + + By default functions are assumed to return the C :c:expr:`int` type. Other -return types can be specified by setting the :attr:`restype` attribute of the +return types can be specified by setting the :attr:`~_FuncPtr.restype` attribute of the function object. -The C prototype of ``time()`` is ``time_t time(time_t *)``. Because ``time_t`` -might be of a different type than the default return type ``int``, you should -specify the ``restype``:: +The C prototype of :c:func:`time` is ``time_t time(time_t *)``. Because :c:type:`time_t` +might be of a different type than the default return type :c:expr:`int`, you should +specify the :attr:`!restype` attribute:: >>> libc.time.restype = c_time_t -The argument types can be specified using ``argtypes``:: +The argument types can be specified using :attr:`~_FuncPtr.argtypes`:: >>> libc.time.argtypes = (POINTER(c_time_t),) @@ -463,7 +495,7 @@ To call the function with a ``NULL`` pointer as first argument, use ``None``:: >>> print(libc.time(None)) # doctest: +SKIP 1150640792 -Here is a more advanced example, it uses the ``strchr`` function, which expects +Here is a more advanced example, it uses the :func:`!strchr` function, which expects a string pointer and a char, and returns a pointer to a string:: >>> strchr = libc.strchr @@ -476,26 +508,27 @@ a string pointer and a char, and returns a pointer to a string:: None >>> -If you want to avoid the ``ord("x")`` calls above, you can set the -:attr:`argtypes` attribute, and the second argument will be converted from a -single character Python bytes object into a C char:: +If you want to avoid the :func:`ord("x") ` calls above, you can set the +:attr:`~_FuncPtr.argtypes` attribute, and the second argument will be converted from a +single character Python bytes object into a C char: + +.. doctest:: >>> strchr.restype = c_char_p >>> strchr.argtypes = [c_char_p, c_char] >>> strchr(b"abcdef", b"d") - 'def' + b'def' >>> strchr(b"abcdef", b"def") Traceback (most recent call last): - File "", line 1, in - ArgumentError: argument 2: exceptions.TypeError: one character string expected + ctypes.ArgumentError: argument 2: TypeError: one character bytes, bytearray or integer expected >>> print(strchr(b"abcdef", b"x")) None >>> strchr(b"abcdef", b"d") - 'def' + b'def' >>> You can also use a callable Python object (a function or a class for example) as -the :attr:`restype` attribute, if the foreign function returns an integer. The +the :attr:`~_FuncPtr.restype` attribute, if the foreign function returns an integer. 
The callable will be called with the *integer* the C function returns, and the result of this call will be used as the result of your function call. This is useful to check for error return values and automatically raise an exception:: @@ -523,7 +556,8 @@ get the string representation of an error code, and *returns* an exception. :func:`GetLastError` to retrieve it. Please note that a much more powerful error checking mechanism is available -through the :attr:`errcheck` attribute; see the reference manual for details. +through the :attr:`~_FuncPtr.errcheck` attribute; +see the reference manual for details. .. _ctypes-passing-pointers: @@ -561,7 +595,7 @@ Structures and unions Structures and unions must derive from the :class:`Structure` and :class:`Union` base classes which are defined in the :mod:`ctypes` module. Each subclass must -define a :attr:`_fields_` attribute. :attr:`_fields_` must be a list of +define a :attr:`~Structure._fields_` attribute. :attr:`!_fields_` must be a list of *2-tuples*, containing a *field name* and a *field type*. The field type must be a :mod:`ctypes` type like :class:`c_int`, or any other @@ -633,9 +667,9 @@ Structure/union alignment and byte order By default, Structure and Union fields are aligned in the same way the C compiler does it. It is possible to override this behavior by specifying a -:attr:`_pack_` class attribute in the subclass definition. This must be set to a -positive integer and specifies the maximum alignment for the fields. This is -what ``#pragma pack(n)`` also does in MSVC. +:attr:`~Structure._pack_` class attribute in the subclass definition. +This must be set to a positive integer and specifies the maximum alignment for the fields. +This is what ``#pragma pack(n)`` also does in MSVC. :mod:`ctypes` uses the native byte order for Structures and Unions. To build structures with non-native byte order, you can use one of the @@ -651,7 +685,7 @@ Bit fields in structures and unions It is possible to create structures and unions containing bit fields. Bit fields are only possible for integer fields, the bit width is specified as the third -item in the :attr:`_fields_` tuples:: +item in the :attr:`~Structure._fields_` tuples:: >>> class Int(Structure): ... _fields_ = [("first_16", c_int, 16), @@ -822,7 +856,7 @@ Type conversions ^^^^^^^^^^^^^^^^ Usually, ctypes does strict type checking. This means, if you have -``POINTER(c_int)`` in the :attr:`argtypes` list of a function or as the type of +``POINTER(c_int)`` in the :attr:`~_FuncPtr.argtypes` list of a function or as the type of a member field in a structure definition, only instances of exactly the same type are accepted. There are some exceptions to this rule, where ctypes accepts other objects. For example, you can pass compatible array instances instead of @@ -843,7 +877,7 @@ pointer types. So, for ``POINTER(c_int)``, ctypes accepts an array of c_int:: >>> In addition, if a function argument is explicitly declared to be a pointer type -(such as ``POINTER(c_int)``) in :attr:`argtypes`, an object of the pointed +(such as ``POINTER(c_int)``) in :attr:`~_FuncPtr.argtypes`, an object of the pointed type (``c_int`` in this case) can be passed to the function. ctypes will apply the required :func:`byref` conversion in this case automatically. @@ -919,8 +953,8 @@ work:: >>> because the new ``class cell`` is not available in the class statement itself. 
-In :mod:`ctypes`, we can define the ``cell`` class and set the :attr:`_fields_` -attribute later, after the class statement:: +In :mod:`ctypes`, we can define the ``cell`` class and set the +:attr:`~Structure._fields_` attribute later, after the class statement:: >>> from ctypes import * >>> class cell(Structure): @@ -970,8 +1004,8 @@ argument, and the callback functions expected argument types as the remaining arguments. I will present an example here which uses the standard C library's -:c:func:`qsort` function, that is used to sort items with the help of a callback -function. :c:func:`qsort` will be used to sort an array of integers:: +:c:func:`!qsort` function, that is used to sort items with the help of a callback +function. :c:func:`!qsort` will be used to sort an array of integers:: >>> IntArray5 = c_int * 5 >>> ia = IntArray5(5, 1, 7, 33, 99) @@ -979,7 +1013,7 @@ function. :c:func:`qsort` will be used to sort an array of integers:: >>> qsort.restype = None >>> -:func:`qsort` must be called with a pointer to the data to sort, the number of +:func:`!qsort` must be called with a pointer to the data to sort, the number of items in the data array, the size of one item, and a pointer to the comparison function, the callback. The callback will then be called with two pointers to items, and it must return a negative integer if the first item is smaller than @@ -1071,7 +1105,7 @@ Some shared libraries not only export functions, they also export variables. An example in the Python library itself is the :c:data:`Py_Version`, Python runtime version number encoded in a single constant integer. -:mod:`ctypes` can access values like this with the :meth:`in_dll` class methods of +:mod:`ctypes` can access values like this with the :meth:`~_CData.in_dll` class methods of the type. *pythonapi* is a predefined symbol giving access to the Python C api:: @@ -1261,13 +1295,13 @@ Finding shared libraries When programming in a compiled language, shared libraries are accessed when compiling/linking a program, and when the program is run. -The purpose of the :func:`find_library` function is to locate a library in a way +The purpose of the :func:`~ctypes.util.find_library` function is to locate a library in a way similar to what the compiler or runtime loader does (on platforms with several versions of a shared library the most recent should be loaded), while the ctypes library loaders act like when a program is run, and call the runtime loader directly. -The :mod:`ctypes.util` module provides a function which can help to determine +The :mod:`!ctypes.util` module provides a function which can help to determine the library to load. @@ -1282,7 +1316,7 @@ the library to load. The exact functionality is system dependent. -On Linux, :func:`find_library` tries to run external programs +On Linux, :func:`~ctypes.util.find_library` tries to run external programs (``/sbin/ldconfig``, ``gcc``, ``objdump`` and ``ld``) to find the library file. It returns the filename of the library file. 
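Since :func:`~ctypes.util.find_library` only returns a library name (or ``None`` when
nothing is found), a common pattern is to pass its result straight to :class:`CDLL`.
A minimal sketch, assuming the C math library is available under the name ``"m"`` on
the current platform::

   from ctypes import CDLL, c_double
   from ctypes.util import find_library

   # find_library() returns a name or path string, or None on failure.
   name = find_library("m")
   if name is None:
       raise OSError("could not locate the math library")

   libm = CDLL(name)                  # loaded via the platform's dlopen()
   libm.sqrt.restype = c_double       # declare the return type ...
   libm.sqrt.argtypes = [c_double]    # ... and the argument type
   print(libm.sqrt(2.0))              # 1.4142135623730951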
@@ -1301,7 +1335,7 @@ Here are some examples:: 'libbz2.so.1.0' >>> -On macOS, :func:`find_library` tries several predefined naming schemes and paths +On macOS, :func:`~ctypes.util.find_library` tries several predefined naming schemes and paths to locate the library, and returns a full pathname if successful:: >>> from ctypes.util import find_library @@ -1315,13 +1349,13 @@ to locate the library, and returns a full pathname if successful:: '/System/Library/Frameworks/AGL.framework/AGL' >>> -On Windows, :func:`find_library` searches along the system search path, and +On Windows, :func:`~ctypes.util.find_library` searches along the system search path, and returns the full pathname, but since there is no predefined naming scheme a call like ``find_library("c")`` will fail and return ``None``. If wrapping a shared library with :mod:`ctypes`, it *may* be better to determine the shared library name at development time, and hardcode that into the wrapper -module instead of using :func:`find_library` to locate the library at runtime. +module instead of using :func:`~ctypes.util.find_library` to locate the library at runtime. .. _ctypes-loading-shared-libraries: @@ -1349,6 +1383,10 @@ way is to instantiate one of the following classes: DLLs and determine which one is not found using Windows debugging and tracing tools. + .. versionchanged:: 3.12 + + The *name* parameter can now be a :term:`path-like object`. + .. seealso:: `Microsoft DUMPBIN tool `_ @@ -1365,7 +1403,12 @@ way is to instantiate one of the following classes: failure, an :class:`OSError` is automatically raised. .. versionchanged:: 3.3 - :exc:`WindowsError` used to be raised. + :exc:`WindowsError` used to be raised, + which is now an alias of :exc:`OSError`. + + .. versionchanged:: 3.12 + + The *name* parameter can now be a :term:`path-like object`. .. class:: WinDLL(name, mode=DEFAULT_MODE, handle=None, use_errno=False, use_last_error=False, winmode=None) @@ -1374,6 +1417,10 @@ way is to instantiate one of the following classes: functions in these libraries use the ``stdcall`` calling convention, and are assumed to return :c:expr:`int` by default. + .. versionchanged:: 3.12 + + The *name* parameter can now be a :term:`path-like object`. + The Python :term:`global interpreter lock` is released before calling any function exported by these libraries, and reacquired afterwards. @@ -1387,12 +1434,16 @@ function exported by these libraries, and reacquired afterwards. Thus, this is only useful to call Python C api functions directly. + .. versionchanged:: 3.12 + + The *name* parameter can now be a :term:`path-like object`. + All these classes can be instantiated by calling them with at least one argument, the pathname of the shared library. If you have an existing handle to an already loaded shared library, it can be passed as the ``handle`` named -parameter, otherwise the underlying platforms ``dlopen`` or ``LoadLibrary`` -function is used to load the library into the process, and to get a handle to -it. +parameter, otherwise the underlying platforms :c:func:`!dlopen` or +:c:func:`!LoadLibrary` function is used to load the library into +the process, and to get a handle to it. The *mode* parameter can be used to specify how the library is loaded. For details, consult the :manpage:`dlopen(3)` manpage. On Windows, *mode* is @@ -1412,14 +1463,14 @@ to a new value and returns the former value. 
The *use_last_error* parameter, when set to true, enables the same mechanism for the Windows error code which is managed by the :func:`GetLastError` and -:func:`SetLastError` Windows API functions; :func:`ctypes.get_last_error` and +:func:`!SetLastError` Windows API functions; :func:`ctypes.get_last_error` and :func:`ctypes.set_last_error` are used to request and change the ctypes private copy of the windows error code. The *winmode* parameter is used on Windows to specify how the library is loaded (since *mode* is ignored). It takes any value that is valid for the Win32 API -``LoadLibraryEx`` flags parameter. When omitted, the default is to use the flags -that result in the most secure DLL load to avoiding issues such as DLL +``LoadLibraryEx`` flags parameter. When omitted, the default is to use the +flags that result in the most secure DLL load, which avoids issues such as DLL hijacking. Passing the full path to the DLL is the safest way to ensure the correct library and dependencies are loaded. @@ -1475,8 +1526,8 @@ underscore to not clash with exported function names: Shared libraries can also be loaded by using one of the prefabricated objects, which are instances of the :class:`LibraryLoader` class, either by calling the -:meth:`LoadLibrary` method, or by retrieving the library as attribute of the -loader instance. +:meth:`~LibraryLoader.LoadLibrary` method, or by retrieving the library as +attribute of the loader instance. .. class:: LibraryLoader(dlltype) @@ -1484,7 +1535,7 @@ loader instance. Class which loads shared libraries. *dlltype* should be one of the :class:`CDLL`, :class:`PyDLL`, :class:`WinDLL`, or :class:`OleDLL` types. - :meth:`__getattr__` has special behavior: It allows loading a shared library by + :meth:`!__getattr__` has special behavior: It allows loading a shared library by accessing it as attribute of a library loader instance. The result is cached, so repeated attribute accesses return the same library each time. @@ -1529,7 +1580,7 @@ object is available: An instance of :class:`PyDLL` that exposes Python C API functions as attributes. Note that all these functions are assumed to return C :c:expr:`int`, which is of course not always the truth, so you have to assign - the correct :attr:`restype` attribute to use these functions. + the correct :attr:`!restype` attribute to use these functions. .. audit-event:: ctypes.dlopen name ctypes.LibraryLoader @@ -1581,7 +1632,7 @@ They are instances of a private class: the callable will be called with this integer, allowing further processing or error checking. Using this is deprecated, for more flexible post processing or error checking use a ctypes data type as - :attr:`restype` and assign a callable to the :attr:`errcheck` attribute. + :attr:`!restype` and assign a callable to the :attr:`errcheck` attribute. .. attribute:: argtypes @@ -1592,14 +1643,14 @@ They are instances of a private class: unspecified arguments as well. When a foreign function is called, each actual argument is passed to the - :meth:`from_param` class method of the items in the :attr:`argtypes` + :meth:`~_CData.from_param` class method of the items in the :attr:`argtypes` tuple, this method allows adapting the actual argument to an object that the foreign function accepts. For example, a :class:`c_char_p` item in the :attr:`argtypes` tuple will convert a string passed as argument into a bytes object using ctypes conversion rules. 
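   As the next paragraph notes, such an item does not have to be a ctypes type at
   all, as long as it provides a suitable ``from_param`` hook.  A minimal sketch,
   assuming ``libc`` is already loaded and using the hypothetical adapter class
   ``Utf8Arg``::

      from ctypes import c_size_t

      class Utf8Arg:
          # Called once per call for the matching argument; whatever it
          # returns (here a bytes object) is what actually gets passed.
          @classmethod
          def from_param(cls, obj):
              if isinstance(obj, bytes):
                  return obj
              if isinstance(obj, str):
                  return obj.encode("utf-8")
              raise TypeError("str or bytes expected")

      libc.strlen.restype = c_size_t
      libc.strlen.argtypes = [Utf8Arg]
      print(libc.strlen("spam"))       # 4 -- the str was encoded by from_param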
New: It is now possible to put items in argtypes which are not ctypes - types, but each item must have a :meth:`from_param` method which returns a + types, but each item must have a :meth:`~_CData.from_param` method which returns a value usable as argument (integer, string, ctypes instance). This allows defining adapters that can adapt custom objects as function parameters. @@ -1613,7 +1664,7 @@ They are instances of a private class: :module: *result* is what the foreign function returns, as specified by the - :attr:`restype` attribute. + :attr:`!restype` attribute. *func* is the foreign function object itself, this allows reusing the same callable object to check or post process the results of several @@ -1634,12 +1685,12 @@ They are instances of a private class: passed arguments. -.. audit-event:: ctypes.seh_exception code foreign-functions +.. audit-event:: ctypes.set_exception code foreign-functions On Windows, when a foreign function call raises a system exception (for example, due to an access violation), it will be captured and replaced with a suitable Python exception. Further, an auditing event - ``ctypes.seh_exception`` with argument ``code`` will be raised, allowing an + ``ctypes.set_exception`` with argument ``code`` will be raised, allowing an audit hook to replace the exception with its own. .. audit-event:: ctypes.call_function func_pointer,arguments foreign-functions @@ -1687,70 +1738,70 @@ See :ref:`ctypes-callback-functions` for examples. Function prototypes created by these factory functions can be instantiated in different ways, depending on the type and number of the parameters in the call: +.. function:: prototype(address) + :noindex: + :module: - .. function:: prototype(address) - :noindex: - :module: + Returns a foreign function at the specified address which must be an integer. - Returns a foreign function at the specified address which must be an integer. +.. function:: prototype(callable) + :noindex: + :module: - .. function:: prototype(callable) - :noindex: - :module: + Create a C callable function (a callback function) from a Python *callable*. - Create a C callable function (a callback function) from a Python *callable*. +.. function:: prototype(func_spec[, paramflags]) + :noindex: + :module: - .. function:: prototype(func_spec[, paramflags]) - :noindex: - :module: + Returns a foreign function exported by a shared library. *func_spec* must + be a 2-tuple ``(name_or_ordinal, library)``. The first item is the name of + the exported function as string, or the ordinal of the exported function + as small integer. The second item is the shared library instance. - Returns a foreign function exported by a shared library. *func_spec* must - be a 2-tuple ``(name_or_ordinal, library)``. The first item is the name of - the exported function as string, or the ordinal of the exported function - as small integer. The second item is the shared library instance. +.. function:: prototype(vtbl_index, name[, paramflags[, iid]]) + :noindex: + :module: - .. function:: prototype(vtbl_index, name[, paramflags[, iid]]) - :noindex: - :module: + Returns a foreign function that will call a COM method. *vtbl_index* is + the index into the virtual function table, a small non-negative + integer. *name* is name of the COM method. *iid* is an optional pointer to + the interface identifier which is used in extended error reporting. - Returns a foreign function that will call a COM method. *vtbl_index* is - the index into the virtual function table, a small non-negative - integer. 
*name* is name of the COM method. *iid* is an optional pointer to - the interface identifier which is used in extended error reporting. + COM methods use a special calling convention: They require a pointer to + the COM interface as first argument, in addition to those parameters that + are specified in the :attr:`!argtypes` tuple. - COM methods use a special calling convention: They require a pointer to - the COM interface as first argument, in addition to those parameters that - are specified in the :attr:`argtypes` tuple. +The optional *paramflags* parameter creates foreign function wrappers with much +more functionality than the features described above. - The optional *paramflags* parameter creates foreign function wrappers with much - more functionality than the features described above. +*paramflags* must be a tuple of the same length as :attr:`~_FuncPtr.argtypes`. - *paramflags* must be a tuple of the same length as :attr:`argtypes`. +Each item in this tuple contains further information about a parameter, it must +be a tuple containing one, two, or three items. - Each item in this tuple contains further information about a parameter, it must - be a tuple containing one, two, or three items. +The first item is an integer containing a combination of direction +flags for the parameter: - The first item is an integer containing a combination of direction - flags for the parameter: + 1 + Specifies an input parameter to the function. - 1 - Specifies an input parameter to the function. + 2 + Output parameter. The foreign function fills in a value. - 2 - Output parameter. The foreign function fills in a value. + 4 + Input parameter which defaults to the integer zero. - 4 - Input parameter which defaults to the integer zero. +The optional second item is the parameter name as string. If this is specified, +the foreign function can be called with named parameters. - The optional second item is the parameter name as string. If this is specified, - the foreign function can be called with named parameters. +The optional third item is the default value for this parameter. - The optional third item is the default value for this parameter. -This example demonstrates how to wrap the Windows ``MessageBoxW`` function so +The following example demonstrates how to wrap the Windows ``MessageBoxW`` function so that it supports default parameters and named arguments. The C declaration from the windows header file is this:: @@ -1798,7 +1849,7 @@ value if there is a single one, or a tuple containing the output parameter values when there are more than one, so the GetWindowRect function now returns a RECT instance, when called. -Output parameters can be combined with the :attr:`errcheck` protocol to do +Output parameters can be combined with the :attr:`~_FuncPtr.errcheck` protocol to do further output processing and error checking. The win32 ``GetWindowRect`` api function returns a ``BOOL`` to signal success or failure, so this function could do the error checking, and raises an exception when the api call failed:: @@ -1811,7 +1862,7 @@ do the error checking, and raises an exception when the api call failed:: >>> GetWindowRect.errcheck = errcheck >>> -If the :attr:`errcheck` function returns the argument tuple it receives +If the :attr:`~_FuncPtr.errcheck` function returns the argument tuple it receives unchanged, :mod:`ctypes` continues the normal processing it does on the output parameters. 
If you want to return a tuple of window coordinates instead of a ``RECT`` instance, you can retrieve the fields in the function and return them @@ -1961,7 +2012,7 @@ Utility functions .. function:: get_last_error() Windows only: returns the current value of the ctypes-private copy of the system - :data:`LastError` variable in the calling thread. + :data:`!LastError` variable in the calling thread. .. audit-event:: ctypes.get_last_error "" ctypes.get_last_error @@ -1979,17 +2030,17 @@ Utility functions specifying an address, or a ctypes instance. -.. function:: POINTER(type) +.. function:: POINTER(type, /) - This factory function creates and returns a new ctypes pointer type. Pointer - types are cached and reused internally, so calling this function repeatedly is - cheap. *type* must be a ctypes type. + Create and return a new ctypes pointer type. Pointer types are cached and + reused internally, so calling this function repeatedly is cheap. + *type* must be a ctypes type. -.. function:: pointer(obj) +.. function:: pointer(obj, /) - This function creates a new pointer instance, pointing to *obj*. The returned - object is of the type ``POINTER(type(obj))``. + Create a new pointer instance, pointing to *obj*. + The returned object is of the type ``POINTER(type(obj))``. Note: If you just want to pass a pointer to an object to a foreign function call, you should use ``byref(obj)`` which is much faster. @@ -2014,7 +2065,7 @@ Utility functions .. function:: set_last_error(value) Windows only: set the current value of the ctypes-private copy of the system - :data:`LastError` variable in the calling thread to *value* and return the + :data:`!LastError` variable in the calling thread to *value* and return the previous value. .. audit-event:: ctypes.set_last_error error ctypes.set_last_error @@ -2038,13 +2089,14 @@ Utility functions .. function:: WinError(code=None, descr=None) Windows only: this function is probably the worst-named thing in ctypes. It - creates an instance of OSError. If *code* is not specified, + creates an instance of :exc:`OSError`. If *code* is not specified, ``GetLastError`` is called to determine the error code. If *descr* is not specified, :func:`FormatError` is called to get a textual description of the error. .. versionchanged:: 3.3 - An instance of :exc:`WindowsError` used to be created. + An instance of :exc:`WindowsError` used to be created, which is now an + alias of :exc:`OSError`. .. function:: wstring_at(address, size=-1) @@ -2110,8 +2162,8 @@ Data types This method adapts *obj* to a ctypes type. It is called with the actual object used in a foreign function call when the type is present in the - foreign function's :attr:`argtypes` tuple; it must return an object that - can be used as a function call parameter. + foreign function's :attr:`~_FuncPtr.argtypes` tuple; + it must return an object that can be used as a function call parameter. All ctypes data types have a default implementation of this classmethod that normally returns *obj* if that is an instance of the type. Some @@ -2176,13 +2228,13 @@ Fundamental data types Fundamental data types, when returned as foreign function call results, or, for example, by retrieving structure field members or array items, are transparently converted to native Python types. In other words, if a foreign function has a -:attr:`restype` of :class:`c_char_p`, you will always receive a Python bytes +:attr:`~_FuncPtr.restype` of :class:`c_char_p`, you will always receive a Python bytes object, *not* a :class:`c_char_p` instance. .. 
XXX above is false, it actually returns a Unicode string Subclasses of fundamental data types do *not* inherit this behavior. So, if a -foreign functions :attr:`restype` is a subclass of :class:`c_void_p`, you will +foreign functions :attr:`!restype` is a subclass of :class:`c_void_p`, you will receive an instance of this subclass from the function call. Of course, you can get the value of the pointer by accessing the ``value`` attribute. @@ -2360,7 +2412,7 @@ These are the fundamental ctypes data types: .. class:: c_wchar - Represents the C :c:expr:`wchar_t` datatype, and interprets the value as a + Represents the C :c:type:`wchar_t` datatype, and interprets the value as a single character unicode string. The constructor accepts an optional string initializer, the length of the string must be exactly one character. @@ -2381,7 +2433,7 @@ These are the fundamental ctypes data types: .. class:: HRESULT - Windows only: Represents a :c:type:`HRESULT` value, which contains success or + Windows only: Represents a :c:type:`!HRESULT` value, which contains success or error information for a function or method call. @@ -2390,9 +2442,9 @@ These are the fundamental ctypes data types: Represents the C :c:expr:`PyObject *` datatype. Calling this without an argument creates a ``NULL`` :c:expr:`PyObject *` pointer. -The :mod:`ctypes.wintypes` module provides quite some other Windows specific -data types, for example :c:type:`HWND`, :c:type:`WPARAM`, or :c:type:`DWORD`. Some -useful structures like :c:type:`MSG` or :c:type:`RECT` are also defined. +The :mod:`!ctypes.wintypes` module provides quite some other Windows specific +data types, for example :c:type:`!HWND`, :c:type:`!WPARAM`, or :c:type:`!DWORD`. +Some useful structures like :c:type:`!MSG` or :c:type:`!RECT` are also defined. .. _ctypes-structured-data-types: @@ -2479,6 +2531,7 @@ fields, or any other data types containing pointer type fields. An optional small integer that allows overriding the alignment of structure fields in the instance. :attr:`_pack_` must already be defined when :attr:`_fields_` is assigned, otherwise it will have no effect. + Setting this attribute to 0 is the same as not setting it at all. .. attribute:: _anonymous_ diff --git a/Doc/library/curses.ascii.rst b/Doc/library/curses.ascii.rst index a69dbb2ac06572e..410b76e77c025ba 100644 --- a/Doc/library/curses.ascii.rst +++ b/Doc/library/curses.ascii.rst @@ -7,87 +7,89 @@ .. moduleauthor:: Eric S. Raymond .. sectionauthor:: Eric S. Raymond +**Source code:** :source:`Lib/curses/ascii.py` + -------------- The :mod:`curses.ascii` module supplies name constants for ASCII characters and functions to test membership in various ASCII character classes. 
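For orientation, a small sketch exercising both halves of the module, the
classification functions and the control-character constants (output assumes a
standard ASCII environment)::

   import curses.ascii

   # The membership tests accept either a one-character string or an integer.
   print(curses.ascii.isctrl("\t"))                  # True -- TAB is a control char
   print(curses.ascii.isprint("a"))                  # True
   print(curses.ascii.isprint(curses.ascii.ESC))     # False

   # ctrl() maps a character to its control counterpart, and unctrl()
   # gives the conventional printable representation of a control code.
   print(curses.ascii.ctrl("a") == chr(curses.ascii.SOH))   # True (Ctrl-A)
   print(curses.ascii.unctrl(curses.ascii.DEL))             # '^?'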
The constants supplied are names for control characters as follows: -+--------------+----------------------------------------------+ -| Name | Meaning | -+==============+==============================================+ -| :const:`NUL` | | -+--------------+----------------------------------------------+ -| :const:`SOH` | Start of heading, console interrupt | -+--------------+----------------------------------------------+ -| :const:`STX` | Start of text | -+--------------+----------------------------------------------+ -| :const:`ETX` | End of text | -+--------------+----------------------------------------------+ -| :const:`EOT` | End of transmission | -+--------------+----------------------------------------------+ -| :const:`ENQ` | Enquiry, goes with :const:`ACK` flow control | -+--------------+----------------------------------------------+ -| :const:`ACK` | Acknowledgement | -+--------------+----------------------------------------------+ -| :const:`BEL` | Bell | -+--------------+----------------------------------------------+ -| :const:`BS` | Backspace | -+--------------+----------------------------------------------+ -| :const:`TAB` | Tab | -+--------------+----------------------------------------------+ -| :const:`HT` | Alias for :const:`TAB`: "Horizontal tab" | -+--------------+----------------------------------------------+ -| :const:`LF` | Line feed | -+--------------+----------------------------------------------+ -| :const:`NL` | Alias for :const:`LF`: "New line" | -+--------------+----------------------------------------------+ -| :const:`VT` | Vertical tab | -+--------------+----------------------------------------------+ -| :const:`FF` | Form feed | -+--------------+----------------------------------------------+ -| :const:`CR` | Carriage return | -+--------------+----------------------------------------------+ -| :const:`SO` | Shift-out, begin alternate character set | -+--------------+----------------------------------------------+ -| :const:`SI` | Shift-in, resume default character set | -+--------------+----------------------------------------------+ -| :const:`DLE` | Data-link escape | -+--------------+----------------------------------------------+ -| :const:`DC1` | XON, for flow control | -+--------------+----------------------------------------------+ -| :const:`DC2` | Device control 2, block-mode flow control | -+--------------+----------------------------------------------+ -| :const:`DC3` | XOFF, for flow control | -+--------------+----------------------------------------------+ -| :const:`DC4` | Device control 4 | -+--------------+----------------------------------------------+ -| :const:`NAK` | Negative acknowledgement | -+--------------+----------------------------------------------+ -| :const:`SYN` | Synchronous idle | -+--------------+----------------------------------------------+ -| :const:`ETB` | End transmission block | -+--------------+----------------------------------------------+ -| :const:`CAN` | Cancel | -+--------------+----------------------------------------------+ -| :const:`EM` | End of medium | -+--------------+----------------------------------------------+ -| :const:`SUB` | Substitute | -+--------------+----------------------------------------------+ -| :const:`ESC` | Escape | -+--------------+----------------------------------------------+ -| :const:`FS` | File separator | -+--------------+----------------------------------------------+ -| :const:`GS` | Group separator | -+--------------+----------------------------------------------+ -| 
:const:`RS` | Record separator, block-mode terminator | -+--------------+----------------------------------------------+ -| :const:`US` | Unit separator | -+--------------+----------------------------------------------+ -| :const:`SP` | Space | -+--------------+----------------------------------------------+ -| :const:`DEL` | Delete | -+--------------+----------------------------------------------+ ++---------------+----------------------------------------------+ +| Name | Meaning | ++===============+==============================================+ +| .. data:: NUL | | ++---------------+----------------------------------------------+ +| .. data:: SOH | Start of heading, console interrupt | ++---------------+----------------------------------------------+ +| .. data:: STX | Start of text | ++---------------+----------------------------------------------+ +| .. data:: ETX | End of text | ++---------------+----------------------------------------------+ +| .. data:: EOT | End of transmission | ++---------------+----------------------------------------------+ +| .. data:: ENQ | Enquiry, goes with :const:`ACK` flow control | ++---------------+----------------------------------------------+ +| .. data:: ACK | Acknowledgement | ++---------------+----------------------------------------------+ +| .. data:: BEL | Bell | ++---------------+----------------------------------------------+ +| .. data:: BS | Backspace | ++---------------+----------------------------------------------+ +| .. data:: TAB | Tab | ++---------------+----------------------------------------------+ +| .. data:: HT | Alias for :const:`TAB`: "Horizontal tab" | ++---------------+----------------------------------------------+ +| .. data:: LF | Line feed | ++---------------+----------------------------------------------+ +| .. data:: NL | Alias for :const:`LF`: "New line" | ++---------------+----------------------------------------------+ +| .. data:: VT | Vertical tab | ++---------------+----------------------------------------------+ +| .. data:: FF | Form feed | ++---------------+----------------------------------------------+ +| .. data:: CR | Carriage return | ++---------------+----------------------------------------------+ +| .. data:: SO | Shift-out, begin alternate character set | ++---------------+----------------------------------------------+ +| .. data:: SI | Shift-in, resume default character set | ++---------------+----------------------------------------------+ +| .. data:: DLE | Data-link escape | ++---------------+----------------------------------------------+ +| .. data:: DC1 | XON, for flow control | ++---------------+----------------------------------------------+ +| .. data:: DC2 | Device control 2, block-mode flow control | ++---------------+----------------------------------------------+ +| .. data:: DC3 | XOFF, for flow control | ++---------------+----------------------------------------------+ +| .. data:: DC4 | Device control 4 | ++---------------+----------------------------------------------+ +| .. data:: NAK | Negative acknowledgement | ++---------------+----------------------------------------------+ +| .. data:: SYN | Synchronous idle | ++---------------+----------------------------------------------+ +| .. data:: ETB | End transmission block | ++---------------+----------------------------------------------+ +| .. data:: CAN | Cancel | ++---------------+----------------------------------------------+ +| .. 
data:: EM | End of medium | ++---------------+----------------------------------------------+ +| .. data:: SUB | Substitute | ++---------------+----------------------------------------------+ +| .. data:: ESC | Escape | ++---------------+----------------------------------------------+ +| .. data:: FS | File separator | ++---------------+----------------------------------------------+ +| .. data:: GS | Group separator | ++---------------+----------------------------------------------+ +| .. data:: RS | Record separator, block-mode terminator | ++---------------+----------------------------------------------+ +| .. data:: US | Unit separator | ++---------------+----------------------------------------------+ +| .. data:: SP | Space | ++---------------+----------------------------------------------+ +| .. data:: DEL | Delete | ++---------------+----------------------------------------------+ Note that many of these have little practical significance in modern usage. The mnemonics derive from teleprinter conventions that predate digital computers. diff --git a/Doc/library/curses.rst b/Doc/library/curses.rst index bf4e69a0170a62c..9b8a98f05f7cbb5 100644 --- a/Doc/library/curses.rst +++ b/Doc/library/curses.rst @@ -9,6 +9,8 @@ .. sectionauthor:: Moshe Zadka .. sectionauthor:: Eric Raymond +**Source code:** :source:`Lib/curses` + -------------- The :mod:`curses` module provides an interface to the curses library, the @@ -105,7 +107,7 @@ The module :mod:`curses` defines the following functions: Return the attribute value for displaying text in the specified color pair. Only the first 256 color pairs are supported. This attribute value can be combined with :const:`A_STANDOUT`, :const:`A_REVERSE`, - and the other :const:`A_\*` attributes. :func:`pair_number` is the counterpart + and the other :const:`!A_\*` attributes. :func:`pair_number` is the counterpart to this function. @@ -221,7 +223,7 @@ The module :mod:`curses` defines the following functions: .. function:: getwin(file) - Read window related data stored in the file by an earlier :func:`putwin` call. + Read window related data stored in the file by an earlier :func:`window.putwin` call. The routine then creates and initializes a new window using that data, returning the new window object. @@ -639,7 +641,8 @@ The module :mod:`curses` defines the following functions: .. function:: update_lines_cols() - Update :envvar:`LINES` and :envvar:`COLS`. Useful for detecting manual screen resize. + Update the :const:`LINES` and :const:`COLS` module variables. + Useful for detecting manual screen resize. .. versionadded:: 3.5 @@ -1295,11 +1298,11 @@ the following methods and attributes: :meth:`refresh`. -.. method:: window.vline(ch, n) - window.vline(y, x, ch, n) +.. method:: window.vline(ch, n[, attr]) + window.vline(y, x, ch, n[, attr]) Display a vertical line starting at ``(y, x)`` with length *n* consisting of the - character *ch*. + character *ch* with attributes *attr*. Constants @@ -1321,9 +1324,9 @@ The :mod:`curses` module defines the following data members: .. data:: version +.. data:: __version__ - A bytes object representing the current version of the module. Also available as - :const:`__version__`. + A bytes object representing the current version of the module. .. data:: ncurses_version @@ -1337,51 +1340,72 @@ The :mod:`curses` module defines the following data members: .. versionadded:: 3.8 +.. data:: COLORS + + The maximum number of colors the terminal can support. + It is defined only after the call to :func:`start_color`. + +.. 
data:: COLOR_PAIRS + + The maximum number of color pairs the terminal can support. + It is defined only after the call to :func:`start_color`. + +.. data:: COLS + + The width of the screen, i.e., the number of columns. + It is defined only after the call to :func:`initscr`. + Updated by :func:`update_lines_cols`, :func:`resizeterm` and + :func:`resize_term`. + +.. data:: LINES + + The height of the screen, i.e., the number of lines. + It is defined only after the call to :func:`initscr`. + Updated by :func:`update_lines_cols`, :func:`resizeterm` and + :func:`resize_term`. + Some constants are available to specify character cell attributes. The exact constants available are system dependent. -+------------------+-------------------------------+ -| Attribute | Meaning | -+==================+===============================+ -| ``A_ALTCHARSET`` | Alternate character set mode | -+------------------+-------------------------------+ -| ``A_BLINK`` | Blink mode | -+------------------+-------------------------------+ -| ``A_BOLD`` | Bold mode | -+------------------+-------------------------------+ -| ``A_DIM`` | Dim mode | -+------------------+-------------------------------+ -| ``A_INVIS`` | Invisible or blank mode | -+------------------+-------------------------------+ -| ``A_ITALIC`` | Italic mode | -+------------------+-------------------------------+ -| ``A_NORMAL`` | Normal attribute | -+------------------+-------------------------------+ -| ``A_PROTECT`` | Protected mode | -+------------------+-------------------------------+ -| ``A_REVERSE`` | Reverse background and | -| | foreground colors | -+------------------+-------------------------------+ -| ``A_STANDOUT`` | Standout mode | -+------------------+-------------------------------+ -| ``A_UNDERLINE`` | Underline mode | -+------------------+-------------------------------+ -| ``A_HORIZONTAL`` | Horizontal highlight | -+------------------+-------------------------------+ -| ``A_LEFT`` | Left highlight | -+------------------+-------------------------------+ -| ``A_LOW`` | Low highlight | -+------------------+-------------------------------+ -| ``A_RIGHT`` | Right highlight | -+------------------+-------------------------------+ -| ``A_TOP`` | Top highlight | -+------------------+-------------------------------+ -| ``A_VERTICAL`` | Vertical highlight | -+------------------+-------------------------------+ -| ``A_CHARTEXT`` | Bit-mask to extract a | -| | character | -+------------------+-------------------------------+ ++------------------------+-------------------------------+ +| Attribute | Meaning | ++========================+===============================+ +| .. data:: A_ALTCHARSET | Alternate character set mode | ++------------------------+-------------------------------+ +| .. data:: A_BLINK | Blink mode | ++------------------------+-------------------------------+ +| .. data:: A_BOLD | Bold mode | ++------------------------+-------------------------------+ +| .. data:: A_DIM | Dim mode | ++------------------------+-------------------------------+ +| .. data:: A_INVIS | Invisible or blank mode | ++------------------------+-------------------------------+ +| .. data:: A_ITALIC | Italic mode | ++------------------------+-------------------------------+ +| .. data:: A_NORMAL | Normal attribute | ++------------------------+-------------------------------+ +| .. data:: A_PROTECT | Protected mode | ++------------------------+-------------------------------+ +| .. 
data:: A_REVERSE | Reverse background and | +| | foreground colors | ++------------------------+-------------------------------+ +| .. data:: A_STANDOUT | Standout mode | ++------------------------+-------------------------------+ +| .. data:: A_UNDERLINE | Underline mode | ++------------------------+-------------------------------+ +| .. data:: A_HORIZONTAL | Horizontal highlight | ++------------------------+-------------------------------+ +| .. data:: A_LEFT | Left highlight | ++------------------------+-------------------------------+ +| .. data:: A_LOW | Low highlight | ++------------------------+-------------------------------+ +| .. data:: A_RIGHT | Right highlight | ++------------------------+-------------------------------+ +| .. data:: A_TOP | Top highlight | ++------------------------+-------------------------------+ +| .. data:: A_VERTICAL | Vertical highlight | ++------------------------+-------------------------------+ .. versionadded:: 3.7 ``A_ITALIC`` was added. @@ -1389,220 +1413,220 @@ The exact constants available are system dependent. Several constants are available to extract corresponding attributes returned by some methods. -+------------------+-------------------------------+ -| Bit-mask | Meaning | -+==================+===============================+ -| ``A_ATTRIBUTES`` | Bit-mask to extract | -| | attributes | -+------------------+-------------------------------+ -| ``A_CHARTEXT`` | Bit-mask to extract a | -| | character | -+------------------+-------------------------------+ -| ``A_COLOR`` | Bit-mask to extract | -| | color-pair field information | -+------------------+-------------------------------+ ++-------------------------+-------------------------------+ +| Bit-mask | Meaning | ++=========================+===============================+ +| .. data:: A_ATTRIBUTES | Bit-mask to extract | +| | attributes | ++-------------------------+-------------------------------+ +| .. data:: A_CHARTEXT | Bit-mask to extract a | +| | character | ++-------------------------+-------------------------------+ +| .. data:: A_COLOR | Bit-mask to extract | +| | color-pair field information | ++-------------------------+-------------------------------+ Keys are referred to by integer constants with names starting with ``KEY_``. The exact keycaps available are system dependent. .. XXX this table is far too large! should it be alphabetized? -+-------------------+--------------------------------------------+ -| Key constant | Key | -+===================+============================================+ -| ``KEY_MIN`` | Minimum key value | -+-------------------+--------------------------------------------+ -| ``KEY_BREAK`` | Break key (unreliable) | -+-------------------+--------------------------------------------+ -| ``KEY_DOWN`` | Down-arrow | -+-------------------+--------------------------------------------+ -| ``KEY_UP`` | Up-arrow | -+-------------------+--------------------------------------------+ -| ``KEY_LEFT`` | Left-arrow | -+-------------------+--------------------------------------------+ -| ``KEY_RIGHT`` | Right-arrow | -+-------------------+--------------------------------------------+ -| ``KEY_HOME`` | Home key (upward+left arrow) | -+-------------------+--------------------------------------------+ -| ``KEY_BACKSPACE`` | Backspace (unreliable) | -+-------------------+--------------------------------------------+ -| ``KEY_F0`` | Function keys. Up to 64 function keys are | -| | supported. 
| -+-------------------+--------------------------------------------+ -| ``KEY_Fn`` | Value of function key *n* | -+-------------------+--------------------------------------------+ -| ``KEY_DL`` | Delete line | -+-------------------+--------------------------------------------+ -| ``KEY_IL`` | Insert line | -+-------------------+--------------------------------------------+ -| ``KEY_DC`` | Delete character | -+-------------------+--------------------------------------------+ -| ``KEY_IC`` | Insert char or enter insert mode | -+-------------------+--------------------------------------------+ -| ``KEY_EIC`` | Exit insert char mode | -+-------------------+--------------------------------------------+ -| ``KEY_CLEAR`` | Clear screen | -+-------------------+--------------------------------------------+ -| ``KEY_EOS`` | Clear to end of screen | -+-------------------+--------------------------------------------+ -| ``KEY_EOL`` | Clear to end of line | -+-------------------+--------------------------------------------+ -| ``KEY_SF`` | Scroll 1 line forward | -+-------------------+--------------------------------------------+ -| ``KEY_SR`` | Scroll 1 line backward (reverse) | -+-------------------+--------------------------------------------+ -| ``KEY_NPAGE`` | Next page | -+-------------------+--------------------------------------------+ -| ``KEY_PPAGE`` | Previous page | -+-------------------+--------------------------------------------+ -| ``KEY_STAB`` | Set tab | -+-------------------+--------------------------------------------+ -| ``KEY_CTAB`` | Clear tab | -+-------------------+--------------------------------------------+ -| ``KEY_CATAB`` | Clear all tabs | -+-------------------+--------------------------------------------+ -| ``KEY_ENTER`` | Enter or send (unreliable) | -+-------------------+--------------------------------------------+ -| ``KEY_SRESET`` | Soft (partial) reset (unreliable) | -+-------------------+--------------------------------------------+ -| ``KEY_RESET`` | Reset or hard reset (unreliable) | -+-------------------+--------------------------------------------+ -| ``KEY_PRINT`` | Print | -+-------------------+--------------------------------------------+ -| ``KEY_LL`` | Home down or bottom (lower left) | -+-------------------+--------------------------------------------+ -| ``KEY_A1`` | Upper left of keypad | -+-------------------+--------------------------------------------+ -| ``KEY_A3`` | Upper right of keypad | -+-------------------+--------------------------------------------+ -| ``KEY_B2`` | Center of keypad | -+-------------------+--------------------------------------------+ -| ``KEY_C1`` | Lower left of keypad | -+-------------------+--------------------------------------------+ -| ``KEY_C3`` | Lower right of keypad | -+-------------------+--------------------------------------------+ -| ``KEY_BTAB`` | Back tab | -+-------------------+--------------------------------------------+ -| ``KEY_BEG`` | Beg (beginning) | -+-------------------+--------------------------------------------+ -| ``KEY_CANCEL`` | Cancel | -+-------------------+--------------------------------------------+ -| ``KEY_CLOSE`` | Close | -+-------------------+--------------------------------------------+ -| ``KEY_COMMAND`` | Cmd (command) | -+-------------------+--------------------------------------------+ -| ``KEY_COPY`` | Copy | -+-------------------+--------------------------------------------+ -| ``KEY_CREATE`` | Create | -+-------------------+--------------------------------------------+ -| 
``KEY_END`` | End | -+-------------------+--------------------------------------------+ -| ``KEY_EXIT`` | Exit | -+-------------------+--------------------------------------------+ -| ``KEY_FIND`` | Find | -+-------------------+--------------------------------------------+ -| ``KEY_HELP`` | Help | -+-------------------+--------------------------------------------+ -| ``KEY_MARK`` | Mark | -+-------------------+--------------------------------------------+ -| ``KEY_MESSAGE`` | Message | -+-------------------+--------------------------------------------+ -| ``KEY_MOVE`` | Move | -+-------------------+--------------------------------------------+ -| ``KEY_NEXT`` | Next | -+-------------------+--------------------------------------------+ -| ``KEY_OPEN`` | Open | -+-------------------+--------------------------------------------+ -| ``KEY_OPTIONS`` | Options | -+-------------------+--------------------------------------------+ -| ``KEY_PREVIOUS`` | Prev (previous) | -+-------------------+--------------------------------------------+ -| ``KEY_REDO`` | Redo | -+-------------------+--------------------------------------------+ -| ``KEY_REFERENCE`` | Ref (reference) | -+-------------------+--------------------------------------------+ -| ``KEY_REFRESH`` | Refresh | -+-------------------+--------------------------------------------+ -| ``KEY_REPLACE`` | Replace | -+-------------------+--------------------------------------------+ -| ``KEY_RESTART`` | Restart | -+-------------------+--------------------------------------------+ -| ``KEY_RESUME`` | Resume | -+-------------------+--------------------------------------------+ -| ``KEY_SAVE`` | Save | -+-------------------+--------------------------------------------+ -| ``KEY_SBEG`` | Shifted Beg (beginning) | -+-------------------+--------------------------------------------+ -| ``KEY_SCANCEL`` | Shifted Cancel | -+-------------------+--------------------------------------------+ -| ``KEY_SCOMMAND`` | Shifted Command | -+-------------------+--------------------------------------------+ -| ``KEY_SCOPY`` | Shifted Copy | -+-------------------+--------------------------------------------+ -| ``KEY_SCREATE`` | Shifted Create | -+-------------------+--------------------------------------------+ -| ``KEY_SDC`` | Shifted Delete char | -+-------------------+--------------------------------------------+ -| ``KEY_SDL`` | Shifted Delete line | -+-------------------+--------------------------------------------+ -| ``KEY_SELECT`` | Select | -+-------------------+--------------------------------------------+ -| ``KEY_SEND`` | Shifted End | -+-------------------+--------------------------------------------+ -| ``KEY_SEOL`` | Shifted Clear line | -+-------------------+--------------------------------------------+ -| ``KEY_SEXIT`` | Shifted Exit | -+-------------------+--------------------------------------------+ -| ``KEY_SFIND`` | Shifted Find | -+-------------------+--------------------------------------------+ -| ``KEY_SHELP`` | Shifted Help | -+-------------------+--------------------------------------------+ -| ``KEY_SHOME`` | Shifted Home | -+-------------------+--------------------------------------------+ -| ``KEY_SIC`` | Shifted Input | -+-------------------+--------------------------------------------+ -| ``KEY_SLEFT`` | Shifted Left arrow | -+-------------------+--------------------------------------------+ -| ``KEY_SMESSAGE`` | Shifted Message | -+-------------------+--------------------------------------------+ -| ``KEY_SMOVE`` | Shifted Move | 
-+-------------------+--------------------------------------------+ -| ``KEY_SNEXT`` | Shifted Next | -+-------------------+--------------------------------------------+ -| ``KEY_SOPTIONS`` | Shifted Options | -+-------------------+--------------------------------------------+ -| ``KEY_SPREVIOUS`` | Shifted Prev | -+-------------------+--------------------------------------------+ -| ``KEY_SPRINT`` | Shifted Print | -+-------------------+--------------------------------------------+ -| ``KEY_SREDO`` | Shifted Redo | -+-------------------+--------------------------------------------+ -| ``KEY_SREPLACE`` | Shifted Replace | -+-------------------+--------------------------------------------+ -| ``KEY_SRIGHT`` | Shifted Right arrow | -+-------------------+--------------------------------------------+ -| ``KEY_SRSUME`` | Shifted Resume | -+-------------------+--------------------------------------------+ -| ``KEY_SSAVE`` | Shifted Save | -+-------------------+--------------------------------------------+ -| ``KEY_SSUSPEND`` | Shifted Suspend | -+-------------------+--------------------------------------------+ -| ``KEY_SUNDO`` | Shifted Undo | -+-------------------+--------------------------------------------+ -| ``KEY_SUSPEND`` | Suspend | -+-------------------+--------------------------------------------+ -| ``KEY_UNDO`` | Undo | -+-------------------+--------------------------------------------+ -| ``KEY_MOUSE`` | Mouse event has occurred | -+-------------------+--------------------------------------------+ -| ``KEY_RESIZE`` | Terminal resize event | -+-------------------+--------------------------------------------+ -| ``KEY_MAX`` | Maximum key value | -+-------------------+--------------------------------------------+ ++-------------------------+--------------------------------------------+ +| Key constant | Key | ++=========================+============================================+ +| .. data:: KEY_MIN | Minimum key value | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_BREAK | Break key (unreliable) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_DOWN | Down-arrow | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_UP | Up-arrow | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_LEFT | Left-arrow | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_RIGHT | Right-arrow | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_HOME | Home key (upward+left arrow) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_BACKSPACE | Backspace (unreliable) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_F0 | Function keys. Up to 64 function keys are | +| | supported. | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_Fn | Value of function key *n* | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_DL | Delete line | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_IL | Insert line | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_DC | Delete character | ++-------------------------+--------------------------------------------+ +| .. 
data:: KEY_IC | Insert char or enter insert mode | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_EIC | Exit insert char mode | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_CLEAR | Clear screen | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_EOS | Clear to end of screen | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_EOL | Clear to end of line | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SF | Scroll 1 line forward | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SR | Scroll 1 line backward (reverse) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_NPAGE | Next page | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_PPAGE | Previous page | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_STAB | Set tab | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_CTAB | Clear tab | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_CATAB | Clear all tabs | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_ENTER | Enter or send (unreliable) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SRESET | Soft (partial) reset (unreliable) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_RESET | Reset or hard reset (unreliable) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_PRINT | Print | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_LL | Home down or bottom (lower left) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_A1 | Upper left of keypad | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_A3 | Upper right of keypad | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_B2 | Center of keypad | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_C1 | Lower left of keypad | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_C3 | Lower right of keypad | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_BTAB | Back tab | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_BEG | Beg (beginning) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_CANCEL | Cancel | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_CLOSE | Close | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_COMMAND | Cmd (command) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_COPY | Copy | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_CREATE | Create | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_END | End | ++-------------------------+--------------------------------------------+ +| .. 
data:: KEY_EXIT | Exit | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_FIND | Find | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_HELP | Help | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_MARK | Mark | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_MESSAGE | Message | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_MOVE | Move | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_NEXT | Next | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_OPEN | Open | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_OPTIONS | Options | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_PREVIOUS | Prev (previous) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_REDO | Redo | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_REFERENCE | Ref (reference) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_REFRESH | Refresh | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_REPLACE | Replace | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_RESTART | Restart | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_RESUME | Resume | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SAVE | Save | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SBEG | Shifted Beg (beginning) | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SCANCEL | Shifted Cancel | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SCOMMAND | Shifted Command | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SCOPY | Shifted Copy | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SCREATE | Shifted Create | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SDC | Shifted Delete char | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SDL | Shifted Delete line | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SELECT | Select | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SEND | Shifted End | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SEOL | Shifted Clear line | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SEXIT | Shifted Exit | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SFIND | Shifted Find | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SHELP | Shifted Help | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SHOME | Shifted Home | ++-------------------------+--------------------------------------------+ +| .. 
data:: KEY_SIC | Shifted Input | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SLEFT | Shifted Left arrow | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SMESSAGE | Shifted Message | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SMOVE | Shifted Move | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SNEXT | Shifted Next | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SOPTIONS | Shifted Options | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SPREVIOUS | Shifted Prev | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SPRINT | Shifted Print | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SREDO | Shifted Redo | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SREPLACE | Shifted Replace | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SRIGHT | Shifted Right arrow | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SRSUME | Shifted Resume | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SSAVE | Shifted Save | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SSUSPEND | Shifted Suspend | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SUNDO | Shifted Undo | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_SUSPEND | Suspend | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_UNDO | Undo | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_MOUSE | Mouse event has occurred | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_RESIZE | Terminal resize event | ++-------------------------+--------------------------------------------+ +| .. data:: KEY_MAX | Maximum key value | ++-------------------------+--------------------------------------------+ On VT100s and their software emulations, such as X terminal emulators, there are -normally at least four function keys (:const:`KEY_F1`, :const:`KEY_F2`, -:const:`KEY_F3`, :const:`KEY_F4`) available, and the arrow keys mapped to +normally at least four function keys (:const:`KEY_F1 `, :const:`KEY_F2 `, +:const:`KEY_F3 `, :const:`KEY_F4 `) available, and the arrow keys mapped to :const:`KEY_UP`, :const:`KEY_DOWN`, :const:`KEY_LEFT` and :const:`KEY_RIGHT` in the obvious way. If your machine has a PC keyboard, it is safe to expect arrow keys and twelve function keys (older PC keyboards may have only ten function @@ -1624,6 +1648,8 @@ keys); also, the following keypad mappings are standard: | :kbd:`Page Down` | KEY_NPAGE | +------------------+-----------+ +.. _curses-acs-codes: + The following table lists characters from the alternate character set. These are inherited from the VT100 terminal, and will generally be available on software emulations such as X terminals. When there is no graphic available, curses @@ -1633,117 +1659,143 @@ falls back on a crude printable ASCII approximation. These are available only after :func:`initscr` has been called. 
-+------------------+------------------------------------------+ -| ACS code | Meaning | -+==================+==========================================+ -| ``ACS_BBSS`` | alternate name for upper right corner | -+------------------+------------------------------------------+ -| ``ACS_BLOCK`` | solid square block | -+------------------+------------------------------------------+ -| ``ACS_BOARD`` | board of squares | -+------------------+------------------------------------------+ -| ``ACS_BSBS`` | alternate name for horizontal line | -+------------------+------------------------------------------+ -| ``ACS_BSSB`` | alternate name for upper left corner | -+------------------+------------------------------------------+ -| ``ACS_BSSS`` | alternate name for top tee | -+------------------+------------------------------------------+ -| ``ACS_BTEE`` | bottom tee | -+------------------+------------------------------------------+ -| ``ACS_BULLET`` | bullet | -+------------------+------------------------------------------+ -| ``ACS_CKBOARD`` | checker board (stipple) | -+------------------+------------------------------------------+ -| ``ACS_DARROW`` | arrow pointing down | -+------------------+------------------------------------------+ -| ``ACS_DEGREE`` | degree symbol | -+------------------+------------------------------------------+ -| ``ACS_DIAMOND`` | diamond | -+------------------+------------------------------------------+ -| ``ACS_GEQUAL`` | greater-than-or-equal-to | -+------------------+------------------------------------------+ -| ``ACS_HLINE`` | horizontal line | -+------------------+------------------------------------------+ -| ``ACS_LANTERN`` | lantern symbol | -+------------------+------------------------------------------+ -| ``ACS_LARROW`` | left arrow | -+------------------+------------------------------------------+ -| ``ACS_LEQUAL`` | less-than-or-equal-to | -+------------------+------------------------------------------+ -| ``ACS_LLCORNER`` | lower left-hand corner | -+------------------+------------------------------------------+ -| ``ACS_LRCORNER`` | lower right-hand corner | -+------------------+------------------------------------------+ -| ``ACS_LTEE`` | left tee | -+------------------+------------------------------------------+ -| ``ACS_NEQUAL`` | not-equal sign | -+------------------+------------------------------------------+ -| ``ACS_PI`` | letter pi | -+------------------+------------------------------------------+ -| ``ACS_PLMINUS`` | plus-or-minus sign | -+------------------+------------------------------------------+ -| ``ACS_PLUS`` | big plus sign | -+------------------+------------------------------------------+ -| ``ACS_RARROW`` | right arrow | -+------------------+------------------------------------------+ -| ``ACS_RTEE`` | right tee | -+------------------+------------------------------------------+ -| ``ACS_S1`` | scan line 1 | -+------------------+------------------------------------------+ -| ``ACS_S3`` | scan line 3 | -+------------------+------------------------------------------+ -| ``ACS_S7`` | scan line 7 | -+------------------+------------------------------------------+ -| ``ACS_S9`` | scan line 9 | -+------------------+------------------------------------------+ -| ``ACS_SBBS`` | alternate name for lower right corner | -+------------------+------------------------------------------+ -| ``ACS_SBSB`` | alternate name for vertical line | -+------------------+------------------------------------------+ -| ``ACS_SBSS`` | alternate name for right tee | 
-+------------------+------------------------------------------+ -| ``ACS_SSBB`` | alternate name for lower left corner | -+------------------+------------------------------------------+ -| ``ACS_SSBS`` | alternate name for bottom tee | -+------------------+------------------------------------------+ -| ``ACS_SSSB`` | alternate name for left tee | -+------------------+------------------------------------------+ -| ``ACS_SSSS`` | alternate name for crossover or big plus | -+------------------+------------------------------------------+ -| ``ACS_STERLING`` | pound sterling | -+------------------+------------------------------------------+ -| ``ACS_TTEE`` | top tee | -+------------------+------------------------------------------+ -| ``ACS_UARROW`` | up arrow | -+------------------+------------------------------------------+ -| ``ACS_ULCORNER`` | upper left corner | -+------------------+------------------------------------------+ -| ``ACS_URCORNER`` | upper right corner | -+------------------+------------------------------------------+ -| ``ACS_VLINE`` | vertical line | -+------------------+------------------------------------------+ ++------------------------+------------------------------------------+ +| ACS code | Meaning | ++========================+==========================================+ +| .. data:: ACS_BBSS | alternate name for upper right corner | ++------------------------+------------------------------------------+ +| .. data:: ACS_BLOCK | solid square block | ++------------------------+------------------------------------------+ +| .. data:: ACS_BOARD | board of squares | ++------------------------+------------------------------------------+ +| .. data:: ACS_BSBS | alternate name for horizontal line | ++------------------------+------------------------------------------+ +| .. data:: ACS_BSSB | alternate name for upper left corner | ++------------------------+------------------------------------------+ +| .. data:: ACS_BSSS | alternate name for top tee | ++------------------------+------------------------------------------+ +| .. data:: ACS_BTEE | bottom tee | ++------------------------+------------------------------------------+ +| .. data:: ACS_BULLET | bullet | ++------------------------+------------------------------------------+ +| .. data:: ACS_CKBOARD | checker board (stipple) | ++------------------------+------------------------------------------+ +| .. data:: ACS_DARROW | arrow pointing down | ++------------------------+------------------------------------------+ +| .. data:: ACS_DEGREE | degree symbol | ++------------------------+------------------------------------------+ +| .. data:: ACS_DIAMOND | diamond | ++------------------------+------------------------------------------+ +| .. data:: ACS_GEQUAL | greater-than-or-equal-to | ++------------------------+------------------------------------------+ +| .. data:: ACS_HLINE | horizontal line | ++------------------------+------------------------------------------+ +| .. data:: ACS_LANTERN | lantern symbol | ++------------------------+------------------------------------------+ +| .. data:: ACS_LARROW | left arrow | ++------------------------+------------------------------------------+ +| .. data:: ACS_LEQUAL | less-than-or-equal-to | ++------------------------+------------------------------------------+ +| .. data:: ACS_LLCORNER | lower left-hand corner | ++------------------------+------------------------------------------+ +| .. 
data:: ACS_LRCORNER | lower right-hand corner | ++------------------------+------------------------------------------+ +| .. data:: ACS_LTEE | left tee | ++------------------------+------------------------------------------+ +| .. data:: ACS_NEQUAL | not-equal sign | ++------------------------+------------------------------------------+ +| .. data:: ACS_PI | letter pi | ++------------------------+------------------------------------------+ +| .. data:: ACS_PLMINUS | plus-or-minus sign | ++------------------------+------------------------------------------+ +| .. data:: ACS_PLUS | big plus sign | ++------------------------+------------------------------------------+ +| .. data:: ACS_RARROW | right arrow | ++------------------------+------------------------------------------+ +| .. data:: ACS_RTEE | right tee | ++------------------------+------------------------------------------+ +| .. data:: ACS_S1 | scan line 1 | ++------------------------+------------------------------------------+ +| .. data:: ACS_S3 | scan line 3 | ++------------------------+------------------------------------------+ +| .. data:: ACS_S7 | scan line 7 | ++------------------------+------------------------------------------+ +| .. data:: ACS_S9 | scan line 9 | ++------------------------+------------------------------------------+ +| .. data:: ACS_SBBS | alternate name for lower right corner | ++------------------------+------------------------------------------+ +| .. data:: ACS_SBSB | alternate name for vertical line | ++------------------------+------------------------------------------+ +| .. data:: ACS_SBSS | alternate name for right tee | ++------------------------+------------------------------------------+ +| .. data:: ACS_SSBB | alternate name for lower left corner | ++------------------------+------------------------------------------+ +| .. data:: ACS_SSBS | alternate name for bottom tee | ++------------------------+------------------------------------------+ +| .. data:: ACS_SSSB | alternate name for left tee | ++------------------------+------------------------------------------+ +| .. data:: ACS_SSSS | alternate name for crossover or big plus | ++------------------------+------------------------------------------+ +| .. data:: ACS_STERLING | pound sterling | ++------------------------+------------------------------------------+ +| .. data:: ACS_TTEE | top tee | ++------------------------+------------------------------------------+ +| .. data:: ACS_UARROW | up arrow | ++------------------------+------------------------------------------+ +| .. data:: ACS_ULCORNER | upper left corner | ++------------------------+------------------------------------------+ +| .. data:: ACS_URCORNER | upper right corner | ++------------------------+------------------------------------------+ +| .. data:: ACS_VLINE | vertical line | ++------------------------+------------------------------------------+ + +The following table lists mouse button constants used by :meth:`getmouse`: + ++----------------------------------+---------------------------------------------+ +| Mouse button constant | Meaning | ++==================================+=============================================+ +| .. data:: BUTTONn_PRESSED | Mouse button *n* pressed | ++----------------------------------+---------------------------------------------+ +| .. data:: BUTTONn_RELEASED | Mouse button *n* released | ++----------------------------------+---------------------------------------------+ +| .. 
data:: BUTTONn_CLICKED | Mouse button *n* clicked |
++----------------------------------+---------------------------------------------+
+| .. data:: BUTTONn_DOUBLE_CLICKED | Mouse button *n* double clicked |
++----------------------------------+---------------------------------------------+
+| .. data:: BUTTONn_TRIPLE_CLICKED | Mouse button *n* triple clicked |
++----------------------------------+---------------------------------------------+
+| .. data:: BUTTON_SHIFT | Shift was down during button state change |
++----------------------------------+---------------------------------------------+
+| .. data:: BUTTON_CTRL | Control was down during button state change |
++----------------------------------+---------------------------------------------+
+| .. data:: BUTTON_ALT | Alt was down during button state change |
++----------------------------------+---------------------------------------------+
+
+.. versionchanged:: 3.10
+   The ``BUTTON5_*`` constants are now exposed if they are provided by the
+   underlying curses library.

 The following table lists the predefined colors:

-+-------------------+----------------------------+
-| Constant | Color |
-+===================+============================+
-| ``COLOR_BLACK`` | Black |
-+-------------------+----------------------------+
-| ``COLOR_BLUE`` | Blue |
-+-------------------+----------------------------+
-| ``COLOR_CYAN`` | Cyan (light greenish blue) |
-+-------------------+----------------------------+
-| ``COLOR_GREEN`` | Green |
-+-------------------+----------------------------+
-| ``COLOR_MAGENTA`` | Magenta (purplish red) |
-+-------------------+----------------------------+
-| ``COLOR_RED`` | Red |
-+-------------------+----------------------------+
-| ``COLOR_WHITE`` | White |
-+-------------------+----------------------------+
-| ``COLOR_YELLOW`` | Yellow |
-+-------------------+----------------------------+
++-------------------------+----------------------------+
+| Constant | Color |
++=========================+============================+
+| .. data:: COLOR_BLACK | Black |
++-------------------------+----------------------------+
+| .. data:: COLOR_BLUE | Blue |
++-------------------------+----------------------------+
+| .. data:: COLOR_CYAN | Cyan (light greenish blue) |
++-------------------------+----------------------------+
+| .. data:: COLOR_GREEN | Green |
++-------------------------+----------------------------+
+| .. data:: COLOR_MAGENTA | Magenta (purplish red) |
++-------------------------+----------------------------+
+| .. data:: COLOR_RED | Red |
++-------------------------+----------------------------+
+| .. data:: COLOR_WHITE | White |
++-------------------------+----------------------------+
+| .. data:: COLOR_YELLOW | Yellow |
++-------------------------+----------------------------+

 :mod:`curses.textpad` --- Text input widget for curses programs

@@ -1849,19 +1901,19 @@ You can instantiate a :class:`Textbox` object as follows:

    Move operations do nothing if the cursor is at an edge where the movement is
    not possible.
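As a minimal sketch of how the editing widget described here is typically driven (the ``main`` function name and the window geometry are arbitrary; :kbd:`Control-G` terminates the edit)::

    import curses
    from curses.textpad import Textbox, rectangle

    def main(stdscr):
        stdscr.addstr(0, 0, "Enter a message: (hit Ctrl-G to send)")

        # A small editing window with a box drawn around it.
        editwin = curses.newwin(5, 30, 2, 1)
        rectangle(stdscr, 1, 0, 1 + 5 + 1, 1 + 30 + 1)
        stdscr.refresh()

        box = Textbox(editwin)
        box.edit()              # let the user edit until Ctrl-G is struck
        return box.gather()     # collect the window contents as a string

    print(curses.wrapper(main))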
The following synonyms are supported where possible: - +------------------------+------------------+ - | Constant | Keystroke | - +========================+==================+ - | :const:`KEY_LEFT` | :kbd:`Control-B` | - +------------------------+------------------+ - | :const:`KEY_RIGHT` | :kbd:`Control-F` | - +------------------------+------------------+ - | :const:`KEY_UP` | :kbd:`Control-P` | - +------------------------+------------------+ - | :const:`KEY_DOWN` | :kbd:`Control-N` | - +------------------------+------------------+ - | :const:`KEY_BACKSPACE` | :kbd:`Control-h` | - +------------------------+------------------+ + +--------------------------------+------------------+ + | Constant | Keystroke | + +================================+==================+ + | :const:`~curses.KEY_LEFT` | :kbd:`Control-B` | + +--------------------------------+------------------+ + | :const:`~curses.KEY_RIGHT` | :kbd:`Control-F` | + +--------------------------------+------------------+ + | :const:`~curses.KEY_UP` | :kbd:`Control-P` | + +--------------------------------+------------------+ + | :const:`~curses.KEY_DOWN` | :kbd:`Control-N` | + +--------------------------------+------------------+ + | :const:`~curses.KEY_BACKSPACE` | :kbd:`Control-h` | + +--------------------------------+------------------+ All other keystrokes are treated as a command to insert the given character and move right (with line wrapping). diff --git a/Doc/library/dataclasses.rst b/Doc/library/dataclasses.rst index 847299649d1efd1..bbbbcb00d8fef83 100644 --- a/Doc/library/dataclasses.rst +++ b/Doc/library/dataclasses.rst @@ -12,8 +12,8 @@ -------------- This module provides a decorator and functions for automatically -adding generated :term:`special method`\s such as :meth:`__init__` and -:meth:`__repr__` to user-defined classes. It was originally described +adding generated :term:`special method`\s such as :meth:`~object.__init__` and +:meth:`~object.__repr__` to user-defined classes. It was originally described in :pep:`557`. The member variables to use in these generated methods are defined @@ -31,7 +31,7 @@ using :pep:`526` type annotations. For example, this code:: def total_cost(self) -> float: return self.unit_price * self.quantity_on_hand -will add, among other things, a :meth:`__init__` that looks like:: +will add, among other things, a :meth:`~object.__init__` that looks like:: def __init__(self, name: str, unit_price: float, quantity_on_hand: int = 0): self.name = name @@ -79,92 +79,93 @@ Module contents class C: ... - @dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, match_args=True, kw_only=False, slots=False, weakref_slot=False) + @dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, + match_args=True, kw_only=False, slots=False, weakref_slot=False) class C: ... The parameters to :func:`dataclass` are: - - ``init``: If true (the default), a :meth:`__init__` method will be + - ``init``: If true (the default), a :meth:`~object.__init__` method will be generated. - If the class already defines :meth:`__init__`, this parameter is + If the class already defines :meth:`~object.__init__`, this parameter is ignored. - - ``repr``: If true (the default), a :meth:`__repr__` method will be + - ``repr``: If true (the default), a :meth:`~object.__repr__` method will be generated. The generated repr string will have the class name and the name and repr of each field, in the order they are defined in the class. 
Fields that are marked as being excluded from the repr are not included. For example: ``InventoryItem(name='widget', unit_price=3.0, quantity_on_hand=10)``. - If the class already defines :meth:`__repr__`, this parameter is + If the class already defines :meth:`~object.__repr__`, this parameter is ignored. - - ``eq``: If true (the default), an :meth:`__eq__` method will be + - ``eq``: If true (the default), an :meth:`~object.__eq__` method will be generated. This method compares the class as if it were a tuple of its fields, in order. Both instances in the comparison must be of the identical type. - If the class already defines :meth:`__eq__`, this parameter is + If the class already defines :meth:`~object.__eq__`, this parameter is ignored. - - ``order``: If true (the default is ``False``), :meth:`__lt__`, - :meth:`__le__`, :meth:`__gt__`, and :meth:`__ge__` methods will be + - ``order``: If true (the default is ``False``), :meth:`~object.__lt__`, + :meth:`~object.__le__`, :meth:`~object.__gt__`, and :meth:`~object.__ge__` methods will be generated. These compare the class as if it were a tuple of its fields, in order. Both instances in the comparison must be of the identical type. If ``order`` is true and ``eq`` is false, a :exc:`ValueError` is raised. - If the class already defines any of :meth:`__lt__`, - :meth:`__le__`, :meth:`__gt__`, or :meth:`__ge__`, then + If the class already defines any of :meth:`~object.__lt__`, + :meth:`~object.__le__`, :meth:`~object.__gt__`, or :meth:`~object.__ge__`, then :exc:`TypeError` is raised. - - ``unsafe_hash``: If ``False`` (the default), a :meth:`__hash__` method + - ``unsafe_hash``: If ``False`` (the default), a :meth:`~object.__hash__` method is generated according to how ``eq`` and ``frozen`` are set. - :meth:`__hash__` is used by built-in :meth:`hash()`, and when objects are + :meth:`~object.__hash__` is used by built-in :meth:`hash()`, and when objects are added to hashed collections such as dictionaries and sets. Having a - :meth:`__hash__` implies that instances of the class are immutable. + :meth:`~object.__hash__` implies that instances of the class are immutable. Mutability is a complicated property that depends on the programmer's - intent, the existence and behavior of :meth:`__eq__`, and the values of + intent, the existence and behavior of :meth:`~object.__eq__`, and the values of the ``eq`` and ``frozen`` flags in the :func:`dataclass` decorator. - By default, :func:`dataclass` will not implicitly add a :meth:`__hash__` + By default, :func:`dataclass` will not implicitly add a :meth:`~object.__hash__` method unless it is safe to do so. Neither will it add or change an - existing explicitly defined :meth:`__hash__` method. Setting the class + existing explicitly defined :meth:`~object.__hash__` method. Setting the class attribute ``__hash__ = None`` has a specific meaning to Python, as - described in the :meth:`__hash__` documentation. + described in the :meth:`~object.__hash__` documentation. - If :meth:`__hash__` is not explicitly defined, or if it is set to ``None``, - then :func:`dataclass` *may* add an implicit :meth:`__hash__` method. + If :meth:`~object.__hash__` is not explicitly defined, or if it is set to ``None``, + then :func:`dataclass` *may* add an implicit :meth:`~object.__hash__` method. Although not recommended, you can force :func:`dataclass` to create a - :meth:`__hash__` method with ``unsafe_hash=True``. This might be the case + :meth:`~object.__hash__` method with ``unsafe_hash=True``. 
This might be the case if your class is logically immutable but can nonetheless be mutated. This is a specialized use case and should be considered carefully. - Here are the rules governing implicit creation of a :meth:`__hash__` - method. Note that you cannot both have an explicit :meth:`__hash__` + Here are the rules governing implicit creation of a :meth:`~object.__hash__` + method. Note that you cannot both have an explicit :meth:`~object.__hash__` method in your dataclass and set ``unsafe_hash=True``; this will result in a :exc:`TypeError`. If ``eq`` and ``frozen`` are both true, by default :func:`dataclass` will - generate a :meth:`__hash__` method for you. If ``eq`` is true and - ``frozen`` is false, :meth:`__hash__` will be set to ``None``, marking it + generate a :meth:`~object.__hash__` method for you. If ``eq`` is true and + ``frozen`` is false, :meth:`~object.__hash__` will be set to ``None``, marking it unhashable (which it is, since it is mutable). If ``eq`` is false, - :meth:`__hash__` will be left untouched meaning the :meth:`__hash__` + :meth:`~object.__hash__` will be left untouched meaning the :meth:`~object.__hash__` method of the superclass will be used (if the superclass is :class:`object`, this means it will fall back to id-based hashing). - ``frozen``: If true (the default is ``False``), assigning to fields will generate an exception. This emulates read-only frozen instances. If - :meth:`__setattr__` or :meth:`__delattr__` is defined in the class, then + :meth:`~object.__setattr__` or :meth:`~object.__delattr__` is defined in the class, then :exc:`TypeError` is raised. See the discussion below. - ``match_args``: If true (the default is ``True``), the ``__match_args__`` tuple will be created from the list of - parameters to the generated :meth:`__init__` method (even if - :meth:`__init__` is not generated, see above). If false, or if + parameters to the generated :meth:`~object.__init__` method (even if + :meth:`~object.__init__` is not generated, see above). If false, or if ``__match_args__`` is already defined in the class, then ``__match_args__`` will not be generated. @@ -172,18 +173,18 @@ Module contents - ``kw_only``: If true (the default value is ``False``), then all fields will be marked as keyword-only. If a field is marked as - keyword-only, then the only effect is that the :meth:`__init__` + keyword-only, then the only effect is that the :meth:`~object.__init__` parameter generated from a keyword-only field must be specified - with a keyword when :meth:`__init__` is called. There is no + with a keyword when :meth:`~object.__init__` is called. There is no effect on any other aspect of dataclasses. See the :term:`parameter` glossary entry for details. Also see the :const:`KW_ONLY` section. .. versionadded:: 3.10 - - ``slots``: If true (the default is ``False``), :attr:`__slots__` attribute + - ``slots``: If true (the default is ``False``), :attr:`~object.__slots__` attribute will be generated and new class will be returned instead of the original one. - If :attr:`__slots__` is already defined in the class, then :exc:`TypeError` + If :attr:`~object.__slots__` is already defined in the class, then :exc:`TypeError` is raised. .. 
versionadded:: 3.10 @@ -214,7 +215,7 @@ Module contents b: int = 0 # assign a default value for 'b' In this example, both ``a`` and ``b`` will be included in the added - :meth:`__init__` method, which will be defined as:: + :meth:`~object.__init__` method, which will be defined as:: def __init__(self, a: int, b: int = 0): @@ -255,13 +256,13 @@ Module contents error to specify both ``default`` and ``default_factory``. - ``init``: If true (the default), this field is included as a - parameter to the generated :meth:`__init__` method. + parameter to the generated :meth:`~object.__init__` method. - ``repr``: If true (the default), this field is included in the - string returned by the generated :meth:`__repr__` method. + string returned by the generated :meth:`~object.__repr__` method. - ``hash``: This can be a bool or ``None``. If true, this field is - included in the generated :meth:`__hash__` method. If ``None`` (the + included in the generated :meth:`~object.__hash__` method. If ``None`` (the default), use the value of ``compare``: this would normally be the expected behavior. A field should be considered in the hash if it's used for comparisons. Setting this value to anything @@ -274,8 +275,8 @@ Module contents is excluded from the hash, it will still be used for comparisons. - ``compare``: If true (the default), this field is included in the - generated equality and comparison methods (:meth:`__eq__`, - :meth:`__gt__`, et al.). + generated equality and comparison methods (:meth:`~object.__eq__`, + :meth:`~object.__gt__`, et al.). - ``metadata``: This can be a mapping or None. None is treated as an empty dict. This value is wrapped in @@ -286,7 +287,7 @@ Module contents namespace in the metadata. - ``kw_only``: If true, this field will be marked as keyword-only. - This is used when the generated :meth:`__init__` method's + This is used when the generated :meth:`~object.__init__` method's parameters are computed. .. versionadded:: 3.10 @@ -318,13 +319,11 @@ Module contents module-level method (see below). Users should never instantiate a :class:`Field` object directly. Its documented attributes are: - - ``name``: The name of the field. - - - ``type``: The type of the field. - - - ``default``, ``default_factory``, ``init``, ``repr``, ``hash``, - ``compare``, ``metadata``, and ``kw_only`` have the identical - meaning and values as they do in the :func:`field` function. + - ``name``: The name of the field. + - ``type``: The type of the field. + - ``default``, ``default_factory``, ``init``, ``repr``, ``hash``, + ``compare``, ``metadata``, and ``kw_only`` have the identical + meaning and values as they do in the :func:`field` function. Other attributes may exist, but they are private and must not be inspected or relied on. @@ -388,7 +387,7 @@ Module contents :func:`astuple` raises :exc:`TypeError` if ``obj`` is not a dataclass instance. -.. function:: make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, match_args=True, kw_only=False, slots=False, weakref_slot=False) +.. 
function:: make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, match_args=True, kw_only=False, slots=False, weakref_slot=False, module=None) Creates a new dataclass with name ``cls_name``, fields as defined in ``fields``, base classes as given in ``bases``, and initialized @@ -400,6 +399,10 @@ Module contents ``match_args``, ``kw_only``, ``slots``, and ``weakref_slot`` have the same meaning as they do in :func:`dataclass`. + If ``module`` is defined, the ``__module__`` attribute + of the dataclass is set to that value. + By default, it is set to the module name of the caller. + This function is not strictly required, because any Python mechanism for creating a new class with ``__annotations__`` can then apply the :func:`dataclass` function to convert that class to @@ -430,13 +433,13 @@ Module contents Class, raises :exc:`TypeError`. If values in ``changes`` do not specify fields, raises :exc:`TypeError`. - The newly returned object is created by calling the :meth:`__init__` + The newly returned object is created by calling the :meth:`~object.__init__` method of the dataclass. This ensures that :meth:`__post_init__`, if present, is also called. Init-only variables without default values, if any exist, must be specified on the call to :func:`replace` so that they can be passed to - :meth:`__init__` and :meth:`__post_init__`. + :meth:`~object.__init__` and :meth:`__post_init__`. It is an error for ``changes`` to contain any fields that are defined as having ``init=False``. A :exc:`ValueError` will be raised @@ -451,6 +454,8 @@ Module contents ``replace()`` (or similarly named) method which handles instance copying. + Dataclass instances are also supported by generic function :func:`copy.replace`. + .. function:: is_dataclass(obj) Return ``True`` if its parameter is a dataclass or an instance of one, @@ -475,7 +480,7 @@ Module contents :const:`KW_ONLY` is otherwise completely ignored. This includes the name of such a field. By convention, a name of ``_`` is used for a :const:`KW_ONLY` field. Keyword-only fields signify - :meth:`__init__` parameters that must be specified as keywords when + :meth:`~object.__init__` parameters that must be specified as keywords when the class is instantiated. In this example, the fields ``y`` and ``z`` will be marked as keyword-only fields:: @@ -496,35 +501,38 @@ Module contents .. exception:: FrozenInstanceError - Raised when an implicitly defined :meth:`__setattr__` or - :meth:`__delattr__` is called on a dataclass which was defined with + Raised when an implicitly defined :meth:`~object.__setattr__` or + :meth:`~object.__delattr__` is called on a dataclass which was defined with ``frozen=True``. It is a subclass of :exc:`AttributeError`. +.. _post-init-processing: + Post-init processing -------------------- -The generated :meth:`__init__` code will call a method named -:meth:`__post_init__`, if :meth:`__post_init__` is defined on the -class. It will normally be called as ``self.__post_init__()``. -However, if any ``InitVar`` fields are defined, they will also be -passed to :meth:`__post_init__` in the order they were defined in the -class. If no :meth:`__init__` method is generated, then -:meth:`__post_init__` will not automatically be called. +.. function:: __post_init__() -Among other uses, this allows for initializing field values that -depend on one or more other fields. 
For example:: + When defined on the class, it will be called by the generated + :meth:`~object.__init__`, normally as ``self.__post_init__()``. + However, if any ``InitVar`` fields are defined, they will also be + passed to :meth:`__post_init__` in the order they were defined in the + class. If no :meth:`~object.__init__` method is generated, then + :meth:`__post_init__` will not automatically be called. - @dataclass - class C: - a: float - b: float - c: float = field(init=False) + Among other uses, this allows for initializing field values that + depend on one or more other fields. For example:: - def __post_init__(self): - self.c = self.a + self.b + @dataclass + class C: + a: float + b: float + c: float = field(init=False) + + def __post_init__(self): + self.c = self.a + self.b -The :meth:`__init__` method generated by :func:`dataclass` does not call base -class :meth:`__init__` methods. If the base class has an :meth:`__init__` method +The :meth:`~object.__init__` method generated by :func:`dataclass` does not call base +class :meth:`~object.__init__` methods. If the base class has an :meth:`~object.__init__` method that has to be called, it is common to call this method in a :meth:`__post_init__` method:: @@ -540,7 +548,7 @@ that has to be called, it is common to call this method in a def __post_init__(self): super().__init__(self.side, self.side) -Note, however, that in general the dataclass-generated :meth:`__init__` methods +Note, however, that in general the dataclass-generated :meth:`~object.__init__` methods don't need to be called, since the derived dataclass will take care of initializing all fields of any base class that is a dataclass itself. @@ -551,7 +559,7 @@ parameters to :meth:`__post_init__`. Also see the warning about how Class variables --------------- -One of two places where :func:`dataclass` actually inspects the type +One of the few places where :func:`dataclass` actually inspects the type of a field is to determine if a field is a class variable as defined in :pep:`526`. It does this by checking if the type of the field is ``typing.ClassVar``. If a field is a ``ClassVar``, it is excluded @@ -562,13 +570,13 @@ module-level :func:`fields` function. Init-only variables ------------------- -The other place where :func:`dataclass` inspects a type annotation is to +Another place where :func:`dataclass` inspects a type annotation is to determine if a field is an init-only variable. It does this by seeing if the type of a field is of type ``dataclasses.InitVar``. If a field is an ``InitVar``, it is considered a pseudo-field called an init-only field. As it is not a true field, it is not returned by the module-level :func:`fields` function. Init-only fields are added as -parameters to the generated :meth:`__init__` method, and are passed to +parameters to the generated :meth:`~object.__init__` method, and are passed to the optional :meth:`__post_init__` method. They are not otherwise used by dataclasses. @@ -596,12 +604,12 @@ Frozen instances It is not possible to create truly immutable Python objects. However, by passing ``frozen=True`` to the :meth:`dataclass` decorator you can emulate immutability. In that case, dataclasses will add -:meth:`__setattr__` and :meth:`__delattr__` methods to the class. These +:meth:`~object.__setattr__` and :meth:`~object.__delattr__` methods to the class. These methods will raise a :exc:`FrozenInstanceError` when invoked. 
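As a rough illustration of the behaviour just described (the ``Point`` class and its fields are invented for the example, not taken from the text above)::

    from dataclasses import dataclass, FrozenInstanceError

    @dataclass(frozen=True)
    class Point:
        x: int
        y: int

    p = Point(1, 2)
    try:
        p.x = 7       # the generated __setattr__ refuses the assignment
    except FrozenInstanceError as exc:
        print("frozen:", exc)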
There is a tiny performance penalty when using ``frozen=True``: -:meth:`__init__` cannot use simple assignment to initialize fields, and -must use :meth:`object.__setattr__`. +:meth:`~object.__init__` cannot use simple assignment to initialize fields, and +must use :meth:`!object.__setattr__`. Inheritance ----------- @@ -629,14 +637,14 @@ example:: The final list of fields is, in order, ``x``, ``y``, ``z``. The final type of ``x`` is ``int``, as specified in class ``C``. -The generated :meth:`__init__` method for ``C`` will look like:: +The generated :meth:`~object.__init__` method for ``C`` will look like:: def __init__(self, x: int = 15, y: int = 0, z: int = 10): -Re-ordering of keyword-only parameters in :meth:`__init__` ----------------------------------------------------------- +Re-ordering of keyword-only parameters in :meth:`~object.__init__` +------------------------------------------------------------------ -After the parameters needed for :meth:`__init__` are computed, any +After the parameters needed for :meth:`~object.__init__` are computed, any keyword-only parameters are moved to come after all regular (non-keyword-only) parameters. This is a requirement of how keyword-only parameters are implemented in Python: they must come @@ -657,7 +665,7 @@ fields, and ``Base.x`` and ``D.z`` are regular fields:: z: int = 10 t: int = field(kw_only=True, default=0) -The generated :meth:`__init__` method for ``D`` will look like:: +The generated :meth:`~object.__init__` method for ``D`` will look like:: def __init__(self, x: Any = 15.0, z: int = 10, *, y: int = 0, w: int = 1, t: int = 0): @@ -666,7 +674,7 @@ the list of fields: parameters derived from regular fields are followed by parameters derived from keyword-only fields. The relative ordering of keyword-only parameters is maintained in the -re-ordered :meth:`__init__` parameter list. +re-ordered :meth:`~object.__init__` parameter list. Default factory functions @@ -678,10 +686,10 @@ example, to create a new instance of a list, use:: mylist: list = field(default_factory=list) -If a field is excluded from :meth:`__init__` (using ``init=False``) +If a field is excluded from :meth:`~object.__init__` (using ``init=False``) and the field also specifies ``default_factory``, then the default factory function will always be called from the generated -:meth:`__init__` function. This happens because there is no other +:meth:`~object.__init__` function. This happens because there is no other way to give the field an initial value. Mutable default values @@ -709,7 +717,7 @@ Using dataclasses, *if* this code was valid:: @dataclass class D: - x: List = [] + x: list = [] # This code raises ValueError def add(self, element): self.x += element @@ -730,7 +738,7 @@ for ``x`` when creating a class instance will share the same copy of ``x``. Because dataclasses just use normal Python class creation they also share this behavior. There is no general way for Data Classes to detect this condition. Instead, the -:func:`dataclass` decorator will raise a :exc:`TypeError` if it +:func:`dataclass` decorator will raise a :exc:`ValueError` if it detects an unhashable default parameter. The assumption is that if a value is unhashable, it is mutable. This is a partial solution, but it does protect against many common errors. 
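To make the contrast concrete, here is a small sketch of the ``default_factory`` idiom discussed above (class and field names are illustrative)::

    from dataclasses import dataclass, field

    @dataclass
    class D:
        # x: list = []  would be rejected as an unhashable (mutable) default
        x: list = field(default_factory=list)   # each instance gets its own list

        def add(self, element):
            self.x.append(element)

    d1, d2 = D(), D()
    d1.add(1)
    print(d1.x, d2.x)   # [1] [] -- no state shared between instances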
diff --git a/Doc/library/datetime.rst b/Doc/library/datetime.rst index f7e2bb3f3c6de33..1bab1fb9bd94646 100644 --- a/Doc/library/datetime.rst +++ b/Doc/library/datetime.rst @@ -19,6 +19,10 @@ The :mod:`datetime` module supplies classes for manipulating dates and times. While date and time arithmetic is supported, the focus of the implementation is on efficient attribute extraction for output formatting and manipulation. +.. tip:: + + Skip to :ref:`the format codes `. + .. seealso:: Module :mod:`calendar` @@ -33,6 +37,11 @@ on efficient attribute extraction for output formatting and manipulation. Package `dateutil `_ Third-party library with expanded time zone and parsing support. + Package `DateType `_ + Third-party library that introduces distinct static types to e.g. allow + :term:`static type checkers ` + to differentiate between naive and aware datetimes. + .. _datetime-naive-aware: Aware and Naive Objects @@ -160,7 +169,7 @@ The :class:`date`, :class:`.datetime`, :class:`.time`, and :class:`timezone` typ share these common features: - Objects of these types are immutable. -- Objects of these types are hashable, meaning that they can be used as +- Objects of these types are :term:`hashable`, meaning that they can be used as dictionary keys. - Objects of these types support efficient pickling via the :mod:`pickle` module. @@ -644,6 +653,9 @@ Instance methods: >>> d.replace(day=26) datetime.date(2002, 12, 26) + :class:`date` objects are also supported by generic function + :func:`copy.replace`. + .. method:: date.timetuple() @@ -737,18 +749,16 @@ Instance methods: .. method:: date.strftime(format) Return a string representing the date, controlled by an explicit format string. - Format codes referring to hours, minutes or seconds will see 0 values. For a - complete list of formatting directives, see - :ref:`strftime-strptime-behavior`. + Format codes referring to hours, minutes or seconds will see 0 values. + See also :ref:`strftime-strptime-behavior` and :meth:`date.isoformat`. .. method:: date.__format__(format) Same as :meth:`.date.strftime`. This makes it possible to specify a format string for a :class:`.date` object in :ref:`formatted string - literals ` and when using :meth:`str.format`. For a - complete list of formatting directives, see - :ref:`strftime-strptime-behavior`. + literals ` and when using :meth:`str.format`. + See also :ref:`strftime-strptime-behavior` and :meth:`date.isoformat`. Examples of Usage: :class:`date` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -765,6 +775,7 @@ Example of counting days to an event:: >>> my_birthday = date(today.year, 6, 24) >>> if my_birthday < today: ... my_birthday = my_birthday.replace(year=today.year + 1) + ... >>> my_birthday datetime.date(2008, 6, 24) >>> time_to_birthday = abs(my_birthday - today) @@ -897,6 +908,10 @@ Other constructors, all class methods: in UTC. As such, the recommended way to create an object representing the current time in UTC is by calling ``datetime.now(timezone.utc)``. + .. deprecated:: 3.12 + + Use :meth:`datetime.now` with :attr:`UTC` instead. + .. classmethod:: datetime.fromtimestamp(timestamp, tz=None) @@ -965,6 +980,10 @@ Other constructors, all class methods: :c:func:`gmtime` function. Raise :exc:`OSError` instead of :exc:`ValueError` on :c:func:`gmtime` failure. + .. deprecated:: 3.12 + + Use :meth:`datetime.fromtimestamp` with :attr:`UTC` instead. + .. 
classmethod:: datetime.fromordinal(ordinal) @@ -974,19 +993,18 @@ Other constructors, all class methods: microsecond of the result are all 0, and :attr:`.tzinfo` is ``None``. -.. classmethod:: datetime.combine(date, time, tzinfo=self.tzinfo) +.. classmethod:: datetime.combine(date, time, tzinfo=time.tzinfo) Return a new :class:`.datetime` object whose date components are equal to the given :class:`date` object's, and whose time components are equal to the given :class:`.time` object's. If the *tzinfo* argument is provided, its value is used to set the :attr:`.tzinfo` attribute of the result, otherwise the :attr:`~.time.tzinfo` attribute of the *time* argument - is used. + is used. If the *date* argument is a :class:`.datetime` object, its time components + and :attr:`.tzinfo` attributes are ignored. For any :class:`.datetime` object *d*, - ``d == datetime.combine(d.date(), d.time(), d.tzinfo)``. If date is a - :class:`.datetime` object, its time components and :attr:`.tzinfo` attributes - are ignored. + ``d == datetime.combine(d.date(), d.time(), d.tzinfo)``. .. versionchanged:: 3.6 Added the *tzinfo* argument. @@ -1045,14 +1063,14 @@ Other constructors, all class methods: Return a :class:`.datetime` corresponding to *date_string*, parsed according to *format*. - This is equivalent to:: + If *format* does not contain microseconds or timezone information, this is equivalent to:: datetime(*(time.strptime(date_string, format)[0:6])) :exc:`ValueError` is raised if the date_string and format can't be parsed by :func:`time.strptime` or if it returns a value which isn't a - time tuple. For a complete list of formatting directives, see - :ref:`strftime-strptime-behavior`. + time tuple. See also :ref:`strftime-strptime-behavior` and + :meth:`datetime.fromisoformat`. @@ -1237,6 +1255,9 @@ Instance methods: ``tzinfo=None`` can be specified to create a naive datetime from an aware datetime with no conversion of date and time data. + :class:`datetime` objects are also supported by generic function + :func:`copy.replace`. + .. versionadded:: 3.6 Added the ``fold`` argument. @@ -1350,7 +1371,7 @@ Instance methods: Because naive ``datetime`` objects are treated by many ``datetime`` methods as local times, it is preferred to use aware datetimes to represent times - in UTC; as a result, using ``utcfromtimetuple`` may give misleading + in UTC; as a result, using :meth:`datetime.utctimetuple` may give misleading results. If you have a naive ``datetime`` representing UTC, use ``datetime.replace(tzinfo=timezone.utc)`` to make it aware, at which point you can use :meth:`.datetime.timetuple`. @@ -1510,20 +1531,21 @@ Instance methods: (which :func:`time.ctime` invokes, but which :meth:`datetime.ctime` does not invoke) conforms to the C standard. + .. method:: datetime.strftime(format) - Return a string representing the date and time, controlled by an explicit format - string. For a complete list of formatting directives, see - :ref:`strftime-strptime-behavior`. + Return a string representing the date and time, + controlled by an explicit format string. + See also :ref:`strftime-strptime-behavior` and :meth:`datetime.isoformat`. .. method:: datetime.__format__(format) Same as :meth:`.datetime.strftime`. This makes it possible to specify a format string for a :class:`.datetime` object in :ref:`formatted string - literals ` and when using :meth:`str.format`. For a - complete list of formatting directives, see - :ref:`strftime-strptime-behavior`. + literals ` and when using :meth:`str.format`. 
+ See also :ref:`strftime-strptime-behavior` and :meth:`datetime.isoformat`. + Examples of Usage: :class:`.datetime` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1812,6 +1834,9 @@ Instance methods: ``tzinfo=None`` can be specified to create a naive :class:`.time` from an aware :class:`.time`, without conversion of the time data. + :class:`time` objects are also supported by generic function + :func:`copy.replace`. + .. versionadded:: 3.6 Added the ``fold`` argument. @@ -1868,17 +1893,15 @@ Instance methods: .. method:: time.strftime(format) Return a string representing the time, controlled by an explicit format - string. For a complete list of formatting directives, see - :ref:`strftime-strptime-behavior`. + string. See also :ref:`strftime-strptime-behavior` and :meth:`time.isoformat`. .. method:: time.__format__(format) - Same as :meth:`.time.strftime`. This makes it possible to specify a format string - for a :class:`.time` object in :ref:`formatted string - literals ` and when using :meth:`str.format`. For a - complete list of formatting directives, see - :ref:`strftime-strptime-behavior`. + Same as :meth:`.time.strftime`. This makes it possible to specify + a format string for a :class:`.time` object in :ref:`formatted string + literals ` and when using :meth:`str.format`. + See also :ref:`strftime-strptime-behavior` and :meth:`time.isoformat`. .. method:: time.utcoffset() @@ -2317,9 +2340,19 @@ versus :meth:`strptime`: +----------------+--------------------------------------------------------+------------------------------------------------------------------------------+ + .. _format-codes: + :meth:`strftime` and :meth:`strptime` Format Codes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +These methods accept format codes that can be used to parse and format dates:: + + >>> datetime.strptime('31/01/22 23:59:59.999999', + ... '%d/%m/%y %H:%M:%S.%f') + datetime.datetime(2022, 1, 31, 23, 59, 59, 999999) + >>> _.strftime('%a %d %b %Y, %I:%M%p') + 'Mon 31 Jan 2022, 11:59PM' + The following is a list of all the format codes that the 1989 C standard requires, and these work on all platforms with a standard C implementation. @@ -2505,10 +2538,7 @@ Notes: Because the format depends on the current locale, care should be taken when making assumptions about the output value. Field orderings will vary (for example, "month/day/year" versus "day/month/year"), and the output may - contain Unicode characters encoded using the locale's default encoding (for - example, if the current locale is ``ja_JP``, the default encoding could be - any one of ``eucJP``, ``SJIS``, or ``utf-8``; use :meth:`locale.getlocale` - to determine the current locale's encoding). + contain non-ASCII characters. (2) The :meth:`strptime` method can parse years in the full [1, 9999] range, but @@ -2601,7 +2631,7 @@ Notes: (9) When used with the :meth:`strptime` method, the leading zero is optional - for formats ``%d``, ``%m``, ``%H``, ``%I``, ``%M``, ``%S``, ``%J``, ``%U``, + for formats ``%d``, ``%m``, ``%H``, ``%I``, ``%M``, ``%S``, ``%j``, ``%U``, ``%W``, and ``%V``. Format ``%y`` does require a leading zero. .. rubric:: Footnotes diff --git a/Doc/library/dbm.rst b/Doc/library/dbm.rst index 2be499337a2a156..766847b971b645a 100644 --- a/Doc/library/dbm.rst +++ b/Doc/library/dbm.rst @@ -245,6 +245,13 @@ supported. Close the ``gdbm`` database. + .. method:: gdbm.clear() + + Remove all items from the ``gdbm`` database. + + .. 
versionadded:: 3.13 + + :mod:`dbm.ndbm` --- Interface based on ndbm ------------------------------------------- @@ -313,6 +320,12 @@ to locate the appropriate header file to simplify building this module. Close the ``ndbm`` database. + .. method:: ndbm.clear() + + Remove all items from the ``ndbm`` database. + + .. versionadded:: 3.13 + :mod:`dbm.dumb` --- Portable DBM implementation ----------------------------------------------- diff --git a/Doc/library/decimal.rst b/Doc/library/decimal.rst index 260108136df7f11..fb8bbf72adf2688 100644 --- a/Doc/library/decimal.rst +++ b/Doc/library/decimal.rst @@ -40,23 +40,23 @@ decimal floating point arithmetic. It offers several advantages over the people learn at school." -- excerpt from the decimal arithmetic specification. * Decimal numbers can be represented exactly. In contrast, numbers like - :const:`1.1` and :const:`2.2` do not have exact representations in binary + ``1.1`` and ``2.2`` do not have exact representations in binary floating point. End users typically would not expect ``1.1 + 2.2`` to display - as :const:`3.3000000000000003` as it does with binary floating point. + as ``3.3000000000000003`` as it does with binary floating point. * The exactness carries over into arithmetic. In decimal floating point, ``0.1 + 0.1 + 0.1 - 0.3`` is exactly equal to zero. In binary floating point, the result - is :const:`5.5511151231257827e-017`. While near to zero, the differences + is ``5.5511151231257827e-017``. While near to zero, the differences prevent reliable equality testing and differences can accumulate. For this reason, decimal is preferred in accounting applications which have strict equality invariants. * The decimal module incorporates a notion of significant places so that ``1.30 - + 1.20`` is :const:`2.50`. The trailing zero is kept to indicate significance. + + 1.20`` is ``2.50``. The trailing zero is kept to indicate significance. This is the customary presentation for monetary applications. For multiplication, the "schoolbook" approach uses all the figures in the - multiplicands. For instance, ``1.3 * 1.2`` gives :const:`1.56` while ``1.30 * - 1.20`` gives :const:`1.5600`. + multiplicands. For instance, ``1.3 * 1.2`` gives ``1.56`` while ``1.30 * + 1.20`` gives ``1.5600``. * Unlike hardware based binary floating point, the decimal module has a user alterable precision (defaulting to 28 places) which can be as large as needed for @@ -88,8 +88,8 @@ context for arithmetic, and signals. A decimal number is immutable. It has a sign, coefficient digits, and an exponent. To preserve significance, the coefficient digits do not truncate trailing zeros. Decimals also include special values such as -:const:`Infinity`, :const:`-Infinity`, and :const:`NaN`. The standard also -differentiates :const:`-0` from :const:`+0`. +``Infinity``, ``-Infinity``, and ``NaN``. The standard also +differentiates ``-0`` from ``+0``. The context for arithmetic is an environment specifying precision, rounding rules, limits on exponents, flags indicating the results of operations, and trap @@ -139,8 +139,8 @@ precision, rounding, or enabled traps:: Decimal instances can be constructed from integers, strings, floats, or tuples. Construction from an integer or a float performs an exact conversion of the value of that integer or float. 
Decimal numbers include special values such as -:const:`NaN` which stands for "Not a number", positive and negative -:const:`Infinity`, and :const:`-0`:: +``NaN`` which stands for "Not a number", positive and negative +``Infinity``, and ``-0``:: >>> getcontext().prec = 28 >>> Decimal(10) @@ -250,7 +250,7 @@ And some mathematical functions are also available to Decimal: >>> Decimal('10').log10() Decimal('1') -The :meth:`quantize` method rounds a number to a fixed exponent. This method is +The :meth:`~Decimal.quantize` method rounds a number to a fixed exponent. This method is useful for monetary applications that often round results to a fixed number of places: @@ -299,7 +299,7 @@ enabled: Contexts also have signal flags for monitoring exceptional conditions encountered during computations. The flags remain set until explicitly cleared, so it is best to clear the flags before each set of monitored computations by -using the :meth:`clear_flags` method. :: +using the :meth:`~Context.clear_flags` method. :: >>> setcontext(ExtendedContext) >>> getcontext().clear_flags() @@ -309,12 +309,12 @@ using the :meth:`clear_flags` method. :: Context(prec=9, rounding=ROUND_HALF_EVEN, Emin=-999999, Emax=999999, capitals=1, clamp=0, flags=[Inexact, Rounded], traps=[]) -The *flags* entry shows that the rational approximation to :const:`Pi` was +The *flags* entry shows that the rational approximation to pi was rounded (digits beyond the context precision were thrown away) and that the result is inexact (some of the discarded digits were non-zero). -Individual traps are set using the dictionary in the :attr:`traps` field of a -context: +Individual traps are set using the dictionary in the :attr:`~Context.traps` +attribute of a context: .. doctest:: newcontext @@ -369,7 +369,7 @@ Decimal objects with the fullwidth digits ``'\uff10'`` through ``'\uff19'``. If *value* is a :class:`tuple`, it should have three components, a sign - (:const:`0` for positive or :const:`1` for negative), a :class:`tuple` of + (``0`` for positive or ``1`` for negative), a :class:`tuple` of digits, and an integer exponent. For example, ``Decimal((0, (1, 4, 1, 4), -3))`` returns ``Decimal('1.414')``. @@ -387,7 +387,7 @@ Decimal objects The purpose of the *context* argument is determining what to do if *value* is a malformed string. If the context traps :const:`InvalidOperation`, an exception is raised; otherwise, the constructor returns a new Decimal with the value of - :const:`NaN`. + ``NaN``. Once constructed, :class:`Decimal` objects are immutable. @@ -701,7 +701,7 @@ Decimal objects .. method:: max(other, context=None) Like ``max(self, other)`` except that the context rounding rule is applied - before returning and that :const:`NaN` values are either signaled or + before returning and that ``NaN`` values are either signaled or ignored (depending on the context and whether they are signaling or quiet). @@ -713,7 +713,7 @@ Decimal objects .. method:: min(other, context=None) Like ``min(self, other)`` except that the context rounding rule is applied - before returning and that :const:`NaN` values are either signaled or + before returning and that ``NaN`` values are either signaled or ignored (depending on the context and whether they are signaling or quiet). @@ -743,12 +743,23 @@ Decimal objects .. method:: normalize(context=None) - Normalize the number by stripping the rightmost trailing zeros and - converting any result equal to :const:`Decimal('0')` to - :const:`Decimal('0e0')`. 
Used for producing canonical values for attributes - of an equivalence class. For example, ``Decimal('32.100')`` and - ``Decimal('0.321000e+2')`` both normalize to the equivalent value - ``Decimal('32.1')``. + Used for producing canonical values of an equivalence + class within either the current context or the specified context. + + This has the same semantics as the unary plus operation, except that if + the final result is finite it is reduced to its simplest form, with all + trailing zeros removed and its sign preserved. That is, while the + coefficient is non-zero and a multiple of ten the coefficient is divided + by ten and the exponent is incremented by 1. Otherwise (the coefficient is + zero) the exponent is set to 0. In all cases the sign is unchanged. + + For example, ``Decimal('32.100')`` and ``Decimal('0.321000e+2')`` both + normalize to the equivalent value ``Decimal('32.1')``. + + Note that rounding is applied *before* reducing to simplest form. + + In the latest versions of the specification, this operation is also known + as ``reduce``. .. method:: number_class(context=None) @@ -790,7 +801,7 @@ Decimal objects the current thread's context is used. An error is returned whenever the resulting exponent is greater than - :attr:`Emax` or less than :attr:`Etiny`. + :attr:`~Context.Emax` or less than :meth:`~Context.Etiny`. .. method:: radix() @@ -830,7 +841,7 @@ Decimal objects .. method:: same_quantum(other, context=None) Test whether self and other have the same exponent or whether both are - :const:`NaN`. + ``NaN``. This operation is unaffected by context and is quiet: no flags are changed and no rounding is performed. As an exception, the C version may raise @@ -892,11 +903,11 @@ Decimal objects Logical operands ^^^^^^^^^^^^^^^^ -The :meth:`logical_and`, :meth:`logical_invert`, :meth:`logical_or`, -and :meth:`logical_xor` methods expect their arguments to be *logical +The :meth:`~Decimal.logical_and`, :meth:`~Decimal.logical_invert`, :meth:`~Decimal.logical_or`, +and :meth:`~Decimal.logical_xor` methods expect their arguments to be *logical operands*. A *logical operand* is a :class:`Decimal` instance whose exponent and sign are both zero, and whose digits are all either -:const:`0` or :const:`1`. +``0`` or ``1``. .. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -926,7 +937,7 @@ Each thread has its own current context which is accessed or changed using the You can also use the :keyword:`with` statement and the :func:`localcontext` function to temporarily change the active context. -.. function:: localcontext(ctx=None, \*\*kwargs) +.. function:: localcontext(ctx=None, **kwargs) Return a context manager that will set the current context for the active thread to a copy of *ctx* on entry to the with-statement and restore the previous context @@ -982,7 +993,7 @@ described below. In addition, the module provides three pre-made contexts: exceptions are not raised during computations). Because the traps are disabled, this context is useful for applications that - prefer to have result value of :const:`NaN` or :const:`Infinity` instead of + prefer to have result value of ``NaN`` or ``Infinity`` instead of raising exceptions. This allows an application to complete a run in the presence of conditions that would otherwise halt the program. @@ -1001,8 +1012,8 @@ described below. In addition, the module provides three pre-made contexts: In single threaded environments, it is preferable to not use this context at all. 
Instead, simply create contexts explicitly as described below. - The default values are :attr:`prec`\ =\ :const:`28`, - :attr:`rounding`\ =\ :const:`ROUND_HALF_EVEN`, + The default values are :attr:`Context.prec`\ =\ ``28``, + :attr:`Context.rounding`\ =\ :const:`ROUND_HALF_EVEN`, and enabled traps for :class:`Overflow`, :class:`InvalidOperation`, and :class:`DivisionByZero`. @@ -1016,7 +1027,7 @@ In addition to the three supplied contexts, new contexts can be created with the default values are copied from the :const:`DefaultContext`. If the *flags* field is not specified or is :const:`None`, all flags are cleared. - *prec* is an integer in the range [:const:`1`, :const:`MAX_PREC`] that sets + *prec* is an integer in the range [``1``, :const:`MAX_PREC`] that sets the precision for arithmetic operations in the context. The *rounding* option is one of the constants listed in the section @@ -1026,20 +1037,20 @@ In addition to the three supplied contexts, new contexts can be created with the contexts should only set traps and leave the flags clear. The *Emin* and *Emax* fields are integers specifying the outer limits allowable - for exponents. *Emin* must be in the range [:const:`MIN_EMIN`, :const:`0`], - *Emax* in the range [:const:`0`, :const:`MAX_EMAX`]. + for exponents. *Emin* must be in the range [:const:`MIN_EMIN`, ``0``], + *Emax* in the range [``0``, :const:`MAX_EMAX`]. - The *capitals* field is either :const:`0` or :const:`1` (the default). If set to - :const:`1`, exponents are printed with a capital :const:`E`; otherwise, a - lowercase :const:`e` is used: :const:`Decimal('6.02e+23')`. + The *capitals* field is either ``0`` or ``1`` (the default). If set to + ``1``, exponents are printed with a capital ``E``; otherwise, a + lowercase ``e`` is used: ``Decimal('6.02e+23')``. - The *clamp* field is either :const:`0` (the default) or :const:`1`. - If set to :const:`1`, the exponent ``e`` of a :class:`Decimal` + The *clamp* field is either ``0`` (the default) or ``1``. + If set to ``1``, the exponent ``e`` of a :class:`Decimal` instance representable in this context is strictly limited to the range ``Emin - prec + 1 <= e <= Emax - prec + 1``. If *clamp* is - :const:`0` then a weaker condition holds: the adjusted exponent of - the :class:`Decimal` instance is at most ``Emax``. When *clamp* is - :const:`1`, a large normal number will, where possible, have its + ``0`` then a weaker condition holds: the adjusted exponent of + the :class:`Decimal` instance is at most :attr:`~Context.Emax`. When *clamp* is + ``1``, a large normal number will, where possible, have its exponent reduced and a corresponding number of zeros added to its coefficient, in order to fit the exponent constraints; this preserves the value of the number but loses information about @@ -1048,13 +1059,13 @@ In addition to the three supplied contexts, new contexts can be created with the >>> Context(prec=6, Emax=999, clamp=1).create_decimal('1.23e999') Decimal('1.23000E+999') - A *clamp* value of :const:`1` allows compatibility with the + A *clamp* value of ``1`` allows compatibility with the fixed-width decimal interchange formats specified in IEEE 754. The :class:`Context` class defines several general purpose methods as well as a large number of methods for doing arithmetic directly in a given context. 
In addition, for each of the :class:`Decimal` methods described above (with - the exception of the :meth:`adjusted` and :meth:`as_tuple` methods) there is + the exception of the :meth:`~Decimal.adjusted` and :meth:`~Decimal.as_tuple` methods) there is a corresponding :class:`Context` method. For example, for a :class:`Context` instance ``C`` and :class:`Decimal` instance ``x``, ``C.exp(x)`` is equivalent to ``x.exp(context=C)``. Each :class:`Context` method accepts a @@ -1064,11 +1075,11 @@ In addition to the three supplied contexts, new contexts can be created with the .. method:: clear_flags() - Resets all of the flags to :const:`0`. + Resets all of the flags to ``0``. .. method:: clear_traps() - Resets all of the traps to :const:`0`. + Resets all of the traps to ``0``. .. versionadded:: 3.3 @@ -1385,10 +1396,10 @@ In addition to the three supplied contexts, new contexts can be created with the With three arguments, compute ``(x**y) % modulo``. For the three argument form, the following restrictions on the arguments hold: - - all three arguments must be integral - - ``y`` must be nonnegative - - at least one of ``x`` or ``y`` must be nonzero - - ``modulo`` must be nonzero and have at most 'precision' digits + - all three arguments must be integral + - ``y`` must be nonnegative + - at least one of ``x`` or ``y`` must be nonzero + - ``modulo`` must be nonzero and have at most 'precision' digits The value resulting from ``Context.power(x, y, modulo)`` is equal to the value that would be obtained by computing ``(x**y) @@ -1483,13 +1494,13 @@ are also included in the pure Python version for compatibility. +---------------------+---------------------+-------------------------------+ | | 32-bit | 64-bit | +=====================+=====================+===============================+ -| .. data:: MAX_PREC | :const:`425000000` | :const:`999999999999999999` | +| .. data:: MAX_PREC | ``425000000`` | ``999999999999999999`` | +---------------------+---------------------+-------------------------------+ -| .. data:: MAX_EMAX | :const:`425000000` | :const:`999999999999999999` | +| .. data:: MAX_EMAX | ``425000000`` | ``999999999999999999`` | +---------------------+---------------------+-------------------------------+ -| .. data:: MIN_EMIN | :const:`-425000000` | :const:`-999999999999999999` | +| .. data:: MIN_EMIN | ``-425000000`` | ``-999999999999999999`` | +---------------------+---------------------+-------------------------------+ -| .. data:: MIN_ETINY | :const:`-849999999` | :const:`-1999999999999999997` | +| .. data:: MIN_ETINY | ``-849999999`` | ``-1999999999999999997`` | +---------------------+---------------------+-------------------------------+ @@ -1497,7 +1508,7 @@ are also included in the pure Python version for compatibility. The value is ``True``. Deprecated, because Python now always has threads. -.. deprecated:: 3.9 + .. deprecated:: 3.9 .. data:: HAVE_CONTEXTVAR @@ -1514,7 +1525,7 @@ Rounding modes .. data:: ROUND_CEILING - Round towards :const:`Infinity`. + Round towards ``Infinity``. .. data:: ROUND_DOWN @@ -1522,7 +1533,7 @@ Rounding modes .. data:: ROUND_FLOOR - Round towards :const:`-Infinity`. + Round towards ``-Infinity``. .. data:: ROUND_HALF_DOWN @@ -1570,7 +1581,7 @@ condition. Altered an exponent to fit representation constraints. Typically, clamping occurs when an exponent falls outside the context's - :attr:`Emin` and :attr:`Emax` limits. If possible, the exponent is reduced to + :attr:`~Context.Emin` and :attr:`~Context.Emax` limits. 
If possible, the exponent is reduced to fit by adding zeros to the coefficient. @@ -1584,8 +1595,8 @@ condition. Signals the division of a non-infinite number by zero. Can occur with division, modulo division, or when raising a number to a negative - power. If this signal is not trapped, returns :const:`Infinity` or - :const:`-Infinity` with the sign determined by the inputs to the calculation. + power. If this signal is not trapped, returns ``Infinity`` or + ``-Infinity`` with the sign determined by the inputs to the calculation. .. class:: Inexact @@ -1602,7 +1613,7 @@ condition. An invalid operation was performed. Indicates that an operation was requested that does not make sense. If not - trapped, returns :const:`NaN`. Possible causes include:: + trapped, returns ``NaN``. Possible causes include:: Infinity - Infinity 0 * Infinity @@ -1619,10 +1630,10 @@ condition. Numerical overflow. - Indicates the exponent is larger than :attr:`Emax` after rounding has + Indicates the exponent is larger than :attr:`Context.Emax` after rounding has occurred. If not trapped, the result depends on the rounding mode, either pulling inward to the largest representable finite number or rounding outward - to :const:`Infinity`. In either case, :class:`Inexact` and :class:`Rounded` + to ``Infinity``. In either case, :class:`Inexact` and :class:`Rounded` are also signaled. @@ -1631,14 +1642,14 @@ condition. Rounding occurred though possibly no information was lost. Signaled whenever rounding discards digits; even if those digits are zero - (such as rounding :const:`5.00` to :const:`5.0`). If not trapped, returns + (such as rounding ``5.00`` to ``5.0``). If not trapped, returns the result unchanged. This signal is used to detect loss of significant digits. .. class:: Subnormal - Exponent was lower than :attr:`Emin` prior to rounding. + Exponent was lower than :attr:`~Context.Emin` prior to rounding. Occurs when an operation result is subnormal (the exponent is too small). If not trapped, returns the result unchanged. @@ -1696,7 +1707,7 @@ Mitigating round-off error with increased precision ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The use of decimal floating point eliminates decimal representation error -(making it possible to represent :const:`0.1` exactly); however, some operations +(making it possible to represent ``0.1`` exactly); however, some operations can still incur round-off error when non-zero digits exceed the fixed precision. The effects of round-off error can be amplified by the addition or subtraction @@ -1746,8 +1757,8 @@ Special values ^^^^^^^^^^^^^^ The number system for the :mod:`decimal` module provides special values -including :const:`NaN`, :const:`sNaN`, :const:`-Infinity`, :const:`Infinity`, -and two zeros, :const:`+0` and :const:`-0`. +including ``NaN``, ``sNaN``, ``-Infinity``, ``Infinity``, +and two zeros, ``+0`` and ``-0``. Infinities can be constructed directly with: ``Decimal('Infinity')``. Also, they can arise from dividing by zero when the :exc:`DivisionByZero` signal is @@ -1758,30 +1769,30 @@ The infinities are signed (affine) and can be used in arithmetic operations where they get treated as very large, indeterminate numbers. For instance, adding a constant to infinity gives another infinite result. -Some operations are indeterminate and return :const:`NaN`, or if the +Some operations are indeterminate and return ``NaN``, or if the :exc:`InvalidOperation` signal is trapped, raise an exception. For example, -``0/0`` returns :const:`NaN` which means "not a number". 
This variety of -:const:`NaN` is quiet and, once created, will flow through other computations -always resulting in another :const:`NaN`. This behavior can be useful for a +``0/0`` returns ``NaN`` which means "not a number". This variety of +``NaN`` is quiet and, once created, will flow through other computations +always resulting in another ``NaN``. This behavior can be useful for a series of computations that occasionally have missing inputs --- it allows the calculation to proceed while flagging specific results as invalid. -A variant is :const:`sNaN` which signals rather than remaining quiet after every +A variant is ``sNaN`` which signals rather than remaining quiet after every operation. This is a useful return value when an invalid result needs to interrupt a calculation for special handling. The behavior of Python's comparison operators can be a little surprising where a -:const:`NaN` is involved. A test for equality where one of the operands is a -quiet or signaling :const:`NaN` always returns :const:`False` (even when doing +``NaN`` is involved. A test for equality where one of the operands is a +quiet or signaling ``NaN`` always returns :const:`False` (even when doing ``Decimal('NaN')==Decimal('NaN')``), while a test for inequality always returns :const:`True`. An attempt to compare two Decimals using any of the ``<``, ``<=``, ``>`` or ``>=`` operators will raise the :exc:`InvalidOperation` signal -if either operand is a :const:`NaN`, and return :const:`False` if this signal is +if either operand is a ``NaN``, and return :const:`False` if this signal is not trapped. Note that the General Decimal Arithmetic specification does not specify the behavior of direct comparisons; these rules for comparisons -involving a :const:`NaN` were taken from the IEEE 854 standard (see Table 3 in -section 5.7). To ensure strict standards-compliance, use the :meth:`compare` -and :meth:`compare-signal` methods instead. +involving a ``NaN`` were taken from the IEEE 854 standard (see Table 3 in +section 5.7). To ensure strict standards-compliance, use the :meth:`~Decimal.compare` +and :meth:`~Decimal.compare_signal` methods instead. The signed zeros can result from calculations that underflow. They keep the sign that would have resulted if the calculation had been carried out to greater @@ -2013,7 +2024,7 @@ Q. In a fixed-point application with two decimal places, some inputs have many places and need to be rounded. Others are not supposed to have excess digits and need to be validated. What methods should be used? -A. The :meth:`quantize` method rounds to a fixed number of decimal places. If +A. The :meth:`~Decimal.quantize` method rounds to a fixed number of decimal places. If the :const:`Inexact` trap is set, it is also useful for validation: >>> TWOPLACES = Decimal(10) ** -2 # same as Decimal('0.01') @@ -2037,7 +2048,7 @@ throughout an application? A. Some operations like addition, subtraction, and multiplication by an integer will automatically preserve fixed point. 
Other operations, like division and non-integer multiplication, will change the number of decimal places and need to -be followed-up with a :meth:`quantize` step: +be followed-up with a :meth:`~Decimal.quantize` step: >>> a = Decimal('102.72') # Initial fixed-point values >>> b = Decimal('3.17') @@ -2053,10 +2064,11 @@ be followed-up with a :meth:`quantize` step: Decimal('0.03') In developing fixed-point applications, it is convenient to define functions -to handle the :meth:`quantize` step: +to handle the :meth:`~Decimal.quantize` step: >>> def mul(x, y, fp=TWOPLACES): ... return (x * y).quantize(fp) + ... >>> def div(x, y, fp=TWOPLACES): ... return (x / y).quantize(fp) @@ -2065,24 +2077,44 @@ to handle the :meth:`quantize` step: >>> div(b, a) Decimal('0.03') -Q. There are many ways to express the same value. The numbers :const:`200`, -:const:`200.000`, :const:`2E2`, and :const:`.02E+4` all have the same value at +Q. There are many ways to express the same value. The numbers ``200``, +``200.000``, ``2E2``, and ``.02E+4`` all have the same value at various precisions. Is there a way to transform them to a single recognizable canonical value? -A. The :meth:`normalize` method maps all equivalent values to a single +A. The :meth:`~Decimal.normalize` method maps all equivalent values to a single representative: >>> values = map(Decimal, '200 200.000 2E2 .02E+4'.split()) >>> [v.normalize() for v in values] [Decimal('2E+2'), Decimal('2E+2'), Decimal('2E+2'), Decimal('2E+2')] +Q. When does rounding occur in a computation? + +A. It occurs *after* the computation. The philosophy of the decimal +specification is that numbers are considered exact and are created +independent of the current context. They can even have greater +precision than the current context. Computations proceed with those +exact inputs and then rounding (or other context operations) is +applied to the *result* of the computation:: + + >>> getcontext().prec = 5 + >>> pi = Decimal('3.1415926535') # More than 5 digits + >>> pi # All digits are retained + Decimal('3.1415926535') + >>> pi + 0 # Rounded after an addition + Decimal('3.1416') + >>> pi - Decimal('0.00005') # Subtract unrounded numbers, then round + Decimal('3.1415') + >>> pi + 0 - Decimal('0.00005') # Intermediate values are rounded + Decimal('3.1416') + Q. Some decimal values always print with exponential notation. Is there a way to get a non-exponential representation? A. For some values, exponential notation is the only way to express the number of significant places in the coefficient. For example, expressing -:const:`5.0E+3` as :const:`5000` keeps the value constant but cannot show the +``5.0E+3`` as ``5000`` keeps the value constant but cannot show the original's two-place significance. If an application does not care about tracking significance, it is easy to @@ -2158,12 +2190,12 @@ for medium-sized numbers and the `Number Theoretic Transform `_ for very large numbers. -The context must be adapted for exact arbitrary precision arithmetic. :attr:`Emin` -and :attr:`Emax` should always be set to the maximum values, :attr:`clamp` -should always be 0 (the default). Setting :attr:`prec` requires some care. +The context must be adapted for exact arbitrary precision arithmetic. :attr:`~Context.Emin` +and :attr:`~Context.Emax` should always be set to the maximum values, :attr:`~Context.clamp` +should always be 0 (the default). Setting :attr:`~Context.prec` requires some care.
The easiest approach for trying out bignum arithmetic is to use the maximum -value for :attr:`prec` as well [#]_:: +value for :attr:`~Context.prec` as well [#]_:: >>> setcontext(Context(prec=MAX_PREC, Emax=MAX_EMAX, Emin=MIN_EMIN)) >>> x = Decimal(2) ** 256 @@ -2180,7 +2212,7 @@ the available memory will be insufficient:: MemoryError On systems with overallocation (e.g. Linux), a more sophisticated approach is to -adjust :attr:`prec` to the amount of available RAM. Suppose that you have 8GB of +adjust :attr:`~Context.prec` to the amount of available RAM. Suppose that you have 8GB of RAM and expect 10 simultaneous operands using a maximum of 500MB each:: >>> import sys diff --git a/Doc/library/development.rst b/Doc/library/development.rst index 9edce758688e2d7..b1979b921e7d5f9 100644 --- a/Doc/library/development.rst +++ b/Doc/library/development.rst @@ -8,8 +8,7 @@ The modules described in this chapter help you write software. For example, the :mod:`pydoc` module takes a module and generates documentation based on the module's contents. The :mod:`doctest` and :mod:`unittest` modules contains frameworks for writing unit tests that automatically exercise code and verify -that the expected output is produced. :program:`2to3` can translate Python 2.x -source code into valid Python 3.x code. +that the expected output is produced. The list of modules described in this chapter is: @@ -23,5 +22,4 @@ The list of modules described in this chapter is: unittest.rst unittest.mock.rst unittest.mock-examples.rst - 2to3.rst test.rst diff --git a/Doc/library/devmode.rst b/Doc/library/devmode.rst index 44e7d4f541d817a..5b8a9bd19084565 100644 --- a/Doc/library/devmode.rst +++ b/Doc/library/devmode.rst @@ -16,12 +16,12 @@ setting the :envvar:`PYTHONDEVMODE` environment variable to ``1``. See also :ref:`Python debug build `. Effects of the Python Development Mode -====================================== +-------------------------------------- Enabling the Python Development Mode is similar to the following command, but with additional effects described below:: - PYTHONMALLOC=debug PYTHONASYNCIODEBUG=1 python3 -W default -X faulthandler + PYTHONMALLOC=debug PYTHONASYNCIODEBUG=1 python -W default -X faulthandler Effects of the Python Development Mode: @@ -59,8 +59,9 @@ Effects of the Python Development Mode: ``default``. * Call :func:`faulthandler.enable` at Python startup to install handlers for - the :const:`SIGSEGV`, :const:`SIGFPE`, :const:`SIGABRT`, :const:`SIGBUS` and - :const:`SIGILL` signals to dump the Python traceback on a crash. + the :const:`~signal.SIGSEGV`, :const:`~signal.SIGFPE`, + :const:`~signal.SIGABRT`, :const:`~signal.SIGBUS` and + :const:`~signal.SIGILL` signals to dump the Python traceback on a crash. It behaves as if the :option:`-X faulthandler <-X>` command line option is used or if the :envvar:`PYTHONFAULTHANDLER` environment variable is set to @@ -81,7 +82,7 @@ Effects of the Python Development Mode: ignored for empty strings. * The :class:`io.IOBase` destructor logs ``close()`` exceptions. -* Set the :attr:`~sys.flags.dev_mode` attribute of :attr:`sys.flags` to +* Set the :attr:`~sys.flags.dev_mode` attribute of :data:`sys.flags` to ``True``. The Python Development Mode does not enable the :mod:`tracemalloc` module by @@ -107,7 +108,7 @@ value can be read from :data:`sys.flags.dev_mode `. 
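A quick, hedged way to confirm the flag from the command line (a minimal sketch):

.. code-block:: shell-session

    $ python -c "import sys; print(sys.flags.dev_mode)"
    False
    $ python -X dev -c "import sys; print(sys.flags.dev_mode)"
    True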
ResourceWarning Example -======================= +----------------------- Example of a script counting the number of lines of the text file specified in the command line:: @@ -128,14 +129,14 @@ any warning. Example using README.txt, which has 269 lines: .. code-block:: shell-session - $ python3 script.py README.txt + $ python script.py README.txt 269 Enabling the Python Development Mode displays a :exc:`ResourceWarning` warning: .. code-block:: shell-session - $ python3 -X dev script.py README.txt + $ python -X dev script.py README.txt 269 script.py:10: ResourceWarning: unclosed file <_io.TextIOWrapper name='README.rst' mode='r' encoding='UTF-8'> main() @@ -146,7 +147,7 @@ opened: .. code-block:: shell-session - $ python3 -X dev -X tracemalloc=5 script.py README.rst + $ python -X dev -X tracemalloc=5 script.py README.rst 269 script.py:10: ResourceWarning: unclosed file <_io.TextIOWrapper name='README.rst' mode='r' encoding='UTF-8'> main() @@ -171,7 +172,7 @@ application more deterministic and more reliable. Bad file descriptor error example -================================= +--------------------------------- Script displaying the first line of itself:: @@ -190,7 +191,7 @@ By default, Python does not emit any warning: .. code-block:: shell-session - $ python3 script.py + $ python script.py import os The Python Development Mode shows a :exc:`ResourceWarning` and logs a "Bad file @@ -198,7 +199,7 @@ descriptor" error when finalizing the file object: .. code-block:: shell-session - $ python3 script.py + $ python -X dev script.py import os script.py:10: ResourceWarning: unclosed file <_io.TextIOWrapper name='script.py' mode='r' encoding='UTF-8'> main() diff --git a/Doc/library/dialog.rst b/Doc/library/dialog.rst index 53f98c1018988f8..191e0da12103fa3 100644 --- a/Doc/library/dialog.rst +++ b/Doc/library/dialog.rst @@ -27,15 +27,15 @@ functions for creating simple modal dialogs to get a value from the user. The base class for custom dialogs. - .. method:: body(master) + .. method:: body(master) - Override to construct the dialog's interface and return the widget that - should have initial focus. + Override to construct the dialog's interface and return the widget that + should have initial focus. - .. method:: buttonbox() + .. method:: buttonbox() - Default behaviour adds OK and Cancel buttons. Override for custom button - layouts. + Default behaviour adds OK and Cancel buttons. Override for custom button + layouts. diff --git a/Doc/library/difflib.rst b/Doc/library/difflib.rst index 5ee1f4a02c6816a..9abf19557f989cb 100644 --- a/Doc/library/difflib.rst +++ b/Doc/library/difflib.rst @@ -171,9 +171,12 @@ diffs. For comparing directories and files, see also, the :mod:`filecmp` module. expressed in the ISO 8601 format. If not specified, the strings default to blanks. + >>> import sys + >>> from difflib import * >>> s1 = ['bacon\n', 'eggs\n', 'ham\n', 'guido\n'] >>> s2 = ['python\n', 'eggy\n', 'hamster\n', 'guido\n'] - >>> sys.stdout.writelines(context_diff(s1, s2, fromfile='before.py', tofile='after.py')) + >>> sys.stdout.writelines(context_diff(s1, s2, fromfile='before.py', + ... tofile='after.py')) *** before.py --- after.py *************** @@ -294,13 +297,12 @@ diffs. For comparing directories and files, see also, the :mod:`filecmp` module. For inputs that do not have trailing newlines, set the *lineterm* argument to ``""`` so that the output will be uniformly newline free. 
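As a minimal sketch of the *lineterm* behaviour (the file names ``a.txt`` and ``b.txt`` are only illustrative)::

    >>> from difflib import unified_diff
    >>> a = ['one', 'two', 'three']
    >>> b = ['one', 'too', 'three']
    >>> print('\n'.join(unified_diff(a, b, fromfile='a.txt', tofile='b.txt', lineterm='')))
    --- a.txt
    +++ b.txt
    @@ -1,3 +1,3 @@
     one
    -two
    +too
     three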
- The context diff format normally has a header for filenames and modification + The unified diff format normally has a header for filenames and modification times. Any or all of these may be specified using strings for *fromfile*, *tofile*, *fromfiledate*, and *tofiledate*. The modification times are normally expressed in the ISO 8601 format. If not specified, the strings default to blanks. - >>> s1 = ['bacon\n', 'eggs\n', 'ham\n', 'guido\n'] >>> s2 = ['python\n', 'eggy\n', 'hamster\n', 'guido\n'] >>> sys.stdout.writelines(unified_diff(s1, s2, fromfile='before.py', tofile='after.py')) @@ -570,8 +572,8 @@ The :class:`SequenceMatcher` class has this constructor: The three methods that return the ratio of matching to total characters can give different results due to differing levels of approximation, although -:meth:`quick_ratio` and :meth:`real_quick_ratio` are always at least as large as -:meth:`ratio`: +:meth:`~SequenceMatcher.quick_ratio` and :meth:`~SequenceMatcher.real_quick_ratio` +are always at least as large as :meth:`~SequenceMatcher.ratio`: >>> s = SequenceMatcher(None, "abcd", "bcde") >>> s.ratio() @@ -593,15 +595,15 @@ This example compares two strings, considering blanks to be "junk": ... "private Thread currentThread;", ... "private volatile Thread currentThread;") -:meth:`ratio` returns a float in [0, 1], measuring the similarity of the -sequences. As a rule of thumb, a :meth:`ratio` value over 0.6 means the +:meth:`~SequenceMatcher.ratio` returns a float in [0, 1], measuring the similarity of the +sequences. As a rule of thumb, a :meth:`~SequenceMatcher.ratio` value over 0.6 means the sequences are close matches: >>> print(round(s.ratio(), 3)) 0.866 If you're only interested in where the sequences match, -:meth:`get_matching_blocks` is handy: +:meth:`~SequenceMatcher.get_matching_blocks` is handy: >>> for block in s.get_matching_blocks(): ... print("a[%d] and b[%d] match for %d elements" % block) @@ -609,12 +611,12 @@ If you're only interested in where the sequences match, a[8] and b[17] match for 21 elements a[29] and b[38] match for 0 elements -Note that the last tuple returned by :meth:`get_matching_blocks` is always a -dummy, ``(len(a), len(b), 0)``, and this is the only case in which the last +Note that the last tuple returned by :meth:`~SequenceMatcher.get_matching_blocks` +is always a dummy, ``(len(a), len(b), 0)``, and this is the only case in which the last tuple element (number of elements matched) is ``0``. If you want to know how to change the first sequence into the second, use -:meth:`get_opcodes`: +:meth:`~SequenceMatcher.get_opcodes`: >>> for opcode in s.get_opcodes(): ... print("%6s a[%d:%d] b[%d:%d]" % opcode) @@ -689,7 +691,7 @@ Differ Example This example compares two texts. First we set up the texts, sequences of individual single-line strings ending with newlines (such sequences can also be -obtained from the :meth:`~io.BaseIO.readlines` method of file-like objects): +obtained from the :meth:`~io.IOBase.readlines` method of file-like objects): >>> text1 = ''' 1. Beautiful is better than ugly. ... 2. Explicit is better than implicit. diff --git a/Doc/library/dis.rst b/Doc/library/dis.rst index 30bbf95be634176..7e97f1a4524554c 100644 --- a/Doc/library/dis.rst +++ b/Doc/library/dis.rst @@ -42,27 +42,70 @@ interpreter. bytecode to specialize it for different runtime conditions. The adaptive bytecode can be shown by passing ``adaptive=True``. + .. 
versionchanged:: 3.12 + The argument of a jump is the offset of the target instruction relative + to the instruction that appears immediately after the jump instruction's + :opcode:`CACHE` entries. + + As a consequence, the presence of the :opcode:`CACHE` instructions is + transparent for forward jumps but needs to be taken into account when + reasoning about backward jumps. + + .. versionchanged:: 3.13 + The output shows logical labels rather than instruction offsets + for jump targets and exception handlers. The ``-O`` command line + option and the ``show_offsets`` argument were added. -Example: Given the function :func:`myfunc`:: +Example: Given the function :func:`!myfunc`:: def myfunc(alist): return len(alist) the following command can be used to display the disassembly of -:func:`myfunc`: +:func:`!myfunc`: .. doctest:: >>> dis.dis(myfunc) - 2 0 RESUME 0 + 2 RESUME 0 - 3 2 LOAD_GLOBAL 1 (NULL + len) - 14 LOAD_FAST 0 (alist) - 16 CALL 1 - 26 RETURN_VALUE + 3 LOAD_GLOBAL 1 (len + NULL) + LOAD_FAST 0 (alist) + CALL 1 + RETURN_VALUE (The "2" is a line number). +.. _dis-cli: + +Command-line interface +---------------------- + +The :mod:`dis` module can be invoked as a script from the command line: + +.. code-block:: sh + + python -m dis [-h] [-C] [-O] [infile] + +The following options are accepted: + +.. program:: dis + +.. cmdoption:: -h, --help + + Display usage and exit. + +.. cmdoption:: -C, --show-caches + + Show inline caches. + +.. cmdoption:: -O, --show-offsets + + Show offsets of instructions. + +If :file:`infile` is specified, its disassembled code will be written to stdout. +Otherwise, disassembly is performed on compiled source code received from stdin. + Bytecode analysis ----------------- @@ -73,7 +116,7 @@ The bytecode analysis API allows pieces of Python code to be wrapped in a code. .. class:: Bytecode(x, *, first_line=None, current_offset=None,\ - show_caches=False, adaptive=False) + show_caches=False, adaptive=False, show_offsets=False) Analyse the bytecode corresponding to a function, generator, asynchronous generator, coroutine, method, string of source code, or a code object (as @@ -98,6 +141,9 @@ code. If *adaptive* is ``True``, :meth:`.dis` will display specialized bytecode that may be different from the original bytecode. + If *show_offsets* is ``True``, :meth:`.dis` will include instruction + offsets in the output. + .. classmethod:: from_traceback(tb, *, show_caches=False) Construct a :class:`Bytecode` instance from the given traceback, setting @@ -188,9 +234,9 @@ operation is being performed, so the intermediate analysis object isn't useful: For a module, it disassembles all functions. For a class, it disassembles all methods (including class and static methods). For a code object or sequence of raw bytecode, it prints one line per bytecode instruction. - It also recursively disassembles nested code objects (the code of - comprehensions, generator expressions and nested functions, and the code - used for building nested classes). + It also recursively disassembles nested code objects. These can include + generator expressions, nested functions, the bodies of nested classes, + and the code objects used for :ref:`annotation scopes `. Strings are first compiled to code objects with the :func:`compile` built-in function before being disassembled. If no object is provided, this function disassembles the last traceback. @@ -220,7 +266,8 @@ operation is being performed, so the intermediate analysis object isn't useful: Added the *show_caches* and *adaptive* parameters.
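As a rough illustration of the :class:`Bytecode` analysis API (a sketch only; the exact instruction names vary between CPython versions)::

    >>> import dis
    >>> def add_one(x):
    ...     return x + 1
    ...
    >>> [instr.opname for instr in dis.Bytecode(add_one)]  # doctest: +SKIP
    ['RESUME', 'LOAD_FAST', 'LOAD_CONST', 'BINARY_OP', 'RETURN_VALUE']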
-.. function:: distb(tb=None, *, file=None, show_caches=False, adaptive=False) +.. function:: distb(tb=None, *, file=None, show_caches=False, adaptive=False, + show_offset=False) Disassemble the top-of-stack function of a traceback, using the last traceback if none was passed. The instruction causing the exception is @@ -235,9 +282,12 @@ operation is being performed, so the intermediate analysis object isn't useful: .. versionchanged:: 3.11 Added the *show_caches* and *adaptive* parameters. + .. versionchanged:: 3.13 + Added the *show_offsets* parameter. .. function:: disassemble(code, lasti=-1, *, file=None, show_caches=False, adaptive=False) - disco(code, lasti=-1, *, file=None, show_caches=False, adaptive=False) + disco(code, lasti=-1, *, file=None, show_caches=False, adaptive=False, + show_offsets=False) Disassemble a code object, indicating the last instruction if *lasti* was provided. The output is divided in the following columns: @@ -262,6 +312,8 @@ operation is being performed, so the intermediate analysis object isn't useful: .. versionchanged:: 3.11 Added the *show_caches* and *adaptive* parameters. + .. versionchanged:: 3.13 + Added the *show_offsets* parameter. .. function:: get_instructions(x, *, first_line=None, show_caches=False, adaptive=False) @@ -297,6 +349,9 @@ operation is being performed, so the intermediate analysis object isn't useful: The :pep:`626` ``co_lines`` method is used instead of the ``co_firstlineno`` and ``co_lnotab`` attributes of the code object. + .. versionchanged:: 3.13 + Line numbers can be ``None`` for bytecode that does not map to source lines. + .. function:: findlabels(code) @@ -318,6 +373,12 @@ operation is being performed, so the intermediate analysis object isn't useful: .. versionchanged:: 3.8 Added *jump* parameter. + .. versionchanged:: 3.13 + If ``oparg`` is omitted (or ``None``), the stack effect is now returned + for ``oparg=0``. Previously this was an error for opcodes that use their + arg. It is also no longer an error to pass an integer ``oparg`` when + the ``opcode`` does not use it; the ``oparg`` in this case is ignored. + .. _bytecodes: @@ -342,10 +403,25 @@ details of bytecode instructions as :class:`Instruction` instances: human readable name for operation + .. data:: baseopcode + + numeric code for the base operation if operation is specialized; + otherwise equal to :data:`opcode` + + + .. data:: baseopname + + human readable name for the base operation if operation is specialized; + otherwise equal to :data:`opname` + + .. data:: arg numeric argument to operation (if any), otherwise ``None`` + .. data:: oparg + + alias for :data:`arg` .. data:: argval @@ -363,9 +439,30 @@ details of bytecode instructions as :class:`Instruction` instances: start index of operation within bytecode sequence + .. data:: start_offset + + start index of operation within bytecode sequence, including prefixed + ``EXTENDED_ARG`` operations if present; otherwise equal to :data:`offset` + + + .. data:: cache_offset + + start index of the cache entries following the operation + + + .. data:: end_offset + + end index of the cache entries following the operation + + .. data:: starts_line - line started by this opcode (if any), otherwise ``None`` + ``True`` if this opcode starts a source line, otherwise ``False`` + + + .. data:: line_number + + source line number associated with this opcode (if any), otherwise ``None`` .. 
data:: is_jump_target @@ -373,6 +470,12 @@ details of bytecode instructions as :class:`Instruction` instances: ``True`` if other code jumps to here, otherwise ``False`` + .. data:: jump_target + + bytecode index of the jump target if this is a jump operation, + otherwise ``None`` + + .. data:: positions :class:`dis.Positions` object holding the @@ -384,6 +487,14 @@ details of bytecode instructions as :class:`Instruction` instances: Field ``positions`` is added. + .. versionchanged:: 3.13 + + Changed field ``starts_line``. + + Added fields ``start_offset``, ``cache_offset``, ``end_offset``, + ``baseopname``, ``baseopcode``, ``jump_target``, ``oparg``, and + ``line_number``. + .. class:: Positions @@ -402,6 +513,10 @@ The Python compiler currently generates the following bytecode instructions. **General instructions** +In the following, we will refer to the interpreter stack as ``STACK`` and describe +operations on it as if it were a Python list. The top of the stack corresponds to +``STACK[-1]`` in this language. + .. opcode:: NOP Do nothing code. Used as a placeholder by the bytecode optimizer, and to @@ -410,29 +525,44 @@ The Python compiler currently generates the following bytecode instructions. .. opcode:: POP_TOP - Removes the top-of-stack (TOS) item. + Removes the top-of-stack item:: + + STACK.pop() .. opcode:: END_FOR Removes the top two values from the stack. - Equivalent to POP_TOP; POP_TOP. + Equivalent to ``POP_TOP``; ``POP_TOP``. Used to clean up at the end of loops, hence the name. .. versionadded:: 3.12 +.. opcode:: END_SEND + + Implements ``del STACK[-2]``. + Used to clean up when a generator exits. + + .. versionadded:: 3.12 + + .. opcode:: COPY (i) - Push the *i*-th item to the top of the stack. The item is not removed from its - original location. + Push the i-th item to the top of the stack without removing it from its original + location:: + + assert i > 0 + STACK.append(STACK[-i]) .. versionadded:: 3.11 .. opcode:: SWAP (i) - Swap TOS with the item at position *i*. + Swap the top of the stack with the i-th element:: + + STACK[-i], STACK[-1] = STACK[-1], STACK[-i] .. versionadded:: 3.11 @@ -460,86 +590,115 @@ The Python compiler currently generates the following bytecode instructions. Unary operations take the top of the stack, apply the operation, and push the result back on the stack. -.. opcode:: UNARY_POSITIVE - - Implements ``TOS = +TOS``. - .. opcode:: UNARY_NEGATIVE - Implements ``TOS = -TOS``. + Implements ``STACK[-1] = -STACK[-1]``. .. opcode:: UNARY_NOT - Implements ``TOS = not TOS``. + Implements ``STACK[-1] = not STACK[-1]``. + + .. versionchanged:: 3.13 + This instruction now requires an exact :class:`bool` operand. .. opcode:: UNARY_INVERT - Implements ``TOS = ~TOS``. + Implements ``STACK[-1] = ~STACK[-1]``. .. opcode:: GET_ITER - Implements ``TOS = iter(TOS)``. + Implements ``STACK[-1] = iter(STACK[-1])``. .. opcode:: GET_YIELD_FROM_ITER - If ``TOS`` is a :term:`generator iterator` or :term:`coroutine` object - it is left as is. Otherwise, implements ``TOS = iter(TOS)``. + If ``STACK[-1]`` is a :term:`generator iterator` or :term:`coroutine` object + it is left as is. Otherwise, implements ``STACK[-1] = iter(STACK[-1])``. .. versionadded:: 3.5 -**Binary and in-place operations** +.. opcode:: TO_BOOL + + Implements ``STACK[-1] = bool(STACK[-1])``. -In the following, TOS is the top-of-stack. -TOS1, TOS2, TOS3 are the second, third and fourth items on the stack, respectively. + ..
versionadded:: 3.13 -Binary operations remove the top two items from the stack (TOS and TOS1). -They perform the operation, then put the result back on the stack. + +**Binary and in-place operations** + +Binary operations remove the top two items from the stack (``STACK[-1]`` and +``STACK[-2]``). They perform the operation, then put the result back on the stack. In-place operations are like binary operations, but the operation is done in-place -when TOS1 supports it, and the resulting TOS may be (but does not have to be) -the original TOS1. +when ``STACK[-2]`` supports it, and the resulting ``STACK[-1]`` may be (but does +not have to be) the original ``STACK[-2]``. .. opcode:: BINARY_OP (op) Implements the binary and in-place operators (depending on the value of - *op*). - ``TOS = TOS1 op TOS``. + *op*):: + + rhs = STACK.pop() + lhs = STACK.pop() + STACK.append(lhs op rhs) .. versionadded:: 3.11 .. opcode:: BINARY_SUBSCR - Implements ``TOS = TOS1[TOS]``. + Implements:: + + key = STACK.pop() + container = STACK.pop() + STACK.append(container[key]) .. opcode:: STORE_SUBSCR - Implements ``TOS1[TOS] = TOS2``. + Implements:: + + key = STACK.pop() + container = STACK.pop() + value = STACK.pop() + container[key] = value .. opcode:: DELETE_SUBSCR - Implements ``del TOS1[TOS]``. + Implements:: + + key = STACK.pop() + container = STACK.pop() + del container[key] .. opcode:: BINARY_SLICE - Implements ``TOS = TOS2[TOS1:TOS]``. + Implements:: + + end = STACK.pop() + start = STACK.pop() + container = STACK.pop() + STACK.append(container[start:end]) .. versionadded:: 3.12 .. opcode:: STORE_SLICE - Implements ``TOS2[TOS1:TOS] = TOS3``. + Implements:: + + end = STACK.pop() + start = STACK.pop() + container = STACK.pop() + values = STACK.pop() + container[start:end] = values .. versionadded:: 3.12 @@ -548,16 +707,16 @@ the original TOS1. .. opcode:: GET_AWAITABLE (where) - Implements ``TOS = get_awaitable(TOS)``, where ``get_awaitable(o)`` + Implements ``STACK[-1] = get_awaitable(STACK[-1])``, where ``get_awaitable(o)`` returns ``o`` if ``o`` is a coroutine object or a generator object with - the CO_ITERABLE_COROUTINE flag, or resolves + the :data:`~inspect.CO_ITERABLE_COROUTINE` flag, or resolves ``o.__await__``. If the ``where`` operand is nonzero, it indicates where the instruction occurs: - * ``1`` After a call to ``__aenter__`` - * ``2`` After a call to ``__aexit__`` + * ``1``: After a call to ``__aenter__`` + * ``2``: After a call to ``__aexit__`` .. versionadded:: 3.5 @@ -567,7 +726,7 @@ the original TOS1. .. opcode:: GET_AITER - Implements ``TOS = TOS.__aiter__()``. + Implements ``STACK[-1] = STACK[-1].__aiter__()``. .. versionadded:: 3.5 .. versionchanged:: 3.7 @@ -577,8 +736,8 @@ the original TOS1. .. opcode:: GET_ANEXT - Pushes ``get_awaitable(TOS.__anext__())`` to the stack. See - ``GET_AWAITABLE`` for details about ``get_awaitable``. + Implements ``STACK.append(get_awaitable(STACK[-1].__anext__()))``. + See ``GET_AWAITABLE`` for details about ``get_awaitable``. .. versionadded:: 3.5 @@ -586,40 +745,32 @@ the original TOS1. .. opcode:: END_ASYNC_FOR Terminates an :keyword:`async for` loop. Handles an exception raised - when awaiting a next item. If TOS is :exc:`StopAsyncIteration` pop 3 - values from the stack and restore the exception state using the second - of them. Otherwise re-raise the exception using the value - from the stack. An exception handler block is removed from the block stack. + when awaiting a next item.
The stack contains the async iterable in + ``STACK[-2]`` and the raised exception in ``STACK[-1]``. Both are popped. + If the exception is not :exc:`StopAsyncIteration`, it is re-raised. .. versionadded:: 3.8 - .. versionchanged:: 3.11 - Exception representation on the stack now consist of one, not three, items. + .. versionchanged:: 3.11 + Exception representation on the stack now consist of one, not three, items. .. opcode:: CLEANUP_THROW Handles an exception raised during a :meth:`~generator.throw` or - :meth:`~generator.close` call through the current frame. If TOS is an + :meth:`~generator.close` call through the current frame. If ``STACK[-1]`` is an instance of :exc:`StopIteration`, pop three values from the stack and push - its ``value`` member. Otherwise, re-raise TOS. - - .. versionadded:: 3.12 - - -.. opcode:: STOPITERATION_ERROR - - Handles a StopIteration raised in a generator or coroutine. - If TOS is an instance of :exc:`StopIteration`, or :exc:`StopAsyncIteration` - replace it with a :exc:`RuntimeError`. + its ``value`` member. Otherwise, re-raise ``STACK[-1]``. .. versionadded:: 3.12 .. opcode:: BEFORE_ASYNC_WITH - Resolves ``__aenter__`` and ``__aexit__`` from the object on top of the - stack. Pushes ``__aexit__`` and result of ``__aenter__()`` to the stack. + Resolves ``__aenter__`` and ``__aexit__`` from ``STACK[-1]``. + Pushes ``__aexit__`` and result of ``__aenter__()`` to the stack:: + + STACK.extend((__aexit__, __aenter__())) .. versionadded:: 3.5 @@ -627,31 +778,40 @@ the original TOS1. **Miscellaneous opcodes** -.. opcode:: PRINT_EXPR +.. opcode:: SET_ADD (i) - Implements the expression statement for the interactive mode. TOS is removed - from the stack and printed. In non-interactive mode, an expression statement - is terminated with :opcode:`POP_TOP`. + Implements:: + + item = STACK.pop() + set.add(STACK[-i], item) -.. opcode:: SET_ADD (i) - - Calls ``set.add(TOS1[-i], TOS)``. Used to implement set comprehensions. + Used to implement set comprehensions. .. opcode:: LIST_APPEND (i) - Calls ``list.append(TOS1[-i], TOS)``. Used to implement list comprehensions. + Implements:: + + item = STACK.pop() + list.append(STACK[-i], item) + + Used to implement list comprehensions. .. opcode:: MAP_ADD (i) - Calls ``dict.__setitem__(TOS1[-i], TOS1, TOS)``. Used to implement dict - comprehensions. + Implements:: + + value = STACK.pop() + key = STACK.pop() + dict.__setitem__(STACK[-i], key, value) + + Used to implement dict comprehensions. .. versionadded:: 3.1 .. versionchanged:: 3.8 - Map value is TOS and map key is TOS1. Before, those were reversed. + Map value is ``STACK[-1]`` and map key is ``STACK[-2]``. Before, those + were reversed. For all of the :opcode:`SET_ADD`, :opcode:`LIST_APPEND` and :opcode:`MAP_ADD` instructions, while the added value or key/value pair is popped off, the @@ -661,16 +821,29 @@ iterations of the loop. .. opcode:: RETURN_VALUE - Returns with TOS to the caller of the function. + Returns with ``STACK[-1]`` to the caller of the function. + + +.. opcode:: RETURN_CONST (consti) + + Returns with ``co_consts[consti]`` to the caller of the function. + + .. versionadded:: 3.12 .. opcode:: YIELD_VALUE - Pops TOS and yields it from a :term:`generator`. + Yields ``STACK.pop()`` from a :term:`generator`. + + .. versionchanged:: 3.11 + oparg set to be the stack depth. - .. versionchanged:: 3.11 - oparg set to be the stack depth, for efficient handling on frames. + ..
versionchanged:: 3.12 + oparg set to be the exception block depth, for efficient closing of generators. + .. versionchanged:: 3.13 + oparg is ``1`` if this instruction is part of a yield-from or await, and ``0`` + otherwise. .. opcode:: SETUP_ANNOTATIONS @@ -682,50 +855,44 @@ iterations of the loop. .. versionadded:: 3.6 -.. opcode:: IMPORT_STAR - - Loads all symbols not starting with ``'_'`` directly from the module TOS to - the local namespace. The module is popped after loading all names. This - opcode implements ``from module import *``. - - .. opcode:: POP_EXCEPT Pops a value from the stack, which is used to restore the exception state. - .. versionchanged:: 3.11 - Exception representation on the stack now consist of one, not three, items. + .. versionchanged:: 3.11 + Exception representation on the stack now consist of one, not three, items. .. opcode:: RERAISE - Re-raises the exception currently on top of the stack. If oparg is non-zero, - pops an additional value from the stack which is used to set ``f_lasti`` - of the current frame. + Re-raises the exception currently on top of the stack. If oparg is non-zero, + pops an additional value from the stack which is used to set ``f_lasti`` + of the current frame. - .. versionadded:: 3.9 + .. versionadded:: 3.9 - .. versionchanged:: 3.11 - Exception representation on the stack now consist of one, not three, items. + .. versionchanged:: 3.11 + Exception representation on the stack now consist of one, not three, items. .. opcode:: PUSH_EXC_INFO - Pops a value from the stack. Pushes the current exception to the top of the stack. - Pushes the value originally popped back to the stack. - Used in exception handlers. + Pops a value from the stack. Pushes the current exception to the top of the stack. + Pushes the value originally popped back to the stack. + Used in exception handlers. - .. versionadded:: 3.11 + .. versionadded:: 3.11 .. opcode:: CHECK_EXC_MATCH - Performs exception matching for ``except``. Tests whether the TOS1 is an exception - matching TOS. Pops TOS and pushes the boolean result of the test. + Performs exception matching for ``except``. Tests whether the ``STACK[-2]`` + is an exception matching ``STACK[-1]``. Pops ``STACK[-1]`` and pushes the boolean + result of the test. .. versionadded:: 3.11 .. opcode:: CHECK_EG_MATCH - Performs exception matching for ``except*``. Applies ``split(TOS)`` on - the exception group representing TOS1. + Performs exception matching for ``except*``. Applies ``split(STACK[-1])`` on + the exception group representing ``STACK[-2]``. In case of a match, pops two items from the stack and pushes the non-matching subgroup (``None`` in case of full match) followed by the @@ -734,28 +901,18 @@ iterations of the loop. .. versionadded:: 3.11 -.. opcode:: PREP_RERAISE_STAR - - Combines the raised and reraised exceptions list from TOS, into an exception - group to propagate from a try-except* block. Uses the original exception - group from TOS1 to reconstruct the structure of reraised exceptions. Pops - two items from the stack and pushes the exception to reraise or ``None`` - if there isn't one. - - .. versionadded:: 3.11 - .. opcode:: WITH_EXCEPT_START - Calls the function in position 4 on the stack with arguments (type, val, tb) - representing the exception at the top of the stack. - Used to implement the call ``context_manager.__exit__(*exc_info())`` when an exception - has occurred in a :keyword:`with` statement. 
+ Calls the function in position 4 on the stack with arguments (type, val, tb) + representing the exception at the top of the stack. + Used to implement the call ``context_manager.__exit__(*exc_info())`` when an exception + has occurred in a :keyword:`with` statement. - .. versionadded:: 3.9 + .. versionadded:: 3.9 - .. versionchanged:: 3.11 - The ``__exit__`` function is in position 4 of the stack rather than 7. - Exception representation on the stack now consist of one, not three, items. + .. versionchanged:: 3.11 + The ``__exit__`` function is in position 4 of the stack rather than 7. + Exception representation on the stack now consist of one, not three, items. .. opcode:: LOAD_ASSERTION_ERROR @@ -768,11 +925,11 @@ iterations of the loop. .. opcode:: LOAD_BUILD_CLASS - Pushes :func:`builtins.__build_class__` onto the stack. It is later called + Pushes :func:`!builtins.__build_class__` onto the stack. It is later called to construct a class. -.. opcode:: BEFORE_WITH (delta) +.. opcode:: BEFORE_WITH This opcode performs several operations before a with block starts. First, it loads :meth:`~object.__exit__` from the context manager and pushes it onto @@ -785,26 +942,26 @@ iterations of the loop. .. opcode:: GET_LEN - Push ``len(TOS)`` onto the stack. + Perform ``STACK.append(len(STACK[-1]))``. .. versionadded:: 3.10 .. opcode:: MATCH_MAPPING - If TOS is an instance of :class:`collections.abc.Mapping` (or, more technically: if - it has the :const:`Py_TPFLAGS_MAPPING` flag set in its - :c:member:`~PyTypeObject.tp_flags`), push ``True`` onto the stack. Otherwise, push - ``False``. + If ``STACK[-1]`` is an instance of :class:`collections.abc.Mapping` (or, more + technically: if it has the :c:macro:`Py_TPFLAGS_MAPPING` flag set in its + :c:member:`~PyTypeObject.tp_flags`), push ``True`` onto the stack. Otherwise, + push ``False``. .. versionadded:: 3.10 .. opcode:: MATCH_SEQUENCE - If TOS is an instance of :class:`collections.abc.Sequence` and is *not* an instance + If ``STACK[-1]`` is an instance of :class:`collections.abc.Sequence` and is *not* an instance of :class:`str`/:class:`bytes`/:class:`bytearray` (or, more technically: if it has - the :const:`Py_TPFLAGS_SEQUENCE` flag set in its :c:member:`~PyTypeObject.tp_flags`), + the :c:macro:`Py_TPFLAGS_SEQUENCE` flag set in its :c:member:`~PyTypeObject.tp_flags`), push ``True`` onto the stack. Otherwise, push ``False``. .. versionadded:: 3.10 @@ -812,9 +969,9 @@ iterations of the loop. .. opcode:: MATCH_KEYS - TOS is a tuple of mapping keys, and TOS1 is the match subject. If TOS1 - contains all of the keys in TOS, push a :class:`tuple` containing the - corresponding values. Otherwise, push ``None``. + ``STACK[-1]`` is a tuple of mapping keys, and ``STACK[-2]`` is the match subject. + If ``STACK[-2]`` contains all of the keys in ``STACK[-1]``, push a :class:`tuple` + containing the corresponding values. Otherwise, push ``None``. .. versionadded:: 3.10 @@ -825,44 +982,65 @@ iterations of the loop. .. opcode:: STORE_NAME (namei) - Implements ``name = TOS``. *namei* is the index of *name* in the attribute - :attr:`co_names` of the code object. The compiler tries to use - :opcode:`STORE_FAST` or :opcode:`STORE_GLOBAL` if possible. + Implements ``name = STACK.pop()``. *namei* is the index of *name* in the attribute + :attr:`!co_names` of the :ref:`code object `. + The compiler tries to use :opcode:`STORE_FAST` or :opcode:`STORE_GLOBAL` if possible. .. 
opcode:: DELETE_NAME (namei) - Implements ``del name``, where *namei* is the index into :attr:`co_names` - attribute of the code object. + Implements ``del name``, where *namei* is the index into :attr:`!co_names` + attribute of the :ref:`code object `. .. opcode:: UNPACK_SEQUENCE (count) - Unpacks TOS into *count* individual values, which are put onto the stack - right-to-left. + Unpacks ``STACK[-1]`` into *count* individual values, which are put onto the stack + right-to-left. Require there to be exactly *count* values.:: + + assert(len(STACK[-1]) == count) + STACK.extend(STACK.pop()[:-count-1:-1]) .. opcode:: UNPACK_EX (counts) - Implements assignment with a starred target: Unpacks an iterable in TOS into - individual values, where the total number of values can be smaller than the + Implements assignment with a starred target: Unpacks an iterable in ``STACK[-1]`` + into individual values, where the total number of values can be smaller than the number of items in the iterable: one of the new values will be a list of all leftover items. - The low byte of *counts* is the number of values before the list value, the - high byte of *counts* the number of values after it. The resulting values - are put onto the stack right-to-left. + The number of values before and after the list value is limited to 255. + + The number of values before the list value is encoded in the argument of the + opcode. The number of values after the list if any is encoded using an + ``EXTENDED_ARG``. As a consequence, the argument can be seen as a two bytes values + where the low byte of *counts* is the number of values before the list value, the + high byte of *counts* the number of values after it. + + The extracted values are put onto the stack right-to-left, i.e. ``a, *b, c = d`` + will be stored after execution as ``STACK.extend((a, b, c))``. .. opcode:: STORE_ATTR (namei) - Implements ``TOS.name = TOS1``, where *namei* is the index of name in - :attr:`co_names`. + Implements:: + obj = STACK.pop() + value = STACK.pop() + obj.name = value + + where *namei* is the index of name in :attr:`!co_names` of the + :ref:`code object `. .. opcode:: DELETE_ATTR (namei) - Implements ``del TOS.name``, using *namei* as index into :attr:`co_names`. + Implements:: + + obj = STACK.pop() + del obj.name + + where *namei* is the index of name into :attr:`!co_names` of the + :ref:`code object `. .. opcode:: STORE_GLOBAL (namei) @@ -883,12 +1061,37 @@ iterations of the loop. .. opcode:: LOAD_NAME (namei) Pushes the value associated with ``co_names[namei]`` onto the stack. + The name is looked up within the locals, then the globals, then the builtins. + + +.. opcode:: LOAD_LOCALS + + Pushes a reference to the locals dictionary onto the stack. This is used + to prepare namespace dictionaries for :opcode:`LOAD_FROM_DICT_OR_DEREF` + and :opcode:`LOAD_FROM_DICT_OR_GLOBALS`. + + .. versionadded:: 3.12 + + +.. opcode:: LOAD_FROM_DICT_OR_GLOBALS (i) + + Pops a mapping off the stack and looks up the value for ``co_names[namei]``. + If the name is not found there, looks it up in the globals and then the builtins, + similar to :opcode:`LOAD_GLOBAL`. + This is used for loading global variables in + :ref:`annotation scopes ` within class bodies. + + .. versionadded:: 3.12 .. opcode:: BUILD_TUPLE (count) Creates a tuple consuming *count* items from the stack, and pushes the - resulting tuple onto the stack. + resulting tuple onto the stack.:: + + assert count > 0 + STACK, values = STACK[:-count], STACK[-count:] + STACK.append(tuple(values)) .. 
opcode:: BUILD_LIST (count) @@ -905,7 +1108,7 @@ iterations of the loop. Pushes a new dictionary object onto the stack. Pops ``2 * count`` items so that the dictionary holds *count* entries: - ``{..., TOS3: TOS2, TOS1: TOS}``. + ``{..., STACK[-4]: STACK[-3], STACK[-2]: STACK[-1]}``. .. versionchanged:: 3.5 The dictionary is created from stack items instead of creating an @@ -916,7 +1119,7 @@ iterations of the loop. The version of :opcode:`BUILD_MAP` specialized for constant keys. Pops the top element on the stack which contains a tuple of keys, then starting from - ``TOS1``, pops *count* values to form values in the built dictionary. + ``STACK[-2]``, pops *count* values to form values in the built dictionary. .. versionadded:: 3.6 @@ -929,30 +1132,38 @@ iterations of the loop. .. versionadded:: 3.6 -.. opcode:: LIST_TO_TUPLE - - Pops a list from the stack and pushes a tuple containing the same values. - - .. versionadded:: 3.9 +.. opcode:: LIST_EXTEND (i) + Implements:: -.. opcode:: LIST_EXTEND (i) + seq = STACK.pop() + list.extend(STACK[-i], seq) - Calls ``list.extend(TOS1[-i], TOS)``. Used to build lists. + Used to build lists. .. versionadded:: 3.9 .. opcode:: SET_UPDATE (i) - Calls ``set.update(TOS1[-i], TOS)``. Used to build sets. + Implements:: + + seq = STACK.pop() + set.update(STACK[-i], seq) + + Used to build sets. .. versionadded:: 3.9 .. opcode:: DICT_UPDATE (i) - Calls ``dict.update(TOS1[-i], TOS)``. Used to build dicts. + Implements:: + + map = STACK.pop() + dict.update(STACK[-i], map) + + Used to build dicts. .. versionadded:: 3.9 @@ -966,26 +1177,56 @@ iterations of the loop. .. opcode:: LOAD_ATTR (namei) - If the low bit of ``namei`` is not set, this replaces TOS with - ``getattr(TOS, co_names[namei>>1])``. + If the low bit of ``namei`` is not set, this replaces ``STACK[-1]`` with + ``getattr(STACK[-1], co_names[namei>>1])``. If the low bit of ``namei`` is set, this will attempt to load a method named - ``co_names[namei>>1]`` from the TOS object. TOS is popped. - This bytecode distinguishes two cases: if TOS has a method with the correct - name, the bytecode pushes the unbound method and TOS. TOS will be used as - the first argument (``self``) by :opcode:`CALL` when calling the - unbound method. Otherwise, ``NULL`` and the object return by the attribute - lookup are pushed. + ``co_names[namei>>1]`` from the ``STACK[-1]`` object. ``STACK[-1]`` is popped. + This bytecode distinguishes two cases: if ``STACK[-1]`` has a method with the + correct name, the bytecode pushes the unbound method and ``STACK[-1]``. + ``STACK[-1]`` will be used as the first argument (``self``) by :opcode:`CALL` + or :opcode:`CALL_KW` when calling the unbound method. + Otherwise, ``NULL`` and the object returned by + the attribute lookup are pushed. .. versionchanged:: 3.12 If the low bit of ``namei`` is set, then a ``NULL`` or ``self`` is pushed to the stack before the attribute or unbound method respectively. +.. opcode:: LOAD_SUPER_ATTR (namei) + + This opcode implements :func:`super`, both in its zero-argument and + two-argument forms (e.g. ``super().method()``, ``super().attr`` and + ``super(cls, self).method()``, ``super(cls, self).attr``). + + It pops three values from the stack (from top of stack down): + - ``self``: the first argument to the current method + - ``cls``: the class within which the current method was defined + - the global ``super`` + + With respect to its argument, it works similarly to :opcode:`LOAD_ATTR`, + except that ``namei`` is shifted left by 2 bits instead of 1. 
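As a hedged illustration of the attribute and ``super()`` loading described above (the ``Base`` and ``Square`` classes are invented, and the exact opcodes shown vary between CPython releases), a zero-argument :func:`super` call can be inspected with :mod:`dis`::

   import dis

   class Base:
       def area(self):
           return 0

   class Square(Base):
       def area(self):
           # On 3.12 and later this zero-argument super() call is typically
           # compiled using the super-attribute loading opcode described above.
           return super().area() + 1

   dis.dis(Square.area)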
+ + The low bit of ``namei`` signals to attempt a method load, as with + :opcode:`LOAD_ATTR`, which results in pushing ``None`` and the loaded method. + When it is unset a single value is pushed to the stack. + + The second-low bit of ``namei``, if set, means that this was a two-argument + call to :func:`super` (unset means zero-argument). + + .. versionadded:: 3.12 + + .. opcode:: COMPARE_OP (opname) Performs a Boolean operation. The operation name can be found in - ``cmp_op[opname]``. + ``cmp_op[opname >> 5]``. If the fifth-lowest bit of ``opname`` is set + (``opname & 16``), the result should be coerced to ``bool``. + + .. versionchanged:: 3.13 + The fifth-lowest bit of the oparg now indicates a forced conversion to + :class:`bool`. .. opcode:: IS_OP (invert) @@ -1004,17 +1245,16 @@ iterations of the loop. .. opcode:: IMPORT_NAME (namei) - Imports the module ``co_names[namei]``. TOS and TOS1 are popped and provide - the *fromlist* and *level* arguments of :func:`__import__`. The module - object is pushed onto the stack. The current namespace is not affected: for - a proper import statement, a subsequent :opcode:`STORE_FAST` instruction + Imports the module ``co_names[namei]``. ``STACK[-1]`` and ``STACK[-2]`` are + popped and provide the *fromlist* and *level* arguments of :func:`__import__`. + The module object is pushed onto the stack. The current namespace is not affected: for a proper import statement, a subsequent :opcode:`STORE_FAST` instruction modifies the namespace. .. opcode:: IMPORT_FROM (namei) - Loads the attribute ``co_names[namei]`` from the module found in TOS. The - resulting object is pushed onto the stack, to be subsequently stored by a + Loads the attribute ``co_names[namei]`` from the module found in ``STACK[-1]``. + The resulting object is pushed onto the stack, to be subsequently stored by a :opcode:`STORE_FAST` instruction. @@ -1039,7 +1279,8 @@ iterations of the loop. .. opcode:: POP_JUMP_IF_TRUE (delta) - If TOS is true, increments the bytecode counter by *delta*. TOS is popped. + If ``STACK[-1]`` is true, increments the bytecode counter by *delta*. + ``STACK[-1]`` is popped. .. versionchanged:: 3.11 The oparg is now a relative delta rather than an absolute target. @@ -1049,9 +1290,13 @@ iterations of the loop. .. versionchanged:: 3.12 This is no longer a pseudo-instruction. + .. versionchanged:: 3.13 + This instruction now requires an exact :class:`bool` operand. + .. opcode:: POP_JUMP_IF_FALSE (delta) - If TOS is false, increments the bytecode counter by *delta*. TOS is popped. + If ``STACK[-1]`` is false, increments the bytecode counter by *delta*. + ``STACK[-1]`` is popped. .. versionchanged:: 3.11 The oparg is now a relative delta rather than an absolute target. @@ -1061,9 +1306,16 @@ iterations of the loop. .. versionchanged:: 3.12 This is no longer a pseudo-instruction. + .. versionchanged:: 3.13 + This instruction now requires an exact :class:`bool` operand. + .. opcode:: POP_JUMP_IF_NOT_NONE (delta) - If TOS is not ``None``, increments the bytecode counter by *delta*. TOS is popped. + If ``STACK[-1]`` is not ``None``, increments the bytecode counter by *delta*. + ``STACK[-1]`` is popped. + + This opcode is a pseudo-instruction, replaced in final bytecode by + the directed versions (forward/backward). .. versionadded:: 3.11 @@ -1073,41 +1325,23 @@ iterations of the loop. .. opcode:: POP_JUMP_IF_NONE (delta) - If TOS is ``None``, increments the bytecode counter by *delta*. TOS is popped. 
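To watch the conditional-jump opcodes above being emitted, disassemble a function that branches on a ``None`` test. This is only a sketch (the function name ``describe`` is invented, and depending on the CPython version the test compiles to :opcode:`POP_JUMP_IF_NOT_NONE` or to :opcode:`IS_OP` followed by :opcode:`POP_JUMP_IF_FALSE`)::

   import dis

   def describe(x):
       if x is None:          # a None test followed by a conditional jump
           return "missing"
       return str(x)

   dis.dis(describe)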
+ If ``STACK[-1]`` is ``None``, increments the bytecode counter by *delta*. + ``STACK[-1]`` is popped. + + This opcode is a pseudo-instruction, replaced in final bytecode by + the directed versions (forward/backward). .. versionadded:: 3.11 .. versionchanged:: 3.12 This is no longer a pseudo-instruction. - -.. opcode:: JUMP_IF_TRUE_OR_POP (delta) - - If TOS is true, increments the bytecode counter by *delta* and leaves TOS on the - stack. Otherwise (TOS is false), TOS is popped. - - .. versionadded:: 3.1 - - .. versionchanged:: 3.11 - The oparg is now a relative delta rather than an absolute target. - -.. opcode:: JUMP_IF_FALSE_OR_POP (delta) - - If TOS is false, increments the bytecode counter by *delta* and leaves TOS on the - stack. Otherwise (TOS is true), TOS is popped. - - .. versionadded:: 3.1 - - .. versionchanged:: 3.11 - The oparg is now a relative delta rather than an absolute target. - - .. opcode:: FOR_ITER (delta) - TOS is an :term:`iterator`. Call its :meth:`~iterator.__next__` method. If - this yields a new value, push it on the stack (leaving the iterator below - it). If the iterator indicates it is exhausted then the byte - code counter is incremented by *delta*. + ``STACK[-1]`` is an :term:`iterator`. Call its :meth:`~iterator.__next__` method. + If this yields a new value, push it on the stack (leaving the iterator below + it). If the iterator indicates it is exhausted then the byte code counter is + incremented by *delta*. .. versionchanged:: 3.12 Up until 3.11 the iterator was popped when it was exhausted. @@ -1136,9 +1370,17 @@ iterations of the loop. .. versionadded:: 3.12 +.. opcode:: LOAD_FAST_AND_CLEAR (var_num) + + Pushes a reference to the local ``co_varnames[var_num]`` onto the stack (or + pushes ``NULL`` onto the stack if the local variable has not been + initialized) and sets ``co_varnames[var_num]`` to ``NULL``. + + .. versionadded:: 3.12 + .. opcode:: STORE_FAST (var_num) - Stores TOS into the local ``co_varnames[var_num]``. + Stores ``STACK.pop()`` into the local ``co_varnames[var_num]``. .. opcode:: DELETE_FAST (var_num) @@ -1148,24 +1390,12 @@ iterations of the loop. .. opcode:: MAKE_CELL (i) - Creates a new cell in slot ``i``. If that slot is empty then + Creates a new cell in slot ``i``. If that slot is nonempty then that value is stored into the new cell. .. versionadded:: 3.11 -.. opcode:: LOAD_CLOSURE (i) - - Pushes a reference to the cell contained in slot ``i`` of the "fast locals" - storage. The name of the variable is ``co_fastlocalnames[i]``. - - Note that ``LOAD_CLOSURE`` is effectively an alias for ``LOAD_FAST``. - It exists to keep bytecode a little more readable. - - .. versionchanged:: 3.11 - ``i`` is no longer offset by the length of ``co_varnames``. - - .. opcode:: LOAD_DEREF (i) Loads the cell contained in slot ``i`` of the "fast locals" storage. @@ -1175,21 +1405,22 @@ iterations of the loop. ``i`` is no longer offset by the length of ``co_varnames``. -.. opcode:: LOAD_CLASSDEREF (i) +.. opcode:: LOAD_FROM_DICT_OR_DEREF (i) - Much like :opcode:`LOAD_DEREF` but first checks the locals dictionary before - consulting the cell. This is used for loading free variables in class - bodies. + Pops a mapping off the stack and looks up the name associated with + slot ``i`` of the "fast locals" storage in this mapping. + If the name is not found there, loads it from the cell contained in + slot ``i``, similar to :opcode:`LOAD_DEREF`. 
This is used for loading + free variables in class bodies (which previously used + :opcode:`!LOAD_CLASSDEREF`) and in + :ref:`annotation scopes ` within class bodies. - .. versionadded:: 3.4 - - .. versionchanged:: 3.11 - ``i`` is no longer offset by the length of ``co_varnames``. + .. versionadded:: 3.12 .. opcode:: STORE_DEREF (i) - Stores TOS into the cell contained in slot ``i`` of the "fast locals" + Stores ``STACK.pop()`` into the cell contained in slot ``i`` of the "fast locals" storage. .. versionchanged:: 3.11 @@ -1222,32 +1453,21 @@ iterations of the loop. depending on the value of *argc*: * 0: ``raise`` (re-raise previous exception) - * 1: ``raise TOS`` (raise exception instance or type at ``TOS``) - * 2: ``raise TOS1 from TOS`` (raise exception instance or type at ``TOS1`` - with ``__cause__`` set to ``TOS``) + * 1: ``raise STACK[-1]`` (raise exception instance or type at ``STACK[-1]``) + * 2: ``raise STACK[-2] from STACK[-1]`` (raise exception instance or type at + ``STACK[-2]`` with ``__cause__`` set to ``STACK[-1]``) .. opcode:: CALL (argc) - Calls a callable object with the number of arguments specified by ``argc``, - including the named arguments specified by the preceding - :opcode:`KW_NAMES`, if any. - On the stack are (in ascending order), either: - - * NULL - * The callable - * The positional arguments - * The named arguments - - or: + Calls a callable object with the number of arguments specified by ``argc``. + On the stack are (in ascending order): * The callable - * ``self`` + * ``self`` or ``NULL`` * The remaining positional arguments - * The named arguments - ``argc`` is the total of the positional and named arguments, excluding - ``self`` when a ``NULL`` is not present. + ``argc`` is the total of the positional arguments, excluding ``self``. ``CALL`` pops all arguments and the callable object off the stack, calls the callable object with those arguments, and pushes the return value @@ -1255,6 +1475,33 @@ iterations of the loop. .. versionadded:: 3.11 + .. versionchanged:: 3.13 + The callable now always appears at the same position on the stack. + + .. versionchanged:: 3.13 + Calls with keyword arguments are now handled by :opcode:`CALL_KW`. + + +.. opcode:: CALL_KW (argc) + + Calls a callable object with the number of arguments specified by ``argc``, + including one or more named arguments. On the stack are (in ascending order): + + * The callable + * ``self`` or ``NULL`` + * The remaining positional arguments + * The named arguments + * A :class:`tuple` of keyword argument names + + ``argc`` is the total of the positional and named arguments, excluding ``self``. + The length of the tuple of keyword argument names is the number of named arguments. + + ``CALL_KW`` pops all arguments, the keyword names, and the callable object + off the stack, calls the callable object with those arguments, and pushes the + return value returned by the callable object. + + .. versionadded:: 3.13 + .. opcode:: CALL_FUNCTION_EX (flags) @@ -1273,45 +1520,61 @@ iterations of the loop. .. opcode:: PUSH_NULL - Pushes a ``NULL`` to the stack. - Used in the call sequence to match the ``NULL`` pushed by - :opcode:`LOAD_METHOD` for non-method calls. + Pushes a ``NULL`` to the stack. + Used in the call sequence to match the ``NULL`` pushed by + :opcode:`LOAD_METHOD` for non-method calls. .. versionadded:: 3.11 -.. opcode:: KW_NAMES (i) +.. opcode:: MAKE_FUNCTION - Prefixes :opcode:`CALL`. - Stores a reference to ``co_consts[consti]`` into an internal variable - for use by :opcode:`CALL`. 
``co_consts[consti]`` must be a tuple of strings. + Pushes a new function object on the stack built from the code object at ``STACK[1]``. - .. versionadded:: 3.11 + .. versionchanged:: 3.10 + Flag value ``0x04`` is a tuple of strings instead of dictionary + + .. versionchanged:: 3.11 + Qualified name at ``STACK[-1]`` was removed. + .. versionchanged:: 3.13 + Extra function attributes on the stack, signaled by oparg flags, were + removed. They now use :opcode:`SET_FUNCTION_ATTRIBUTE`. -.. opcode:: MAKE_FUNCTION (flags) - Pushes a new function object on the stack. From bottom to top, the consumed - stack must consist of values if the argument carries a specified flag value +.. opcode:: SET_FUNCTION_ATTRIBUTE (flag) + + Sets an attribute on a function object. Expects the function at ``STACK[-1]`` + and the attribute value to set at ``STACK[-2]``; consumes both and leaves the + function at ``STACK[-1]``. The flag determines which attribute to set: * ``0x01`` a tuple of default values for positional-only and positional-or-keyword parameters in positional order * ``0x02`` a dictionary of keyword-only parameters' default values * ``0x04`` a tuple of strings containing parameters' annotations * ``0x08`` a tuple containing cells for free variables, making a closure - * the code associated with the function (at TOS1) - * the :term:`qualified name` of the function (at TOS) - .. versionchanged:: 3.10 - Flag value ``0x04`` is a tuple of strings instead of dictionary + .. versionadded:: 3.13 + .. opcode:: BUILD_SLICE (argc) - .. index:: builtin: slice + .. index:: pair: built-in function; slice + + Pushes a slice object on the stack. *argc* must be 2 or 3. If it is 2, implements:: - Pushes a slice object on the stack. *argc* must be 2 or 3. If it is 2, - ``slice(TOS1, TOS)`` is pushed; if it is 3, ``slice(TOS2, TOS1, TOS)`` is - pushed. See the :func:`slice` built-in function for more information. + end = STACK.pop() + start = STACK.pop() + STACK.append(slice(start, stop)) + + if it is 3, implements:: + + step = STACK.pop() + end = STACK.pop() + start = STACK.pop() + STACK.append(slice(start, end, step)) + + See the :func:`slice` built-in function for more information. .. opcode:: EXTENDED_ARG (ext) @@ -1322,37 +1585,59 @@ iterations of the loop. an argument from two-byte to four-byte. -.. opcode:: FORMAT_VALUE (flags) +.. opcode:: CONVERT_VALUE (oparg) - Used for implementing formatted literal strings (f-strings). Pops - an optional *fmt_spec* from the stack, then a required *value*. - *flags* is interpreted as follows: + Convert value to a string, depending on ``oparg``:: - * ``(flags & 0x03) == 0x00``: *value* is formatted as-is. - * ``(flags & 0x03) == 0x01``: call :func:`str` on *value* before - formatting it. - * ``(flags & 0x03) == 0x02``: call :func:`repr` on *value* before - formatting it. - * ``(flags & 0x03) == 0x03``: call :func:`ascii` on *value* before - formatting it. - * ``(flags & 0x04) == 0x04``: pop *fmt_spec* from the stack and use - it, else use an empty *fmt_spec*. + value = STACK.pop() + result = func(value) + STACK.push(result) - Formatting is performed using :c:func:`PyObject_Format`. The - result is pushed on the stack. + * ``oparg == 1``: call :func:`str` on *value* + * ``oparg == 2``: call :func:`repr` on *value* + * ``oparg == 3``: call :func:`ascii` on *value* - .. versionadded:: 3.6 + Used for implementing formatted literal strings (f-strings). + + .. versionadded:: 3.13 + + +.. 
opcode:: FORMAT_SIMPLE + + Formats the value on top of stack:: + + value = STACK.pop() + result = value.__format__("") + STACK.push(result) + + Used for implementing formatted literal strings (f-strings). + + .. versionadded:: 3.13 + +.. opcode:: FORMAT_SPEC + + Formats the given value with the given format spec:: + + spec = STACK.pop() + value = STACK.pop() + result = value.__format__(spec) + STACK.push(result) + + Used for implementing formatted literal strings (f-strings). + + .. versionadded:: 3.13 .. opcode:: MATCH_CLASS (count) - TOS is a tuple of keyword attribute names, TOS1 is the class being matched - against, and TOS2 is the match subject. *count* is the number of positional - sub-patterns. + ``STACK[-1]`` is a tuple of keyword attribute names, ``STACK[-2]`` is the class + being matched against, and ``STACK[-3]`` is the match subject. *count* is the + number of positional sub-patterns. - Pop TOS, TOS1, and TOS2. If TOS2 is an instance of TOS1 and has the - positional and keyword attributes required by *count* and TOS, push a tuple - of extracted attributes. Otherwise, push ``None``. + Pop ``STACK[-1]``, ``STACK[-2]``, and ``STACK[-3]``. If ``STACK[-3]`` is an + instance of ``STACK[-2]`` and has the positional and keyword attributes + required by *count* and ``STACK[-1]``, push a tuple of extracted attributes. + Otherwise, push ``None``. .. versionadded:: 3.10 @@ -1361,46 +1646,47 @@ iterations of the loop. success (``True``) or failure (``False``). -.. opcode:: RESUME (where) +.. opcode:: RESUME (context) - A no-op. Performs internal tracing, debugging and optimization checks. + A no-op. Performs internal tracing, debugging and optimization checks. - The ``where`` operand marks where the ``RESUME`` occurs: + The ``context`` oparand consists of two parts. The lowest two bits + indicate where the ``RESUME`` occurs: - * ``0`` The start of a function - * ``1`` After a ``yield`` expression - * ``2`` After a ``yield from`` expression - * ``3`` After an ``await`` expression + * ``0`` The start of a function, which is neither a generator, coroutine + nor an async generator + * ``1`` After a ``yield`` expression + * ``2`` After a ``yield from`` expression + * ``3`` After an ``await`` expression + + The next bit is ``1`` if the RESUME is at except-depth ``1``, and ``0`` + otherwise. .. versionadded:: 3.11 + .. versionchanged:: 3.13 + The oparg value changed to include information about except-depth + .. opcode:: RETURN_GENERATOR - Create a generator, coroutine, or async generator from the current frame. - Clear the current frame and return the newly created generator. + Create a generator, coroutine, or async generator from the current frame. + Used as first opcode of in code object for the above mentioned callables. + Clear the current frame and return the newly created generator. - .. versionadded:: 3.11 + .. versionadded:: 3.11 .. opcode:: SEND (delta) - Equivalent to ``TOS = TOS1.send(TOS)``. Used in ``yield from`` and ``await`` - statements. - - If the call raises :exc:`StopIteration`, pop both items, push the - exception's ``value`` attribute, and increment the bytecode counter by - *delta*. + Equivalent to ``STACK[-1] = STACK[-2].send(STACK[-1])``. Used in ``yield from`` + and ``await`` statements. - .. versionadded:: 3.11 + If the call raises :exc:`StopIteration`, pop the top value from the stack, + push the exception's ``value`` attribute, and increment the bytecode counter + by *delta*. - -.. 
opcode:: ASYNC_GEN_WRAP - - Wraps the value on top of the stack in an ``async_generator_wrapped_value``. - Used to yield in async generators. - - .. versionadded:: 3.11 + .. versionadded:: 3.11 .. opcode:: HAVE_ARGUMENT @@ -1409,8 +1695,8 @@ iterations of the loop. opcodes in the range [0,255] which don't use their argument and those that do (``< HAVE_ARGUMENT`` and ``>= HAVE_ARGUMENT``, respectively). - If your application uses pseudo instructions, use the :data:`hasarg` - collection instead. + If your application uses pseudo instructions or specialized instructions, + use the :data:`hasarg` collection instead. .. versionchanged:: 3.6 Now every instruction has an argument, but opcodes ``< HAVE_ARGUMENT`` @@ -1421,10 +1707,97 @@ iterations of the loop. it is not true that comparison with ``HAVE_ARGUMENT`` indicates whether they use their arg. + .. deprecated:: 3.13 + Use :data:`hasarg` instead. + +.. opcode:: CALL_INTRINSIC_1 + + Calls an intrinsic function with one argument. Passes ``STACK[-1]`` as the + argument and sets ``STACK[-1]`` to the result. Used to implement + functionality that is not performance critical. + + The operand determines which intrinsic function is called: + + +-----------------------------------+-----------------------------------+ + | Operand | Description | + +===================================+===================================+ + | ``INTRINSIC_1_INVALID`` | Not valid | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_PRINT`` | Prints the argument to standard | + | | out. Used in the REPL. | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_IMPORT_STAR`` | Performs ``import *`` for the | + | | named module. | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_STOPITERATION_ERROR`` | Extracts the return value from a | + | | ``StopIteration`` exception. | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_ASYNC_GEN_WRAP`` | Wraps an aync generator value | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_UNARY_POSITIVE`` | Performs the unary ``+`` | + | | operation | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_LIST_TO_TUPLE`` | Converts a list to a tuple | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_TYPEVAR`` | Creates a :class:`typing.TypeVar` | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_PARAMSPEC`` | Creates a | + | | :class:`typing.ParamSpec` | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_TYPEVARTUPLE`` | Creates a | + | | :class:`typing.TypeVarTuple` | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_SUBSCRIPT_GENERIC`` | Returns :class:`typing.Generic` | + | | subscripted with the argument | + +-----------------------------------+-----------------------------------+ + | ``INTRINSIC_TYPEALIAS`` | Creates a | + | | :class:`typing.TypeAliasType`; | + | | used in the :keyword:`type` | + | | statement. The argument is a tuple| + | | of the type alias's name, | + | | type parameters, and value. | + +-----------------------------------+-----------------------------------+ + + .. versionadded:: 3.12 + +.. opcode:: CALL_INTRINSIC_2 + + Calls an intrinsic function with two arguments. 
Used to implement functionality + that is not performance critical:: + + arg2 = STACK.pop() + arg1 = STACK.pop() + result = intrinsic2(arg1, arg2) + STACK.push(result) + + The operand determines which intrinsic function is called: + + +----------------------------------------+-----------------------------------+ + | Operand | Description | + +========================================+===================================+ + | ``INTRINSIC_2_INVALID`` | Not valid | + +----------------------------------------+-----------------------------------+ + | ``INTRINSIC_PREP_RERAISE_STAR`` | Calculates the | + | | :exc:`ExceptionGroup` to raise | + | | from a ``try-except*``. | + +----------------------------------------+-----------------------------------+ + | ``INTRINSIC_TYPEVAR_WITH_BOUND`` | Creates a :class:`typing.TypeVar` | + | | with a bound. | + +----------------------------------------+-----------------------------------+ + | ``INTRINSIC_TYPEVAR_WITH_CONSTRAINTS`` | Creates a | + | | :class:`typing.TypeVar` with | + | | constraints. | + +----------------------------------------+-----------------------------------+ + | ``INTRINSIC_SET_FUNCTION_TYPE_PARAMS`` | Sets the ``__type_params__`` | + | | attribute of a function. | + +----------------------------------------+-----------------------------------+ + + .. versionadded:: 3.12 + **Pseudo-instructions** -These opcodes do not appear in python bytecode, they are used by the compiler +These opcodes do not appear in Python bytecode. They are used by the compiler but are replaced by real opcodes or removed before bytecode is generated. .. opcode:: SETUP_FINALLY (target) @@ -1436,7 +1809,7 @@ but are replaced by real opcodes or removed before bytecode is generated. .. opcode:: SETUP_CLEANUP (target) - Like ``SETUP_FINALLY``, but in case of exception also pushes the last + Like ``SETUP_FINALLY``, but in case of an exception also pushes the last instruction (``lasti``) to the stack so that ``RERAISE`` can restore it. If an exception occurs, the value stack level and the last instruction on the frame are restored to their current state, and control is transferred @@ -1445,7 +1818,7 @@ but are replaced by real opcodes or removed before bytecode is generated. .. opcode:: SETUP_WITH (target) - Like ``SETUP_CLEANUP``, but in case of exception one more item is popped + Like ``SETUP_CLEANUP``, but in case of an exception one more item is popped from the stack before control is transferred to the exception handler at ``target``. @@ -1465,6 +1838,17 @@ but are replaced by real opcodes or removed before bytecode is generated. Undirected relative jump instructions which are replaced by their directed (forward/backward) counterparts by the assembler. +.. opcode:: LOAD_CLOSURE (i) + + Pushes a reference to the cell contained in slot ``i`` of the "fast locals" + storage. + + Note that ``LOAD_CLOSURE`` is replaced with ``LOAD_FAST`` in the assembler. + + .. versionchanged:: 3.13 + This opcode is now a pseudo-instruction. + + .. opcode:: LOAD_METHOD Optimized unbound method lookup. Emitted as a ``LOAD_ATTR`` opcode @@ -1479,9 +1863,10 @@ Opcode collections These collections are provided for automatic introspection of bytecode instructions: - .. versionchanged:: 3.12 - The collections now contain pseudo instructions as well. These are - opcodes with values ``>= MIN_PSEUDO_OPCODE``. +.. versionchanged:: 3.12 + The collections now contain pseudo instructions and instrumented + instructions as well. 
These are opcodes with values ``>= MIN_PSEUDO_OPCODE`` + and ``>= MIN_INSTRUMENTED_OPCODE``. .. data:: opname @@ -1502,7 +1887,7 @@ instructions: Sequence of bytecodes that use their argument. - .. versionadded:: 3.12 + .. versionadded:: 3.12 .. data:: hasconst @@ -1512,10 +1897,10 @@ instructions: .. data:: hasfree - Sequence of bytecodes that access a free variable (note that 'free' in this + Sequence of bytecodes that access a free variable. 'free' in this context refers to names in the current scope that are referenced by inner scopes or names in outer scopes that are referenced from this scope. It does - *not* include references to global or builtin scopes). + *not* include references to global or builtin scopes. .. data:: hasname @@ -1523,15 +1908,12 @@ instructions: Sequence of bytecodes that access an attribute by name. -.. data:: hasjrel - - Sequence of bytecodes that have a relative jump target. +.. data:: hasjump + Sequence of bytecodes that have a jump target. All jumps + are relative. -.. data:: hasjabs - - Sequence of bytecodes that have an absolute jump target. - + .. versionadded:: 3.13 .. data:: haslocal @@ -1546,4 +1928,21 @@ instructions: Sequence of bytecodes that set an exception handler. - .. versionadded:: 3.12 + .. versionadded:: 3.12 + + +.. data:: hasjrel + + Sequence of bytecodes that have a relative jump target. + + .. deprecated:: 3.13 + All jumps are now relative. Use :data:`hasjump`. + + +.. data:: hasjabs + + Sequence of bytecodes that have an absolute jump target. + + .. deprecated:: 3.13 + All jumps are now relative. This list is empty. + diff --git a/Doc/library/doctest.rst b/Doc/library/doctest.rst index 75c6ee289a91e95..fa1b850c5313466 100644 --- a/Doc/library/doctest.rst +++ b/Doc/library/doctest.rst @@ -143,13 +143,13 @@ Simple Usage: Checking Examples in Docstrings --------------------------------------------- The simplest way to start using doctest (but not necessarily the way you'll -continue to do it) is to end each module :mod:`M` with:: +continue to do it) is to end each module :mod:`!M` with:: if __name__ == "__main__": import doctest doctest.testmod() -:mod:`doctest` then examines docstrings in module :mod:`M`. +:mod:`!doctest` then examines docstrings in module :mod:`!M`. Running the module as a script causes the examples in the docstrings to get executed and verified:: @@ -277,13 +277,34 @@ Which Docstrings Are Examined? The module docstring, and all function, class and method docstrings are searched. Objects imported into the module are not searched. -In addition, if ``M.__test__`` exists and "is true", it must be a dict, and each +In addition, there are cases when you want tests to be part of a module but not part +of the help text, which requires that the tests not be included in the docstring. +Doctest looks for a module-level variable called ``__test__`` and uses it to locate other +tests. If ``M.__test__`` exists and is truthy, it must be a dict, and each entry maps a (string) name to a function object, class object, or string. Function and class object docstrings found from ``M.__test__`` are searched, and strings are treated as if they were docstrings. In output, a key ``K`` in -``M.__test__`` appears with name :: +``M.__test__`` appears with name ``M.__test__.K``. - .__test__.K +For example, place this block of code at the top of :file:`example.py`: + +.. 
code-block:: python + + __test__ = { + 'numbers': """ + >>> factorial(6) + 720 + + >>> [factorial(n) for n in range(6)] + [1, 1, 2, 6, 24, 120] + """ + } + +The value of ``example.__test__["numbers"]`` will be treated as a +docstring and all the tests inside it will be run. It is +important to note that the value can be mapped to a function, +class object, or module; if so, :mod:`!doctest` +searches them recursively for docstrings, which are then scanned for tests. Any classes found are recursively searched similarly, to test docstrings in their contained methods and nested classes. @@ -351,6 +372,7 @@ The fine print: >>> def f(x): ... r'''Backslashes in a raw docstring: m\n''' + ... >>> print(f.__doc__) Backslashes in a raw docstring: m\n @@ -360,6 +382,7 @@ The fine print: >>> def f(x): ... '''Backslashes in a raw docstring: m\\n''' + ... >>> print(f.__doc__) Backslashes in a raw docstring: m\n @@ -380,10 +403,10 @@ What's the Execution Context? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ By default, each time :mod:`doctest` finds a docstring to test, it uses a -*shallow copy* of :mod:`M`'s globals, so that running tests doesn't change the -module's real globals, and so that one test in :mod:`M` can't leave behind +*shallow copy* of :mod:`!M`'s globals, so that running tests doesn't change the +module's real globals, and so that one test in :mod:`!M` can't leave behind crumbs that accidentally allow another test to work. This means examples can -freely use any names defined at top-level in :mod:`M`, and names defined earlier +freely use any names defined at top-level in :mod:`!M`, and names defined earlier in the docstring being run. Examples cannot see names defined in other docstrings. @@ -407,10 +430,10 @@ Simple example:: >>> [1, 2, 3].remove(42) Traceback (most recent call last): File "", line 1, in - ValueError: list.remove(x): x not in list + ValueError: 42 is not in list -That doctest succeeds if :exc:`ValueError` is raised, with the ``list.remove(x): -x not in list`` detail as shown. +That doctest succeeds if :exc:`ValueError` is raised, with the ``42 is not in list`` +detail as shown. The expected output for an exception must start with a traceback header, which may be either of the following two lines, indented the same as the first line of @@ -696,10 +719,10 @@ special Python comments following an example's source code: .. productionlist:: doctest directive: "#" "doctest:" `directive_options` - directive_options: `directive_option` ("," `directive_option`)\* + directive_options: `directive_option` ("," `directive_option`)* directive_option: `on_or_off` `directive_option_name` - on_or_off: "+" \| "-" - directive_option_name: "DONT_ACCEPT_BLANKLINE" \| "NORMALIZE_WHITESPACE" \| ... + on_or_off: "+" | "-" + directive_option_name: "DONT_ACCEPT_BLANKLINE" | "NORMALIZE_WHITESPACE" | ... Whitespace is not allowed between the ``+`` or ``-`` and the directive option name. The directive option name can be any of the option flag names explained @@ -935,7 +958,8 @@ and :ref:`doctest-simple-testfile`. Optional argument *exclude_empty* defaults to false. If true, objects for which no doctests are found are excluded from consideration. The default is a backward - compatibility hack, so that code still using :meth:`doctest.master.summarize` in + compatibility hack, so that code still using + :meth:`doctest.master.summarize ` in conjunction with :func:`testmod` continues to get output for objects with no tests. The *exclude_empty* argument to the newer :class:`DocTestFinder` constructor defaults to true. 
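Tying the ``__test__`` convention together with :func:`doctest.testmod`, a complete and runnable module might look like the sketch below (the file name :file:`example.py` and the ``factorial`` helper are illustrative):

.. code-block:: python

   # example.py
   def factorial(n):
       """Return n! for a non-negative integer n."""
       result = 1
       for i in range(2, n + 1):
           result *= i
       return result

   __test__ = {
       'numbers': """
       >>> factorial(6)
       720
       >>> [factorial(n) for n in range(4)]
       [1, 1, 2, 6]
       """
   }

   if __name__ == "__main__":
       import doctest
       doctest.testmod(verbose=True)

Running ``python example.py`` then executes and verifies the examples found in ``__test__`` as well as any examples in the module's docstrings.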
@@ -974,7 +998,7 @@ As your collection of doctest'ed modules grows, you'll want a way to run all their doctests systematically. :mod:`doctest` provides two functions that can be used to create :mod:`unittest` test suites from modules and text files containing doctests. To integrate with :mod:`unittest` test discovery, include -a :func:`load_tests` function in your test module:: +a :ref:`load_tests ` function in your test module:: import unittest import doctest @@ -1055,7 +1079,7 @@ from text files and modules with doctests: from a text file using :func:`DocFileSuite`. -.. function:: DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, setUp=None, tearDown=None, checker=None) +.. function:: DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, setUp=None, tearDown=None, optionflags=0, checker=None) Convert doctest tests for a module to a :class:`unittest.TestSuite`. @@ -1088,19 +1112,24 @@ from text files and modules with doctests: :func:`DocTestSuite` returns an empty :class:`unittest.TestSuite` if *module* contains no docstrings instead of raising :exc:`ValueError`. +.. exception:: failureException + + When doctests which have been converted to unit tests by :func:`DocFileSuite` + or :func:`DocTestSuite` fail, this exception is raised showing the name of + the file containing the test and a (sometimes approximate) line number. Under the covers, :func:`DocTestSuite` creates a :class:`unittest.TestSuite` out -of :class:`doctest.DocTestCase` instances, and :class:`DocTestCase` is a -subclass of :class:`unittest.TestCase`. :class:`DocTestCase` isn't documented +of :class:`!doctest.DocTestCase` instances, and :class:`!DocTestCase` is a +subclass of :class:`unittest.TestCase`. :class:`!DocTestCase` isn't documented here (it's an internal detail), but studying its code can answer questions about the exact details of :mod:`unittest` integration. Similarly, :func:`DocFileSuite` creates a :class:`unittest.TestSuite` out of -:class:`doctest.DocFileCase` instances, and :class:`DocFileCase` is a subclass -of :class:`DocTestCase`. +:class:`!doctest.DocFileCase` instances, and :class:`!DocFileCase` is a subclass +of :class:`!DocTestCase`. So both ways of creating a :class:`unittest.TestSuite` run instances of -:class:`DocTestCase`. This is important for a subtle reason: when you run +:class:`!DocTestCase`. This is important for a subtle reason: when you run :mod:`doctest` functions yourself, you can control the :mod:`doctest` options in use directly, by passing option flags to :mod:`doctest` functions. However, if you're writing a :mod:`unittest` framework, :mod:`unittest` ultimately controls @@ -1121,14 +1150,14 @@ reporting flags specific to :mod:`unittest` support, via this function: section :ref:`doctest-options`. Only "reporting flags" can be used. This is a module-global setting, and affects all future doctests run by module - :mod:`unittest`: the :meth:`runTest` method of :class:`DocTestCase` looks at - the option flags specified for the test case when the :class:`DocTestCase` + :mod:`unittest`: the :meth:`!runTest` method of :class:`!DocTestCase` looks at + the option flags specified for the test case when the :class:`!DocTestCase` instance was constructed. 
If no reporting flags were specified (which is the - typical and expected case), :mod:`doctest`'s :mod:`unittest` reporting flags are + typical and expected case), :mod:`!doctest`'s :mod:`unittest` reporting flags are :ref:`bitwise ORed ` into the option flags, and the option flags so augmented are passed to the :class:`DocTestRunner` instance created to run the doctest. If any reporting flags were specified when the - :class:`DocTestCase` instance was constructed, :mod:`doctest`'s + :class:`!DocTestCase` instance was constructed, :mod:`!doctest`'s :mod:`unittest` reporting flags are ignored. The value of the :mod:`unittest` reporting flags in effect before the function @@ -1298,7 +1327,8 @@ Example Objects A dictionary mapping from option flags to ``True`` or ``False``, which is used to override default options for this example. Any option flags not contained in this dictionary are left at their default value (as specified by the - :class:`DocTestRunner`'s :attr:`optionflags`). By default, no options are set. + :class:`DocTestRunner`'s :ref:`optionflags `). + By default, no options are set. .. _doctest-doctestfinder: @@ -1407,6 +1437,27 @@ DocTestParser objects identifying this string, and is only used for error messages. +TestResults objects +^^^^^^^^^^^^^^^^^^^ + + +.. class:: TestResults(failed, attempted) + + .. attribute:: failed + + Number of failed tests. + + .. attribute:: attempted + + Number of attempted tests. + + .. attribute:: skipped + + Number of skipped tests. + + .. versionadded:: 3.13 + + .. _doctest-doctestrunner: DocTestRunner objects @@ -1425,7 +1476,7 @@ DocTestRunner objects passing a subclass of :class:`OutputChecker` to the constructor. The test runner's display output can be controlled in two ways. First, an output - function can be passed to :meth:`TestRunner.run`; this function will be called + function can be passed to :meth:`run`; this function will be called with strings that should be displayed. It defaults to ``sys.stdout.write``. If capturing the output is not sufficient, then the display output can be also customized by subclassing DocTestRunner, and overriding the methods @@ -1446,6 +1497,10 @@ DocTestRunner objects runner compares expected output to actual output, and how it displays failures. For more information, see section :ref:`doctest-options`. + The test runner accumulates statistics. The aggregated number of attempted, + failed and skipped examples is also available via the :attr:`tries`, + :attr:`failures` and :attr:`skips` attributes. The :meth:`run` and + :meth:`summarize` methods return a :class:`TestResults` instance. :class:`DocTestParser` defines the following methods: @@ -1498,7 +1553,8 @@ DocTestRunner objects .. method:: run(test, compileflags=None, out=None, clear_globs=True) Run the examples in *test* (a :class:`DocTest` object), and display the - results using the writer function *out*. + results using the writer function *out*. Return a :class:`TestResults` + instance. The examples are run in the namespace ``test.globs``. If *clear_globs* is true (the default), then this namespace will be cleared after the test runs, @@ -1511,18 +1567,35 @@ DocTestRunner objects The output of each example is checked using the :class:`DocTestRunner`'s output checker, and the results are formatted by the - :meth:`DocTestRunner.report_\*` methods. + :meth:`!DocTestRunner.report_\*` methods. .. 
method:: summarize(verbose=None) Print a summary of all the test cases that have been run by this DocTestRunner, - and return a :term:`named tuple` ``TestResults(failed, attempted)``. + and return a :class:`TestResults` instance. The optional *verbose* argument controls how detailed the summary is. If the verbosity is not specified, then the :class:`DocTestRunner`'s verbosity is used. + :class:`DocTestParser` has the following attributes: + + .. attribute:: tries + + Number of attempted examples. + + .. attribute:: failures + + Number of failed examples. + + .. attribute:: skips + + Number of skipped examples. + + .. versionadded:: 3.13 + + .. _doctest-outputchecker: OutputChecker objects @@ -1669,12 +1742,12 @@ code under the debugger: module) of the object with the doctests of interest. The result is a string, containing the object's docstring converted to a Python script, as described for :func:`script_from_examples` above. For example, if module :file:`a.py` - contains a top-level function :func:`f`, then :: + contains a top-level function :func:`!f`, then :: import a, doctest print(doctest.testsource(a, "a.f")) - prints a script version of function :func:`f`'s docstring, with doctests + prints a script version of function :func:`!f`'s docstring, with doctests converted to code, and the rest placed in comments. diff --git a/Doc/library/email.charset.rst b/Doc/library/email.charset.rst index adbe6c1c7d29b80..aa0134412f3a60d 100644 --- a/Doc/library/email.charset.rst +++ b/Doc/library/email.charset.rst @@ -150,7 +150,7 @@ Import this class from the :mod:`email.charset` module. .. method:: __str__() Returns *input_charset* as a string coerced to lower - case. :meth:`__repr__` is an alias for :meth:`__str__`. + case. :meth:`!__repr__` is an alias for :meth:`!__str__`. .. method:: __eq__(other) diff --git a/Doc/library/email.compat32-message.rst b/Doc/library/email.compat32-message.rst index 5bef155a4af3103..c4c322a82e1f44d 100644 --- a/Doc/library/email.compat32-message.rst +++ b/Doc/library/email.compat32-message.rst @@ -367,7 +367,7 @@ Here are the methods of the :class:`Message` class: .. method:: get(name, failobj=None) Return the value of the named header field. This is identical to - :meth:`__getitem__` except that optional *failobj* is returned if the + :meth:`~object.__getitem__` except that optional *failobj* is returned if the named header is missing (defaults to ``None``). Here are some additional useful methods: diff --git a/Doc/library/email.contentmanager.rst b/Doc/library/email.contentmanager.rst index 918fc55677e7239..5b49339650f0e92 100644 --- a/Doc/library/email.contentmanager.rst +++ b/Doc/library/email.contentmanager.rst @@ -32,9 +32,9 @@ To find the handler, look for the following keys in the registry, stopping with the first one found: - * the string representing the full MIME type (``maintype/subtype``) - * the string representing the ``maintype`` - * the empty string + * the string representing the full MIME type (``maintype/subtype``) + * the string representing the ``maintype`` + * the empty string If none of these keys produce a handler, raise a :exc:`KeyError` for the full MIME type. @@ -55,11 +55,11 @@ look for the following keys in the registry, stopping with the first one found: - * the type itself (``typ``) - * the type's fully qualified name (``typ.__module__ + '.' + - typ.__qualname__``). - * the type's qualname (``typ.__qualname__``) - * the type's name (``typ.__name__``). 
+ * the type itself (``typ``) + * the type's fully qualified name (``typ.__module__ + '.' + + typ.__qualname__``). + * the type's qualname (``typ.__qualname__``) + * the type's name (``typ.__name__``). If none of the above match, repeat all of the checks above for each of the types in the :term:`MRO` (``typ.__mro__``). Finally, if no other key @@ -132,15 +132,15 @@ Currently the email package provides only one concrete content manager, Add a :mailheader:`Content-Type` header with a ``maintype/subtype`` value. - * For ``str``, set the MIME ``maintype`` to ``text``, and set the - subtype to *subtype* if it is specified, or ``plain`` if it is not. - * For ``bytes``, use the specified *maintype* and *subtype*, or - raise a :exc:`TypeError` if they are not specified. - * For :class:`~email.message.EmailMessage` objects, set the maintype - to ``message``, and set the subtype to *subtype* if it is - specified or ``rfc822`` if it is not. If *subtype* is - ``partial``, raise an error (``bytes`` objects must be used to - construct ``message/partial`` parts). + * For ``str``, set the MIME ``maintype`` to ``text``, and set the + subtype to *subtype* if it is specified, or ``plain`` if it is not. + * For ``bytes``, use the specified *maintype* and *subtype*, or + raise a :exc:`TypeError` if they are not specified. + * For :class:`~email.message.EmailMessage` objects, set the maintype + to ``message``, and set the subtype to *subtype* if it is + specified or ``rfc822`` if it is not. If *subtype* is + ``partial``, raise an error (``bytes`` objects must be used to + construct ``message/partial`` parts). If *charset* is provided (which is valid only for ``str``), encode the string to bytes using the specified character set. The default is @@ -155,14 +155,14 @@ Currently the email package provides only one concrete content manager, ``7bit`` for an input that contains non-ASCII values), raise a :exc:`ValueError`. - * For ``str`` objects, if *cte* is not set use heuristics to - determine the most compact encoding. - * For :class:`~email.message.EmailMessage`, per :rfc:`2046`, raise - an error if a *cte* of ``quoted-printable`` or ``base64`` is - requested for *subtype* ``rfc822``, and for any *cte* other than - ``7bit`` for *subtype* ``external-body``. For - ``message/rfc822``, use ``8bit`` if *cte* is not specified. For - all other values of *subtype*, use ``7bit``. + * For ``str`` objects, if *cte* is not set use heuristics to + determine the most compact encoding. + * For :class:`~email.message.EmailMessage`, per :rfc:`2046`, raise + an error if a *cte* of ``quoted-printable`` or ``base64`` is + requested for *subtype* ``rfc822``, and for any *cte* other than + ``7bit`` for *subtype* ``external-body``. For + ``message/rfc822``, use ``8bit`` if *cte* is not specified. For + all other values of *subtype*, use ``7bit``. .. note:: A *cte* of ``binary`` does not actually work correctly yet. The ``EmailMessage`` object as modified by ``set_content`` is diff --git a/Doc/library/email.encoders.rst b/Doc/library/email.encoders.rst index 5d68b104f3a45c6..3bd377e33f6c15b 100644 --- a/Doc/library/email.encoders.rst +++ b/Doc/library/email.encoders.rst @@ -25,7 +25,7 @@ is especially true for :mimetype:`image/\*` and :mimetype:`text/\*` type message containing binary data. The :mod:`email` package provides some convenient encoders in its -:mod:`encoders` module. These encoders are actually used by the +:mod:`~email.encoders` module. 
These encoders are actually used by the :class:`~email.mime.audio.MIMEAudio` and :class:`~email.mime.image.MIMEImage` class constructors to provide default encodings. All encoder functions take exactly one argument, the message object to encode. They usually extract the diff --git a/Doc/library/email.errors.rst b/Doc/library/email.errors.rst index 194a98696f437d9..56aea6598b8615c 100644 --- a/Doc/library/email.errors.rst +++ b/Doc/library/email.errors.rst @@ -58,6 +58,15 @@ The following exception classes are defined in the :mod:`email.errors` module: :class:`~email.mime.nonmultipart.MIMENonMultipart` (e.g. :class:`~email.mime.image.MIMEImage`). +.. exception:: MessageDefect() + + This is the base class for all defects found when parsing email messages. + It is derived from :exc:`ValueError`. + +.. exception:: HeaderDefect() + + This is the base class for all defects found when parsing email headers. + It is derived from :exc:`MessageDefect`. Here is the list of the defects that the :class:`~email.parser.FeedParser` can find while parsing messages. Note that the defects are added to the message diff --git a/Doc/library/email.examples.rst b/Doc/library/email.examples.rst index fc964622809d0e8..492a8354d8bf852 100644 --- a/Doc/library/email.examples.rst +++ b/Doc/library/email.examples.rst @@ -55,11 +55,11 @@ Up to the prompt, the output from the above is: To: Penelope Pussycat , Fabrette Pussycat From: Pepé Le Pew - Subject: Ayons asperges pour le déjeuner + Subject: Pourquoi pas des asperges pour ce midi ? Salut! - Cela ressemble à un excellent recipie[1] déjeuner. + Cette recette [1] sera sûrement un très bon repas. .. rubric:: Footnotes diff --git a/Doc/library/email.generator.rst b/Doc/library/email.generator.rst index 34ad7b7f200af3f..afa0038ea2d6c4f 100644 --- a/Doc/library/email.generator.rst +++ b/Doc/library/email.generator.rst @@ -10,8 +10,8 @@ One of the most common tasks is to generate the flat (serialized) version of the email message represented by a message object structure. You will need to -do this if you want to send your message via :meth:`smtplib.SMTP.sendmail` or -the :mod:`nntplib` module, or print the message on the console. Taking a +do this if you want to send your message via :meth:`smtplib.SMTP.sendmail`, +or print the message on the console. Taking a message object structure and producing a serialized representation is the job of the generator classes. @@ -274,9 +274,9 @@ in with information about the part. .. rubric:: Footnotes .. [#] This statement assumes that you use the appropriate setting for - ``unixfrom``, and that there are no :mod:`policy` settings calling for + ``unixfrom``, and that there are no :mod:`email.policy` settings calling for automatic adjustments (for example, - :attr:`~email.policy.Policy.refold_source` must be ``none``, which is + :attr:`~email.policy.EmailPolicy.refold_source` must be ``none``, which is *not* the default). It is also not 100% true, since if the message does not conform to the RFC standards occasionally information about the exact original text is lost during parsing error recovery. It is a goal diff --git a/Doc/library/email.message.rst b/Doc/library/email.message.rst index 5e0509f4181199b..f58d93da6ed6871 100644 --- a/Doc/library/email.message.rst +++ b/Doc/library/email.message.rst @@ -67,7 +67,7 @@ message objects. 
with the base :class:`~email.message.Message` class *maxheaderlen* is accepted, but defaults to ``None``, which means that by default the line length is controlled by the - :attr:`~email.policy.EmailPolicy.max_line_length` of the policy. The + :attr:`~email.policy.Policy.max_line_length` of the policy. The *policy* argument may be used to override the default policy obtained from the message instance. This can be used to control some of the formatting produced by the method, since the specified *policy* will be @@ -213,7 +213,7 @@ message objects. del msg['subject'] msg['subject'] = 'Python roolz!' - If the :mod:`policy` defines certain headers to be unique (as the standard + If the :mod:`policy ` defines certain headers to be unique (as the standard policies do), this method may raise a :exc:`ValueError` when an attempt is made to assign a value to such a header when one already exists. This behavior is intentional for consistency's sake, but do not depend on it @@ -247,7 +247,7 @@ message objects. .. method:: get(name, failobj=None) Return the value of the named header field. This is identical to - :meth:`__getitem__` except that optional *failobj* is returned if the + :meth:`~object.__getitem__` except that optional *failobj* is returned if the named header is missing (*failobj* defaults to ``None``). @@ -378,7 +378,7 @@ message objects. deprecated. Note that existing parameter values of headers may be accessed through - the :attr:`~email.headerregistry.BaseHeader.params` attribute of the + the :attr:`~email.headerregistry.ParameterizedMIMEHeader.params` attribute of the header value (for example, ``msg['Content-Type'].params['charset']``). .. versionchanged:: 3.4 ``replace`` keyword was added. @@ -691,7 +691,7 @@ message objects. .. method:: clear_content() - Remove the payload and all of the :exc:`Content-` headers, leaving + Remove the payload and all of the :mailheader:`!Content-` headers, leaving all other headers intact and in their original order. diff --git a/Doc/library/email.mime.rst b/Doc/library/email.mime.rst index 3fe5fe88a094621..d7c0d203d191f86 100644 --- a/Doc/library/email.mime.rst +++ b/Doc/library/email.mime.rst @@ -114,9 +114,9 @@ Here are the classes: A subclass of :class:`~email.mime.nonmultipart.MIMENonMultipart`, the :class:`MIMEApplication` class is used to represent MIME message objects of - major type :mimetype:`application`. *_data* is a string containing the raw - byte data. Optional *_subtype* specifies the MIME subtype and defaults to - :mimetype:`octet-stream`. + major type :mimetype:`application`. *_data* contains the bytes for the raw + application data. Optional *_subtype* specifies the MIME subtype and defaults + to :mimetype:`octet-stream`. Optional *_encoder* is a callable (i.e. function) which will perform the actual encoding of the data for transport. This callable takes one argument, which is @@ -145,7 +145,7 @@ Here are the classes: A subclass of :class:`~email.mime.nonmultipart.MIMENonMultipart`, the :class:`MIMEAudio` class is used to create MIME message objects of major type - :mimetype:`audio`. *_audiodata* is a string containing the raw audio data. If + :mimetype:`audio`. *_audiodata* contains the bytes for the raw audio data. If this data can be decoded as au, wav, aiff, or aifc, then the subtype will be automatically included in the :mailheader:`Content-Type` header. 
Otherwise you can explicitly specify the audio subtype via the *_subtype* @@ -179,7 +179,7 @@ Here are the classes: A subclass of :class:`~email.mime.nonmultipart.MIMENonMultipart`, the :class:`MIMEImage` class is used to create MIME message objects of major type - :mimetype:`image`. *_imagedata* is a string containing the raw image data. If + :mimetype:`image`. *_imagedata* contains the bytes for the raw image data. If this data type can be detected (jpeg, png, gif, tiff, rgb, pbm, pgm, ppm, rast, xbm, bmp, webp, and exr attempted), then the subtype will be automatically included in the :mailheader:`Content-Type` header. Otherwise diff --git a/Doc/library/email.parser.rst b/Doc/library/email.parser.rst index d9a61616bbbdfbd..dda0466a6afa7dc 100644 --- a/Doc/library/email.parser.rst +++ b/Doc/library/email.parser.rst @@ -39,9 +39,9 @@ returns the root object when you close the parser. Note that the parser can be extended in limited ways, and of course you can implement your own parser completely from scratch. All of the logic that connects the :mod:`email` package's bundled parser and the -:class:`~email.message.EmailMessage` class is embodied in the :mod:`policy` +:class:`~email.message.EmailMessage` class is embodied in the :class:`~email.policy.Policy` class, so a custom parser can create message object trees any way it finds -necessary by implementing custom versions of the appropriate :mod:`policy` +necessary by implementing custom versions of the appropriate :class:`!Policy` methods. diff --git a/Doc/library/email.policy.rst b/Doc/library/email.policy.rst index bf53b9520fc723e..fd47dd0dc5df36e 100644 --- a/Doc/library/email.policy.rst +++ b/Doc/library/email.policy.rst @@ -97,6 +97,7 @@ file on disk and pass it to the system ``sendmail`` program on a Unix system: >>> from subprocess import Popen, PIPE >>> with open('mymsg.txt', 'rb') as f: ... msg = message_from_binary_file(f, policy=policy.default) + ... >>> p = Popen(['sendmail', msg['To'].addresses[0]], stdin=PIPE) >>> g = BytesGenerator(p.stdin, policy=msg.policy.clone(linesep='\r\n')) >>> g.flatten(msg) @@ -556,17 +557,17 @@ more closely to the RFCs relevant to their domains. With all of these :class:`EmailPolicies <.EmailPolicy>`, the effective API of the email package is changed from the Python 3.2 API in the following ways: - * Setting a header on a :class:`~email.message.Message` results in that - header being parsed and a header object created. +* Setting a header on a :class:`~email.message.Message` results in that + header being parsed and a header object created. - * Fetching a header value from a :class:`~email.message.Message` results - in that header being parsed and a header object created and - returned. +* Fetching a header value from a :class:`~email.message.Message` results + in that header being parsed and a header object created and + returned. - * Any header object, or any header that is refolded due to the - policy settings, is folded using an algorithm that fully implements the - RFC folding algorithms, including knowing where encoded words are required - and allowed. +* Any header object, or any header that is refolded due to the + policy settings, is folded using an algorithm that fully implements the + RFC folding algorithms, including knowing where encoded words are required + and allowed. 
From the application view, this means that any header obtained through the :class:`~email.message.EmailMessage` is a header object with extra diff --git a/Doc/library/email.rst b/Doc/library/email.rst index 816fae991d24cb7..3a6039004fcaae9 100644 --- a/Doc/library/email.rst +++ b/Doc/library/email.rst @@ -15,7 +15,7 @@ The :mod:`email` package is a library for managing email messages. It is specifically *not* designed to do any sending of email messages to SMTP (:rfc:`2821`), NNTP, or other servers; those are functions of modules such as -:mod:`smtplib` and :mod:`nntplib`. The :mod:`email` package attempts to be as +:mod:`smtplib`. The :mod:`email` package attempts to be as RFC-compliant as possible, supporting :rfc:`5322` and :rfc:`6532`, as well as such MIME-related RFCs as :rfc:`2045`, :rfc:`2046`, :rfc:`2047`, :rfc:`2183`, and :rfc:`2231`. @@ -141,9 +141,6 @@ Legacy API: Module :mod:`imaplib` IMAP (Internet Message Access Protocol) client - Module :mod:`nntplib` - NNTP (Net News Transport Protocol) client - Module :mod:`mailbox` Tools for creating, reading, and managing collections of messages on disk using a variety standard formats. diff --git a/Doc/library/email.utils.rst b/Doc/library/email.utils.rst index 0e266b6a45782ac..345b64001c1ace1 100644 --- a/Doc/library/email.utils.rst +++ b/Doc/library/email.utils.rst @@ -13,19 +13,17 @@ module: .. function:: localtime(dt=None) - Return local time as an aware datetime object. If called without - arguments, return current time. Otherwise *dt* argument should be a - :class:`~datetime.datetime` instance, and it is converted to the local time - zone according to the system time zone database. If *dt* is naive (that - is, ``dt.tzinfo`` is ``None``), it is assumed to be in local time. In this - case, a positive or zero value for *isdst* causes ``localtime`` to presume - initially that summer time (for example, Daylight Saving Time) is or is not - (respectively) in effect for the specified time. A negative value for - *isdst* causes the ``localtime`` to attempt to divine whether summer time - is in effect for the specified time. - - .. versionadded:: 3.3 + Return local time as an aware datetime object. If called without + arguments, return current time. Otherwise *dt* argument should be a + :class:`~datetime.datetime` instance, and it is converted to the local time + zone according to the system time zone database. If *dt* is naive (that + is, ``dt.tzinfo`` is ``None``), it is assumed to be in local time. The + *isdst* parameter is ignored. + .. versionadded:: 3.3 + + .. deprecated-removed:: 3.12 3.14 + The *isdst* parameter. .. function:: make_msgid(idstring=None, domain=None) diff --git a/Doc/library/ensurepip.rst b/Doc/library/ensurepip.rst index 34f45e20bae9d9c..de3b93f5e610730 100644 --- a/Doc/library/ensurepip.rst +++ b/Doc/library/ensurepip.rst @@ -7,6 +7,8 @@ .. versionadded:: 3.4 +**Source code:** :source:`Lib/ensurepip` + -------------- The :mod:`ensurepip` package provides support for bootstrapping the ``pip`` @@ -59,7 +61,7 @@ By default, ``pip`` is installed into the current virtual environment active virtual environment). The installation location can be controlled through two additional command line options: -* ``--root
<dir>
``: Installs ``pip`` relative to the given root directory +* :samp:`--root {dir}`: Installs ``pip`` relative to the given root directory rather than the root of the currently active virtual environment (if any) or the default root for the current Python installation. * ``--user``: Installs ``pip`` into the user site packages directory rather diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst index dcced28b8508e84..2d5ae361c3f1e3a 100644 --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -27,7 +27,8 @@ An enumeration: * is a set of symbolic names (members) bound to unique values -* can be iterated over to return its members in definition order +* can be iterated over to return its canonical (i.e. non-alias) members in + definition order * uses *call* syntax to return members by value * uses *index* syntax to return members by name @@ -51,11 +52,11 @@ are not normal Python classes. See .. note:: Nomenclature - - The class :class:`Color` is an *enumeration* (or *enum*) - - The attributes :attr:`Color.RED`, :attr:`Color.GREEN`, etc., are + - The class :class:`!Color` is an *enumeration* (or *enum*) + - The attributes :attr:`!Color.RED`, :attr:`!Color.GREEN`, etc., are *enumeration members* (or *members*) and are functionally constants. - The enum members have *names* and *values* (the name of - :attr:`Color.RED` is ``RED``, the value of :attr:`Color.BLUE` is + :attr:`!Color.RED` is ``RED``, the value of :attr:`!Color.BLUE` is ``3``, etc.) --------------- @@ -118,7 +119,8 @@ Module Contents :func:`~enum.property` Allows :class:`Enum` members to have attributes without conflicting with - member names. + member names. The ``value`` and ``name`` attributes are implemented this + way. :func:`unique` @@ -140,9 +142,8 @@ Module Contents :func:`global_enum` Modify the :class:`str() ` and :func:`repr` of an enum - to show its members as belonging to the module instead of its class. - Should only be used if the enum members will be exported to the - module global namespace. + to show its members as belonging to the module instead of its class, + and export the enum members to the global namespace. :func:`show_flag_values` @@ -164,11 +165,32 @@ Data Types to subclass *EnumType* -- see :ref:`Subclassing EnumType ` for details. - *EnumType* is responsible for setting the correct :meth:`__repr__`, - :meth:`__str__`, :meth:`__format__`, and :meth:`__reduce__` methods on the + ``EnumType`` is responsible for setting the correct :meth:`!__repr__`, + :meth:`!__str__`, :meth:`!__format__`, and :meth:`!__reduce__` methods on the final *enum*, as well as creating the enum members, properly handling duplicates, providing iteration over the enum class, etc. + .. method:: EnumType.__call__(cls, value, names=None, \*, module=None, qualname=None, type=None, start=1, boundary=None) + + This method is called in two different ways: + + * to look up an existing member: + + :cls: The enum class being called. + :value: The value to lookup. + + * to use the ``cls`` enum to create a new enum (only if the existing enum + does not have any members): + + :cls: The enum class being called. + :value: The name of the new Enum to create. + :names: The names/values of the members for the new Enum. + :module: The name of the module the new Enum is created in. + :qualname: The actual location in the module where this Enum can be found. + :type: A mix-in type for the new Enum. + :start: The first integer value for the Enum (used by :class:`auto`). 
+ :boundary: How to handle out-of-range values from bit operations (:class:`Flag` only). + .. method:: EnumType.__contains__(cls, member) Returns ``True`` if member belongs to the ``cls``:: @@ -176,11 +198,12 @@ Data Types >>> some_var = Color.RED >>> some_var in Color True + >>> Color.RED.value in Color + True - .. note:: + .. versionchanged:: 3.12 - In Python 3.12 it will be possible to check for member values and not - just members; until then, a ``TypeError`` will be raised if a + Before Python 3.12, a ``TypeError`` is raised if a non-Enum-member is used in a containment check. .. method:: EnumType.__dir__(cls) @@ -193,7 +216,7 @@ Data Types .. method:: EnumType.__getitem__(cls, name) - Returns the Enum member in *cls* matching *name*, or raises an :exc:`KeyError`:: + Returns the Enum member in *cls* matching *name*, or raises a :exc:`KeyError`:: >>> Color['BLUE'] @@ -219,6 +242,10 @@ Data Types >>> list(reversed(Color)) [, , ] + .. versionadded:: 3.11 + + Before 3.11 ``enum`` used ``EnumMeta`` type, which is kept as an alias. + .. class:: Enum @@ -240,10 +267,10 @@ Data Types .. note:: Enum member values - Member values can be anything: :class:`int`, :class:`str`, etc.. If + Member values can be anything: :class:`int`, :class:`str`, etc. If the exact value is unimportant you may use :class:`auto` instances and an - appropriate value will be chosen for you. Care must be taken if you mix - :class:`auto` with other values. + appropriate value will be chosen for you. See :class:`auto` for the + details. .. attribute:: Enum._ignore_ @@ -254,26 +281,6 @@ Data Types names will also be removed from the completed enumeration. See :ref:`TimePeriod ` for an example. - .. method:: Enum.__call__(cls, value, names=None, \*, module=None, qualname=None, type=None, start=1, boundary=None) - - This method is called in two different ways: - - * to look up an existing member: - - :cls: The enum class being called. - :value: The value to lookup. - - * to use the ``cls`` enum to create a new enum: - - :cls: The enum class being called. - :value: The name of the new Enum to create. - :names: The names/values of the members for the new Enum. - :module: The name of the module the new Enum is created in. - :qualname: The actual location in the module where this Enum can be found. - :type: A mix-in type for the new Enum. - :start: The first integer value for the Enum (used by :class:`auto`) - :boundary: How to handle out-of-range values from bit operations (:class:`Flag` only) - .. method:: Enum.__dir__(self) Returns ``['__class__', '__doc__', '__module__', 'name', 'value']`` and @@ -291,6 +298,7 @@ Data Types ... @classmethod ... def today(cls): ... print('today is %s' % cls(date.today().isoweekday()).name) + ... >>> dir(Weekday.SATURDAY) ['__class__', '__doc__', '__eq__', '__hash__', '__module__', 'name', 'today', 'value'] @@ -308,13 +316,14 @@ Data Types >>> class PowersOfThree(Enum): ... @staticmethod ... def _generate_next_value_(name, start, count, last_values): - ... return (count + 1) * 3 + ... return 3 ** (count + 1) ... FIRST = auto() ... SECOND = auto() + ... >>> PowersOfThree.SECOND.value - 6 + 9 - .. method:: Enum.__init_subclass__(cls, \**kwds) + .. method:: Enum.__init_subclass__(cls, **kwds) A *classmethod* that is used to further configure subsequent subclasses. By default, does nothing. @@ -335,6 +344,7 @@ Data Types ... if member.value == value: ... return member ... return None + ... >>> Build.DEBUG.value 'debug' >>> Build('deBUG') @@ -352,6 +362,7 @@ Data Types ... def __repr__(self): ... 
cls_name = self.__class__.__name__ ... return f'{cls_name}.{self.name}' + ... >>> OtherStyle.ALTERNATE, str(OtherStyle.ALTERNATE), f"{OtherStyle.ALTERNATE}" (OtherStyle.ALTERNATE, 'OtherStyle.ALTERNATE', 'OtherStyle.ALTERNATE') @@ -366,13 +377,14 @@ Data Types ... SOMETHING_ELSE = auto() ... def __str__(self): ... return f'{self.name}' + ... >>> OtherStyle.ALTERNATE, str(OtherStyle.ALTERNATE), f"{OtherStyle.ALTERNATE}" (, 'ALTERNATE', 'ALTERNATE') .. method:: Enum.__format__(self) Returns the string used for *format()* and *f-string* calls. By default, - returns :meth:`__str__` returns, but can be overridden:: + returns :meth:`__str__` return value, but can be overridden:: >>> class OtherStyle(Enum): ... ALTERNATE = auto() @@ -380,6 +392,7 @@ Data Types ... SOMETHING_ELSE = auto() ... def __format__(self, spec): ... return f'{self.name}' + ... >>> OtherStyle.ALTERNATE, str(OtherStyle.ALTERNATE), f"{OtherStyle.ALTERNATE}" (, 'OtherStyle.ALTERNATE', 'ALTERNATE') @@ -388,25 +401,28 @@ Data Types Using :class:`auto` with :class:`Enum` results in integers of increasing value, starting with ``1``. + .. versionchanged:: 3.12 Added :ref:`enum-dataclass-support` + .. class:: IntEnum - *IntEnum* is the same as *Enum*, but its members are also integers and can be + *IntEnum* is the same as :class:`Enum`, but its members are also integers and can be used anywhere that an integer can be used. If any integer operation is performed with an *IntEnum* member, the resulting value loses its enumeration status. >>> from enum import IntEnum - >>> class Numbers(IntEnum): + >>> class Number(IntEnum): ... ONE = 1 ... TWO = 2 ... THREE = 3 - >>> Numbers.THREE - - >>> Numbers.ONE + Numbers.TWO + ... + >>> Number.THREE + + >>> Number.ONE + Number.TWO 3 - >>> Numbers.THREE + 5 + >>> Number.THREE + 5 8 - >>> Numbers.THREE == 3 + >>> Number.THREE == 3 True .. note:: @@ -414,30 +430,34 @@ Data Types Using :class:`auto` with :class:`IntEnum` results in integers of increasing value, starting with ``1``. - .. versionchanged:: 3.11 :meth:`__str__` is now :func:`int.__str__` to + .. versionchanged:: 3.11 :meth:`~object.__str__` is now :meth:`!int.__str__` to better support the *replacement of existing constants* use-case. - :meth:`__format__` was already :func:`int.__format__` for that same reason. + :meth:`~object.__format__` was already :meth:`!int.__format__` for that same reason. .. class:: StrEnum - *StrEnum* is the same as *Enum*, but its members are also strings and can be used + ``StrEnum`` is the same as :class:`Enum`, but its members are also strings and can be used in most of the same places that a string can be used. The result of any string operation performed on or with a *StrEnum* member is not part of the enumeration. - .. note:: There are places in the stdlib that check for an exact :class:`str` - instead of a :class:`str` subclass (i.e. ``type(unknown) == str`` - instead of ``isinstance(unknown, str)``), and in those locations you - will need to use ``str(StrEnum.member)``. + .. note:: + + There are places in the stdlib that check for an exact :class:`str` + instead of a :class:`str` subclass (i.e. ``type(unknown) == str`` + instead of ``isinstance(unknown, str)``), and in those locations you + will need to use ``str(StrEnum.member)``. .. note:: Using :class:`auto` with :class:`StrEnum` results in the lower-cased member name as the value. - .. note:: :meth:`__str__` is :func:`str.__str__` to better support the - *replacement of existing constants* use-case. 
:meth:`__format__` is likewise - :func:`str.__format__` for that same reason. + .. note:: + + :meth:`~object.__str__` is :meth:`!str.__str__` to better support the + *replacement of existing constants* use-case. :meth:`~object.__format__` is likewise + :meth:`!str.__format__` for that same reason. .. versionadded:: 3.11 @@ -456,6 +476,7 @@ Data Types ... RED = auto() ... GREEN = auto() ... BLUE = auto() + ... >>> purple = Color.RED | Color.BLUE >>> white = Color.RED | Color.GREEN | Color.BLUE >>> Color.GREEN in purple @@ -469,13 +490,17 @@ Data Types .. method:: __iter__(self): - Returns all contained members:: + Returns all contained non-alias members:: >>> list(Color.RED) [] >>> list(purple) [, ] + .. versionchanged:: 3.11 + + Aliases are no longer returned during iteration. + .. method:: __len__(self): Returns number of members in flag:: @@ -543,15 +568,15 @@ Data Types Using :class:`auto` with :class:`Flag` results in integers that are powers of two, starting with ``1``. - .. versionchanged:: 3.11 The *repr()* of zero-valued flags has changed. It + .. versionchanged:: 3.11 The *repr()* of zero-valued flags has changed. It is now:: - >>> Color(0) # doctest: +SKIP - + >>> Color(0) # doctest: +SKIP + .. class:: IntFlag - *IntFlag* is the same as *Flag*, but its members are also integers and can be + ``IntFlag`` is the same as :class:`Flag`, but its members are also integers and can be used anywhere that an integer can be used. >>> from enum import IntFlag, auto @@ -559,6 +584,7 @@ Data Types ... RED = auto() ... GREEN = auto() ... BLUE = auto() + ... >>> Color.RED & 2 >>> Color.RED | 2 @@ -570,12 +596,12 @@ Data Types >>> Color.RED + 2 3 - If a *Flag* operation is performed with an *IntFlag* member and: + If a :class:`Flag` operation is performed with an *IntFlag* member and: - * the result is a valid *IntFlag*: an *IntFlag* is returned - * the result is not a valid *IntFlag*: the result depends on the *FlagBoundary* setting + * the result is a valid *IntFlag*: an *IntFlag* is returned + * the result is not a valid *IntFlag*: the result depends on the :class:`FlagBoundary` setting - The *repr()* of unnamed zero-valued flags has changed. It is now: + The :func:`repr()` of unnamed zero-valued flags has changed. It is now: >>> Color(0) @@ -585,19 +611,25 @@ Data Types Using :class:`auto` with :class:`IntFlag` results in integers that are powers of two, starting with ``1``. - .. versionchanged:: 3.11 :meth:`__str__` is now :func:`int.__str__` to - better support the *replacement of existing constants* use-case. - :meth:`__format__` was already :func:`int.__format__` for that same reason. + .. versionchanged:: 3.11 + + :meth:`~object.__str__` is now :meth:`!int.__str__` to better support the + *replacement of existing constants* use-case. :meth:`~object.__format__` was + already :meth:`!int.__format__` for that same reason. + + Inversion of an :class:`!IntFlag` now returns a positive value that is the + union of all flags not in the given flag, rather than a negative value. + This matches the existing :class:`Flag` behavior. .. 
class:: ReprEnum - :class:`!ReprEum` uses the :meth:`repr() ` of :class:`Enum`, + :class:`!ReprEnum` uses the :meth:`repr() ` of :class:`Enum`, but the :class:`str() ` of the mixed-in data type: - * :meth:`!int.__str__` for :class:`IntEnum` and :class:`IntFlag` - * :meth:`!str.__str__` for :class:`StrEnum` + * :meth:`!int.__str__` for :class:`IntEnum` and :class:`IntFlag` + * :meth:`!str.__str__` for :class:`StrEnum` - Inherit from :class:`!ReprEnum` to keep the :class:`str() / :func:`format` + Inherit from :class:`!ReprEnum` to keep the :class:`str() ` / :func:`format` of the mixed-in data type instead of using the :class:`Enum`-default :meth:`str() `. @@ -643,7 +675,7 @@ Data Types .. attribute:: NAMED_FLAGS Ensure that any flag groups/masks contain only named flags -- useful when - values are specified instead of being generated by :func:`auto` + values are specified instead of being generated by :func:`auto`:: >>> from enum import Flag, verify, NAMED_FLAGS >>> @verify(NAMED_FLAGS) @@ -665,19 +697,20 @@ Data Types .. class:: FlagBoundary - *FlagBoundary* controls how out-of-range values are handled in *Flag* and its + ``FlagBoundary`` controls how out-of-range values are handled in :class:`Flag` and its subclasses. .. attribute:: STRICT - Out-of-range values cause a :exc:`ValueError` to be raised. This is the + Out-of-range values cause a :exc:`ValueError` to be raised. This is the default for :class:`Flag`:: - >>> from enum import Flag, STRICT + >>> from enum import Flag, STRICT, auto >>> class StrictFlag(Flag, boundary=STRICT): ... RED = auto() ... GREEN = auto() ... BLUE = auto() + ... >>> StrictFlag(2**2 + 2**4) Traceback (most recent call last): ... @@ -687,40 +720,42 @@ Data Types .. attribute:: CONFORM - Out-of-range values have invalid values removed, leaving a valid *Flag* + Out-of-range values have invalid values removed, leaving a valid :class:`Flag` value:: - >>> from enum import Flag, CONFORM + >>> from enum import Flag, CONFORM, auto >>> class ConformFlag(Flag, boundary=CONFORM): ... RED = auto() ... GREEN = auto() ... BLUE = auto() + ... >>> ConformFlag(2**2 + 2**4) .. attribute:: EJECT - Out-of-range values lose their *Flag* membership and revert to :class:`int`. - This is the default for :class:`IntFlag`:: + Out-of-range values lose their :class:`Flag` membership and revert to :class:`int`. - >>> from enum import Flag, EJECT + >>> from enum import Flag, EJECT, auto >>> class EjectFlag(Flag, boundary=EJECT): ... RED = auto() ... GREEN = auto() ... BLUE = auto() + ... >>> EjectFlag(2**2 + 2**4) 20 .. attribute:: KEEP - Out-of-range values are kept, and the *Flag* membership is kept. This is - used for some stdlib flags: + Out-of-range values are kept, and the :class:`Flag` membership is kept. + This is the default for :class:`IntFlag`:: - >>> from enum import Flag, KEEP + >>> from enum import Flag, KEEP, auto >>> class KeepFlag(Flag, boundary=KEEP): ... RED = auto() ... GREEN = auto() ... BLUE = auto() + ... >>> KeepFlag(2**2 + 2**4) @@ -731,11 +766,11 @@ Data Types Supported ``__dunder__`` names """""""""""""""""""""""""""""" -:attr:`__members__` is a read-only ordered mapping of ``member_name``:``member`` +:attr:`~EnumType.__members__` is a read-only ordered mapping of ``member_name``:``member`` items. It is only available on the class. -:meth:`__new__`, if specified, must create and return the enum members; it is -also a very good idea to set the member's :attr:`_value_` appropriately. 
Once +:meth:`~object.__new__`, if specified, must create and return the enum members; it is +also a very good idea to set the member's :attr:`!_value_` appropriately. Once all the members are created it is no longer used. @@ -755,13 +790,13 @@ Supported ``_sunder_`` names - ``_generate_next_value_`` -- used to get an appropriate value for an enum member; may be overridden - .. note:: + .. note:: - For standard :class:`Enum` classes the next value chosen is the last value seen - incremented by one. + For standard :class:`Enum` classes the next value chosen is the last value seen + incremented by one. - For :class:`Flag` classes the next value chosen will be the next highest - power-of-two, regardless of the last value seen. + For :class:`Flag` classes the next value chosen will be the next highest + power-of-two, regardless of the last value seen. .. versionadded:: 3.6 ``_missing_``, ``_order_``, ``_generate_next_value_`` .. versionadded:: 3.7 ``_ignore_`` @@ -774,16 +809,30 @@ Utilities and Decorators .. class:: auto *auto* can be used in place of a value. If used, the *Enum* machinery will - call an *Enum*'s :meth:`_generate_next_value_` to get an appropriate value. - For *Enum* and *IntEnum* that appropriate value will be the last value plus - one; for *Flag* and *IntFlag* it will be the first power-of-two greater - than the last value; for *StrEnum* it will be the lower-cased version of the - member's name. + call an :class:`Enum`'s :meth:`~Enum._generate_next_value_` to get an appropriate value. + For :class:`Enum` and :class:`IntEnum` that appropriate value will be the last value plus + one; for :class:`Flag` and :class:`IntFlag` it will be the first power-of-two greater + than the highest value; for :class:`StrEnum` it will be the lower-cased version of + the member's name. Care must be taken if mixing *auto()* with manually + specified values. + + *auto* instances are only resolved when at the top level of an assignment: + + * ``FIRST = auto()`` will work (auto() is replaced with ``1``); + * ``SECOND = auto(), -2`` will work (auto is replaced with ``2``, so ``2, -2`` is + used to create the ``SECOND`` enum member; + * ``THREE = [auto(), -3]`` will *not* work (``, -3`` is used to + create the ``THREE`` enum member) + + .. versionchanged:: 3.11.1 + + In prior versions, ``auto()`` had to be the only thing + on the assignment line to work properly. ``_generate_next_value_`` can be overridden to customize the values used by *auto*. - .. note:: in 3.13 the default ``"generate_next_value_`` will always return + .. note:: in 3.13 the default ``_generate_next_value_`` will always return the highest member value incremented by 1, and will fail if any member is an incompatible type. @@ -803,7 +852,7 @@ Utilities and Decorators .. decorator:: unique A :keyword:`class` decorator specifically for enumerations. 
It searches an - enumeration's :attr:`__members__`, gathering any aliases it finds; if any are + enumeration's :attr:`~EnumType.__members__`, gathering any aliases it finds; if any are found :exc:`ValueError` is raised with the details:: >>> from enum import Enum, unique @@ -861,23 +910,23 @@ Notes :class:`IntEnum`, :class:`StrEnum`, and :class:`IntFlag` - These three enum types are designed to be drop-in replacements for existing - integer- and string-based values; as such, they have extra limitations: + These three enum types are designed to be drop-in replacements for existing + integer- and string-based values; as such, they have extra limitations: - - ``__str__`` uses the value and not the name of the enum member + - ``__str__`` uses the value and not the name of the enum member - - ``__format__``, because it uses ``__str__``, will also use the value of - the enum member instead of its name + - ``__format__``, because it uses ``__str__``, will also use the value of + the enum member instead of its name - If you do not need/want those limitations, you can either create your own - base class by mixing in the ``int`` or ``str`` type yourself:: + If you do not need/want those limitations, you can either create your own + base class by mixing in the ``int`` or ``str`` type yourself:: - >>> from enum import Enum - >>> class MyIntEnum(int, Enum): - ... pass + >>> from enum import Enum + >>> class MyIntEnum(int, Enum): + ... pass or you can reassign the appropriate :meth:`str`, etc., in your enum:: - >>> from enum import IntEnum - >>> class MyIntEnum(IntEnum): - ... __str__ = IntEnum.__str__ + >>> from enum import Enum, IntEnum + >>> class MyIntEnum(IntEnum): + ... __str__ = Enum.__str__ diff --git a/Doc/library/errno.rst b/Doc/library/errno.rst index 5122c69697ef914..283e8b013265d95 100644 --- a/Doc/library/errno.rst +++ b/Doc/library/errno.rst @@ -511,6 +511,13 @@ defined by the module. The specific list of defined symbols is available as Operation not supported on transport endpoint +.. data:: ENOTSUP + + Operation not supported + + .. versionadded:: 3.2 + + .. data:: EPFNOSUPPORT Protocol family not supported @@ -666,3 +673,24 @@ defined by the module. The specific list of defined symbols is available as .. availability:: WASI, FreeBSD .. versionadded:: 3.11.1 + + +.. data:: ECANCELED + + Operation canceled + + .. versionadded:: 3.2 + + +.. data:: EOWNERDEAD + + Owner died + + .. versionadded:: 3.2 + + +.. data:: ENOTRECOVERABLE + + State not recoverable + + .. versionadded:: 3.2 diff --git a/Doc/library/exceptions.rst b/Doc/library/exceptions.rst index fc856277d67b2ec..cd85df8723a76be 100644 --- a/Doc/library/exceptions.rst +++ b/Doc/library/exceptions.rst @@ -4,8 +4,8 @@ Built-in Exceptions =================== .. index:: - statement: try - statement: except + pair: statement; try + pair: statement; except In Python, all exceptions must be instances of a class that derives from :class:`BaseException`. In a :keyword:`try` statement with an :keyword:`except` @@ -14,7 +14,7 @@ classes derived from that class (but not exception classes from which *it* is derived). Two exception classes that are not related via subclassing are never equivalent, even if they have the same name. -.. index:: statement: raise +.. index:: pair: statement; raise The built-in exceptions listed below can be generated by the interpreter or built-in functions. Except where mentioned, they have an "associated value" @@ -123,7 +123,7 @@ The following exceptions are used mostly as base classes for other exceptions. 
try: ... except SomeException: - tb = sys.exc_info()[2] + tb = sys.exception().__traceback__ raise OtherException(...).with_traceback(tb) .. method:: add_note(note) @@ -175,7 +175,7 @@ The following exceptions are the exceptions that are usually raised. .. exception:: AssertionError - .. index:: statement: assert + .. index:: pair: statement; assert Raised when an :keyword:`assert` statement fails. @@ -220,10 +220,16 @@ The following exceptions are the exceptions that are usually raised. load a module. Also raised when the "from list" in ``from ... import`` has a name that cannot be found. - The :attr:`name` and :attr:`path` attributes can be set using keyword-only - arguments to the constructor. When set they represent the name of the module - that was attempted to be imported and the path to any file which triggered - the exception, respectively. + The optional *name* and *path* keyword-only arguments + set the corresponding attributes: + + .. attribute:: name + + The name of the module that was attempted to be imported. + + .. attribute:: path + + The path to any file which triggered the exception. .. versionchanged:: 3.3 Added the :attr:`name` and :attr:`path` attributes. @@ -318,7 +324,7 @@ The following exceptions are the exceptions that are usually raised. .. exception:: OSError([arg]) OSError(errno, strerror[, filename[, winerror[, filename2]]]) - .. index:: module: errno + .. index:: pair: module; errno This exception is raised when a system function returns a system-related error, including I/O failures such as "file not found" or "disk full" @@ -450,7 +456,7 @@ The following exceptions are the exceptions that are usually raised. .. exception:: StopAsyncIteration - Must be raised by :meth:`__anext__` method of an + Must be raised by :meth:`~object.__anext__` method of an :term:`asynchronous iterator` object to stop the iteration. .. versionadded:: 3.5 @@ -659,8 +665,8 @@ depending on the system error code. Raised when an operation would block on an object (e.g. socket) set for non-blocking operation. - Corresponds to :c:data:`errno` :py:data:`~errno.EAGAIN`, :py:data:`~errno.EALREADY`, - :py:data:`~errno.EWOULDBLOCK` and :py:data:`~errno.EINPROGRESS`. + Corresponds to :c:data:`errno` :py:const:`~errno.EAGAIN`, :py:const:`~errno.EALREADY`, + :py:const:`~errno.EWOULDBLOCK` and :py:const:`~errno.EINPROGRESS`. In addition to those of :exc:`OSError`, :exc:`BlockingIOError` can have one more attribute: @@ -674,7 +680,7 @@ depending on the system error code. .. exception:: ChildProcessError Raised when an operation on a child process failed. - Corresponds to :c:data:`errno` :py:data:`~errno.ECHILD`. + Corresponds to :c:data:`errno` :py:const:`~errno.ECHILD`. .. exception:: ConnectionError @@ -688,40 +694,40 @@ depending on the system error code. A subclass of :exc:`ConnectionError`, raised when trying to write on a pipe while the other end has been closed, or trying to write on a socket which has been shutdown for writing. - Corresponds to :c:data:`errno` :py:data:`~errno.EPIPE` and :py:data:`~errno.ESHUTDOWN`. + Corresponds to :c:data:`errno` :py:const:`~errno.EPIPE` and :py:const:`~errno.ESHUTDOWN`. .. exception:: ConnectionAbortedError A subclass of :exc:`ConnectionError`, raised when a connection attempt is aborted by the peer. - Corresponds to :c:data:`errno` :py:data:`~errno.ECONNABORTED`. + Corresponds to :c:data:`errno` :py:const:`~errno.ECONNABORTED`. .. 
exception:: ConnectionRefusedError A subclass of :exc:`ConnectionError`, raised when a connection attempt is refused by the peer. - Corresponds to :c:data:`errno` :py:data:`~errno.ECONNREFUSED`. + Corresponds to :c:data:`errno` :py:const:`~errno.ECONNREFUSED`. .. exception:: ConnectionResetError A subclass of :exc:`ConnectionError`, raised when a connection is reset by the peer. - Corresponds to :c:data:`errno` :py:data:`~errno.ECONNRESET`. + Corresponds to :c:data:`errno` :py:const:`~errno.ECONNRESET`. .. exception:: FileExistsError Raised when trying to create a file or directory which already exists. - Corresponds to :c:data:`errno` :py:data:`~errno.EEXIST`. + Corresponds to :c:data:`errno` :py:const:`~errno.EEXIST`. .. exception:: FileNotFoundError Raised when a file or directory is requested but doesn't exist. - Corresponds to :c:data:`errno` :py:data:`~errno.ENOENT`. + Corresponds to :c:data:`errno` :py:const:`~errno.ENOENT`. .. exception:: InterruptedError Raised when a system call is interrupted by an incoming signal. - Corresponds to :c:data:`errno` :py:data:`~errno.EINTR`. + Corresponds to :c:data:`errno` :py:const:`~errno.EINTR`. .. versionchanged:: 3.5 Python now retries system calls when a syscall is interrupted by a @@ -732,7 +738,7 @@ depending on the system error code. Raised when a file operation (such as :func:`os.remove`) is requested on a directory. - Corresponds to :c:data:`errno` :py:data:`~errno.EISDIR`. + Corresponds to :c:data:`errno` :py:const:`~errno.EISDIR`. .. exception:: NotADirectoryError @@ -740,28 +746,28 @@ depending on the system error code. something which is not a directory. On most POSIX platforms, it may also be raised if an operation attempts to open or traverse a non-directory file as if it were a directory. - Corresponds to :c:data:`errno` :py:data:`~errno.ENOTDIR`. + Corresponds to :c:data:`errno` :py:const:`~errno.ENOTDIR`. .. exception:: PermissionError Raised when trying to run an operation without the adequate access rights - for example filesystem permissions. - Corresponds to :c:data:`errno` :py:data:`~errno.EACCES`, - :py:data:`~errno.EPERM`, and :py:data:`~errno.ENOTCAPABLE`. + Corresponds to :c:data:`errno` :py:const:`~errno.EACCES`, + :py:const:`~errno.EPERM`, and :py:const:`~errno.ENOTCAPABLE`. .. versionchanged:: 3.11.1 - WASI's :py:data:`~errno.ENOTCAPABLE` is now mapped to + WASI's :py:const:`~errno.ENOTCAPABLE` is now mapped to :exc:`PermissionError`. .. exception:: ProcessLookupError Raised when a given process doesn't exist. - Corresponds to :c:data:`errno` :py:data:`~errno.ESRCH`. + Corresponds to :c:data:`errno` :py:const:`~errno.ESRCH`. .. exception:: TimeoutError Raised when a system function timed out at the system level. - Corresponds to :c:data:`errno` :py:data:`~errno.ETIMEDOUT`. + Corresponds to :c:data:`errno` :py:const:`~errno.ETIMEDOUT`. .. versionadded:: 3.3 All the above :exc:`OSError` subclasses were added. @@ -871,6 +877,8 @@ The following exceptions are used as warning categories; see the .. versionadded:: 3.2 +.. _lib-exception-groups: + Exception groups ---------------- @@ -912,10 +920,11 @@ their subgroups based on the types of the contained exceptions. Returns an exception group that contains only the exceptions from the current group that match *condition*, or ``None`` if the result is empty. 
- The condition can be either a function that accepts an exception and returns - true for those that should be in the subgroup, or it can be an exception type - or a tuple of exception types, which is used to check for a match using the - same check that is used in an ``except`` clause. + The condition can be an exception type or tuple of exception types, in which + case each exception is checked for a match using the same check that is used + in an ``except`` clause. The condition can also be a callable (other than + a type object) that accepts an exception as its single argument and returns + true for the exceptions that should be in the subgroup. The nesting structure of the current exception is preserved in the result, as are the values of its :attr:`message`, :attr:`__traceback__`, @@ -926,6 +935,9 @@ their subgroups based on the types of the contained exceptions. including the top-level and any nested exception groups. If the condition is true for such an exception group, it is included in the result in full. + .. versionadded:: 3.13 + ``condition`` can be any callable which is not a type object. + .. method:: split(condition) Like :meth:`subgroup`, but returns the pair ``(match, rest)`` where ``match`` @@ -934,21 +946,42 @@ their subgroups based on the types of the contained exceptions. .. method:: derive(excs) - Returns an exception group with the same :attr:`message`, - :attr:`__traceback__`, :attr:`__cause__`, :attr:`__context__` - and :attr:`__notes__` but which wraps the exceptions in ``excs``. + Returns an exception group with the same :attr:`message`, but which + wraps the exceptions in ``excs``. This method is used by :meth:`subgroup` and :meth:`split`. A subclass needs to override it in order to make :meth:`subgroup` and :meth:`split` return instances of the subclass rather - than :exc:`ExceptionGroup`. :: + than :exc:`ExceptionGroup`. + + :meth:`subgroup` and :meth:`split` copy the :attr:`__traceback__`, + :attr:`__cause__`, :attr:`__context__` and :attr:`__notes__` fields from + the original exception group to the one returned by :meth:`derive`, so + these fields do not need to be updated by :meth:`derive`. :: >>> class MyGroup(ExceptionGroup): - ... def derive(self, exc): - ... return MyGroup(self.message, exc) + ... def derive(self, excs): + ... return MyGroup(self.message, excs) + ... + >>> e = MyGroup("eg", [ValueError(1), TypeError(2)]) + >>> e.add_note("a note") + >>> e.__context__ = Exception("context") + >>> e.__cause__ = Exception("cause") + >>> try: + ... raise e + ... except Exception as e: + ... exc = e ... - >>> MyGroup("eg", [ValueError(1), TypeError(2)]).split(TypeError) - (MyGroup('eg', [TypeError(2)]), MyGroup('eg', [ValueError(1)])) + >>> match, rest = exc.split(ValueError) + >>> exc, exc.__context__, exc.__cause__, exc.__notes__ + (MyGroup('eg', [ValueError(1), TypeError(2)]), Exception('context'), Exception('cause'), ['a note']) + >>> match, match.__context__, match.__cause__, match.__notes__ + (MyGroup('eg', [ValueError(1)]), Exception('context'), Exception('cause'), ['a note']) + >>> rest, rest.__context__, rest.__cause__, rest.__notes__ + (MyGroup('eg', [TypeError(2)]), Exception('context'), Exception('cause'), ['a note']) + >>> exc.__traceback__ is match.__traceback__ is rest.__traceback__ + True + Note that :exc:`BaseExceptionGroup` defines :meth:`__new__`, so subclasses that need a different constructor signature need to @@ -965,6 +998,10 @@ their subgroups based on the types of the contained exceptions. 
def derive(self, excs): return Errors(excs, self.exit_code) + Like :exc:`ExceptionGroup`, any subclass of :exc:`BaseExceptionGroup` which + is also a subclass of :exc:`Exception` can only wrap instances of + :exc:`Exception`. + .. versionadded:: 3.11 diff --git a/Doc/library/faulthandler.rst b/Doc/library/faulthandler.rst index be0912376bd8ef3..f64dfeb5e081c7c 100644 --- a/Doc/library/faulthandler.rst +++ b/Doc/library/faulthandler.rst @@ -43,6 +43,13 @@ Python is deadlocked. The :ref:`Python Development Mode ` calls :func:`faulthandler.enable` at Python startup. +.. seealso:: + + Module :mod:`pdb` + Interactive source code debugger for Python programs. + + Module :mod:`traceback` + Standard interface to extract, format and print stack traces of Python programs. Dumping the traceback --------------------- @@ -52,6 +59,8 @@ Dumping the traceback Dump the tracebacks of all threads into *file*. If *all_threads* is ``False``, dump only the current thread. + .. seealso:: :func:`traceback.print_tb`, which can be used to print a traceback object. + .. versionchanged:: 3.5 Added support for passing file descriptor to this function. @@ -166,10 +175,10 @@ handler: .. code-block:: shell-session - $ python3 -c "import ctypes; ctypes.string_at(0)" + $ python -c "import ctypes; ctypes.string_at(0)" Segmentation fault - $ python3 -q -X faulthandler + $ python -q -X faulthandler >>> import ctypes >>> ctypes.string_at(0) Fatal Python error: Segmentation fault @@ -178,4 +187,3 @@ handler: File "/home/python/cpython/Lib/ctypes/__init__.py", line 486 in string_at File "", line 1 in Segmentation fault - diff --git a/Doc/library/fcntl.rst b/Doc/library/fcntl.rst index 997c7ea571fc03f..309ad652d4af34a 100644 --- a/Doc/library/fcntl.rst +++ b/Doc/library/fcntl.rst @@ -18,7 +18,7 @@ interface to the :c:func:`fcntl` and :c:func:`ioctl` Unix routines. For a complete description of these calls, see :manpage:`fcntl(2)` and :manpage:`ioctl(2)` Unix manual pages. -.. include:: ../includes/wasm-notavail.rst +.. availability:: Unix, not Emscripten, not WASI. All functions in this module take a file descriptor *fd* as their first argument. This can be an integer file descriptor, such as returned by @@ -172,9 +172,9 @@ The module defines the following functions: which the lock starts, relative to *whence*, and *whence* is as with :func:`io.IOBase.seek`, specifically: - * :const:`0` -- relative to the start of the file (:data:`os.SEEK_SET`) - * :const:`1` -- relative to the current buffer position (:data:`os.SEEK_CUR`) - * :const:`2` -- relative to the end of the file (:data:`os.SEEK_END`) + * ``0`` -- relative to the start of the file (:const:`os.SEEK_SET`) + * ``1`` -- relative to the current buffer position (:const:`os.SEEK_CUR`) + * ``2`` -- relative to the end of the file (:const:`os.SEEK_END`) The default for *start* is 0, which means to start at the beginning of the file. The default for *len* is 0 which means to lock to the end of the file. The @@ -201,7 +201,7 @@ using the :func:`flock` call may be better. .. seealso:: Module :mod:`os` - If the locking flags :data:`~os.O_SHLOCK` and :data:`~os.O_EXLOCK` are + If the locking flags :const:`~os.O_SHLOCK` and :const:`~os.O_EXLOCK` are present in the :mod:`os` module (on BSD only), the :func:`os.open` function provides an alternative to the :func:`lockf` and :func:`flock` functions. 
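A rough sketch of the :func:`fcntl.lockf` call discussed above, assuming a Unix platform and a placeholder file name (illustrative only, not part of the patch)::

    import fcntl
    import os

    with open('example.lock', 'wb') as f:
        # start=0, len=0, whence=os.SEEK_SET locks from the start of the file to EOF
        fcntl.lockf(f, fcntl.LOCK_EX, 0, 0, os.SEEK_SET)
        f.write(b'locked region')
        fcntl.lockf(f, fcntl.LOCK_UN)           # release the advisory lock explicitly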
diff --git a/Doc/library/filecmp.rst b/Doc/library/filecmp.rst index 83e9e14ddcacd8f..dfe4b7c59fd578b 100644 --- a/Doc/library/filecmp.rst +++ b/Doc/library/filecmp.rst @@ -74,7 +74,7 @@ The :class:`dircmp` class Construct a new directory comparison object, to compare the directories *a* and *b*. *ignore* is a list of names to ignore, and defaults to - :attr:`filecmp.DEFAULT_IGNORES`. *hide* is a list of names to hide, and + :const:`filecmp.DEFAULT_IGNORES`. *hide* is a list of names to hide, and defaults to ``[os.curdir, os.pardir]``. The :class:`dircmp` class compares files by doing *shallow* comparisons @@ -100,7 +100,7 @@ The :class:`dircmp` class used to get various bits of information about the directory trees being compared. - Note that via :meth:`__getattr__` hooks, all attributes are computed lazily, + Note that via :meth:`~object.__getattr__` hooks, all attributes are computed lazily, so there is no speed penalty if only those attributes which are lightweight to compute are used. diff --git a/Doc/library/fileinput.rst b/Doc/library/fileinput.rst index 4bc868759f2025d..f93e9a58791eebd 100644 --- a/Doc/library/fileinput.rst +++ b/Doc/library/fileinput.rst @@ -177,7 +177,7 @@ available for subclassing as well: The keyword-only parameter *encoding* and *errors* are added. .. versionchanged:: 3.11 - The ``'rU'`` and ``'U'`` modes and the :meth:`__getitem__` method have + The ``'rU'`` and ``'U'`` modes and the :meth:`!__getitem__` method have been removed. diff --git a/Doc/library/fnmatch.rst b/Doc/library/fnmatch.rst index 9163da57c7b9994..aed8991d44772f5 100644 --- a/Doc/library/fnmatch.rst +++ b/Doc/library/fnmatch.rst @@ -8,7 +8,7 @@ .. index:: single: filenames; wildcard expansion -.. index:: module: re +.. index:: pair: module; re -------------- @@ -38,7 +38,7 @@ special characters used in shell-style wildcards are: For a literal match, wrap the meta-characters in brackets. For example, ``'[?]'`` matches the character ``'?'``. -.. index:: module: glob +.. index:: pair: module; glob Note that the filename separator (``'/'`` on Unix) is *not* special to this module. See module :mod:`glob` for pathname expansion (:mod:`glob` uses @@ -48,7 +48,7 @@ patterns. Also note that :func:`functools.lru_cache` with the *maxsize* of 32768 is used to cache the compiled regex patterns in the following functions: :func:`fnmatch`, -:func:`fnmatchcase`, :func:`filter`. +:func:`fnmatchcase`, :func:`.filter`. .. function:: fnmatch(filename, pattern) diff --git a/Doc/library/fractions.rst b/Doc/library/fractions.rst index c46d88b2297aa16..509c63686f5a7fd 100644 --- a/Doc/library/fractions.rst +++ b/Doc/library/fractions.rst @@ -25,7 +25,7 @@ another rational number, or from a string. The first version requires that *numerator* and *denominator* are instances of :class:`numbers.Rational` and returns a new :class:`Fraction` instance - with value ``numerator/denominator``. If *denominator* is :const:`0`, it + with value ``numerator/denominator``. If *denominator* is ``0``, it raises a :exc:`ZeroDivisionError`. The second version requires that *other_fraction* is an instance of :class:`numbers.Rational` and returns a :class:`Fraction` instance with the same value. The next two versions accept @@ -77,7 +77,7 @@ another rational number, or from a string. The :class:`Fraction` class inherits from the abstract base class :class:`numbers.Rational`, and implements all of the methods and - operations from that class. :class:`Fraction` instances are hashable, + operations from that class. 
:class:`Fraction` instances are :term:`hashable`, and should be treated as immutable. In addition, :class:`Fraction` has the following properties and methods: @@ -101,6 +101,11 @@ another rational number, or from a string. .. versionchanged:: 3.12 Space is allowed around the slash for string inputs: ``Fraction('2 / 3')``. + .. versionchanged:: 3.12 + :class:`Fraction` instances now support float-style formatting, with + presentation types ``"e"``, ``"E"``, ``"f"``, ``"F"``, ``"g"``, ``"G"`` + and ``"%""``. + .. attribute:: numerator Numerator of the Fraction in lowest term. @@ -113,10 +118,17 @@ another rational number, or from a string. .. method:: as_integer_ratio() Return a tuple of two integers, whose ratio is equal - to the Fraction and with a positive denominator. + to the original Fraction. The ratio is in lowest terms + and has a positive denominator. .. versionadded:: 3.8 + .. method:: is_integer() + + Return ``True`` if the Fraction is an integer. + + .. versionadded:: 3.12 + .. classmethod:: from_float(flt) Alternative constructor which only accepts instances of @@ -187,6 +199,29 @@ another rational number, or from a string. ``ndigits`` is negative), again rounding half toward even. This method can also be accessed through the :func:`round` function. + .. method:: __format__(format_spec, /) + + Provides support for float-style formatting of :class:`Fraction` + instances via the :meth:`str.format` method, the :func:`format` built-in + function, or :ref:`Formatted string literals `. The + presentation types ``"e"``, ``"E"``, ``"f"``, ``"F"``, ``"g"``, ``"G"`` + and ``"%"`` are supported. For these presentation types, formatting for a + :class:`Fraction` object ``x`` follows the rules outlined for + the :class:`float` type in the :ref:`formatspec` section. + + Here are some examples:: + + >>> from fractions import Fraction + >>> format(Fraction(1, 7), '.40g') + '0.1428571428571428571428571428571428571429' + >>> format(Fraction('1234567.855'), '_.2f') + '1_234_567.86' + >>> f"{Fraction(355, 113):*>20.6e}" + '********3.141593e+00' + >>> old_price, new_price = 499, 672 + >>> "{:.2%} price increase".format(Fraction(new_price, old_price) - 1) + '34.67% price increase' + .. seealso:: diff --git a/Doc/library/ftplib.rst b/Doc/library/ftplib.rst index e5ba9eef4074b91..d1fe6414ea020cf 100644 --- a/Doc/library/ftplib.rst +++ b/Doc/library/ftplib.rst @@ -85,7 +85,8 @@ The module defines the following items: The *encoding* parameter was added, and the default was changed from Latin-1 to UTF-8 to follow :rfc:`2640`. -.. class:: FTP_TLS(host='', user='', passwd='', acct='', keyfile=None, certfile=None, context=None, timeout=None, source_address=None, *, encoding='utf-8') +.. class:: FTP_TLS(host='', user='', passwd='', acct='', *, context=None, + timeout=None, source_address=None, encoding='utf-8') A :class:`FTP` subclass which adds TLS support to FTP as described in :rfc:`4217`. @@ -96,10 +97,6 @@ The module defines the following items: options, certificates and private keys into a single (potentially long-lived) structure. Please read :ref:`ssl-security` for best practices. - *keyfile* and *certfile* are a legacy alternative to *context* -- they - can point to PEM-formatted private key and certificate chain files - (respectively) for the SSL connection. - .. versionadded:: 3.2 .. versionchanged:: 3.3 @@ -108,14 +105,7 @@ The module defines the following items: .. 
versionchanged:: 3.4 The class now supports hostname check with :attr:`ssl.SSLContext.check_hostname` and *Server Name Indication* (see - :data:`ssl.HAS_SNI`). - - .. deprecated:: 3.6 - - *keyfile* and *certfile* are deprecated in favor of *context*. - Please use :meth:`ssl.SSLContext.load_cert_chain` instead, or let - :func:`ssl.create_default_context` select the system's trusted CA - certificates for you. + :const:`ssl.HAS_SNI`). .. versionchanged:: 3.9 If the *timeout* parameter is set to be zero, it will raise a @@ -123,6 +113,9 @@ The module defines the following items: The *encoding* parameter was added, and the default was changed from Latin-1 to UTF-8 to follow :rfc:`2640`. + .. versionchanged:: 3.12 + The deprecated *keyfile* and *certfile* parameters have been removed. + Here's a sample session using the :class:`FTP_TLS` class:: >>> ftps = FTP_TLS('ftp.pureftpd.org') @@ -438,7 +431,7 @@ FTP_TLS Objects .. attribute:: FTP_TLS.ssl_version - The SSL version to use (defaults to :attr:`ssl.PROTOCOL_SSLv23`). + The SSL version to use (defaults to :data:`ssl.PROTOCOL_SSLv23`). .. method:: FTP_TLS.auth() @@ -448,7 +441,7 @@ FTP_TLS Objects .. versionchanged:: 3.4 The method now supports hostname check with :attr:`ssl.SSLContext.check_hostname` and *Server Name Indication* (see - :data:`ssl.HAS_SNI`). + :const:`ssl.HAS_SNI`). .. method:: FTP_TLS.ccc() diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst index 8108c98332e9645..b2dd32f925ef4df 100644 --- a/Doc/library/functions.rst +++ b/Doc/library/functions.rst @@ -14,8 +14,8 @@ are always available. They are listed here in alphabetical order. | | :func:`abs` | | :func:`enumerate` | | :func:`len` | | |func-range|_ | | | :func:`aiter` | | :func:`eval` | | |func-list|_ | | :func:`repr` | | | :func:`all` | | :func:`exec` | | :func:`locals` | | :func:`reversed` | -| | :func:`any` | | | | | | :func:`round` | -| | :func:`anext` | | **F** | | **M** | | | +| | :func:`anext` | | | | | | :func:`round` | +| | :func:`any` | | **F** | | **M** | | | | | :func:`ascii` | | :func:`filter` | | :func:`map` | | **S** | | | | | :func:`float` | | :func:`max` | | |func-set|_ | | | **B** | | :func:`format` | | |func-memoryview|_ | | :func:`setattr` | @@ -54,14 +54,14 @@ are always available. They are listed here in alphabetical order. .. |func-bytearray| replace:: ``bytearray()`` .. |func-bytes| replace:: ``bytes()`` -.. function:: abs(x, /) +.. function:: abs(x) Return the absolute value of a number. The argument may be an integer, a floating point number, or an object implementing :meth:`__abs__`. If the argument is a complex number, its magnitude is returned. -.. function:: aiter(async_iterable, /) +.. function:: aiter(async_iterable) Return an :term:`asynchronous iterator` for an :term:`asynchronous iterable`. Equivalent to calling ``x.__aiter__()``. @@ -70,7 +70,7 @@ are always available. They are listed here in alphabetical order. .. versionadded:: 3.10 -.. function:: all(iterable, /) +.. function:: all(iterable) Return ``True`` if all elements of the *iterable* are true (or if the iterable is empty). Equivalent to:: @@ -82,8 +82,8 @@ are always available. They are listed here in alphabetical order. return True -.. awaitablefunction:: anext(async_iterator, /) - anext(async_iterator, default, /) +.. awaitablefunction:: anext(async_iterator) + anext(async_iterator, default) When awaited, return the next item from the given :term:`asynchronous iterator`, or *default* if given and the iterator is exhausted. @@ -98,7 +98,7 @@ are always available. 
They are listed here in alphabetical order. .. versionadded:: 3.10 -.. function:: any(iterable, /) +.. function:: any(iterable) Return ``True`` if any element of the *iterable* is true. If the iterable is empty, return ``False``. Equivalent to:: @@ -110,7 +110,7 @@ are always available. They are listed here in alphabetical order. return False -.. function:: ascii(object, /) +.. function:: ascii(object) As :func:`repr`, return a string containing a printable representation of an object, but escape the non-ASCII characters in the string returned by @@ -118,11 +118,11 @@ are always available. They are listed here in alphabetical order. similar to that returned by :func:`repr` in Python 2. -.. function:: bin(x, /) +.. function:: bin(x) Convert an integer number to a binary string prefixed with "0b". The result is a valid Python expression. If *x* is not a Python :class:`int` object, it - has to define an :meth:`__index__` method that returns an integer. Some + has to define an :meth:`~object.__index__` method that returns an integer. Some examples: >>> bin(3) @@ -140,14 +140,14 @@ are always available. They are listed here in alphabetical order. See also :func:`format` for more information. -.. class:: bool(x=False, /) +.. class:: bool(x=False) Return a Boolean value, i.e. one of ``True`` or ``False``. *x* is converted using the standard :ref:`truth testing procedure `. If *x* is false or omitted, this returns ``False``; otherwise, it returns ``True``. The :class:`bool` class is a subclass of :class:`int` (see :ref:`typesnumeric`). It cannot be subclassed further. Its only instances are ``False`` and - ``True`` (see :ref:`bltin-boolean-values`). + ``True`` (see :ref:`typebool`). .. index:: pair: Boolean; type @@ -168,6 +168,13 @@ are always available. They are listed here in alphabetical order. If :func:`sys.breakpointhook` is not accessible, this function will raise :exc:`RuntimeError`. + By default, the behavior of :func:`breakpoint` can be changed with + the :envvar:`PYTHONBREAKPOINT` environment variable. + See :func:`sys.breakpointhook` for usage details. + + Note that this is not guaranteed if :func:`sys.breakpointhook` + has been replaced. + .. audit-event:: builtins.breakpoint breakpointhook breakpoint .. versionadded:: 3.7 @@ -222,7 +229,7 @@ are always available. They are listed here in alphabetical order. See also :ref:`binaryseq`, :ref:`typebytes`, and :ref:`bytes-methods`. -.. function:: callable(object, /) +.. function:: callable(object) Return :const:`True` if the *object* argument appears callable, :const:`False` if not. If this returns ``True``, it is still possible that a @@ -235,7 +242,7 @@ are always available. They are listed here in alphabetical order. in Python 3.2. -.. function:: chr(i, /) +.. function:: chr(i) Return the string representing a character whose Unicode code point is the integer *i*. For example, ``chr(97)`` returns the string ``'a'``, while @@ -278,7 +285,7 @@ are always available. They are listed here in alphabetical order. ``__name__``, ``__qualname__``, ``__doc__`` and ``__annotations__``) and have a new ``__wrapped__`` attribute. - .. versionchanged:: 3.11 + .. deprecated-removed:: 3.11 3.13 Class methods can no longer wrap other :term:`descriptors ` such as :func:`property`. @@ -364,7 +371,7 @@ are always available. They are listed here in alphabetical order. .. class:: complex(real=0, imag=0) - complex(string, /) + complex(string) Return a complex number with the value *real* + *imag*\*1j or convert a string or number to a complex number. 
If the first parameter is a string, it will @@ -376,9 +383,9 @@ are always available. They are listed here in alphabetical order. ``0j``. For a general Python object ``x``, ``complex(x)`` delegates to - ``x.__complex__()``. If ``__complex__()`` is not defined then it falls back - to :meth:`__float__`. If ``__float__()`` is not defined then it falls back - to :meth:`__index__`. + ``x.__complex__()``. If :meth:`~object.__complex__` is not defined then it falls back + to :meth:`~object.__float__`. If :meth:`!__float__` is not defined then it falls back + to :meth:`~object.__index__`. .. note:: @@ -393,11 +400,11 @@ are always available. They are listed here in alphabetical order. Grouping digits with underscores as in code literals is allowed. .. versionchanged:: 3.8 - Falls back to :meth:`__index__` if :meth:`__complex__` and - :meth:`__float__` are not defined. + Falls back to :meth:`~object.__index__` if :meth:`~object.__complex__` and + :meth:`~object.__float__` are not defined. -.. function:: delattr(object, name, /) +.. function:: delattr(object, name) This is a relative of :func:`setattr`. The arguments are an object and a string. The string must be the name of one of the object's attributes. The @@ -408,8 +415,8 @@ are always available. They are listed here in alphabetical order. .. _func-dict: .. class:: dict(**kwarg) - dict(mapping, /, **kwarg) - dict(iterable, /, **kwarg) + dict(mapping, **kwarg) + dict(iterable, **kwarg) :noindex: Create a new dictionary. The :class:`dict` object is the dictionary class. @@ -420,7 +427,7 @@ are always available. They are listed here in alphabetical order. .. function:: dir() - dir(object, /) + dir(object) Without arguments, return the list of names in the current local scope. With an argument, attempt to return a list of valid attributes for that object. @@ -462,6 +469,7 @@ are always available. They are listed here in alphabetical order. >>> class Shape: ... def __dir__(self): ... return ['area', 'perimeter', 'location'] + ... >>> s = Shape() >>> dir(s) ['area', 'location', 'perimeter'] @@ -476,7 +484,7 @@ are always available. They are listed here in alphabetical order. class. -.. function:: divmod(a, b, /) +.. function:: divmod(a, b) Take two (non-complex) numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using integer division. With @@ -504,15 +512,15 @@ are always available. They are listed here in alphabetical order. Equivalent to:: - def enumerate(sequence, start=0): + def enumerate(iterable, start=0): n = start - for elem in sequence: + for elem in iterable: yield n, elem n += 1 .. _func-eval: -.. function:: eval(expression, /, globals=None, locals=None) +.. function:: eval(expression, globals=None, locals=None) The arguments are a string and optional globals and locals. If provided, *globals* must be a dictionary. If provided, *locals* can be any mapping @@ -561,7 +569,7 @@ are always available. They are listed here in alphabetical order. Raises an :ref:`auditing event ` ``exec`` with the code object as the argument. Code compilation events may also be raised. -.. index:: builtin: exec +.. index:: pair: built-in function; exec .. function:: exec(object, globals=None, locals=None, /, *, closure=None) @@ -619,10 +627,10 @@ are always available. They are listed here in alphabetical order. Added the *closure* parameter. -.. function:: filter(function, iterable, /) +.. 
function:: filter(function, iterable) Construct an iterator from those elements of *iterable* for which *function* - returns true. *iterable* may be either a sequence, a container which + is true. *iterable* may be either a sequence, a container which supports iteration, or an iterator. If *function* is ``None``, the identity function is assumed, that is, all elements of *iterable* that are false are removed. @@ -633,10 +641,10 @@ are always available. They are listed here in alphabetical order. ``None``. See :func:`itertools.filterfalse` for the complementary function that returns - elements of *iterable* for which *function* returns false. + elements of *iterable* for which *function* is false. -.. class:: float(x=0.0, /) +.. class:: float(x=0.0) .. index:: single: NaN @@ -649,20 +657,23 @@ are always available. They are listed here in alphabetical order. sign may be ``'+'`` or ``'-'``; a ``'+'`` sign has no effect on the value produced. The argument may also be a string representing a NaN (not-a-number), or positive or negative infinity. More precisely, the - input must conform to the following grammar after leading and trailing - whitespace characters are removed: + input must conform to the ``floatvalue`` production rule in the following + grammar, after leading and trailing whitespace characters are removed: .. productionlist:: float sign: "+" | "-" infinity: "Infinity" | "inf" nan: "nan" - numeric_value: `floatnumber` | `infinity` | `nan` - numeric_string: [`sign`] `numeric_value` + digitpart: `digit` (["_"] `digit`)* + number: [`digitpart`] "." `digitpart` | `digitpart` ["."] + exponent: ("e" | "E") ["+" | "-"] `digitpart` + floatnumber: number [`exponent`] + floatvalue: [`sign`] (`floatnumber` | `infinity` | `nan`) - Here ``floatnumber`` is the form of a Python floating-point literal, - described in :ref:`floating`. Case is not significant, so, for example, - "inf", "Inf", "INFINITY", and "iNfINity" are all acceptable spellings for - positive infinity. + Here ``digit`` is a Unicode decimal digit (character in the Unicode general + category ``Nd``). Case is not significant, so, for example, "inf", "Inf", + "INFINITY", and "iNfINity" are all acceptable spellings for positive + infinity. Otherwise, if the argument is an integer or a floating point number, a floating point number with the same value (within Python's floating point @@ -670,8 +681,8 @@ are always available. They are listed here in alphabetical order. float, an :exc:`OverflowError` will be raised. For a general Python object ``x``, ``float(x)`` delegates to - ``x.__float__()``. If ``__float__()`` is not defined then it falls back - to :meth:`__index__`. + ``x.__float__()``. If :meth:`~object.__float__` is not defined then it falls back + to :meth:`~object.__index__`. If no argument is given, ``0.0`` is returned. @@ -697,14 +708,14 @@ are always available. They are listed here in alphabetical order. *x* is now a positional-only parameter. .. versionchanged:: 3.8 - Falls back to :meth:`__index__` if :meth:`__float__` is not defined. + Falls back to :meth:`~object.__index__` if :meth:`~object.__float__` is not defined. .. index:: single: __format__ single: string; format() (built-in function) -.. function:: format(value, format_spec="", /) +.. function:: format(value, format_spec="") Convert a *value* to a "formatted" representation, as controlled by *format_spec*. The interpretation of *format_spec* will depend on the type @@ -727,7 +738,7 @@ are always available. They are listed here in alphabetical order. .. 
_func-frozenset: -.. class:: frozenset(iterable=set(), /) +.. class:: frozenset(iterable=set()) :noindex: Return a new :class:`frozenset` object, optionally with elements taken from @@ -739,8 +750,8 @@ are always available. They are listed here in alphabetical order. module. -.. function:: getattr(object, name, /) - getattr(object, name, default, /) +.. function:: getattr(object, name) + getattr(object, name, default) Return the value of the named attribute of *object*. *name* must be a string. If the string is the name of one of the object's attributes, the result is the @@ -764,7 +775,7 @@ are always available. They are listed here in alphabetical order. regardless of where the function is called. -.. function:: hasattr(object, name, /) +.. function:: hasattr(object, name) The arguments are an object and a string. The result is ``True`` if the string is the name of one of the object's attributes, ``False`` if not. (This @@ -772,7 +783,7 @@ are always available. They are listed here in alphabetical order. raises an :exc:`AttributeError` or not.) -.. function:: hash(object, /) +.. function:: hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a @@ -783,7 +794,7 @@ are always available. They are listed here in alphabetical order. For objects with custom :meth:`__hash__` methods, note that :func:`hash` truncates the return value based on the bit width of the host machine. - See :meth:`__hash__` for details. + See :meth:`__hash__ ` for details. .. function:: help() help(request) @@ -807,11 +818,11 @@ are always available. They are listed here in alphabetical order. signatures for callables are now more comprehensive and consistent. -.. function:: hex(x, /) +.. function:: hex(x) Convert an integer number to a lowercase hexadecimal string prefixed with "0x". If *x* is not a Python :class:`int` object, it has to define an - :meth:`__index__` method that returns an integer. Some examples: + :meth:`~object.__index__` method that returns an integer. Some examples: >>> hex(255) '0xff' @@ -839,7 +850,7 @@ are always available. They are listed here in alphabetical order. :meth:`float.hex` method. -.. function:: id(object, /) +.. function:: id(object) Return the "identity" of an object. This is an integer which is guaranteed to be unique and constant for this object during its lifetime. @@ -852,7 +863,7 @@ are always available. They are listed here in alphabetical order. .. function:: input() - input(prompt, /) + input(prompt) If the *prompt* argument is present, it is written to standard output without a trailing newline. The function then reads a line from input, converts it @@ -878,28 +889,32 @@ are always available. They are listed here in alphabetical order. with the result after successfully reading input. -.. class:: int(x=0, /) - int(x, /, base=10) +.. class:: int(x=0) + int(x, base=10) Return an integer object constructed from a number or string *x*, or return - ``0`` if no arguments are given. If *x* defines :meth:`__int__`, - ``int(x)`` returns ``x.__int__()``. If *x* defines :meth:`__index__`, - it returns ``x.__index__()``. If *x* defines :meth:`__trunc__`, + ``0`` if no arguments are given. If *x* defines :meth:`~object.__int__`, + ``int(x)`` returns ``x.__int__()``. If *x* defines :meth:`~object.__index__`, + it returns ``x.__index__()``. If *x* defines :meth:`~object.__trunc__`, it returns ``x.__trunc__()``. For floating point numbers, this truncates towards zero. 
If *x* is not a number or if *base* is given, then *x* must be a string, - :class:`bytes`, or :class:`bytearray` instance representing an :ref:`integer - literal ` in radix *base*. Optionally, the literal can be - preceded by ``+`` or ``-`` (with no space in between) and surrounded by - whitespace. A base-n literal consists of the digits 0 to n-1, with ``a`` - to ``z`` (or ``A`` to ``Z``) having - values 10 to 35. The default *base* is 10. The allowed values are 0 and 2--36. - Base-2, -8, and -16 literals can be optionally prefixed with ``0b``/``0B``, - ``0o``/``0O``, or ``0x``/``0X``, as with integer literals in code. Base 0 - means to interpret exactly as a code literal, so that the actual base is 2, - 8, 10, or 16, and so that ``int('010', 0)`` is not legal, while - ``int('010')`` is, as well as ``int('010', 8)``. + :class:`bytes`, or :class:`bytearray` instance representing an integer + in radix *base*. Optionally, the string can be preceded by ``+`` or ``-`` + (with no space in between), have leading zeros, be surrounded by whitespace, + and have single underscores interspersed between digits. + + A base-n integer string contains digits, each representing a value from 0 to + n-1. The values 0--9 can be represented by any Unicode decimal digit. The + values 10--35 can be represented by ``a`` to ``z`` (or ``A`` to ``Z``). The + default *base* is 10. The allowed bases are 0 and 2--36. Base-2, -8, and -16 + strings can be optionally prefixed with ``0b``/``0B``, ``0o``/``0O``, or + ``0x``/``0X``, as with integer literals in code. For base 0, the string is + interpreted in a similar way to an :ref:`integer literal in code `, + in that the actual base is 2, 8, 10, or 16 as determined by the prefix. Base + 0 also disallows leading zeros: ``int('010', 0)`` is not legal, while + ``int('010')`` and ``int('010', 8)`` are. The integer type is described in :ref:`typesnumeric`. @@ -917,10 +932,10 @@ are always available. They are listed here in alphabetical order. *x* is now a positional-only parameter. .. versionchanged:: 3.8 - Falls back to :meth:`__index__` if :meth:`__int__` is not defined. + Falls back to :meth:`~object.__index__` if :meth:`~object.__int__` is not defined. .. versionchanged:: 3.11 - The delegation to :meth:`__trunc__` is deprecated. + The delegation to :meth:`~object.__trunc__` is deprecated. .. versionchanged:: 3.11 :class:`int` string inputs and string representations can be limited to @@ -930,7 +945,7 @@ are always available. They are listed here in alphabetical order. See the :ref:`integer string conversion length limitation ` documentation. -.. function:: isinstance(object, classinfo, /) +.. function:: isinstance(object, classinfo) Return ``True`` if the *object* argument is an instance of the *classinfo* argument, or of a (direct, indirect, or :term:`virtual `) of *classinfo*. A @@ -961,14 +976,14 @@ are always available. They are listed here in alphabetical order. *classinfo* can be a :ref:`types-union`. -.. function:: iter(object, /) - iter(object, sentinel, /) +.. function:: iter(object) + iter(object, sentinel) Return an :term:`iterator` object. The first argument is interpreted very differently depending on the presence of the second argument. 
Without a second argument, *object* must be a collection object which supports the :term:`iterable` protocol (the :meth:`__iter__` method), or it must support - the sequence protocol (the :meth:`__getitem__` method with integer arguments + the sequence protocol (the :meth:`~object.__getitem__` method with integer arguments starting at ``0``). If it does not support either of those protocols, :exc:`TypeError` is raised. If the second argument, *sentinel*, is given, then *object* must be a callable object. The iterator created in this case @@ -989,7 +1004,7 @@ are always available. They are listed here in alphabetical order. process_block(block) -.. function:: len(s, /) +.. function:: len(s) Return the length (the number of items) of an object. The argument may be a sequence (such as a string, bytes, tuple, list, or range) or a collection @@ -1003,7 +1018,7 @@ are always available. They are listed here in alphabetical order. .. _func-list: .. class:: list() - list(iterable, /) + list(iterable) :noindex: Rather than being a function, :class:`list` is actually a mutable @@ -1021,7 +1036,7 @@ are always available. They are listed here in alphabetical order. The contents of this dictionary should not be modified; changes may not affect the values of local and free variables used by the interpreter. -.. function:: map(function, iterable, /, *iterables) +.. function:: map(function, iterable, *iterables) Return an iterator that applies *function* to every item of *iterable*, yielding the results. If additional *iterables* arguments are passed, @@ -1031,9 +1046,9 @@ are always available. They are listed here in alphabetical order. already arranged into argument tuples, see :func:`itertools.starmap`\. -.. function:: max(iterable, /, *, key=None) - max(iterable, /, *, default, key=None) - max(arg1, arg2, /, *args, key=None) +.. function:: max(iterable, *, key=None) + max(iterable, *, default, key=None) + max(arg1, arg2, *args, key=None) Return the largest item in an iterable or the largest of two or more arguments. @@ -1069,9 +1084,9 @@ are always available. They are listed here in alphabetical order. :ref:`typememoryview` for more information. -.. function:: min(iterable, /, *, key=None) - min(iterable, /, *, default, key=None) - min(arg1, arg2, /, *args, key=None) +.. function:: min(iterable, *, key=None) + min(iterable, *, default, key=None) + min(arg1, arg2, *args, key=None) Return the smallest item in an iterable or the smallest of two or more arguments. @@ -1099,8 +1114,8 @@ are always available. They are listed here in alphabetical order. The *key* can be ``None``. -.. function:: next(iterator, /) - next(iterator, default, /) +.. function:: next(iterator) + next(iterator, default) Retrieve the next item from the :term:`iterator` by calling its :meth:`~iterator.__next__` method. If *default* is given, it is returned @@ -1119,11 +1134,11 @@ are always available. They are listed here in alphabetical order. assign arbitrary attributes to an instance of the :class:`object` class. -.. function:: oct(x, /) +.. function:: oct(x) Convert an integer number to an octal string prefixed with "0o". The result is a valid Python expression. If *x* is not a Python :class:`int` object, it - has to define an :meth:`__index__` method that returns an integer. For + has to define an :meth:`~object.__index__` method that returns an integer. For example: >>> oct(8) @@ -1143,8 +1158,8 @@ are always available. They are listed here in alphabetical order. See also :func:`format` for more information. - .. 
index:: - single: file object; open() built-in function +.. index:: + single: file object; open() built-in function .. function:: open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None) @@ -1206,7 +1221,7 @@ are always available. They are listed here in alphabetical order. *buffering* is an optional integer used to set the buffering policy. Pass 0 to switch buffering off (only allowed in binary mode), 1 to select line - buffering (only usable in text mode), and an integer > 1 to indicate the size + buffering (only usable when writing in text mode), and an integer > 1 to indicate the size in bytes of a fixed-size chunk buffer. Note that specifying a buffer size this way applies for binary buffered I/O, but ``TextIOWrapper`` (i.e., files opened with ``mode='r+'``) would have another buffering. To disable buffering in @@ -1216,7 +1231,7 @@ are always available. They are listed here in alphabetical order. * Binary files are buffered in fixed-size chunks; the size of the buffer is chosen using a heuristic trying to determine the underlying device's "block - size" and falling back on :attr:`io.DEFAULT_BUFFER_SIZE`. On many systems, + size" and falling back on :const:`io.DEFAULT_BUFFER_SIZE`. On many systems, the buffer will typically be 4096 or 8192 bytes long. * "Interactive" text files (files for which :meth:`~io.IOBase.isatty` @@ -1256,7 +1271,7 @@ are always available. They are listed here in alphabetical order. * ``'xmlcharrefreplace'`` is only supported when writing to a file. Characters not supported by the encoding are replaced with the - appropriate XML character reference ``&#nnn;``. + appropriate XML character reference :samp:`&#{nnn};`. * ``'backslashreplace'`` replaces malformed data by Python's backslashed escape sequences. @@ -1332,7 +1347,7 @@ are always available. They are listed here in alphabetical order. single: I/O control; buffering single: binary mode single: text mode - module: sys + pair: module; sys See also the file handling modules, such as :mod:`fileinput`, :mod:`io` (where :func:`open` is declared), :mod:`os`, :mod:`os.path`, :mod:`tempfile`, @@ -1345,33 +1360,33 @@ are always available. They are listed here in alphabetical order. .. versionchanged:: 3.3 - * The *opener* parameter was added. - * The ``'x'`` mode was added. - * :exc:`IOError` used to be raised, it is now an alias of :exc:`OSError`. - * :exc:`FileExistsError` is now raised if the file opened in exclusive - creation mode (``'x'``) already exists. + * The *opener* parameter was added. + * The ``'x'`` mode was added. + * :exc:`IOError` used to be raised, it is now an alias of :exc:`OSError`. + * :exc:`FileExistsError` is now raised if the file opened in exclusive + creation mode (``'x'``) already exists. .. versionchanged:: 3.4 - * The file is now non-inheritable. + * The file is now non-inheritable. .. versionchanged:: 3.5 - * If the system call is interrupted and the signal handler does not raise an - exception, the function now retries the system call instead of raising an - :exc:`InterruptedError` exception (see :pep:`475` for the rationale). - * The ``'namereplace'`` error handler was added. + * If the system call is interrupted and the signal handler does not raise an + exception, the function now retries the system call instead of raising an + :exc:`InterruptedError` exception (see :pep:`475` for the rationale). + * The ``'namereplace'`` error handler was added. .. 
versionchanged:: 3.6 - * Support added to accept objects implementing :class:`os.PathLike`. - * On Windows, opening a console buffer may return a subclass of - :class:`io.RawIOBase` other than :class:`io.FileIO`. + * Support added to accept objects implementing :class:`os.PathLike`. + * On Windows, opening a console buffer may return a subclass of + :class:`io.RawIOBase` other than :class:`io.FileIO`. .. versionchanged:: 3.11 The ``'U'`` mode has been removed. -.. function:: ord(c, /) +.. function:: ord(c) Given a string representing one Unicode character, return an integer representing the Unicode code point of that character. For example, @@ -1419,7 +1434,7 @@ are always available. They are listed here in alphabetical order. supported. -.. function:: print(*objects, sep=' ', end='\n', file=sys.stdout, flush=False) +.. function:: print(*objects, sep=' ', end='\n', file=None, flush=False) Print *objects* to the text stream *file*, separated by *sep* and followed by *end*. *sep*, *end*, *file*, and *flush*, if present, must be given as keyword @@ -1436,8 +1451,9 @@ are always available. They are listed here in alphabetical order. arguments are converted to text strings, :func:`print` cannot be used with binary mode file objects. For these, use ``file.write(...)`` instead. - Whether the output is buffered is usually determined by *file*, but if the - *flush* keyword argument is true, the stream is forcibly flushed. + Output buffering is usually determined by *file*. + However, if *flush* is true, the stream is forcibly flushed. + .. versionchanged:: 3.3 Added the *flush* keyword argument. @@ -1522,15 +1538,15 @@ are always available. They are listed here in alphabetical order. .. _func-range: -.. class:: range(stop, /) - range(start, stop, step=1, /) +.. class:: range(stop) + range(start, stop, step=1) :noindex: Rather than being a function, :class:`range` is actually an immutable sequence type, as documented in :ref:`typesseq-range` and :ref:`typesseq`. -.. function:: repr(object, /) +.. function:: repr(object) Return a string containing a printable representation of an object. For many types, this function makes an attempt to return a string that would yield an @@ -1543,11 +1559,11 @@ are always available. They are listed here in alphabetical order. :exc:`RuntimeError`. -.. function:: reversed(seq, /) +.. function:: reversed(seq) Return a reverse :term:`iterator`. *seq* must be an object which has a :meth:`__reversed__` method or supports the sequence protocol (the - :meth:`__len__` method and the :meth:`__getitem__` method with integer + :meth:`__len__` method and the :meth:`~object.__getitem__` method with integer arguments starting at ``0``). @@ -1580,7 +1596,7 @@ are always available. They are listed here in alphabetical order. .. _func-set: .. class:: set() - set(iterable, /) + set(iterable) :noindex: Return a new :class:`set` object, optionally with elements taken from @@ -1592,7 +1608,7 @@ are always available. They are listed here in alphabetical order. module. -.. function:: setattr(object, name, value, /) +.. function:: setattr(object, name, value) This is the counterpart of :func:`getattr`. The arguments are an object, a string, and an arbitrary value. The string may name an existing attribute or a @@ -1614,8 +1630,8 @@ are always available. They are listed here in alphabetical order. :func:`setattr`. -.. class:: slice(stop, /) - slice(start, stop, step=1, /) +.. 
class:: slice(stop) + slice(start, stop, step=None) Return a :term:`slice` object representing the set of indices specified by ``range(start, stop, step)``. The *start* and *step* arguments default to @@ -1627,6 +1643,9 @@ are always available. They are listed here in alphabetical order. example: ``a[start:stop:step]`` or ``a[start:stop, i]``. See :func:`itertools.islice` for an alternate version that returns an iterator. + .. versionchanged:: 3.12 + Slice objects are now :term:`hashable` (provided :attr:`~slice.start`, + :attr:`~slice.stop`, and :attr:`~slice.step` are hashable). .. function:: sorted(iterable, /, *, key=None, reverse=False) @@ -1669,7 +1688,7 @@ are always available. They are listed here in alphabetical order. class C: @staticmethod - def f(arg1, arg2, ...): ... + def f(arg1, arg2, argN): ... The ``@staticmethod`` form is a function :term:`decorator` -- see :ref:`function` for details. @@ -1732,8 +1751,12 @@ are always available. They are listed here in alphabetical order. .. versionchanged:: 3.8 The *start* parameter can be specified as a keyword argument. + .. versionchanged:: 3.12 Summation of floats switched to an algorithm + that gives higher accuracy and better commutativity on most builds. + + .. class:: super() - super(type, object_or_type=None, /) + super(type, object_or_type=None) Return a proxy object that delegates method calls to a parent or sibling class of *type*. This is useful for accessing inherited methods that have @@ -1804,17 +1827,17 @@ are always available. They are listed here in alphabetical order. .. _func-tuple: .. class:: tuple() - tuple(iterable, /) + tuple(iterable) :noindex: Rather than being a function, :class:`tuple` is actually an immutable sequence type, as documented in :ref:`typesseq-tuple` and :ref:`typesseq`. -.. class:: type(object, /) - type(name, bases, dict, /, **kwds) +.. class:: type(object) + type(name, bases, dict, **kwds) - .. index:: object: type + .. index:: pair: object; type With one argument, return the type of an *object*. The return value is a type object and generally the same object as returned by @@ -1853,7 +1876,7 @@ are always available. They are listed here in alphabetical order. longer use the one-argument form to get the type of an object. .. function:: vars() - vars(object, /) + vars(object) Return the :attr:`~object.__dict__` attribute for a module, class, instance, or any other object with a :attr:`~object.__dict__` attribute. @@ -1915,14 +1938,24 @@ are always available. They are listed here in alphabetical order. >>> list(zip(('a', 'b', 'c'), (1, 2, 3), strict=True)) [('a', 1), ('b', 2), ('c', 3)] - Unlike the default behavior, it checks that the lengths of iterables are - identical, raising a :exc:`ValueError` if they aren't: + Unlike the default behavior, it raises a :exc:`ValueError` if one iterable + is exhausted before the others: - >>> list(zip(range(3), ['fee', 'fi', 'fo', 'fum'], strict=True)) + >>> for item in zip(range(3), ['fee', 'fi', 'fo', 'fum'], strict=True): # doctest: +SKIP + ... print(item) + ... + (0, 'fee') + (1, 'fi') + (2, 'fo') Traceback (most recent call last): ... ValueError: zip() argument 2 is longer than argument 1 + .. + This doctest is disabled because doctest does not support capturing + output and exceptions in the same code unit. + https://github.com/python/cpython/issues/65382 + Without the ``strict=True`` argument, any bug that results in iterables of different lengths will be silenced, possibly manifesting as a hard-to-find bug in another part of the program. 
@@ -1960,8 +1993,8 @@ are always available. They are listed here in alphabetical order. .. function:: __import__(name, globals=None, locals=None, fromlist=(), level=0) .. index:: - statement: import - module: imp + pair: statement; import + pair: module; builtins .. note:: diff --git a/Doc/library/functools.rst b/Doc/library/functools.rst index 2f0a9bd8be88155..69ec1eb3ecd89d4 100644 --- a/Doc/library/functools.rst +++ b/Doc/library/functools.rst @@ -49,8 +49,13 @@ The :mod:`functools` module defines the following functions: >>> factorial(12) # makes two new recursive calls, the other 10 are cached 479001600 - The cache is threadsafe so the wrapped function can be used in multiple - threads. + The cache is threadsafe so that the wrapped function can be used in + multiple threads. This means that the underlying data structure will + remain coherent during concurrent updates. + + It is possible for the wrapped function to be called more than once if + another thread makes an additional call before the initial call has been + completed and cached. .. versionadded:: 3.9 @@ -86,6 +91,14 @@ The :mod:`functools` module defines the following functions: The cached value can be cleared by deleting the attribute. This allows the *cached_property* method to run again. + The *cached_property* does not prevent a possible race condition in + multi-threaded usage. The getter function could run more than once on the + same instance, with the latest run setting the cached value. If the cached + property is idempotent or otherwise not harmful to run more than once on an + instance, this is fine. If synchronization is needed, implement the necessary + locking inside the decorated getter function or around the cached property + access. + Note, this decorator interferes with the operation of :pep:`412` key-sharing dictionaries. This means that instance dictionaries can take more space than usual. @@ -97,21 +110,20 @@ The :mod:`functools` module defines the following functions: ``__slots__`` without including ``__dict__`` as one of the defined slots (as such classes don't provide a ``__dict__`` attribute at all). - If a mutable mapping is not available or if space-efficient key sharing - is desired, an effect similar to :func:`cached_property` can be achieved - by a stacking :func:`property` on top of :func:`cache`:: - - class DataSet: - def __init__(self, sequence_of_numbers): - self._data = sequence_of_numbers - - @property - @cache - def stdev(self): - return statistics.stdev(self._data) + If a mutable mapping is not available or if space-efficient key sharing is + desired, an effect similar to :func:`cached_property` can also be achieved by + stacking :func:`property` on top of :func:`lru_cache`. See + :ref:`faq-cache-method-calls` for more details on how this differs from :func:`cached_property`. .. versionadded:: 3.8 + .. versionchanged:: 3.12 + Prior to Python 3.12, ``cached_property`` included an undocumented lock to + ensure that in multi-threaded usage the getter function was guaranteed to + run only once per instance. However, the lock was per-property, not + per-instance, which could result in unacceptably high lock contention. In + Python 3.12+ this locking is removed. + .. function:: cmp_to_key(func) @@ -143,11 +155,16 @@ The :mod:`functools` module defines the following functions: *maxsize* most recent calls. It can save time when an expensive or I/O bound function is periodically called with the same arguments. - The cache is threadsafe so the wrapped function can be used in multiple - threads. 
+ The cache is threadsafe so that the wrapped function can be used in + multiple threads. This means that the underlying data structure will + remain coherent during concurrent updates. + + It is possible for the wrapped function to be called more than once if + another thread makes an additional call before the initial call has been + completed and cached. Since a dictionary is used to cache results, the positional and keyword - arguments to the function must be hashable. + arguments to the function must be :term:`hashable`. Distinct argument patterns may be considered to be distinct calls with separate cache entries. For example, ``f(a=1, b=2)`` and ``f(b=2, a=1)`` @@ -209,15 +226,16 @@ The :mod:`functools` module defines the following functions: In general, the LRU cache should only be used when you want to reuse previously computed values. Accordingly, it doesn't make sense to cache - functions with side-effects, functions that need to create distinct mutable - objects on each call, or impure functions such as time() or random(). + functions with side-effects, functions that need to create + distinct mutable objects on each call (such as generators and async functions), + or impure functions such as time() or random(). Example of an LRU cache for static web content:: @lru_cache(maxsize=32) def get_pep(num): 'Retrieve text of a Python Enhancement Proposal' - resource = 'https://peps.python.org/pep-%04d/' % num + resource = f'https://peps.python.org/pep-{num:04d}' try: with urllib.request.urlopen(resource) as s: return s.read() @@ -385,25 +403,27 @@ The :mod:`functools` module defines the following functions: .. versionadded:: 3.4 -.. function:: reduce(function, iterable[, initializer]) +.. function:: reduce(function, iterable[, initial], /) Apply *function* of two arguments cumulatively to the items of *iterable*, from left to right, so as to reduce the iterable to a single value. For example, ``reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])`` calculates ``((((1+2)+3)+4)+5)``. The left argument, *x*, is the accumulated value and the right argument, *y*, is - the update value from the *iterable*. If the optional *initializer* is present, + the update value from the *iterable*. If the optional *initial* is present, it is placed before the items of the iterable in the calculation, and serves as - a default when the iterable is empty. If *initializer* is not given and + a default when the iterable is empty. If *initial* is not given and *iterable* contains only one item, the first item is returned. Roughly equivalent to:: - def reduce(function, iterable, initializer=None): + initial_missing = object() + + def reduce(function, iterable, initial=initial_missing, /): it = iter(iterable) - if initializer is None: + if initial is initial_missing: value = next(it) else: - value = initializer + value = initial for element in it: value = function(value, element) return value diff --git a/Doc/library/gc.rst b/Doc/library/gc.rst index 69a1a8313b75931..331c071cda76924 100644 --- a/Doc/library/gc.rst +++ b/Doc/library/gc.rst @@ -50,6 +50,9 @@ The :mod:`gc` module provides the following functions: is run. Not all items in some free lists may be freed due to the particular implementation, in particular :class:`float`. + The effect of calling ``gc.collect()`` while the interpreter is already + performing a collection is undefined. + .. function:: set_debug(flags) @@ -206,12 +209,17 @@ The :mod:`gc` module provides the following functions: .. 
function:: freeze() - Freeze all the objects tracked by gc - move them to a permanent generation - and ignore all the future collections. This can be used before a POSIX - fork() call to make the gc copy-on-write friendly or to speed up collection. - Also collection before a POSIX fork() call may free pages for future - allocation which can cause copy-on-write too so it's advised to disable gc - in parent process and freeze before fork and enable gc in child process. + Freeze all the objects tracked by the garbage collector; move them to a + permanent generation and ignore them in all the future collections. + + If a process will ``fork()`` without ``exec()``, avoiding unnecessary + copy-on-write in child processes will maximize memory sharing and reduce + overall memory usage. This requires both avoiding creation of freed "holes" + in memory pages in the parent process and ensuring that GC collections in + child processes won't touch the ``gc_refs`` counter of long-lived objects + originating in the parent process. To accomplish both, call ``gc.disable()`` + early in the parent process, ``gc.freeze()`` right before ``fork()``, and + ``gc.enable()`` early in child processes. .. versionadded:: 3.7 @@ -251,8 +259,8 @@ values but should not rebind them): are printed. .. versionchanged:: 3.4 - Following :pep:`442`, objects with a :meth:`__del__` method don't end - up in :attr:`gc.garbage` anymore. + Following :pep:`442`, objects with a :meth:`~object.__del__` method don't end + up in :data:`gc.garbage` anymore. .. data:: callbacks diff --git a/Doc/library/getopt.rst b/Doc/library/getopt.rst index 336deab28cb8a46..ada68b240143e89 100644 --- a/Doc/library/getopt.rst +++ b/Doc/library/getopt.rst @@ -7,18 +7,23 @@ **Source code:** :source:`Lib/getopt.py` +.. deprecated:: 3.13 + The :mod:`getopt` module is :term:`soft deprecated` and will not be + developed further; development will continue with the :mod:`argparse` + module. + .. note:: The :mod:`getopt` module is a parser for command line options whose API is - designed to be familiar to users of the C :c:func:`getopt` function. Users who - are unfamiliar with the C :c:func:`getopt` function or who would like to write + designed to be familiar to users of the C :c:func:`!getopt` function. Users who + are unfamiliar with the C :c:func:`!getopt` function or who would like to write less code and get better help and error messages should consider using the :mod:`argparse` module instead. -------------- This module helps scripts to parse the command line arguments in ``sys.argv``. -It supports the same conventions as the Unix :c:func:`getopt` function (including +It supports the same conventions as the Unix :c:func:`!getopt` function (including the special meanings of arguments of the form '``-``' and '``--``'). Long options similar to those supported by GNU software may be used as well via an optional third argument. @@ -33,11 +38,11 @@ exception: be parsed, without the leading reference to the running program. Typically, this means ``sys.argv[1:]``. *shortopts* is the string of option letters that the script wants to recognize, with options that require an argument followed by a - colon (``':'``; i.e., the same format that Unix :c:func:`getopt` uses). + colon (``':'``; i.e., the same format that Unix :c:func:`!getopt` uses). .. note:: - Unlike GNU :c:func:`getopt`, after a non-option argument, all further + Unlike GNU :c:func:`!getopt`, after a non-option argument, all further arguments are considered also non-options. 
This is similar to the way non-GNU Unix systems work. @@ -71,7 +76,7 @@ exception: non-option argument is encountered. If the first character of the option string is ``'+'``, or if the environment - variable :envvar:`POSIXLY_CORRECT` is set, then option processing stops as + variable :envvar:`!POSIXLY_CORRECT` is set, then option processing stops as soon as a non-option argument is encountered. @@ -81,9 +86,9 @@ exception: an option requiring an argument is given none. The argument to the exception is a string indicating the cause of the error. For long options, an argument given to an option which does not require one will also cause this exception to be - raised. The attributes :attr:`msg` and :attr:`opt` give the error message and + raised. The attributes :attr:`!msg` and :attr:`!opt` give the error message and related option; if there is no specific option to which the exception relates, - :attr:`opt` is an empty string. + :attr:`!opt` is an empty string. .. XXX deprecated? .. exception:: error diff --git a/Doc/library/getpass.rst b/Doc/library/getpass.rst index d5bbe67fb30a621..54c84d45a59856c 100644 --- a/Doc/library/getpass.rst +++ b/Doc/library/getpass.rst @@ -43,10 +43,13 @@ The :mod:`getpass` module provides two functions: Return the "login name" of the user. This function checks the environment variables :envvar:`LOGNAME`, - :envvar:`USER`, :envvar:`LNAME` and :envvar:`USERNAME`, in order, and + :envvar:`USER`, :envvar:`!LNAME` and :envvar:`USERNAME`, in order, and returns the value of the first one which is set to a non-empty string. If none are set, the login name from the password database is returned on - systems which support the :mod:`pwd` module, otherwise, an exception is - raised. + systems which support the :mod:`pwd` module, otherwise, an :exc:`OSError` + is raised. In general, this function should be preferred over :func:`os.getlogin()`. + + .. versionchanged:: 3.13 + Previously, various exceptions beyond just :exc:`OSError` were raised. diff --git a/Doc/library/gettext.rst b/Doc/library/gettext.rst index 747f8703b750ec3..dc6cf5533fccbe5 100644 --- a/Doc/library/gettext.rst +++ b/Doc/library/gettext.rst @@ -58,7 +58,7 @@ class-based API instead. Return the localized translation of *message*, based on the current global domain, language, and locale directory. This function is usually aliased as - :func:`_` in the local namespace (see examples below). + :func:`!_` in the local namespace (see examples below). .. function:: dgettext(domain, message) @@ -98,7 +98,7 @@ class-based API instead. .. versionadded:: 3.8 -Note that GNU :program:`gettext` also defines a :func:`dcgettext` method, but +Note that GNU :program:`gettext` also defines a :func:`!dcgettext` method, but this was deemed not useful and so it is currently unimplemented. Here's an example of typical usage for this API:: @@ -119,7 +119,7 @@ greater convenience than the GNU :program:`gettext` API. It is the recommended way of localizing your Python applications and modules. :mod:`!gettext` defines a :class:`GNUTranslations` class which implements the parsing of GNU :file:`.mo` format files, and has methods for returning strings. Instances of this class can also -install themselves in the built-in namespace as the function :func:`_`. +install themselves in the built-in namespace as the function :func:`!_`. .. function:: find(domain, localedir=None, languages=None, all=False) @@ -150,15 +150,12 @@ install themselves in the built-in namespace as the function :func:`_`. .. 
function:: translation(domain, localedir=None, languages=None, class_=None, fallback=False) - Return a :class:`*Translations` instance based on the *domain*, *localedir*, + Return a ``*Translations`` instance based on the *domain*, *localedir*, and *languages*, which are first passed to :func:`find` to get a list of the associated :file:`.mo` file paths. Instances with identical :file:`.mo` file names are cached. The actual class instantiated is *class_* if provided, otherwise :class:`GNUTranslations`. The class's constructor must - take a single :term:`file object` argument. If provided, *codeset* will change - the charset used to encode translated strings in the - :meth:`~NullTranslations.lgettext` and :meth:`~NullTranslations.lngettext` - methods. + take a single :term:`file object` argument. If multiple files are found, later files are used as fallbacks for earlier ones. To allow setting the fallback, :func:`copy.copy` is used to clone each @@ -170,26 +167,26 @@ install themselves in the built-in namespace as the function :func:`_`. :class:`NullTranslations` instance if *fallback* is true. .. versionchanged:: 3.3 - :exc:`IOError` used to be raised instead of :exc:`OSError`. + :exc:`IOError` used to be raised, it is now an alias of :exc:`OSError`. .. versionchanged:: 3.11 *codeset* parameter is removed. .. function:: install(domain, localedir=None, *, names=None) - This installs the function :func:`_` in Python's builtins namespace, based on + This installs the function :func:`!_` in Python's builtins namespace, based on *domain* and *localedir* which are passed to the function :func:`translation`. For the *names* parameter, please see the description of the translation object's :meth:`~NullTranslations.install` method. As seen below, you usually mark the strings in your application that are - candidates for translation, by wrapping them in a call to the :func:`_` + candidates for translation, by wrapping them in a call to the :func:`!_` function, like this:: print(_('This string will be translated.')) - For convenience, you want the :func:`_` function to be installed in Python's + For convenience, you want the :func:`!_` function to be installed in Python's builtins namespace, so it is easily accessible in all modules of your application. @@ -276,20 +273,20 @@ are the methods of :class:`!NullTranslations`: If the *names* parameter is given, it must be a sequence containing the names of functions you want to install in the builtins namespace in - addition to :func:`_`. Supported names are ``'gettext'``, ``'ngettext'``, - ``'pgettext'``, ``'npgettext'``, ``'lgettext'``, and ``'lngettext'``. + addition to :func:`!_`. Supported names are ``'gettext'``, ``'ngettext'``, + ``'pgettext'``, and ``'npgettext'``. Note that this is only one way, albeit the most convenient way, to make - the :func:`_` function available to your application. Because it affects + the :func:`!_` function available to your application. Because it affects the entire application globally, and specifically the built-in namespace, - localized modules should never install :func:`_`. Instead, they should use - this code to make :func:`_` available to their module:: + localized modules should never install :func:`!_`. Instead, they should use + this code to make :func:`!_` available to their module:: import gettext t = gettext.translation('mymodule', ...) 
_ = t.gettext - This puts :func:`_` only in the module's global namespace and so only + This puts :func:`!_` only in the module's global namespace and so only affects calls within this module. .. versionchanged:: 3.8 @@ -314,7 +311,7 @@ initialize the "protected" :attr:`_charset` instance variable, defaulting to ids and message strings read from the catalog are converted to Unicode using this encoding, else ASCII is assumed. -Since message ids are read as Unicode strings too, all :meth:`*gettext` methods +Since message ids are read as Unicode strings too, all ``*gettext()`` methods will assume message ids as Unicode strings, not byte strings. The entire set of key/value pairs are placed into a dictionary and set as the @@ -404,13 +401,14 @@ version has a slightly different API. Its documented usage was:: _ = cat.gettext print(_('hello world')) -For compatibility with this older module, the function :func:`Catalog` is an +For compatibility with this older module, the function :func:`!Catalog` is an alias for the :func:`translation` function described above. One difference between this module and Henstridge's: his catalog objects supported access through a mapping API, but this appears to be unused and so is not currently supported. +.. _i18n-howto: Internationalizing your programs and modules -------------------------------------------- @@ -431,7 +429,7 @@ take the following steps: In order to prepare your code for I18N, you need to look at all the strings in your files. Any string that needs to be translated should be marked by wrapping -it in ``_('...')`` --- that is, a call to the function :func:`_`. For example:: +it in ``_('...')`` --- that is, a call to the function :func:`_ `. For example:: filename = 'mylog.txt' message = _('writing a log message') @@ -503,7 +501,7 @@ module:: Localizing your application ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you are localizing your application, you can install the :func:`_` function +If you are localizing your application, you can install the :func:`!_` function globally into the built-in namespace, usually in the main driver file of your application. This will let all your application-specific files just use ``_('...')`` without having to explicitly install it in each file. @@ -580,13 +578,13 @@ Here is one way you can handle this situation:: for a in animals: print(_(a)) -This works because the dummy definition of :func:`_` simply returns the string +This works because the dummy definition of :func:`!_` simply returns the string unchanged. And this dummy definition will temporarily override any definition -of :func:`_` in the built-in namespace (until the :keyword:`del` command). Take -care, though if you have a previous definition of :func:`_` in the local +of :func:`!_` in the built-in namespace (until the :keyword:`del` command). Take +care, though if you have a previous definition of :func:`!_` in the local namespace. -Note that the second use of :func:`_` will not identify "a" as being +Note that the second use of :func:`!_` will not identify "a" as being translatable to the :program:`gettext` program, because the parameter is not a string literal. @@ -605,13 +603,13 @@ Another way to handle this is with the following example:: print(_(a)) In this case, you are marking translatable strings with the function -:func:`N_`, which won't conflict with any definition of :func:`_`. +:func:`!N_`, which won't conflict with any definition of :func:`!_`. 
However, you will need to teach your message extraction program to -look for translatable strings marked with :func:`N_`. :program:`xgettext`, +look for translatable strings marked with :func:`!N_`. :program:`xgettext`, :program:`pygettext`, ``pybabel extract``, and :program:`xpot` all support this through the use of the :option:`!-k` command-line switch. -The choice of :func:`N_` here is totally arbitrary; it could have just -as easily been :func:`MarkThisStringForTranslation`. +The choice of :func:`!N_` here is totally arbitrary; it could have just +as easily been :func:`!MarkThisStringForTranslation`. Acknowledgements diff --git a/Doc/library/glob.rst b/Doc/library/glob.rst index 0e4cfe7ebed797c..6e4f72c19ff4c97 100644 --- a/Doc/library/glob.rst +++ b/Doc/library/glob.rst @@ -34,9 +34,7 @@ unlike :func:`fnmatch.fnmatch` or :func:`pathlib.Path.glob`. For a literal match, wrap the meta-characters in brackets. For example, ``'[?]'`` matches the character ``'?'``. - -.. seealso:: - The :mod:`pathlib` module offers high-level path objects. +The :mod:`glob` module defines the following functions: .. function:: glob(pathname, *, root_dir=None, dir_fd=None, recursive=False, \ @@ -117,7 +115,48 @@ For example, ``'[?]'`` matches the character ``'?'``. .. versionadded:: 3.4 -For example, consider a directory containing the following files: +.. function:: translate(pathname, *, recursive=False, include_hidden=False, seps=None) + + Convert the given path specification to a regular expression for use with + :func:`re.match`. The path specification can contain shell-style wildcards. + + For example: + + >>> import glob, re + >>> + >>> regex = glob.translate('**/*.txt', recursive=True, include_hidden=True) + >>> regex + '(?s:(?:.+/)?[^/]*\\.txt)\\Z' + >>> reobj = re.compile(regex) + >>> reobj.match('foo/bar/baz.txt') + + + Path separators and segments are meaningful to this function, unlike + :func:`fnmatch.translate`. By default wildcards do not match path + separators, and ``*`` pattern segments match precisely one path segment. + + If *recursive* is true, the pattern segment "``**``" will match any number + of path segments. If "``**``" occurs in any position other than a full + pattern segment, :exc:`ValueError` is raised. + + If *include_hidden* is true, wildcards can match path segments that start + with a dot (``.``). + + A sequence of path separators may be supplied to the *seps* argument. If + not given, :data:`os.sep` and :data:`~os.altsep` (if available) are used. + + .. seealso:: + + :meth:`pathlib.PurePath.match` and :meth:`pathlib.Path.glob` methods, + which call this function to implement pattern matching and globbing. + + .. versionadded:: 3.13 + + +Examples +-------- + +Consider a directory containing the following files: :file:`1.gif`, :file:`2.txt`, :file:`card.gif` and a subdirectory :file:`sub` which contains only the file :file:`3.txt`. :func:`glob` will produce the following results. Notice how any leading components of the path are @@ -146,6 +185,7 @@ default. For example, consider a directory containing :file:`card.gif` and ['.card.gif'] .. seealso:: + The :mod:`fnmatch` module offers shell-style filename (not path) expansion. - Module :mod:`fnmatch` - Shell-style filename (not path) expansion +.. seealso:: + The :mod:`pathlib` module offers high-level path objects. diff --git a/Doc/library/graphlib.rst b/Doc/library/graphlib.rst index 2bc80da4ead2a25..5414d6370b78ce6 100644 --- a/Doc/library/graphlib.rst +++ b/Doc/library/graphlib.rst @@ -17,7 +17,7 @@ .. 
class:: TopologicalSorter(graph=None) - Provides functionality to topologically sort a graph of hashable nodes. + Provides functionality to topologically sort a graph of :term:`hashable` nodes. A topological order is a linear ordering of the vertices in a graph such that for every directed edge u -> v from vertex u to vertex v, vertex u comes @@ -37,14 +37,14 @@ In the general case, the steps required to perform the sorting of a given graph are as follows: - * Create an instance of the :class:`TopologicalSorter` with an optional - initial graph. - * Add additional nodes to the graph. - * Call :meth:`~TopologicalSorter.prepare` on the graph. - * While :meth:`~TopologicalSorter.is_active` is ``True``, iterate over - the nodes returned by :meth:`~TopologicalSorter.get_ready` and - process them. Call :meth:`~TopologicalSorter.done` on each node as it - finishes processing. + * Create an instance of the :class:`TopologicalSorter` with an optional + initial graph. + * Add additional nodes to the graph. + * Call :meth:`~TopologicalSorter.prepare` on the graph. + * While :meth:`~TopologicalSorter.is_active` is ``True``, iterate over + the nodes returned by :meth:`~TopologicalSorter.get_ready` and + process them. Call :meth:`~TopologicalSorter.done` on each node as it + finishes processing. In case just an immediate sorting of the nodes in the graph is required and no parallelism is involved, the convenience method @@ -85,7 +85,7 @@ .. method:: add(node, *predecessors) Add a new node and its predecessors to the graph. Both the *node* and all - elements in *predecessors* must be hashable. + elements in *predecessors* must be :term:`hashable`. If called multiple times with the same node argument, the set of dependencies will be the union of all dependencies passed in. @@ -115,7 +115,7 @@ :meth:`TopologicalSorter.done` is less than the number that have been returned by :meth:`TopologicalSorter.get_ready`. - The :meth:`~TopologicalSorter.__bool__` method of this class defers to + The :meth:`~object.__bool__` method of this class defers to this function, so instead of:: if ts.is_active(): @@ -204,7 +204,7 @@ The :mod:`graphlib` module defines the following exception classes: in the working graph. If multiple cycles exist, only one undefined choice among them will be reported and included in the exception. - The detected cycle can be accessed via the second element in the :attr:`~CycleError.args` + The detected cycle can be accessed via the second element in the :attr:`~BaseException.args` attribute of the exception instance and consists in a list of nodes, such that each node is, in the graph, an immediate predecessor of the next node in the list. In the reported list, the first and the last node will be the same, to make it clear that it is cyclic. diff --git a/Doc/library/grp.rst b/Doc/library/grp.rst index 14af744e3ae8f4e..274a353103b488b 100644 --- a/Doc/library/grp.rst +++ b/Doc/library/grp.rst @@ -10,7 +10,7 @@ This module provides access to the Unix group database. It is available on all Unix versions. -.. include:: ../includes/wasm-notavail.rst +.. availability:: Unix, not Emscripten, not WASI. Group database entries are reported as a tuple-like object, whose attributes correspond to the members of the ``group`` structure (Attribute field below, see @@ -63,7 +63,3 @@ It defines the following items: Module :mod:`pwd` An interface to the user database, similar to this. - - Module :mod:`spwd` - An interface to the shadow password database, similar to this. 
- diff --git a/Doc/library/gzip.rst b/Doc/library/gzip.rst index 1a2582d6a904b22..50cde09fa10a9d9 100644 --- a/Doc/library/gzip.rst +++ b/Doc/library/gzip.rst @@ -70,7 +70,7 @@ The module defines the following items: .. class:: GzipFile(filename=None, mode=None, compresslevel=9, fileobj=None, mtime=None) Constructor for the :class:`GzipFile` class, which simulates most of the - methods of a :term:`file object`, with the exception of the :meth:`truncate` + methods of a :term:`file object`, with the exception of the :meth:`~io.IOBase.truncate` method. At least one of *fileobj* and *filename* must be given a non-trivial value. @@ -105,7 +105,7 @@ The module defines the following items: should only be provided in compression mode. If omitted or ``None``, the current time is used. See the :attr:`mtime` attribute for more details. - Calling a :class:`GzipFile` object's :meth:`close` method does not close + Calling a :class:`GzipFile` object's :meth:`!close` method does not close *fileobj*, since you might wish to append more material after the compressed data. This also allows you to pass an :class:`io.BytesIO` object opened for writing as *fileobj*, and retrieve the resulting memory buffer using the @@ -113,7 +113,7 @@ The module defines the following items: :class:`GzipFile` supports the :class:`io.BufferedIOBase` interface, including iteration and the :keyword:`with` statement. Only the - :meth:`truncate` method isn't implemented. + :meth:`~io.IOBase.truncate` method isn't implemented. :class:`GzipFile` also provides the following method and attribute: @@ -143,6 +143,12 @@ The module defines the following items: :func:`time.time` and the :attr:`~os.stat_result.st_mtime` attribute of the object returned by :func:`os.stat`. + .. attribute:: name + + The path to the gzip file on disk, as a :class:`str` or :class:`bytes`. + Equivalent to the output of :func:`os.fspath` on the original input path, + with no other normalization, resolution or expansion. + .. versionchanged:: 3.1 Support for the :keyword:`with` statement was added, along with the *mtime* constructor argument and :attr:`mtime` attribute. @@ -244,6 +250,8 @@ Example of how to GZIP compress a binary string:: .. program:: gzip +.. _gzip-cli: + Command Line Interface ---------------------- @@ -260,23 +268,23 @@ Once executed the :mod:`gzip` module keeps the input file(s). Command line options ^^^^^^^^^^^^^^^^^^^^ -.. cmdoption:: file +.. option:: file - If *file* is not specified, read from :attr:`sys.stdin`. + If *file* is not specified, read from :data:`sys.stdin`. -.. cmdoption:: --fast +.. option:: --fast Indicates the fastest compression method (less compression). -.. cmdoption:: --best +.. option:: --best Indicates the slowest compression method (best compression). -.. cmdoption:: -d, --decompress +.. option:: -d, --decompress Decompress the given file. -.. cmdoption:: -h, --help +.. option:: -h, --help Show the help message. diff --git a/Doc/library/hashlib.rst b/Doc/library/hashlib.rst index 8e47312fe77bf57..761dd84edee2996 100644 --- a/Doc/library/hashlib.rst +++ b/Doc/library/hashlib.rst @@ -11,7 +11,7 @@ .. index:: single: message digest, MD5 - single: secure hash algorithm, SHA1, SHA224, SHA256, SHA384, SHA512 + single: secure hash algorithm, SHA1, SHA2, SHA224, SHA256, SHA384, SHA512, SHA3, Shake, Blake2 .. testsetup:: @@ -22,7 +22,8 @@ This module implements a common interface to many different secure hash and message digest algorithms. 
Included are the FIPS secure hash algorithms SHA1, -SHA224, SHA256, SHA384, and SHA512 (defined in FIPS 180-2) as well as RSA's MD5 +SHA224, SHA256, SHA384, SHA512, (defined in `the FIPS 180-4 standard`_), +the SHA-3 series (defined in `the FIPS 202 standard`_) as well as RSA's MD5 algorithm (defined in internet :rfc:`1321`). The terms "secure hash" and "message digest" are interchangeable. Older algorithms were called message digests. The modern term is secure hash. @@ -32,11 +33,6 @@ digests. The modern term is secure hash. If you want the adler32 or crc32 hash functions, they are available in the :mod:`zlib` module. -.. warning:: - - Some algorithms have known hash collision weaknesses, refer to the "See - also" section at the end. - .. _hash-algorithms: @@ -44,38 +40,43 @@ Hash algorithms --------------- There is one constructor method named for each type of :dfn:`hash`. All return -a hash object with the same simple interface. For example: use :func:`sha256` to -create a SHA-256 hash object. You can now feed this object with :term:`bytes-like -objects ` (normally :class:`bytes`) using the :meth:`update` method. -At any point you can ask it for the :dfn:`digest` of the -concatenation of the data fed to it so far using the :meth:`digest` or -:meth:`hexdigest` methods. +a hash object with the same simple interface. For example: use :func:`sha256` +to create a SHA-256 hash object. You can now feed this object with +:term:`bytes-like objects ` (normally :class:`bytes`) using +the :meth:`update` method. At any point you can ask it for the +:dfn:`digest` of the concatenation of the data fed to it so far using the +:meth:`digest()` or :meth:`hexdigest()` methods. -.. note:: +To allow multithreading, the Python :term:`GIL` is released while computing a +hash supplied more than 2047 bytes of data at once in its constructor or +:meth:`.update` method. - For better multithreading performance, the Python :term:`GIL` is released for - data larger than 2047 bytes at object creation or on update. - -.. note:: - - Feeding string objects into :meth:`update` is not supported, as hashes work - on bytes, not on characters. .. index:: single: OpenSSL; (use in module hashlib) Constructors for hash algorithms that are always present in this module are -:func:`sha1`, :func:`sha224`, :func:`sha256`, :func:`sha384`, -:func:`sha512`, :func:`blake2b`, and :func:`blake2s`. -:func:`md5` is normally available as well, though it -may be missing or blocked if you are using a rare "FIPS compliant" build of Python. -Additional algorithms may also be available depending upon the OpenSSL -library that Python uses on your platform. On most platforms the +:func:`sha1`, :func:`sha224`, :func:`sha256`, :func:`sha384`, :func:`sha512`, :func:`sha3_224`, :func:`sha3_256`, :func:`sha3_384`, :func:`sha3_512`, -:func:`shake_128`, :func:`shake_256` are also available. +:func:`shake_128`, :func:`shake_256`, :func:`blake2b`, and :func:`blake2s`. +:func:`md5` is normally available as well, though it may be missing or blocked +if you are using a rare "FIPS compliant" build of Python. +These correspond to :data:`algorithms_guaranteed`. + +Additional algorithms may also be available if your Python distribution's +:mod:`hashlib` was linked against a build of OpenSSL that provides others. +Others *are not guaranteed available* on all installations and will only be +accessible by name via :func:`new`. See :data:`algorithms_available`. + +.. warning:: + + Some algorithms have known hash collision weaknesses (including MD5 and + SHA1). 
Refer to `Attacks on cryptographic hash algorithms`_ and the + `hashlib-seealso`_ section at the end of this document. .. versionadded:: 3.6 SHA3 (Keccak) and SHAKE constructors :func:`sha3_224`, :func:`sha3_256`, - :func:`sha3_384`, :func:`sha3_512`, :func:`shake_128`, :func:`shake_256`. + :func:`sha3_384`, :func:`sha3_512`, :func:`shake_128`, :func:`shake_256` + were added. .. versionadded:: 3.6 :func:`blake2b` and :func:`blake2s` were added. @@ -89,10 +90,19 @@ library that Python uses on your platform. On most platforms the that the hashing algorithm is not used in a security context, e.g. as a non-cryptographic one-way compression function. - Hashlib now uses SHA3 and SHAKE from OpenSSL 1.1.1 and newer. +.. versionchanged:: 3.9 + Hashlib now uses SHA3 and SHAKE from OpenSSL if it provides it. + +.. versionchanged:: 3.12 + For any of the MD5, SHA1, SHA2, or SHA3 algorithms that the linked + OpenSSL does not provide we fall back to a verified implementation from + the `HACL\* project`_. -For example, to obtain the digest of the byte string ``b"Nobody inspects the -spammish repetition"``:: +Usage +----- + +To obtain the digest of the byte string ``b"Nobody inspects the spammish +repetition"``:: >>> import hashlib >>> m = hashlib.sha256() @@ -108,22 +118,42 @@ More condensed: >>> hashlib.sha256(b"Nobody inspects the spammish repetition").hexdigest() '031edd7d41651593c5fe5c006fa5752b37fddff7bc4e843aa6af0c950f4b9406' -.. function:: new(name[, data], *, usedforsecurity=True) +Constructors +------------ + +.. function:: new(name[, data], \*, usedforsecurity=True) Is a generic constructor that takes the string *name* of the desired algorithm as its first parameter. It also exists to allow access to the above listed hashes as well as any other algorithms that your OpenSSL - library may offer. The named constructors are much faster than :func:`new` - and should be preferred. + library may offer. -Using :func:`new` with an algorithm provided by OpenSSL: +Using :func:`new` with an algorithm name: >>> h = hashlib.new('sha256') >>> h.update(b"Nobody inspects the spammish repetition") >>> h.hexdigest() '031edd7d41651593c5fe5c006fa5752b37fddff7bc4e843aa6af0c950f4b9406' -Hashlib provides the following constant attributes: + +.. function:: md5([, data], *, usedforsecurity=True) +.. function:: sha1([, data], *, usedforsecurity=True) +.. function:: sha224([, data], *, usedforsecurity=True) +.. function:: sha256([, data], *, usedforsecurity=True) +.. function:: sha384([, data], *, usedforsecurity=True) +.. function:: sha512([, data], *, usedforsecurity=True) +.. function:: sha3_224([, data], *, usedforsecurity=True) +.. function:: sha3_256([, data], *, usedforsecurity=True) +.. function:: sha3_384([, data], *, usedforsecurity=True) +.. function:: sha3_512([, data], *, usedforsecurity=True) + +Named constructors such as these are faster than passing an algorithm name to +:func:`new`. + +Attributes +---------- + +Hashlib provides the following constant module attributes: .. data:: algorithms_guaranteed @@ -144,10 +174,12 @@ Hashlib provides the following constant attributes: .. versionadded:: 3.2 +Hash Objects +------------ + The following values are provided as constant attributes of the hash objects returned by the constructors: - .. data:: hash.digest_size The size of the resulting hash in bytes. @@ -178,11 +210,6 @@ A hash object has the following methods: concatenation of all the arguments: ``m.update(a); m.update(b)`` is equivalent to ``m.update(a+b)``. - .. 
versionchanged:: 3.1 - The Python GIL is released to allow other threads to run while hash - updates on data larger than 2047 bytes is taking place when using hash - algorithms supplied by OpenSSL. - .. method:: hash.digest() @@ -207,6 +234,9 @@ A hash object has the following methods: SHAKE variable length digests ----------------------------- +.. function:: shake_128([, data], *, usedforsecurity=True) +.. function:: shake_256([, data], *, usedforsecurity=True) + The :func:`shake_128` and :func:`shake_256` algorithms provide variable length digests with length_in_bits//2 up to 128 or 256 bits of security. As such, their digest methods require a length. Maximum length is not limited @@ -214,7 +244,7 @@ by the SHAKE algorithm. .. method:: shake.digest(length) - Return the digest of the data passed to the :meth:`update` method so far. + Return the digest of the data passed to the :meth:`~hash.update` method so far. This is a bytes object of size *length* which may contain bytes in the whole range from 0 to 255. @@ -223,8 +253,13 @@ by the SHAKE algorithm. Like :meth:`digest` except the digest is returned as a string object of double length, containing only hexadecimal digits. This may be used to - exchange the value safely in email or other non-binary environments. + exchange the value in email or other non-binary environments. +Example use: + + >>> h = hashlib.shake_256(b'Nobody inspects the spammish repetition') + >>> h.hexdigest(20) + '44709d6fcb83d92a76dcb0b668c98e1b1d3dafe7' File hashing ------------ @@ -329,6 +364,8 @@ include a `salt `_. .. versionadded:: 3.6 +.. _hashlib-blake2: + BLAKE2 ------ @@ -430,9 +467,10 @@ Constructor functions also accept the following tree hashing parameters: .. figure:: hashlib-blake2-tree.png :alt: Explanation of tree mode parameters. + :class: invert-in-dark-mode See section 2.10 in `BLAKE2 specification -`_ for comprehensive review of tree +`_ for comprehensive review of tree hashing. @@ -471,9 +509,9 @@ Simple hashing To calculate hash of some data, you should first construct a hash object by calling the appropriate constructor function (:func:`blake2b` or -:func:`blake2s`), then update it with the data by calling :meth:`update` on the +:func:`blake2s`), then update it with the data by calling :meth:`~hash.update` on the object, and, finally, get the digest out of the object by calling -:meth:`digest` (or :meth:`hexdigest` for hex-encoded string). +:meth:`~hash.digest` (or :meth:`~hash.hexdigest` for hex-encoded string). >>> from hashlib import blake2b >>> h = blake2b() @@ -497,6 +535,7 @@ update the hash: >>> h = blake2b() >>> for item in items: ... h.update(item) + ... >>> h.hexdigest() '6ff843ba685842aa82031d3f53c48b66326df7639a63d128974c5c14f31a0f33343a8c65551134ed1ae0f2b0dd2bb495dc81039e3eeb0aa1bb0388bbeac29183' @@ -618,7 +657,7 @@ on the hash function used in digital signatures. by the signer. (`NIST SP-800-106 "Randomized Hashing for Digital Signatures" - `_) + `_) In BLAKE2 the salt is processed as a one-time input to the hash function during initialization, rather than as an input to each compression function. @@ -627,7 +666,7 @@ initialization, rather than as an input to each compression function. *Salted hashing* (or just hashing) with BLAKE2 or any other general-purpose cryptographic hash function, such as SHA-256, is not suitable for hashing - passwords. See `BLAKE2 FAQ `_ for more + passwords. See `BLAKE2 FAQ `_ for more information. .. @@ -763,16 +802,22 @@ Domain Dedication 1.0 Universal: * *Alexandr Sokolovskiy* -.. 
_BLAKE2: https://blake2.net +.. _BLAKE2: https://www.blake2.net .. _HMAC: https://en.wikipedia.org/wiki/Hash-based_message_authentication_code -.. _BLAKE: https://131002.net/blake/ -.. _SHA-3: https://en.wikipedia.org/wiki/NIST_hash_function_competition +.. _BLAKE: https://web.archive.org/web/20200918190133/https://131002.net/blake/ +.. _SHA-3: https://en.wikipedia.org/wiki/Secure_Hash_Algorithms .. _ChaCha: https://cr.yp.to/chacha.html .. _pyblake2: https://pythonhosted.org/pyblake2/ .. _NIST-SP-800-132: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-132.pdf .. _stackexchange pbkdf2 iterations question: https://security.stackexchange.com/questions/3959/recommended-of-iterations-when-using-pbkdf2-sha256/ +.. _Attacks on cryptographic hash algorithms: https://en.wikipedia.org/wiki/Cryptographic_hash_function#Attacks_on_cryptographic_hash_algorithms +.. _the FIPS 180-4 standard: https://csrc.nist.gov/publications/detail/fips/180/4/final +.. _the FIPS 202 standard: https://csrc.nist.gov/publications/detail/fips/202/final +.. _HACL\* project: https://github.com/hacl-star/hacl-star +.. _hashlib-seealso: + .. seealso:: Module :mod:`hmac` @@ -781,15 +826,18 @@ Domain Dedication 1.0 Universal: Module :mod:`base64` Another way to encode binary hashes for non-binary environments. - https://blake2.net - Official BLAKE2 website. + https://nvlpubs.nist.gov/nistpubs/fips/nist.fips.180-4.pdf + The FIPS 180-4 publication on Secure Hash Algorithms. - https://csrc.nist.gov/csrc/media/publications/fips/180/2/archive/2002-08-01/documents/fips180-2.pdf - The FIPS 180-2 publication on Secure Hash Algorithms. + https://csrc.nist.gov/publications/detail/fips/202/final + The FIPS 202 publication on the SHA-3 Standard. + + https://www.blake2.net/ + Official BLAKE2 website. - https://en.wikipedia.org/wiki/Cryptographic_hash_function#Cryptographic_hash_algorithms - Wikipedia article with information on which algorithms have known issues and - what that means regarding their use. + https://en.wikipedia.org/wiki/Cryptographic_hash_function + Wikipedia article with information on which algorithms have known issues + and what that means regarding their use. https://www.ietf.org/rfc/rfc8018.txt PKCS #5: Password-Based Cryptography Specification Version 2.1 diff --git a/Doc/library/html.parser.rst b/Doc/library/html.parser.rst index 03aff25ce6117a5..d35090111e0822c 100644 --- a/Doc/library/html.parser.rst +++ b/Doc/library/html.parser.rst @@ -173,7 +173,7 @@ implementations do nothing (except for :meth:`~HTMLParser.handle_startendtag`): .. method:: HTMLParser.handle_charref(name) This method is called to process decimal and hexadecimal numeric character - references of the form ``&#NNN;`` and ``&#xNNN;``. For example, the decimal + references of the form :samp:`&#{NNN};` and :samp:`&#x{NNN};`. For example, the decimal equivalent for ``>`` is ``>``, whereas the hexadecimal is ``>``; in this case the method will receive ``'62'`` or ``'x3E'``. This method is never called if *convert_charrefs* is ``True``. diff --git a/Doc/library/http.client.rst b/Doc/library/http.client.rst index 06f92510a5e4b31..c46314fc5e253b8 100644 --- a/Doc/library/http.client.rst +++ b/Doc/library/http.client.rst @@ -10,7 +10,7 @@ pair: HTTP; protocol single: HTTP; http.client (standard module) -.. index:: module: urllib.request +.. index:: pair: module; urllib.request -------------- @@ -20,7 +20,7 @@ HTTPS protocols. It is normally not used directly --- the module .. 
seealso:: - The `Requests package `_ + The `Requests package `_ is recommended for a higher-level HTTP client interface. .. note:: @@ -67,10 +67,9 @@ The module provides the following classes: *blocksize* parameter was added. -.. class:: HTTPSConnection(host, port=None, key_file=None, \ - cert_file=None[, timeout], \ - source_address=None, *, context=None, \ - check_hostname=None, blocksize=8192) +.. class:: HTTPSConnection(host, port=None, *[, timeout], \ + source_address=None, context=None, \ + blocksize=8192) A subclass of :class:`HTTPConnection` that uses SSL for communication with secure servers. Default port is ``443``. If *context* is specified, it @@ -84,7 +83,7 @@ The module provides the following classes: .. versionchanged:: 3.2 This class now supports HTTPS virtual hosts if possible (that is, - if :data:`ssl.HAS_SNI` is true). + if :const:`ssl.HAS_SNI` is true). .. versionchanged:: 3.4 The *strict* parameter was removed. HTTP 0.9-style "Simple Responses" are @@ -106,16 +105,9 @@ The module provides the following classes: ``http/1.1`` when no *context* is given. Custom *context* should set ALPN protocols with :meth:`~ssl.SSLContext.set_alpn_protocol`. - .. deprecated:: 3.6 - - *key_file* and *cert_file* are deprecated in favor of *context*. - Please use :meth:`ssl.SSLContext.load_cert_chain` instead, or let - :func:`ssl.create_default_context` select the system's trusted CA - certificates for you. - - The *check_hostname* parameter is also deprecated; the - :attr:`ssl.SSLContext.check_hostname` attribute of *context* should - be used instead. + .. versionchanged:: 3.12 + The deprecated *key_file*, *cert_file* and *check_hostname* parameters + have been removed. .. class:: HTTPResponse(sock, debuglevel=0, method=None, url=None) @@ -262,7 +254,10 @@ HTTPConnection Objects encode_chunked=False) This will send a request to the server using the HTTP request - method *method* and the selector *url*. + method *method* and the request URI *url*. The provided *url* must be + an absolute path to conform with :rfc:`RFC 2616 §5.1.2 <2616#section-5.1.2>` + (unless connecting to an HTTP proxy server or using the ``OPTIONS`` or + ``CONNECT`` methods). If *body* is specified, the specified data is sent after the headers are finished. It may be a :class:`str`, a :term:`bytes-like object`, an @@ -277,7 +272,10 @@ HTTPConnection Objects iterable are sent as is until the iterable is exhausted. The *headers* argument should be a mapping of extra HTTP headers to send - with the request. + with the request. A :rfc:`Host header <2616#section-14.23>` + must be provided to conform with :rfc:`RFC 2616 §5.1.2 <2616#section-5.1.2>` + (unless connecting to an HTTP proxy server or using the ``OPTIONS`` or + ``CONNECT`` methods). If *headers* contains neither Content-Length nor Transfer-Encoding, but there is a request body, one of those @@ -296,6 +294,16 @@ HTTPConnection Objects HTTPConnection object assumes that all encoding is handled by the calling code. If it is ``True``, the body will be chunk-encoded. + For example, to perform a ``GET`` request to ``https://docs.python.org/3/``:: + + >>> import http.client + >>> host = "docs.python.org" + >>> conn = http.client.HTTPSConnection(host) + >>> conn.request("GET", "/3/", headers={"Host": host}) + >>> response = conn.getresponse() + >>> print(response.status, response.reason) + 200 OK + .. note:: Chunked transfer encoding has been added to the HTTP protocol version 1.1. 
Unless the HTTP server is known to handle HTTP 1.1, @@ -344,13 +352,20 @@ HTTPConnection Objects Set the host and the port for HTTP Connect Tunnelling. This allows running the connection through a proxy server. - The host and port arguments specify the endpoint of the tunneled connection + The *host* and *port* arguments specify the endpoint of the tunneled connection (i.e. the address included in the CONNECT request, *not* the address of the proxy server). - The headers argument should be a mapping of extra HTTP headers to send with + The *headers* argument should be a mapping of extra HTTP headers to send with the CONNECT request. + As HTTP/1.1 is used for HTTP CONNECT tunnelling requests, `as per the RFC + `_, an HTTP ``Host:`` + header must be provided, matching the authority-form of the request target + provided as the destination for the CONNECT request. If an HTTP ``Host:`` + header is not provided via the headers argument, one is generated and + transmitted automatically. + For example, to tunnel through a HTTPS proxy server running locally on port 8080, we would pass the address of the proxy to the :class:`HTTPSConnection` constructor, and the address of the host that we eventually want to reach to @@ -363,6 +378,22 @@ HTTPConnection Objects .. versionadded:: 3.2 + .. versionchanged:: 3.12 + HTTP CONNECT tunnelling requests use protocol HTTP/1.1, upgraded from + protocol HTTP/1.0. ``Host:`` HTTP headers are mandatory for HTTP/1.1, so + one will be automatically generated and transmitted if not provided in + the headers argument. + + +.. method:: HTTPConnection.get_proxy_response_headers() + + Returns a dictionary with the headers of the response received from + the proxy server to the CONNECT request. + + If the CONNECT request was not sent, the method returns ``None``. + + .. versionadded:: 3.12 + .. method:: HTTPConnection.connect() @@ -530,7 +561,7 @@ statement. .. deprecated:: 3.9 Deprecated in favor of :attr:`~HTTPResponse.headers`. -.. method:: HTTPResponse.getstatus() +.. method:: HTTPResponse.getcode() .. deprecated:: 3.9 Deprecated in favor of :attr:`~HTTPResponse.status`. diff --git a/Doc/library/http.cookiejar.rst b/Doc/library/http.cookiejar.rst index 87ef156a0bed578..12a6d768437ea54 100644 --- a/Doc/library/http.cookiejar.rst +++ b/Doc/library/http.cookiejar.rst @@ -44,8 +44,8 @@ The module defines the following exception: cookies from a file. :exc:`LoadError` is a subclass of :exc:`OSError`. .. versionchanged:: 3.3 - LoadError was made a subclass of :exc:`OSError` instead of - :exc:`IOError`. + :exc:`LoadError` used to be a subtype of :exc:`IOError`, which is now an + alias of :exc:`OSError`. The following classes are provided: diff --git a/Doc/library/http.rst b/Doc/library/http.rst index 9d002dc33ccb8d4..5e1912716e5319f 100644 --- a/Doc/library/http.rst +++ b/Doc/library/http.rst @@ -171,16 +171,25 @@ Property Indicates that Details Usage:: >>> from http import HTTPMethod - >>> HTTMethod.GET - HTTMethod.GET - >>> HTTMethod.GET == 'GET' + >>> + >>> HTTPMethod.GET + <HTTPMethod.GET: 'GET'> + >>> HTTPMethod.GET == 'GET' True - >>> HTTMethod.GET.value + >>> HTTPMethod.GET.value 'GET' - >>> HTTMethod.GET.description - 'Transfer a current representation of the target resource.' + >>> HTTPMethod.GET.description + 'Retrieve the target.' >>> list(HTTPMethod) - [HTTPMethod.GET, HTTPMethod.HEAD, ...] + [<HTTPMethod.CONNECT: 'CONNECT'>, + <HTTPMethod.DELETE: 'DELETE'>, + <HTTPMethod.GET: 'GET'>, + <HTTPMethod.HEAD: 'HEAD'>, + <HTTPMethod.OPTIONS: 'OPTIONS'>, + <HTTPMethod.PATCH: 'PATCH'>, + <HTTPMethod.POST: 'POST'>, + <HTTPMethod.PUT: 'PUT'>, + <HTTPMethod.TRACE: 'TRACE'>] .. 
_http-methods: diff --git a/Doc/library/http.server.rst b/Doc/library/http.server.rst index 81b6bf5373b495c..6f79b222790094a 100644 --- a/Doc/library/http.server.rst +++ b/Doc/library/http.server.rst @@ -217,7 +217,7 @@ provides three different variants: attribute holds the default values for *message* and *explain* that will be used if no value is provided; for unknown codes the default value for both is the string ``???``. The body will be empty if the method is - HEAD or the response code is one of the following: ``1xx``, + HEAD or the response code is one of the following: :samp:`1{xx}`, ``204 No Content``, ``205 Reset Content``, ``304 Not Modified``. .. versionchanged:: 3.4 @@ -413,6 +413,11 @@ the current directory:: print("serving at port", PORT) httpd.serve_forever() + +:class:`SimpleHTTPRequestHandler` can also be subclassed to enhance behavior, +such as using different index file names by overriding the class attribute +:attr:`index_pages`. + .. _http-server-cli: :mod:`http.server` can also be invoked directly using the :option:`-m` @@ -497,11 +502,24 @@ following command runs an HTTP/1.1 conformant server:: Note that CGI scripts will be run with UID of user nobody, for security reasons. Problems with the CGI script will be translated to error 403. + .. deprecated-removed:: 3.13 3.15 + + :class:`CGIHTTPRequestHandler` is being removed in 3.15. CGI has not + been considered a good way to do things for well over a decade. This code + has been unmaintained for a while now and sees very little practical use. + Retaining it could lead to further :ref:`security considerations + `. + :class:`CGIHTTPRequestHandler` can be enabled in the command line by passing the ``--cgi`` option:: python -m http.server --cgi +.. deprecated-removed:: 3.13 3.15 + + :mod:`http.server` command line ``--cgi`` support is being removed + because :class:`CGIHTTPRequestHandler` is being removed. + .. _http.server-security: Security Considerations @@ -512,3 +530,12 @@ Security Considerations :class:`SimpleHTTPRequestHandler` will follow symbolic links when handling requests, this makes it possible for files outside of the specified directory to be served. + +Earlier versions of Python did not scrub control characters from the +log messages emitted to stderr from ``python -m http.server`` or the +default :class:`BaseHTTPRequestHandler` ``.log_message`` +implementation. This could allow remote clients connecting to your +server to send nefarious control codes to your terminal. + +.. versionadded:: 3.12 + Control characters are scrubbed in stderr logs. diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst index 3058bcead661f3e..e710d0bacf3fee5 100644 --- a/Doc/library/idle.rst +++ b/Doc/library/idle.rst @@ -439,24 +439,24 @@ the :kbd:`Command` key on macOS. 
* Some useful Emacs bindings are inherited from Tcl/Tk: - * :kbd:`C-a` beginning of line + * :kbd:`C-a` beginning of line - * :kbd:`C-e` end of line + * :kbd:`C-e` end of line - * :kbd:`C-k` kill line (but doesn't put it in clipboard) + * :kbd:`C-k` kill line (but doesn't put it in clipboard) - * :kbd:`C-l` center window around the insertion point + * :kbd:`C-l` center window around the insertion point - * :kbd:`C-b` go backward one character without deleting (usually you can - also use the cursor key for this) + * :kbd:`C-b` go backward one character without deleting (usually you can + also use the cursor key for this) - * :kbd:`C-f` go forward one character without deleting (usually you can - also use the cursor key for this) + * :kbd:`C-f` go forward one character without deleting (usually you can + also use the cursor key for this) - * :kbd:`C-p` go up one line (usually you can also use the cursor key for - this) + * :kbd:`C-p` go up one line (usually you can also use the cursor key for + this) - * :kbd:`C-d` delete next character + * :kbd:`C-d` delete next character Standard keybindings (like :kbd:`C-c` to copy and :kbd:`C-v` to paste) may work. Keybindings are selected in the Configure IDLE dialog. @@ -479,7 +479,7 @@ Search and Replace Any selection becomes a search target. However, only selections within a line work because searches are only performed within lines with the -terminal newline removed. If ``[x] Regular expresion`` is checked, the +terminal newline removed. If ``[x] Regular expression`` is checked, the target is interpreted according to the Python re module. .. _completions: diff --git a/Doc/library/imaplib.rst b/Doc/library/imaplib.rst index 0c10e7afee401fb..1f774e64b0eae33 100644 --- a/Doc/library/imaplib.rst +++ b/Doc/library/imaplib.rst @@ -84,8 +84,8 @@ Three exceptions are defined as attributes of the :class:`IMAP4` class: There's also a subclass for secure connections: -.. class:: IMAP4_SSL(host='', port=IMAP4_SSL_PORT, keyfile=None, \ - certfile=None, ssl_context=None, timeout=None) +.. class:: IMAP4_SSL(host='', port=IMAP4_SSL_PORT, *, ssl_context=None, \ + timeout=None) This is a subclass derived from :class:`IMAP4` that connects over an SSL encrypted socket (to use this class you need a socket module that was compiled @@ -96,12 +96,6 @@ There's also a subclass for secure connections: (potentially long-lived) structure. Please read :ref:`ssl-security` for best practices. - *keyfile* and *certfile* are a legacy alternative to *ssl_context* - they - can point to PEM-formatted private key and certificate chain files for - the SSL connection. Note that the *keyfile*/*certfile* parameters are - mutually exclusive with *ssl_context*, a :class:`ValueError` is raised - if *keyfile*/*certfile* is provided along with *ssl_context*. - The optional *timeout* parameter specifies a timeout in seconds for the connection attempt. If timeout is not given or is None, the global default socket timeout is used. @@ -112,18 +106,14 @@ There's also a subclass for secure connections: .. versionchanged:: 3.4 The class now supports hostname check with :attr:`ssl.SSLContext.check_hostname` and *Server Name Indication* (see - :data:`ssl.HAS_SNI`). - - .. deprecated:: 3.6 - - *keyfile* and *certfile* are deprecated in favor of *ssl_context*. - Please use :meth:`ssl.SSLContext.load_cert_chain` instead, or let - :func:`ssl.create_default_context` select the system's trusted CA - certificates for you. + :const:`ssl.HAS_SNI`). .. 
versionchanged:: 3.9 The optional *timeout* parameter was added. + .. versionchanged:: 3.12 + The deprecated *keyfile* and *certfile* parameters have been removed. + The second subclass allows for connections created by a child process: @@ -187,7 +177,7 @@ IMAP4 Objects ------------- All IMAP4rev1 commands are represented by methods of the same name, either -upper-case or lower-case. +uppercase or lowercase. All arguments to commands are converted to strings, except for ``AUTHENTICATE``, and the last argument to ``APPEND`` which is passed as an IMAP4 literal. If @@ -513,7 +503,7 @@ An :class:`IMAP4` instance has the following methods: .. versionchanged:: 3.4 The method now supports hostname check with :attr:`ssl.SSLContext.check_hostname` and *Server Name Indication* (see - :data:`ssl.HAS_SNI`). + :const:`ssl.HAS_SNI`). .. method:: IMAP4.status(mailbox, names) @@ -564,7 +554,7 @@ An :class:`IMAP4` instance has the following methods: ``search``, the searching *charset* argument is mandatory. There is also a ``uid thread`` command which corresponds to ``thread`` the way that ``uid search`` corresponds to ``search``. The ``thread`` command first searches the - mailbox for messages that match the given searching criteria using the charset + mailbox for messages that match the given searching criteria using the *charset* argument for the interpretation of strings in the searching criteria. It then returns the matching messages threaded according to the specified threading algorithm. diff --git a/Doc/library/imghdr.rst b/Doc/library/imghdr.rst deleted file mode 100644 index 630fd7019f94dea..000000000000000 --- a/Doc/library/imghdr.rst +++ /dev/null @@ -1,86 +0,0 @@ -:mod:`imghdr` --- Determine the type of an image -================================================ - -.. module:: imghdr - :synopsis: Determine the type of image contained in a file or byte stream. - :deprecated: - -**Source code:** :source:`Lib/imghdr.py` - -.. deprecated-removed:: 3.11 3.13 - The :mod:`imghdr` module is deprecated - (see :pep:`PEP 594 <594#imghdr>` for details and alternatives). - --------------- - -The :mod:`imghdr` module determines the type of image contained in a file or -byte stream. - -The :mod:`imghdr` module defines the following function: - - -.. function:: what(file, h=None) - - Test the image data contained in the file named *file* and return a - string describing the image type. If *h* is provided, the *file* - argument is ignored and *h* is assumed to contain the byte stream to test. - - .. versionchanged:: 3.6 - Accepts a :term:`path-like object`. 
- -The following image types are recognized, as listed below with the return value -from :func:`what`: - -+------------+-----------------------------------+ -| Value | Image format | -+============+===================================+ -| ``'rgb'`` | SGI ImgLib Files | -+------------+-----------------------------------+ -| ``'gif'`` | GIF 87a and 89a Files | -+------------+-----------------------------------+ -| ``'pbm'`` | Portable Bitmap Files | -+------------+-----------------------------------+ -| ``'pgm'`` | Portable Graymap Files | -+------------+-----------------------------------+ -| ``'ppm'`` | Portable Pixmap Files | -+------------+-----------------------------------+ -| ``'tiff'`` | TIFF Files | -+------------+-----------------------------------+ -| ``'rast'`` | Sun Raster Files | -+------------+-----------------------------------+ -| ``'xbm'`` | X Bitmap Files | -+------------+-----------------------------------+ -| ``'jpeg'`` | JPEG data in JFIF or Exif formats | -+------------+-----------------------------------+ -| ``'bmp'`` | BMP files | -+------------+-----------------------------------+ -| ``'png'`` | Portable Network Graphics | -+------------+-----------------------------------+ -| ``'webp'`` | WebP files | -+------------+-----------------------------------+ -| ``'exr'`` | OpenEXR Files | -+------------+-----------------------------------+ - -.. versionadded:: 3.5 - The *exr* and *webp* formats were added. - - -You can extend the list of file types :mod:`imghdr` can recognize by appending -to this variable: - - -.. data:: tests - - A list of functions performing the individual tests. Each function takes two - arguments: the byte-stream and an open file-like object. When :func:`what` is - called with a byte-stream, the file-like object will be ``None``. - - The test function should return a string describing the image type if the test - succeeded, or ``None`` if it failed. - -Example:: - - >>> import imghdr - >>> imghdr.what('bass.gif') - 'gif' - diff --git a/Doc/library/imp.rst b/Doc/library/imp.rst deleted file mode 100644 index 000793a7e66cae0..000000000000000 --- a/Doc/library/imp.rst +++ /dev/null @@ -1,411 +0,0 @@ -:mod:`imp` --- Access the :ref:`import ` internals -================================================================ - -.. module:: imp - :synopsis: Access the implementation of the import statement. - :deprecated: - -**Source code:** :source:`Lib/imp.py` - -.. deprecated-removed:: 3.4 3.12 - The :mod:`imp` module is deprecated in favor of :mod:`importlib`. - -.. index:: statement: import - --------------- - -This module provides an interface to the mechanisms used to implement the -:keyword:`import` statement. It defines the following constants and functions: - - -.. function:: get_magic() - - .. index:: pair: file; byte-code - - Return the magic string value used to recognize byte-compiled code files - (:file:`.pyc` files). (This value may be different for each Python version.) - - .. deprecated:: 3.4 - Use :attr:`importlib.util.MAGIC_NUMBER` instead. - - -.. function:: get_suffixes() - - Return a list of 3-element tuples, each describing a particular type of - module. 
Each triple has the form ``(suffix, mode, type)``, where *suffix* is - a string to be appended to the module name to form the filename to search - for, *mode* is the mode string to pass to the built-in :func:`open` function - to open the file (this can be ``'r'`` for text files or ``'rb'`` for binary - files), and *type* is the file type, which has one of the values - :const:`PY_SOURCE`, :const:`PY_COMPILED`, or :const:`C_EXTENSION`, described - below. - - .. deprecated:: 3.3 - Use the constants defined on :mod:`importlib.machinery` instead. - - -.. function:: find_module(name[, path]) - - Try to find the module *name*. If *path* is omitted or ``None``, the list of - directory names given by ``sys.path`` is searched, but first a few special - places are searched: the function tries to find a built-in module with the - given name (:const:`C_BUILTIN`), then a frozen module (:const:`PY_FROZEN`), - and on some systems some other places are looked in as well (on Windows, it - looks in the registry which may point to a specific file). - - Otherwise, *path* must be a list of directory names; each directory is - searched for files with any of the suffixes returned by :func:`get_suffixes` - above. Invalid names in the list are silently ignored (but all list items - must be strings). - - If search is successful, the return value is a 3-element tuple ``(file, - pathname, description)``: - - *file* is an open :term:`file object` positioned at the beginning, *pathname* - is the pathname of the file found, and *description* is a 3-element tuple as - contained in the list returned by :func:`get_suffixes` describing the kind of - module found. - - If the module is built-in or frozen then *file* and *pathname* are both ``None`` - and the *description* tuple contains empty strings for its suffix and mode; - the module type is indicated as given in parentheses above. If the search - is unsuccessful, :exc:`ImportError` is raised. Other exceptions indicate - problems with the arguments or environment. - - If the module is a package, *file* is ``None``, *pathname* is the package - path and the last item in the *description* tuple is :const:`PKG_DIRECTORY`. - - This function does not handle hierarchical module names (names containing - dots). In order to find *P.M*, that is, submodule *M* of package *P*, use - :func:`find_module` and :func:`load_module` to find and load package *P*, and - then use :func:`find_module` with the *path* argument set to ``P.__path__``. - When *P* itself has a dotted name, apply this recipe recursively. - - .. deprecated:: 3.3 - Use :func:`importlib.util.find_spec` instead unless Python 3.3 - compatibility is required, in which case use - :func:`importlib.find_loader`. For example usage of the former case, - see the :ref:`importlib-examples` section of the :mod:`importlib` - documentation. - - -.. function:: load_module(name, file, pathname, description) - - Load a module that was previously found by :func:`find_module` (or by an - otherwise conducted search yielding compatible results). This function does - more than importing the module: if the module was already imported, it will - reload the module! The *name* argument indicates the full - module name (including the package name, if this is a submodule of a - package). The *file* argument is an open file, and *pathname* is the - corresponding file name; these can be ``None`` and ``''``, respectively, when - the module is a package or not being loaded from a file. 
The *description* - argument is a tuple, as would be returned by :func:`get_suffixes`, describing - what kind of module must be loaded. - - If the load is successful, the return value is the module object; otherwise, - an exception (usually :exc:`ImportError`) is raised. - - **Important:** the caller is responsible for closing the *file* argument, if - it was not ``None``, even when an exception is raised. This is best done - using a :keyword:`try` ... :keyword:`finally` statement. - - .. deprecated:: 3.3 - If previously used in conjunction with :func:`imp.find_module` then - consider using :func:`importlib.import_module`, otherwise use the loader - returned by the replacement you chose for :func:`imp.find_module`. If you - called :func:`imp.load_module` and related functions directly with file - path arguments then use a combination of - :func:`importlib.util.spec_from_file_location` and - :func:`importlib.util.module_from_spec`. See the :ref:`importlib-examples` - section of the :mod:`importlib` documentation for details of the various - approaches. - - -.. function:: new_module(name) - - Return a new empty module object called *name*. This object is *not* inserted - in ``sys.modules``. - - .. deprecated:: 3.4 - Use :func:`importlib.util.module_from_spec` instead. - - -.. function:: reload(module) - - Reload a previously imported *module*. The argument must be a module object, so - it must have been successfully imported before. This is useful if you have - edited the module source file using an external editor and want to try out the - new version without leaving the Python interpreter. The return value is the - module object (the same as the *module* argument). - - When ``reload(module)`` is executed: - - * Python modules' code is recompiled and the module-level code reexecuted, - defining a new set of objects which are bound to names in the module's - dictionary. The ``init`` function of extension modules is not called a second - time. - - * As with all other objects in Python the old objects are only reclaimed after - their reference counts drop to zero. - - * The names in the module namespace are updated to point to any new or changed - objects. - - * Other references to the old objects (such as names external to the module) are - not rebound to refer to the new objects and must be updated in each namespace - where they occur if that is desired. - - There are a number of other caveats: - - When a module is reloaded, its dictionary (containing the module's global - variables) is retained. Redefinitions of names will override the old - definitions, so this is generally not a problem. If the new version of a module - does not define a name that was defined by the old version, the old definition - remains. This feature can be used to the module's advantage if it maintains a - global table or cache of objects --- with a :keyword:`try` statement it can test - for the table's presence and skip its initialization if desired:: - - try: - cache - except NameError: - cache = {} - - It is legal though generally not very useful to reload built-in or dynamically - loaded modules, except for :mod:`sys`, :mod:`__main__` and :mod:`builtins`. - In many cases, however, extension modules are not designed to be initialized - more than once, and may fail in arbitrary ways when reloaded. - - If a module imports objects from another module using :keyword:`from` ... 
- :keyword:`import` ..., calling :func:`reload` for the other module does not - redefine the objects imported from it --- one way around this is to re-execute - the :keyword:`!from` statement, another is to use :keyword:`!import` and qualified - names (*module*.*name*) instead. - - If a module instantiates instances of a class, reloading the module that defines - the class does not affect the method definitions of the instances --- they - continue to use the old class definition. The same is true for derived classes. - - .. versionchanged:: 3.3 - Relies on both ``__name__`` and ``__loader__`` being defined on the module - being reloaded instead of just ``__name__``. - - .. deprecated:: 3.4 - Use :func:`importlib.reload` instead. - - -The following functions are conveniences for handling :pep:`3147` byte-compiled -file paths. - -.. versionadded:: 3.2 - -.. function:: cache_from_source(path, debug_override=None) - - Return the :pep:`3147` path to the byte-compiled file associated with the - source *path*. For example, if *path* is ``/foo/bar/baz.py`` the return - value would be ``/foo/bar/__pycache__/baz.cpython-32.pyc`` for Python 3.2. - The ``cpython-32`` string comes from the current magic tag (see - :func:`get_tag`; if :attr:`sys.implementation.cache_tag` is not defined then - :exc:`NotImplementedError` will be raised). By passing in ``True`` or - ``False`` for *debug_override* you can override the system's value for - ``__debug__``, leading to optimized bytecode. - - *path* need not exist. - - .. versionchanged:: 3.3 - If :attr:`sys.implementation.cache_tag` is ``None``, then - :exc:`NotImplementedError` is raised. - - .. deprecated:: 3.4 - Use :func:`importlib.util.cache_from_source` instead. - - .. versionchanged:: 3.5 - The *debug_override* parameter no longer creates a ``.pyo`` file. - - -.. function:: source_from_cache(path) - - Given the *path* to a :pep:`3147` file name, return the associated source code - file path. For example, if *path* is - ``/foo/bar/__pycache__/baz.cpython-32.pyc`` the returned path would be - ``/foo/bar/baz.py``. *path* need not exist, however if it does not conform - to :pep:`3147` format, a :exc:`ValueError` is raised. If - :attr:`sys.implementation.cache_tag` is not defined, - :exc:`NotImplementedError` is raised. - - .. versionchanged:: 3.3 - Raise :exc:`NotImplementedError` when - :attr:`sys.implementation.cache_tag` is not defined. - - .. deprecated:: 3.4 - Use :func:`importlib.util.source_from_cache` instead. - - -.. function:: get_tag() - - Return the :pep:`3147` magic tag string matching this version of Python's - magic number, as returned by :func:`get_magic`. - - .. deprecated:: 3.4 - Use :attr:`sys.implementation.cache_tag` directly starting - in Python 3.3. - - -The following functions help interact with the import system's internal -locking mechanism. Locking semantics of imports are an implementation -detail which may vary from release to release. However, Python ensures -that circular imports work without any deadlocks. - - -.. function:: lock_held() - - Return ``True`` if the global import lock is currently held, else - ``False``. On platforms without threads, always return ``False``. - - On platforms with threads, a thread executing an import first holds a - global import lock, then sets up a per-module lock for the rest of the - import. This blocks other threads from importing the same module until - the original import completes, preventing other threads from seeing - incomplete module objects constructed by the original thread. 
An - exception is made for circular imports, which by construction have to - expose an incomplete module object at some point. - - .. versionchanged:: 3.3 - The locking scheme has changed to per-module locks for - the most part. A global import lock is kept for some critical tasks, - such as initializing the per-module locks. - - .. deprecated:: 3.4 - - -.. function:: acquire_lock() - - Acquire the interpreter's global import lock for the current thread. - This lock should be used by import hooks to ensure thread-safety when - importing modules. - - Once a thread has acquired the import lock, the same thread may acquire it - again without blocking; the thread must release it once for each time it has - acquired it. - - On platforms without threads, this function does nothing. - - .. versionchanged:: 3.3 - The locking scheme has changed to per-module locks for - the most part. A global import lock is kept for some critical tasks, - such as initializing the per-module locks. - - .. deprecated:: 3.4 - - -.. function:: release_lock() - - Release the interpreter's global import lock. On platforms without - threads, this function does nothing. - - .. versionchanged:: 3.3 - The locking scheme has changed to per-module locks for - the most part. A global import lock is kept for some critical tasks, - such as initializing the per-module locks. - - .. deprecated:: 3.4 - - -The following constants with integer values, defined in this module, are used -to indicate the search result of :func:`find_module`. - - -.. data:: PY_SOURCE - - The module was found as a source file. - - .. deprecated:: 3.3 - - -.. data:: PY_COMPILED - - The module was found as a compiled code object file. - - .. deprecated:: 3.3 - - -.. data:: C_EXTENSION - - The module was found as dynamically loadable shared library. - - .. deprecated:: 3.3 - - -.. data:: PKG_DIRECTORY - - The module was found as a package directory. - - .. deprecated:: 3.3 - - -.. data:: C_BUILTIN - - The module was found as a built-in module. - - .. deprecated:: 3.3 - - -.. data:: PY_FROZEN - - The module was found as a frozen module. - - .. deprecated:: 3.3 - - -.. class:: NullImporter(path_string) - - The :class:`NullImporter` type is a :pep:`302` import hook that handles - non-directory path strings by failing to find any modules. Calling this type - with an existing directory or empty string raises :exc:`ImportError`. - Otherwise, a :class:`NullImporter` instance is returned. - - Instances have only one method: - - .. method:: NullImporter.find_module(fullname [, path]) - - This method always returns ``None``, indicating that the requested module could - not be found. - - .. versionchanged:: 3.3 - ``None`` is inserted into ``sys.path_importer_cache`` instead of an - instance of :class:`NullImporter`. - - .. deprecated:: 3.4 - Insert ``None`` into ``sys.path_importer_cache`` instead. - - -.. _examples-imp: - -Examples --------- - -The following function emulates what was the standard import statement up to -Python 1.4 (no hierarchical module names). (This *implementation* wouldn't work -in that version, since :func:`find_module` has been extended and -:func:`load_module` has been added in 1.4.) :: - - import imp - import sys - - def __import__(name, globals=None, locals=None, fromlist=None): - # Fast path: see if the module has already been imported. - try: - return sys.modules[name] - except KeyError: - pass - - # If any of the following calls raises an exception, - # there's a problem we can't handle -- let the caller handle it. 
- - fp, pathname, description = imp.find_module(name) - - try: - return imp.load_module(name, fp, pathname, description) - finally: - # Since we may exit via an exception, close fp explicitly. - if fp: - fp.close() diff --git a/Doc/library/importlib.metadata.rst b/Doc/library/importlib.metadata.rst index 988d1a317f59602..1df7d8d772a2747 100644 --- a/Doc/library/importlib.metadata.rst +++ b/Doc/library/importlib.metadata.rst @@ -1,11 +1,11 @@ .. _using: -================================= - Using :mod:`!importlib.metadata` -================================= +======================================================== +:mod:`!importlib.metadata` -- Accessing package metadata +======================================================== .. module:: importlib.metadata - :synopsis: The implementation of the importlib metadata. + :synopsis: Accessing package metadata .. versionadded:: 3.8 .. versionchanged:: 3.10 @@ -13,7 +13,7 @@ **Source code:** :source:`Lib/importlib/metadata/__init__.py` -``importlib_metadata`` is a library that provides access to +``importlib.metadata`` is a library that provides access to the metadata of an installed `Distribution Package `_, such as its entry points or its top-level names (`Import Package `_\s, modules, if any). @@ -24,7 +24,7 @@ API`_ and `metadata API`_ of ``pkg_resources``. Along with this package can eliminate the need to use the older and less efficient ``pkg_resources`` package. -``importlib_metadata`` operates on third-party *distribution packages* +``importlib.metadata`` operates on third-party *distribution packages* installed into Python's ``site-packages`` directory via tools such as `pip `_. Specifically, it works with distributions with discoverable @@ -41,7 +41,7 @@ and metadata defined by the `Core metadata specifications ` + You can use :ref:`packages_distributions() ` to get a mapping between them. By default, distribution metadata can live on the file system @@ -73,7 +73,7 @@ something into it: .. code-block:: shell-session - $ python3 -m venv example + $ python -m venv example $ source example/bin/activate (example) $ python -m pip install wheel @@ -178,7 +178,7 @@ The "selectable" entry points were introduced in ``importlib_metadata`` no parameters and always returned a dictionary of entry points, keyed by group. With ``importlib_metadata`` 5.0 and Python 3.12, ``entry_points`` always returns an ``EntryPoints`` object. See -`backports.entry_points_selectable `_ +`backports.entry_points_selectable `_ for compatibility options. @@ -308,6 +308,10 @@ Python module or `Import Package >> packages_distributions() {'importlib_metadata': ['importlib-metadata'], 'yaml': ['PyYAML'], 'jaraco': ['jaraco.classes', 'jaraco.functools'], ...} +Some editable installs, `do not supply top-level names +`_, and thus this +function is not reliable with such installs. + .. versionadded:: 3.10 .. _distributions: @@ -364,7 +368,7 @@ system :ref:`finders `. To find a distribution package's m ``importlib.metadata`` queries the list of :term:`meta path finders ` on :data:`sys.meta_path`. -By default ``importlib_metadata`` installs a finder for distribution packages +By default ``importlib.metadata`` installs a finder for distribution packages found on the file system. This finder doesn't actually find any *distributions*, but it can find their metadata. 
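As a minimal sketch of the query functions discussed in this section (assuming the ``wheel`` distribution from the virtual-environment example above is installed; any installed distribution works the same way)::

    from importlib.metadata import metadata, packages_distributions, version

    # Version string of an installed distribution package.
    print(version("wheel"))

    # Core metadata fields exposed as a mapping-like object.
    print(metadata("wheel")["Summary"])

    # Map top-level import names to the distributions providing them.
    print(packages_distributions()["wheel"])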
diff --git a/Doc/library/importlib.resources.abc.rst b/Doc/library/importlib.resources.abc.rst index 57fffe0d905cbe7..c508b6ba965cc02 100644 --- a/Doc/library/importlib.resources.abc.rst +++ b/Doc/library/importlib.resources.abc.rst @@ -43,8 +43,6 @@ :const:`None`. An object compatible with this ABC should only be returned when the specified module is a package. - .. versionadded:: 3.7 - .. deprecated-removed:: 3.12 3.14 Use :class:`importlib.resources.abc.TraversableResources` instead. @@ -89,13 +87,11 @@ .. class:: Traversable - An object with a subset of pathlib.Path methods suitable for + An object with a subset of :class:`pathlib.Path` methods suitable for traversing directories and opening files. - .. versionadded:: 3.9 - - .. deprecated-removed:: 3.12 3.14 - Use :class:`importlib.resources.abc.Traversable` instead. + For a representation of the object on the file-system, use + :meth:`importlib.resources.as_file`. .. attribute:: name @@ -127,7 +123,7 @@ suitable for reading (same as :attr:`pathlib.Path.open`). When opening as text, accepts encoding parameters such as those - accepted by :attr:`io.TextIOWrapper`. + accepted by :class:`io.TextIOWrapper`. .. method:: read_bytes() @@ -142,19 +138,14 @@ An abstract base class for resource readers capable of serving the :meth:`importlib.resources.files` interface. Subclasses - :class:`importlib.resources.abc.ResourceReader` and provides - concrete implementations of the :class:`importlib.resources.abc.ResourceReader`'s + :class:`ResourceReader` and provides + concrete implementations of the :class:`!ResourceReader`'s abstract methods. Therefore, any loader supplying - :class:`importlib.abc.TraversableReader` also supplies ResourceReader. + :class:`!TraversableResources` also supplies :class:`!ResourceReader`. Loaders that wish to support resource reading are expected to implement this interface. - .. versionadded:: 3.9 - - .. deprecated-removed:: 3.12 3.14 - Use :class:`importlib.resources.abc.TraversableResources` instead. - .. abstractmethod:: files() Returns a :class:`importlib.resources.abc.Traversable` object for the loaded diff --git a/Doc/library/importlib.resources.rst b/Doc/library/importlib.resources.rst index 827e7d8d5aced40..a5adf0b8546dbfc 100644 --- a/Doc/library/importlib.resources.rst +++ b/Doc/library/importlib.resources.rst @@ -1,5 +1,5 @@ -:mod:`importlib.resources` -- Resources ---------------------------------------- +:mod:`importlib.resources` -- Package resource reading, opening and access +-------------------------------------------------------------------------- .. module:: importlib.resources :synopsis: Package resource reading, opening, and access @@ -11,9 +11,18 @@ .. versionadded:: 3.7 This module leverages Python's import system to provide access to *resources* -within *packages*. If you can import a package, you can access resources -within that package. Resources can be opened or read, in either binary or -text mode. +within *packages*. + +"Resources" are file-like resources associated with a module or package in +Python. The resources may be contained directly in a package, within a +subdirectory contained in that package, or adjacent to modules outside a +package. Resources may be text or binary. As a result, Python module sources +(.py) of a package and compilation artifacts (pycache) are technically +de-facto resources of that package. In practice, however, resources are +primarily those non-Python artifacts exposed specifically by the package +author. 
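For instance, a package author may ship a small data file alongside the package's modules; a minimal sketch of reading it with the :func:`files` API documented below (``mypkg`` and ``data.txt`` are hypothetical names)::

    from importlib.resources import files

    # Resolve the resource relative to the package and read it as text.
    text = files("mypkg").joinpath("data.txt").read_text(encoding="utf-8")
    print(text)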
+ +Resources can be opened or read in either binary or text mode. Resources are roughly akin to files inside directories, though it's important to keep in mind that this is just a metaphor. Resources and packages **do @@ -41,181 +50,50 @@ for example, a package and its resources can be imported from a zip file using ``get_resource_reader(fullname)`` method as specified by :class:`importlib.resources.abc.ResourceReader`. -.. data:: Package +.. class:: Anchor - Whenever a function accepts a ``Package`` argument, you can pass in - either a :class:`module object ` or a module name - as a string. You can only pass module objects whose - ``__spec__.submodule_search_locations`` is not ``None``. + Represents an anchor for resources, either a :class:`module object + ` or a module name as a string. Defined as + ``Union[str, ModuleType]``. - The ``Package`` type is defined as ``Union[str, ModuleType]``. - -.. function:: files(package) +.. function:: files(anchor: Optional[Anchor] = None) Returns a :class:`~importlib.resources.abc.Traversable` object - representing the resource container for the package (think directory) - and its resources (think files). A Traversable may contain other - containers (think subdirectories). + representing the resource container (think directory) and its resources + (think files). A Traversable may contain other containers (think + subdirectories). - *package* is either a name or a module object which conforms to the - :data:`Package` requirements. + *anchor* is an optional :class:`Anchor`. If the anchor is a + package, resources are resolved from that package. If a module, + resources are resolved adjacent to that module (in the same package + or the package root). If the anchor is omitted, the caller's module + is used. .. versionadded:: 3.9 + .. versionchanged:: 3.12 + *package* parameter was renamed to *anchor*. *anchor* can now + be a non-package module and if omitted will default to the caller's + module. *package* is still accepted for compatibility but will raise + a :exc:`DeprecationWarning`. Consider passing the anchor positionally or + using ``importlib_resources >= 5.10`` for a compatible interface + on older Pythons. + .. function:: as_file(traversable) Given a :class:`~importlib.resources.abc.Traversable` object representing - a file, typically from :func:`importlib.resources.files`, return - a context manager for use in a :keyword:`with` statement. + a file or directory, typically from :func:`importlib.resources.files`, + return a context manager for use in a :keyword:`with` statement. The context manager provides a :class:`pathlib.Path` object. - Exiting the context manager cleans up any temporary file created when the - resource was extracted from e.g. a zip file. + Exiting the context manager cleans up any temporary file or directory + created when the resource was extracted from e.g. a zip file. Use ``as_file`` when the Traversable methods - (``read_text``, etc) are insufficient and an actual file on + (``read_text``, etc) are insufficient and an actual file or directory on the file system is required. .. versionadded:: 3.9 -Deprecated functions --------------------- - -An older, deprecated set of functions is still available, but is -scheduled for removal in a future version of Python. -The main drawback of these functions is that they do not support -directories: they assume all resources are located directly within a *package*. - -.. 
data:: Resource - - For *resource* arguments of the functions below, you can pass in - the name of a resource as a string or - a :class:`path-like object `. - - The ``Resource`` type is defined as ``Union[str, os.PathLike]``. - -.. function:: open_binary(package, resource) - - Open for binary reading the *resource* within *package*. - - *package* is either a name or a module object which conforms to the - ``Package`` requirements. *resource* is the name of the resource to open - within *package*; it may not contain path separators and it may not have - sub-resources (i.e. it cannot be a directory). This function returns a - ``typing.BinaryIO`` instance, a binary I/O stream open for reading. - - .. deprecated:: 3.11 - - Calls to this function can be replaced by:: - - files(package).joinpath(resource).open('rb') - - -.. function:: open_text(package, resource, encoding='utf-8', errors='strict') - - Open for text reading the *resource* within *package*. By default, the - resource is opened for reading as UTF-8. - - *package* is either a name or a module object which conforms to the - ``Package`` requirements. *resource* is the name of the resource to open - within *package*; it may not contain path separators and it may not have - sub-resources (i.e. it cannot be a directory). *encoding* and *errors* - have the same meaning as with built-in :func:`open`. - - This function returns a ``typing.TextIO`` instance, a text I/O stream open - for reading. - - .. deprecated:: 3.11 - - Calls to this function can be replaced by:: - - files(package).joinpath(resource).open('r', encoding=encoding) - - -.. function:: read_binary(package, resource) - - Read and return the contents of the *resource* within *package* as - ``bytes``. - - *package* is either a name or a module object which conforms to the - ``Package`` requirements. *resource* is the name of the resource to open - within *package*; it may not contain path separators and it may not have - sub-resources (i.e. it cannot be a directory). This function returns the - contents of the resource as :class:`bytes`. - - .. deprecated:: 3.11 - - Calls to this function can be replaced by:: - - files(package).joinpath(resource).read_bytes() - - -.. function:: read_text(package, resource, encoding='utf-8', errors='strict') - - Read and return the contents of *resource* within *package* as a ``str``. - By default, the contents are read as strict UTF-8. - - *package* is either a name or a module object which conforms to the - ``Package`` requirements. *resource* is the name of the resource to open - within *package*; it may not contain path separators and it may not have - sub-resources (i.e. it cannot be a directory). *encoding* and *errors* - have the same meaning as with built-in :func:`open`. This function - returns the contents of the resource as :class:`str`. - - .. deprecated:: 3.11 - - Calls to this function can be replaced by:: - - files(package).joinpath(resource).read_text(encoding=encoding) - - -.. function:: path(package, resource) - - Return the path to the *resource* as an actual file system path. This - function returns a context manager for use in a :keyword:`with` statement. - The context manager provides a :class:`pathlib.Path` object. - - Exiting the context manager cleans up any temporary file created when the - resource needs to be extracted from e.g. a zip file. - - *package* is either a name or a module object which conforms to the - ``Package`` requirements. 
*resource* is the name of the resource to open - within *package*; it may not contain path separators and it may not have - sub-resources (i.e. it cannot be a directory). - - .. deprecated:: 3.11 - - Calls to this function can be replaced using :func:`as_file`:: - - as_file(files(package).joinpath(resource)) - - -.. function:: is_resource(package, name) - - Return ``True`` if there is a resource named *name* in the package, - otherwise ``False``. - This function does not consider directories to be resources. - *package* is either a name or a module object which conforms to the - ``Package`` requirements. - - .. deprecated:: 3.11 - - Calls to this function can be replaced by:: - - files(package).joinpath(resource).is_file() - - -.. function:: contents(package) - - Return an iterable over the named items within the package. The iterable - returns :class:`str` resources (e.g. files) and non-resources - (e.g. directories). The iterable does not recurse into subdirectories. - - *package* is either a name or a module object which conforms to the - ``Package`` requirements. - - .. deprecated:: 3.11 - - Calls to this function can be replaced by:: - - (resource.name for resource in files(package).iterdir() if resource.is_file()) + .. versionchanged:: 3.12 + Added support for *traversable* representing a directory. diff --git a/Doc/library/importlib.rst b/Doc/library/importlib.rst index 3fc1531c0cdf19a..2402bc5cd3ee2c3 100644 --- a/Doc/library/importlib.rst +++ b/Doc/library/importlib.rst @@ -127,28 +127,6 @@ Functions .. versionchanged:: 3.3 Parent packages are automatically imported. -.. function:: find_loader(name, path=None) - - Find the loader for a module, optionally within the specified *path*. If the - module is in :attr:`sys.modules`, then ``sys.modules[name].__loader__`` is - returned (unless the loader would be ``None`` or is not set, in which case - :exc:`ValueError` is raised). Otherwise a search using :attr:`sys.meta_path` - is done. ``None`` is returned if no loader is found. - - A dotted name does not have its parents implicitly imported as that requires - loading them and that may not be desired. To properly import a submodule you - will need to import all parent packages of the submodule and use the correct - argument to *path*. - - .. versionadded:: 3.3 - - .. versionchanged:: 3.4 - If ``__loader__`` is not set, raise :exc:`ValueError`, just like when the - attribute is set to ``None``. - - .. deprecated:: 3.4 - Use :func:`importlib.util.find_spec` instead. - .. function:: invalidate_caches() Invalidate the internal caches of finders stored at @@ -247,7 +225,6 @@ are also provided to help in implementing the core ABCs. ABC hierarchy:: object - +-- Finder (deprecated) +-- MetaPathFinder +-- PathEntryFinder +-- Loader @@ -258,28 +235,6 @@ ABC hierarchy:: +-- SourceLoader -.. class:: Finder - - An abstract base class representing a :term:`finder`. - - .. deprecated:: 3.3 - Use :class:`MetaPathFinder` or :class:`PathEntryFinder` instead. - - .. abstractmethod:: find_module(fullname, path=None) - - An abstract method for finding a :term:`loader` for the specified - module. Originally specified in :pep:`302`, this method was meant - for use in :data:`sys.meta_path` and in the path-based import subsystem. - - .. versionchanged:: 3.4 - Returns ``None`` when called instead of raising - :exc:`NotImplementedError`. - - .. deprecated:: 3.10 - Implement :meth:`MetaPathFinder.find_spec` or - :meth:`PathEntryFinder.find_spec` instead. - - .. 
class:: MetaPathFinder An abstract base class representing a :term:`meta path finder`. @@ -287,7 +242,7 @@ ABC hierarchy:: .. versionadded:: 3.3 .. versionchanged:: 3.10 - No longer a subclass of :class:`Finder`. + No longer a subclass of :class:`!Finder`. .. method:: find_spec(fullname, path, target=None) @@ -303,25 +258,6 @@ ABC hierarchy:: .. versionadded:: 3.4 - .. method:: find_module(fullname, path) - - A legacy method for finding a :term:`loader` for the specified - module. If this is a top-level import, *path* will be ``None``. - Otherwise, this is a search for a subpackage or module and *path* - will be the value of :attr:`__path__` from the parent - package. If a loader cannot be found, ``None`` is returned. - - If :meth:`find_spec` is defined, backwards-compatible functionality is - provided. - - .. versionchanged:: 3.4 - Returns ``None`` when called instead of raising - :exc:`NotImplementedError`. Can use :meth:`find_spec` to provide - functionality. - - .. deprecated:: 3.4 - Use :meth:`find_spec` instead. - .. method:: invalidate_caches() An optional method which, when called, should invalidate any internal @@ -342,7 +278,7 @@ ABC hierarchy:: .. versionadded:: 3.3 .. versionchanged:: 3.10 - No longer a subclass of :class:`Finder`. + No longer a subclass of :class:`!Finder`. .. method:: find_spec(fullname, target=None) @@ -356,36 +292,6 @@ ABC hierarchy:: .. versionadded:: 3.4 - .. method:: find_loader(fullname) - - A legacy method for finding a :term:`loader` for the specified - module. Returns a 2-tuple of ``(loader, portion)`` where ``portion`` - is a sequence of file system locations contributing to part of a namespace - package. The loader may be ``None`` while specifying ``portion`` to - signify the contribution of the file system locations to a namespace - package. An empty list can be used for ``portion`` to signify the loader - is not part of a namespace package. If ``loader`` is ``None`` and - ``portion`` is the empty list then no loader or location for a namespace - package were found (i.e. failure to find anything for the module). - - If :meth:`find_spec` is defined then backwards-compatible functionality is - provided. - - .. versionchanged:: 3.4 - Returns ``(None, [])`` instead of raising :exc:`NotImplementedError`. - Uses :meth:`find_spec` when available to provide functionality. - - .. deprecated:: 3.4 - Use :meth:`find_spec` instead. - - .. method:: find_module(fullname) - - A concrete implementation of :meth:`Finder.find_module` which is - equivalent to ``self.find_loader(fullname)[0]``. - - .. deprecated:: 3.4 - Use :meth:`find_spec` instead. - .. method:: invalidate_caches() An optional method which, when called, should invalidate any internal @@ -466,7 +372,7 @@ ABC hierarchy:: The list of locations where the package's submodules will be found. Most of the time this is a single directory. The import system passes this attribute to ``__import__()`` and to finders - in the same way as :attr:`sys.path` but just for the package. + in the same way as :data:`sys.path` but just for the package. It is not set on non-package modules so it can be used as an indicator that the module is a package. @@ -703,7 +609,7 @@ ABC hierarchy:: automatically. When writing to the path fails because the path is read-only - (:attr:`errno.EACCES`/:exc:`PermissionError`), do not propagate the + (:const:`errno.EACCES`/:exc:`PermissionError`), do not propagate the exception. .. versionchanged:: 3.4 @@ -739,6 +645,160 @@ ABC hierarchy:: itself does not end in ``__init__``. +.. 
class:: ResourceReader + + *Superseded by TraversableResources* + + An :term:`abstract base class` to provide the ability to read + *resources*. + + From the perspective of this ABC, a *resource* is a binary + artifact that is shipped within a package. Typically this is + something like a data file that lives next to the ``__init__.py`` + file of the package. The purpose of this class is to help abstract + out the accessing of such data files so that it does not matter if + the package and its data file(s) are stored in a e.g. zip file + versus on the file system. + + For any of methods of this class, a *resource* argument is + expected to be a :term:`path-like object` which represents + conceptually just a file name. This means that no subdirectory + paths should be included in the *resource* argument. This is + because the location of the package the reader is for, acts as the + "directory". Hence the metaphor for directories and file + names is packages and resources, respectively. This is also why + instances of this class are expected to directly correlate to + a specific package (instead of potentially representing multiple + packages or a module). + + Loaders that wish to support resource reading are expected to + provide a method called ``get_resource_reader(fullname)`` which + returns an object implementing this ABC's interface. If the module + specified by fullname is not a package, this method should return + :const:`None`. An object compatible with this ABC should only be + returned when the specified module is a package. + + .. versionadded:: 3.7 + + .. deprecated-removed:: 3.12 3.14 + Use :class:`importlib.resources.abc.TraversableResources` instead. + + .. abstractmethod:: open_resource(resource) + + Returns an opened, :term:`file-like object` for binary reading + of the *resource*. + + If the resource cannot be found, :exc:`FileNotFoundError` is + raised. + + .. abstractmethod:: resource_path(resource) + + Returns the file system path to the *resource*. + + If the resource does not concretely exist on the file system, + raise :exc:`FileNotFoundError`. + + .. abstractmethod:: is_resource(name) + + Returns ``True`` if the named *name* is considered a resource. + :exc:`FileNotFoundError` is raised if *name* does not exist. + + .. abstractmethod:: contents() + + Returns an :term:`iterable` of strings over the contents of + the package. Do note that it is not required that all names + returned by the iterator be actual resources, e.g. it is + acceptable to return names for which :meth:`is_resource` would + be false. + + Allowing non-resource names to be returned is to allow for + situations where how a package and its resources are stored + are known a priori and the non-resource names would be useful. + For instance, returning subdirectory names is allowed so that + when it is known that the package and resources are stored on + the file system then those subdirectory names can be used + directly. + + The abstract method returns an iterable of no items. + + +.. class:: Traversable + + An object with a subset of :class:`pathlib.Path` methods suitable for + traversing directories and opening files. + + For a representation of the object on the file-system, use + :meth:`importlib.resources.as_file`. + + .. versionadded:: 3.9 + + .. deprecated-removed:: 3.12 3.14 + Use :class:`importlib.resources.abc.Traversable` instead. + + .. attribute:: name + + Abstract. The base name of this object without any parent references. + + .. 
abstractmethod:: iterdir() + + Yield ``Traversable`` objects in ``self``. + + .. abstractmethod:: is_dir() + + Return ``True`` if ``self`` is a directory. + + .. abstractmethod:: is_file() + + Return ``True`` if ``self`` is a file. + + .. abstractmethod:: joinpath(child) + + Return Traversable child in ``self``. + + .. abstractmethod:: __truediv__(child) + + Return ``Traversable`` child in ``self``. + + .. abstractmethod:: open(mode='r', *args, **kwargs) + + *mode* may be 'r' or 'rb' to open as text or binary. Return a handle + suitable for reading (same as :attr:`pathlib.Path.open`). + + When opening as text, accepts encoding parameters such as those + accepted by :attr:`io.TextIOWrapper`. + + .. method:: read_bytes() + + Read contents of ``self`` as bytes. + + .. method:: read_text(encoding=None) + + Read contents of ``self`` as text. + + +.. class:: TraversableResources + + An abstract base class for resource readers capable of serving + the :meth:`importlib.resources.files` interface. Subclasses + :class:`importlib.resources.abc.ResourceReader` and provides + concrete implementations of the :class:`importlib.resources.abc.ResourceReader`'s + abstract methods. Therefore, any loader supplying + :class:`importlib.abc.TraversableResources` also supplies ResourceReader. + + Loaders that wish to support resource reading are expected to + implement this interface. + + .. versionadded:: 3.9 + + .. deprecated-removed:: 3.12 3.14 + Use :class:`importlib.resources.abc.TraversableResources` instead. + + .. abstractmethod:: files() + + Returns a :class:`importlib.resources.abc.Traversable` object for the loaded + package. + + :mod:`importlib.machinery` -- Importers and path hooks ------------------------------------------------------ @@ -881,13 +941,6 @@ find and load modules. is no longer valid then ``None`` is returned but no value is cached in :data:`sys.path_importer_cache`. - .. classmethod:: find_module(fullname, path=None) - - A legacy wrapper around :meth:`find_spec`. - - .. deprecated:: 3.4 - Use :meth:`find_spec` instead. - .. classmethod:: invalidate_caches() Calls :meth:`importlib.abc.PathEntryFinder.invalidate_caches` on all @@ -938,20 +991,13 @@ find and load modules. .. versionadded:: 3.4 - .. method:: find_loader(fullname) - - Attempt to find the loader to handle *fullname* within :attr:`path`. - - .. deprecated:: 3.10 - Use :meth:`find_spec` instead. - .. method:: invalidate_caches() Clear out the internal cache. .. classmethod:: path_hook(*loader_details) - A class method which returns a closure for use on :attr:`sys.path_hooks`. + A class method which returns a closure for use on :data:`sys.path_hooks`. An instance of :class:`FileFinder` is returned by the closure using the path argument given to the closure directly and *loader_details* indirectly. @@ -1049,8 +1095,15 @@ find and load modules. The *fullname* argument specifies the name of the module the loader is to support. The *path* argument is the path to the extension module's file. + Note that, by default, importing an extension module will fail + in subinterpreters if it doesn't implement multi-phase init + (see :pep:`489`), even if it would otherwise import successfully. + .. versionadded:: 3.3 + .. versionchanged:: 3.12 + Multi-phase init is now required for use in subinterpreters. + .. attribute:: name Name of the module the loader supports. @@ -1092,7 +1145,7 @@ find and load modules. .. versionadded:: 3.4 -.. class:: NamespaceLoader(name, path, path_finder): +.. 
class:: NamespaceLoader(name, path, path_finder)

   A concrete implementation of :class:`importlib.abc.InspectLoader` for
   namespace packages. This is an alias for a private class and is only made
@@ -1292,10 +1345,10 @@ an :term:`importer`.
 .. function:: find_spec(name, package=None)

    Find the :term:`spec <module spec>` for a module, optionally relative to
-   the specified **package** name. If the module is in :attr:`sys.modules`,
+   the specified **package** name. If the module is in :data:`sys.modules`,
    then ``sys.modules[name].__spec__`` is returned (unless the spec would be
    ``None`` or is not set, in which case :exc:`ValueError` is raised).
-   Otherwise a search using :attr:`sys.meta_path` is done. ``None`` is
+   Otherwise a search using :data:`sys.meta_path` is done. ``None`` is
    returned if no spec is found.

    If **name** is for a submodule (contains a dot), the parent module is
@@ -1356,6 +1409,30 @@ an :term:`importer`.

    .. versionadded:: 3.7

+.. function:: _incompatible_extension_module_restrictions(*, disable_check)
+
+   A context manager that can temporarily skip the compatibility check
+   for extension modules. By default the check is enabled and will fail
+   when a single-phase init module is imported in a subinterpreter.
+   It will also fail for a multi-phase init module that doesn't
+   explicitly support a per-interpreter GIL, when imported
+   in an interpreter with its own GIL.
+
+   Note that this function is meant to accommodate an unusual case;
+   one which is likely to eventually go away. There is a pretty good
+   chance this is not what you were looking for.
+
+   You can get the same effect as this function by implementing the
+   basic interface of multi-phase init (:pep:`489`) and lying about
+   support for multiple interpreters (or per-interpreter GIL).
+
+   .. warning::
+      Using this function to disable the check can lead to
+      unexpected behavior and even crashes. It should only be used during
+      extension module development.
+
+   .. versionadded:: 3.12
+
 .. class:: LazyLoader(loader)

    A class which postpones the execution of the loader of a module until the
@@ -1367,7 +1444,7 @@ an :term:`importer`.
    :meth:`~importlib.abc.Loader.create_module` method must return ``None`` or a
    type for which its ``__class__`` attribute can be mutated along with not
    using :term:`slots <__slots__>`. Finally, modules which substitute the object
-   placed into :attr:`sys.modules` will not work as there is no way to properly
+   placed into :data:`sys.modules` will not work as there is no way to properly
    replace the module references throughout the interpreter safely;
    :exc:`ValueError` is raised if such a substitution is detected.

@@ -1387,7 +1464,7 @@ an :term:`importer`.

    .. classmethod:: factory(loader)

-      A static method which returns a callable that creates a lazy loader. This
+      A class method which returns a callable that creates a lazy loader. This
       is meant to be used in situations where the loader is passed by class
       instead of by instance. ::

@@ -1491,9 +1568,9 @@ For deep customizations of import, you typically want to implement an
 :term:`importer`. This means managing both the :term:`finder` and :term:`loader`
 side of things. For finders there are two flavours to choose from depending on
 your needs: a :term:`meta path finder` or a :term:`path entry finder`. The
-former is what you would put on :attr:`sys.meta_path` while the latter is what
-you create using a :term:`path entry hook` on :attr:`sys.path_hooks` which works
-with :attr:`sys.path` entries to potentially create a finder.
This example will +former is what you would put on :data:`sys.meta_path` while the latter is what +you create using a :term:`path entry hook` on :data:`sys.path_hooks` which works +with :data:`sys.path` entries to potentially create a finder. This example will show you how to register your own importers so that import will use them (for creating an importer for yourself, read the documentation for the appropriate classes defined within this package):: diff --git a/Doc/library/index.rst b/Doc/library/index.rst index 7d2002b37df12c1..0b348ae6f5c8c0a 100644 --- a/Doc/library/index.rst +++ b/Doc/library/index.rst @@ -27,8 +27,8 @@ as a collection of packages, so it may be necessary to use the packaging tools provided with the operating system to obtain some or all of the optional components. -In addition to the standard library, there is a growing collection of -several thousand components (from individual programs and modules to +In addition to the standard library, there is an active collection of +hundreds of thousands of components (from individual programs and modules to packages and entire application development frameworks), available from the `Python Package Index `_. @@ -73,5 +73,6 @@ the `Python Package Index `_. language.rst windows.rst unix.rst + cmdline.rst superseded.rst security_warnings.rst diff --git a/Doc/library/inspect.rst b/Doc/library/inspect.rst index 44f1ae04c9e39eb..b463c0b6d0e4020 100644 --- a/Doc/library/inspect.rst +++ b/Doc/library/inspect.rst @@ -32,7 +32,7 @@ The :func:`getmembers` function retrieves the members of an object such as a class or module. The functions whose names begin with "is" are mainly provided as convenient choices for the second argument to :func:`getmembers`. They also help you determine when you can expect to find the following special -attributes: +attributes (see :ref:`import-mod-attrs` for module attributes): .. this function name is too big to fit in the ascii-art table below .. |coroutine-origin-link| replace:: :func:`sys.set_coroutine_origin_tracking_depth` @@ -40,11 +40,6 @@ attributes: +-----------+-------------------+---------------------------+ | Type | Attribute | Description | +===========+===================+===========================+ -| module | __doc__ | documentation string | -+-----------+-------------------+---------------------------+ -| | __file__ | filename (missing for | -| | | built-in modules) | -+-----------+-------------------+---------------------------+ | class | __doc__ | documentation string | +-----------+-------------------+---------------------------+ | | __name__ | name with which this | @@ -348,8 +343,10 @@ attributes: .. function:: iscoroutinefunction(object) - Return ``True`` if the object is a :term:`coroutine function` - (a function defined with an :keyword:`async def` syntax). + Return ``True`` if the object is a :term:`coroutine function` (a function + defined with an :keyword:`async def` syntax), a :func:`functools.partial` + wrapping a :term:`coroutine function`, or a sync function marked with + :func:`markcoroutinefunction`. .. versionadded:: 3.5 @@ -357,6 +354,25 @@ attributes: Functions wrapped in :func:`functools.partial` now return ``True`` if the wrapped function is a :term:`coroutine function`. + .. versionchanged:: 3.12 + Sync functions marked with :func:`markcoroutinefunction` now return + ``True``. + + +.. function:: markcoroutinefunction(func) + + Decorator to mark a callable as a :term:`coroutine function` if it would not + otherwise be detected by :func:`iscoroutinefunction`. 
+ + This may be of use for sync functions that return a :term:`coroutine`, if + the function is passed to an API that requires :func:`iscoroutinefunction`. + + When possible, using an :keyword:`async def` function is preferred. Also + acceptable is calling the function and testing the return with + :func:`iscoroutine`. + + .. versionadded:: 3.12 + .. function:: iscoroutine(object) @@ -558,6 +574,8 @@ Retrieving source code object and the line number indicates where in the original source file the first line of code was found. An :exc:`OSError` is raised if the source code cannot be retrieved. + A :exc:`TypeError` is raised if the object is a built-in module, class, or + function. .. versionchanged:: 3.3 :exc:`OSError` is raised instead of :exc:`IOError`, now an alias of the @@ -570,6 +588,8 @@ Retrieving source code class, method, function, traceback, frame, or code object. The source code is returned as a single string. An :exc:`OSError` is raised if the source code cannot be retrieved. + A :exc:`TypeError` is raised if the object is a built-in module, class, or + function. .. versionchanged:: 3.3 :exc:`OSError` is raised instead of :exc:`IOError`, now an alias of the @@ -620,6 +640,9 @@ function. Accepts a wide range of Python callables, from plain functions and classes to :func:`functools.partial` objects. + If the passed object has a ``__signature__`` attribute, this function + returns it without further computations. + For objects defined in modules using stringized annotations (``from __future__ import annotations``), :func:`signature` will attempt to automatically un-stringize the annotations using @@ -669,11 +692,11 @@ function. The optional *return_annotation* argument, can be an arbitrary Python object, is the "return" annotation of the callable. - Signature objects are *immutable*. Use :meth:`Signature.replace` to make a - modified copy. + Signature objects are *immutable*. Use :meth:`Signature.replace` or + :func:`copy.replace` to make a modified copy. .. versionchanged:: 3.5 - Signature objects are picklable and hashable. + Signature objects are picklable and :term:`hashable`. .. attribute:: Signature.empty @@ -710,7 +733,7 @@ function. .. method:: Signature.replace(*[, parameters][, return_annotation]) - Create a new Signature instance based on the instance replace was invoked + Create a new Signature instance based on the instance :meth:`replace` was invoked on. It is possible to pass different ``parameters`` and/or ``return_annotation`` to override the corresponding properties of the base signature. To remove return_annotation from the copied Signature, pass in @@ -720,11 +743,15 @@ function. >>> def test(a, b): ... pass + ... >>> sig = signature(test) >>> new_sig = sig.replace(return_annotation="new return anno") >>> str(new_sig) "(a, b) -> 'new return anno'" + Signature objects are also supported by generic function + :func:`copy.replace`. + .. classmethod:: Signature.from_callable(obj, *, follow_wrapped=True, globalns=None, localns=None) Return a :class:`Signature` (or its subclass) object for a given callable @@ -739,6 +766,8 @@ function. sig = MySignature.from_callable(min) assert isinstance(sig, MySignature) + Its behavior is otherwise identical to that of :func:`signature`. + .. versionadded:: 3.5 .. versionadded:: 3.10 @@ -748,10 +777,10 @@ function. .. class:: Parameter(name, kind, *, default=Parameter.empty, annotation=Parameter.empty) Parameter objects are *immutable*. 
Instead of modifying a Parameter object, - you can use :meth:`Parameter.replace` to create a modified copy. + you can use :meth:`Parameter.replace` or :func:`copy.replace` to create a modified copy. .. versionchanged:: 3.5 - Parameter objects are picklable and hashable. + Parameter objects are picklable and :term:`hashable`. .. attribute:: Parameter.empty @@ -785,8 +814,9 @@ function. .. attribute:: Parameter.kind - Describes how argument values are bound to the parameter. Possible values - (accessible via :class:`Parameter`, like ``Parameter.KEYWORD_ONLY``): + Describes how argument values are bound to the parameter. The possible + values are accessible via :class:`Parameter` (like ``Parameter.KEYWORD_ONLY``), + and support comparison and ordering, in the following order: .. tabularcolumns:: |l|L| @@ -870,6 +900,8 @@ function. >>> str(param.replace(default=Parameter.empty, annotation='spam')) "foo:'spam'" + Parameter objects are also supported by generic function :func:`copy.replace`. + .. versionchanged:: 3.4 In Python 3.3 Parameter objects were allowed to have ``name`` set to ``None`` if their ``kind`` was set to ``POSITIONAL_ONLY``. @@ -1059,6 +1091,7 @@ Classes and functions >>> from inspect import getcallargs >>> def f(a, b=1, *pos, **named): ... pass + ... >>> getcallargs(f, 1, 2, 3) == {'a': 1, 'named': {}, 'b': 2, 'pos': (3,)} True >>> getcallargs(f, a=2, x=4) == {'a': 2, 'named': {'x': 4}, 'b': 1, 'pos': ()} @@ -1421,8 +1454,8 @@ code execution:: pass -Current State of Generators and Coroutines ------------------------------------------- +Current State of Generators, Coroutines, and Asynchronous Generators +-------------------------------------------------------------------- When implementing coroutine schedulers and for other advanced uses of generators, it is useful to determine whether a generator is currently @@ -1435,10 +1468,11 @@ generator to be determined easily. Get current state of a generator-iterator. Possible states are: - * GEN_CREATED: Waiting to start execution. - * GEN_RUNNING: Currently being executed by the interpreter. - * GEN_SUSPENDED: Currently suspended at a yield expression. - * GEN_CLOSED: Execution has completed. + + * GEN_CREATED: Waiting to start execution. + * GEN_RUNNING: Currently being executed by the interpreter. + * GEN_SUSPENDED: Currently suspended at a yield expression. + * GEN_CLOSED: Execution has completed. .. versionadded:: 3.2 @@ -1450,13 +1484,31 @@ generator to be determined easily. ``cr_frame`` attributes. Possible states are: - * CORO_CREATED: Waiting to start execution. - * CORO_RUNNING: Currently being executed by the interpreter. - * CORO_SUSPENDED: Currently suspended at an await expression. - * CORO_CLOSED: Execution has completed. + + * CORO_CREATED: Waiting to start execution. + * CORO_RUNNING: Currently being executed by the interpreter. + * CORO_SUSPENDED: Currently suspended at an await expression. + * CORO_CLOSED: Execution has completed. .. versionadded:: 3.5 +.. function:: getasyncgenstate(agen) + + Get current state of an asynchronous generator object. The function is + intended to be used with asynchronous iterator objects created by + :keyword:`async def` functions which use the :keyword:`yield` statement, + but will accept any asynchronous generator-like object that has + ``ag_running`` and ``ag_frame`` attributes. + + Possible states are: + + * AGEN_CREATED: Waiting to start execution. + * AGEN_RUNNING: Currently being executed by the interpreter. + * AGEN_SUSPENDED: Currently suspended at a yield expression. 
+ * AGEN_CLOSED: Execution has completed. + + .. versionadded:: 3.12 + The current internal state of the generator can also be queried. This is mostly useful for testing purposes, to ensure that internal state is being updated as expected: @@ -1488,6 +1540,14 @@ updated as expected: .. versionadded:: 3.5 +.. function:: getasyncgenlocals(agen) + + This function is analogous to :func:`~inspect.getgeneratorlocals`, but + works for asynchronous generator objects created by :keyword:`async def` + functions which use the :keyword:`yield` statement. + + .. versionadded:: 3.12 + .. _inspect-module-co-flags: @@ -1556,6 +1616,39 @@ the following flags: for any introspection needs. +Buffer flags +------------ + +.. class:: BufferFlags + + This is an :class:`enum.IntFlag` that represents the flags that + can be passed to the :meth:`~object.__buffer__` method of objects + implementing the :ref:`buffer protocol `. + + The meaning of the flags is explained at :ref:`buffer-request-types`. + + .. attribute:: BufferFlags.SIMPLE + .. attribute:: BufferFlags.WRITABLE + .. attribute:: BufferFlags.FORMAT + .. attribute:: BufferFlags.ND + .. attribute:: BufferFlags.STRIDES + .. attribute:: BufferFlags.C_CONTIGUOUS + .. attribute:: BufferFlags.F_CONTIGUOUS + .. attribute:: BufferFlags.ANY_CONTIGUOUS + .. attribute:: BufferFlags.INDIRECT + .. attribute:: BufferFlags.CONTIG + .. attribute:: BufferFlags.CONTIG_RO + .. attribute:: BufferFlags.STRIDED + .. attribute:: BufferFlags.STRIDED_RO + .. attribute:: BufferFlags.RECORDS + .. attribute:: BufferFlags.RECORDS_RO + .. attribute:: BufferFlags.FULL + .. attribute:: BufferFlags.FULL_RO + .. attribute:: BufferFlags.READ + .. attribute:: BufferFlags.WRITE + + .. versionadded:: 3.12 + .. _inspect-module-cli: Command Line Interface @@ -1570,6 +1663,6 @@ By default, accepts the name of a module and prints the source of that module. A class or function within the module can be printed instead by appended a colon and the qualified name of the target object. -.. cmdoption:: --details +.. option:: --details Print information about the specified object rather than the source code diff --git a/Doc/library/internet.rst b/Doc/library/internet.rst index ff58dcf4d89c36f..681769a4820dba6 100644 --- a/Doc/library/internet.rst +++ b/Doc/library/internet.rst @@ -9,7 +9,7 @@ Internet Protocols and Support single: Internet single: World Wide Web -.. index:: module: socket +.. index:: pair: module; socket The modules described in this chapter implement internet protocols and support for related technology. They are all implemented in Python. Most of these diff --git a/Doc/library/intro.rst b/Doc/library/intro.rst index 1020924038e91fb..5a4c9b8b16ab3b9 100644 --- a/Doc/library/intro.rst +++ b/Doc/library/intro.rst @@ -114,7 +114,7 @@ DOM APIs as well as limited networking capabilities with JavaScript's .. _WebAssembly: https://webassembly.org/ .. _Emscripten: https://emscripten.org/ -.. _Emscripten Networking: https://emscripten.org/docs/porting/networking.html> +.. _Emscripten Networking: https://emscripten.org/docs/porting/networking.html .. _WASI: https://wasi.dev/ .. _wasmtime: https://wasmtime.dev/ .. _Pyodide: https://pyodide.org/ diff --git a/Doc/library/io.rst b/Doc/library/io.rst index 0968509fbafec29..6736aa9ee2b0efc 100644 --- a/Doc/library/io.rst +++ b/Doc/library/io.rst @@ -38,9 +38,9 @@ location), or only sequential access (for example in the case of a socket or pipe). All streams are careful about the type of data you give to them. 
For example -giving a :class:`str` object to the ``write()`` method of a binary stream +giving a :class:`str` object to the :meth:`!write` method of a binary stream will raise a :exc:`TypeError`. So will giving a :class:`bytes` object to the -``write()`` method of a text stream. +:meth:`!write` method of a text stream. .. versionchanged:: 3.3 Operations that used to raise :exc:`IOError` now raise :exc:`OSError`, since @@ -146,7 +146,7 @@ Opt-in EncodingWarning See :pep:`597` for more details. To find where the default locale encoding is used, you can enable -the ``-X warn_default_encoding`` command line option or set the +the :option:`-X warn_default_encoding <-X>` command line option or set the :envvar:`PYTHONWARNDEFAULTENCODING` environment variable, which will emit an :exc:`EncodingWarning` when the default encoding is used. @@ -175,7 +175,7 @@ High-level Module Interface .. audit-event:: open path,mode,flags io.open This function raises an :ref:`auditing event ` ``open`` with - arguments ``path``, ``mode`` and ``flags``. The ``mode`` and ``flags`` + arguments *path*, *mode* and *flags*. The *mode* and *flags* arguments may have been modified or inferred from the original call. @@ -184,10 +184,10 @@ High-level Module Interface Opens the provided file with mode ``'rb'``. This function should be used when the intent is to treat the contents as executable code. - ``path`` should be a :class:`str` and an absolute path. + *path* should be a :class:`str` and an absolute path. The behavior of this function may be overridden by an earlier call to the - :c:func:`PyFile_SetOpenCodeHook`. However, assuming that ``path`` is a + :c:func:`PyFile_SetOpenCodeHook`. However, assuming that *path* is a :class:`str` and an absolute path, ``open_code(path)`` should always behave the same as ``open(path, 'rb')``. Overriding the behavior is intended for additional validation or preprocessing of the file. @@ -253,12 +253,12 @@ The implementation of I/O streams is organized as a hierarchy of classes. First specify the various categories of streams, then concrete classes providing the standard stream implementations. - .. note:: +.. note:: - The abstract base classes also provide default implementations of some - methods in order to help implementation of concrete stream classes. For - example, :class:`BufferedIOBase` provides unoptimized implementations of - :meth:`~IOBase.readinto` and :meth:`~IOBase.readline`. + The abstract base classes also provide default implementations of some + methods in order to help implementation of concrete stream classes. For + example, :class:`BufferedIOBase` provides unoptimized implementations of + :meth:`!readinto` and :meth:`!readline`. At the top of the I/O hierarchy is the abstract base class :class:`IOBase`. It defines the basic interface to a stream. Note, however, that there is no @@ -320,8 +320,8 @@ I/O Base Classes implementations represent a file that cannot be read, written or seeked. - Even though :class:`IOBase` does not declare :meth:`read` - or :meth:`write` because their signatures will vary, implementations and + Even though :class:`IOBase` does not declare :meth:`!read` + or :meth:`!write` because their signatures will vary, implementations and clients should consider those methods part of the interface. Also, implementations may raise a :exc:`ValueError` (or :exc:`UnsupportedOperation`) when operations they do not support are called. @@ -379,8 +379,8 @@ I/O Base Classes .. method:: readable() - Return ``True`` if the stream can be read from. 
If ``False``, :meth:`read` - will raise :exc:`OSError`. + Return ``True`` if the stream can be read from. + If ``False``, :meth:`!read` will raise :exc:`OSError`. .. method:: readline(size=-1, /) @@ -401,29 +401,28 @@ I/O Base Classes hint. Note that it's already possible to iterate on file objects using ``for - line in file: ...`` without calling ``file.readlines()``. + line in file: ...`` without calling :meth:`!file.readlines`. - .. method:: seek(offset, whence=SEEK_SET, /) + .. method:: seek(offset, whence=os.SEEK_SET, /) - Change the stream position to the given byte *offset*. *offset* is - interpreted relative to the position indicated by *whence*. The default - value for *whence* is :data:`SEEK_SET`. Values for *whence* are: + Change the stream position to the given byte *offset*, + interpreted relative to the position indicated by *whence*, + and return the new absolute position. + Values for *whence* are: - * :data:`SEEK_SET` or ``0`` -- start of the stream (the default); + * :data:`os.SEEK_SET` or ``0`` -- start of the stream (the default); *offset* should be zero or positive - * :data:`SEEK_CUR` or ``1`` -- current stream position; *offset* may - be negative - * :data:`SEEK_END` or ``2`` -- end of the stream; *offset* is usually - negative - - Return the new absolute position. + * :data:`os.SEEK_CUR` or ``1`` -- current stream position; + *offset* may be negative + * :data:`os.SEEK_END` or ``2`` -- end of the stream; + *offset* is usually negative .. versionadded:: 3.1 - The ``SEEK_*`` constants. + The :data:`!SEEK_*` constants. .. versionadded:: 3.3 Some operating systems could support additional values, like - :data:`os.SEEK_HOLE` or :data:`os.SEEK_DATA`. The valid values + :const:`os.SEEK_HOLE` or :const:`os.SEEK_DATA`. The valid values for a file could depend on it being open in text or binary mode. .. method:: seekable() @@ -450,7 +449,7 @@ I/O Base Classes .. method:: writable() Return ``True`` if the stream supports writing. If ``False``, - :meth:`write` and :meth:`truncate` will raise :exc:`OSError`. + :meth:`!write` and :meth:`truncate` will raise :exc:`OSError`. .. method:: writelines(lines, /) @@ -654,8 +653,9 @@ Raw File I/O implies writing, so this mode behaves in a similar way to ``'w'``. Add a ``'+'`` to the mode to allow simultaneous reading and writing. - The :meth:`read` (when called with a positive argument), :meth:`readinto` - and :meth:`write` methods on this class will only make one system call. + The :meth:`~RawIOBase.read` (when called with a positive argument), + :meth:`~RawIOBase.readinto` and :meth:`~RawIOBase.write` methods on this + class will only make one system call. A custom opener can be used by passing a callable as *opener*. The underlying file descriptor for the file object is then obtained by calling *opener* with @@ -791,8 +791,8 @@ than raw I/O does. object under various conditions, including: * when the buffer gets too small for all pending data; - * when :meth:`flush()` is called; - * when a :meth:`seek()` is requested (for :class:`BufferedRandom` objects); + * when :meth:`flush` is called; + * when a :meth:`~IOBase.seek` is requested (for :class:`BufferedRandom` objects); * when the :class:`BufferedWriter` object is closed or destroyed. The constructor creates a :class:`BufferedWriter` for the given writeable @@ -826,8 +826,8 @@ than raw I/O does. :data:`DEFAULT_BUFFER_SIZE`. :class:`BufferedRandom` is capable of anything :class:`BufferedReader` or - :class:`BufferedWriter` can do. 
In addition, :meth:`seek` and :meth:`tell` - are guaranteed to be implemented. + :class:`BufferedWriter` can do. In addition, :meth:`~IOBase.seek` and + :meth:`~IOBase.tell` are guaranteed to be implemented. .. class:: BufferedRWPair(reader, writer, buffer_size=DEFAULT_BUFFER_SIZE, /) @@ -904,7 +904,7 @@ Text I/O .. method:: readline(size=-1, /) - Read until newline or EOF and return a single ``str``. If the stream is + Read until newline or EOF and return a single :class:`str`. If the stream is already at EOF, an empty string is returned. If *size* is specified, at most *size* characters will be read. @@ -913,22 +913,22 @@ Text I/O Change the stream position to the given *offset*. Behaviour depends on the *whence* parameter. The default value for *whence* is - :data:`SEEK_SET`. + :data:`!SEEK_SET`. - * :data:`SEEK_SET` or ``0``: seek from the start of the stream + * :data:`!SEEK_SET` or ``0``: seek from the start of the stream (the default); *offset* must either be a number returned by :meth:`TextIOBase.tell`, or zero. Any other *offset* value produces undefined behaviour. - * :data:`SEEK_CUR` or ``1``: "seek" to the current position; + * :data:`!SEEK_CUR` or ``1``: "seek" to the current position; *offset* must be zero, which is a no-operation (all other values are unsupported). - * :data:`SEEK_END` or ``2``: seek to the end of the stream; + * :data:`!SEEK_END` or ``2``: seek to the end of the stream; *offset* must be zero (all other values are unsupported). Return the new absolute position as an opaque number. .. versionadded:: 3.1 - The ``SEEK_*`` constants. + The :data:`!SEEK_*` constants. .. method:: tell() @@ -988,10 +988,10 @@ Text I/O takes place. If *newline* is any of the other legal values, any ``'\n'`` characters written are translated to the given string. - If *line_buffering* is ``True``, :meth:`flush` is implied when a call to + If *line_buffering* is ``True``, :meth:`~IOBase.flush` is implied when a call to write contains a newline character or a carriage return. - If *write_through* is ``True``, calls to :meth:`write` are guaranteed + If *write_through* is ``True``, calls to :meth:`~BufferedIOBase.write` are guaranteed not to be buffered: any data written on the :class:`TextIOWrapper` object is immediately handled to its underlying binary *buffer*. @@ -1021,8 +1021,8 @@ Text I/O .. versionadded:: 3.7 - .. method:: reconfigure(*[, encoding][, errors][, newline][, \ - line_buffering][, write_through]) + .. method:: reconfigure(*, encoding=None, errors=None, newline=None, \ + line_buffering=None, write_through=None) Reconfigure this text stream using new settings for *encoding*, *errors*, *newline*, *line_buffering* and *write_through*. @@ -1043,6 +1043,33 @@ Text I/O .. versionchanged:: 3.11 The method supports ``encoding="locale"`` option. + .. method:: seek(cookie, whence=os.SEEK_SET, /) + + Set the stream position. + Return the new stream position as an :class:`int`. + + Four operations are supported, + given by the following argument combinations: + + * ``seek(0, SEEK_SET)``: Rewind to the start of the stream. + * ``seek(cookie, SEEK_SET)``: Restore a previous position; + *cookie* **must be** a number returned by :meth:`tell`. + * ``seek(0, SEEK_END)``: Fast-forward to the end of the stream. + * ``seek(0, SEEK_CUR)``: Leave the current stream position unchanged. + + Any other argument combinations are invalid, + and may raise exceptions. + + .. seealso:: + + :data:`os.SEEK_SET`, :data:`os.SEEK_CUR`, and :data:`os.SEEK_END`. + + .. 
method:: tell() + + Return the stream position as an opaque number. + The return value of :meth:`!tell` can be given as input to :meth:`seek`, + to restore a previous stream position. + .. class:: StringIO(initial_value='', newline='\n') @@ -1070,7 +1097,7 @@ Text I/O .. method:: getvalue() - Return a ``str`` containing the entire contents of the buffer. + Return a :class:`str` containing the entire contents of the buffer. Newlines are decoded as if by :meth:`~TextIOBase.read`, although the stream position is not changed. @@ -1125,7 +1152,7 @@ Text I/O over a binary storage (such as a file) is significantly slower than binary I/O over the same storage, because it requires conversions between unicode and binary data using a character codec. This can become noticeable handling huge amounts of text data like large log files. Also, -:meth:`TextIOWrapper.tell` and :meth:`TextIOWrapper.seek` are both quite slow +:meth:`~TextIOBase.tell` and :meth:`~TextIOBase.seek` are both quite slow due to the reconstruction algorithm used. :class:`StringIO`, however, is a native in-memory unicode container and will @@ -1135,7 +1162,7 @@ Multi-threading ^^^^^^^^^^^^^^^ :class:`FileIO` objects are thread-safe to the extent that the operating system -calls (such as ``read(2)`` under Unix) they wrap are thread-safe too. +calls (such as :manpage:`read(2)` under Unix) they wrap are thread-safe too. Binary buffered objects (instances of :class:`BufferedReader`, :class:`BufferedWriter`, :class:`BufferedRandom` and :class:`BufferedRWPair`) diff --git a/Doc/library/ipaddress.rst b/Doc/library/ipaddress.rst index 9c2dff55703273f..1de36b643c4dca6 100644 --- a/Doc/library/ipaddress.rst +++ b/Doc/library/ipaddress.rst @@ -219,6 +219,13 @@ write code that handles both IP versions correctly. Address objects are ``True`` if the address is reserved for link-local usage. See :RFC:`3927`. + .. attribute:: ipv6_mapped + + :class:`IPv4Address` object representing the IPv4-mapped IPv6 address. See :RFC:`4291`. + + .. versionadded:: 3.13 + + .. _iana-ipv4-special-registry: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml .. _iana-ipv6-special-registry: https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml diff --git a/Doc/library/itertools.rst b/Doc/library/itertools.rst index 0b5978505a96729..ebb4ebcfa7618a1 100644 --- a/Doc/library/itertools.rst +++ b/Doc/library/itertools.rst @@ -33,7 +33,7 @@ by combining :func:`map` and :func:`count` to form ``map(f, count())``. These tools and their built-in counterparts also work well with the high-speed functions in the :mod:`operator` module. For example, the multiplication operator can be mapped across two vectors to form an efficient dot-product: -``sum(map(operator.mul, vector1, vector2))``. +``sum(starmap(operator.mul, zip(vec1, vec2, strict=True)))``. **Infinite iterators:** @@ -41,7 +41,7 @@ operator can be mapped across two vectors to form an efficient dot-product: ================== ================= ================================================= ========================================= Iterator Arguments Results Example ================== ================= ================================================= ========================================= -:func:`count` start, [step] start, start+step, start+2*step, ... ``count(10) --> 10 11 12 13 14 ...`` +:func:`count` [start[, step]] start, start+step, start+2*step, ... ``count(10) --> 10 11 12 13 14 ...`` :func:`cycle` p p0, p1, ... plast, p0, p1, ... 
``cycle('ABCD') --> A B C D A B C D ...`` :func:`repeat` elem [,n] elem, elem, elem, ... endlessly or up to n times ``repeat(10, 3) --> 10 10 10`` ================== ================= ================================================= ========================================= @@ -52,7 +52,7 @@ Iterator Arguments Results Iterator Arguments Results Example ============================ ============================ ================================================= ============================================================= :func:`accumulate` p [,func] p0, p0+p1, p0+p1+p2, ... ``accumulate([1,2,3,4,5]) --> 1 3 6 10 15`` -:func:`batched` p, n [p0, p1, ..., p_n-1], ... ``batched('ABCDEFG', n=3) --> ABC DEF G`` +:func:`batched` p, n (p0, p1, ..., p_n-1), ... ``batched('ABCDEFG', n=3) --> ABC DEF G`` :func:`chain` p, q, ... p0, p1, ... plast, q0, q1, ... ``chain('ABC', 'DEF') --> A B C D E F`` :func:`chain.from_iterable` iterable p0, p1, ... plast, q0, q1, ... ``chain.from_iterable(['ABC', 'DEF']) --> A B C D E F`` :func:`compress` data, selectors (d[0] if s[0]), (d[1] if s[1]), ... ``compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F`` @@ -147,10 +147,10 @@ loops that truncate the stream. >>> list(accumulate(data, max)) # running maximum [3, 4, 6, 6, 6, 9, 9, 9, 9, 9] - # Amortize a 5% loan of 1000 with 4 annual payments of 90 - >>> cashflows = [1000, -90, -90, -90, -90] - >>> list(accumulate(cashflows, lambda bal, pmt: bal*1.05 + pmt)) - [1000, 960.0, 918.0, 873.9000000000001, 827.5950000000001] + # Amortize a 5% loan of 1000 with 10 annual payments of 90 + >>> account_update = lambda bal, pmt: round(bal * 1.05) + pmt + >>> list(accumulate(repeat(-90, 10), account_update, initial=1_000)) + [1000, 960, 918, 874, 828, 779, 728, 674, 618, 559, 497] See :func:`functools.reduce` for a similar function that returns only the final accumulated value. @@ -166,11 +166,11 @@ loops that truncate the stream. .. function:: batched(iterable, n) - Batch data from the *iterable* into lists of length *n*. The last + Batch data from the *iterable* into tuples of length *n*. The last batch may be shorter than *n*. - Loops over the input iterable and accumulates data into lists up to - size *n*. The input is consumed lazily, just enough to fill a list. + Loops over the input iterable and accumulates data into tuples up to + size *n*. The input is consumed lazily, just enough to fill a batch. The result is yielded as soon as the batch is full or when the input iterable is exhausted: @@ -179,14 +179,14 @@ loops that truncate the stream. >>> flattened_data = ['roses', 'red', 'violets', 'blue', 'sugar', 'sweet'] >>> unflattened = list(batched(flattened_data, 2)) >>> unflattened - [['roses', 'red'], ['violets', 'blue'], ['sugar', 'sweet']] + [('roses', 'red'), ('violets', 'blue'), ('sugar', 'sweet')] >>> for batch in batched('ABCDEFG', 3): ... print(batch) ... - ['A', 'B', 'C'] - ['D', 'E', 'F'] - ['G'] + ('A', 'B', 'C') + ('D', 'E', 'F') + ('G',) Roughly equivalent to:: @@ -195,7 +195,7 @@ loops that truncate the stream. if n < 1: raise ValueError('n must be at least one') it = iter(iterable) - while (batch := list(islice(it, n))): + while batch := tuple(islice(it, n)): yield batch .. versionadded:: 3.12 @@ -398,7 +398,7 @@ loops that truncate the stream. .. function:: filterfalse(predicate, iterable) Make an iterator that filters elements from iterable returning only those for - which the predicate is ``False``. If *predicate* is ``None``, return the items + which the predicate is false. 
If *predicate* is ``None``, return the items that are false. Roughly equivalent to:: def filterfalse(predicate, iterable): @@ -769,8 +769,8 @@ well as with the built-in itertools such as ``map()``, ``filter()``, A secondary purpose of the recipes is to serve as an incubator. The ``accumulate()``, ``compress()``, and ``pairwise()`` itertools started out as -recipes. Currently, the ``iter_index()`` recipe is being tested to see -whether it proves its worth. +recipes. Currently, the ``sliding_window()`` and ``iter_index()`` recipes +are being tested to see whether they prove their worth. Substantially all of these recipes and many, many others can be installed from the `more-itertools project `_ found @@ -788,19 +788,42 @@ which incur interpreter overhead. .. testcode:: + import collections + import functools + import math + import operator + import random + def take(n, iterable): "Return first n items of the iterable as a list" return list(islice(iterable, n)) - def prepend(value, iterator): - "Prepend a single value in front of an iterator" + def prepend(value, iterable): + "Prepend a single value in front of an iterable" # prepend(1, [2, 3, 4]) --> 1 2 3 4 - return chain([value], iterator) + return chain([value], iterable) def tabulate(function, start=0): "Return function(0), function(1), ..." return map(function, count(start)) + def repeatfunc(func, times=None, *args): + """Repeat calls to func with specified arguments. + + Example: repeatfunc(random.random) + """ + if times is None: + return starmap(func, repeat(args)) + return starmap(func, repeat(args, times)) + + def flatten(list_of_lists): + "Flatten one level of nesting" + return chain.from_iterable(list_of_lists) + + def ncycles(iterable, n): + "Returns the sequence elements n times" + return chain.from_iterable(repeat(tuple(iterable), n)) + def tail(n, iterable): "Return an iterator over the last n items" # tail(3, 'ABCDEFG') --> E F G @@ -820,93 +843,70 @@ which incur interpreter overhead. "Returns the nth item or a default value" return next(islice(iterable, n, None), default) + def quantify(iterable, pred=bool): + "Given a predicate that returns True or False, count the True results." + return sum(map(pred, iterable)) + def all_equal(iterable): "Returns True if all the elements are equal to each other" g = groupby(iterable) return next(g, True) and not next(g, False) - def quantify(iterable, pred=bool): - "Count how many times the predicate is true" - return sum(map(pred, iterable)) - - def pad_none(iterable): - "Returns the sequence elements and then returns None indefinitely." - return chain(iterable, repeat(None)) - - def ncycles(iterable, n): - "Returns the sequence elements n times" - return chain.from_iterable(repeat(tuple(iterable), n)) - - def dotproduct(vec1, vec2): - return sum(map(operator.mul, vec1, vec2)) + def first_true(iterable, default=False, pred=None): + """Returns the first true value in the iterable. 
- def convolve(signal, kernel): - # See: https://betterexplained.com/articles/intuitive-convolution/ - # convolve(data, [0.25, 0.25, 0.25, 0.25]) --> Moving average (blur) - # convolve(data, [1, -1]) --> 1st finite difference (1st derivative) - # convolve(data, [1, -2, 1]) --> 2nd finite difference (2nd derivative) - kernel = tuple(kernel)[::-1] - n = len(kernel) - window = collections.deque([0], maxlen=n) * n - for x in chain(signal, repeat(0, n-1)): - window.append(x) - yield sum(map(operator.mul, kernel, window)) + If no true value is found, returns *default* - def polynomial_from_roots(roots): - """Compute a polynomial's coefficients from its roots. + If *pred* is not None, returns the first item + for which pred(item) is true. - (x - 5) (x + 4) (x - 3) expands to: x³ -4x² -17x + 60 """ - # polynomial_from_roots([5, -4, 3]) --> [1, -4, -17, 60] - roots = list(map(operator.neg, roots)) - return [ - sum(map(math.prod, combinations(roots, k))) - for k in range(len(roots) + 1) - ] + # first_true([a,b,c], x) --> a or b or c or x + # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x + return next(filter(pred, iterable), default) - def iter_index(iterable, value, start=0): + def iter_index(iterable, value, start=0, stop=None): "Return indices where a value occurs in a sequence or iterable." # iter_index('AABCADEAF', 'A') --> 0 1 4 7 - try: - seq_index = iterable.index - except AttributeError: + seq_index = getattr(iterable, 'index', None) + if seq_index is None: # Slow path for general iterables - it = islice(iterable, start, None) + it = islice(iterable, start, stop) for i, element in enumerate(it, start): if element is value or element == value: yield i else: # Fast path for sequences + stop = len(iterable) if stop is None else stop i = start - 1 try: while True: - yield (i := seq_index(value, i+1)) + yield (i := seq_index(value, i+1, stop)) except ValueError: pass - def sieve(n): - "Primes less than n" - # sieve(30) --> 2 3 5 7 11 13 17 19 23 29 - data = bytearray((0, 1)) * (n // 2) - data[:3] = 0, 0, 0 - limit = math.isqrt(n) + 1 - for p in compress(range(limit), data): - data[p*p : n : p+p] = bytes(len(range(p*p, n, p+p))) - data[2] = 1 - return iter_index(data, 1) if n > 2 else iter([]) + def iter_except(func, exception, first=None): + """ Call a function repeatedly until an exception is raised. - def flatten(list_of_lists): - "Flatten one level of nesting" - return chain.from_iterable(list_of_lists) + Converts a call-until-exception interface to an iterator interface. + Like builtins.iter(func, sentinel) but uses an exception instead + of a sentinel to end the loop. - def repeatfunc(func, times=None, *args): - """Repeat calls to func with specified arguments. 
+ Examples: + iter_except(functools.partial(heappop, h), IndexError) # priority queue iterator + iter_except(d.popitem, KeyError) # non-blocking dict iterator + iter_except(d.popleft, IndexError) # non-blocking deque iterator + iter_except(q.get_nowait, Queue.Empty) # loop over a producer Queue + iter_except(s.pop, KeyError) # non-blocking set iterator - Example: repeatfunc(random.random) """ - if times is None: - return starmap(func, repeat(args)) - return starmap(func, repeat(args, times)) + try: + if first is not None: + yield first() # For database APIs needing an initial cast to db.first() + while True: + yield func() + except exception: + pass def grouper(iterable, n, *, incomplete='fill', fillvalue=None): "Collect data into non-overlapping fixed-length chunks or blocks" @@ -923,18 +923,10 @@ which incur interpreter overhead. else: raise ValueError('Expected fill, strict, or ignore') - def triplewise(iterable): - "Return overlapping triplets from an iterable" - # triplewise('ABCDEFG') --> ABC BCD CDE DEF EFG - for (a, _), (b, c) in pairwise(pairwise(iterable)): - yield a, b, c - def sliding_window(iterable, n): # sliding_window('ABCDEFG', 4) --> ABCD BCDE CDEF DEFG it = iter(iterable) - window = collections.deque(islice(it, n), maxlen=n) - if len(window) == n: - yield tuple(window) + window = collections.deque(islice(it, n-1), maxlen=n) for x in it: window.append(x) yield tuple(window) @@ -954,11 +946,20 @@ which incur interpreter overhead. nexts = cycle(islice(nexts, num_active)) def partition(pred, iterable): - "Use a predicate to partition entries into false entries and true entries" + """Partition entries into false entries and true entries. + + If *pred* is slow, consider wrapping it with functools.lru_cache(). + """ # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 t1, t2 = tee(iterable) return filterfalse(pred, t1), filter(pred, t2) + def subslices(seq): + "Return all contiguous non-empty subslices of a sequence" + # subslices('ABCD') --> A AB ABC ABCD B BC BCD C CD D + slices = starmap(slice, combinations(range(len(seq) + 1), 2)) + return map(operator.getitem, repeat(seq), slices) + def before_and_after(predicate, it): """ Variant of takewhile() that allows complete access to the remainder of the iterator. @@ -988,29 +989,17 @@ which incur interpreter overhead. yield from it return true_iterator(), remainder_iterator() - def subslices(seq): - "Return all contiguous non-empty subslices of a sequence" - # subslices('ABCD') --> A AB ABC ABCD B BC BCD C CD D - slices = starmap(slice, combinations(range(len(seq) + 1), 2)) - return map(operator.getitem, repeat(seq), slices) - - def powerset(iterable): - "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" - s = list(iterable) - return chain.from_iterable(combinations(s, r) for r in range(len(s)+1)) - def unique_everseen(iterable, key=None): "List unique elements, preserving order. Remember all elements ever seen." # unique_everseen('AAAABBBCCDAABBB') --> A B C D - # unique_everseen('ABBCcAD', str.lower) --> A B C D + # unique_everseen('ABBcCAD', str.lower) --> A B c D seen = set() if key is None: for element in filterfalse(seen.__contains__, iterable): seen.add(element) yield element - # Note: The steps shown above are intended to demonstrate - # filterfalse(). 
For order preserving deduplication, - # a better solution is: + # For order preserving deduplication, + # a faster but non-lazy solution is: # yield from dict.fromkeys(iterable) else: for element in iterable: @@ -1018,48 +1007,125 @@ which incur interpreter overhead. if k not in seen: seen.add(k) yield element + # For use cases that allow the last matching element to be returned, + # a faster but non-lazy solution is: + # t1, t2 = tee(iterable) + # yield from dict(zip(map(key, t1), t2)).values() def unique_justseen(iterable, key=None): "List unique elements, preserving order. Remember only the element just seen." # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B - # unique_justseen('ABBCcAD', str.lower) --> A B C A D + # unique_justseen('ABBcCAD', str.lower) --> A B c A D return map(next, map(operator.itemgetter(1), groupby(iterable, key))) - def iter_except(func, exception, first=None): - """ Call a function repeatedly until an exception is raised. - Converts a call-until-exception interface to an iterator interface. - Like builtins.iter(func, sentinel) but uses an exception instead - of a sentinel to end the loop. +The following recipes have a more mathematical flavor: - Examples: - iter_except(functools.partial(heappop, h), IndexError) # priority queue iterator - iter_except(d.popitem, KeyError) # non-blocking dict iterator - iter_except(d.popleft, IndexError) # non-blocking deque iterator - iter_except(q.get_nowait, Queue.Empty) # loop over a producer Queue - iter_except(s.pop, KeyError) # non-blocking set iterator +.. testcode:: + + def powerset(iterable): + "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" + s = list(iterable) + return chain.from_iterable(combinations(s, r) for r in range(len(s)+1)) + + def sum_of_squares(it): + "Add up the squares of the input values." + # sum_of_squares([10, 20, 30]) -> 1400 + return math.sumprod(*tee(it)) + + def transpose(it): + "Swap the rows and columns of the input." + # transpose([(1, 2, 3), (11, 22, 33)]) --> (1, 11) (2, 22) (3, 33) + return zip(*it, strict=True) + def matmul(m1, m2): + "Multiply two matrices." + # matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]) --> (49, 80), (41, 60) + n = len(m2[0]) + return batched(starmap(math.sumprod, product(m1, transpose(m2))), n) + + def convolve(signal, kernel): + """Discrete linear convolution of two iterables. + + The kernel is fully consumed before the calculations begin. + The signal is consumed lazily and can be infinite. + + Convolutions are mathematically commutative. + If the signal and kernel are swapped, + the output will be the same. + + Article: https://betterexplained.com/articles/intuitive-convolution/ + Video: https://www.youtube.com/watch?v=KuXjwB4LzSA """ - try: - if first is not None: - yield first() # For database APIs needing an initial cast to db.first() - while True: - yield func() - except exception: - pass + # convolve(data, [0.25, 0.25, 0.25, 0.25]) --> Moving average (blur) + # convolve(data, [1/2, 0, -1/2]) --> 1st derivative estimate + # convolve(data, [1, -2, 1]) --> 2nd derivative estimate + kernel = tuple(kernel)[::-1] + n = len(kernel) + padded_signal = chain(repeat(0, n-1), signal, repeat(0, n-1)) + windowed_signal = sliding_window(padded_signal, n) + return map(math.sumprod, repeat(kernel), windowed_signal) - def first_true(iterable, default=False, pred=None): - """Returns the first true value in the iterable. + def polynomial_from_roots(roots): + """Compute a polynomial's coefficients from its roots. 
- If no true value is found, returns *default* + (x - 5) (x + 4) (x - 3) expands to: x³ -4x² -17x + 60 + """ + # polynomial_from_roots([5, -4, 3]) --> [1, -4, -17, 60] + factors = zip(repeat(1), map(operator.neg, roots)) + return list(functools.reduce(convolve, factors, [1])) - If *pred* is not None, returns the first item - for which pred(item) is true. + def polynomial_eval(coefficients, x): + """Evaluate a polynomial at a specific value. + Computes with better numeric stability than Horner's method. """ - # first_true([a,b,c], x) --> a or b or c or x - # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x - return next(filter(pred, iterable), default) + # Evaluate x³ -4x² -17x + 60 at x = 2.5 + # polynomial_eval([1, -4, -17, 60], x=2.5) --> 8.125 + n = len(coefficients) + if not n: + return type(x)(0) + powers = map(pow, repeat(x), reversed(range(n))) + return math.sumprod(coefficients, powers) + + def polynomial_derivative(coefficients): + """Compute the first derivative of a polynomial. + + f(x) = x³ -4x² -17x + 60 + f'(x) = 3x² -8x -17 + """ + # polynomial_derivative([1, -4, -17, 60]) -> [3, -8, -17] + n = len(coefficients) + powers = reversed(range(1, n)) + return list(map(operator.mul, coefficients, powers)) + + def sieve(n): + "Primes less than n." + # sieve(30) --> 2 3 5 7 11 13 17 19 23 29 + if n > 2: + yield 2 + start = 3 + data = bytearray((0, 1)) * (n // 2) + limit = math.isqrt(n) + 1 + for p in iter_index(data, 1, start, limit): + yield from iter_index(data, 1, start, p*p) + data[p*p : n : p+p] = bytes(len(range(p*p, n, p+p))) + start = p*p + yield from iter_index(data, 1, start) + + def factor(n): + "Prime factors of n." + # factor(99) --> 3 3 11 + # factor(1_000_000_000_000_007) --> 47 59 360620266859 + # factor(1_000_000_000_000_403) --> 1000000000000403 + for prime in sieve(math.isqrt(n) + 1): + while not n % prime: + yield prime + n //= prime + if n == 1: + return + if n > 1: + yield n def nth_combination(iterable, r, index): "Equivalent to list(combinations(iterable, r))[index]" @@ -1079,6 +1145,7 @@ which incur interpreter overhead. result.append(pool[-1-n]) return tuple(result) + .. doctest:: :hide: @@ -1137,11 +1204,6 @@ which incur interpreter overhead. Now, we test all of the itertool recipes - >>> import operator - >>> import collections - >>> import math - >>> import random - >>> take(10, count()) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] @@ -1193,14 +1255,19 @@ which incur interpreter overhead. >>> take(5, map(int, repeatfunc(random.random))) [0, 0, 0, 0, 0] - >>> list(islice(pad_none('abc'), 0, 6)) - ['a', 'b', 'c', None, None, None] - >>> list(ncycles('abc', 3)) ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'] - >>> dotproduct([1,2,3], [4,5,6]) - 32 + >>> sum_of_squares([10, 20, 30]) + 1400 + + >>> list(transpose([(1, 2, 3), (11, 22, 33)])) + [(1, 11), (2, 22), (3, 33)] + + >>> list(matmul([(7, 5), (3, 5)], [[2, 5], [7, 9]])) + [(49, 80), (41, 60)] + >>> list(matmul([[2, 5], [7, 9], [3, 4]], [[7, 11, 5, 4, 9], [3, 5, 2, 6, 3]])) + [(29, 47, 20, 38, 33), (76, 122, 53, 82, 90), (33, 53, 23, 36, 39)] >>> data = [20, 40, 24, 32, 20, 28, 16] >>> list(convolve(data, [0.25, 0.25, 0.25, 0.25])) @@ -1210,6 +1277,37 @@ which incur interpreter overhead. 
>>> list(convolve(data, [1, -2, 1])) [20, 0, -36, 24, -20, 20, -20, -4, 16] + >>> from fractions import Fraction + >>> from decimal import Decimal + >>> polynomial_eval([1, -4, -17, 60], x=2) + 18 + >>> x = 2; x**3 - 4*x**2 -17*x + 60 + 18 + >>> polynomial_eval([1, -4, -17, 60], x=2.5) + 8.125 + >>> x = 2.5; x**3 - 4*x**2 -17*x + 60 + 8.125 + >>> polynomial_eval([1, -4, -17, 60], x=Fraction(2, 3)) + Fraction(1274, 27) + >>> x = Fraction(2, 3); x**3 - 4*x**2 -17*x + 60 + Fraction(1274, 27) + >>> polynomial_eval([1, -4, -17, 60], x=Decimal('1.75')) + Decimal('23.359375') + >>> x = Decimal('1.75'); x**3 - 4*x**2 -17*x + 60 + Decimal('23.359375') + >>> polynomial_eval([], 2) + 0 + >>> polynomial_eval([], 2.5) + 0.0 + >>> polynomial_eval([], Fraction(2, 3)) + Fraction(0, 1) + >>> polynomial_eval([], Decimal('1.75')) + Decimal('0') + >>> polynomial_eval([11], 7) == 11 + True + >>> polynomial_eval([11, 2], 7) == 11 * 7 + 2 + True + >>> polynomial_from_roots([5, -4, 3]) [1, -4, -17, 60] >>> factored = lambda x: (x - 5) * (x + 4) * (x - 3) @@ -1217,6 +1315,9 @@ which incur interpreter overhead. >>> all(factored(x) == expanded(x) for x in range(-10, 11)) True + >>> polynomial_derivative([1, -4, -17, 60]) + [3, -8, -17] + >>> list(iter_index('AABCADEAF', 'A')) [0, 1, 4, 7] >>> list(iter_index('AABCADEAF', 'B')) @@ -1237,6 +1338,31 @@ which incur interpreter overhead. [] >>> list(iter_index(iter('AABCADEAF'), 'A', 10)) [] + >>> list(iter_index('AABCADEAF', 'A', 1, 7)) + [1, 4] + >>> list(iter_index(iter('AABCADEAF'), 'A', 1, 7)) + [1, 4] + >>> # Verify that ValueErrors not swallowed (gh-107208) + >>> def assert_no_value(iterable, forbidden_value): + ... for item in iterable: + ... if item == forbidden_value: + ... raise ValueError + ... yield item + ... + >>> list(iter_index(assert_no_value('AABCADEAF', 'B'), 'A')) + Traceback (most recent call last): + ... + ValueError + >>> # Verify that both paths can find identical NaN values + >>> x = float('NaN') + >>> y = float('NaN') + >>> list(iter_index([0, x, x, y, 0], x)) + [1, 2] + >>> list(iter_index(iter([0, x, x, y, 0]), x)) + [1, 2] + >>> # Test list input. Lists do not support None for the stop argument + >>> list(iter_index(list('AABCADEAF'), 'A')) + [0, 1, 4, 7] >>> list(sieve(30)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] @@ -1257,6 +1383,51 @@ which incur interpreter overhead. 
>>> set(sieve(10_000)).isdisjoint(carmichael) True + >>> list(factor(99)) # Code example 1 + [3, 3, 11] + >>> list(factor(1_000_000_000_000_007)) # Code example 2 + [47, 59, 360620266859] + >>> list(factor(1_000_000_000_000_403)) # Code example 3 + [1000000000000403] + >>> list(factor(0)) + [] + >>> list(factor(1)) + [] + >>> list(factor(2)) + [2] + >>> list(factor(3)) + [3] + >>> list(factor(4)) + [2, 2] + >>> list(factor(5)) + [5] + >>> list(factor(6)) + [2, 3] + >>> list(factor(7)) + [7] + >>> list(factor(8)) + [2, 2, 2] + >>> list(factor(9)) + [3, 3] + >>> list(factor(10)) + [2, 5] + >>> list(factor(128_884_753_939)) # large prime + [128884753939] + >>> list(factor(999953 * 999983)) # large semiprime + [999953, 999983] + >>> list(factor(6 ** 20)) == [2] * 20 + [3] * 20 # large power + True + >>> list(factor(909_909_090_909)) # large multiterm composite + [3, 3, 7, 13, 13, 751, 113797] + >>> math.prod([3, 3, 7, 13, 13, 751, 113797]) + 909909090909 + >>> all(math.prod(factor(n)) == n for n in range(1, 2_000)) + True + >>> all(set(factor(n)) <= set(sieve(n+1)) for n in range(2_000)) + True + >>> all(list(factor(n)) == sorted(factor(n)) for n in range(2_000)) + True + >>> list(flatten([('a', 'b'), (), ('c', 'd', 'e'), ('f',), ('g', 'h', 'i')])) ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'] @@ -1284,11 +1455,34 @@ which incur interpreter overhead. >>> list(grouper('abcdefg', n=3, incomplete='ignore')) [('a', 'b', 'c'), ('d', 'e', 'f')] - >>> list(triplewise('ABCDEFG')) + >>> list(sliding_window('ABCDEFG', 1)) + [('A',), ('B',), ('C',), ('D',), ('E',), ('F',), ('G',)] + >>> list(sliding_window('ABCDEFG', 2)) + [('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E'), ('E', 'F'), ('F', 'G')] + >>> list(sliding_window('ABCDEFG', 3)) [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E'), ('D', 'E', 'F'), ('E', 'F', 'G')] - >>> list(sliding_window('ABCDEFG', 4)) [('A', 'B', 'C', 'D'), ('B', 'C', 'D', 'E'), ('C', 'D', 'E', 'F'), ('D', 'E', 'F', 'G')] + >>> list(sliding_window('ABCDEFG', 5)) + [('A', 'B', 'C', 'D', 'E'), ('B', 'C', 'D', 'E', 'F'), ('C', 'D', 'E', 'F', 'G')] + >>> list(sliding_window('ABCDEFG', 6)) + [('A', 'B', 'C', 'D', 'E', 'F'), ('B', 'C', 'D', 'E', 'F', 'G')] + >>> list(sliding_window('ABCDEFG', 7)) + [('A', 'B', 'C', 'D', 'E', 'F', 'G')] + >>> list(sliding_window('ABCDEFG', 8)) + [] + >>> try: + ... list(sliding_window('ABCDEFG', -1)) + ... except ValueError: + ... 'zero or negative n not supported' + ... + 'zero or negative n not supported' + >>> try: + ... list(sliding_window('ABCDEFG', 0)) + ... except ValueError: + ... 'zero or negative n not supported' + ... + 'zero or negative n not supported' >>> list(roundrobin('abc', 'd', 'ef')) ['a', 'd', 'e', 'b', 'f', 'c'] @@ -1323,15 +1517,17 @@ which incur interpreter overhead. >>> list(unique_everseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D'] - >>> list(unique_everseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'D'] + >>> list(unique_everseen('ABBcCAD', str.lower)) + ['A', 'B', 'c', 'D'] >>> list(unique_justseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D', 'A', 'B'] - >>> list(unique_justseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'A', 'D'] + >>> list(unique_justseen('ABBcCAD', str.lower)) + ['A', 'B', 'c', 'A', 'D'] >>> d = dict(a=1, b=2, c=3) >>> it = iter_except(d.popitem, KeyError) @@ -1366,3 +1562,45 @@ which incur interpreter overhead. >>> combos = list(combinations(iterable, r)) >>> all(nth_combination(iterable, r, i) == comb for i, comb in enumerate(combos)) True + + +.. 
testcode:: + :hide: + + # Old recipes and their tests which are guaranteed to continue to work. + + def sumprod(vec1, vec2): + "Compute a sum of products." + return sum(starmap(operator.mul, zip(vec1, vec2, strict=True))) + + def dotproduct(vec1, vec2): + return sum(map(operator.mul, vec1, vec2)) + + def pad_none(iterable): + """Returns the sequence elements and then returns None indefinitely. + + Useful for emulating the behavior of the built-in map() function. + """ + return chain(iterable, repeat(None)) + + def triplewise(iterable): + "Return overlapping triplets from an iterable" + # triplewise('ABCDEFG') --> ABC BCD CDE DEF EFG + for (a, _), (b, c) in pairwise(pairwise(iterable)): + yield a, b, c + + +.. doctest:: + :hide: + + >>> dotproduct([1,2,3], [4,5,6]) + 32 + + >>> sumprod([1,2,3], [4,5,6]) + 32 + + >>> list(islice(pad_none('abc'), 0, 6)) + ['a', 'b', 'c', None, None, None] + + >>> list(triplewise('ABCDEFG')) + [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E'), ('D', 'E', 'F'), ('E', 'F', 'G')] diff --git a/Doc/library/json.rst b/Doc/library/json.rst index 00f585124a86b3a..0ce4b697145cb3e 100644 --- a/Doc/library/json.rst +++ b/Doc/library/json.rst @@ -9,11 +9,6 @@ **Source code:** :source:`Lib/json/__init__.py` -.. testsetup:: * - - import json - from json import AttrDict - -------------- `JSON (JavaScript Object Notation) `_, specified by @@ -59,12 +54,23 @@ Compact encoding:: Pretty printing:: >>> import json - >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)) + >>> print(json.dumps({'6': 7, '4': 5}, sort_keys=True, indent=4)) { "4": 5, "6": 7 } +Specializing JSON object encoding:: + + >>> import json + >>> def custom_json(obj): + ... if isinstance(obj, complex): + ... return {'__complex__': True, 'real': obj.real, 'imag': obj.imag} + ... raise TypeError(f'Cannot serialize object of {type(obj)}') + ... + >>> json.dumps(1 + 2j, default=custom_json) + '{"__complex__": true, "real": 1.0, "imag": 2.0}' + Decoding JSON:: >>> import json @@ -197,7 +203,7 @@ Basic Usage dictionaries will be sorted by key. To use a custom :class:`JSONEncoder` subclass (e.g. one that overrides the - :meth:`default` method to serialize additional types), specify it with the + :meth:`~JSONEncoder.default` method to serialize additional types), specify it with the *cls* kwarg; otherwise :class:`JSONEncoder` is used. .. versionchanged:: 3.6 @@ -427,7 +433,7 @@ Encoders and Decoders Added support for int- and float-derived Enum classes. To extend this to recognize other objects, subclass and implement a - :meth:`default` method with another method that returns a serializable object + :meth:`~JSONEncoder.default` method with another method that returns a serializable object for ``o`` if possible, otherwise it should call the superclass implementation (to raise :exc:`TypeError`). @@ -488,7 +494,7 @@ Encoders and Decoders :exc:`TypeError`). For example, to support arbitrary iterators, you could implement - :meth:`default` like this:: + :meth:`~JSONEncoder.default` like this:: def default(self, o): try: @@ -548,44 +554,6 @@ Exceptions .. versionadded:: 3.5 -.. class:: AttrDict(**kwargs) - AttrDict(mapping, **kwargs) - AttrDict(iterable, **kwargs) - - Subclass of :class:`dict` object that also supports attribute style dotted access. - - This class is intended for use with the :attr:`object_hook` in - :func:`json.load` and :func:`json.loads`:: - - .. 
doctest:: - - >>> json_string = '{"mercury": 88, "venus": 225, "earth": 365, "mars": 687}' - >>> orbital_period = json.loads(json_string, object_hook=AttrDict) - >>> orbital_period['earth'] # Dict style lookup - 365 - >>> orbital_period.earth # Attribute style lookup - 365 - >>> orbital_period.keys() # All dict methods are present - dict_keys(['mercury', 'venus', 'earth', 'mars']) - - Attribute style access only works for keys that are valid attribute - names. In contrast, dictionary style access works for all keys. For - example, ``d.two words`` contains a space and is not syntactically - valid Python, so ``d["two words"]`` should be used instead. - - If a key has the same name as a dictionary method, then a dictionary - lookup finds the key and an attribute lookup finds the method: - - .. doctest:: - - >>> d = AttrDict(items=50) - >>> d['items'] # Lookup the key - 50 - >>> d.items() # Call the method - dict_items([('items', 50)]) - - .. versionadded:: 3.12 - Standard Compliance and Interoperability ---------------------------------------- @@ -726,7 +694,7 @@ The :mod:`json.tool` module provides a simple command line interface to validate and pretty-print JSON objects. If the optional ``infile`` and ``outfile`` arguments are not -specified, :attr:`sys.stdin` and :attr:`sys.stdout` will be used respectively: +specified, :data:`sys.stdin` and :data:`sys.stdout` will be used respectively: .. code-block:: shell-session @@ -746,7 +714,7 @@ specified, :attr:`sys.stdin` and :attr:`sys.stdout` will be used respectively: Command line options ^^^^^^^^^^^^^^^^^^^^ -.. cmdoption:: infile +.. option:: infile The JSON file to be validated or pretty-printed: @@ -764,38 +732,38 @@ Command line options } ] - If *infile* is not specified, read from :attr:`sys.stdin`. + If *infile* is not specified, read from :data:`sys.stdin`. -.. cmdoption:: outfile +.. option:: outfile Write the output of the *infile* to the given *outfile*. Otherwise, write it - to :attr:`sys.stdout`. + to :data:`sys.stdout`. -.. cmdoption:: --sort-keys +.. option:: --sort-keys Sort the output of dictionaries alphabetically by key. .. versionadded:: 3.5 -.. cmdoption:: --no-ensure-ascii +.. option:: --no-ensure-ascii Disable escaping of non-ascii characters, see :func:`json.dumps` for more information. .. versionadded:: 3.9 -.. cmdoption:: --json-lines +.. option:: --json-lines Parse every input line as separate JSON object. .. versionadded:: 3.8 -.. cmdoption:: --indent, --tab, --no-indent, --compact +.. option:: --indent, --tab, --no-indent, --compact Mutually exclusive options for whitespace control. .. versionadded:: 3.9 -.. cmdoption:: -h, --help +.. option:: -h, --help Show the help message. diff --git a/Doc/library/kde_example.png b/Doc/library/kde_example.png new file mode 100644 index 000000000000000..f45048956999741 Binary files /dev/null and b/Doc/library/kde_example.png differ diff --git a/Doc/library/locale.rst b/Doc/library/locale.rst index 5bb09b68ca138c5..0d48892fcdab97a 100644 --- a/Doc/library/locale.rst +++ b/Doc/library/locale.rst @@ -16,7 +16,7 @@ functionality. The POSIX locale mechanism allows programmers to deal with certain cultural issues in an application, without requiring the programmer to know all the specifics of each country where the software is executed. -.. index:: module: _locale +.. index:: pair: module; _locale The :mod:`locale` module is implemented on top of the :mod:`_locale` module, which in turn uses an ANSI C locale implementation if available. 
@@ -147,12 +147,12 @@ The :mod:`locale` module defines the following exception and functions: | ``CHAR_MAX`` | Nothing is specified in this locale. | +--------------+-----------------------------------------+ - The function sets temporarily the ``LC_CTYPE`` locale to the ``LC_NUMERIC`` + The function temporarily sets the ``LC_CTYPE`` locale to the ``LC_NUMERIC`` locale or the ``LC_MONETARY`` locale if locales are different and numeric or monetary strings are non-ASCII. This temporary change affects other threads. .. versionchanged:: 3.7 - The function now sets temporarily the ``LC_CTYPE`` locale to the + The function now temporarily sets the ``LC_CTYPE`` locale to the ``LC_NUMERIC`` locale in some cases. @@ -227,16 +227,18 @@ The :mod:`locale` module defines the following exception and functions: Get a regular expression that can be used with the regex function to recognize a positive response to a yes/no question. - .. note:: - - The expression is in the syntax suitable for the :c:func:`regex` function - from the C library, which might differ from the syntax used in :mod:`re`. - .. data:: NOEXPR Get a regular expression that can be used with the regex(3) function to recognize a negative response to a yes/no question. + .. note:: + + The regular expressions for :const:`YESEXPR` and + :const:`NOEXPR` use syntax suitable for the + :c:func:`regex` function from the C library, which might + differ from the syntax used in :mod:`re`. + .. data:: CRNCYSTR Get the currency symbol, preceded by "-" if the symbol should appear before @@ -301,7 +303,7 @@ The :mod:`locale` module defines the following exception and functions: *language code* and *encoding* may be ``None`` if their values cannot be determined. - .. deprecated-removed:: 3.11 3.13 + .. deprecated-removed:: 3.11 3.15 .. function:: getlocale(category=LC_CTYPE) @@ -368,16 +370,6 @@ The :mod:`locale` module defines the following exception and functions: encoding for the locale code just like :func:`setlocale`. -.. function:: resetlocale(category=LC_ALL) - - Sets the locale for *category* to the default setting. - - The default setting is determined by calling :func:`getdefaultlocale`. - *category* defaults to :const:`LC_ALL`. - - .. deprecated-removed:: 3.11 3.13 - - .. function:: strcoll(string1, string2) Compares two strings according to the current :const:`LC_COLLATE` setting. As @@ -399,7 +391,7 @@ The :mod:`locale` module defines the following exception and functions: Formats a number *val* according to the current :const:`LC_NUMERIC` setting. The format follows the conventions of the ``%`` operator. For floating point - values, the decimal point is modified if appropriate. If *grouping* is true, + values, the decimal point is modified if appropriate. If *grouping* is ``True``, also takes the grouping into account. If *monetary* is true, the conversion uses monetary thousands separator and @@ -417,12 +409,14 @@ The :mod:`locale` module defines the following exception and functions: Formats a number *val* according to the current :const:`LC_MONETARY` settings. The returned string includes the currency symbol if *symbol* is true, which is - the default. If *grouping* is true (which is not the default), grouping is done - with the value. If *international* is true (which is not the default), the + the default. If *grouping* is ``True`` (which is not the default), grouping is done + with the value. If *international* is ``True`` (which is not the default), the international currency symbol is used. 
- Note that this function will not work with the 'C' locale, so you have to set a - locale via :func:`setlocale` first. + .. note:: + + This function will not work with the 'C' locale, so you have to set a + locale via :func:`setlocale` first. .. function:: str(float) @@ -460,11 +454,16 @@ The :mod:`locale` module defines the following exception and functions: .. data:: LC_CTYPE - .. index:: module: string + Locale category for the character type functions. Most importantly, this + category defines the text encoding, i.e. how bytes are interpreted as + Unicode codepoints. See :pep:`538` and :pep:`540` for how this variable + might be automatically coerced to ``C.UTF-8`` to avoid issues created by + invalid settings in containers or incompatible settings passed over remote + SSH connections. - Locale category for the character type functions. Depending on the settings of - this category, the functions of module :mod:`string` dealing with case change - their behaviour. + Python doesn't internally use locale-dependent character transformation functions + from ``ctype.h``. Instead, an internal ``pyctype.h`` provides locale-independent + equivalents like :c:macro:`!Py_TOLOWER`. .. data:: LC_COLLATE @@ -492,6 +491,9 @@ The :mod:`locale` module defines the following exception and functions: system, like those returned by :func:`os.strerror` might be affected by this category. + This value may not be available on operating systems not conforming to the + POSIX standard, most notably Windows. + .. data:: LC_NUMERIC @@ -609,4 +611,3 @@ applications that link with additional C libraries which internally invoke :c:func:`gettext` or :c:func:`dcgettext`. For these applications, it may be necessary to bind the text domain, so that the libraries can properly locate their message catalogs. - diff --git a/Doc/library/logging.config.rst b/Doc/library/logging.config.rst index 9b82c1f75e3f073..85a53e6aa7a78b0 100644 --- a/Doc/library/logging.config.rst +++ b/Doc/library/logging.config.rst @@ -87,6 +87,10 @@ in :mod:`logging` itself) and defining handlers which are declared either in provides a mechanism to present the choices and load the chosen configuration). + It will raise :exc:`FileNotFoundError` if the file + doesn't exist and :exc:`RuntimeError` if the file is invalid or + empty. + :param fname: A filename, or a file-like object, or an instance derived from :class:`~configparser.RawConfigParser`. If a ``RawConfigParser``-derived instance is passed, it is used as @@ -111,7 +115,7 @@ in :mod:`logging` itself) and defining handlers which are declared either in they or their ancestors are explicitly named in the logging configuration. - :param encoding: The encoding used to open file when *fname* is filename. + :param encoding: The encoding used to open file when *fname* is filename. .. versionchanged:: 3.4 An instance of a subclass of :class:`~configparser.RawConfigParser` is @@ -126,6 +130,10 @@ in :mod:`logging` itself) and defining handlers which are declared either in .. versionadded:: 3.10 The *encoding* parameter is added. + .. versionchanged:: 3.12 + An exception will be thrown if the provided file + doesn't exist or is invalid or empty. + .. function:: listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None) Starts up a socket server on the specified port, and listens for new @@ -249,10 +257,11 @@ otherwise, the context is used to determine what to instantiate. 
which correspond to the arguments passed to create a :class:`~logging.Formatter` object: - * ``format`` - * ``datefmt`` - * ``style`` - * ``validate`` (since version >=3.8) + * ``format`` + * ``datefmt`` + * ``style`` + * ``validate`` (since version >=3.8) + * ``defaults`` (since version >=3.12) An optional ``class`` key indicates the name of the formatter's class (as a dotted module and class name). The instantiation @@ -525,14 +534,19 @@ returned by the call:: my.package.customFormatterFactory(bar='baz', spam=99.9, answer=42) +.. warning:: The values for keys such as ``bar``, ``spam`` and ``answer`` in + the above example should not be configuration dictionaries or references such + as ``cfg://foo`` or ``ext://bar``, because they will not be processed by the + configuration machinery, but passed to the callable as-is. + The key ``'()'`` has been used as the special key because it is not a valid keyword parameter name, and so will not clash with the names of the keyword arguments used in the call. The ``'()'`` also serves as a mnemonic that the corresponding value is a callable. - .. versionchanged:: 3.11 - The ``filters`` member of ``handlers`` and ``loggers`` can take - filter instances in addition to ids. +.. versionchanged:: 3.11 + The ``filters`` member of ``handlers`` and ``loggers`` can take + filter instances in addition to ids. You can also specify a special key ``'.'`` whose value is a dictionary is a mapping of attribute names to values. If found, the specified attributes will @@ -553,6 +567,34 @@ following configuration:: the returned formatter will have attribute ``foo`` set to ``'bar'`` and attribute ``baz`` set to ``'bozz'``. +.. warning:: The values for attributes such as ``foo`` and ``baz`` in + the above example should not be configuration dictionaries or references such + as ``cfg://foo`` or ``ext://bar``, because they will not be processed by the + configuration machinery, but set as attribute values as-is. + + +.. _handler-config-dict-order: + +Handler configuration order +""""""""""""""""""""""""""" + +Handlers are configured in alphabetical order of their keys, and a configured +handler replaces the configuration dictionary in (a working copy of) the +``handlers`` dictionary in the schema. If you use a construct such as +``cfg://handlers.foo``, then initially ``handlers['foo']`` points to the +configuration dictionary for the handler named ``foo``, and later (once that +handler has been configured) it points to the configured handler instance. +Thus, ``cfg://handlers.foo`` could resolve to either a dictionary or a handler +instance. In general, it is wise to name handlers in a way such that dependent +handlers are configured _after_ any handlers they depend on; that allows +something like ``cfg://handlers.foo`` to be used in configuring a handler that +depends on handler ``foo``. If that dependent handler were named ``bar``, +problems would result, because the configuration of ``bar`` would be attempted +before that of ``foo``, and ``foo`` would not yet have been configured. +However, if the dependent handler were named ``foobar``, it would be configured +after ``foo``, with the result that ``cfg://handlers.foo`` would resolve to +configured handler ``foo``, and not its configuration dictionary. + .. _logging-config-dict-externalobj: @@ -643,7 +685,8 @@ resolve to ``'dev_team@domain.tld'`` and the string ``'support_team@domain.tld'``. 
The ``subject`` value could be accessed using either ``'cfg://handlers.email.subject'`` or, equivalently, ``'cfg://handlers.email[subject]'``. The latter form only needs to be -used if the key contains spaces or non-alphanumeric characters. If an +used if the key contains spaces or non-alphanumeric characters. Please note +that the characters ``[`` and ``]`` are not allowed in the keys. If an index value consists only of decimal digits, access will be attempted using the corresponding integer value, falling back to the string value if needed. @@ -920,16 +963,22 @@ Sections which specify formatter configuration are typified by the following. .. code-block:: ini [formatter_form01] - format=F1 %(asctime)s %(levelname)s %(message)s + format=F1 %(asctime)s %(levelname)s %(message)s %(customfield)s datefmt= style=% validate=True + defaults={'customfield': 'defaultvalue'} class=logging.Formatter The arguments for the formatter configuration are the same as the keys in the dictionary schema :ref:`formatters section `. +The ``defaults`` entry, when :ref:`evaluated ` in the context of +the ``logging`` package's namespace, is a dictionary of default values for +custom formatting fields. If not provided, it defaults to ``None``. + + .. note:: Due to the use of :func:`eval` as described above, there are diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst index d4429d3d0a4f737..2dd4bd081b0429c 100644 --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -97,7 +97,7 @@ sends logging output to a disk file. It inherits the output functionality from Returns a new instance of the :class:`FileHandler` class. The specified file is opened and used as the stream for logging. If *mode* is not specified, - :const:`'a'` is used. If *encoding* is not ``None``, it is used to open the file + ``'a'`` is used. If *encoding* is not ``None``, it is used to open the file with that encoding. If *delay* is true, then file opening is deferred until the first call to :meth:`emit`. By default, the file grows indefinitely. If *errors* is specified, it's used to determine how encoding errors are handled. @@ -182,7 +182,7 @@ for this value. Returns a new instance of the :class:`WatchedFileHandler` class. The specified file is opened and used as the stream for logging. If *mode* is not specified, - :const:`'a'` is used. If *encoding* is not ``None``, it is used to open the file + ``'a'`` is used. If *encoding* is not ``None``, it is used to open the file with that encoding. If *delay* is true, then file opening is deferred until the first call to :meth:`emit`. By default, the file grows indefinitely. If *errors* is provided, it determines how encoding errors are handled. @@ -656,9 +656,7 @@ supports sending logging messages to a remote or local Unix syslog. to the other end. This method is called during handler initialization, but it's not regarded as an error if the other end isn't listening at this point - the method will be called again when emitting an event, if - but it's not regarded as an error if the other end isn't listening yet - --- the method will be called again when emitting an event, - if there is no socket at that point. + there is no socket at that point. .. versionadded:: 3.11 @@ -917,8 +915,9 @@ should, then :meth:`flush` is expected to do the flushing. .. method:: flush() - You can override this to implement custom flushing behavior. This version - just zaps the buffer to empty. 
+ For a :class:`BufferingHandler` instance, flushing means that it sets the + buffer to an empty list. This method can be overwritten to implement more useful + flushing behavior. .. method:: shouldFlush(record) @@ -950,9 +949,9 @@ should, then :meth:`flush` is expected to do the flushing. .. method:: flush() - For a :class:`MemoryHandler`, flushing means just sending the buffered + For a :class:`MemoryHandler` instance, flushing means just sending the buffered records to the target, if there is one. The buffer is also cleared when - this happens. Override if you want different behavior. + buffered records are sent to the target. Override if you want different behavior. .. method:: setTarget(target) @@ -1051,8 +1050,8 @@ possible, while any potentially slow operations (such as sending an email via occur (e.g. because a bounded queue has filled up), the :meth:`~logging.Handler.handleError` method is called to handle the error. This can result in the record silently being dropped (if - :attr:`logging.raiseExceptions` is ``False``) or a message printed to - ``sys.stderr`` (if :attr:`logging.raiseExceptions` is ``True``). + :data:`logging.raiseExceptions` is ``False``) or a message printed to + ``sys.stderr`` (if :data:`logging.raiseExceptions` is ``True``). .. method:: prepare(record) diff --git a/Doc/library/logging.rst b/Doc/library/logging.rst index 34e98fc2577003d..acdeb88a546261c 100644 --- a/Doc/library/logging.rst +++ b/Doc/library/logging.rst @@ -275,6 +275,10 @@ is the module's name in the Python package namespace. .. versionchanged:: 3.8 The *stacklevel* parameter was added. + .. versionchanged:: 3.13 + Remove the undocumented ``warn()`` method which was an alias to the + :meth:`warning` method. + .. method:: Logger.info(msg, *args, **kwargs) @@ -287,10 +291,6 @@ is the module's name in the Python package namespace. Logs a message with level :const:`WARNING` on this logger. The arguments are interpreted as for :meth:`debug`. - .. note:: There is an obsolete method ``warn`` which is functionally - identical to ``warning``. As ``warn`` is deprecated, please do not use - it - use ``warning`` instead. - .. method:: Logger.error(msg, *args, **kwargs) Logs a message with level :const:`ERROR` on this logger. The arguments are @@ -397,21 +397,39 @@ have specific values relative to the predefined levels. If you define a level with the same numeric value, it overwrites the predefined value; the predefined name is lost. -+--------------+---------------+ -| Level | Numeric value | -+==============+===============+ -| ``CRITICAL`` | 50 | -+--------------+---------------+ -| ``ERROR`` | 40 | -+--------------+---------------+ -| ``WARNING`` | 30 | -+--------------+---------------+ -| ``INFO`` | 20 | -+--------------+---------------+ -| ``DEBUG`` | 10 | -+--------------+---------------+ -| ``NOTSET`` | 0 | -+--------------+---------------+ ++-----------------------+---------------+-------------------------------------+ +| Level | Numeric value | What it means / When to use it | ++=======================+===============+=====================================+ +| .. py:data:: NOTSET | 0 | When set on a logger, indicates that| +| | | ancestor loggers are to be consulted| +| | | to determine the effective level. | +| | | If that still resolves to | +| | | :const:`!NOTSET`, then all events | +| | | are logged. When set on a handler, | +| | | all events are handled. | ++-----------------------+---------------+-------------------------------------+ +| .. 
py:data:: DEBUG | 10 | Detailed information, typically only| +| | | of interest to a developer trying to| +| | | diagnose a problem. | ++-----------------------+---------------+-------------------------------------+ +| .. py:data:: INFO | 20 | Confirmation that things are working| +| | | as expected. | ++-----------------------+---------------+-------------------------------------+ +| .. py:data:: WARNING | 30 | An indication that something | +| | | unexpected happened, or that a | +| | | problem might occur in the near | +| | | future (e.g. 'disk space low'). The | +| | | software is still working as | +| | | expected. | ++-----------------------+---------------+-------------------------------------+ +| .. py:data:: ERROR | 40 | Due to a more serious problem, the | +| | | software has not been able to | +| | | perform some function. | ++-----------------------+---------------+-------------------------------------+ +| .. py:data:: CRITICAL | 50 | A serious error, indicating that the| +| | | program itself may be unable to | +| | | continue running. | ++-----------------------+---------------+-------------------------------------+ .. _handler: @@ -421,7 +439,7 @@ Handler Objects Handlers have the following attributes and methods. Note that :class:`Handler` is never instantiated directly; this class acts as a base for more useful -subclasses. However, the :meth:`__init__` method in subclasses needs to call +subclasses. However, the :meth:`!__init__` method in subclasses needs to call :meth:`Handler.__init__`. .. class:: Handler @@ -813,8 +831,9 @@ wire). :type lineno: int :param msg: The event description message, - which can be a %-format string with placeholders for variable data. - :type msg: str + which can be a %-format string with placeholders for variable data, + or an arbitrary object (see :ref:`arbitrary-object-messages`). + :type msg: typing.Any :param args: Variable data to merge into the *msg* argument to obtain the event description. @@ -888,7 +907,7 @@ you want to use. In the case of {}-formatting, you can specify formatting flags by placing them after the attribute name, separated from it with a colon. For example: a -placeholder of ``{msecs:03d}`` would format a millisecond value of ``4`` as +placeholder of ``{msecs:03.0f}`` would format a millisecond value of ``4`` as ``004``. Refer to the :meth:`str.format` documentation for full details on the options available to you. @@ -983,10 +1002,14 @@ LoggerAdapter Objects information into logging calls. For a usage example, see the section on :ref:`adding contextual information to your logging output `. -.. class:: LoggerAdapter(logger, extra) +.. class:: LoggerAdapter(logger, extra, merge_extra=False) Returns an instance of :class:`LoggerAdapter` initialized with an - underlying :class:`Logger` instance and a dict-like object. + underlying :class:`Logger` instance, a dict-like object (*extra*), and a + boolean (*merge_extra*) indicating whether or not the *extra* argument of + individual log calls should be merged with the :class:`LoggerAdapter` extra. + The default behavior is to ignore the *extra* argument of individual log + calls and only use the one of the :class:`LoggerAdapter` instance .. method:: process(msg, kwargs) @@ -996,23 +1019,42 @@ information into logging calls. For a usage example, see the section on 'extra'. The return value is a (*msg*, *kwargs*) tuple which has the (possibly modified) versions of the arguments passed in. 
-In addition to the above, :class:`LoggerAdapter` supports the following
-methods of :class:`Logger`: :meth:`~Logger.debug`, :meth:`~Logger.info`,
-:meth:`~Logger.warning`, :meth:`~Logger.error`, :meth:`~Logger.exception`,
-:meth:`~Logger.critical`, :meth:`~Logger.log`, :meth:`~Logger.isEnabledFor`,
-:meth:`~Logger.getEffectiveLevel`, :meth:`~Logger.setLevel` and
-:meth:`~Logger.hasHandlers`. These methods have the same signatures as their
-counterparts in :class:`Logger`, so you can use the two types of instances
-interchangeably.
+   .. attribute:: manager
-.. versionchanged:: 3.2
-   The :meth:`~Logger.isEnabledFor`, :meth:`~Logger.getEffectiveLevel`,
-   :meth:`~Logger.setLevel` and :meth:`~Logger.hasHandlers` methods were added
-   to :class:`LoggerAdapter`. These methods delegate to the underlying logger.
+      Delegates to the underlying :attr:`!manager` on *logger*.
+
+   .. attribute:: _log
+
+      Delegates to the underlying :meth:`!_log` method on *logger*.
+
+   In addition to the above, :class:`LoggerAdapter` supports the following
+   methods of :class:`Logger`: :meth:`~Logger.debug`, :meth:`~Logger.info`,
+   :meth:`~Logger.warning`, :meth:`~Logger.error`, :meth:`~Logger.exception`,
+   :meth:`~Logger.critical`, :meth:`~Logger.log`, :meth:`~Logger.isEnabledFor`,
+   :meth:`~Logger.getEffectiveLevel`, :meth:`~Logger.setLevel` and
+   :meth:`~Logger.hasHandlers`. These methods have the same signatures as their
+   counterparts in :class:`Logger`, so you can use the two types of instances
+   interchangeably.
+
+   .. versionchanged:: 3.2
-.. versionchanged:: 3.6
-   Attribute :attr:`manager` and method :meth:`_log` were added, which
-   delegate to the underlying logger and allow adapters to be nested.
+      The :meth:`~Logger.isEnabledFor`, :meth:`~Logger.getEffectiveLevel`,
+      :meth:`~Logger.setLevel` and :meth:`~Logger.hasHandlers` methods were added
+      to :class:`LoggerAdapter`. These methods delegate to the underlying logger.
+
+   .. versionchanged:: 3.6
+
+      Attribute :attr:`!manager` and method :meth:`!_log` were added, which
+      delegate to the underlying logger and allow adapters to be nested.
+
+   .. versionchanged:: 3.13
+
+      Remove the undocumented :meth:`!warn` method which was an alias to the
+      :meth:`!warning` method.
+
+   .. versionchanged:: 3.13
+
+      The *merge_extra* argument was added.

 Thread Safety
@@ -1161,6 +1203,10 @@ functions.
    identical to ``warning``. As ``warn`` is deprecated, please do not use
    it - use ``warning`` instead.
+   .. versionchanged:: 3.13
+      Remove the undocumented ``warn()`` function which was an alias to the
+      :func:`warning` function.
+
 .. function:: error(msg, *args, **kwargs)
@@ -1396,8 +1442,8 @@ functions.
 .. function:: setLoggerClass(klass)

    Tells the logging system to use the class *klass* when instantiating a logger.
-   The class should define :meth:`__init__` such that only a name argument is
-   required, and the :meth:`__init__` should call :meth:`Logger.__init__`. This
+   The class should define :meth:`!__init__` such that only a name argument is
+   required, and the :meth:`!__init__` should call :meth:`!Logger.__init__`. This
    function is typically called before any loggers are instantiated by
    applications which need to use custom logger behavior.
After this call, as at any other time, do not instantiate loggers directly using the subclass: continue to use diff --git a/Doc/library/lzma.rst b/Doc/library/lzma.rst index a9311f2a03563fb..0d69c3bc01d1e24 100644 --- a/Doc/library/lzma.rst +++ b/Doc/library/lzma.rst @@ -100,7 +100,8 @@ Reading and writing compressed files *filters* arguments have the same meanings as for :class:`LZMACompressor`. :class:`LZMAFile` supports all the members specified by - :class:`io.BufferedIOBase`, except for :meth:`detach` and :meth:`truncate`. + :class:`io.BufferedIOBase`, except for :meth:`~io.BufferedIOBase.detach` + and :meth:`~io.IOBase.truncate`. Iteration and the :keyword:`with` statement are supported. The following method is also provided: @@ -147,7 +148,7 @@ Compressing and decompressing data in memory This format is more limited than ``.xz`` -- it does not support integrity checks or multiple filters. - * :const:`FORMAT_RAW`: A raw data stream, not using sequences format. + * :const:`FORMAT_RAW`: A raw data stream, not using any container format. This format specifier does not support integrity checks, and requires that you always specify a custom filter chain (for both compression and decompression). Additionally, data compressed in this manner cannot be @@ -332,19 +333,22 @@ the key ``"id"``, and may contain additional keys to specify filter-dependent options. Valid filter IDs are as follows: * Compression filters: - * :const:`FILTER_LZMA1` (for use with :const:`FORMAT_ALONE`) - * :const:`FILTER_LZMA2` (for use with :const:`FORMAT_XZ` and :const:`FORMAT_RAW`) + + * :const:`FILTER_LZMA1` (for use with :const:`FORMAT_ALONE`) + * :const:`FILTER_LZMA2` (for use with :const:`FORMAT_XZ` and :const:`FORMAT_RAW`) * Delta filter: - * :const:`FILTER_DELTA` + + * :const:`FILTER_DELTA` * Branch-Call-Jump (BCJ) filters: - * :const:`FILTER_X86` - * :const:`FILTER_IA64` - * :const:`FILTER_ARM` - * :const:`FILTER_ARMTHUMB` - * :const:`FILTER_POWERPC` - * :const:`FILTER_SPARC` + + * :const:`FILTER_X86` + * :const:`FILTER_IA64` + * :const:`FILTER_ARM` + * :const:`FILTER_ARMTHUMB` + * :const:`FILTER_POWERPC` + * :const:`FILTER_SPARC` A filter chain can consist of up to 4 filters, and cannot be empty. The last filter in the chain must be a compression filter, and any other filters must be @@ -353,21 +357,21 @@ delta or BCJ filters. Compression filters support the following options (specified as additional entries in the dictionary representing the filter): - * ``preset``: A compression preset to use as a source of default values for - options that are not specified explicitly. - * ``dict_size``: Dictionary size in bytes. This should be between 4 KiB and - 1.5 GiB (inclusive). - * ``lc``: Number of literal context bits. - * ``lp``: Number of literal position bits. The sum ``lc + lp`` must be at - most 4. - * ``pb``: Number of position bits; must be at most 4. - * ``mode``: :const:`MODE_FAST` or :const:`MODE_NORMAL`. - * ``nice_len``: What should be considered a "nice length" for a match. - This should be 273 or less. - * ``mf``: What match finder to use -- :const:`MF_HC3`, :const:`MF_HC4`, - :const:`MF_BT2`, :const:`MF_BT3`, or :const:`MF_BT4`. - * ``depth``: Maximum search depth used by match finder. 0 (default) means to - select automatically based on other filter options. +* ``preset``: A compression preset to use as a source of default values for + options that are not specified explicitly. +* ``dict_size``: Dictionary size in bytes. This should be between 4 KiB and + 1.5 GiB (inclusive). 
+* ``lc``: Number of literal context bits. +* ``lp``: Number of literal position bits. The sum ``lc + lp`` must be at + most 4. +* ``pb``: Number of position bits; must be at most 4. +* ``mode``: :const:`MODE_FAST` or :const:`MODE_NORMAL`. +* ``nice_len``: What should be considered a "nice length" for a match. + This should be 273 or less. +* ``mf``: What match finder to use -- :const:`MF_HC3`, :const:`MF_HC4`, + :const:`MF_BT2`, :const:`MF_BT3`, or :const:`MF_BT4`. +* ``depth``: Maximum search depth used by match finder. 0 (default) means to + select automatically based on other filter options. The delta filter stores the differences between bytes, producing more repetitive input for the compressor in certain circumstances. It supports one option, diff --git a/Doc/library/mailbox.rst b/Doc/library/mailbox.rst index 56908dedea1b405..05ffaf6c9b336e2 100644 --- a/Doc/library/mailbox.rst +++ b/Doc/library/mailbox.rst @@ -167,7 +167,7 @@ Supported mailbox formats are Maildir, mbox, MH, Babyl, and MMDF. Return a representation of the message corresponding to *key*. If no such message exists, *default* is returned if the method was called as :meth:`get` and a :exc:`KeyError` exception is raised if the method was - called as :meth:`__getitem__`. The message is represented as an instance + called as :meth:`~object.__getitem__`. The message is represented as an instance of the appropriate format-specific :class:`Message` subclass unless a custom message factory was specified when the :class:`Mailbox` instance was initialized. @@ -424,6 +424,108 @@ Supported mailbox formats are Maildir, mbox, MH, Babyl, and MMDF. remove the underlying message while the returned file remains open. + .. method:: get_flags(key) + + Return as a string the flags that are set on the message + corresponding to *key*. + This is the same as ``get_message(key).get_flags()`` but much + faster, because it does not open the message file. + Use this method when iterating over the keys to determine which + messages are interesting to get. + + If you do have a :class:`MaildirMessage` object, use + its :meth:`~MaildirMessage.get_flags` method instead, because + changes made by the message's :meth:`~MaildirMessage.set_flags`, + :meth:`~MaildirMessage.add_flag` and :meth:`~MaildirMessage.remove_flag` + methods are not reflected here until the mailbox's + :meth:`__setitem__` method is called. + + .. versionadded:: 3.13 + + + .. method:: set_flags(key, flags) + + On the message corresponding to *key*, set the flags specified + by *flags* and unset all others. + Calling ``some_mailbox.set_flags(key, flags)`` is similar to :: + + one_message = some_mailbox.get_message(key) + one_message.set_flags(flags) + some_mailbox[key] = one_message + + but faster, because it does not open the message file. + + If you do have a :class:`MaildirMessage` object, use + its :meth:`~MaildirMessage.set_flags` method instead, because + changes made with this mailbox method will not be visible to the + message object's method, :meth:`~MaildirMessage.get_flags`. + + .. versionadded:: 3.13 + + + .. method:: add_flag(key, flag) + + On the message corresponding to *key*, set the flags specified + by *flag* without changing other flags. To add more than one + flag at a time, *flag* may be a string of more than one character. + + Considerations for using this method versus the message object's + :meth:`~MaildirMessage.add_flag` method are similar to + those for :meth:`set_flags`; see the discussion there. + + .. versionadded:: 3.13 + + + .. 
method:: remove_flag(key, flag)
+
+      On the message corresponding to *key*, unset the flags specified
+      by *flag* without changing other flags. To remove more than one
+      flag at a time, *flag* may be a string of more than one character.
+
+      Considerations for using this method versus the message object's
+      :meth:`~MaildirMessage.remove_flag` method are similar to
+      those for :meth:`set_flags`; see the discussion there.
+
+      .. versionadded:: 3.13
+
+
+   .. method:: get_info(key)
+
+      Return a string containing the info for the message
+      corresponding to *key*.
+      This is the same as ``get_message(key).get_info()`` but much
+      faster, because it does not open the message file.
+      Use this method when iterating over the keys to determine which
+      messages are interesting to get.
+
+      If you do have a :class:`MaildirMessage` object, use
+      its :meth:`~MaildirMessage.get_info` method instead, because
+      changes made by the message's :meth:`~MaildirMessage.set_info` method
+      are not reflected here until the mailbox's :meth:`__setitem__` method
+      is called.
+
+      .. versionadded:: 3.13
+
+
+   .. method:: set_info(key, info)
+
+      Set the info of the message corresponding to *key* to *info*.
+      Calling ``some_mailbox.set_info(key, info)`` is similar to ::
+
+         one_message = some_mailbox.get_message(key)
+         one_message.set_info(info)
+         some_mailbox[key] = one_message
+
+      but faster, because it does not open the message file.
+
+      If you do have a :class:`MaildirMessage` object, use
+      its :meth:`~MaildirMessage.set_info` method instead, because
+      changes made with this mailbox method will not be visible to the
+      message object's method, :meth:`~MaildirMessage.get_info`.
+
+      .. versionadded:: 3.13
+
+
   .. seealso::

      `maildir man page from Courier `_
@@ -477,7 +579,7 @@ Supported mailbox formats are Maildir, mbox, MH, Babyl, and MMDF.
       unlock()

    Three locking mechanisms are used---dot locking and, if available, the
-   :c:func:`flock` and :c:func:`lockf` system calls.
+   :c:func:`!flock` and :c:func:`!lockf` system calls.

   .. seealso::
@@ -588,7 +690,7 @@ Supported mailbox formats are Maildir, mbox, MH, Babyl, and MMDF.
       unlock()

    Three locking mechanisms are used---dot locking and, if available, the
-   :c:func:`flock` and :c:func:`lockf` system calls. For MH mailboxes, locking
+   :c:func:`!flock` and :c:func:`!lockf` system calls. For MH mailboxes, locking
    the mailbox means locking the :file:`.mh_sequences` file and, only for the
    duration of any operations that affect them, locking individual message files.
@@ -686,7 +788,7 @@ Supported mailbox formats are Maildir, mbox, MH, Babyl, and MMDF.
       unlock()

    Three locking mechanisms are used---dot locking and, if available, the
-   :c:func:`flock` and :c:func:`lockf` system calls.
+   :c:func:`!flock` and :c:func:`!lockf` system calls.

   .. seealso::
@@ -737,7 +839,7 @@ Supported mailbox formats are Maildir, mbox, MH, Babyl, and MMDF.
       unlock()

    Three locking mechanisms are used---dot locking and, if available, the
-   :c:func:`flock` and :c:func:`lockf` system calls.
+   :c:func:`!flock` and :c:func:`!lockf` system calls.

   .. seealso::
@@ -838,7 +940,7 @@ Supported mailbox formats are Maildir, mbox, MH, Babyl, and MMDF.
    .. note::

       A message is typically moved from :file:`new` to :file:`cur` after its
-      mailbox has been accessed, whether or not the message is has been
+      mailbox has been accessed, whether or not the message has been
       read. A message ``msg`` has been read if ``"S" in msg.get_flags()`` is
       ``True``.
diff --git a/Doc/library/mailcap.rst b/Doc/library/mailcap.rst deleted file mode 100644 index bfaedb460919915..000000000000000 --- a/Doc/library/mailcap.rst +++ /dev/null @@ -1,94 +0,0 @@ -:mod:`mailcap` --- Mailcap file handling -======================================== - -.. module:: mailcap - :synopsis: Mailcap file handling. - :deprecated: - -**Source code:** :source:`Lib/mailcap.py` - -.. deprecated-removed:: 3.11 3.13 - The :mod:`mailcap` module is deprecated - (see :pep:`PEP 594 <594#mailcap>` for details). - The :mod:`mimetypes` module provides an alternative. - --------------- - -Mailcap files are used to configure how MIME-aware applications such as mail -readers and web browsers react to files with different MIME types. (The name -"mailcap" is derived from the phrase "mail capability".) For example, a mailcap -file might contain a line like ``video/mpeg; xmpeg %s``. Then, if the user -encounters an email message or web document with the MIME type -:mimetype:`video/mpeg`, ``%s`` will be replaced by a filename (usually one -belonging to a temporary file) and the :program:`xmpeg` program can be -automatically started to view the file. - -The mailcap format is documented in :rfc:`1524`, "A User Agent Configuration -Mechanism For Multimedia Mail Format Information", but is not an internet -standard. However, mailcap files are supported on most Unix systems. - - -.. function:: findmatch(caps, MIMEtype, key='view', filename='/dev/null', plist=[]) - - Return a 2-tuple; the first element is a string containing the command line to - be executed (which can be passed to :func:`os.system`), and the second element - is the mailcap entry for a given MIME type. If no matching MIME type can be - found, ``(None, None)`` is returned. - - *key* is the name of the field desired, which represents the type of activity to - be performed; the default value is 'view', since in the most common case you - simply want to view the body of the MIME-typed data. Other possible values - might be 'compose' and 'edit', if you wanted to create a new body of the given - MIME type or alter the existing body data. See :rfc:`1524` for a complete list - of these fields. - - *filename* is the filename to be substituted for ``%s`` in the command line; the - default value is ``'/dev/null'`` which is almost certainly not what you want, so - usually you'll override it by specifying a filename. - - *plist* can be a list containing named parameters; the default value is simply - an empty list. Each entry in the list must be a string containing the parameter - name, an equals sign (``'='``), and the parameter's value. Mailcap entries can - contain named parameters like ``%{foo}``, which will be replaced by the value - of the parameter named 'foo'. For example, if the command line ``showpartial - %{id} %{number} %{total}`` was in a mailcap file, and *plist* was set to - ``['id=1', 'number=2', 'total=3']``, the resulting command line would be - ``'showpartial 1 2 3'``. - - In a mailcap file, the "test" field can optionally be specified to test some - external condition (such as the machine architecture, or the window system in - use) to determine whether or not the mailcap line applies. :func:`findmatch` - will automatically check such conditions and skip the entry if the check fails. - - .. 
versionchanged:: 3.11 - - To prevent security issues with shell metacharacters (symbols that have - special effects in a shell command line), ``findmatch`` will refuse - to inject ASCII characters other than alphanumerics and ``@+=:,./-_`` - into the returned command line. - - If a disallowed character appears in *filename*, ``findmatch`` will always - return ``(None, None)`` as if no entry was found. - If such a character appears elsewhere (a value in *plist* or in *MIMEtype*), - ``findmatch`` will ignore all mailcap entries which use that value. - A :mod:`warning ` will be raised in either case. - -.. function:: getcaps() - - Returns a dictionary mapping MIME types to a list of mailcap file entries. This - dictionary must be passed to the :func:`findmatch` function. An entry is stored - as a list of dictionaries, but it shouldn't be necessary to know the details of - this representation. - - The information is derived from all of the mailcap files found on the system. - Settings in the user's mailcap file :file:`$HOME/.mailcap` will override - settings in the system mailcap files :file:`/etc/mailcap`, - :file:`/usr/etc/mailcap`, and :file:`/usr/local/etc/mailcap`. - -An example usage:: - - >>> import mailcap - >>> d = mailcap.getcaps() - >>> mailcap.findmatch(d, 'video/mpeg', filename='tmp1223') - ('xmpeg tmp1223', {'view': 'xmpeg %s'}) - diff --git a/Doc/library/marshal.rst b/Doc/library/marshal.rst index 24f9dc1689da4a2..0556f19699dc158 100644 --- a/Doc/library/marshal.rst +++ b/Doc/library/marshal.rst @@ -15,8 +15,8 @@ undocumented on purpose; it may change between Python versions (although it rarely does). [#]_ .. index:: - module: pickle - module: shelve + pair: module; pickle + pair: module; shelve This is not a general "persistence" module. For general persistence and transfer of Python objects through RPC calls, see the modules :mod:`pickle` and diff --git a/Doc/library/math.rst b/Doc/library/math.rst index 559c6ec5dd9d8a6..9caf7230eed0aa9 100644 --- a/Doc/library/math.rst +++ b/Doc/library/math.rst @@ -71,8 +71,8 @@ Number-theoretic and representation functions Return *n* factorial as an integer. Raises :exc:`ValueError` if *n* is not integral or is negative. - .. deprecated:: 3.9 - Accepting floats with integral values (like ``5.0``) is deprecated. + .. versionchanged:: 3.10 + Floats with integral values (like ``5.0``) are no longer accepted. .. function:: floor(x) @@ -108,12 +108,7 @@ Number-theoretic and representation functions .. function:: fsum(iterable) Return an accurate floating point sum of values in the iterable. Avoids - loss of precision by tracking multiple intermediate partial sums: - - >>> sum([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) - 0.9999999999999999 - >>> fsum([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) - 1.0 + loss of precision by tracking multiple intermediate partial sums. The algorithm's accuracy depends on IEEE-754 arithmetic guarantees and the typical case where the rounding mode is half-even. On some non-Windows @@ -229,11 +224,11 @@ Number-theoretic and representation functions of *x* and are floats. -.. function:: nextafter(x, y) +.. function:: nextafter(x, y, steps=1) - Return the next floating-point value after *x* towards *y*. + Return the floating-point value *steps* steps after *x* towards *y*. - If *x* is equal to *y*, return *y*. + If *x* is equal to *y*, return *y*, unless *steps* is zero. Examples: @@ -244,6 +239,9 @@ Number-theoretic and representation functions See also :func:`math.ulp`. + .. 
versionchanged:: 3.12 + Added the *steps* argument. + .. versionadded:: 3.9 .. function:: perm(n, k=None) @@ -296,6 +294,22 @@ Number-theoretic and representation functions .. versionadded:: 3.7 +.. function:: sumprod(p, q) + + Return the sum of products of values from two iterables *p* and *q*. + + Raises :exc:`ValueError` if the inputs do not have the same length. + + Roughly equivalent to:: + + sum(itertools.starmap(operator.mul, zip(p, q, strict=True))) + + For float and mixed int/float inputs, the intermediate products + and sums are computed with extended precision. + + .. versionadded:: 3.12 + + .. function:: trunc(x) Return *x* with the fractional part diff --git a/Doc/library/mmap.rst b/Doc/library/mmap.rst index c4f8781f2ac9936..3fa69e717329e44 100644 --- a/Doc/library/mmap.rst +++ b/Doc/library/mmap.rst @@ -19,7 +19,7 @@ the current file position, and :meth:`seek` through the file to different positi A memory-mapped file is created by the :class:`~mmap.mmap` constructor, which is different on Unix and on Windows. In either case you must provide a file descriptor for a file opened for update. If you wish to map an existing Python -file object, use its :meth:`fileno` method to obtain the correct value for the +file object, use its :meth:`~io.IOBase.fileno` method to obtain the correct value for the *fileno* parameter. Otherwise, you can open the file using the :func:`os.open` function, which returns a file descriptor directly (the file still needs to be closed when done). @@ -285,6 +285,14 @@ To map anonymous memory, -1 should be passed as the fileno along with the length values are ``os.SEEK_CUR`` or ``1`` (seek relative to the current position) and ``os.SEEK_END`` or ``2`` (seek relative to the file's end). + .. versionchanged:: 3.13 + Return the new absolute position instead of ``None``. + + .. method:: seekable() + + Return whether the file supports seeking, and the return value is always ``True``. + + .. versionadded:: 3.13 .. method:: size() @@ -370,11 +378,19 @@ MAP_* Constants MAP_ANONYMOUS MAP_POPULATE MAP_STACK + MAP_ALIGNED_SUPER + MAP_CONCEAL - These are the various flags that can be passed to :meth:`mmap.mmap`. Note that some options might not be present on some systems. + These are the various flags that can be passed to :meth:`mmap.mmap`. :data:`MAP_ALIGNED_SUPER` + is only available at FreeBSD and :data:`MAP_CONCEAL` is only available at OpenBSD. Note + that some options might not be present on some systems. .. versionchanged:: 3.10 - Added MAP_POPULATE constant. + Added :data:`MAP_POPULATE` constant. .. versionadded:: 3.11 - Added MAP_STACK constant. + Added :data:`MAP_STACK` constant. + + .. versionadded:: 3.12 + Added :data:`MAP_ALIGNED_SUPER` constant. + Added :data:`MAP_CONCEAL` constant. diff --git a/Doc/library/msilib.rst b/Doc/library/msilib.rst deleted file mode 100644 index fbe55db9372300d..000000000000000 --- a/Doc/library/msilib.rst +++ /dev/null @@ -1,565 +0,0 @@ -:mod:`msilib` --- Read and write Microsoft Installer files -========================================================== - -.. module:: msilib - :platform: Windows - :synopsis: Creation of Microsoft Installer files, and CAB files. - :deprecated: - -.. moduleauthor:: Martin v. Löwis -.. sectionauthor:: Martin v. Löwis - -**Source code:** :source:`Lib/msilib/__init__.py` - -.. index:: single: msi - -.. deprecated-removed:: 3.11 3.13 - The :mod:`msilib` module is deprecated - (see :pep:`PEP 594 <594#msilib>` for details). 
- --------------- - -The :mod:`msilib` supports the creation of Microsoft Installer (``.msi``) files. -Because these files often contain an embedded "cabinet" file (``.cab``), it also -exposes an API to create CAB files. Support for reading ``.cab`` files is -currently not implemented; read support for the ``.msi`` database is possible. - -This package aims to provide complete access to all tables in an ``.msi`` file, -therefore, it is a fairly low-level API. One primary application of this -package is the creation of Python installer package itself (although that currently -uses a different version of ``msilib``). - -The package contents can be roughly split into four parts: low-level CAB -routines, low-level MSI routines, higher-level MSI routines, and standard table -structures. - - -.. function:: FCICreate(cabname, files) - - Create a new CAB file named *cabname*. *files* must be a list of tuples, each - containing the name of the file on disk, and the name of the file inside the CAB - file. - - The files are added to the CAB file in the order they appear in the list. All - files are added into a single CAB file, using the MSZIP compression algorithm. - - Callbacks to Python for the various steps of MSI creation are currently not - exposed. - - -.. function:: UuidCreate() - - Return the string representation of a new unique identifier. This wraps the - Windows API functions :c:func:`UuidCreate` and :c:func:`UuidToString`. - - -.. function:: OpenDatabase(path, persist) - - Return a new database object by calling MsiOpenDatabase. *path* is the file - name of the MSI file; *persist* can be one of the constants - ``MSIDBOPEN_CREATEDIRECT``, ``MSIDBOPEN_CREATE``, ``MSIDBOPEN_DIRECT``, - ``MSIDBOPEN_READONLY``, or ``MSIDBOPEN_TRANSACT``, and may include the flag - ``MSIDBOPEN_PATCHFILE``. See the Microsoft documentation for the meaning of - these flags; depending on the flags, an existing database is opened, or a new - one created. - - -.. function:: CreateRecord(count) - - Return a new record object by calling :c:func:`MSICreateRecord`. *count* is the - number of fields of the record. - - -.. function:: init_database(name, schema, ProductName, ProductCode, ProductVersion, Manufacturer) - - Create and return a new database *name*, initialize it with *schema*, and set - the properties *ProductName*, *ProductCode*, *ProductVersion*, and - *Manufacturer*. - - *schema* must be a module object containing ``tables`` and - ``_Validation_records`` attributes; typically, :mod:`msilib.schema` should be - used. - - The database will contain just the schema and the validation records when this - function returns. - - -.. function:: add_data(database, table, records) - - Add all *records* to the table named *table* in *database*. - - The *table* argument must be one of the predefined tables in the MSI schema, - e.g. ``'Feature'``, ``'File'``, ``'Component'``, ``'Dialog'``, ``'Control'``, - etc. - - *records* should be a list of tuples, each one containing all fields of a - record according to the schema of the table. For optional fields, - ``None`` can be passed. - - Field values can be ints, strings, or instances of the Binary class. - - -.. class:: Binary(filename) - - Represents entries in the Binary table; inserting such an object using - :func:`add_data` reads the file named *filename* into the table. - - -.. function:: add_tables(database, module) - - Add all table content from *module* to *database*. 
*module* must contain an - attribute *tables* listing all tables for which content should be added, and one - attribute per table that has the actual content. - - This is typically used to install the sequence tables. - - -.. function:: add_stream(database, name, path) - - Add the file *path* into the ``_Stream`` table of *database*, with the stream - name *name*. - - -.. function:: gen_uuid() - - Return a new UUID, in the format that MSI typically requires (i.e. in curly - braces, and with all hexdigits in uppercase). - - -.. seealso:: - - `FCICreate `_ - `UuidCreate `_ - `UuidToString `_ - -.. _database-objects: - -Database Objects ----------------- - - -.. method:: Database.OpenView(sql) - - Return a view object, by calling :c:func:`MSIDatabaseOpenView`. *sql* is the SQL - statement to execute. - - -.. method:: Database.Commit() - - Commit the changes pending in the current transaction, by calling - :c:func:`MSIDatabaseCommit`. - - -.. method:: Database.GetSummaryInformation(count) - - Return a new summary information object, by calling - :c:func:`MsiGetSummaryInformation`. *count* is the maximum number of updated - values. - -.. method:: Database.Close() - - Close the database object, through :c:func:`MsiCloseHandle`. - - .. versionadded:: 3.7 - -.. seealso:: - - `MSIDatabaseOpenView `_ - `MSIDatabaseCommit `_ - `MSIGetSummaryInformation `_ - `MsiCloseHandle `_ - -.. _view-objects: - -View Objects ------------- - - -.. method:: View.Execute(params) - - Execute the SQL query of the view, through :c:func:`MSIViewExecute`. If - *params* is not ``None``, it is a record describing actual values of the - parameter tokens in the query. - - -.. method:: View.GetColumnInfo(kind) - - Return a record describing the columns of the view, through calling - :c:func:`MsiViewGetColumnInfo`. *kind* can be either ``MSICOLINFO_NAMES`` or - ``MSICOLINFO_TYPES``. - - -.. method:: View.Fetch() - - Return a result record of the query, through calling :c:func:`MsiViewFetch`. - - -.. method:: View.Modify(kind, data) - - Modify the view, by calling :c:func:`MsiViewModify`. *kind* can be one of - ``MSIMODIFY_SEEK``, ``MSIMODIFY_REFRESH``, ``MSIMODIFY_INSERT``, - ``MSIMODIFY_UPDATE``, ``MSIMODIFY_ASSIGN``, ``MSIMODIFY_REPLACE``, - ``MSIMODIFY_MERGE``, ``MSIMODIFY_DELETE``, ``MSIMODIFY_INSERT_TEMPORARY``, - ``MSIMODIFY_VALIDATE``, ``MSIMODIFY_VALIDATE_NEW``, - ``MSIMODIFY_VALIDATE_FIELD``, or ``MSIMODIFY_VALIDATE_DELETE``. - - *data* must be a record describing the new data. - - -.. method:: View.Close() - - Close the view, through :c:func:`MsiViewClose`. - - -.. seealso:: - - `MsiViewExecute `_ - `MSIViewGetColumnInfo `_ - `MsiViewFetch `_ - `MsiViewModify `_ - `MsiViewClose `_ - -.. _summary-objects: - -Summary Information Objects ---------------------------- - - -.. method:: SummaryInformation.GetProperty(field) - - Return a property of the summary, through :c:func:`MsiSummaryInfoGetProperty`. - *field* is the name of the property, and can be one of the constants - ``PID_CODEPAGE``, ``PID_TITLE``, ``PID_SUBJECT``, ``PID_AUTHOR``, - ``PID_KEYWORDS``, ``PID_COMMENTS``, ``PID_TEMPLATE``, ``PID_LASTAUTHOR``, - ``PID_REVNUMBER``, ``PID_LASTPRINTED``, ``PID_CREATE_DTM``, - ``PID_LASTSAVE_DTM``, ``PID_PAGECOUNT``, ``PID_WORDCOUNT``, ``PID_CHARCOUNT``, - ``PID_APPNAME``, or ``PID_SECURITY``. - - -.. method:: SummaryInformation.GetPropertyCount() - - Return the number of summary properties, through - :c:func:`MsiSummaryInfoGetPropertyCount`. - - -.. 
method:: SummaryInformation.SetProperty(field, value) - - Set a property through :c:func:`MsiSummaryInfoSetProperty`. *field* can have the - same values as in :meth:`GetProperty`, *value* is the new value of the property. - Possible value types are integer and string. - - -.. method:: SummaryInformation.Persist() - - Write the modified properties to the summary information stream, using - :c:func:`MsiSummaryInfoPersist`. - - -.. seealso:: - - `MsiSummaryInfoGetProperty `_ - `MsiSummaryInfoGetPropertyCount `_ - `MsiSummaryInfoSetProperty `_ - `MsiSummaryInfoPersist `_ - -.. _record-objects: - -Record Objects --------------- - - -.. method:: Record.GetFieldCount() - - Return the number of fields of the record, through - :c:func:`MsiRecordGetFieldCount`. - - -.. method:: Record.GetInteger(field) - - Return the value of *field* as an integer where possible. *field* must - be an integer. - - -.. method:: Record.GetString(field) - - Return the value of *field* as a string where possible. *field* must - be an integer. - - -.. method:: Record.SetString(field, value) - - Set *field* to *value* through :c:func:`MsiRecordSetString`. *field* must be an - integer; *value* a string. - - -.. method:: Record.SetStream(field, value) - - Set *field* to the contents of the file named *value*, through - :c:func:`MsiRecordSetStream`. *field* must be an integer; *value* a string. - - -.. method:: Record.SetInteger(field, value) - - Set *field* to *value* through :c:func:`MsiRecordSetInteger`. Both *field* and - *value* must be an integer. - - -.. method:: Record.ClearData() - - Set all fields of the record to 0, through :c:func:`MsiRecordClearData`. - - -.. seealso:: - - `MsiRecordGetFieldCount `_ - `MsiRecordSetString `_ - `MsiRecordSetStream `_ - `MsiRecordSetInteger `_ - `MsiRecordClearData `_ - -.. _msi-errors: - -Errors ------- - -All wrappers around MSI functions raise :exc:`MSIError`; the string inside the -exception will contain more detail. - - -.. _cab: - -CAB Objects ------------ - - -.. class:: CAB(name) - - The class :class:`CAB` represents a CAB file. During MSI construction, files - will be added simultaneously to the ``Files`` table, and to a CAB file. Then, - when all files have been added, the CAB file can be written, then added to the - MSI file. - - *name* is the name of the CAB file in the MSI file. - - - .. method:: append(full, file, logical) - - Add the file with the pathname *full* to the CAB file, under the name - *logical*. If there is already a file named *logical*, a new file name is - created. - - Return the index of the file in the CAB file, and the new name of the file - inside the CAB file. - - - .. method:: commit(database) - - Generate a CAB file, add it as a stream to the MSI file, put it into the - ``Media`` table, and remove the generated file from the disk. - - -.. _msi-directory: - -Directory Objects ------------------ - - -.. class:: Directory(database, cab, basedir, physical, logical, default, [componentflags]) - - Create a new directory in the Directory table. There is a current component at - each point in time for the directory, which is either explicitly created through - :meth:`start_component`, or implicitly when files are added for the first time. - Files are added into the current component, and into the cab file. To create a - directory, a base directory object needs to be specified (can be ``None``), the - path to the physical directory, and a logical directory name. *default* - specifies the DefaultDir slot in the directory table. 
*componentflags* specifies - the default flags that new components get. - - - .. method:: start_component(component=None, feature=None, flags=None, keyfile=None, uuid=None) - - Add an entry to the Component table, and make this component the current - component for this directory. If no component name is given, the directory - name is used. If no *feature* is given, the current feature is used. If no - *flags* are given, the directory's default flags are used. If no *keyfile* - is given, the KeyPath is left null in the Component table. - - - .. method:: add_file(file, src=None, version=None, language=None) - - Add a file to the current component of the directory, starting a new one - if there is no current component. By default, the file name in the source - and the file table will be identical. If the *src* file is specified, it - is interpreted relative to the current directory. Optionally, a *version* - and a *language* can be specified for the entry in the File table. - - - .. method:: glob(pattern, exclude=None) - - Add a list of files to the current component as specified in the glob - pattern. Individual files can be excluded in the *exclude* list. - - - .. method:: remove_pyc() - - Remove ``.pyc`` files on uninstall. - - -.. seealso:: - - `Directory Table `_ - `File Table `_ - `Component Table `_ - `FeatureComponents Table `_ - -.. _features: - -Features --------- - - -.. class:: Feature(db, id, title, desc, display, level=1, parent=None, directory=None, attributes=0) - - Add a new record to the ``Feature`` table, using the values *id*, *parent.id*, - *title*, *desc*, *display*, *level*, *directory*, and *attributes*. The - resulting feature object can be passed to the :meth:`start_component` method of - :class:`Directory`. - - - .. method:: set_current() - - Make this feature the current feature of :mod:`msilib`. New components are - automatically added to the default feature, unless a feature is explicitly - specified. - - -.. seealso:: - - `Feature Table `_ - -.. _msi-gui: - -GUI classes ------------ - -:mod:`msilib` provides several classes that wrap the GUI tables in an MSI -database. However, no standard user interface is provided. - - -.. class:: Control(dlg, name) - - Base class of the dialog controls. *dlg* is the dialog object the control - belongs to, and *name* is the control's name. - - - .. method:: event(event, argument, condition=1, ordering=None) - - Make an entry into the ``ControlEvent`` table for this control. - - - .. method:: mapping(event, attribute) - - Make an entry into the ``EventMapping`` table for this control. - - - .. method:: condition(action, condition) - - Make an entry into the ``ControlCondition`` table for this control. - - -.. class:: RadioButtonGroup(dlg, name, property) - - Create a radio button control named *name*. *property* is the installer property - that gets set when a radio button is selected. - - - .. method:: add(name, x, y, width, height, text, value=None) - - Add a radio button named *name* to the group, at the coordinates *x*, *y*, - *width*, *height*, and with the label *text*. If *value* is ``None``, it - defaults to *name*. - - -.. class:: Dialog(db, name, x, y, w, h, attr, title, first, default, cancel) - - Return a new :class:`Dialog` object. An entry in the ``Dialog`` table is made, - with the specified coordinates, dialog attributes, title, name of the first, - default, and cancel controls. - - - .. 
method:: control(name, type, x, y, width, height, attributes, property, text, control_next, help) - - Return a new :class:`Control` object. An entry in the ``Control`` table is - made with the specified parameters. - - This is a generic method; for specific types, specialized methods are - provided. - - - .. method:: text(name, x, y, width, height, attributes, text) - - Add and return a ``Text`` control. - - - .. method:: bitmap(name, x, y, width, height, text) - - Add and return a ``Bitmap`` control. - - - .. method:: line(name, x, y, width, height) - - Add and return a ``Line`` control. - - - .. method:: pushbutton(name, x, y, width, height, attributes, text, next_control) - - Add and return a ``PushButton`` control. - - - .. method:: radiogroup(name, x, y, width, height, attributes, property, text, next_control) - - Add and return a ``RadioButtonGroup`` control. - - - .. method:: checkbox(name, x, y, width, height, attributes, property, text, next_control) - - Add and return a ``CheckBox`` control. - - -.. seealso:: - - `Dialog Table `_ - `Control Table `_ - `Control Types `_ - `ControlCondition Table `_ - `ControlEvent Table `_ - `EventMapping Table `_ - `RadioButton Table `_ - -.. _msi-tables: - -Precomputed tables ------------------- - -:mod:`msilib` provides a few subpackages that contain only schema and table -definitions. Currently, these definitions are based on MSI version 2.0. - - -.. data:: schema - - This is the standard MSI schema for MSI 2.0, with the *tables* variable - providing a list of table definitions, and *_Validation_records* providing the - data for MSI validation. - - -.. data:: sequence - - This module contains table contents for the standard sequence tables: - *AdminExecuteSequence*, *AdminUISequence*, *AdvtExecuteSequence*, - *InstallExecuteSequence*, and *InstallUISequence*. - - -.. data:: text - - This module contains definitions for the UIText and ActionText tables, for the - standard installer actions. diff --git a/Doc/library/msvcrt.rst b/Doc/library/msvcrt.rst index 42fffee6a0f4492..0b059e746c61af3 100644 --- a/Doc/library/msvcrt.rst +++ b/Doc/library/msvcrt.rst @@ -10,8 +10,8 @@ -------------- These functions provide access to some useful capabilities on Windows platforms. -Some higher-level modules use these functions to build the Windows -implementations of their services. For example, the :mod:`getpass` module uses +Some higher-level modules use these functions to build the Windows +implementations of their services. For example, the :mod:`getpass` module uses this in the implementation of the :func:`getpass` function. Further documentation on these functions can be found in the Platform API @@ -35,11 +35,11 @@ File Operations .. function:: locking(fd, mode, nbytes) - Lock part of a file based on file descriptor *fd* from the C runtime. Raises - :exc:`OSError` on failure. The locked region of the file extends from the + Lock part of a file based on file descriptor *fd* from the C runtime. Raises + :exc:`OSError` on failure. The locked region of the file extends from the current file position for *nbytes* bytes, and may continue beyond the end of the - file. *mode* must be one of the :const:`LK_\*` constants listed below. Multiple - regions in a file may be locked at the same time, but may not overlap. Adjacent + file. *mode* must be one of the :const:`!LK_\*` constants listed below. Multiple + regions in a file may be locked at the same time, but may not overlap. Adjacent regions are not merged; they must be unlocked individually. .. 
audit-event:: msvcrt.locking fd,mode,nbytes msvcrt.locking @@ -49,7 +49,7 @@ File Operations LK_RLCK Locks the specified bytes. If the bytes cannot be locked, the program - immediately tries again after 1 second. If, after 10 attempts, the bytes cannot + immediately tries again after 1 second. If, after 10 attempts, the bytes cannot be locked, :exc:`OSError` is raised. @@ -74,9 +74,9 @@ File Operations .. function:: open_osfhandle(handle, flags) - Create a C runtime file descriptor from the file handle *handle*. The *flags* + Create a C runtime file descriptor from the file handle *handle*. The *flags* parameter should be a bitwise OR of :const:`os.O_APPEND`, :const:`os.O_RDONLY`, - and :const:`os.O_TEXT`. The returned file descriptor may be used as a parameter + and :const:`os.O_TEXT`. The returned file descriptor may be used as a parameter to :func:`os.fdopen` to create a file object. .. audit-event:: msvcrt.open_osfhandle handle,flags msvcrt.open_osfhandle @@ -84,7 +84,7 @@ File Operations .. function:: get_osfhandle(fd) - Return the file handle for the file descriptor *fd*. Raises :exc:`OSError` if + Return the file handle for the file descriptor *fd*. Raises :exc:`OSError` if *fd* is not recognized. .. audit-event:: msvcrt.get_osfhandle fd msvcrt.get_osfhandle @@ -98,13 +98,14 @@ Console I/O .. function:: kbhit() - Return ``True`` if a keypress is waiting to be read. + Returns a nonzero value if a keypress is waiting to be read. Otherwise, + return 0. .. function:: getch() Read a keypress and return the resulting character as a byte string. - Nothing is echoed to the console. This call will block if a keypress + Nothing is echoed to the console. This call will block if a keypress is not already available, but will not wait for :kbd:`Enter` to be pressed. If the pressed key was a special function key, this will return ``'\000'`` or ``'\xe0'``; the next call will return the keycode. @@ -118,7 +119,7 @@ Console I/O .. function:: getche() - Similar to :func:`getch`, but the keypress will be echoed if it represents a + Similar to :func:`getch`, but the keypress will be echoed if it represents a printable character. @@ -157,4 +158,93 @@ Other Functions .. function:: heapmin() Force the :c:func:`malloc` heap to clean itself up and return unused blocks to - the operating system. On failure, this raises :exc:`OSError`. + the operating system. On failure, this raises :exc:`OSError`. + + +.. function:: set_error_mode(mode) + + Changes the location where the C runtime writes an error message for an error + that might end the program. *mode* must be one of the :const:`!OUT_\*` + constants listed below or :const:`REPORT_ERRMODE`. Returns the old setting + or -1 if an error occurs. Only available in + :ref:`debug build of Python `. + + +.. data:: OUT_TO_DEFAULT + + Error sink is determined by the app's type. Only available in + :ref:`debug build of Python `. + + +.. data:: OUT_TO_STDERR + + Error sink is a standard error. Only available in + :ref:`debug build of Python `. + + +.. data:: OUT_TO_MSGBOX + + Error sink is a message box. Only available in + :ref:`debug build of Python `. + + +.. data:: REPORT_ERRMODE + + Report the current error mode value. Only available in + :ref:`debug build of Python `. + + +.. function:: CrtSetReportMode(type, mode) + + Specifies the destination or destinations for a specific report type + generated by :c:func:`!_CrtDbgReport` in the MS VC++ runtime. *type* must be + one of the :const:`!CRT_\*` constants listed below. 
*mode* must be one of the + :const:`!CRTDBG_\*` constants listed below. Only available in + :ref:`debug build of Python `. + + +.. function:: CrtSetReportFile(type, file) + + After you use :func:`CrtSetReportMode` to specify :const:`CRTDBG_MODE_FILE`, + you can specify the file handle to receive the message text. *type* must be + one of the :const:`!CRT_\*` constants listed below. *file* should be the file + handle you want specified. Only available in + :ref:`debug build of Python `. + + +.. data:: CRT_WARN + + Warnings, messages, and information that don't need immediate attention. + + +.. data:: CRT_ERROR + + Errors, unrecoverable problems, and issues that require immediate attention. + + +.. data:: CRT_ASSERT + + Assertion failures. + + +.. data:: CRTDBG_MODE_DEBUG + + Writes the message to the debugger's output window. + + +.. data:: CRTDBG_MODE_FILE + + Writes the message to a user-supplied file handle. :func:`CrtSetReportFile` + should be called to define the specific file or stream to use as + the destination. + + +.. data:: CRTDBG_MODE_WNDW + + Creates a message box to display the message along with the ``Abort``, + ``Retry``, and ``Ignore`` buttons. + + +.. data:: CRTDBG_REPORT_MODE + + Returns the current *mode* for the specified *type*. diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst index 5516084780673fb..789a84b02d59d23 100644 --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -19,7 +19,7 @@ offers both local and remote concurrency, effectively side-stepping the :term:`Global Interpreter Lock ` by using subprocesses instead of threads. Due to this, the :mod:`multiprocessing` module allows the programmer to fully -leverage multiple processors on a given machine. It runs on both Unix and +leverage multiple processors on a given machine. It runs on both POSIX and Windows. The :mod:`multiprocessing` module also introduces APIs which do not have @@ -99,11 +99,11 @@ necessary, see :ref:`multiprocessing-programming`. +.. _multiprocessing-start-methods: + Contexts and start methods ~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. _multiprocessing-start-methods: - Depending on the platform, :mod:`multiprocessing` supports three ways to start a process. These *start methods* are @@ -115,7 +115,7 @@ to start a process. These *start methods* are will not be inherited. Starting a process using this method is rather slow compared to using *fork* or *forkserver*. - Available on Unix and Windows. The default on Windows and macOS. + Available on POSIX and Windows platforms. The default on Windows and macOS. *fork* The parent process uses :func:`os.fork` to fork the Python @@ -124,32 +124,45 @@ to start a process. These *start methods* are inherited by the child process. Note that safely forking a multithreaded process is problematic. - Available on Unix only. The default on Unix. + Available on POSIX systems. Currently the default on POSIX except macOS. + + .. note:: + The default start method will change away from *fork* in Python 3.14. + Code that requires *fork* should explicitly specify that via + :func:`get_context` or :func:`set_start_method`. + + .. versionchanged:: 3.12 + If Python is able to detect that your process has multiple threads, the + :func:`os.fork` function that this start method calls internally will + raise a :exc:`DeprecationWarning`. Use a different start method. + See the :func:`os.fork` documentation for further explanation. 
*forkserver* When the program starts and selects the *forkserver* start method, - a server process is started. From then on, whenever a new process + a server process is spawned. From then on, whenever a new process is needed, the parent process connects to the server and requests - that it fork a new process. The fork server process is single - threaded so it is safe for it to use :func:`os.fork`. No - unnecessary resources are inherited. + that it fork a new process. The fork server process is single threaded + unless system libraries or preloaded imports spawn threads as a + side-effect so it is generally safe for it to use :func:`os.fork`. + No unnecessary resources are inherited. + + Available on POSIX platforms which support passing file descriptors + over Unix pipes such as Linux. - Available on Unix platforms which support passing file descriptors - over Unix pipes. .. versionchanged:: 3.8 On macOS, the *spawn* start method is now the default. The *fork* start method should be considered unsafe as it can lead to crashes of the - subprocess. See :issue:`33725`. + subprocess as macOS system libraries may start threads. See :issue:`33725`. .. versionchanged:: 3.4 - *spawn* added on all Unix platforms, and *forkserver* added for - some Unix platforms. + *spawn* added on all POSIX platforms, and *forkserver* added for + some POSIX platforms. Child processes no longer inherit all of the parents inheritable handles on Windows. -On Unix using the *spawn* or *forkserver* start methods will also +On POSIX using the *spawn* or *forkserver* start methods will also start a *resource tracker* process which tracks the unlinked named system resources (such as named semaphores or :class:`~multiprocessing.shared_memory.SharedMemory` objects) created @@ -211,10 +224,10 @@ library user. .. warning:: - The ``'spawn'`` and ``'forkserver'`` start methods cannot currently + The ``'spawn'`` and ``'forkserver'`` start methods generally cannot be used with "frozen" executables (i.e., binaries produced by - packages like **PyInstaller** and **cx_Freeze**) on Unix. - The ``'fork'`` start method does work. + packages like **PyInstaller** and **cx_Freeze**) on POSIX systems. + The ``'fork'`` start method may work if code does not use threads. Exchanging objects between processes @@ -453,16 +466,16 @@ process which created it. ... return x*x ... >>> with p: - ... p.map(f, [1,2,3]) + ... p.map(f, [1,2,3]) Process PoolWorker-1: Process PoolWorker-2: Process PoolWorker-3: Traceback (most recent call last): Traceback (most recent call last): Traceback (most recent call last): - AttributeError: 'module' object has no attribute 'f' - AttributeError: 'module' object has no attribute 'f' - AttributeError: 'module' object has no attribute 'f' + AttributeError: Can't get attribute 'f' on )> + AttributeError: Can't get attribute 'f' on )> + AttributeError: Can't get attribute 'f' on )> (If you try this it will actually output three full tracebacks interleaved in a semi-random fashion, and then you may have to @@ -629,14 +642,14 @@ The :mod:`multiprocessing` package mostly replicates the API of the calling :meth:`join()` is simpler. On Windows, this is an OS handle usable with the ``WaitForSingleObject`` - and ``WaitForMultipleObjects`` family of API calls. On Unix, this is + and ``WaitForMultipleObjects`` family of API calls. On POSIX, this is a file descriptor usable with primitives from the :mod:`select` module. .. versionadded:: 3.3 .. method:: terminate() - Terminate the process. 
On Unix this is done using the ``SIGTERM`` signal; + Terminate the process. On POSIX this is done using the ``SIGTERM`` signal; on Windows :c:func:`TerminateProcess` is used. Note that exit handlers and finally clauses, etc., will not be executed. @@ -653,7 +666,7 @@ The :mod:`multiprocessing` package mostly replicates the API of the .. method:: kill() - Same as :meth:`terminate()` but using the ``SIGKILL`` signal on Unix. + Same as :meth:`terminate()` but using the ``SIGKILL`` signal on POSIX. .. versionadded:: 3.7 @@ -676,16 +689,17 @@ The :mod:`multiprocessing` package mostly replicates the API of the .. doctest:: >>> import multiprocessing, time, signal - >>> p = multiprocessing.Process(target=time.sleep, args=(1000,)) + >>> mp_context = multiprocessing.get_context('spawn') + >>> p = mp_context.Process(target=time.sleep, args=(1000,)) >>> print(p, p.is_alive()) - False + <...Process ... initial> False >>> p.start() >>> print(p, p.is_alive()) - True + <...Process ... started> True >>> p.terminate() >>> time.sleep(0.1) >>> print(p, p.is_alive()) - False + <...Process ... stopped exitcode=-SIGTERM> False >>> p.exitcode == -signal.SIGTERM True @@ -815,7 +829,7 @@ For an example of the usage of queues for interprocess communication see Return the approximate size of the queue. Because of multithreading/multiprocessing semantics, this number is not reliable. - Note that this may raise :exc:`NotImplementedError` on Unix platforms like + Note that this may raise :exc:`NotImplementedError` on platforms like macOS where ``sem_getvalue()`` is not implemented. .. method:: empty() @@ -982,13 +996,20 @@ Miscellaneous This number is not equivalent to the number of CPUs the current process can use. The number of usable CPUs can be obtained with - ``len(os.sched_getaffinity(0))`` + :func:`os.process_cpu_count` (or ``len(os.sched_getaffinity(0))``). When the number of CPUs cannot be determined a :exc:`NotImplementedError` is raised. .. seealso:: :func:`os.cpu_count` + :func:`os.process_cpu_count` + + .. versionchanged:: 3.13 + + The return value can also be overridden using the + :option:`-X cpu_count <-X>` flag or :envvar:`PYTHON_CPU_COUNT` as this is + merely a wrapper around the :mod:`os` cpu count APIs. .. function:: current_process() @@ -1034,9 +1055,8 @@ Miscellaneous Returns a list of the supported start methods, the first of which is the default. The possible start methods are ``'fork'``, - ``'spawn'`` and ``'forkserver'``. On Windows only ``'spawn'`` is - available. On Unix ``'fork'`` and ``'spawn'`` are always - supported, with ``'fork'`` being the default. + ``'spawn'`` and ``'forkserver'``. Not all platforms support all + methods. See :ref:`multiprocessing-start-methods`. .. versionadded:: 3.4 @@ -1048,7 +1068,7 @@ Miscellaneous If *method* is ``None`` then the default context is returned. Otherwise *method* should be ``'fork'``, ``'spawn'``, ``'forkserver'``. :exc:`ValueError` is raised if the specified - start method is not available. + start method is not available. See :ref:`multiprocessing-start-methods`. .. versionadded:: 3.4 @@ -1062,8 +1082,7 @@ Miscellaneous is true then ``None`` is returned. The return value can be ``'fork'``, ``'spawn'``, ``'forkserver'`` - or ``None``. ``'fork'`` is the default on Unix, while ``'spawn'`` is - the default on Windows and macOS. + or ``None``. See :ref:`multiprocessing-start-methods`. .. versionchanged:: 3.8 @@ -1084,20 +1103,42 @@ Miscellaneous before they can create child processes. .. 
versionchanged:: 3.4 - Now supported on Unix when the ``'spawn'`` start method is used. + Now supported on POSIX when the ``'spawn'`` start method is used. .. versionchanged:: 3.11 Accepts a :term:`path-like object`. -.. function:: set_start_method(method) +.. function:: set_forkserver_preload(module_names) + + Set a list of module names for the forkserver main process to attempt to + import so that their already imported state is inherited by forked + processes. Any :exc:`ImportError` when doing so is silently ignored. + This can be used as a performance enhancement to avoid repeated work + in every process. + + For this to work, it must be called before the forkserver process has been + launched (before creating a :class:`Pool` or starting a :class:`Process`). + + Only meaningful when using the ``'forkserver'`` start method. + See :ref:`multiprocessing-start-methods`. + + .. versionadded:: 3.4 + +.. function:: set_start_method(method, force=False) Set the method which should be used to start child processes. - *method* can be ``'fork'``, ``'spawn'`` or ``'forkserver'``. + The *method* argument can be ``'fork'``, ``'spawn'`` or ``'forkserver'``. + Raises :exc:`RuntimeError` if the start method has already been set and *force* + is not ``True``. If *method* is ``None`` and *force* is ``True`` then the start + method is set to ``None``. If *method* is ``None`` and *force* is ``False`` + then the context is set to the default context. Note that this should be called at most once, and it should be protected inside the ``if __name__ == '__main__'`` clause of the main module. + See :ref:`multiprocessing-start-methods`. + .. versionadded:: 3.4 .. note:: @@ -1902,7 +1943,8 @@ their parent process exits. The manager classes are defined in the .. doctest:: - >>> manager = multiprocessing.Manager() + >>> mp_context = multiprocessing.get_context('spawn') + >>> manager = mp_context.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' @@ -2014,8 +2056,8 @@ the proxy). In this way, a proxy can be used just like its referent can: .. doctest:: - >>> from multiprocessing import Manager - >>> manager = Manager() + >>> mp_context = multiprocessing.get_context('spawn') + >>> manager = mp_context.Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print(l) [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] @@ -2179,7 +2221,7 @@ with the :class:`Pool` class. callbacks and has a parallel map implementation. *processes* is the number of worker processes to use. If *processes* is - ``None`` then the number returned by :func:`os.cpu_count` is used. + ``None`` then the number returned by :func:`os.process_cpu_count` is used. If *initializer* is not ``None`` then each worker process will call ``initializer(*initargs)`` when it starts. @@ -2214,6 +2256,10 @@ with the :class:`Pool` class. .. versionadded:: 3.4 *context* + .. versionchanged:: 3.13 + *processes* uses :func:`os.process_cpu_count` by default, instead of + :func:`os.cpu_count`. + .. note:: Worker processes within a :class:`Pool` typically live for the complete @@ -2516,7 +2562,7 @@ multiple connections at the same time. *timeout* is ``None`` then it will block for an unlimited period. A negative timeout is equivalent to a zero timeout. - For both Unix and Windows, an object can appear in *object_list* if + For both POSIX and Windows, an object can appear in *object_list* if it is * a readable :class:`~multiprocessing.connection.Connection` object; @@ -2527,7 +2573,7 @@ multiple connections at the same time. 
A connection or socket object is ready when there is data available to be read from it, or the other end has been closed. - **Unix**: ``wait(object_list, timeout)`` almost equivalent + **POSIX**: ``wait(object_list, timeout)`` almost equivalent ``select.select(object_list, [], [], timeout)``. The difference is that, if :func:`select.select` is interrupted by a signal, it can raise :exc:`OSError` with an error number of ``EINTR``, whereas @@ -2536,7 +2582,7 @@ multiple connections at the same time. **Windows**: An item in *object_list* must either be an integer handle which is waitable (according to the definition used by the documentation of the Win32 function ``WaitForMultipleObjects()``) - or it can be an object with a :meth:`fileno` method which returns a + or it can be an object with a :meth:`~io.IOBase.fileno` method which returns a socket handle or pipe handle. (Note that pipe handles and socket handles are **not** waitable handles.) @@ -2584,7 +2630,6 @@ server:: The following code uses :func:`~multiprocessing.connection.wait` to wait for messages from multiple processes at once:: - import time, random from multiprocessing import Process, Pipe, current_process from multiprocessing.connection import wait @@ -2678,7 +2723,7 @@ handler type) for messages from different processes to get mixed up. Returns the logger used by :mod:`multiprocessing`. If necessary, a new one will be created. - When first created the logger has level :data:`logging.NOTSET` and no + When first created the logger has level :const:`logging.NOTSET` and no default handler. Messages sent to this logger will not by default propagate to the root logger. @@ -2740,27 +2785,27 @@ worker threads rather than worker processes. :meth:`~multiprocessing.pool.Pool.terminate` manually. *processes* is the number of worker threads to use. If *processes* is - ``None`` then the number returned by :func:`os.cpu_count` is used. + ``None`` then the number returned by :func:`os.process_cpu_count` is used. If *initializer* is not ``None`` then each worker process will call ``initializer(*initargs)`` when it starts. Unlike :class:`Pool`, *maxtasksperchild* and *context* cannot be provided. - .. note:: + .. note:: - A :class:`ThreadPool` shares the same interface as :class:`Pool`, which - is designed around a pool of processes and predates the introduction of - the :class:`concurrent.futures` module. As such, it inherits some - operations that don't make sense for a pool backed by threads, and it - has its own type for representing the status of asynchronous jobs, - :class:`AsyncResult`, that is not understood by any other libraries. + A :class:`ThreadPool` shares the same interface as :class:`Pool`, which + is designed around a pool of processes and predates the introduction of + the :class:`concurrent.futures` module. As such, it inherits some + operations that don't make sense for a pool backed by threads, and it + has its own type for representing the status of asynchronous jobs, + :class:`AsyncResult`, that is not understood by any other libraries. - Users should generally prefer to use - :class:`concurrent.futures.ThreadPoolExecutor`, which has a simpler - interface that was designed around threads from the start, and which - returns :class:`concurrent.futures.Future` instances that are - compatible with many other libraries, including :mod:`asyncio`. 
+ Users should generally prefer to use + :class:`concurrent.futures.ThreadPoolExecutor`, which has a simpler + interface that was designed around threads from the start, and which + returns :class:`concurrent.futures.Future` instances that are + compatible with many other libraries, including :mod:`asyncio`. .. _multiprocessing-programming: @@ -2799,7 +2844,7 @@ Thread safety of proxies Joining zombie processes - On Unix when a process finishes but has not been joined it becomes a zombie. + On POSIX when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or :func:`~multiprocessing.active_children` is called) all completed processes which have not yet been joined will be joined. Also calling a finished @@ -2862,7 +2907,7 @@ Joining processes that use queues Explicitly pass resources to child processes - On Unix using the *fork* start method, a child process can make + On POSIX using the *fork* start method, a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. @@ -2959,7 +3004,7 @@ Global variables Safe importing of main module Make sure that the main module can be safely imported by a new Python - interpreter without causing unintended side effects (such a starting a new + interpreter without causing unintended side effects (such as starting a new process). For example, using the *spawn* or *forkserver* start method diff --git a/Doc/library/multiprocessing.shared_memory.rst b/Doc/library/multiprocessing.shared_memory.rst index 76046b34610abef..f453e6403d932d4 100644 --- a/Doc/library/multiprocessing.shared_memory.rst +++ b/Doc/library/multiprocessing.shared_memory.rst @@ -255,16 +255,17 @@ shared memory blocks created using that manager are all released when the :keyword:`with` statement's code block finishes execution. -.. class:: ShareableList(sequence=None, *, name=None) +.. class:: ShareableList(sequence=None, \*, name=None) Provides a mutable list-like object where all values stored within are stored in a shared memory block. This constrains storable values to - only the ``int``, ``float``, ``bool``, ``str`` (less than 10M bytes each), - ``bytes`` (less than 10M bytes each), and ``None`` built-in data types. - It also notably differs from the built-in ``list`` type in that these - lists can not change their overall length (i.e. no append, insert, etc.) - and do not support the dynamic creation of new :class:`ShareableList` - instances via slicing. + only the ``int`` (signed 64-bit), ``float``, ``bool``, ``str`` (less + than 10M bytes each when encoded as utf-8), ``bytes`` (less than 10M + bytes each), and ``None`` built-in data types. It also notably + differs from the built-in ``list`` type in that these lists can not + change their overall length (i.e. no append, insert, etc.) and do not + support the dynamic creation of new :class:`ShareableList` instances + via slicing. *sequence* is used in populating a new ``ShareableList`` full of values. Set to ``None`` to instead attach to an already existing @@ -275,6 +276,35 @@ shared memory blocks created using that manager are all released when the existing ``ShareableList``, specify its shared memory block's unique name while leaving ``sequence`` set to ``None``. + .. note:: + + A known issue exists for :class:`bytes` and :class:`str` values. 
+ If they end with ``\x00`` nul bytes or characters, those may be + *silently stripped* when fetching them by index from the + :class:`ShareableList`. This ``.rstrip(b'\x00')`` behavior is + considered a bug and may go away in the future. See :gh:`106939`. + + For applications where rstripping of trailing nulls is a problem, + work around it by always unconditionally appending an extra non-0 + byte to the end of such values when storing and unconditionally + removing it when fetching: + + .. doctest:: + + >>> from multiprocessing import shared_memory + >>> nul_bug_demo = shared_memory.ShareableList(['?\x00', b'\x03\x02\x01\x00\x00\x00']) + >>> nul_bug_demo[0] + '?' + >>> nul_bug_demo[1] + b'\x03\x02\x01' + >>> nul_bug_demo.shm.unlink() + >>> padded = shared_memory.ShareableList(['?\x00\x07', b'\x03\x02\x01\x00\x00\x00\x07']) + >>> padded[0][:-1] + '?\x00' + >>> padded[1][:-1] + b'\x03\x02\x01\x00\x00\x00' + >>> padded.shm.unlink() + .. method:: count(value) Returns the number of occurrences of ``value``. diff --git a/Doc/library/netrc.rst b/Doc/library/netrc.rst index 88265d9b9e9e931..c36e5cfecfc6a8a 100644 --- a/Doc/library/netrc.rst +++ b/Doc/library/netrc.rst @@ -51,9 +51,19 @@ the Unix :program:`ftp` program and other FTP clients. Exception raised by the :class:`~netrc.netrc` class when syntactical errors are encountered in source text. Instances of this exception provide three - interesting attributes: :attr:`msg` is a textual explanation of the error, - :attr:`filename` is the name of the source file, and :attr:`lineno` gives the - line number on which the error was found. + interesting attributes: + + .. attribute:: msg + + Textual explanation of the error. + + .. attribute:: filename + + The name of the source file. + + .. attribute:: lineno + + The line number on which the error was found. .. _netrc-objects: diff --git a/Doc/library/nis.rst b/Doc/library/nis.rst deleted file mode 100644 index 3fa7916c37b6a56..000000000000000 --- a/Doc/library/nis.rst +++ /dev/null @@ -1,72 +0,0 @@ - -:mod:`nis` --- Interface to Sun's NIS (Yellow Pages) -==================================================== - -.. module:: nis - :platform: Unix - :synopsis: Interface to Sun's NIS (Yellow Pages) library. - :deprecated: - -.. moduleauthor:: Fred Gansevles -.. sectionauthor:: Moshe Zadka - -.. deprecated-removed:: 3.11 3.13 - The :mod:`nis` module is deprecated - (see :pep:`PEP 594 <594#nis>` for details). - --------------- - -The :mod:`nis` module gives a thin wrapper around the NIS library, useful for -central administration of several hosts. - -Because NIS exists only on Unix systems, this module is only available for Unix. - -.. include:: ../includes/wasm-notavail.rst - -The :mod:`nis` module defines the following functions: - - -.. function:: match(key, mapname, domain=default_domain) - - Return the match for *key* in map *mapname*, or raise an error - (:exc:`nis.error`) if there is none. Both should be strings, *key* is 8-bit - clean. Return value is an arbitrary array of bytes (may contain ``NULL`` and - other joys). - - Note that *mapname* is first checked if it is an alias to another name. - - The *domain* argument allows overriding the NIS domain used for the lookup. If - unspecified, lookup is in the default NIS domain. - - -.. function:: cat(mapname, domain=default_domain) - - Return a dictionary mapping *key* to *value* such that ``match(key, - mapname)==value``. Note that both keys and values of the dictionary are - arbitrary arrays of bytes. 
- - Note that *mapname* is first checked if it is an alias to another name. - - The *domain* argument allows overriding the NIS domain used for the lookup. If - unspecified, lookup is in the default NIS domain. - - -.. function:: maps(domain=default_domain) - - Return a list of all valid maps. - - The *domain* argument allows overriding the NIS domain used for the lookup. If - unspecified, lookup is in the default NIS domain. - - -.. function:: get_default_domain() - - Return the system default NIS domain. - - -The :mod:`nis` module defines the following exception: - -.. exception:: error - - An error raised when a NIS function returns an error code. - diff --git a/Doc/library/nntplib.rst b/Doc/library/nntplib.rst deleted file mode 100644 index 143e4e0c427f9a6..000000000000000 --- a/Doc/library/nntplib.rst +++ /dev/null @@ -1,586 +0,0 @@ -:mod:`nntplib` --- NNTP protocol client -======================================= - -.. module:: nntplib - :synopsis: NNTP protocol client (requires sockets). - :deprecated: - -**Source code:** :source:`Lib/nntplib.py` - -.. index:: - pair: NNTP; protocol - single: Network News Transfer Protocol - -.. deprecated:: 3.11 - The :mod:`nntplib` module is deprecated (see :pep:`594` for details). - -.. testsetup:: - - import warnings - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=DeprecationWarning) - import nntplib - -.. testcleanup:: - - try: - s.quit() - except NameError: - pass - import sys - # Force a warning if any other file imports nntplib - sys.modules.pop('nntplib') - --------------- - -This module defines the class :class:`NNTP` which implements the client side of -the Network News Transfer Protocol. It can be used to implement a news reader -or poster, or automated news processors. It is compatible with :rfc:`3977` -as well as the older :rfc:`977` and :rfc:`2980`. - -.. include:: ../includes/wasm-notavail.rst - -Here are two small examples of how it can be used. To list some statistics -about a newsgroup and print the subjects of the last 10 articles:: - - >>> s = nntplib.NNTP('news.gmane.io') - >>> resp, count, first, last, name = s.group('gmane.comp.python.committers') - >>> print('Group', name, 'has', count, 'articles, range', first, 'to', last) - Group gmane.comp.python.committers has 1096 articles, range 1 to 1096 - >>> resp, overviews = s.over((last - 9, last)) - >>> for id, over in overviews: - ... print(id, nntplib.decode_header(over['subject'])) - ... - 1087 Re: Commit privileges for Åukasz Langa - 1088 Re: 3.2 alpha 2 freeze - 1089 Re: 3.2 alpha 2 freeze - 1090 Re: Commit privileges for Åukasz Langa - 1091 Re: Commit privileges for Åukasz Langa - 1092 Updated ssh key - 1093 Re: Updated ssh key - 1094 Re: Updated ssh key - 1095 Hello fellow committers! - 1096 Re: Hello fellow committers! - >>> s.quit() - '205 Bye!' - -To post an article from a binary file (this assumes that the article has valid -headers, and that you have right to post on the particular newsgroup):: - - >>> s = nntplib.NNTP('news.gmane.io') - >>> f = open('article.txt', 'rb') - >>> s.post(f) - '240 Article posted successfully.' - >>> s.quit() - '205 Bye!' - -The module itself defines the following classes: - - -.. class:: NNTP(host, port=119, user=None, password=None, readermode=None, usenetrc=False, [timeout]) - - Return a new :class:`NNTP` object, representing a connection - to the NNTP server running on host *host*, listening at port *port*. - An optional *timeout* can be specified for the socket connection. 
- If the optional *user* and *password* are provided, or if suitable - credentials are present in :file:`/.netrc` and the optional flag *usenetrc* - is true, the ``AUTHINFO USER`` and ``AUTHINFO PASS`` commands are used - to identify and authenticate the user to the server. If the optional - flag *readermode* is true, then a ``mode reader`` command is sent before - authentication is performed. Reader mode is sometimes necessary if you are - connecting to an NNTP server on the local machine and intend to call - reader-specific commands, such as ``group``. If you get unexpected - :exc:`NNTPPermanentError`\ s, you might need to set *readermode*. - The :class:`NNTP` class supports the :keyword:`with` statement to - unconditionally consume :exc:`OSError` exceptions and to close the NNTP - connection when done, e.g.: - - >>> from nntplib import NNTP - >>> with NNTP('news.gmane.io') as n: - ... n.group('gmane.comp.python.committers') - ... # doctest: +SKIP - ('211 1755 1 1755 gmane.comp.python.committers', 1755, 1, 1755, 'gmane.comp.python.committers') - >>> - - .. audit-event:: nntplib.connect self,host,port nntplib.NNTP - - .. audit-event:: nntplib.putline self,line nntplib.NNTP - - All commands will raise an :ref:`auditing event ` - ``nntplib.putline`` with arguments ``self`` and ``line``, - where ``line`` is the bytes about to be sent to the remote host. - - .. versionchanged:: 3.2 - *usenetrc* is now ``False`` by default. - - .. versionchanged:: 3.3 - Support for the :keyword:`with` statement was added. - - .. versionchanged:: 3.9 - If the *timeout* parameter is set to be zero, it will raise a - :class:`ValueError` to prevent the creation of a non-blocking socket. - -.. class:: NNTP_SSL(host, port=563, user=None, password=None, ssl_context=None, readermode=None, usenetrc=False, [timeout]) - - Return a new :class:`NNTP_SSL` object, representing an encrypted - connection to the NNTP server running on host *host*, listening at - port *port*. :class:`NNTP_SSL` objects have the same methods as - :class:`NNTP` objects. If *port* is omitted, port 563 (NNTPS) is used. - *ssl_context* is also optional, and is a :class:`~ssl.SSLContext` object. - Please read :ref:`ssl-security` for best practices. - All other parameters behave the same as for :class:`NNTP`. - - Note that SSL-on-563 is discouraged per :rfc:`4642`, in favor of - STARTTLS as described below. However, some servers only support the - former. - - .. audit-event:: nntplib.connect self,host,port nntplib.NNTP_SSL - - .. audit-event:: nntplib.putline self,line nntplib.NNTP_SSL - - All commands will raise an :ref:`auditing event ` - ``nntplib.putline`` with arguments ``self`` and ``line``, - where ``line`` is the bytes about to be sent to the remote host. - - .. versionadded:: 3.2 - - .. versionchanged:: 3.4 - The class now supports hostname check with - :attr:`ssl.SSLContext.check_hostname` and *Server Name Indication* (see - :data:`ssl.HAS_SNI`). - - .. versionchanged:: 3.9 - If the *timeout* parameter is set to be zero, it will raise a - :class:`ValueError` to prevent the creation of a non-blocking socket. - -.. exception:: NNTPError - - Derived from the standard exception :exc:`Exception`, this is the base - class for all exceptions raised by the :mod:`nntplib` module. Instances - of this class have the following attribute: - - .. attribute:: response - - The response of the server if available, as a :class:`str` object. - - -.. exception:: NNTPReplyError - - Exception raised when an unexpected reply is received from the server. - - -.. 
exception:: NNTPTemporaryError - - Exception raised when a response code in the range 400--499 is received. - - -.. exception:: NNTPPermanentError - - Exception raised when a response code in the range 500--599 is received. - - -.. exception:: NNTPProtocolError - - Exception raised when a reply is received from the server that does not begin - with a digit in the range 1--5. - - -.. exception:: NNTPDataError - - Exception raised when there is some error in the response data. - - -.. _nntp-objects: - -NNTP Objects ------------- - -When connected, :class:`NNTP` and :class:`NNTP_SSL` objects support the -following methods and attributes. - -Attributes -^^^^^^^^^^ - -.. attribute:: NNTP.nntp_version - - An integer representing the version of the NNTP protocol supported by the - server. In practice, this should be ``2`` for servers advertising - :rfc:`3977` compliance and ``1`` for others. - - .. versionadded:: 3.2 - -.. attribute:: NNTP.nntp_implementation - - A string describing the software name and version of the NNTP server, - or :const:`None` if not advertised by the server. - - .. versionadded:: 3.2 - -Methods -^^^^^^^ - -The *response* that is returned as the first item in the return tuple of almost -all methods is the server's response: a string beginning with a three-digit -code. If the server's response indicates an error, the method raises one of -the above exceptions. - -Many of the following methods take an optional keyword-only argument *file*. -When the *file* argument is supplied, it must be either a :term:`file object` -opened for binary writing, or the name of an on-disk file to be written to. -The method will then write any data returned by the server (except for the -response line and the terminating dot) to the file; any list of lines, -tuples or objects that the method normally returns will be empty. - -.. versionchanged:: 3.2 - Many of the following methods have been reworked and fixed, which makes - them incompatible with their 3.1 counterparts. - - -.. method:: NNTP.quit() - - Send a ``QUIT`` command and close the connection. Once this method has been - called, no other methods of the NNTP object should be called. - - -.. method:: NNTP.getwelcome() - - Return the welcome message sent by the server in reply to the initial - connection. (This message sometimes contains disclaimers or help information - that may be relevant to the user.) - - -.. method:: NNTP.getcapabilities() - - Return the :rfc:`3977` capabilities advertised by the server, as a - :class:`dict` instance mapping capability names to (possibly empty) lists - of values. On legacy servers which don't understand the ``CAPABILITIES`` - command, an empty dictionary is returned instead. - - >>> s = NNTP('news.gmane.io') - >>> 'POST' in s.getcapabilities() - True - - .. versionadded:: 3.2 - - -.. method:: NNTP.login(user=None, password=None, usenetrc=True) - - Send ``AUTHINFO`` commands with the user name and password. If *user* - and *password* are ``None`` and *usenetrc* is true, credentials from - ``~/.netrc`` will be used if possible. - - Unless intentionally delayed, login is normally performed during the - :class:`NNTP` object initialization and separately calling this function - is unnecessary. To force authentication to be delayed, you must not set - *user* or *password* when creating the object, and must set *usenetrc* to - False. - - .. versionadded:: 3.2 - - -.. method:: NNTP.starttls(context=None) - - Send a ``STARTTLS`` command. This will enable encryption on the NNTP - connection. 
The *context* argument is optional and should be a - :class:`ssl.SSLContext` object. Please read :ref:`ssl-security` for best - practices. - - Note that this may not be done after authentication information has - been transmitted, and authentication occurs by default if possible during a - :class:`NNTP` object initialization. See :meth:`NNTP.login` for information - on suppressing this behavior. - - .. versionadded:: 3.2 - - .. versionchanged:: 3.4 - The method now supports hostname check with - :attr:`ssl.SSLContext.check_hostname` and *Server Name Indication* (see - :data:`ssl.HAS_SNI`). - -.. method:: NNTP.newgroups(date, *, file=None) - - Send a ``NEWGROUPS`` command. The *date* argument should be a - :class:`datetime.date` or :class:`datetime.datetime` object. - Return a pair ``(response, groups)`` where *groups* is a list representing - the groups that are new since the given *date*. If *file* is supplied, - though, then *groups* will be empty. - - >>> from datetime import date, timedelta - >>> resp, groups = s.newgroups(date.today() - timedelta(days=3)) - >>> len(groups) # doctest: +SKIP - 85 - >>> groups[0] # doctest: +SKIP - GroupInfo(group='gmane.network.tor.devel', last='4', first='1', flag='m') - - -.. method:: NNTP.newnews(group, date, *, file=None) - - Send a ``NEWNEWS`` command. Here, *group* is a group name or ``'*'``, and - *date* has the same meaning as for :meth:`newgroups`. Return a pair - ``(response, articles)`` where *articles* is a list of message ids. - - This command is frequently disabled by NNTP server administrators. - - -.. method:: NNTP.list(group_pattern=None, *, file=None) - - Send a ``LIST`` or ``LIST ACTIVE`` command. Return a pair - ``(response, list)`` where *list* is a list of tuples representing all - the groups available from this NNTP server, optionally matching the - pattern string *group_pattern*. Each tuple has the form - ``(group, last, first, flag)``, where *group* is a group name, *last* - and *first* are the last and first article numbers, and *flag* usually - takes one of these values: - - * ``y``: Local postings and articles from peers are allowed. - * ``m``: The group is moderated and all postings must be approved. - * ``n``: No local postings are allowed, only articles from peers. - * ``j``: Articles from peers are filed in the junk group instead. - * ``x``: No local postings, and articles from peers are ignored. - * ``=foo.bar``: Articles are filed in the ``foo.bar`` group instead. - - If *flag* has another value, then the status of the newsgroup should be - considered unknown. - - This command can return very large results, especially if *group_pattern* - is not specified. It is best to cache the results offline unless you - really need to refresh them. - - .. versionchanged:: 3.2 - *group_pattern* was added. - - -.. method:: NNTP.descriptions(grouppattern) - - Send a ``LIST NEWSGROUPS`` command, where *grouppattern* is a wildmat string as - specified in :rfc:`3977` (it's essentially the same as DOS or UNIX shell wildcard - strings). Return a pair ``(response, descriptions)``, where *descriptions* - is a dictionary mapping group names to textual descriptions. - - >>> resp, descs = s.descriptions('gmane.comp.python.*') - >>> len(descs) # doctest: +SKIP - 295 - >>> descs.popitem() # doctest: +SKIP - ('gmane.comp.python.bio.general', 'BioPython discussion list (Moderated)') - - -.. method:: NNTP.description(group) - - Get a description for a single group *group*. 
If more than one group matches - (if 'group' is a real wildmat string), return the first match. If no group - matches, return an empty string. - - This elides the response code from the server. If the response code is needed, - use :meth:`descriptions`. - - -.. method:: NNTP.group(name) - - Send a ``GROUP`` command, where *name* is the group name. The group is - selected as the current group, if it exists. Return a tuple - ``(response, count, first, last, name)`` where *count* is the (estimated) - number of articles in the group, *first* is the first article number in - the group, *last* is the last article number in the group, and *name* - is the group name. - - -.. method:: NNTP.over(message_spec, *, file=None) - - Send an ``OVER`` command, or an ``XOVER`` command on legacy servers. - *message_spec* can be either a string representing a message id, or - a ``(first, last)`` tuple of numbers indicating a range of articles in - the current group, or a ``(first, None)`` tuple indicating a range of - articles starting from *first* to the last article in the current group, - or :const:`None` to select the current article in the current group. - - Return a pair ``(response, overviews)``. *overviews* is a list of - ``(article_number, overview)`` tuples, one for each article selected - by *message_spec*. Each *overview* is a dictionary with the same number - of items, but this number depends on the server. These items are either - message headers (the key is then the lower-cased header name) or metadata - items (the key is then the metadata name prepended with ``":"``). The - following items are guaranteed to be present by the NNTP specification: - - * the ``subject``, ``from``, ``date``, ``message-id`` and ``references`` - headers - * the ``:bytes`` metadata: the number of bytes in the entire raw article - (including headers and body) - * the ``:lines`` metadata: the number of lines in the article body - - The value of each item is either a string, or :const:`None` if not present. - - It is advisable to use the :func:`decode_header` function on header - values when they may contain non-ASCII characters:: - - >>> _, _, first, last, _ = s.group('gmane.comp.python.devel') - >>> resp, overviews = s.over((last, last)) - >>> art_num, over = overviews[0] - >>> art_num - 117216 - >>> list(over.keys()) - ['xref', 'from', ':lines', ':bytes', 'references', 'date', 'message-id', 'subject'] - >>> over['from'] - '=?UTF-8?B?Ik1hcnRpbiB2LiBMw7Z3aXMi?= ' - >>> nntplib.decode_header(over['from']) - '"Martin v. Löwis" ' - - .. versionadded:: 3.2 - - -.. method:: NNTP.help(*, file=None) - - Send a ``HELP`` command. Return a pair ``(response, list)`` where *list* is a - list of help strings. - - -.. method:: NNTP.stat(message_spec=None) - - Send a ``STAT`` command, where *message_spec* is either a message id - (enclosed in ``'<'`` and ``'>'``) or an article number in the current group. - If *message_spec* is omitted or :const:`None`, the current article in the - current group is considered. Return a triple ``(response, number, id)`` - where *number* is the article number and *id* is the message id. - - >>> _, _, first, last, _ = s.group('gmane.comp.python.devel') - >>> resp, number, message_id = s.stat(first) - >>> number, message_id - (9099, '<20030112190404.GE29873@epoch.metaslash.com>') - - -.. method:: NNTP.next() - - Send a ``NEXT`` command. Return as for :meth:`.stat`. - - -.. method:: NNTP.last() - - Send a ``LAST`` command. Return as for :meth:`.stat`. - - -.. 
method:: NNTP.article(message_spec=None, *, file=None) - - Send an ``ARTICLE`` command, where *message_spec* has the same meaning as - for :meth:`.stat`. Return a tuple ``(response, info)`` where *info* - is a :class:`~collections.namedtuple` with three attributes *number*, - *message_id* and *lines* (in that order). *number* is the article number - in the group (or 0 if the information is not available), *message_id* the - message id as a string, and *lines* a list of lines (without terminating - newlines) comprising the raw message including headers and body. - - >>> resp, info = s.article('<20030112190404.GE29873@epoch.metaslash.com>') - >>> info.number - 0 - >>> info.message_id - '<20030112190404.GE29873@epoch.metaslash.com>' - >>> len(info.lines) - 65 - >>> info.lines[0] - b'Path: main.gmane.org!not-for-mail' - >>> info.lines[1] - b'From: Neal Norwitz ' - >>> info.lines[-3:] - [b'There is a patch for 2.3 as well as 2.2.', b'', b'Neal'] - - -.. method:: NNTP.head(message_spec=None, *, file=None) - - Same as :meth:`article()`, but sends a ``HEAD`` command. The *lines* - returned (or written to *file*) will only contain the message headers, not - the body. - - -.. method:: NNTP.body(message_spec=None, *, file=None) - - Same as :meth:`article()`, but sends a ``BODY`` command. The *lines* - returned (or written to *file*) will only contain the message body, not the - headers. - - -.. method:: NNTP.post(data) - - Post an article using the ``POST`` command. The *data* argument is either - a :term:`file object` opened for binary reading, or any iterable of bytes - objects (representing raw lines of the article to be posted). It should - represent a well-formed news article, including the required headers. The - :meth:`post` method automatically escapes lines beginning with ``.`` and - appends the termination line. - - If the method succeeds, the server's response is returned. If the server - refuses posting, a :class:`NNTPReplyError` is raised. - - -.. method:: NNTP.ihave(message_id, data) - - Send an ``IHAVE`` command. *message_id* is the id of the message to send - to the server (enclosed in ``'<'`` and ``'>'``). The *data* parameter - and the return value are the same as for :meth:`post()`. - - -.. method:: NNTP.date() - - Return a pair ``(response, date)``. *date* is a :class:`~datetime.datetime` - object containing the current date and time of the server. - - -.. method:: NNTP.slave() - - Send a ``SLAVE`` command. Return the server's *response*. - - -.. method:: NNTP.set_debuglevel(level) - - Set the instance's debugging level. This controls the amount of debugging - output printed. The default, ``0``, produces no debugging output. A value of - ``1`` produces a moderate amount of debugging output, generally a single line - per request or response. A value of ``2`` or higher produces the maximum amount - of debugging output, logging each line sent and received on the connection - (including message text). - - -The following are optional NNTP extensions defined in :rfc:`2980`. Some of -them have been superseded by newer commands in :rfc:`3977`. - - -.. method:: NNTP.xhdr(hdr, str, *, file=None) - - Send an ``XHDR`` command. The *hdr* argument is a header keyword, e.g. - ``'subject'``. The *str* argument should have the form ``'first-last'`` - where *first* and *last* are the first and last article numbers to search. 
- Return a pair ``(response, list)``, where *list* is a list of pairs ``(id, - text)``, where *id* is an article number (as a string) and *text* is the text of - the requested header for that article. If the *file* parameter is supplied, then - the output of the ``XHDR`` command is stored in a file. If *file* is a string, - then the method will open a file with that name, write to it then close it. - If *file* is a :term:`file object`, then it will start calling :meth:`write` on - it to store the lines of the command output. If *file* is supplied, then the - returned *list* is an empty list. - - -.. method:: NNTP.xover(start, end, *, file=None) - - Send an ``XOVER`` command. *start* and *end* are article numbers - delimiting the range of articles to select. The return value is the - same of for :meth:`over()`. It is recommended to use :meth:`over()` - instead, since it will automatically use the newer ``OVER`` command - if available. - - -Utility functions ------------------ - -The module also defines the following utility function: - - -.. function:: decode_header(header_str) - - Decode a header value, un-escaping any escaped non-ASCII characters. - *header_str* must be a :class:`str` object. The unescaped value is - returned. Using this function is recommended to display some headers - in a human readable form:: - - >>> decode_header("Some subject") - 'Some subject' - >>> decode_header("=?ISO-8859-15?Q?D=E9buter_en_Python?=") - 'Débuter en Python' - >>> decode_header("Re: =?UTF-8?B?cHJvYmzDqG1lIGRlIG1hdHJpY2U=?=") - 'Re: problème de matrice' diff --git a/Doc/library/numbers.rst b/Doc/library/numbers.rst index b3dce151aee2893..2a05b56db051f98 100644 --- a/Doc/library/numbers.rst +++ b/Doc/library/numbers.rst @@ -160,23 +160,23 @@ refer to ``MyIntegral`` and ``OtherTypeIKnowAbout`` as of :class:`Complex` (``a : A <: Complex``), and ``b : B <: Complex``. I'll consider ``a + b``: - 1. If ``A`` defines an :meth:`__add__` which accepts ``b``, all is - well. - 2. If ``A`` falls back to the boilerplate code, and it were to - return a value from :meth:`__add__`, we'd miss the possibility - that ``B`` defines a more intelligent :meth:`__radd__`, so the - boilerplate should return :const:`NotImplemented` from - :meth:`__add__`. (Or ``A`` may not implement :meth:`__add__` at - all.) - 3. Then ``B``'s :meth:`__radd__` gets a chance. If it accepts - ``a``, all is well. - 4. If it falls back to the boilerplate, there are no more possible - methods to try, so this is where the default implementation - should live. - 5. If ``B <: A``, Python tries ``B.__radd__`` before - ``A.__add__``. This is ok, because it was implemented with - knowledge of ``A``, so it can handle those instances before - delegating to :class:`Complex`. +1. If ``A`` defines an :meth:`__add__` which accepts ``b``, all is + well. +2. If ``A`` falls back to the boilerplate code, and it were to + return a value from :meth:`__add__`, we'd miss the possibility + that ``B`` defines a more intelligent :meth:`__radd__`, so the + boilerplate should return :const:`NotImplemented` from + :meth:`__add__`. (Or ``A`` may not implement :meth:`__add__` at + all.) +3. Then ``B``'s :meth:`__radd__` gets a chance. If it accepts + ``a``, all is well. +4. If it falls back to the boilerplate, there are no more possible + methods to try, so this is where the default implementation + should live. +5. If ``B <: A``, Python tries ``B.__radd__`` before + ``A.__add__``. 
This is ok, because it was implemented with + knowledge of ``A``, so it can handle those instances before + delegating to :class:`Complex`. If ``A <: Complex`` and ``B <: Real`` without sharing any other knowledge, then the appropriate shared operation is the one involving the built diff --git a/Doc/library/operator.rst b/Doc/library/operator.rst index 35e9b49ea8bccae..96f2c287875d412 100644 --- a/Doc/library/operator.rst +++ b/Doc/library/operator.rst @@ -59,9 +59,9 @@ truth tests, identity tests, and boolean operations: __not__(obj) Return the outcome of :keyword:`not` *obj*. (Note that there is no - :meth:`__not__` method for object instances; only the interpreter core defines - this operation. The result is affected by the :meth:`__bool__` and - :meth:`__len__` methods.) + :meth:`!__not__` method for object instances; only the interpreter core defines + this operation. The result is affected by the :meth:`~object.__bool__` and + :meth:`~object.__len__` methods.) .. function:: truth(obj) @@ -244,7 +244,7 @@ Operations which work with sequences (some of them with mappings too) include: .. function:: length_hint(obj, default=0) - Return an estimated length for the object *o*. First try to return its + Return an estimated length for the object *obj*. First try to return its actual length, then an estimate using :meth:`object.__length_hint__`, and finally return the default value. @@ -306,7 +306,7 @@ expect a function argument. itemgetter(*items) Return a callable object that fetches *item* from its operand using the - operand's :meth:`__getitem__` method. If multiple items are specified, + operand's :meth:`~object.__getitem__` method. If multiple items are specified, returns a tuple of lookup values. For example: * After ``f = itemgetter(2)``, the call ``f(r)`` returns ``r[2]``. @@ -326,8 +326,8 @@ expect a function argument. return tuple(obj[item] for item in items) return g - The items can be any type accepted by the operand's :meth:`__getitem__` - method. Dictionaries accept any hashable value. Lists, tuples, and + The items can be any type accepted by the operand's :meth:`~object.__getitem__` + method. Dictionaries accept any :term:`hashable` value. Lists, tuples, and strings accept an index or a slice: >>> itemgetter(1)('ABCDEFG') diff --git a/Doc/library/optparse.rst b/Doc/library/optparse.rst index 47e62553fb03588..015e83ed2ce5f77 100644 --- a/Doc/library/optparse.rst +++ b/Doc/library/optparse.rst @@ -11,8 +11,9 @@ **Source code:** :source:`Lib/optparse.py` .. deprecated:: 3.2 - The :mod:`optparse` module is deprecated and will not be developed further; - development will continue with the :mod:`argparse` module. + The :mod:`optparse` module is :term:`soft deprecated` and will not be + developed further; development will continue with the :mod:`argparse` + module. -------------- @@ -42,8 +43,8 @@ on the command-line, for example:: --file=outfile -q As it parses the command line, :mod:`optparse` sets attributes of the -``options`` object returned by :meth:`parse_args` based on user-supplied -command-line values. When :meth:`parse_args` returns from parsing this command +``options`` object returned by :meth:`~OptionParser.parse_args` based on user-supplied +command-line values. When :meth:`~OptionParser.parse_args` returns from parsing this command line, ``options.filename`` will be ``"outfile"`` and ``options.verbose`` will be ``False``. 
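As a minimal, hedged sketch of the parser that the walk-through above assumes (the help strings here are illustrative; only the ``--file``/``-q`` behaviour is taken from the surrounding text)::

   from optparse import OptionParser

   parser = OptionParser()
   parser.add_option("-f", "--file", dest="filename",
                     help="write report to FILE", metavar="FILE")
   parser.add_option("-q", "--quiet", action="store_false",
                     dest="verbose", default=True,
                     help="don't print status messages to stdout")

   # Passing an explicit argument list is handy for demonstration;
   # parser.parse_args() with no arguments parses sys.argv[1:] instead.
   (options, args) = parser.parse_args(["--file=outfile", "-q"])
   assert options.filename == "outfile" and options.verbose is False
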
:mod:`optparse` supports both long and short options, allows short options to be merged together, and allows options to be associated with their @@ -285,10 +286,10 @@ program's command line:: (options, args) = parser.parse_args() -(If you like, you can pass a custom argument list to :meth:`parse_args`, but +(If you like, you can pass a custom argument list to :meth:`~OptionParser.parse_args`, but that's rarely necessary: by default it uses ``sys.argv[1:]``.) -:meth:`parse_args` returns two values: +:meth:`~OptionParser.parse_args` returns two values: * ``options``, an object containing values for all of your options---e.g. if ``--file`` takes a single string argument, then ``options.file`` will be the @@ -339,7 +340,7 @@ Now let's make up a fake command line and ask :mod:`optparse` to parse it:: When :mod:`optparse` sees the option string ``-f``, it consumes the next argument, ``foo.txt``, and stores it in ``options.filename``. So, after this -call to :meth:`parse_args`, ``options.filename`` is ``"foo.txt"``. +call to :meth:`~OptionParser.parse_args`, ``options.filename`` is ``"foo.txt"``. Some other option types supported by :mod:`optparse` are ``int`` and ``float``. Here's an option that expects an integer argument:: @@ -404,7 +405,7 @@ Other actions Some other actions supported by :mod:`optparse` are: ``"store_const"`` - store a constant value + store a constant value, pre-set via :attr:`Option.const` ``"append"`` append this option's argument to a list @@ -453,7 +454,8 @@ Again, the default value for ``verbose`` will be ``True``: the last default value supplied for any particular destination is the one that counts. A clearer way to specify default values is the :meth:`set_defaults` method of -OptionParser, which you can call at any time before calling :meth:`parse_args`:: +OptionParser, which you can call at any time before calling +:meth:`~OptionParser.parse_args`:: parser.set_defaults(verbose=True) parser.add_option(...) @@ -811,7 +813,7 @@ The first step in using :mod:`optparse` is to create an OptionParser instance. help option. When :mod:`optparse` prints the usage string, it expands ``%prog`` to ``os.path.basename(sys.argv[0])`` (or to ``prog`` if you passed that keyword argument). To suppress a usage message, pass the - special value :data:`optparse.SUPPRESS_USAGE`. + special value :const:`optparse.SUPPRESS_USAGE`. ``option_list`` (default: ``[]``) A list of Option objects to populate the parser with. The options in @@ -925,7 +927,7 @@ The canonical way to create an :class:`Option` instance is with the store this option's argument (default) ``"store_const"`` - store a constant value + store a constant value, pre-set via :attr:`Option.const` ``"store_true"`` store ``True`` @@ -937,7 +939,7 @@ The canonical way to create an :class:`Option` instance is with the append this option's argument to a list ``"append_const"`` - append a constant value to a list + append a constant value to a list, pre-set via :attr:`Option.const` ``"count"`` increment a counter by one @@ -954,7 +956,16 @@ The canonical way to create an :class:`Option` instance is with the As you can see, most actions involve storing or updating a value somewhere. :mod:`optparse` always creates a special object for this, conventionally called -``options`` (it happens to be an instance of :class:`optparse.Values`). Option +``options``, which is an instance of :class:`optparse.Values`. + +.. class:: Values + + An object holding parsed argument names and values as attributes. 
+   Normally created when calling :meth:`OptionParser.parse_args`,
+   and can be overridden by a custom subclass passed to the *values* argument of
+   :meth:`OptionParser.parse_args` (as described in :ref:`optparse-parsing-arguments`).
+
+Option
 arguments (and various other values) are stored as attributes of this object,
 according to the :attr:`~Option.dest` (destination) option attribute.
 
@@ -991,6 +1002,14 @@ one that makes sense for *all* options.
 Option attributes
 ^^^^^^^^^^^^^^^^^
 
+.. class:: Option
+
+   A single command line argument,
+   with various attributes passed by keyword to the constructor.
+   Normally created with :meth:`OptionParser.add_option` rather than directly,
+   and can be overridden by a custom class via the *option_class* argument
+   to :class:`OptionParser`.
+
 The following option attributes may be passed as keyword arguments to
 :meth:`OptionParser.add_option`.  If you pass an option attribute that is not
 relevant to a particular option, or fail to pass a required option attribute,
@@ -1060,7 +1079,7 @@ relevant to a particular option, or fail to pass a required option attribute,
    Help text to print for this option when listing all available options after
    the user supplies a :attr:`~Option.help` option (such as ``--help``).  If
    no help text is supplied, the option will be listed without help text.  To
-   hide this option, use the special value :data:`optparse.SUPPRESS_HELP`.
+   hide this option, use the special value :const:`optparse.SUPPRESS_HELP`.
 
 .. attribute:: Option.metavar
 
@@ -1232,7 +1251,7 @@ must specify for any option using that action.
 
    If no :attr:`~Option.help` string is supplied for an option, it will still be
    listed in the help message.  To omit an option entirely, use the special value
-   :data:`optparse.SUPPRESS_HELP`.
+   :const:`optparse.SUPPRESS_HELP`.
 
    :mod:`optparse` automatically adds a :attr:`~Option.help` option to all
   OptionParsers, so you do not normally need to create one.
 
@@ -1321,35 +1340,37 @@ Parsing arguments
 ^^^^^^^^^^^^^^^^^
 
 The whole point of creating and populating an OptionParser is to call its
-:meth:`parse_args` method::
+:meth:`~OptionParser.parse_args` method.
 
-   (options, args) = parser.parse_args(args=None, values=None)
+.. method:: OptionParser.parse_args(args=None, values=None)
 
-where the input parameters are
+   Parse the command-line options found in *args*.
 
-``args``
-   the list of arguments to process (default: ``sys.argv[1:]``)
+   The input parameters are
 
-``values``
-   an :class:`optparse.Values` object to store option arguments in (default: a
-   new instance of :class:`Values`) -- if you give an existing object, the
-   option defaults will not be initialized on it
+   ``args``
+      the list of arguments to process (default: ``sys.argv[1:]``)
 
-and the return values are
+   ``values``
+      an :class:`Values` object to store option arguments in (default: a
+      new instance of :class:`Values`) -- if you give an existing object, the
+      option defaults will not be initialized on it
 
-``options``
-   the same object that was passed in as ``values``, or the optparse.Values
-   instance created by :mod:`optparse`
+   and the return value is a pair ``(options, args)`` where
 
-``args``
-   the leftover positional arguments after all options have been processed
+   ``options``
+      the same object that was passed in as *values*, or the ``optparse.Values``
+      instance created by :mod:`optparse`
+
+   ``args``
+      the leftover positional arguments after all options have been processed
 
 The most common usage is to supply neither keyword argument. 
If you supply ``values``, it will be modified with repeated :func:`setattr` calls (roughly one for every option argument stored to an option destination) and returned by -:meth:`parse_args`. +:meth:`~OptionParser.parse_args`. -If :meth:`parse_args` encounters any errors in the argument list, it calls the +If :meth:`~OptionParser.parse_args` encounters any errors in the argument list, it calls the OptionParser's :meth:`error` method with an appropriate end-user error message. This ultimately terminates your process with an exit status of 2 (the traditional Unix exit status for command-line errors). @@ -1501,7 +1522,7 @@ OptionParser supports several other public methods: Set the usage string according to the rules described above for the ``usage`` constructor keyword argument. Passing ``None`` sets the default usage - string; use :data:`optparse.SUPPRESS_USAGE` to suppress a usage message. + string; use :const:`optparse.SUPPRESS_USAGE` to suppress a usage message. .. method:: OptionParser.print_usage(file=None) @@ -1644,7 +1665,7 @@ where the current list of leftover arguments, ie. arguments that have been consumed but are neither options nor option arguments. Feel free to modify ``parser.largs``, e.g. by adding more arguments to it. (This list will - become ``args``, the second return value of :meth:`parse_args`.) + become ``args``, the second return value of :meth:`~OptionParser.parse_args`.) ``parser.rargs`` the current list of remaining arguments, ie. with ``opt_str`` and @@ -2027,7 +2048,7 @@ Features of note: values.ensure_value(attr, value) If the ``attr`` attribute of ``values`` doesn't exist or is ``None``, then - ensure_value() first sets it to ``value``, and then returns 'value. This is + ensure_value() first sets it to ``value``, and then returns ``value``. This is very handy for actions like ``"extend"``, ``"append"``, and ``"count"``, all of which accumulate data in a variable and expect that variable to be of a certain type (a list for the first two, an integer for the latter). Using @@ -2035,3 +2056,27 @@ Features of note: about setting a default value for the option destinations in question; they can just leave the default as ``None`` and :meth:`ensure_value` will take care of getting it right when it's needed. + +Exceptions +---------- + +.. exception:: OptionError + + Raised if an :class:`Option` instance is created with invalid or + inconsistent arguments. + +.. exception:: OptionConflictError + + Raised if conflicting options are added to an :class:`OptionParser`. + +.. exception:: OptionValueError + + Raised if an invalid option value is encountered on the command line. + +.. exception:: BadOptionError + + Raised if an invalid option is passed on the command line. + +.. exception:: AmbiguousOptionError + + Raised if an ambiguous option is passed on the command line. diff --git a/Doc/library/os.path.rst b/Doc/library/os.path.rst index 6d52a03ba95704e..95933f56d50542e 100644 --- a/Doc/library/os.path.rst +++ b/Doc/library/os.path.rst @@ -159,7 +159,7 @@ the :mod:`glob` module.) On Unix and Windows, return the argument with an initial component of ``~`` or ``~user`` replaced by that *user*'s home directory. - .. index:: module: pwd + .. index:: pair: module; pwd On Unix, an initial ``~`` is replaced by the environment variable :envvar:`HOME` if it is set; otherwise the current user's home directory is looked up in the @@ -266,6 +266,15 @@ the :mod:`glob` module.) Accepts a :term:`path-like object`. +.. 
function:: isjunction(path) + + Return ``True`` if *path* refers to an :func:`existing ` directory + entry that is a junction. Always return ``False`` if junctions are not + supported on the current platform. + + .. versionadded:: 3.12 + + .. function:: islink(path) Return ``True`` if *path* refers to an :func:`existing ` directory @@ -295,19 +304,38 @@ the :mod:`glob` module.) Accepts a :term:`path-like object`. +.. function:: isdevdrive(path) + + Return ``True`` if pathname *path* is located on a Windows Dev Drive. + A Dev Drive is optimized for developer scenarios, and offers faster + performance for reading and writing files. It is recommended for use for + source code, temporary build directories, package caches, and other + IO-intensive operations. + + May raise an error for an invalid path, for example, one without a + recognizable drive, but returns ``False`` on platforms that do not support + Dev Drives. See `the Windows documentation `_ + for information on enabling and creating Dev Drives. + + .. availability:: Windows. + + .. versionadded:: 3.12 + + .. function:: join(path, *paths) - Join one or more path components intelligently. The return value is the - concatenation of *path* and any members of *\*paths* with exactly one - directory separator following each non-empty part except the last, meaning - that the result will only end in a separator if the last part is empty. If - a component is an absolute path, all previous components are thrown away - and joining continues from the absolute path component. - - On Windows, the drive letter is not reset when an absolute path component - (e.g., ``r'\foo'``) is encountered. If a component contains a drive - letter, all previous components are thrown away and the drive letter is - reset. Note that since there is a current directory for each drive, + Join one or more path segments intelligently. The return value is the + concatenation of *path* and all members of *\*paths*, with exactly one + directory separator following each non-empty part, except the last. That is, + the result will only end in a separator if the last part is either empty or + ends in a separator. If a segment is an absolute path (which on Windows + requires both a drive and a root), then all previous segments are ignored and + joining continues from the absolute path segment. + + On Windows, the drive is not reset when a rooted path segment (e.g., + ``r'\foo'``) is encountered. If a segment is on a different drive or is an + absolute path, all previous segments are ignored and the drive is reset. Note + that since there is a current directory for each drive, ``os.path.join("c:", "foo")`` represents a path relative to the current directory on drive :file:`C:` (:file:`c:foo`), not :file:`c:\\foo`. @@ -349,7 +377,8 @@ the :mod:`glob` module.) Return the canonical path of the specified filename, eliminating any symbolic links encountered in the path (if they are supported by the operating - system). + system). On Windows, this function will also resolve MS-DOS (also called 8.3) + style names such as ``C:\\PROGRA~1`` to ``C:\\Program Files``. If a path doesn't exist or a symlink loop is encountered, and *strict* is ``True``, :exc:`OSError` is raised. If *strict* is ``False``, the path is @@ -382,7 +411,7 @@ the :mod:`glob` module.) *start*. On Windows, :exc:`ValueError` is raised when *path* and *start* are on different drives. - *start* defaults to :attr:`os.curdir`. + *start* defaults to :data:`os.curdir`. .. availability:: Unix, Windows. 
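As a small illustrative sketch of the joining and relative-path rules described above (POSIX results are shown; on Windows the separator and drive handling differ as noted)::

   >>> import os.path
   >>> os.path.join('usr', 'lib', 'python3')
   'usr/lib/python3'
   >>> os.path.join('/usr', 'local', '/opt/bin')   # an absolute segment discards what came before
   '/opt/bin'
   >>> os.path.relpath('/usr/var/log', start='/usr')
   'var/log'
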
@@ -478,6 +507,39 @@ the :mod:`glob` module.) Accepts a :term:`path-like object`. +.. function:: splitroot(path) + + Split the pathname *path* into a 3-item tuple ``(drive, root, tail)`` where + *drive* is a device name or mount point, *root* is a string of separators + after the drive, and *tail* is everything after the root. Any of these + items may be the empty string. In all cases, ``drive + root + tail`` will + be the same as *path*. + + On POSIX systems, *drive* is always empty. The *root* may be empty (if *path* is + relative), a single forward slash (if *path* is absolute), or two forward slashes + (implementation-defined per `IEEE Std 1003.1-2017; 4.13 Pathname Resolution + `_.) + For example:: + + >>> splitroot('/home/sam') + ('', '/', 'home/sam') + >>> splitroot('//home/sam') + ('', '//', 'home/sam') + >>> splitroot('///home/sam') + ('', '/', '//home/sam') + + On Windows, *drive* may be empty, a drive-letter name, a UNC share, or a device + name. The *root* may be empty, a forward slash, or a backward slash. For + example:: + + >>> splitroot('C:/Users/Sam') + ('C:', '/', 'Users/Sam') + >>> splitroot('//Server/Share/Users/Sam') + ('//Server/Share', '/', 'Users/Sam') + + .. versionadded:: 3.12 + + .. function:: splitext(path) Split the pathname *path* into a pair ``(root, ext)`` such that ``root + ext == diff --git a/Doc/library/os.rst b/Doc/library/os.rst index 0f0fb55e315c952..fe573f188ab0666 100644 --- a/Doc/library/os.rst +++ b/Doc/library/os.rst @@ -60,7 +60,7 @@ Notes on the availability of these functions: ``'java'``. .. seealso:: - :attr:`sys.platform` has a finer granularity. :func:`os.uname` gives + :data:`sys.platform` has a finer granularity. :func:`os.uname` gives system-dependent version information. The :mod:`platform` module provides detailed checks for the @@ -88,8 +88,8 @@ startup by the :c:func:`PyConfig_Read` function: see On some systems, conversion using the file system encoding may fail. In this case, Python uses the :ref:`surrogateescape encoding error handler `, which means that undecodable bytes are replaced by a - Unicode character U+DCxx on decoding, and these are again translated to the - original byte on encoding. + Unicode character U+DC\ *xx* on decoding, and these are again + translated to the original byte on encoding. The :term:`file system encoding ` must @@ -201,6 +201,11 @@ process and user. ``'surrogateescape'`` error handler. Use :data:`environb` if you would like to use a different encoding. + On Windows, the keys are converted to uppercase. This also applies when + getting, setting, or deleting an item. For example, + ``environ['monty'] = 'python'`` maps the key ``'MONTY'`` to the value + ``'python'``. + .. note:: Calling :func:`putenv` directly does not change :data:`os.environ`, so it's better @@ -210,7 +215,7 @@ process and user. On some platforms, including FreeBSD and macOS, setting ``environ`` may cause memory leaks. Refer to the system documentation for - :c:func:`putenv`. + :c:func:`!putenv`. You can delete items in this mapping to unset environment variables. :func:`unsetenv` will be called automatically when an item is deleted from @@ -228,7 +233,7 @@ process and user. :data:`environ` and :data:`environb` are synchronized (modifying :data:`environb` updates :data:`environ`, and vice versa). - :data:`environb` is only available if :data:`supports_bytes_environ` is + :data:`environb` is only available if :const:`supports_bytes_environ` is ``True``. .. versionadded:: 3.2 @@ -326,7 +331,7 @@ process and user. 
future environment changes. - :func:`getenvb` is only available if :data:`supports_bytes_environ` + :func:`getenvb` is only available if :const:`supports_bytes_environ` is ``True``. .. availability:: Unix. @@ -396,11 +401,11 @@ process and user. On macOS, :func:`getgroups` behavior differs somewhat from other Unix platforms. If the Python interpreter was built with a - deployment target of :const:`10.5` or earlier, :func:`getgroups` returns + deployment target of ``10.5`` or earlier, :func:`getgroups` returns the list of effective group ids associated with the current user process; this list is limited to a system-defined number of entries, typically 16, and may be modified by calls to :func:`setgroups` if suitably privileged. - If built with a deployment target greater than :const:`10.5`, + If built with a deployment target greater than ``10.5``, :func:`getgroups` returns the current group access list for the user associated with the effective user id of the process; the group access list may change over the lifetime of the process, it is not affected by @@ -488,6 +493,17 @@ process and user. .. versionadded:: 3.3 +.. data:: PRIO_DARWIN_THREAD + PRIO_DARWIN_PROCESS + PRIO_DARWIN_BG + PRIO_DARWIN_NONUI + + Parameters for the :func:`getpriority` and :func:`setpriority` functions. + + .. availability:: macOS + + .. versionadded:: 3.12 + .. function:: getresuid() Return a tuple (ruid, euid, suid) denoting the current process's @@ -548,7 +564,7 @@ process and user. .. note:: On some platforms, including FreeBSD and macOS, setting ``environ`` may - cause memory leaks. Refer to the system documentation for :c:func:`putenv`. + cause memory leaks. Refer to the system documentation for :c:func:`!putenv`. .. audit-event:: os.putenv key,value os.putenv @@ -630,7 +646,7 @@ process and user. .. function:: setpgrp() - Call the system call :c:func:`setpgrp` or ``setpgrp(0, 0)`` depending on + Call the system call :c:func:`!setpgrp` or ``setpgrp(0, 0)`` depending on which version is implemented (if any). See the Unix manual for the semantics. .. availability:: Unix, not Emscripten, not WASI. @@ -638,7 +654,7 @@ process and user. .. function:: setpgid(pid, pgrp, /) - Call the system call :c:func:`setpgid` to set the process group id of the + Call the system call :c:func:`!setpgid` to set the process group id of the process with id *pid* to the process group with id *pgrp*. See the Unix manual for the semantics. @@ -698,14 +714,14 @@ process and user. .. function:: getsid(pid, /) - Call the system call :c:func:`getsid`. See the Unix manual for the semantics. + Call the system call :c:func:`!getsid`. See the Unix manual for the semantics. .. availability:: Unix, not Emscripten, not WASI. .. function:: setsid() - Call the system call :c:func:`setsid`. See the Unix manual for the semantics. + Call the system call :c:func:`!setsid`. See the Unix manual for the semantics. .. availability:: Unix, not Emscripten, not WASI. @@ -723,7 +739,7 @@ process and user. .. function:: strerror(code, /) Return the error message corresponding to the error code in *code*. - On platforms where :c:func:`strerror` returns ``NULL`` when given an unknown + On platforms where :c:func:`!strerror` returns ``NULL`` when given an unknown error number, :exc:`ValueError` is raised. @@ -907,7 +923,7 @@ as internal buffering of data. In Linux kernel older than 5.3, the files pointed by *src* and *dst* must reside in the same filesystem, otherwise an :exc:`OSError` is - raised with :attr:`~OSError.errno` set to :data:`errno.EXDEV`. 
+   raised with :attr:`~OSError.errno` set to :const:`errno.EXDEV`.
 
    This copy is done without the additional cost of transferring data
    from the kernel to user space and then back into the kernel. Additionally,
@@ -1061,7 +1077,7 @@ as internal buffering of data.
 .. function:: fsync(fd)
 
    Force write of file with filedescriptor *fd* to disk.  On Unix, this calls the
-   native :c:func:`fsync` function; on Windows, the MS :c:func:`_commit` function.
+   native :c:func:`!fsync` function; on Windows, the MS :c:func:`!_commit` function.
 
    If you're starting with a buffered Python :term:`file object` *f*, first do
    ``f.flush()``, and then do ``os.fsync(f.fileno())``, to ensure that all internal
@@ -1091,13 +1107,17 @@ as internal buffering of data.
 
    See also :func:`set_blocking` and :meth:`socket.socket.setblocking`.
 
-   .. availability:: Unix.
+   .. availability:: Unix, Windows.
 
      The function is limited on Emscripten and WASI, see
      :ref:`wasm-availability` for more information.
 
+      On Windows, this function is limited to pipes.
+
    .. versionadded:: 3.5
 
+   .. versionchanged:: 3.12
+      Added support for pipes on Windows.
 
 .. function:: isatty(fd, /)
 
@@ -1143,25 +1163,65 @@ as internal buffering of data.
 
    .. versionadded:: 3.11
 
-.. function:: lseek(fd, pos, how, /)
+.. function:: lseek(fd, pos, whence, /)
 
    Set the current position of file descriptor *fd* to position *pos*, modified
-   by *how*: :const:`SEEK_SET` or ``0`` to set the position relative to the
-   beginning of the file; :const:`SEEK_CUR` or ``1`` to set it relative to the
-   current position; :const:`SEEK_END` or ``2`` to set it relative to the end of
-   the file. Return the new cursor position in bytes, starting from the beginning.
+   by *whence*, and return the new position in bytes relative to
+   the start of the file.
+   Valid values for *whence* are:
+
+   * :const:`SEEK_SET` or ``0`` -- set *pos* relative to the beginning of the file
+   * :const:`SEEK_CUR` or ``1`` -- set *pos* relative to the current file position
+   * :const:`SEEK_END` or ``2`` -- set *pos* relative to the end of the file
+   * :const:`SEEK_HOLE` -- set *pos* to the next location containing a hole, relative to *pos*
+   * :const:`SEEK_DATA` -- set *pos* to the next location containing data, relative to *pos*
+
+   .. versionchanged:: 3.3
+
+      Add support for :const:`!SEEK_HOLE` and :const:`!SEEK_DATA`.
 
 .. data:: SEEK_SET
           SEEK_CUR
          SEEK_END
 
-   Parameters to the :func:`lseek` function. Their values are 0, 1, and 2,
-   respectively.
+   Parameters to the :func:`lseek` function and the :meth:`~io.IOBase.seek`
+   method on :term:`file-like objects `,
+   for whence to adjust the file position indicator.
+
+   :const:`SEEK_SET`
+      Adjust the file position relative to the beginning of the file.
+   :const:`SEEK_CUR`
+      Adjust the file position relative to the current file position.
+   :const:`SEEK_END`
+      Adjust the file position relative to the end of the file.
+
+   Their values are 0, 1, and 2, respectively.
+
+
+.. data:: SEEK_HOLE
+          SEEK_DATA
+
+   Parameters to the :func:`lseek` function and the :meth:`~io.IOBase.seek`
+   method on :term:`file-like objects `,
+   for seeking file data and holes on sparsely allocated files.
+
+   :data:`!SEEK_DATA`
+      Adjust the file offset to the next location containing data,
+      relative to the seek position.
+
+   :data:`!SEEK_HOLE`
+      Adjust the file offset to the next location containing a hole,
+      relative to the seek position.
+      A hole is defined as a sequence of zeros.
+
+   .. note::
+
+      These operations only make sense for filesystems that support them.
+
+      .. availability:: Linux >= 3.1, macOS, Unix
+
+   .. 
versionadded:: 3.3 - Some operating systems could support additional values, like - :data:`os.SEEK_HOLE` or :data:`os.SEEK_DATA`. .. function:: open(path, flags, mode=0o777, *, dir_fd=None) @@ -1275,7 +1335,7 @@ or `the MSDN `_ on Windo .. function:: openpty() - .. index:: module: pty + .. index:: pair: module; pty Open a new pseudo-terminal pair. Return a pair of file descriptors ``(master, slave)`` for the pty and the tty, respectively. The new file @@ -1402,7 +1462,7 @@ or `the MSDN `_ on Windo If some data was successfully read, it will return the number of bytes read. If no bytes were read, it will return ``-1`` and set errno to - :data:`errno.EAGAIN`. + :const:`errno.EAGAIN`. .. availability:: Linux >= 4.14. @@ -1558,21 +1618,6 @@ or `the MSDN `_ on Windo Parameters *out* and *in* was renamed to *out_fd* and *in_fd*. -.. function:: set_blocking(fd, blocking, /) - - Set the blocking mode of the specified file descriptor. Set the - :data:`O_NONBLOCK` flag if blocking is ``False``, clear the flag otherwise. - - See also :func:`get_blocking` and :meth:`socket.socket.setblocking`. - - .. availability:: Unix. - - The function is limited on Emscripten and WASI, see - :ref:`wasm-availability` for more information. - - .. versionadded:: 3.5 - - .. data:: SF_NODISKIO SF_MNOWAIT SF_SYNC @@ -1594,6 +1639,26 @@ or `the MSDN `_ on Windo .. versionadded:: 3.11 +.. function:: set_blocking(fd, blocking, /) + + Set the blocking mode of the specified file descriptor. Set the + :data:`O_NONBLOCK` flag if blocking is ``False``, clear the flag otherwise. + + See also :func:`get_blocking` and :meth:`socket.socket.setblocking`. + + .. availability:: Unix, Windows. + + The function is limited on Emscripten and WASI, see + :ref:`wasm-availability` for more information. + + On Windows, this function is limited to pipes. + + .. versionadded:: 3.5 + + .. versionchanged:: 3.12 + Added support for pipes on Windows. + + .. function:: splice(src, dst, count, offset_src=None, offset_dst=None) Transfer *count* bytes from file descriptor *src*, starting from offset @@ -1603,7 +1668,7 @@ or `the MSDN `_ on Windo *offset_dst*. The offset associated to the file descriptor that refers to a pipe must be ``None``. The files pointed by *src* and *dst* must reside in the same filesystem, otherwise an :exc:`OSError` is raised with - :attr:`~OSError.errno` set to :data:`errno.EXDEV`. + :attr:`~OSError.errno` set to :const:`errno.EXDEV`. This copy is done without the additional cost of transferring data from the kernel to user space and then back into the kernel. Additionally, @@ -1936,18 +2001,18 @@ features: Set the flags of *path* to the numeric *flags*. *flags* may take a combination (bitwise OR) of the following values (as defined in the :mod:`stat` module): - * :data:`stat.UF_NODUMP` - * :data:`stat.UF_IMMUTABLE` - * :data:`stat.UF_APPEND` - * :data:`stat.UF_OPAQUE` - * :data:`stat.UF_NOUNLINK` - * :data:`stat.UF_COMPRESSED` - * :data:`stat.UF_HIDDEN` - * :data:`stat.SF_ARCHIVED` - * :data:`stat.SF_IMMUTABLE` - * :data:`stat.SF_APPEND` - * :data:`stat.SF_NOUNLINK` - * :data:`stat.SF_SNAPSHOT` + * :const:`stat.UF_NODUMP` + * :const:`stat.UF_IMMUTABLE` + * :const:`stat.UF_APPEND` + * :const:`stat.UF_OPAQUE` + * :const:`stat.UF_NOUNLINK` + * :const:`stat.UF_COMPRESSED` + * :const:`stat.UF_HIDDEN` + * :const:`stat.SF_ARCHIVED` + * :const:`stat.SF_IMMUTABLE` + * :const:`stat.SF_APPEND` + * :const:`stat.SF_NOUNLINK` + * :const:`stat.SF_SNAPSHOT` This function can support :ref:`not following symlinks `. 
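For readers skimming the hunk above, a brief, hedged sketch of how these :mod:`stat` flag constants are combined with :func:`os.chflags`, the entry this hunk belongs to (the file name is hypothetical, and the call only succeeds on platforms, such as BSD and macOS, that implement ``chflags``)::

   import os
   import stat

   path = "scratch.log"                                # hypothetical file
   os.chflags(path, stat.UF_NODUMP | stat.UF_APPEND)   # mark no-dump and append-only
   os.chflags(path, 0)                                 # clear the user flags again
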
@@ -1968,25 +2033,25 @@ features:
    following values (as defined in the :mod:`stat` module) or bitwise ORed
    combinations of them:
 
-   * :data:`stat.S_ISUID`
-   * :data:`stat.S_ISGID`
-   * :data:`stat.S_ENFMT`
-   * :data:`stat.S_ISVTX`
-   * :data:`stat.S_IREAD`
-   * :data:`stat.S_IWRITE`
-   * :data:`stat.S_IEXEC`
-   * :data:`stat.S_IRWXU`
-   * :data:`stat.S_IRUSR`
-   * :data:`stat.S_IWUSR`
-   * :data:`stat.S_IXUSR`
-   * :data:`stat.S_IRWXG`
-   * :data:`stat.S_IRGRP`
-   * :data:`stat.S_IWGRP`
-   * :data:`stat.S_IXGRP`
-   * :data:`stat.S_IRWXO`
-   * :data:`stat.S_IROTH`
-   * :data:`stat.S_IWOTH`
-   * :data:`stat.S_IXOTH`
+   * :const:`stat.S_ISUID`
+   * :const:`stat.S_ISGID`
+   * :const:`stat.S_ENFMT`
+   * :const:`stat.S_ISVTX`
+   * :const:`stat.S_IREAD`
+   * :const:`stat.S_IWRITE`
+   * :const:`stat.S_IEXEC`
+   * :const:`stat.S_IRWXU`
+   * :const:`stat.S_IRUSR`
+   * :const:`stat.S_IWUSR`
+   * :const:`stat.S_IXUSR`
+   * :const:`stat.S_IRWXG`
+   * :const:`stat.S_IRGRP`
+   * :const:`stat.S_IWGRP`
+   * :const:`stat.S_IXGRP`
+   * :const:`stat.S_IRWXO`
+   * :const:`stat.S_IROTH`
+   * :const:`stat.S_IWOTH`
+   * :const:`stat.S_IXOTH`
 
   This function can support :ref:`specifying a file descriptor `,
   :ref:`paths relative to directory descriptors ` and :ref:`not
@@ -2126,7 +2191,7 @@ features:
 
    .. audit-event:: os.link src,dst,src_dir_fd,dst_dir_fd os.link
 
-   .. availability:: Unix, Windows.
+   .. availability:: Unix, Windows, not Emscripten.
 
    .. versionchanged:: 3.2
      Added Windows support.
 
@@ -2175,9 +2240,72 @@ features:
 
    Accepts a :term:`path-like object`.
 
+.. function:: listdrives()
+
+   Return a list containing the names of drives on a Windows system.
+
+   A drive name typically looks like ``'C:\\'``. Not every drive name
+   will be associated with a volume, and some may be inaccessible for
+   a variety of reasons, including permissions, network connectivity
+   or missing media. This function does not test for access.
+
+   May raise :exc:`OSError` if an error occurs collecting the drive
+   names.
+
+   .. audit-event:: os.listdrives "" os.listdrives
+
+   .. availability:: Windows
+
+   .. versionadded:: 3.12
+
+
+.. function:: listmounts(volume)
+
+   Return a list containing the mount points for a volume on a Windows
+   system.
+
+   *volume* must be represented as a GUID path, like those returned by
+   :func:`os.listvolumes`. Volumes may be mounted in multiple locations
+   or not at all. In the latter case, the list will be empty. Mount
+   points that are not associated with a volume will not be returned by
+   this function.
+
+   The mount points returned by this function will be absolute paths, and
+   may be longer than the drive name.
+
+   Raises :exc:`OSError` if the volume is not recognized or if an error
+   occurs collecting the paths.
+
+   .. audit-event:: os.listmounts volume os.listmounts
+
+   .. availability:: Windows
+
+   .. versionadded:: 3.12
+
+
+.. function:: listvolumes()
+
+   Return a list containing the volumes in the system.
+
+   Volumes are typically represented as a GUID path that looks like
+   ``\\?\Volume{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}\``. Files can
+   usually be accessed through a GUID path, permissions allowing.
+   However, users are generally not familiar with them, and so the
+   recommended use of this function is to retrieve mount points
+   using :func:`os.listmounts`.
+
+   May raise :exc:`OSError` if an error occurs collecting the volumes.
+
+   .. audit-event:: os.listvolumes "" os.listvolumes
+
+   .. availability:: Windows
+
+   .. versionadded:: 3.12
+
+
 .. 
function:: lstat(path, *, dir_fd=None) - Perform the equivalent of an :c:func:`lstat` system call on the given path. + Perform the equivalent of an :c:func:`!lstat` system call on the given path. Similar to :func:`~os.stat`, but does not follow symbolic links. Return a :class:`stat_result` object. @@ -2333,13 +2461,13 @@ features: .. function:: major(device, /) Extract the device major number from a raw device number (usually the - :attr:`st_dev` or :attr:`st_rdev` field from :c:type:`stat`). + :attr:`st_dev` or :attr:`st_rdev` field from :c:struct:`stat`). .. function:: minor(device, /) Extract the device minor number from a raw device number (usually the - :attr:`st_dev` or :attr:`st_rdev` field from :c:type:`stat`). + :attr:`st_dev` or :attr:`st_rdev` field from :c:struct:`stat`). .. function:: makedev(major, minor, /) @@ -2421,7 +2549,7 @@ features: .. function:: remove(path, *, dir_fd=None) Remove (delete) the file *path*. If *path* is a directory, an - :exc:`IsADirectoryError` is raised. Use :func:`rmdir` to remove directories. + :exc:`OSError` is raised. Use :func:`rmdir` to remove directories. If the file does not exist, a :exc:`FileNotFoundError` is raised. This function can support :ref:`paths relative to directory descriptors @@ -2467,6 +2595,8 @@ features: will fail with an :exc:`OSError` subclass in a number of cases: On Windows, if *dst* exists a :exc:`FileExistsError` is always raised. + The operation may fail if *src* and *dst* are on different filesystems. Use + :func:`shutil.move` to support moves to a different filesystem. On Unix, if *src* is a file and *dst* is a directory or vice-versa, an :exc:`IsADirectoryError` or a :exc:`NotADirectoryError` will be raised @@ -2738,6 +2868,17 @@ features: This method can raise :exc:`OSError`, such as :exc:`PermissionError`, but :exc:`FileNotFoundError` is caught and not raised. + .. method:: is_junction() + + Return ``True`` if this entry is a junction (even if broken); + return ``False`` if the entry points to a regular directory, any kind + of file, a symlink, or if it doesn't exist anymore. + + The result is cached on the ``os.DirEntry`` object. Call + :func:`os.path.isjunction` to fetch up-to-date information. + + .. versionadded:: 3.12 + .. method:: stat(*, follow_symlinks=True) Return a :class:`stat_result` object for this entry. This method @@ -2760,8 +2901,8 @@ features: Note that there is a nice correspondence between several attributes and methods of ``os.DirEntry`` and of :class:`pathlib.Path`. In particular, the ``name`` attribute has the same - meaning, as do the ``is_dir()``, ``is_file()``, ``is_symlink()`` - and ``stat()`` methods. + meaning, as do the ``is_dir()``, ``is_file()``, ``is_symlink()``, + ``is_junction()``, and ``stat()`` methods. .. versionadded:: 3.5 @@ -2769,6 +2910,12 @@ features: Added support for the :class:`~os.PathLike` interface. Added support for :class:`bytes` paths on Windows. + .. versionchanged:: 3.12 + The ``st_ctime`` attribute of a stat result is deprecated on Windows. + The file creation time is properly available as ``st_birthtime``, and + in the future ``st_ctime`` may be changed to return zero or the + metadata change time, if available. + .. function:: stat(path, *, dir_fd=None, follow_symlinks=True) @@ -2795,7 +2942,7 @@ features: possible and call :func:`lstat` on the result. This does not apply to dangling symlinks or junction points, which will raise the usual exceptions. - .. index:: module: stat + .. index:: pair: module; stat Example:: @@ -2831,7 +2978,7 @@ features: .. 
class:: stat_result Object whose attributes correspond roughly to the members of the - :c:type:`stat` structure. It is used for the result of :func:`os.stat`, + :c:struct:`stat` structure. It is used for the result of :func:`os.stat`, :func:`os.fstat` and :func:`os.lstat`. Attributes: @@ -2884,10 +3031,12 @@ features: .. attribute:: st_ctime - Platform dependent: + Time of most recent metadata change expressed in seconds. - * the time of most recent metadata change on Unix, - * the time of creation on Windows, expressed in seconds. + .. versionchanged:: 3.12 + ``st_ctime`` is deprecated on Windows. Use ``st_birthtime`` for + the file creation time. In the future, ``st_ctime`` will contain + the time of the most recent metadata change, as for other platforms. .. attribute:: st_atime_ns @@ -2900,29 +3049,48 @@ features: .. attribute:: st_ctime_ns - Platform dependent: + Time of most recent metadata change expressed in nanoseconds as an + integer. + + .. versionchanged:: 3.12 + ``st_ctime_ns`` is deprecated on Windows. Use ``st_birthtime_ns`` + for the file creation time. In the future, ``st_ctime`` will contain + the time of the most recent metadata change, as for other platforms. + + .. attribute:: st_birthtime + + Time of file creation expressed in seconds. This attribute is not + always available, and may raise :exc:`AttributeError`. + + .. versionchanged:: 3.12 + ``st_birthtime`` is now available on Windows. - * the time of most recent metadata change on Unix, - * the time of creation on Windows, expressed in nanoseconds as an - integer. + .. attribute:: st_birthtime_ns + + Time of file creation expressed in nanoseconds as an integer. + This attribute is not always available, and may raise + :exc:`AttributeError`. + + .. versionadded:: 3.12 .. note:: The exact meaning and resolution of the :attr:`st_atime`, - :attr:`st_mtime`, and :attr:`st_ctime` attributes depend on the operating - system and the file system. For example, on Windows systems using the FAT - or FAT32 file systems, :attr:`st_mtime` has 2-second resolution, and - :attr:`st_atime` has only 1-day resolution. See your operating system - documentation for details. + :attr:`st_mtime`, :attr:`st_ctime` and :attr:`st_birthtime` attributes + depend on the operating system and the file system. For example, on + Windows systems using the FAT32 file systems, :attr:`st_mtime` has + 2-second resolution, and :attr:`st_atime` has only 1-day resolution. + See your operating system documentation for details. Similarly, although :attr:`st_atime_ns`, :attr:`st_mtime_ns`, - and :attr:`st_ctime_ns` are always expressed in nanoseconds, many - systems do not provide nanosecond precision. On systems that do - provide nanosecond precision, the floating-point object used to - store :attr:`st_atime`, :attr:`st_mtime`, and :attr:`st_ctime` - cannot preserve all of it, and as such will be slightly inexact. - If you need the exact timestamps you should always use - :attr:`st_atime_ns`, :attr:`st_mtime_ns`, and :attr:`st_ctime_ns`. + :attr:`st_ctime_ns` and :attr:`st_birthtime_ns` are always expressed in + nanoseconds, many systems do not provide nanosecond precision. On + systems that do provide nanosecond precision, the floating-point object + used to store :attr:`st_atime`, :attr:`st_mtime`, :attr:`st_ctime` and + :attr:`st_birthtime` cannot preserve all of it, and as such will be + slightly inexact. If you need the exact timestamps you should always use + :attr:`st_atime_ns`, :attr:`st_mtime_ns`, :attr:`st_ctime_ns` and + :attr:`st_birthtime_ns`. 
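+   For example, the following minimal sketch (``example.txt`` is an arbitrary
+   placeholder file; ``st_birthtime_ns`` is not available on every platform)
+   reads both the float and the exact integer timestamps::
+
+      import os
+
+      st = os.stat("example.txt")
+      print(st.st_mtime)        # float seconds; may lose sub-second precision
+      print(st.st_mtime_ns)     # exact integer nanoseconds
+      try:
+          print(st.st_birthtime_ns)   # creation time, where supported
+      except AttributeError:
+          print("st_birthtime_ns is not available on this platform")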
On some Unix systems (such as Linux), the following attributes may also be available: @@ -2952,10 +3120,6 @@ features: File generation number. - .. attribute:: st_birthtime - - Time of file creation. - On Solaris and derivatives, the following attributes may also be available: @@ -2984,22 +3148,24 @@ features: Windows file attributes: ``dwFileAttributes`` member of the ``BY_HANDLE_FILE_INFORMATION`` structure returned by - :c:func:`GetFileInformationByHandle`. See the ``FILE_ATTRIBUTE_*`` + :c:func:`!GetFileInformationByHandle`. + See the :const:`!FILE_ATTRIBUTE_* ` constants in the :mod:`stat` module. .. attribute:: st_reparse_tag - When :attr:`st_file_attributes` has the ``FILE_ATTRIBUTE_REPARSE_POINT`` + When :attr:`st_file_attributes` has the :const:`~stat.FILE_ATTRIBUTE_REPARSE_POINT` set, this field contains the tag identifying the type of reparse point. - See the ``IO_REPARSE_TAG_*`` constants in the :mod:`stat` module. + See the :const:`IO_REPARSE_TAG_* ` + constants in the :mod:`stat` module. The standard module :mod:`stat` defines functions and constants that are - useful for extracting information from a :c:type:`stat` structure. (On + useful for extracting information from a :c:struct:`stat` structure. (On Windows, some items are filled with dummy values.) For backward compatibility, a :class:`stat_result` instance is also accessible as a tuple of at least 10 integers giving the most important (and - portable) members of the :c:type:`stat` structure, in the order + portable) members of the :c:struct:`stat` structure, in the order :attr:`st_mode`, :attr:`st_ino`, :attr:`st_dev`, :attr:`st_nlink`, :attr:`st_uid`, :attr:`st_gid`, :attr:`st_size`, :attr:`st_atime`, :attr:`st_mtime`, :attr:`st_ctime`. More items may be added at the end by @@ -3028,11 +3194,30 @@ features: files as :const:`S_IFCHR`, :const:`S_IFIFO` or :const:`S_IFBLK` as appropriate. + .. versionchanged:: 3.12 + On Windows, :attr:`st_ctime` is deprecated. Eventually, it will + contain the last metadata change time, for consistency with other + platforms, but for now still contains creation time. + Use :attr:`st_birthtime` for the creation time. + + .. versionchanged:: 3.12 + On Windows, :attr:`st_ino` may now be up to 128 bits, depending + on the file system. Previously it would not be above 64 bits, and + larger file identifiers would be arbitrarily packed. + + .. versionchanged:: 3.12 + On Windows, :attr:`st_rdev` no longer returns a value. Previously + it would contain the same as :attr:`st_dev`, which was incorrect. + + .. versionadded:: 3.12 + Added the :attr:`st_birthtime` member on Windows. + + .. function:: statvfs(path) - Perform a :c:func:`statvfs` system call on the given path. The return value is + Perform a :c:func:`!statvfs` system call on the given path. The return value is an object whose attributes describe the filesystem on the given path, and - correspond to the members of the :c:type:`statvfs` structure, namely: + correspond to the members of the :c:struct:`statvfs` structure, namely: :attr:`f_bsize`, :attr:`f_frsize`, :attr:`f_blocks`, :attr:`f_bfree`, :attr:`f_bavail`, :attr:`f_files`, :attr:`f_ffree`, :attr:`f_favail`, :attr:`f_flag`, :attr:`f_namemax`, :attr:`f_fsid`. @@ -3596,6 +3781,217 @@ features: .. versionadded:: 3.10 +Timer File Descriptors +~~~~~~~~~~~~~~~~~~~~~~ + +.. versionadded:: 3.13 + +These functions provide support for Linux's *timer file descriptor* API. +Naturally, they are all only available on Linux. + +.. 
function:: timerfd_create(clockid, /, *, flags=0)
+
+   Create and return a timer file descriptor (*timerfd*).
+
+   The file descriptor returned by :func:`timerfd_create` supports:
+
+   - :func:`read`
+   - :func:`~select.select`
+   - :func:`~select.poll`
+
+   The file descriptor's :func:`read` method can be called with a buffer size
+   of 8. If the timer has already expired one or more times, :func:`read`
+   returns the number of expirations in the host's byte order, which may be
+   converted to an :class:`int` by ``int.from_bytes(x, byteorder=sys.byteorder)``.
+
+   :func:`~select.select` and :func:`~select.poll` can be used to wait until
+   the timer expires and the file descriptor is readable.
+
+   *clockid* must be a valid :ref:`clock ID `,
+   as defined in the :py:mod:`time` module:
+
+   - :const:`time.CLOCK_REALTIME`
+   - :const:`time.CLOCK_MONOTONIC`
+   - :const:`time.CLOCK_BOOTTIME` (since Linux 3.15 for timerfd_create)
+
+   If *clockid* is :const:`time.CLOCK_REALTIME`, a settable system-wide
+   real-time clock is used. If the system clock is changed, the timer setting
+   needs to be updated. To cancel the timer when the system clock is changed,
+   see :const:`TFD_TIMER_CANCEL_ON_SET`.
+
+   If *clockid* is :const:`time.CLOCK_MONOTONIC`, a non-settable monotonically
+   increasing clock is used. Even if the system clock is changed, the timer
+   setting will not be affected.
+
+   If *clockid* is :const:`time.CLOCK_BOOTTIME`, the timer behaves as with
+   :const:`time.CLOCK_MONOTONIC`, except that the clock also includes any
+   time that the system is suspended.
+
+   The file descriptor's behaviour can be modified by specifying a *flags* value.
+   Any of the following flags may be used, combined using bitwise OR
+   (the ``|`` operator):
+
+   - :const:`TFD_NONBLOCK`
+   - :const:`TFD_CLOEXEC`
+
+   If :const:`TFD_NONBLOCK` is not set as a flag, :func:`read` blocks until
+   the timer expires. If it is set as a flag, :func:`read` doesn't block, but
+   if there has been no expiration since the last call to read,
+   :func:`read` raises :class:`OSError` with ``errno`` set to
+   :const:`errno.EAGAIN`.
+
+   :const:`TFD_CLOEXEC` is always set by Python automatically.
+
+   The file descriptor must be closed with :func:`os.close` when it is no
+   longer needed, or else the file descriptor will be leaked.
+
+   .. seealso:: The :manpage:`timerfd_create(2)` man page.
+
+   .. availability:: Linux >= 2.6.27 with glibc >= 2.8
+
+   .. versionadded:: 3.13
+
+
+.. function:: timerfd_settime(fd, /, *, flags=0, initial=0.0, interval=0.0)
+
+   Alter a timer file descriptor's internal timer.
+   This function operates the same interval timer as :func:`timerfd_settime_ns`.
+
+   *fd* must be a valid timer file descriptor.
+
+   The timer's behaviour can be modified by specifying a *flags* value.
+   Any of the following flags may be used, combined using bitwise OR
+   (the ``|`` operator):
+
+   - :const:`TFD_TIMER_ABSTIME`
+   - :const:`TFD_TIMER_CANCEL_ON_SET`
+
+   The timer is disabled by setting *initial* to zero (``0``).
+   If *initial* is greater than zero, the timer is enabled.
+   If *initial* is less than zero, an :class:`OSError` exception is raised
+   with ``errno`` set to :const:`errno.EINVAL`.
+
+   By default the timer will fire when *initial* seconds have elapsed.
+   (If *initial* is zero, the timer will fire immediately.)
+
+   However, if the :const:`TFD_TIMER_ABSTIME` flag is set,
+   the timer will fire when the timer's clock
+   (set by *clockid* in :func:`timerfd_create`) reaches *initial* seconds.
+
+   The timer's interval is set by the *interval* :py:class:`float`.
+   If *interval* is zero, the timer only fires once, on the initial expiration.
+   If *interval* is greater than zero, the timer fires every time *interval*
+   seconds have elapsed since the previous expiration.
+   If *interval* is less than zero, :class:`OSError` is raised with ``errno``
+   set to :const:`errno.EINVAL`.
+
+   If the :const:`TFD_TIMER_CANCEL_ON_SET` flag is set along with
+   :const:`TFD_TIMER_ABSTIME` and the clock for this timer is
+   :const:`time.CLOCK_REALTIME`, the timer is marked as cancelable if the
+   real-time clock is changed discontinuously. Reading the descriptor is then
+   aborted with the error ECANCELED.
+
+   Linux manages the system clock as UTC. A daylight-saving time transition
+   only changes the time offset and does not cause a discontinuous system
+   clock change.
+
+   A discontinuous system clock change may be caused by the following events:
+
+   - ``settimeofday``
+   - ``clock_settime``
+   - setting the system date and time with the ``date`` command
+
+   Return a two-item tuple of (``next_expiration``, ``interval``) from
+   the previous timer state, before this function executed.
+
+   .. seealso::
+
+      :manpage:`timerfd_create(2)`, :manpage:`timerfd_settime(2)`,
+      :manpage:`settimeofday(2)`, :manpage:`clock_settime(2)`,
+      and :manpage:`date(1)`.
+
+   .. availability:: Linux >= 2.6.27 with glibc >= 2.8
+
+   .. versionadded:: 3.13
+
+
+.. function:: timerfd_settime_ns(fd, /, *, flags=0, initial=0, interval=0)
+
+   Similar to :func:`timerfd_settime`, but uses times expressed in nanoseconds.
+   This function operates the same interval timer as :func:`timerfd_settime`.
+
+   .. availability:: Linux >= 2.6.27 with glibc >= 2.8
+
+   .. versionadded:: 3.13
+
+
+.. function:: timerfd_gettime(fd, /)
+
+   Return a two-item tuple of floats (``next_expiration``, ``interval``).
+
+   ``next_expiration`` denotes the relative time until the timer next fires,
+   regardless of whether the :const:`TFD_TIMER_ABSTIME` flag is set.
+
+   ``interval`` denotes the timer's interval.
+   If zero, the timer will only fire once, after ``next_expiration`` seconds
+   have elapsed.
+
+   .. seealso:: :manpage:`timerfd_gettime(2)`
+
+   .. availability:: Linux >= 2.6.27 with glibc >= 2.8
+
+   .. versionadded:: 3.13
+
+
+.. function:: timerfd_gettime_ns(fd, /)
+
+   Similar to :func:`timerfd_gettime`, but returns the times as nanoseconds.
+
+   .. availability:: Linux >= 2.6.27 with glibc >= 2.8
+
+   .. versionadded:: 3.13
+
+.. data:: TFD_NONBLOCK
+
+   A flag for the :func:`timerfd_create` function,
+   which sets the :const:`O_NONBLOCK` status flag for the new timer file
+   descriptor. If :const:`TFD_NONBLOCK` is not set as a flag, :func:`read` blocks.
+
+   .. availability:: Linux >= 2.6.27 with glibc >= 2.8
+
+   .. versionadded:: 3.13
+
+.. data:: TFD_CLOEXEC
+
+   A flag for the :func:`timerfd_create` function.
+   If :const:`TFD_CLOEXEC` is set as a flag, the close-on-exec flag is set on
+   the new file descriptor.
+
+   .. availability:: Linux >= 2.6.27 with glibc >= 2.8
+
+   .. versionadded:: 3.13
+
+.. data:: TFD_TIMER_ABSTIME
+
+   A flag for the :func:`timerfd_settime` and :func:`timerfd_settime_ns` functions.
+   If this flag is set, *initial* is interpreted as an absolute value on the
+   timer's clock (in UTC seconds or nanoseconds since the Unix Epoch).
+
+   .. availability:: Linux >= 2.6.27 with glibc >= 2.8
+
+   .. versionadded:: 3.13
+
+.. data:: TFD_TIMER_CANCEL_ON_SET
+
+   A flag for the :func:`timerfd_settime` and :func:`timerfd_settime_ns`
+   functions along with :const:`TFD_TIMER_ABSTIME`.
+ The timer is cancelled when the time of the underlying clock changes + discontinuously. + + .. availability:: Linux >= 2.6.27 with glibc >= 2.8 + + .. versionadded:: 3.13 + + Linux extended attributes ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -3788,7 +4184,8 @@ to be ignored. the :envvar:`PATH` variable. The other variants, :func:`execl`, :func:`execle`, :func:`execv`, and :func:`execve`, will not use the :envvar:`PATH` variable to locate the executable; *path* must contain an appropriate absolute or relative - path. + path. Relative paths must include at least one slash, even on Windows, as + plain names will not be resolved. For :func:`execle`, :func:`execlpe`, :func:`execve`, and :func:`execvpe` (note that these all end in "e"), the *env* parameter must be a mapping which is @@ -3820,7 +4217,7 @@ to be ignored. .. note:: - The standard way to exit is ``sys.exit(n)``. :func:`_exit` should + The standard way to exit is :func:`sys.exit(n) `. :func:`!_exit` should normally only be used in the child process after a :func:`fork`. The following exit codes are defined and can be used with :func:`_exit`, @@ -3971,15 +4368,38 @@ written in Python, such as a mail server's external command delivery program. .. audit-event:: os.fork "" os.fork + .. warning:: + + If you use TLS sockets in an application calling ``fork()``, see + the warning in the :mod:`ssl` documentation. + .. versionchanged:: 3.8 Calling ``fork()`` in a subinterpreter is no longer supported (:exc:`RuntimeError` is raised). - .. warning:: + .. versionchanged:: 3.12 + If Python is able to detect that your process has multiple + threads, :func:`os.fork` now raises a :exc:`DeprecationWarning`. - See :mod:`ssl` for applications that use the SSL module with fork(). + We chose to surface this as a warning, when detectable, to better + inform developers of a design problem that the POSIX platform + specifically notes as not supported. Even in code that + *appears* to work, it has never been safe to mix threading with + :func:`os.fork` on POSIX platforms. The CPython runtime itself has + always made API calls that are not safe for use in the child + process when threads existed in the parent (such as ``malloc`` and + ``free``). - .. availability:: Unix, not Emscripten, not WASI. + Users of macOS or users of libc or malloc implementations other + than those typically found in glibc to date are among those + already more likely to experience deadlocks running such code. + + See `this discussion on fork being incompatible with threads + `_ + for technical details of why we're surfacing this longstanding + platform compatibility problem to developers. + + .. availability:: POSIX, not Emscripten, not WASI. .. function:: forkpty() @@ -3992,6 +4412,11 @@ written in Python, such as a mail server's external command delivery program. .. audit-event:: os.forkpty "" os.forkpty + .. versionchanged:: 3.12 + If Python is able to detect that your process has multiple + threads, this now raises a :exc:`DeprecationWarning`. See the + longer explanation on :func:`os.fork`. + .. versionchanged:: 3.8 Calling ``forkpty()`` in a subinterpreter is no longer supported (:exc:`RuntimeError` is raised). @@ -4008,8 +4433,8 @@ written in Python, such as a mail server's external command delivery program. Send signal *sig* to the process *pid*. Constants for the specific signals available on the host platform are defined in the :mod:`signal` module. 
- Windows: The :data:`signal.CTRL_C_EVENT` and - :data:`signal.CTRL_BREAK_EVENT` signals are special signals which can + Windows: The :const:`signal.CTRL_C_EVENT` and + :const:`signal.CTRL_BREAK_EVENT` signals are special signals which can only be sent to console processes which share a common console window, e.g., some subprocesses. Any other value for *sig* will cause the process to be unconditionally killed by the TerminateProcess API, and the exit code @@ -4062,7 +4487,7 @@ written in Python, such as a mail server's external command delivery program. This flag indicates that the file descriptor will be non-blocking. If the process referred to by the file descriptor has not yet terminated, then an attempt to wait on the file descriptor using :manpage:`waitid(2)` - will immediately return the error :data:`~errno.EAGAIN` rather than blocking. + will immediately return the error :const:`~errno.EAGAIN` rather than blocking. .. availability:: Linux >= 5.10 .. versionadded:: 3.12 @@ -4120,7 +4545,7 @@ written in Python, such as a mail server's external command delivery program. setpgroup=None, resetids=False, setsid=False, setsigmask=(), \ setsigdef=(), scheduler=None) - Wraps the :c:func:`posix_spawn` C library API for use from Python. + Wraps the :c:func:`!posix_spawn` C library API for use from Python. Most users should use :func:`subprocess.run` instead of :func:`posix_spawn`. @@ -4156,16 +4581,16 @@ written in Python, such as a mail server's external command delivery program. Performs ``os.dup2(fd, new_fd)``. These tuples correspond to the C library - :c:func:`posix_spawn_file_actions_addopen`, - :c:func:`posix_spawn_file_actions_addclose`, and - :c:func:`posix_spawn_file_actions_adddup2` API calls used to prepare - for the :c:func:`posix_spawn` call itself. + :c:func:`!posix_spawn_file_actions_addopen`, + :c:func:`!posix_spawn_file_actions_addclose`, and + :c:func:`!posix_spawn_file_actions_adddup2` API calls used to prepare + for the :c:func:`!posix_spawn` call itself. The *setpgroup* argument will set the process group of the child to the value specified. If the value specified is 0, the child's process group ID will be made the same as its process ID. If the value of *setpgroup* is not set, the child will inherit the parent's process group ID. This argument corresponds - to the C library :c:data:`POSIX_SPAWN_SETPGROUP` flag. + to the C library :c:macro:`!POSIX_SPAWN_SETPGROUP` flag. If the *resetids* argument is ``True`` it will reset the effective UID and GID of the child to the real UID and GID of the parent process. If the @@ -4173,27 +4598,27 @@ written in Python, such as a mail server's external command delivery program. the parent. In either case, if the set-user-ID and set-group-ID permission bits are enabled on the executable file, their effect will override the setting of the effective UID and GID. This argument corresponds to the C - library :c:data:`POSIX_SPAWN_RESETIDS` flag. + library :c:macro:`!POSIX_SPAWN_RESETIDS` flag. If the *setsid* argument is ``True``, it will create a new session ID - for ``posix_spawn``. *setsid* requires :c:data:`POSIX_SPAWN_SETSID` - or :c:data:`POSIX_SPAWN_SETSID_NP` flag. Otherwise, :exc:`NotImplementedError` + for ``posix_spawn``. *setsid* requires :c:macro:`!POSIX_SPAWN_SETSID` + or :c:macro:`!POSIX_SPAWN_SETSID_NP` flag. Otherwise, :exc:`NotImplementedError` is raised. The *setsigmask* argument will set the signal mask to the signal set specified. If the parameter is not used, then the child inherits the parent's signal mask. 
This argument corresponds to the C library - :c:data:`POSIX_SPAWN_SETSIGMASK` flag. + :c:macro:`!POSIX_SPAWN_SETSIGMASK` flag. The *sigdef* argument will reset the disposition of all signals in the set specified. This argument corresponds to the C library - :c:data:`POSIX_SPAWN_SETSIGDEF` flag. + :c:macro:`!POSIX_SPAWN_SETSIGDEF` flag. The *scheduler* argument must be a tuple containing the (optional) scheduler policy and an instance of :class:`sched_param` with the scheduler parameters. A value of ``None`` in the place of the scheduler policy indicates that is not being provided. This argument is a combination of the C library - :c:data:`POSIX_SPAWN_SETSCHEDPARAM` and :c:data:`POSIX_SPAWN_SETSCHEDULER` + :c:macro:`!POSIX_SPAWN_SETSCHEDPARAM` and :c:macro:`!POSIX_SPAWN_SETSCHEDULER` flags. .. audit-event:: os.posix_spawn path,argv,env os.posix_spawn @@ -4206,7 +4631,7 @@ written in Python, such as a mail server's external command delivery program. setpgroup=None, resetids=False, setsid=False, setsigmask=(), \ setsigdef=(), scheduler=None) - Wraps the :c:func:`posix_spawnp` C library API for use from Python. + Wraps the :c:func:`!posix_spawnp` C library API for use from Python. Similar to :func:`posix_spawn` except that the system searches for the *executable* file in the list of directories specified by the @@ -4387,7 +4812,7 @@ written in Python, such as a mail server's external command delivery program. Use *show_cmd* to override the default window style. Whether this has any effect will depend on the application being launched. Values are integers as - supported by the Win32 :c:func:`ShellExecute` function. + supported by the Win32 :c:func:`!ShellExecute` function. :func:`startfile` returns as soon as the associated application is launched. There is no option to wait for the application to close, and no way to retrieve @@ -4397,7 +4822,7 @@ written in Python, such as a mail server's external command delivery program. :func:`os.path.normpath` function to ensure that paths are properly encoded for Win32. - To reduce interpreter startup overhead, the Win32 :c:func:`ShellExecute` + To reduce interpreter startup overhead, the Win32 :c:func:`!ShellExecute` function is not resolved until this function is first called. If the function cannot be resolved, :exc:`NotImplementedError` will be raised. @@ -4461,7 +4886,7 @@ written in Python, such as a mail server's external command delivery program. :attr:`!children_system`, and :attr:`!elapsed` in that order. See the Unix manual page - :manpage:`times(2)` and `times(3) `_ manual page on Unix or `the GetProcessTimes MSDN + :manpage:`times(2)` and `times(3) `_ manual page on Unix or `the GetProcessTimes MSDN `_ on Windows. On Windows, only :attr:`!user` and :attr:`!system` are known; the other attributes are zero. @@ -4480,6 +4905,9 @@ written in Python, such as a mail server's external command delivery program. number is zero); the high bit of the low byte is set if a core file was produced. + If there are no children that could be waited for, :exc:`ChildProcessError` + is raised. + :func:`waitstatus_to_exitcode` can be used to convert the exit status into an exit code. @@ -4487,77 +4915,41 @@ written in Python, such as a mail server's external command delivery program. .. seealso:: - :func:`waitpid` can be used to wait for the completion of a specific - child process and has more options. - -.. function:: waitid(idtype, id, options, /) - - Wait for the completion of one or more child processes. 
- *idtype* can be :data:`P_PID`, :data:`P_PGID`, :data:`P_ALL`, or - :data:`P_PIDFD` on Linux. - *id* specifies the pid to wait on. - *options* is constructed from the ORing of one or more of :data:`WEXITED`, - :data:`WSTOPPED` or :data:`WCONTINUED` and additionally may be ORed with - :data:`WNOHANG` or :data:`WNOWAIT`. The return value is an object - representing the data contained in the :c:type:`siginfo_t` structure, namely: - :attr:`si_pid`, :attr:`si_uid`, :attr:`si_signo`, :attr:`si_status`, - :attr:`si_code` or ``None`` if :data:`WNOHANG` is specified and there are no - children in a waitable state. - - .. availability:: Unix, not Emscripten, not WASI. + The other :func:`!wait*` functions documented below can be used to wait for the + completion of a specific child process and have more options. + :func:`waitpid` is the only one also available on Windows. - .. versionadded:: 3.3 - -.. data:: P_PID - P_PGID - P_ALL - These are the possible values for *idtype* in :func:`waitid`. They affect - how *id* is interpreted. +.. function:: waitid(idtype, id, options, /) - .. availability:: Unix, not Emscripten, not WASI. + Wait for the completion of a child process. - .. versionadded:: 3.3 + *idtype* can be :data:`P_PID`, :data:`P_PGID`, :data:`P_ALL`, or (on Linux) :data:`P_PIDFD`. + The interpretation of *id* depends on it; see their individual descriptions. -.. data:: P_PIDFD + *options* is an OR combination of flags. At least one of :data:`WEXITED`, + :data:`WSTOPPED` or :data:`WCONTINUED` is required; + :data:`WNOHANG` and :data:`WNOWAIT` are additional optional flags. - This is a Linux-specific *idtype* that indicates that *id* is a file - descriptor that refers to a process. + The return value is an object representing the data contained in the + :c:type:`siginfo_t` structure with the following attributes: - .. availability:: Linux >= 5.4 + * :attr:`!si_pid` (process ID) + * :attr:`!si_uid` (real user ID of the child) + * :attr:`!si_signo` (always :const:`~signal.SIGCHLD`) + * :attr:`!si_status` (the exit status or signal number, depending on :attr:`!si_code`) + * :attr:`!si_code` (see :data:`CLD_EXITED` for possible values) - .. versionadded:: 3.9 - -.. data:: WEXITED - WSTOPPED - WNOWAIT - - Flags that can be used in *options* in :func:`waitid` that specify what - child signal to wait for. + If :data:`WNOHANG` is specified and there are no matching children in the + requested state, ``None`` is returned. + Otherwise, if there are no matching children + that could be waited for, :exc:`ChildProcessError` is raised. .. availability:: Unix, not Emscripten, not WASI. .. versionadded:: 3.3 -.. data:: CLD_EXITED - CLD_KILLED - CLD_DUMPED - CLD_TRAPPED - CLD_STOPPED - CLD_CONTINUED - - These are the possible values for :attr:`si_code` in the result returned by - :func:`waitid`. - - .. availability:: Unix, not Emscripten, not WASI. - - .. versionadded:: 3.3 - - .. versionchanged:: 3.9 - Added :data:`CLD_KILLED` and :data:`CLD_STOPPED` values. - - .. function:: waitpid(pid, options, /) The details of this function differ on Unix and Windows. @@ -4574,8 +4966,11 @@ written in Python, such as a mail server's external command delivery program. ``-1``, status is requested for any process in the process group ``-pid`` (the absolute value of *pid*). - An :exc:`OSError` is raised with the value of errno when the syscall - returns -1. + *options* is an OR combination of flags. If it contains :data:`WNOHANG` and + there are no matching children in the requested state, ``(0, 0)`` is + returned. 
Otherwise, if there are no matching children that could be waited + for, :exc:`ChildProcessError` is raised. Other options that can be used are + :data:`WUNTRACED` and :data:`WCONTINUED`. On Windows: Wait for completion of a process given by process handle *pid*, and return a tuple containing *pid*, and its exit status shifted left by 8 bits @@ -4588,7 +4983,7 @@ written in Python, such as a mail server's external command delivery program. :func:`waitstatus_to_exitcode` can be used to convert the exit status into an exit code. - .. availability:: Unix, not Emscripten, not WASI. + .. availability:: Unix, Windows, not Emscripten, not WASI. .. versionchanged:: 3.5 If the system call is interrupted and the signal handler does not raise an @@ -4601,9 +4996,9 @@ written in Python, such as a mail server's external command delivery program. Similar to :func:`waitpid`, except no process id argument is given and a 3-element tuple containing the child's process id, exit status indication, and resource usage information is returned. Refer to - :mod:`resource`.\ :func:`~resource.getrusage` for details on resource usage - information. The option argument is the same as that provided to - :func:`waitpid` and :func:`wait4`. + :func:`resource.getrusage` for details on resource usage information. The + *options* argument is the same as that provided to :func:`waitpid` and + :func:`wait4`. :func:`waitstatus_to_exitcode` can be used to convert the exit status into an exitcode. @@ -4614,10 +5009,10 @@ written in Python, such as a mail server's external command delivery program. .. function:: wait4(pid, options) Similar to :func:`waitpid`, except a 3-element tuple, containing the child's - process id, exit status indication, and resource usage information is returned. - Refer to :mod:`resource`.\ :func:`~resource.getrusage` for details on - resource usage information. The arguments to :func:`wait4` are the same - as those provided to :func:`waitpid`. + process id, exit status indication, and resource usage information is + returned. Refer to :func:`resource.getrusage` for details on resource usage + information. The arguments to :func:`wait4` are the same as those provided + to :func:`waitpid`. :func:`waitstatus_to_exitcode` can be used to convert the exit status into an exitcode. @@ -4625,6 +5020,111 @@ written in Python, such as a mail server's external command delivery program. .. availability:: Unix, not Emscripten, not WASI. +.. data:: P_PID + P_PGID + P_ALL + P_PIDFD + + These are the possible values for *idtype* in :func:`waitid`. They affect + how *id* is interpreted: + + * :data:`!P_PID` - wait for the child whose PID is *id*. + * :data:`!P_PGID` - wait for any child whose progress group ID is *id*. + * :data:`!P_ALL` - wait for any child; *id* is ignored. + * :data:`!P_PIDFD` - wait for the child identified by the file descriptor + *id* (a process file descriptor created with :func:`pidfd_open`). + + .. availability:: Unix, not Emscripten, not WASI. + + .. note:: :data:`!P_PIDFD` is only available on Linux >= 5.4. + + .. versionadded:: 3.3 + .. versionadded:: 3.9 + The :data:`!P_PIDFD` constant. + + +.. data:: WCONTINUED + + This *options* flag for :func:`waitpid`, :func:`wait3`, :func:`wait4`, and + :func:`waitid` causes child processes to be reported if they have been + continued from a job control stop since they were last reported. + + .. availability:: Unix, not Emscripten, not WASI. + + +.. 
data:: WEXITED + + This *options* flag for :func:`waitid` causes child processes that have terminated to + be reported. + + The other ``wait*`` functions always report children that have terminated, + so this option is not available for them. + + .. availability:: Unix, not Emscripten, not WASI. + + .. versionadded:: 3.3 + + +.. data:: WSTOPPED + + This *options* flag for :func:`waitid` causes child processes that have been stopped + by the delivery of a signal to be reported. + + This option is not available for the other ``wait*`` functions. + + .. availability:: Unix, not Emscripten, not WASI. + + .. versionadded:: 3.3 + + +.. data:: WUNTRACED + + This *options* flag for :func:`waitpid`, :func:`wait3`, and :func:`wait4` causes + child processes to also be reported if they have been stopped but their + current state has not been reported since they were stopped. + + This option is not available for :func:`waitid`. + + .. availability:: Unix, not Emscripten, not WASI. + + +.. data:: WNOHANG + + This *options* flag causes :func:`waitpid`, :func:`wait3`, :func:`wait4`, and + :func:`waitid` to return right away if no child process status is available + immediately. + + .. availability:: Unix, not Emscripten, not WASI. + + +.. data:: WNOWAIT + + This *options* flag causes :func:`waitid` to leave the child in a waitable state, so that + a later :func:`!wait*` call can be used to retrieve the child status information again. + + This option is not available for the other ``wait*`` functions. + + .. availability:: Unix, not Emscripten, not WASI. + + +.. data:: CLD_EXITED + CLD_KILLED + CLD_DUMPED + CLD_TRAPPED + CLD_STOPPED + CLD_CONTINUED + + These are the possible values for :attr:`!si_code` in the result returned by + :func:`waitid`. + + .. availability:: Unix, not Emscripten, not WASI. + + .. versionadded:: 3.3 + + .. versionchanged:: 3.9 + Added :data:`CLD_KILLED` and :data:`CLD_STOPPED` values. + + .. function:: waitstatus_to_exitcode(status) Convert a wait status to an exit code. @@ -4657,32 +5157,6 @@ written in Python, such as a mail server's external command delivery program. .. versionadded:: 3.9 -.. data:: WNOHANG - - The option for :func:`waitpid` to return immediately if no child process status - is available immediately. The function returns ``(0, 0)`` in this case. - - .. availability:: Unix, not Emscripten, not WASI. - - -.. data:: WCONTINUED - - This option causes child processes to be reported if they have been continued - from a job control stop since their status was last reported. - - .. availability:: Unix, not Emscripten, not WASI. - - Some Unix systems. - - -.. data:: WUNTRACED - - This option causes child processes to be reported if they have been stopped but - their current state has not been reported since they were stopped. - - .. availability:: Unix, not Emscripten, not WASI. - - The following functions take a process status code as returned by :func:`system`, :func:`wait`, or :func:`waitpid` as a parameter. They may be used to determine the disposition of a process. @@ -4700,7 +5174,7 @@ used to determine the disposition of a process. .. function:: WIFCONTINUED(status) Return ``True`` if a stopped child has been resumed by delivery of - :data:`~signal.SIGCONT` (if the process has been continued from a job + :const:`~signal.SIGCONT` (if the process has been continued from a job control stop), otherwise return ``False``. See :data:`WCONTINUED` option. @@ -4878,8 +5352,12 @@ operating system. .. 
function:: sched_getaffinity(pid, /) - Return the set of CPUs the process with PID *pid* (or the current process - if zero) is restricted to. + Return the set of CPUs the process with PID *pid* is restricted to. + + If *pid* is zero, return the set of CPUs the calling thread of the current + process is restricted to. + + See also the :func:`process_cpu_count` function. .. _os-path: @@ -4920,15 +5398,18 @@ Miscellaneous System Information .. function:: cpu_count() - Return the number of CPUs in the system. Returns ``None`` if undetermined. - - This number is not equivalent to the number of CPUs the current process can - use. The number of usable CPUs can be obtained with - ``len(os.sched_getaffinity(0))`` + Return the number of logical CPUs in the **system**. Returns ``None`` if + undetermined. + The :func:`process_cpu_count` function can be used to get the number of + logical CPUs usable by the calling thread of the **current process**. .. versionadded:: 3.4 + .. versionchanged:: 3.13 + If :option:`-X cpu_count <-X>` is given or :envvar:`PYTHON_CPU_COUNT` is set, + :func:`cpu_count` returns the overridden value *n*. + .. function:: getloadavg() @@ -4939,6 +5420,23 @@ Miscellaneous System Information .. availability:: Unix. +.. function:: process_cpu_count() + + Get the number of logical CPUs usable by the calling thread of the **current + process**. Returns ``None`` if undetermined. It can be less than + :func:`cpu_count` depending on the CPU affinity. + + The :func:`cpu_count` function can be used to get the number of logical CPUs + in the **system**. + + If :option:`-X cpu_count <-X>` is given or :envvar:`PYTHON_CPU_COUNT` is set, + :func:`process_cpu_count` returns the overridden value *n*. + + See also the :func:`sched_getaffinity` functions. + + .. versionadded:: 3.13 + + .. function:: sysconf(name, /) Return integer-valued system configuration values. If the configuration value @@ -5072,7 +5570,7 @@ Random numbers ``/dev/urandom`` devices. The flags argument is a bit mask that can contain zero or more of the - following values ORed together: :py:data:`os.GRND_RANDOM` and + following values ORed together: :py:const:`os.GRND_RANDOM` and :py:data:`GRND_NONBLOCK`. See also the `Linux getrandom() manual page diff --git a/Doc/library/ossaudiodev.rst b/Doc/library/ossaudiodev.rst deleted file mode 100644 index e14c1bf8d5367ef..000000000000000 --- a/Doc/library/ossaudiodev.rst +++ /dev/null @@ -1,453 +0,0 @@ -:mod:`ossaudiodev` --- Access to OSS-compatible audio devices -============================================================= - -.. module:: ossaudiodev - :platform: Linux, FreeBSD - :synopsis: Access to OSS-compatible audio devices. - :deprecated: - -.. deprecated-removed:: 3.11 3.13 - The :mod:`ossaudiodev` module is deprecated - (see :pep:`PEP 594 <594#ossaudiodev>` for details). - --------------- - -This module allows you to access the OSS (Open Sound System) audio interface. -OSS is available for a wide range of open-source and commercial Unices, and is -the standard audio interface for Linux and recent versions of FreeBSD. - -.. Things will get more complicated for future Linux versions, since - ALSA is in the standard kernel as of 2.5.x. Presumably if you - use ALSA, you'll have to make sure its OSS compatibility layer - is active to use ossaudiodev, but you're going to need it for the vast - majority of Linux audio apps anyway. - - Sounds like things are also complicated for other BSDs. 
In response - to my python-dev query, Thomas Wouters said: - - > Likewise, googling shows OpenBSD also uses OSS/Free -- the commercial - > OSS installation manual tells you to remove references to OSS/Free from the - > kernel :) - - but Aleksander Piotrowsk actually has an OpenBSD box, and he quotes - from its : - > * WARNING! WARNING! - > * This is an OSS (Linux) audio emulator. - > * Use the Native NetBSD API for developing new code, and this - > * only for compiling Linux programs. - - There's also an ossaudio manpage on OpenBSD that explains things - further. Presumably NetBSD and OpenBSD have a different standard - audio interface. That's the great thing about standards, there are so - many to choose from ... ;-) - - This probably all warrants a footnote or two, but I don't understand - things well enough right now to write it! --GPW - -.. versionchanged:: 3.3 - Operations in this module now raise :exc:`OSError` where :exc:`IOError` - was raised. - - -.. seealso:: - - `Open Sound System Programmer's Guide `_ - the official documentation for the OSS C API - - The module defines a large number of constants supplied by the OSS device - driver; see ```` on either Linux or FreeBSD for a listing. - -:mod:`ossaudiodev` defines the following variables and functions: - - -.. exception:: OSSAudioError - - This exception is raised on certain errors. The argument is a string describing - what went wrong. - - (If :mod:`ossaudiodev` receives an error from a system call such as - :c:func:`open`, :c:func:`write`, or :c:func:`ioctl`, it raises :exc:`OSError`. - Errors detected directly by :mod:`ossaudiodev` result in :exc:`OSSAudioError`.) - - (For backwards compatibility, the exception class is also available as - ``ossaudiodev.error``.) - - -.. function:: open(mode) - open(device, mode) - - Open an audio device and return an OSS audio device object. This object - supports many file-like methods, such as :meth:`read`, :meth:`write`, and - :meth:`fileno` (although there are subtle differences between conventional Unix - read/write semantics and those of OSS audio devices). It also supports a number - of audio-specific methods; see below for the complete list of methods. - - *device* is the audio device filename to use. If it is not specified, this - module first looks in the environment variable :envvar:`AUDIODEV` for a device - to use. If not found, it falls back to :file:`/dev/dsp`. - - *mode* is one of ``'r'`` for read-only (record) access, ``'w'`` for - write-only (playback) access and ``'rw'`` for both. Since many sound cards - only allow one process to have the recorder or player open at a time, it is a - good idea to open the device only for the activity needed. Further, some - sound cards are half-duplex: they can be opened for reading or writing, but - not both at once. - - Note the unusual calling syntax: the *first* argument is optional, and the - second is required. This is a historical artifact for compatibility with the - older :mod:`linuxaudiodev` module which :mod:`ossaudiodev` supersedes. - - .. XXX it might also be motivated - by my unfounded-but-still-possibly-true belief that the default - audio device varies unpredictably across operating systems. -GW - - -.. function:: openmixer([device]) - - Open a mixer device and return an OSS mixer device object. *device* is the - mixer device filename to use. If it is not specified, this module first looks - in the environment variable :envvar:`MIXERDEV` for a device to use. If not - found, it falls back to :file:`/dev/mixer`. - - -.. 
_ossaudio-device-objects: - -Audio Device Objects --------------------- - -Before you can write to or read from an audio device, you must call three -methods in the correct order: - -#. :meth:`setfmt` to set the output format - -#. :meth:`channels` to set the number of channels - -#. :meth:`speed` to set the sample rate - -Alternately, you can use the :meth:`setparameters` method to set all three audio -parameters at once. This is more convenient, but may not be as flexible in all -cases. - -The audio device objects returned by :func:`.open` define the following methods -and (read-only) attributes: - - -.. method:: oss_audio_device.close() - - Explicitly close the audio device. When you are done writing to or reading from - an audio device, you should explicitly close it. A closed device cannot be used - again. - - -.. method:: oss_audio_device.fileno() - - Return the file descriptor associated with the device. - - -.. method:: oss_audio_device.read(size) - - Read *size* bytes from the audio input and return them as a Python string. - Unlike most Unix device drivers, OSS audio devices in blocking mode (the - default) will block :func:`read` until the entire requested amount of data is - available. - - -.. method:: oss_audio_device.write(data) - - Write a :term:`bytes-like object` *data* to the audio device and return the - number of bytes written. If the audio device is in blocking mode (the - default), the entire data is always written (again, this is different from - usual Unix device semantics). If the device is in non-blocking mode, some - data may not be written---see :meth:`writeall`. - - .. versionchanged:: 3.5 - Writable :term:`bytes-like object` is now accepted. - - -.. method:: oss_audio_device.writeall(data) - - Write a :term:`bytes-like object` *data* to the audio device: waits until - the audio device is able to accept data, writes as much data as it will - accept, and repeats until *data* has been completely written. If the device - is in blocking mode (the default), this has the same effect as - :meth:`write`; :meth:`writeall` is only useful in non-blocking mode. Has - no return value, since the amount of data written is always equal to the - amount of data supplied. - - .. versionchanged:: 3.5 - Writable :term:`bytes-like object` is now accepted. - - -.. versionchanged:: 3.2 - Audio device objects also support the context management protocol, i.e. they can - be used in a :keyword:`with` statement. - - -The following methods each map to exactly one :c:func:`ioctl` system call. The -correspondence is obvious: for example, :meth:`setfmt` corresponds to the -``SNDCTL_DSP_SETFMT`` ioctl, and :meth:`sync` to ``SNDCTL_DSP_SYNC`` (this can -be useful when consulting the OSS documentation). If the underlying -:c:func:`ioctl` fails, they all raise :exc:`OSError`. - - -.. method:: oss_audio_device.nonblock() - - Put the device into non-blocking mode. Once in non-blocking mode, there is no - way to return it to blocking mode. - - -.. method:: oss_audio_device.getfmts() - - Return a bitmask of the audio output formats supported by the soundcard. 
Some - of the formats supported by OSS are: - - +-------------------------+---------------------------------------------+ - | Format | Description | - +=========================+=============================================+ - | :const:`AFMT_MU_LAW` | a logarithmic encoding (used by Sun ``.au`` | - | | files and :file:`/dev/audio`) | - +-------------------------+---------------------------------------------+ - | :const:`AFMT_A_LAW` | a logarithmic encoding | - +-------------------------+---------------------------------------------+ - | :const:`AFMT_IMA_ADPCM` | a 4:1 compressed format defined by the | - | | Interactive Multimedia Association | - +-------------------------+---------------------------------------------+ - | :const:`AFMT_U8` | Unsigned, 8-bit audio | - +-------------------------+---------------------------------------------+ - | :const:`AFMT_S16_LE` | Signed, 16-bit audio, little-endian byte | - | | order (as used by Intel processors) | - +-------------------------+---------------------------------------------+ - | :const:`AFMT_S16_BE` | Signed, 16-bit audio, big-endian byte order | - | | (as used by 68k, PowerPC, Sparc) | - +-------------------------+---------------------------------------------+ - | :const:`AFMT_S8` | Signed, 8 bit audio | - +-------------------------+---------------------------------------------+ - | :const:`AFMT_U16_LE` | Unsigned, 16-bit little-endian audio | - +-------------------------+---------------------------------------------+ - | :const:`AFMT_U16_BE` | Unsigned, 16-bit big-endian audio | - +-------------------------+---------------------------------------------+ - - Consult the OSS documentation for a full list of audio formats, and note that - most devices support only a subset of these formats. Some older devices only - support :const:`AFMT_U8`; the most common format used today is - :const:`AFMT_S16_LE`. - - -.. method:: oss_audio_device.setfmt(format) - - Try to set the current audio format to *format*---see :meth:`getfmts` for a - list. Returns the audio format that the device was set to, which may not be the - requested format. May also be used to return the current audio format---do this - by passing an "audio format" of :const:`AFMT_QUERY`. - - -.. method:: oss_audio_device.channels(nchannels) - - Set the number of output channels to *nchannels*. A value of 1 indicates - monophonic sound, 2 stereophonic. Some devices may have more than 2 channels, - and some high-end devices may not support mono. Returns the number of channels - the device was set to. - - -.. method:: oss_audio_device.speed(samplerate) - - Try to set the audio sampling rate to *samplerate* samples per second. Returns - the rate actually set. Most sound devices don't support arbitrary sampling - rates. Common rates are: - - +-------+-------------------------------------------+ - | Rate | Description | - +=======+===========================================+ - | 8000 | default rate for :file:`/dev/audio` | - +-------+-------------------------------------------+ - | 11025 | speech recording | - +-------+-------------------------------------------+ - | 22050 | | - +-------+-------------------------------------------+ - | 44100 | CD quality audio (at 16 bits/sample and 2 | - | | channels) | - +-------+-------------------------------------------+ - | 96000 | DVD quality audio (at 24 bits/sample) | - +-------+-------------------------------------------+ - - -.. method:: oss_audio_device.sync() - - Wait until the sound device has played every byte in its buffer. 
(This happens - implicitly when the device is closed.) The OSS documentation recommends closing - and re-opening the device rather than using :meth:`sync`. - - -.. method:: oss_audio_device.reset() - - Immediately stop playing or recording and return the device to a state where it - can accept commands. The OSS documentation recommends closing and re-opening - the device after calling :meth:`reset`. - - -.. method:: oss_audio_device.post() - - Tell the driver that there is likely to be a pause in the output, making it - possible for the device to handle the pause more intelligently. You might use - this after playing a spot sound effect, before waiting for user input, or before - doing disk I/O. - -The following convenience methods combine several ioctls, or one ioctl and some -simple calculations. - - -.. method:: oss_audio_device.setparameters(format, nchannels, samplerate[, strict=False]) - - Set the key audio sampling parameters---sample format, number of channels, and - sampling rate---in one method call. *format*, *nchannels*, and *samplerate* - should be as specified in the :meth:`setfmt`, :meth:`channels`, and - :meth:`speed` methods. If *strict* is true, :meth:`setparameters` checks to - see if each parameter was actually set to the requested value, and raises - :exc:`OSSAudioError` if not. Returns a tuple (*format*, *nchannels*, - *samplerate*) indicating the parameter values that were actually set by the - device driver (i.e., the same as the return values of :meth:`setfmt`, - :meth:`channels`, and :meth:`speed`). - - For example, :: - - (fmt, channels, rate) = dsp.setparameters(fmt, channels, rate) - - is equivalent to :: - - fmt = dsp.setfmt(fmt) - channels = dsp.channels(channels) - rate = dsp.rate(rate) - - -.. method:: oss_audio_device.bufsize() - - Returns the size of the hardware buffer, in samples. - - -.. method:: oss_audio_device.obufcount() - - Returns the number of samples that are in the hardware buffer yet to be played. - - -.. method:: oss_audio_device.obuffree() - - Returns the number of samples that could be queued into the hardware buffer to - be played without blocking. - -Audio device objects also support several read-only attributes: - - -.. attribute:: oss_audio_device.closed - - Boolean indicating whether the device has been closed. - - -.. attribute:: oss_audio_device.name - - String containing the name of the device file. - - -.. attribute:: oss_audio_device.mode - - The I/O mode for the file, either ``"r"``, ``"rw"``, or ``"w"``. - - -.. _mixer-device-objects: - -Mixer Device Objects --------------------- - -The mixer object provides two file-like methods: - - -.. method:: oss_mixer_device.close() - - This method closes the open mixer device file. Any further attempts to use the - mixer after this file is closed will raise an :exc:`OSError`. - - -.. method:: oss_mixer_device.fileno() - - Returns the file handle number of the open mixer device file. - -.. versionchanged:: 3.2 - Mixer objects also support the context management protocol. - - -The remaining methods are specific to audio mixing: - - -.. method:: oss_mixer_device.controls() - - This method returns a bitmask specifying the available mixer controls ("Control" - being a specific mixable "channel", such as :const:`SOUND_MIXER_PCM` or - :const:`SOUND_MIXER_SYNTH`). This bitmask indicates a subset of all available - mixer controls---the :const:`SOUND_MIXER_\*` constants defined at module level. 
- To determine if, for example, the current mixer object supports a PCM mixer, use - the following Python code:: - - mixer=ossaudiodev.openmixer() - if mixer.controls() & (1 << ossaudiodev.SOUND_MIXER_PCM): - # PCM is supported - ... code ... - - For most purposes, the :const:`SOUND_MIXER_VOLUME` (master volume) and - :const:`SOUND_MIXER_PCM` controls should suffice---but code that uses the mixer - should be flexible when it comes to choosing mixer controls. On the Gravis - Ultrasound, for example, :const:`SOUND_MIXER_VOLUME` does not exist. - - -.. method:: oss_mixer_device.stereocontrols() - - Returns a bitmask indicating stereo mixer controls. If a bit is set, the - corresponding control is stereo; if it is unset, the control is either - monophonic or not supported by the mixer (use in combination with - :meth:`controls` to determine which). - - See the code example for the :meth:`controls` function for an example of getting - data from a bitmask. - - -.. method:: oss_mixer_device.reccontrols() - - Returns a bitmask specifying the mixer controls that may be used to record. See - the code example for :meth:`controls` for an example of reading from a bitmask. - - -.. method:: oss_mixer_device.get(control) - - Returns the volume of a given mixer control. The returned volume is a 2-tuple - ``(left_volume,right_volume)``. Volumes are specified as numbers from 0 - (silent) to 100 (full volume). If the control is monophonic, a 2-tuple is still - returned, but both volumes are the same. - - Raises :exc:`OSSAudioError` if an invalid control is specified, or - :exc:`OSError` if an unsupported control is specified. - - -.. method:: oss_mixer_device.set(control, (left, right)) - - Sets the volume for a given mixer control to ``(left,right)``. ``left`` and - ``right`` must be ints and between 0 (silent) and 100 (full volume). On - success, the new volume is returned as a 2-tuple. Note that this may not be - exactly the same as the volume specified, because of the limited resolution of - some soundcard's mixers. - - Raises :exc:`OSSAudioError` if an invalid mixer control was specified, or if the - specified volumes were out-of-range. - - -.. method:: oss_mixer_device.get_recsrc() - - This method returns a bitmask indicating which control(s) are currently being - used as a recording source. - - -.. method:: oss_mixer_device.set_recsrc(bitmask) - - Call this function to specify a recording source. Returns a bitmask indicating - the new recording source (or sources) if successful; raises :exc:`OSError` if an - invalid source was specified. To set the current recording source to the - microphone input:: - - mixer.setrecsrc (1 << ossaudiodev.SOUND_MIXER_MIC) diff --git a/Doc/library/pathlib.rst b/Doc/library/pathlib.rst index 944963e1e1ae79e..7ecfd120db8d154 100644 --- a/Doc/library/pathlib.rst +++ b/Doc/library/pathlib.rst @@ -21,6 +21,7 @@ inherit from pure paths but also provide I/O operations. .. image:: pathlib-inheritance.png :align: center + :class: invert-in-dark-mode If you've never used this module before or just aren't sure which class is right for your task, :class:`Path` is most likely what you need. It instantiates @@ -87,6 +88,17 @@ Opening a file:: '#!/bin/bash\n' +Exceptions +---------- + +.. exception:: UnsupportedOperation + + An exception inheriting :exc:`NotImplementedError` that is raised when an + unsupported operation is called on a path object. + + .. versionadded:: 3.13 + + .. 
_pure-paths: Pure paths @@ -105,8 +117,9 @@ we also call *flavours*: PurePosixPath('setup.py') Each element of *pathsegments* can be either a string representing a - path segment, an object implementing the :class:`os.PathLike` interface - which returns a string, or another path object:: + path segment, or an object implementing the :class:`os.PathLike` interface + where the :meth:`~os.PathLike.__fspath__` method returns a string, + such as another path object:: >>> PurePath('foo', 'some/path', 'bar') PurePosixPath('foo/some/path/bar') @@ -118,16 +131,16 @@ we also call *flavours*: >>> PurePath() PurePosixPath('.') - When several absolute paths are given, the last is taken as an anchor - (mimicking :func:`os.path.join`'s behaviour):: + If a segment is an absolute path, all previous segments are ignored + (like :func:`os.path.join`):: >>> PurePath('/etc', '/usr', 'lib64') PurePosixPath('/usr/lib64') >>> PureWindowsPath('c:/Windows', 'd:bar') PureWindowsPath('d:bar') - However, in a Windows path, changing the local root doesn't discard the - previous drive setting:: + On Windows, the drive is not reset when a rooted relative path + segment (e.g., ``r'\foo'``) is encountered:: >>> PureWindowsPath('c:/Windows', '/Program Files') PureWindowsPath('c:/Program Files') @@ -186,7 +199,7 @@ these classes, since they don't provide any operation that does system calls. General properties ^^^^^^^^^^^^^^^^^^ -Paths are immutable and hashable. Paths of a same flavour are comparable +Paths are immutable and :term:`hashable`. Paths of a same flavour are comparable and orderable. These properties respect the flavour's case-folding semantics:: @@ -212,7 +225,10 @@ Paths of a different flavour compare unequal and cannot be ordered:: Operators ^^^^^^^^^ -The slash operator helps create child paths, similarly to :func:`os.path.join`:: +The slash operator helps create child paths, like :func:`os.path.join`. +If the argument is an absolute path, the previous path is ignored. +On Windows, the drive is not reset when the argument is a rooted +relative path (e.g., ``r'\foo'``):: >>> p = PurePath('/etc') >>> p @@ -222,6 +238,10 @@ The slash operator helps create child paths, similarly to :func:`os.path.join`:: >>> q = PurePath('bin') >>> '/usr' / q PurePosixPath('/usr/bin') + >>> p / '/an_absolute_path' + PurePosixPath('/an_absolute_path') + >>> PureWindowsPath('c:/Windows', '/Program Files') + PureWindowsPath('c:/Program Files') A path object can be used anywhere an object implementing :class:`os.PathLike` is accepted:: @@ -259,7 +279,7 @@ Accessing individual parts To access the individual "parts" (components) of a path, use the following property: -.. data:: PurePath.parts +.. attribute:: PurePath.parts A tuple giving access to the path's various components:: @@ -283,7 +303,14 @@ Methods and properties Pure paths provide the following methods and properties: -.. data:: PurePath.drive +.. attribute:: PurePath.pathmod + + The implementation of the :mod:`os.path` module used for low-level path + operations: either ``posixpath`` or ``ntpath``. + + .. versionadded:: 3.13 + +.. attribute:: PurePath.drive A string representing the drive letter or name, if any:: @@ -299,7 +326,7 @@ Pure paths provide the following methods and properties: >>> PureWindowsPath('//host/share/foo.txt').drive '\\\\host\\share' -.. data:: PurePath.root +.. 
attribute:: PurePath.root A string representing the (local or global) root, if any:: @@ -335,7 +362,7 @@ Pure paths provide the following methods and properties: an implementation-defined manner, although more than two leading slashes shall be treated as a single slash."* -.. data:: PurePath.anchor +.. attribute:: PurePath.anchor The concatenation of the drive and root:: @@ -349,7 +376,7 @@ Pure paths provide the following methods and properties: '\\\\host\\share\\' -.. data:: PurePath.parents +.. attribute:: PurePath.parents An immutable sequence providing access to the logical ancestors of the path:: @@ -365,7 +392,7 @@ Pure paths provide the following methods and properties: .. versionchanged:: 3.10 The parents sequence now supports :term:`slices ` and negative index values. -.. data:: PurePath.parent +.. attribute:: PurePath.parent The logical parent of the path:: @@ -394,7 +421,7 @@ Pure paths provide the following methods and properties: symlinks and eliminate ``".."`` components. -.. data:: PurePath.name +.. attribute:: PurePath.name A string representing the final path component, excluding the drive and root, if any:: @@ -410,9 +437,9 @@ Pure paths provide the following methods and properties: '' -.. data:: PurePath.suffix +.. attribute:: PurePath.suffix - The file extension of the final component, if any:: + The last dot-separated portion of the final component, if any:: >>> PurePosixPath('my/library/setup.py').suffix '.py' @@ -421,10 +448,11 @@ Pure paths provide the following methods and properties: >>> PurePosixPath('my/library').suffix '' + This is commonly called the file extension. -.. data:: PurePath.suffixes +.. attribute:: PurePath.suffixes - A list of the path's file extensions:: + A list of the path's suffixes, often called file extensions:: >>> PurePosixPath('my/library.tar.gar').suffixes ['.tar', '.gar'] @@ -434,7 +462,7 @@ Pure paths provide the following methods and properties: [] -.. data:: PurePath.stem +.. attribute:: PurePath.stem The final path component, without its suffix:: @@ -490,7 +518,7 @@ Pure paths provide the following methods and properties: True -.. method:: PurePath.is_relative_to(*other) +.. method:: PurePath.is_relative_to(other) Return whether or not this path is relative to the *other* path. @@ -502,6 +530,10 @@ Pure paths provide the following methods and properties: .. versionadded:: 3.9 + .. deprecated-removed:: 3.12 3.14 + + Passing additional arguments is deprecated; if supplied, they are joined + with *other*. .. method:: PurePath.is_reserved() @@ -518,10 +550,10 @@ Pure paths provide the following methods and properties: unintended effects. -.. method:: PurePath.joinpath(*other) +.. method:: PurePath.joinpath(*pathsegments) Calling this method is equivalent to combining the path with each of - the *other* arguments in turn:: + the given *pathsegments* in turn:: >>> PurePosixPath('/etc').joinpath('passwd') PurePosixPath('/etc/passwd') @@ -533,7 +565,7 @@ Pure paths provide the following methods and properties: PureWindowsPath('c:/Program Files') -.. method:: PurePath.match(pattern) +.. method:: PurePath.match(pattern, *, case_sensitive=None) Match this path against the provided glob-style pattern. Return ``True`` if matching is successful, ``False`` otherwise. 
@@ -556,6 +588,13 @@ Pure paths provide the following methods and properties: >>> PurePath('a/b.py').match('/*.py') False + The *pattern* may be another path object; this speeds up matching the same + pattern against multiple files:: + + >>> pattern = PurePath('*.py') + >>> PurePath('a/b.py').match(pattern) + True + As with other methods, case-sensitivity follows platform defaults:: >>> PurePosixPath('b.py').match('*.PY') @@ -563,8 +602,17 @@ Pure paths provide the following methods and properties: >>> PureWindowsPath('b.py').match('*.PY') True + Set *case_sensitive* to ``True`` or ``False`` to override this behaviour. + + .. versionchanged:: 3.12 + The *case_sensitive* parameter was added. + + .. versionchanged:: 3.13 + Support for the recursive wildcard "``**``" was added. In previous + versions, it acted like the non-recursive wildcard "``*``". + -.. method:: PurePath.relative_to(*other, walk_up=False) +.. method:: PurePath.relative_to(other, walk_up=False) Compute a version of this path relative to the path represented by *other*. If it's impossible, :exc:`ValueError` is raised:: @@ -581,7 +629,7 @@ Pure paths provide the following methods and properties: raise ValueError(error_message.format(str(self), str(formatted))) ValueError: '/etc/passwd' is not in the subpath of '/usr' OR one path is relative and the other is absolute. -When *walk_up* is False (the default), the path must start with *other*. + When *walk_up* is False (the default), the path must start with *other*. When the argument is True, ``..`` entries may be added to form the relative path. In all other cases, such as the paths referencing different drives, :exc:`ValueError` is raised.:: @@ -602,9 +650,13 @@ When *walk_up* is False (the default), the path must start with *other*. are present in the path; call :meth:`~Path.resolve` first if necessary to resolve symlinks. - .. versionadded:: 3.12 - The *walk_up* argument (old behavior is the same as ``walk_up=False``). + .. versionchanged:: 3.12 + The *walk_up* parameter was added (old behavior is the same as ``walk_up=False``). + + .. deprecated-removed:: 3.12 3.14 + Passing additional positional arguments is deprecated; if supplied, + they are joined with *other*. .. method:: PurePath.with_name(name) @@ -664,6 +716,30 @@ When *walk_up* is False (the default), the path must start with *other*. PureWindowsPath('README') +.. method:: PurePath.with_segments(*pathsegments) + + Create a new path object of the same type by combining the given + *pathsegments*. This method is called whenever a derivative path is created, + such as from :attr:`parent` and :meth:`relative_to`. Subclasses may + override this method to pass information to derivative paths, for example:: + + from pathlib import PurePosixPath + + class MyPath(PurePosixPath): + def __init__(self, *pathsegments, session_id): + super().__init__(*pathsegments) + self.session_id = session_id + + def with_segments(self, *pathsegments): + return type(self)(*pathsegments, session_id=self.session_id) + + etc = MyPath('/etc', session_id=42) + hosts = etc / 'hosts' + print(hosts.session_id) # 42 + + .. versionadded:: 3.12 + + .. _concrete-paths: @@ -695,6 +771,11 @@ calls on path objects. There are three ways to instantiate concrete paths: *pathsegments* is specified similarly to :class:`PurePath`. + .. versionchanged:: 3.13 + Raises :exc:`UnsupportedOperation` on Windows. In previous versions, + :exc:`NotImplementedError` was raised instead. + + .. 
class:: WindowsPath(*pathsegments) A subclass of :class:`Path` and :class:`PureWindowsPath`, this class @@ -705,6 +786,11 @@ calls on path objects. There are three ways to instantiate concrete paths: *pathsegments* is specified similarly to :class:`PurePath`. + .. versionchanged:: 3.13 + Raises :exc:`UnsupportedOperation` on non-Windows platforms. In previous + versions, :exc:`NotImplementedError` was raised instead. + + You can only instantiate the class flavour that corresponds to your system (allowing system calls on non-compatible path flavours could lead to bugs or failures in your application):: @@ -721,7 +807,7 @@ bugs or failures in your application):: File "", line 1, in File "pathlib.py", line 798, in __new__ % (cls.__name__,)) - NotImplementedError: cannot instantiate 'WindowsPath' on your system + UnsupportedOperation: cannot instantiate 'WindowsPath' on your system Methods @@ -764,6 +850,42 @@ call fails (for example because the path doesn't exist). .. versionadded:: 3.5 +.. classmethod:: Path.from_uri(uri) + + Return a new path object from parsing a 'file' URI conforming to + :rfc:`8089`. For example:: + + >>> p = Path.from_uri('file:///etc/hosts') + PosixPath('/etc/hosts') + + On Windows, DOS device and UNC paths may be parsed from URIs:: + + >>> p = Path.from_uri('file:///c:/windows') + WindowsPath('c:/windows') + >>> p = Path.from_uri('file://server/share') + WindowsPath('//server/share') + + Several variant forms are supported:: + + >>> p = Path.from_uri('file:////server/share') + WindowsPath('//server/share') + >>> p = Path.from_uri('file://///server/share') + WindowsPath('//server/share') + >>> p = Path.from_uri('file:c:/windows') + WindowsPath('c:/windows') + >>> p = Path.from_uri('file:/c|/windows') + WindowsPath('c:/windows') + + :exc:`ValueError` is raised if the URI does not start with ``file:``, or + the parsed path isn't absolute. + + :func:`os.fsdecode` is used to decode percent-escaped byte sequences, and + so file URIs are not portable across machines with different + :ref:`filesystem encodings `. + + .. versionadded:: 3.13 + + .. method:: Path.stat(*, follow_symlinks=True) Return a :class:`os.stat_result` object containing information about this path, like :func:`os.stat`. @@ -803,9 +925,14 @@ call fails (for example because the path doesn't exist). .. versionchanged:: 3.10 The *follow_symlinks* parameter was added. -.. method:: Path.exists() +.. method:: Path.exists(*, follow_symlinks=True) + + Return ``True`` if the path points to an existing file or directory. + + This method normally follows symlinks; to check if a symlink exists, add + the argument ``follow_symlinks=False``. - Whether the path points to an existing file or directory:: + :: >>> Path('.').exists() True @@ -816,10 +943,8 @@ call fails (for example because the path doesn't exist). >>> Path('nonexistentfile').exists() False - .. note:: - If the path points to a symlink, :meth:`exists` returns whether the - symlink *points to* an existing file or directory. - + .. versionchanged:: 3.12 + The *follow_symlinks* parameter was added. .. method:: Path.expanduser() @@ -836,7 +961,7 @@ call fails (for example because the path doesn't exist). .. versionadded:: 3.5 -.. method:: Path.glob(pattern) +.. method:: Path.glob(pattern, *, case_sensitive=None, follow_symlinks=None) Glob the given relative *pattern* in the directory represented by this path, yielding all matching files (of any kind):: @@ -861,35 +986,84 @@ call fails (for example because the path doesn't exist). 
Using the "``**``" pattern in large directory trees may consume an inordinate amount of time. + .. tip:: + Set *follow_symlinks* to ``True`` or ``False`` to improve performance + of recursive globbing. + + By default, or when the *case_sensitive* keyword-only argument is set to + ``None``, this method matches paths using platform-specific casing rules: + typically, case-sensitive on POSIX, and case-insensitive on Windows. + Set *case_sensitive* to ``True`` or ``False`` to override this behaviour. + + By default, or when the *follow_symlinks* keyword-only argument is set to + ``None``, this method follows symlinks except when expanding "``**``" + wildcards. Set *follow_symlinks* to ``True`` to always follow symlinks, or + ``False`` to treat all symlinks as files. + .. audit-event:: pathlib.Path.glob self,pattern pathlib.Path.glob .. versionchanged:: 3.11 Return only directories if *pattern* ends with a pathname components separator (:data:`~os.sep` or :data:`~os.altsep`). + .. versionchanged:: 3.12 + The *case_sensitive* parameter was added. + + .. versionchanged:: 3.13 + The *follow_symlinks* parameter was added. + + .. versionchanged:: 3.13 + Emits :exc:`FutureWarning` if the pattern ends with "``**``". In a + future Python release, patterns with this ending will match both files + and directories. Add a trailing slash to match only directories. + .. method:: Path.group() Return the name of the group owning the file. :exc:`KeyError` is raised if the file's gid isn't found in the system database. + .. versionchanged:: 3.13 + Raises :exc:`UnsupportedOperation` if the :mod:`grp` module is not + available. In previous versions, :exc:`NotImplementedError` was raised. + -.. method:: Path.is_dir() +.. method:: Path.is_dir(*, follow_symlinks=True) - Return ``True`` if the path points to a directory (or a symbolic link - pointing to a directory), ``False`` if it points to another kind of file. + Return ``True`` if the path points to a directory, ``False`` if it points + to another kind of file. ``False`` is also returned if the path doesn't exist or is a broken symlink; other errors (such as permission errors) are propagated. + This method normally follows symlinks; to exclude symlinks to directories, + add the argument ``follow_symlinks=False``. -.. method:: Path.is_file() + .. versionchanged:: 3.13 + The *follow_symlinks* parameter was added. + + +.. method:: Path.is_file(*, follow_symlinks=True) - Return ``True`` if the path points to a regular file (or a symbolic link - pointing to a regular file), ``False`` if it points to another kind of file. + Return ``True`` if the path points to a regular file, ``False`` if it + points to another kind of file. ``False`` is also returned if the path doesn't exist or is a broken symlink; other errors (such as permission errors) are propagated. + This method normally follows symlinks; to exclude symlinks, add the + argument ``follow_symlinks=False``. + + .. versionchanged:: 3.13 + The *follow_symlinks* parameter was added. + + +.. method:: Path.is_junction() + + Return ``True`` if the path points to a junction, and ``False`` for any other + type of file. Currently only Windows supports junctions. + + .. versionadded:: 3.12 + .. method:: Path.is_mount() @@ -1122,6 +1296,10 @@ call fails (for example because the path doesn't exist). Return the name of the user owning the file. :exc:`KeyError` is raised if the file's uid isn't found in the system database. + .. 
versionchanged:: 3.13 + Raises :exc:`UnsupportedOperation` if the :mod:`pwd` module is not + available. In previous versions, :exc:`NotImplementedError` was raised. + .. method:: Path.read_bytes() @@ -1136,7 +1314,7 @@ call fails (for example because the path doesn't exist). .. versionadded:: 3.5 -.. method:: Path.read_text(encoding=None, errors=None) +.. method:: Path.read_text(encoding=None, errors=None, newline=None) Return the decoded contents of the pointed-to file as a string:: @@ -1151,6 +1329,8 @@ call fails (for example because the path doesn't exist). .. versionadded:: 3.5 + .. versionchanged:: 3.13 + The *newline* parameter was added. .. method:: Path.readlink() @@ -1164,6 +1344,10 @@ call fails (for example because the path doesn't exist). .. versionadded:: 3.9 + .. versionchanged:: 3.13 + Raises :exc:`UnsupportedOperation` if :func:`os.readlink` is not + available. In previous versions, :exc:`NotImplementedError` was raised. + .. method:: Path.rename(target) @@ -1235,19 +1419,24 @@ call fails (for example because the path doesn't exist). >>> p.resolve() PosixPath('/home/antoine/pathlib/setup.py') - If the path doesn't exist and *strict* is ``True``, :exc:`FileNotFoundError` - is raised. If *strict* is ``False``, the path is resolved as far as possible - and any remainder is appended without checking whether it exists. If an - infinite loop is encountered along the resolution path, :exc:`RuntimeError` - is raised. + If a path doesn't exist or a symlink loop is encountered, and *strict* is + ``True``, :exc:`OSError` is raised. If *strict* is ``False``, the path is + resolved as far as possible and any remainder is appended without checking + whether it exists. - .. versionadded:: 3.6 - The *strict* argument (pre-3.6 behavior is strict). + .. versionchanged:: 3.6 + The *strict* parameter was added (pre-3.6 behavior is strict). -.. method:: Path.rglob(pattern) + .. versionchanged:: 3.13 + Symlink loops are treated like other errors: :exc:`OSError` is raised in + strict mode, and no exception is raised in non-strict mode. In previous + versions, :exc:`RuntimeError` is raised no matter the value of *strict*. + +.. method:: Path.rglob(pattern, *, case_sensitive=None, follow_symlinks=None) Glob the given relative *pattern* recursively. This is like calling - :func:`Path.glob` with "``**/``" added in front of the *pattern*:: + :func:`Path.glob` with "``**/``" added in front of the *pattern*, where + *patterns* are the same as for :mod:`fnmatch`:: >>> sorted(Path().rglob("*.py")) [PosixPath('build/lib/pathlib.py'), @@ -1256,12 +1445,28 @@ call fails (for example because the path doesn't exist). PosixPath('setup.py'), PosixPath('test_pathlib.py')] + By default, or when the *case_sensitive* keyword-only argument is set to + ``None``, this method matches paths using platform-specific casing rules: + typically, case-sensitive on POSIX, and case-insensitive on Windows. + Set *case_sensitive* to ``True`` or ``False`` to override this behaviour. + + By default, or when the *follow_symlinks* keyword-only argument is set to + ``None``, this method follows symlinks except when expanding "``**``" + wildcards. Set *follow_symlinks* to ``True`` to always follow symlinks, or + ``False`` to treat all symlinks as files. + .. audit-event:: pathlib.Path.rglob self,pattern pathlib.Path.rglob .. versionchanged:: 3.11 Return only directories if *pattern* ends with a pathname components separator (:data:`~os.sep` or :data:`~os.altsep`). + .. versionchanged:: 3.12 + The *case_sensitive* parameter was added. 
+ + .. versionchanged:: 3.13 + The *follow_symlinks* parameter was added. + .. method:: Path.rmdir() Remove this directory. The directory must be empty. @@ -1309,6 +1514,11 @@ call fails (for example because the path doesn't exist). The order of arguments (link, target) is the reverse of :func:`os.symlink`'s. + .. versionchanged:: 3.13 + Raises :exc:`UnsupportedOperation` if :func:`os.symlink` is not + available. In previous versions, :exc:`NotImplementedError` was raised. + + .. method:: Path.hardlink_to(target) Make this path a hard link to the same file as *target*. @@ -1319,6 +1529,10 @@ call fails (for example because the path doesn't exist). .. versionadded:: 3.10 + .. versionchanged:: 3.13 + Raises :exc:`UnsupportedOperation` if :func:`os.link` is not + available. In previous versions, :exc:`NotImplementedError` was raised. + .. method:: Path.touch(mode=0o666, exist_ok=True) @@ -1422,11 +1636,11 @@ Below is a table mapping various :mod:`os` functions to their corresponding :meth:`Path.group` :func:`os.path.isabs` :meth:`PurePath.is_absolute` :func:`os.path.join` :func:`PurePath.joinpath` -:func:`os.path.basename` :data:`PurePath.name` -:func:`os.path.dirname` :data:`PurePath.parent` +:func:`os.path.basename` :attr:`PurePath.name` +:func:`os.path.dirname` :attr:`PurePath.parent` :func:`os.path.samefile` :meth:`Path.samefile` -:func:`os.path.splitext` :data:`PurePath.stem` and - :data:`PurePath.suffix` +:func:`os.path.splitext` :attr:`PurePath.stem` and + :attr:`PurePath.suffix` ==================================== ============================== .. rubric:: Footnotes diff --git a/Doc/library/pdb.rst b/Doc/library/pdb.rst index 383c3adcf289d5f..bbc6aacc62aafa7 100644 --- a/Doc/library/pdb.rst +++ b/Doc/library/pdb.rst @@ -20,80 +20,107 @@ supports post-mortem debugging and can be called under program control. .. index:: single: Pdb (class in pdb) - module: bdb - module: cmd + pair: module; bdb + pair: module; cmd The debugger is extensible -- it is actually defined as the class :class:`Pdb`. This is currently undocumented but easily understood by reading the source. The extension interface uses the modules :mod:`bdb` and :mod:`cmd`. -The debugger's prompt is ``(Pdb)``. Typical usage to run a program under control -of the debugger is:: +.. seealso:: - >>> import pdb - >>> import mymodule - >>> pdb.run('mymodule.test()') - > (0)?() - (Pdb) continue - > (1)?() + Module :mod:`faulthandler` + Used to dump Python tracebacks explicitly, on a fault, after a timeout, + or on a user signal. + + Module :mod:`traceback` + Standard interface to extract, format and print stack traces of Python programs. + +The typical usage to break into the debugger is to insert:: + + import pdb; pdb.set_trace() + +Or:: + + breakpoint() + +at the location you want to break into the debugger, and then run the program. +You can then step through the code following this statement, and continue +running without the debugger using the :pdbcmd:`continue` command. + +.. versionadded:: 3.7 + The built-in :func:`breakpoint()`, when called with defaults, can be used + instead of ``import pdb; pdb.set_trace()``. + +:: + + def double(x): + breakpoint() + return x * 2 + val = 3 + print(f"{val} * 2 is {double(val)}") + +The debugger's prompt is ``(Pdb)``, which is the indicator that you are in debug mode:: + + > ...(3)double() + -> return x * 2 + (Pdb) p x + 3 (Pdb) continue - NameError: 'spam' - > (1)?() - (Pdb) + 3 * 2 is 6 .. 
versionchanged:: 3.3 Tab-completion via the :mod:`readline` module is available for commands and command arguments, e.g. the current global and local names are offered as arguments of the ``p`` command. -:file:`pdb.py` can also be invoked as a script to debug other scripts. For + +You can also invoke :mod:`pdb` from the command line to debug other scripts. For example:: - python3 -m pdb myscript.py + python -m pdb myscript.py -When invoked as a script, pdb will automatically enter post-mortem debugging if +When invoked as a module, pdb will automatically enter post-mortem debugging if the program being debugged exits abnormally. After post-mortem debugging (or after normal exit of the program), pdb will restart the program. Automatic restarting preserves pdb's state (such as breakpoints) and in most cases is more useful than quitting the debugger upon program's exit. .. versionadded:: 3.2 - :file:`pdb.py` now accepts a ``-c`` option that executes commands as if given + ``-c`` option is introduced to execute commands as if given in a :file:`.pdbrc` file, see :ref:`debugger-commands`. .. versionadded:: 3.7 - :file:`pdb.py` now accepts a ``-m`` option that execute modules similar to the way - ``python3 -m`` does. As with a script, the debugger will pause execution just + ``-m`` option is introduced to execute modules similar to the way + ``python -m`` does. As with a script, the debugger will pause execution just before the first line of the module. +Typical usage to execute a statement under control of the debugger is:: -The typical usage to break into the debugger is to insert:: - - import pdb; pdb.set_trace() - -at the location you want to break into the debugger, and then run the program. -You can then step through the code following this statement, and continue -running without the debugger using the :pdbcmd:`continue` command. - -.. versionadded:: 3.7 - The built-in :func:`breakpoint()`, when called with defaults, can be used - instead of ``import pdb; pdb.set_trace()``. + >>> import pdb + >>> def f(x): + ... print(1 / x) + >>> pdb.run("f(2)") + > (1)() + (Pdb) continue + 0.5 + >>> The typical usage to inspect a crashed program is:: >>> import pdb - >>> import mymodule - >>> mymodule.test() + >>> def f(x): + ... print(1 / x) + ... + >>> f(0) Traceback (most recent call last): File "", line 1, in - File "./mymodule.py", line 4, in test - test2() - File "./mymodule.py", line 3, in test2 - print(spam) - NameError: spam + File "", line 2, in f + ZeroDivisionError: division by zero >>> pdb.pm() - > ./mymodule.py(3)test2() - -> print(spam) + > (2)f() + (Pdb) p x + 0 (Pdb) @@ -116,7 +143,7 @@ slightly different way: Evaluate the *expression* (given as a string or a code object) under debugger control. When :func:`runeval` returns, it returns the value of the - expression. Otherwise this function is similar to :func:`run`. + *expression*. Otherwise this function is similar to :func:`run`. .. function:: runcall(function, *args, **kwds) @@ -148,8 +175,8 @@ slightly different way: .. function:: pm() - Enter post-mortem debugging of the traceback found in - :data:`sys.last_traceback`. + Enter post-mortem debugging of the exception found in + :data:`sys.last_exc`. The ``run*`` functions and :func:`set_trace` are aliases for instantiating the @@ -169,7 +196,7 @@ access further features, you have to do this yourself: that matches one of these patterns. 
[1]_ By default, Pdb sets a handler for the SIGINT signal (which is sent when the - user presses :kbd:`Ctrl-C` on the console) when you give a ``continue`` command. + user presses :kbd:`Ctrl-C` on the console) when you give a :pdbcmd:`continue` command. This allows you to break into the debugger again by pressing :kbd:`Ctrl-C`. If you want Pdb not to touch the SIGINT handler, set *nosigint* to true. @@ -225,6 +252,10 @@ change a variable or call a function. When an exception occurs in such a statement, the exception name is printed but the debugger's state is not changed. +.. versionchanged:: 3.13 + Expressions/Statements whose prefix is a pdb command are now correctly + identified and executed. + The debugger supports :ref:`aliases `. Aliases can have parameters which allows one a certain level of adaptability to the context under examination. @@ -236,6 +267,21 @@ the commands; the input is split at the first ``;;`` pair, even if it is in the middle of a quoted string. A workaround for strings with double semicolons is to use implicit string concatenation ``';'';'`` or ``";"";"``. +To set a temporary global variable, use a *convenience variable*. A *convenience +variable* is a variable whose name starts with ``$``. For example, ``$foo = 1`` +sets a global variable ``$foo`` which you can use in the debugger session. The +*convenience variables* are cleared when the program resumes execution so it's +less likely to interfere with your program compared to using normal variables +like ``foo = 1``. + +There are three preset *convenience variables*: + +* ``$_frame``: the current frame you are debugging +* ``$_retval``: the return value if the frame is returning +* ``$_exception``: the exception if the frame is raising an exception + +.. versionadded:: 3.12 + .. index:: pair: .pdbrc; file triple: debugger; configuration; file @@ -266,7 +312,7 @@ can be overridden by the local file. .. pdbcommand:: w(here) - Print a stack trace, with the most recent frame at the bottom. An arrow + Print a stack trace, with the most recent frame at the bottom. An arrow (``>``) indicates the current frame, which determines the context of most commands. .. pdbcommand:: d(own) [count] @@ -306,22 +352,22 @@ can be overridden by the local file. With a space separated list of breakpoint numbers, clear those breakpoints. Without argument, clear all breaks (but first ask confirmation). -.. pdbcommand:: disable [bpnumber ...] +.. pdbcommand:: disable bpnumber [bpnumber ...] Disable the breakpoints given as a space separated list of breakpoint numbers. Disabling a breakpoint means it cannot cause the program to stop execution, but unlike clearing a breakpoint, it remains in the list of breakpoints and can be (re-)enabled. -.. pdbcommand:: enable [bpnumber ...] +.. pdbcommand:: enable bpnumber [bpnumber ...] Enable the breakpoints specified. .. pdbcommand:: ignore bpnumber [count] - Set the ignore count for the given breakpoint number. If count is omitted, + Set the ignore count for the given breakpoint number. If *count* is omitted, the ignore count is set to 0. A breakpoint becomes active when the ignore - count is zero. When non-zero, the count is decremented each time the + count is zero. When non-zero, the *count* is decremented each time the breakpoint is reached and the breakpoint is not disabled and any associated condition evaluates to true. @@ -360,7 +406,7 @@ can be overridden by the local file. breakpoint—which could have its own command list, leading to ambiguities about which list to execute. 
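   A minimal sketch of a breakpoint command list in an interactive session
   (the breakpoint number and the variable name are illustrative, not taken
   from the surrounding documentation):

   .. code-block:: none

      (Pdb) commands 1
      (com) p some_variable
      (com) end
      (Pdb)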
- If you use the 'silent' command in the command list, the usual message about + If you use the ``silent`` command in the command list, the usual message about stopping at a breakpoint is not printed. This may be desirable for breakpoints that are to print a specific message and then continue. If none of the other commands print anything, you see no sign that the breakpoint was reached. @@ -383,8 +429,8 @@ can be overridden by the local file. Without argument, continue execution until the line with a number greater than the current one is reached. - With a line number, continue execution until a line with a number greater or - equal to that is reached. In both cases, also stop when the current frame + With *lineno*, continue execution until a line with a number greater or + equal to *lineno* is reached. In both cases, also stop when the current frame returns. .. versionchanged:: 3.2 @@ -433,11 +479,11 @@ can be overridden by the local file. .. pdbcommand:: a(rgs) - Print the argument list of the current function. + Print the arguments of the current function and their current values. .. pdbcommand:: p expression - Evaluate the *expression* in the current context and print its value. + Evaluate *expression* in the current context and print its value. .. note:: @@ -447,32 +493,76 @@ can be overridden by the local file. .. pdbcommand:: pp expression - Like the :pdbcmd:`p` command, except the value of the expression is + Like the :pdbcmd:`p` command, except the value of *expression* is pretty-printed using the :mod:`pprint` module. .. pdbcommand:: whatis expression - Print the type of the *expression*. + Print the type of *expression*. .. pdbcommand:: source expression - Try to get source code for the given object and display it. + Try to get source code of *expression* and display it. .. versionadded:: 3.2 .. pdbcommand:: display [expression] - Display the value of the expression if it changed, each time execution stops + Display the value of *expression* if it changed, each time execution stops in the current frame. - Without expression, list all display expressions for the current frame. + Without *expression*, list all display expressions for the current frame. + + .. note:: + + Display evaluates *expression* and compares to the result of the previous + evaluation of *expression*, so when the result is mutable, display may not + be able to pick up the changes. + + Example:: + + lst = [] + breakpoint() + pass + lst.append(1) + print(lst) + + Display won't realize ``lst`` has been changed because the result of evaluation + is modified in place by ``lst.append(1)`` before being compared:: + + > example.py(3)() + -> pass + (Pdb) display lst + display lst: [] + (Pdb) n + > example.py(4)() + -> lst.append(1) + (Pdb) n + > example.py(5)() + -> print(lst) + (Pdb) + + You can do some tricks with copy mechanism to make it work:: + + > example.py(3)() + -> pass + (Pdb) display lst[:] + display lst[:]: [] + (Pdb) n + > example.py(4)() + -> lst.append(1) + (Pdb) n + > example.py(5)() + -> print(lst) + display lst[:]: [1] [old: []] + (Pdb) .. versionadded:: 3.2 .. pdbcommand:: undisplay [expression] - Do not display the expression any more in the current frame. Without - expression, clear all display expressions for the current frame. + Do not display *expression* anymore in the current frame. Without + *expression*, clear all display expressions for the current frame. .. versionadded:: 3.2 @@ -488,10 +578,10 @@ can be overridden by the local file. .. 
pdbcommand:: alias [name [command]] - Create an alias called *name* that executes *command*. The command must + Create an alias called *name* that executes *command*. The *command* must *not* be enclosed in quotes. Replaceable parameters can be indicated by - ``%1``, ``%2``, and so on, while ``%*`` is replaced by all the parameters. - If no command is given, the current alias for *name* is shown. If no + ``%1``, ``%2``, ... and ``%9``, while ``%*`` is replaced by all the parameters. + If *command* is omitted, the current alias for *name* is shown. If no arguments are given, all aliases are listed. Aliases may be nested and can contain anything that can be legally typed at @@ -504,21 +594,29 @@ can be overridden by the local file. :file:`.pdbrc` file):: # Print instance variables (usage "pi classInst") - alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) + alias pi for k in %1.__dict__.keys(): print(f"%1.{k} = {%1.__dict__[k]}") # Print instance variables in self alias ps pi self .. pdbcommand:: unalias name - Delete the specified alias. + Delete the specified alias *name*. .. pdbcommand:: ! statement Execute the (one-line) *statement* in the context of the current stack frame. The exclamation point can be omitted unless the first word of the statement - resembles a debugger command. To set a global variable, you can prefix the - assignment command with a :keyword:`global` statement on the same line, - e.g.:: + resembles a debugger command, e.g.: + + .. code-block:: none + + (Pdb) ! n=42 + (Pdb) + + To set a global variable, you can prefix the assignment command with a + :keyword:`global` statement on the same line, e.g.: + + .. code-block:: none (Pdb) global list_options; list_options = ['-l'] (Pdb) @@ -526,7 +624,7 @@ can be overridden by the local file. .. pdbcommand:: run [args ...] restart [args ...] - Restart the debugged Python program. If an argument is supplied, it is split + Restart the debugged Python program. If *args* is supplied, it is split with :mod:`shlex` and the result is used as the new :data:`sys.argv`. History, breakpoints, actions and debugger options are preserved. :pdbcmd:`restart` is an alias for :pdbcmd:`run`. @@ -537,13 +635,62 @@ can be overridden by the local file. .. pdbcommand:: debug code - Enter a recursive debugger that steps through the code - argument (which is an arbitrary expression or statement to be + Enter a recursive debugger that steps through *code* + (which is an arbitrary expression or statement to be executed in the current environment). .. pdbcommand:: retval - Print the return value for the last return of a function. + Print the return value for the last return of the current function. + +.. pdbcommand:: exceptions [excnumber] + + List or jump between chained exceptions. + + When using ``pdb.pm()`` or ``Pdb.post_mortem(...)`` with a chained exception + instead of a traceback, it allows the user to move between the + chained exceptions using ``exceptions`` command to list exceptions, and + ``exception `` to switch to that exception. 
+ + + Example:: + + def out(): + try: + middle() + except Exception as e: + raise ValueError("reraise middle() error") from e + + def middle(): + try: + return inner(0) + except Exception as e: + raise ValueError("Middle fail") + + def inner(x): + 1 / x + + out() + + calling ``pdb.pm()`` will allow to move between exceptions:: + + > example.py(5)out() + -> raise ValueError("reraise middle() error") from e + + (Pdb) exceptions + 0 ZeroDivisionError('division by zero') + 1 ValueError('Middle fail') + > 2 ValueError('reraise middle() error') + + (Pdb) exceptions 0 + > example.py(16)inner() + -> 1 / x + + (Pdb) up + > example.py(10)middle() + -> return inner(0) + + .. versionadded:: 3.13 .. rubric:: Footnotes diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst index 79476b04cd914da..93387fb0b450382 100644 --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -125,7 +125,7 @@ Data stream format The data format used by :mod:`pickle` is Python-specific. This has the advantage that there are no restrictions imposed by external standards such as -JSON or XDR (which can't represent pointer sharing); however it means that +JSON (which can't represent pointer sharing); however it means that non-Python programs may not be able to reconstruct pickled Python objects. By default, the :mod:`pickle` data format uses a relatively compact binary @@ -494,7 +494,8 @@ What can be pickled and unpickled? The following types can be pickled: -* ``None``, ``True``, and ``False``; +* built-in constants (``None``, ``True``, ``False``, ``Ellipsis``, and + ``NotImplemented``); * integers, floating-point numbers, complex numbers; diff --git a/Doc/library/pickletools.rst b/Doc/library/pickletools.rst index 480f4a6d3208156..41930f8cbe8412d 100644 --- a/Doc/library/pickletools.rst +++ b/Doc/library/pickletools.rst @@ -17,6 +17,8 @@ are useful for Python core developers who are working on the :mod:`pickle`; ordinary users of the :mod:`pickle` module probably won't find the :mod:`pickletools` module relevant. +.. _pickletools-cli: + Command line usage ------------------ @@ -51,24 +53,24 @@ Command line options .. program:: pickletools -.. cmdoption:: -a, --annotate +.. option:: -a, --annotate Annotate each line with a short opcode description. -.. cmdoption:: -o, --output= +.. option:: -o, --output= Name of a file where the output should be written. -.. cmdoption:: -l, --indentlevel= +.. option:: -l, --indentlevel= The number of blanks by which to indent a new MARK level. -.. cmdoption:: -m, --memo +.. option:: -m, --memo When multiple objects are disassembled, preserve memo between disassemblies. -.. cmdoption:: -p, --preamble= +.. option:: -p, --preamble= When more than one pickle file are specified, print given preamble before each disassembly. diff --git a/Doc/library/pipes.rst b/Doc/library/pipes.rst deleted file mode 100644 index 471ae0dbc9768a1..000000000000000 --- a/Doc/library/pipes.rst +++ /dev/null @@ -1,103 +0,0 @@ -:mod:`pipes` --- Interface to shell pipelines -============================================= - -.. module:: pipes - :platform: Unix - :synopsis: A Python interface to Unix shell pipelines. - :deprecated: - -.. sectionauthor:: Moshe Zadka - -**Source code:** :source:`Lib/pipes.py` - -.. deprecated-removed:: 3.11 3.13 - The :mod:`pipes` module is deprecated - (see :pep:`PEP 594 <594#pipes>` for details). - Please use the :mod:`subprocess` module instead. 
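As a hedged illustration of that advice (not part of the original :mod:`pipes`
documentation), the upper-casing pipeline from the example below could be
written with :mod:`subprocess` along these lines::

   import subprocess

   # Feed a string through ``tr a-z A-Z`` and read the transformed output,
   # mirroring the pipes.Template example that follows.
   result = subprocess.run(['tr', 'a-z', 'A-Z'],
                           input='hello world',
                           capture_output=True, text=True, check=True)
   print(result.stdout)  # 'HELLO WORLD'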
- --------------- - -The :mod:`pipes` module defines a class to abstract the concept of a *pipeline* ---- a sequence of converters from one file to another. - -Because the module uses :program:`/bin/sh` command lines, a POSIX or compatible -shell for :func:`os.system` and :func:`os.popen` is required. - -.. availability:: Unix, not VxWorks. - -The :mod:`pipes` module defines the following class: - - -.. class:: Template() - - An abstraction of a pipeline. - -Example:: - - >>> import pipes - >>> t = pipes.Template() - >>> t.append('tr a-z A-Z', '--') - >>> f = t.open('pipefile', 'w') - >>> f.write('hello world') - >>> f.close() - >>> open('pipefile').read() - 'HELLO WORLD' - - -.. _template-objects: - -Template Objects ----------------- - -Template objects following methods: - - -.. method:: Template.reset() - - Restore a pipeline template to its initial state. - - -.. method:: Template.clone() - - Return a new, equivalent, pipeline template. - - -.. method:: Template.debug(flag) - - If *flag* is true, turn debugging on. Otherwise, turn debugging off. When - debugging is on, commands to be executed are printed, and the shell is given - ``set -x`` command to be more verbose. - - -.. method:: Template.append(cmd, kind) - - Append a new action at the end. The *cmd* variable must be a valid bourne shell - command. The *kind* variable consists of two letters. - - The first letter can be either of ``'-'`` (which means the command reads its - standard input), ``'f'`` (which means the commands reads a given file on the - command line) or ``'.'`` (which means the commands reads no input, and hence - must be first.) - - Similarly, the second letter can be either of ``'-'`` (which means the command - writes to standard output), ``'f'`` (which means the command writes a file on - the command line) or ``'.'`` (which means the command does not write anything, - and hence must be last.) - - -.. method:: Template.prepend(cmd, kind) - - Add a new action at the beginning. See :meth:`append` for explanations of the - arguments. - - -.. method:: Template.open(file, mode) - - Return a file-like object, open to *file*, but read from or written to by the - pipeline. Note that only one of ``'r'``, ``'w'`` may be given. - - -.. method:: Template.copy(infile, outfile) - - Copy *infile* to *outfile* through the pipe. - diff --git a/Doc/library/pkgutil.rst b/Doc/library/pkgutil.rst index 788a02dcb8922f0..891a867d1ceb68d 100644 --- a/Doc/library/pkgutil.rst +++ b/Doc/library/pkgutil.rst @@ -25,9 +25,9 @@ support. from pkgutil import extend_path __path__ = extend_path(__path__, __name__) - This will add to the package's ``__path__`` all subdirectories of directories - on :data:`sys.path` named after the package. This is useful if one wants to - distribute different parts of a single logical package as multiple + For each directory on :data:`sys.path` that has a subdirectory that matches the + package name, add the subdirectory to the package's :attr:`__path__`. This is useful + if one wants to distribute different parts of a single logical package as multiple directories. It also looks for :file:`\*.pkg` files beginning where ``*`` matches the @@ -48,33 +48,6 @@ support. this function to raise an exception (in line with :func:`os.path.isdir` behavior). - -.. class:: ImpImporter(dirname=None) - - :pep:`302` Finder that wraps Python's "classic" import algorithm. - - If *dirname* is a string, a :pep:`302` finder is created that searches that - directory. 
If *dirname* is ``None``, a :pep:`302` finder is created that - searches the current :data:`sys.path`, plus any modules that are frozen or - built-in. - - Note that :class:`ImpImporter` does not currently support being used by - placement on :data:`sys.meta_path`. - - .. deprecated:: 3.3 - This emulation is no longer needed, as the standard import mechanism - is now fully :pep:`302` compliant and available in :mod:`importlib`. - - -.. class:: ImpLoader(fullname, file, filename, etc) - - :term:`Loader ` that wraps Python's "classic" import algorithm. - - .. deprecated:: 3.3 - This emulation is no longer needed, as the standard import mechanism - is now fully :pep:`302` compliant and available in :mod:`importlib`. - - .. function:: find_loader(fullname) Retrieve a module :term:`loader` for the given *fullname*. @@ -82,7 +55,7 @@ support. This is a backwards compatibility wrapper around :func:`importlib.util.find_spec` that converts most failures to :exc:`ImportError` and only returns the loader rather than the full - :class:`ModuleSpec`. + :class:`importlib.machinery.ModuleSpec`. .. versionchanged:: 3.3 Updated to be based directly on :mod:`importlib` rather than relying @@ -91,6 +64,10 @@ support. .. versionchanged:: 3.4 Updated to be based on :pep:`451` + .. deprecated-removed:: 3.12 3.14 + Use :func:`importlib.util.find_spec` instead. + + .. function:: get_importer(path_item) Retrieve a :term:`finder` for the given *path_item*. @@ -123,6 +100,9 @@ support. .. versionchanged:: 3.4 Updated to be based on :pep:`451` + .. deprecated-removed:: 3.12 3.14 + Use :func:`importlib.util.find_spec` instead. + .. function:: iter_importers(fullname='') diff --git a/Doc/library/platform.rst b/Doc/library/platform.rst index a0c9f63ab9f9573..ec2a7ebd5d6e0b1 100644 --- a/Doc/library/platform.rst +++ b/Doc/library/platform.rst @@ -46,7 +46,7 @@ Cross Platform universal files containing multiple architectures. To get at the "64-bitness" of the current interpreter, it is more - reliable to query the :attr:`sys.maxsize` attribute:: + reliable to query the :data:`sys.maxsize` attribute:: is_64bits = sys.maxsize > 2**32 @@ -63,7 +63,7 @@ Cross Platform string is returned if the value cannot be determined. -.. function:: platform(aliased=0, terse=0) +.. function:: platform(aliased=False, terse=False) Returns a single string identifying the underlying platform with as much useful information as possible. diff --git a/Doc/library/plistlib.rst b/Doc/library/plistlib.rst index 5ded9661f080143..732ef3536863cc8 100644 --- a/Doc/library/plistlib.rst +++ b/Doc/library/plistlib.rst @@ -46,7 +46,7 @@ or :class:`datetime.datetime` objects. .. seealso:: - `PList manual page `_ + `PList manual page `_ Apple's documentation of the file format. 
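A small, hedged sketch of round-tripping a plist through a file with
:func:`plistlib.dump` and :func:`plistlib.load` (the file name is
illustrative)::

   import plistlib

   data = {"aKey": "value", "count": 3}
   with open("example.plist", "wb") as fp:   # illustrative file name
       plistlib.dump(data, fp)
   with open("example.plist", "rb") as fp:
       print(plistlib.load(fp)["aKey"])      # prints: value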
@@ -159,6 +159,9 @@ Examples Generating a plist:: + import datetime + import plistlib + pl = dict( aString = "Doodah", aList = ["A", "B", 12, 32.1, [1, 2, 3]], @@ -172,13 +175,19 @@ Generating a plist:: ), someData = b"", someMoreData = b"" * 10, - aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())), + aDate = datetime.datetime.now() ) - with open(fileName, 'wb') as fp: - dump(pl, fp) + print(plistlib.dumps(pl).decode()) Parsing a plist:: - with open(fileName, 'rb') as fp: - pl = load(fp) - print(pl["aKey"]) + import plistlib + + plist = b""" + + foo + bar + + """ + pl = plistlib.loads(plist) + print(pl["foo"]) diff --git a/Doc/library/poplib.rst b/Doc/library/poplib.rst index e22a2e1455e7fcd..943eb21f6eec027 100644 --- a/Doc/library/poplib.rst +++ b/Doc/library/poplib.rst @@ -53,7 +53,7 @@ The :mod:`poplib` module provides two classes: If the *timeout* parameter is set to be zero, it will raise a :class:`ValueError` to prevent the creation of a non-blocking socket. -.. class:: POP3_SSL(host, port=POP3_SSL_PORT, keyfile=None, certfile=None, timeout=None, context=None) +.. class:: POP3_SSL(host, port=POP3_SSL_PORT, *, timeout=None, context=None) This is a subclass of :class:`POP3` that connects to the server over an SSL encrypted socket. If *port* is not specified, 995, the standard POP3-over-SSL @@ -63,10 +63,6 @@ The :mod:`poplib` module provides two classes: single (potentially long-lived) structure. Please read :ref:`ssl-security` for best practices. - *keyfile* and *certfile* are a legacy alternative to *context* - they can - point to PEM-formatted private key and certificate chain files, - respectively, for the SSL connection. - .. audit-event:: poplib.connect self,host,port poplib.POP3_SSL .. audit-event:: poplib.putline self,line poplib.POP3_SSL @@ -81,19 +77,15 @@ The :mod:`poplib` module provides two classes: .. versionchanged:: 3.4 The class now supports hostname check with :attr:`ssl.SSLContext.check_hostname` and *Server Name Indication* (see - :data:`ssl.HAS_SNI`). - - .. deprecated:: 3.6 - - *keyfile* and *certfile* are deprecated in favor of *context*. - Please use :meth:`ssl.SSLContext.load_cert_chain` instead, or let - :func:`ssl.create_default_context` select the system's trusted CA - certificates for you. + :const:`ssl.HAS_SNI`). .. versionchanged:: 3.9 If the *timeout* parameter is set to be zero, it will raise a :class:`ValueError` to prevent the creation of a non-blocking socket. + .. versionchanged:: 3.12 + The deprecated *keyfile* and *certfile* parameters have been removed. + One exception is defined as an attribute of the :mod:`poplib` module: @@ -156,7 +148,7 @@ A :class:`POP3` instance has the following methods: .. method:: POP3.pass_(password) Send password, response includes message count and mailbox size. Note: the - mailbox on the server is locked until :meth:`~poplib.quit` is called. + mailbox on the server is locked until :meth:`~POP3.quit` is called. .. method:: POP3.apop(user, secret) @@ -248,7 +240,7 @@ A :class:`POP3` instance has the following methods: This method supports hostname checking via :attr:`ssl.SSLContext.check_hostname` and *Server Name Indication* (see - :data:`ssl.HAS_SNI`). + :const:`ssl.HAS_SNI`). .. 
versionadded:: 3.4 diff --git a/Doc/library/posix.rst b/Doc/library/posix.rst index ec04b0dcfc162f6..5871574b442667e 100644 --- a/Doc/library/posix.rst +++ b/Doc/library/posix.rst @@ -11,7 +11,9 @@ This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly disguised Unix interface). -.. index:: module: os +.. availability:: Unix. + +.. index:: pair: module; os **Do not import this module directly.** Instead, import the module :mod:`os`, which provides a *portable* version of this interface. On Unix, the :mod:`os` diff --git a/Doc/library/pprint.rst b/Doc/library/pprint.rst index 4e29192311fc211..e883acd67d6c72f 100644 --- a/Doc/library/pprint.rst +++ b/Doc/library/pprint.rst @@ -45,7 +45,7 @@ The :mod:`pprint` module defines one class: several keyword parameters. *stream* (default ``sys.stdout``) is a :term:`file-like object` to - which the output will be written by calling its :meth:`write` method. + which the output will be written by calling its :meth:`!write` method. If both *stream* and ``sys.stdout`` are ``None``, then :meth:`~PrettyPrinter.pprint` silently returns. @@ -159,7 +159,7 @@ The :mod:`pprint` module defines one class: .. function:: isreadable(object) - .. index:: builtin: eval + .. index:: pair: built-in function; eval Determine if the formatted representation of *object* is "readable", or can be used to reconstruct the value using :func:`eval`. This always returns ``False`` @@ -218,7 +218,7 @@ created. .. method:: PrettyPrinter.isreadable(object) - .. index:: builtin: eval + .. index:: pair: built-in function; eval Determine if the formatted representation of the object is "readable," or can be used to reconstruct the value using :func:`eval`. Note that this returns diff --git a/Doc/library/profile.rst b/Doc/library/profile.rst index 2d95096f4cb83a8..cc059b66fcb84b5 100644 --- a/Doc/library/profile.rst +++ b/Doc/library/profile.rst @@ -82,8 +82,8 @@ the following:: The first line indicates that 214 calls were monitored. Of those calls, 207 were :dfn:`primitive`, meaning that the call was not induced via recursion. The -next line: ``Ordered by: cumulative name``, indicates that the text string in the -far right column was used to sort the output. The column headings include: +next line: ``Ordered by: cumulative time`` indicates the output is sorted +by the ``cumtime`` values. The column headings include: ncalls for the number of calls. @@ -121,6 +121,8 @@ results to a file by specifying a filename to the :func:`run` function:: The :class:`pstats.Stats` class reads profile results from a file and formats them in various ways. +.. _profile-cli: + The files :mod:`cProfile` and :mod:`profile` can also be invoked as a script to profile another script. For example:: @@ -133,11 +135,11 @@ the output by. This only applies when ``-o`` is not supplied. ``-m`` specifies that a module is being profiled instead of a script. - .. versionadded:: 3.7 - Added the ``-m`` option to :mod:`cProfile`. +.. versionadded:: 3.7 + Added the ``-m`` option to :mod:`cProfile`. - .. versionadded:: 3.8 - Added the ``-m`` option to :mod:`profile`. +.. versionadded:: 3.8 + Added the ``-m`` option to :mod:`profile`. The :mod:`pstats` module's :class:`~pstats.Stats` class has a variety of methods for manipulating and printing the data saved into a profile results file:: @@ -274,7 +276,7 @@ functions: with cProfile.Profile() as pr: # ... do something ... - pr.print_stats() + pr.print_stats() .. 
versionchanged:: 3.8 Added context manager support. diff --git a/Doc/library/pty.rst b/Doc/library/pty.rst index 7f4da41e93802dd..af9378464edb9f6 100644 --- a/Doc/library/pty.rst +++ b/Doc/library/pty.rst @@ -16,6 +16,8 @@ The :mod:`pty` module defines operations for handling the pseudo-terminal concept: starting another process and being able to write to and read from its controlling terminal programmatically. +.. availability:: Unix. + Pseudo-terminal handling is highly platform dependent. This code is mainly tested on Linux, FreeBSD, and macOS (it is supposed to work on other POSIX platforms but it's not been thoroughly tested). @@ -71,7 +73,7 @@ The :mod:`pty` module defines the following functions: Return the exit status value from :func:`os.waitpid` on the child process. - :func:`waitstatus_to_exitcode` can be used to convert the exit status into + :func:`os.waitstatus_to_exitcode` can be used to convert the exit status into an exit code. .. audit-event:: pty.spawn argv pty.spawn diff --git a/Doc/library/pwd.rst b/Doc/library/pwd.rst index 98f3c45e29cbcbc..dbe68cd14ec4d4e 100644 --- a/Doc/library/pwd.rst +++ b/Doc/library/pwd.rst @@ -10,7 +10,7 @@ This module provides access to the Unix user account and password database. It is available on all Unix versions. -.. include:: ../includes/wasm-notavail.rst +.. availability:: Unix, not Emscripten, not WASI. Password database entries are reported as a tuple-like object, whose attributes correspond to the members of the ``passwd`` structure (Attribute field below, @@ -39,16 +39,13 @@ raised if the entry asked for cannot be found. .. note:: - .. index:: module: crypt - In traditional Unix the field ``pw_passwd`` usually contains a password - encrypted with a DES derived algorithm (see module :mod:`crypt`). However most + encrypted with a DES derived algorithm. However most modern unices use a so-called *shadow password* system. On those unices the *pw_passwd* field only contains an asterisk (``'*'``) or the letter ``'x'`` where the encrypted password is stored in a file :file:`/etc/shadow` which is not world readable. Whether the *pw_passwd* field contains anything useful is - system-dependent. If available, the :mod:`spwd` module should be used where - access to the encrypted password is required. + system-dependent. It defines the following items: @@ -72,7 +69,3 @@ It defines the following items: Module :mod:`grp` An interface to the group database, similar to this. - - Module :mod:`spwd` - An interface to the shadow password database, similar to this. - diff --git a/Doc/library/py_compile.rst b/Doc/library/py_compile.rst index 69b93a3bdfcb265..38c416f9ad03051 100644 --- a/Doc/library/py_compile.rst +++ b/Doc/library/py_compile.rst @@ -125,6 +125,7 @@ byte-code cache files in the directory containing the source code. This option is useful when the ``.pycs`` are kept up to date by some system external to Python like a build system. +.. _py_compile-cli: Command-Line Interface ---------------------- @@ -138,13 +139,13 @@ not be compiled. .. program:: python -m py_compile -.. cmdoption:: ... - - +.. option:: ... + - Positional arguments are files to compile. If ``-`` is the only parameter, the list of files is taken from standard input. -.. cmdoption:: -q, --quiet +.. option:: -q, --quiet Suppress errors output. diff --git a/Doc/library/pydoc.rst b/Doc/library/pydoc.rst index 94daf4a58f9c24b..03e0915bf6d1350 100644 --- a/Doc/library/pydoc.rst +++ b/Doc/library/pydoc.rst @@ -33,7 +33,7 @@ as text on the console. 
The same text documentation can also be viewed from outside the Python interpreter by running :program:`pydoc` as a script at the operating system's command prompt. For example, running :: - pydoc sys + python -m pydoc sys at a shell prompt will display documentation on the :mod:`sys` module, in a style similar to the manual pages shown by the Unix :program:`man` command. The @@ -65,18 +65,18 @@ manner similar to the Unix :program:`man` command. The synopsis line of a module is the first line of its documentation string. You can also use :program:`pydoc` to start an HTTP server on the local machine -that will serve documentation to visiting web browsers. :program:`pydoc -p 1234` +that will serve documentation to visiting web browsers. :program:`python -m pydoc -p 1234` will start a HTTP server on port 1234, allowing you to browse the documentation at ``http://localhost:1234/`` in your preferred web browser. Specifying ``0`` as the port number will select an arbitrary unused port. -:program:`pydoc -n ` will start the server listening at the given +:program:`python -m pydoc -n ` will start the server listening at the given hostname. By default the hostname is 'localhost' but if you want the server to be reached from other machines, you may want to change the host name that the server responds to. During development this is especially useful if you want to run pydoc from within a container. -:program:`pydoc -b` will start the server and additionally open a web +:program:`python -m pydoc -b` will start the server and additionally open a web browser to a module index page. Each served page has a navigation bar at the top where you can *Get* help on an individual item, *Search* all modules with a keyword in their synopsis line, and go to the *Module index*, *Topics* and diff --git a/Doc/library/pyexpat.rst b/Doc/library/pyexpat.rst index d6581e21b01c0e9..935e872480efda7 100644 --- a/Doc/library/pyexpat.rst +++ b/Doc/library/pyexpat.rst @@ -33,7 +33,7 @@ can be set to handler functions. When an XML document is then fed to the parser, the handler functions are called for the character data and markup in the XML document. -.. index:: module: pyexpat +.. index:: pair: module; pyexpat This module uses the :mod:`pyexpat` module to provide access to the Expat parser. Direct use of the :mod:`pyexpat` module is deprecated. diff --git a/Doc/library/python.rst b/Doc/library/python.rst index f39613f572884f0..610435999d9f48c 100644 --- a/Doc/library/python.rst +++ b/Doc/library/python.rst @@ -12,6 +12,7 @@ overview: .. toctree:: sys.rst + sys.monitoring.rst sysconfig.rst builtins.rst __main__.rst diff --git a/Doc/library/queue.rst b/Doc/library/queue.rst index c67f15e953bccc9..b2b787c5a8260cf 100644 --- a/Doc/library/queue.rst +++ b/Doc/library/queue.rst @@ -15,7 +15,7 @@ module implements all the required locking semantics. The module implements three types of queue, which differ only in the order in which the entries are retrieved. In a :abbr:`FIFO (first-in, first-out)` -queue, the first tasks added are the first retrieved. In a +queue, the first tasks added are the first retrieved. In a :abbr:`LIFO (last-in, first-out)` queue, the most recently added entry is the first retrieved (operating like a stack). With a priority queue, the entries are kept sorted (using the :mod:`heapq` module) and the @@ -57,8 +57,8 @@ The :mod:`queue` module defines the following classes and exceptions: *maxsize* is less than or equal to zero, the queue size is infinite. 
The lowest valued entries are retrieved first (the lowest valued entry is the - one returned by ``sorted(list(entries))[0]``). A typical pattern for entries - is a tuple in the form: ``(priority_number, data)``. + one that would be returned by ``min(entries)``). A typical pattern for + entries is a tuple in the form: ``(priority_number, data)``. If the *data* elements are not comparable, the data can be wrapped in a class that ignores the data item and only compares the priority number:: @@ -127,8 +127,8 @@ provide the public methods described below. .. method:: Queue.put(item, block=True, timeout=None) - Put *item* into the queue. If optional args *block* is true and *timeout* is - ``None`` (the default), block if necessary until a free slot is available. If + Put *item* into the queue. If optional args *block* is true and *timeout* is + ``None`` (the default), block if necessary until a free slot is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :exc:`Full` exception if no free slot was available within that time. Otherwise (*block* is false), put an item on the queue if a free slot is @@ -143,7 +143,7 @@ provide the public methods described below. .. method:: Queue.get(block=True, timeout=None) - Remove and return an item from the queue. If optional args *block* is true and + Remove and return an item from the queue. If optional args *block* is true and *timeout* is ``None`` (the default), block if necessary until an item is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :exc:`Empty` exception if no item was available within that time. @@ -152,7 +152,7 @@ provide the public methods described below. Prior to 3.0 on POSIX systems, and for all versions on Windows, if *block* is true and *timeout* is ``None``, this operation goes into - an uninterruptible wait on an underlying lock. This means that no exceptions + an uninterruptible wait on an underlying lock. This means that no exceptions can occur, and in particular a SIGINT will not trigger a :exc:`KeyboardInterrupt`. @@ -184,7 +184,7 @@ fully processed by daemon consumer threads. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls :meth:`task_done` to - indicate that the item was retrieved and all work on it is complete. When the + indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, :meth:`join` unblocks. @@ -227,7 +227,7 @@ SimpleQueue Objects .. method:: SimpleQueue.empty() - Return ``True`` if the queue is empty, ``False`` otherwise. If empty() + Return ``True`` if the queue is empty, ``False`` otherwise. If empty() returns ``False`` it doesn't guarantee that a subsequent call to get() will not block. diff --git a/Doc/library/random.rst b/Doc/library/random.rst index 669204ba65b77e1..76ae97a8be7e63c 100644 --- a/Doc/library/random.rst +++ b/Doc/library/random.rst @@ -21,8 +21,8 @@ lognormal, negative exponential, gamma, and beta distributions. For generating distributions of angles, the von Mises distribution is available. Almost all module functions depend on the basic function :func:`.random`, which -generates a random float uniformly in the semi-open range [0.0, 1.0). Python -uses the Mersenne Twister as the core generator. It produces 53-bit precision +generates a random float uniformly in the half-open range ``0.0 <= X < 1.0``. +Python uses the Mersenne Twister as the core generator. 
It produces 53-bit precision floats and has a period of 2\*\*19937-1. The underlying implementation in C is both fast and threadsafe. The Mersenne Twister is one of the most extensively tested random number generators in existence. However, being completely @@ -294,7 +294,7 @@ be found in any statistics text. .. function:: random() - Return the next random floating point number in the range [0.0, 1.0). + Return the next random floating point number in the range ``0.0 <= X < 1.0`` .. function:: uniform(a, b) @@ -320,7 +320,7 @@ be found in any statistics text. ``beta > 0``. Returned values range between 0 and 1. -.. function:: expovariate(lambd) +.. function:: expovariate(lambd = 1.0) Exponential distribution. *lambd* is 1.0 divided by the desired mean. It should be nonzero. (The parameter would be called @@ -328,11 +328,16 @@ be found in any statistics text. range from 0 to positive infinity if *lambd* is positive, and from negative infinity to 0 if *lambd* is negative. + .. versionchanged:: 3.12 + Added the default value for ``lambd``. + .. function:: gammavariate(alpha, beta) - Gamma distribution. (*Not* the gamma function!) Conditions on the - parameters are ``alpha > 0`` and ``beta > 0``. + Gamma distribution. (*Not* the gamma function!) The shape and + scale parameters, *alpha* and *beta*, must have positive values. + (Calling conventions vary and some sources define 'beta' + as the inverse of the scale). The probability distribution function is:: @@ -343,7 +348,8 @@ be found in any statistics text. .. function:: gauss(mu=0.0, sigma=1.0) - Normal distribution, also called the Gaussian distribution. *mu* is the mean, + Normal distribution, also called the Gaussian distribution. + *mu* is the mean, and *sigma* is the standard deviation. This is slightly faster than the :func:`normalvariate` function defined below. @@ -401,8 +407,8 @@ Alternative Generator Class that implements the default pseudo-random number generator used by the :mod:`random` module. - .. deprecated:: 3.9 - In the future, the *seed* must be one of the following types: + .. deprecated-removed:: 3.9 3.11 + Formerly the *seed* could be any hashable object. Now it is limited to: :class:`NoneType`, :class:`int`, :class:`float`, :class:`str`, :class:`bytes`, or :class:`bytearray`. @@ -420,7 +426,7 @@ Notes on Reproducibility ------------------------ Sometimes it is useful to be able to reproduce the sequences given by a -pseudo-random number generator. By re-using a seed value, the same sequence should be +pseudo-random number generator. By reusing a seed value, the same sequence should be reproducible from run to run as long as multiple threads are not running. Most of the random module's algorithms and seeding functions are subject to @@ -607,7 +613,8 @@ from the combinatoric iterators in the :mod:`itertools` module: return tuple(pool[i] for i in indices) def random_combination_with_replacement(iterable, r): - "Random selection from itertools.combinations_with_replacement(iterable, r)" + "Choose r elements with replacement. Order the result to match the iterable." + # Result will be in set(itertools.combinations_with_replacement(iterable, r)). 
pool = tuple(iterable) n = len(pool) indices = sorted(random.choices(range(n), k=r)) diff --git a/Doc/library/re.rst b/Doc/library/re.rst index 0e7dda04b1d68d5..251ec8ca0021a65 100644 --- a/Doc/library/re.rst +++ b/Doc/library/re.rst @@ -29,7 +29,7 @@ a literal backslash, one might have to write ``'\\\\'`` as the pattern string, because the regular expression must be ``\\``, and each backslash must be expressed as ``\\`` inside a regular Python string literal. Also, please note that any invalid escape sequences in Python's -usage of the backslash in string literals now generate a :exc:`DeprecationWarning` +usage of the backslash in string literals now generate a :exc:`SyntaxWarning` and in the future this will become a :exc:`SyntaxError`. This behaviour will happen even if it is a valid escape sequence for a regular expression. @@ -176,7 +176,7 @@ The special characters are: ``x*+``, ``x++`` and ``x?+`` are equivalent to ``(?>x*)``, ``(?>x+)`` and ``(?>x?)`` correspondingly. - .. versionadded:: 3.11 + .. versionadded:: 3.11 .. index:: single: {} (curly brackets); in regular expressions @@ -271,7 +271,8 @@ The special characters are: * To match a literal ``']'`` inside a set, precede it with a backslash, or place it at the beginning of the set. For example, both ``[()[\]{}]`` and - ``[]()[{}]`` will both match a parenthesis. + ``[]()[{}]`` will match a right bracket, as well as left bracket, braces, + and parentheses. .. .. index:: single: --; in regular expressions .. .. index:: single: &&; in regular expressions @@ -395,9 +396,9 @@ The special characters are: ``(?P...)`` Similar to regular parentheses, but the substring matched by the group is accessible via the symbolic group name *name*. Group names must be valid - Python identifiers, and in bytes patterns they must contain only characters - in the ASCII range. Each group name must be defined only once within a - regular expression. A symbolic group is also a numbered group, just as if + Python identifiers, and in :class:`bytes` patterns they can only contain + bytes in the ASCII range. Each group name must be defined only once within + a regular expression. A symbolic group is also a numbered group, just as if the group were not named. Named groups can be referenced in three contexts. If the pattern is @@ -419,8 +420,8 @@ The special characters are: +---------------------------------------+----------------------------------+ .. versionchanged:: 3.12 - In bytes patterns group names must contain only characters in - the ASCII range. + In :class:`bytes` patterns, group *name* can only contain bytes + in the ASCII range (``b'\x00'``-``b'\x7f'``). .. index:: single: (?P=; in regular expressions @@ -496,7 +497,11 @@ The special characters are: .. versionchanged:: 3.12 Group *id* can only contain ASCII digits. + In :class:`bytes` patterns, group *name* can only contain bytes + in the ASCII range (``b'\x00'``-``b'\x7f'``). + +.. _re-special-sequences: The special sequences consist of ``'\'`` and a character from the list below. If the ordinary character is not an ASCII digit or an ASCII letter, then the @@ -591,10 +596,9 @@ character ``'$'``. ``\w`` For Unicode (str) patterns: - Matches Unicode word characters; this includes most characters - that can be part of a word in any language, as well as numbers and - the underscore. If the :const:`ASCII` flag is used, only - ``[a-zA-Z0-9_]`` is matched. + Matches Unicode word characters; this includes alphanumeric characters (as defined by :meth:`str.isalnum`) + as well as the underscore (``_``). 
+ If the :const:`ASCII` flag is used, only ``[a-zA-Z0-9_]`` is matched. For 8-bit (bytes) patterns: Matches characters considered alphanumeric in the ASCII character set; @@ -630,8 +634,8 @@ character ``'$'``. single: \x; in regular expressions single: \\; in regular expressions -Most of the standard escapes supported by Python string literals are also -accepted by the regular expression parser:: +Most of the :ref:`escape sequences ` supported by Python +string literals are also accepted by the regular expression parser:: \a \b \f \n \N \r \t \u @@ -656,7 +660,7 @@ three digits in length. Unknown escapes consisting of ``'\'`` and an ASCII letter now are errors. .. versionchanged:: 3.8 - The ``'\N{name}'`` escape sequence has been added. As in string literals, + The :samp:`'\\N\\{{name}\\}'` escape sequence has been added. As in string literals, it expands to the named Unicode character (e.g. ``'\N{EM DASH}'``). @@ -777,6 +781,17 @@ Flags Corresponds to the inline flag ``(?s)``. +.. data:: U + UNICODE + + In Python 2, this flag made :ref:`special sequences ` + include Unicode characters in matches. Since Python 3, Unicode characters + are matched by default. + + See :const:`A` for restricting matching on ASCII characters instead. + + This flag is only kept for backward compatibility. + .. data:: X VERBOSE @@ -841,18 +856,17 @@ Functions .. function:: search(pattern, string, flags=0) Scan through *string* looking for the first location where the regular expression - *pattern* produces a match, and return a corresponding :ref:`match object - `. Return ``None`` if no position in the string matches the - pattern; note that this is different from finding a zero-length match at some - point in the string. + *pattern* produces a match, and return a corresponding :class:`~re.Match`. Return + ``None`` if no position in the string matches the pattern; note that this is + different from finding a zero-length match at some point in the string. .. function:: match(pattern, string, flags=0) If zero or more characters at the beginning of *string* match the regular - expression *pattern*, return a corresponding :ref:`match object - `. Return ``None`` if the string does not match the pattern; - note that this is different from a zero-length match. + expression *pattern*, return a corresponding :class:`~re.Match`. Return + ``None`` if the string does not match the pattern; note that this is + different from a zero-length match. Note that even in :const:`MULTILINE` mode, :func:`re.match` will only match at the beginning of the string and not at the beginning of each line. @@ -864,9 +878,8 @@ Functions .. function:: fullmatch(pattern, string, flags=0) If the whole *string* matches the regular expression *pattern*, return a - corresponding :ref:`match object `. Return ``None`` if the - string does not match the pattern; note that this is different from a - zero-length match. + corresponding :class:`~re.Match`. Return ``None`` if the string does not match + the pattern; note that this is different from a zero-length match. .. versionadded:: 3.4 @@ -883,7 +896,7 @@ Functions ['Words', 'words', 'words', ''] >>> re.split(r'(\W+)', 'Words, words, words.') ['Words', ', ', 'words', ', ', 'words', '.', ''] - >>> re.split(r'\W+', 'Words, words, words.', 1) + >>> re.split(r'\W+', 'Words, words, words.', maxsplit=1) ['Words', 'words, words.'] >>> re.split('[a-f]+', '0a3B9', flags=re.IGNORECASE) ['0', '3', '9'] @@ -914,6 +927,11 @@ Functions .. 
versionchanged:: 3.7 Added support of splitting on a pattern that could match an empty string. + .. deprecated:: 3.13 + Passing *maxsplit* and *flags* as positional arguments is deprecated. + In future Python versions they will be + :ref:`keyword-only parameters `. + .. function:: findall(pattern, string, flags=0) @@ -939,7 +957,7 @@ Functions .. function:: finditer(pattern, string, flags=0) - Return an :term:`iterator` yielding :ref:`match objects ` over + Return an :term:`iterator` yielding :class:`~re.Match` objects over all non-overlapping matches for the RE *pattern* in *string*. The *string* is scanned left-to-right, and matches are returned in the order found. Empty matches are included in the result. @@ -967,18 +985,19 @@ Functions 'static PyObject*\npy_myfunc(void)\n{' If *repl* is a function, it is called for every non-overlapping occurrence of - *pattern*. The function takes a single :ref:`match object ` - argument, and returns the replacement string. For example:: + *pattern*. The function takes a single :class:`~re.Match` argument, and returns + the replacement string. For example:: >>> def dashrepl(matchobj): ... if matchobj.group(0) == '-': return ' ' ... else: return '-' + ... >>> re.sub('-{1,2}', dashrepl, 'pro----gram-files') 'pro--gram files' >>> re.sub(r'\sAND\s', ' & ', 'Baked Beans And Spam', flags=re.IGNORECASE) 'Baked Beans & Spam' - The pattern may be a string or a :ref:`pattern object `. + The pattern may be a string or a :class:`~re.Pattern`. The optional argument *count* is the maximum number of pattern occurrences to be replaced; *count* must be a non-negative integer. If omitted or zero, all @@ -1011,15 +1030,18 @@ Functions .. versionchanged:: 3.7 Unknown escapes in *repl* consisting of ``'\'`` and an ASCII letter now are errors. - - .. versionchanged:: 3.7 Empty matches for the pattern are replaced when adjacent to a previous non-empty match. .. versionchanged:: 3.12 Group *id* can only contain ASCII digits. - In bytes replacement strings group names must contain only characters - in the ASCII range. + In :class:`bytes` replacement strings, group *name* can only contain bytes + in the ASCII range (``b'\x00'``-``b'\x7f'``). + + .. deprecated:: 3.13 + Passing *count* and *flags* as positional arguments is deprecated. + In future Python versions they will be + :ref:`keyword-only parameters `. .. function:: subn(pattern, repl, string, count=0, flags=0) @@ -1027,12 +1049,6 @@ Functions Perform the same operation as :func:`sub`, but return a tuple ``(new_string, number_of_subs_made)``. - .. versionchanged:: 3.1 - Added the optional flags argument. - - .. versionchanged:: 3.5 - Unmatched groups are replaced with an empty string. - .. function:: escape(pattern) @@ -1113,16 +1129,20 @@ Exceptions Regular Expression Objects -------------------------- -Compiled regular expression objects support the following methods and -attributes: +.. class:: Pattern + + Compiled regular expression object returned by :func:`re.compile`. + + .. versionchanged:: 3.9 + :py:class:`re.Pattern` supports ``[]`` to indicate a Unicode (str) or bytes pattern. + See :ref:`types-genericalias`. .. method:: Pattern.search(string[, pos[, endpos]]) Scan through *string* looking for the first location where this regular - expression produces a match, and return a corresponding :ref:`match object - `. Return ``None`` if no position in the string matches the - pattern; note that this is different from finding a zero-length match at some - point in the string. 
+ expression produces a match, and return a corresponding :class:`~re.Match`. + Return ``None`` if no position in the string matches the pattern; note that + this is different from finding a zero-length match at some point in the string. The optional second parameter *pos* gives an index in the string where the search is to start; it defaults to ``0``. This is not completely equivalent to @@ -1146,9 +1166,9 @@ attributes: .. method:: Pattern.match(string[, pos[, endpos]]) If zero or more characters at the *beginning* of *string* match this regular - expression, return a corresponding :ref:`match object `. - Return ``None`` if the string does not match the pattern; note that this is - different from a zero-length match. + expression, return a corresponding :class:`~re.Match`. Return ``None`` if the + string does not match the pattern; note that this is different from a + zero-length match. The optional *pos* and *endpos* parameters have the same meaning as for the :meth:`~Pattern.search` method. :: @@ -1165,8 +1185,8 @@ attributes: .. method:: Pattern.fullmatch(string[, pos[, endpos]]) If the whole *string* matches this regular expression, return a corresponding - :ref:`match object `. Return ``None`` if the string does not - match the pattern; note that this is different from a zero-length match. + :class:`~re.Match`. Return ``None`` if the string does not match the pattern; + note that this is different from a zero-length match. The optional *pos* and *endpos* parameters have the same meaning as for the :meth:`~Pattern.search` method. :: @@ -1252,8 +1272,13 @@ when there is no match, you can test whether there was a match with a simple if match: process(match) -Match objects support the following methods and attributes: +.. class:: Match + + Match object returned by successful ``match``\ es and ``search``\ es. + .. versionchanged:: 3.9 + :py:class:`re.Match` supports ``[]`` to indicate a Unicode (str) or bytes match. + See :ref:`types-genericalias`. .. method:: Match.expand(template) @@ -1517,14 +1542,14 @@ Simulating scanf() .. index:: single: scanf() -Python does not currently have an equivalent to :c:func:`scanf`. Regular +Python does not currently have an equivalent to :c:func:`!scanf`. Regular expressions are generally more powerful, though also more verbose, than -:c:func:`scanf` format strings. The table below offers some more-or-less -equivalent mappings between :c:func:`scanf` format tokens and regular +:c:func:`!scanf` format strings. The table below offers some more-or-less +equivalent mappings between :c:func:`!scanf` format tokens and regular expressions. +--------------------------------+---------------------------------------------+ -| :c:func:`scanf` Token | Regular Expression | +| :c:func:`!scanf` Token | Regular Expression | +================================+=============================================+ | ``%c`` | ``.`` | +--------------------------------+---------------------------------------------+ @@ -1549,7 +1574,7 @@ To extract the filename and numbers from a string like :: /usr/sbin/sendmail - 0 errors, 4 warnings -you would use a :c:func:`scanf` format like :: +you would use a :c:func:`!scanf` format like :: %s - %d errors, %d warnings @@ -1565,16 +1590,22 @@ search() vs. match() .. sectionauthor:: Fred L. Drake, Jr. 
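Not taken from the patch, but for the sendmail line quoted above, one regular expression that plays the role of that :c:func:`!scanf` format, with the groups pulled out for illustration, is::

   >>> import re
   >>> line = '/usr/sbin/sendmail - 0 errors, 4 warnings'
   >>> m = re.search(r'(\S+) - (\d+) errors, (\d+) warnings', line)
   >>> m.group(1), int(m.group(2)), int(m.group(3))
   ('/usr/sbin/sendmail', 0, 4)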
-Python offers two different primitive operations based on regular expressions: -:func:`re.match` checks for a match only at the beginning of the string, while -:func:`re.search` checks for a match anywhere in the string (this is what Perl -does by default). +Python offers different primitive operations based on regular expressions: + ++ :func:`re.match` checks for a match only at the beginning of the string ++ :func:`re.search` checks for a match anywhere in the string + (this is what Perl does by default) ++ :func:`re.fullmatch` checks for entire string to be a match + For example:: >>> re.match("c", "abcdef") # No match >>> re.search("c", "abcdef") # Match + >>> re.fullmatch("p.*n", "python") # Match + + >>> re.fullmatch("r.*n", "python") # No match Regular expressions beginning with ``'^'`` can be used with :func:`search` to restrict the match at the beginning of the string:: @@ -1588,8 +1619,8 @@ Note however that in :const:`MULTILINE` mode :func:`match` only matches at the beginning of the string, whereas using :func:`search` with a regular expression beginning with ``'^'`` will match at the beginning of each line. :: - >>> re.match('X', 'A\nB\nX', re.MULTILINE) # No match - >>> re.search('^X', 'A\nB\nX', re.MULTILINE) # Match + >>> re.match("X", "A\nB\nX", re.MULTILINE) # No match + >>> re.search("^X", "A\nB\nX", re.MULTILINE) # Match @@ -1634,7 +1665,7 @@ because the address has spaces, our splitting pattern, in it: .. doctest:: :options: +NORMALIZE_WHITESPACE - >>> [re.split(":? ", entry, 3) for entry in entries] + >>> [re.split(":? ", entry, maxsplit=3) for entry in entries] [['Ross', 'McFluff', '834.345.1254', '155 Elm Street'], ['Ronald', 'Heathmore', '892.345.3428', '436 Finley Avenue'], ['Frank', 'Burger', '925.541.7625', '662 South Dogwood Way'], @@ -1647,7 +1678,7 @@ house number from the street name: .. doctest:: :options: +NORMALIZE_WHITESPACE - >>> [re.split(":? ", entry, 4) for entry in entries] + >>> [re.split(":? ", entry, maxsplit=4) for entry in entries] [['Ross', 'McFluff', '834.345.1254', '155', 'Elm Street'], ['Ronald', 'Heathmore', '892.345.3428', '436', 'Finley Avenue'], ['Frank', 'Burger', '925.541.7625', '662', 'South Dogwood Way'], @@ -1666,6 +1697,7 @@ in each word of a sentence except for the first and last characters:: ... inner_word = list(m.group(2)) ... random.shuffle(inner_word) ... return m.group(1) + "".join(inner_word) + m.group(3) + ... >>> text = "Professor Abdolmalek, please report your absences promptly." >>> re.sub(r"(\w)(\w+)(\w)", repl, text) 'Poefsrosr Aealmlobdk, pslaee reorpt your abnseces plmrptoy.' @@ -1690,10 +1722,10 @@ Finding all Adverbs and their Positions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If one wants more information about all matches of a pattern than the matched -text, :func:`finditer` is useful as it provides :ref:`match objects -` instead of strings. Continuing with the previous example, if -a writer wanted to find all of the adverbs *and their positions* in -some text, they would use :func:`finditer` in the following manner:: +text, :func:`finditer` is useful as it provides :class:`~re.Match` objects +instead of strings. Continuing with the previous example, if a writer wanted +to find all of the adverbs *and their positions* in some text, they would use +:func:`finditer` in the following manner:: >>> text = "He was carefully disguised but captured quickly by police." 
>>> for m in re.finditer(r"\w+ly\b", text): diff --git a/Doc/library/readline.rst b/Doc/library/readline.rst index 4d485d25b540209..8fb0eca8df74d89 100644 --- a/Doc/library/readline.rst +++ b/Doc/library/readline.rst @@ -19,7 +19,7 @@ function. Readline keybindings may be configured via an initialization file, typically ``.inputrc`` in your home directory. See `Readline Init File -`_ +`_ in the GNU Readline manual for information about the format and allowable constructs of that file, and the capabilities of the Readline library in general. diff --git a/Doc/library/resource.rst b/Doc/library/resource.rst index e7bf45d7d569fa0..ef65674d1b0a787 100644 --- a/Doc/library/resource.rst +++ b/Doc/library/resource.rst @@ -13,7 +13,7 @@ This module provides basic mechanisms for measuring and controlling system resources utilized by a program. -.. include:: ../includes/wasm-notavail.rst +.. availability:: Unix, not Emscripten, not WASI. Symbolic constants are used to specify particular system resources and to request usage information about either the current process or its children. @@ -244,7 +244,7 @@ platform. used by all of this user id's processes. This limit is enforced only if bit 1 of the vm.overcommit sysctl is set. Please see - `tuning(7) `__ + `tuning(7) `__ for a complete description of this sysctl. .. availability:: FreeBSD. diff --git a/Doc/library/runpy.rst b/Doc/library/runpy.rst index 501f4ddf5a3e3f1..406b080b7be30f9 100644 --- a/Doc/library/runpy.rst +++ b/Doc/library/runpy.rst @@ -30,7 +30,7 @@ The :mod:`runpy` module provides two functions: .. function:: run_module(mod_name, init_globals=None, run_name=None, alter_sys=False) .. index:: - module: __main__ + pair: module; __main__ Execute the code of the specified module and return the resulting module globals dictionary. The module's code is first located using the standard @@ -39,7 +39,7 @@ The :mod:`runpy` module provides two functions: The *mod_name* argument should be an absolute module name. If the module name refers to a package rather than a normal - module, then that package is imported and the ``__main__`` submodule within + module, then that package is imported and the :mod:`__main__` submodule within that package is then executed and the resulting module globals dictionary returned. @@ -74,7 +74,7 @@ The :mod:`runpy` module provides two functions: Note that this manipulation of :mod:`sys` is not thread-safe. Other threads may see the partially initialised module, as well as the altered list of - arguments. It is recommended that the :mod:`sys` module be left alone when + arguments. It is recommended that the ``sys`` module be left alone when invoking this function from threaded code. .. seealso:: @@ -82,7 +82,7 @@ The :mod:`runpy` module provides two functions: command line. .. versionchanged:: 3.1 - Added ability to execute packages by looking for a ``__main__`` submodule. + Added ability to execute packages by looking for a :mod:`__main__` submodule. .. versionchanged:: 3.2 Added ``__cached__`` global variable (see :pep:`3147`). @@ -101,20 +101,21 @@ The :mod:`runpy` module provides two functions: .. function:: run_path(path_name, init_globals=None, run_name=None) .. index:: - module: __main__ + pair: module; __main__ Execute the code at the named filesystem location and return the resulting module globals dictionary. 
As with a script name supplied to the CPython command line, the supplied path may refer to a Python source file, a - compiled bytecode file or a valid sys.path entry containing a ``__main__`` - module (e.g. a zipfile containing a top-level ``__main__.py`` file). + compiled bytecode file or a valid :data:`sys.path` entry containing a + :mod:`__main__` module + (e.g. a zipfile containing a top-level ``__main__.py`` file). For a simple script, the specified code is simply executed in a fresh - module namespace. For a valid sys.path entry (typically a zipfile or + module namespace. For a valid :data:`sys.path` entry (typically a zipfile or directory), the entry is first added to the beginning of ``sys.path``. The function then looks for and executes a :mod:`__main__` module using the updated path. Note that there is no special protection against invoking - an existing :mod:`__main__` entry located elsewhere on ``sys.path`` if + an existing ``__main__`` entry located elsewhere on ``sys.path`` if there is no such module at the specified location. The optional dictionary argument *init_globals* may be used to pre-populate @@ -137,14 +138,14 @@ The :mod:`runpy` module provides two functions: supplied path, and ``__spec__``, ``__cached__``, ``__loader__`` and ``__package__`` will all be set to :const:`None`. - If the supplied path is a reference to a valid sys.path entry, then - ``__spec__`` will be set appropriately for the imported ``__main__`` + If the supplied path is a reference to a valid :data:`sys.path` entry, then + ``__spec__`` will be set appropriately for the imported :mod:`__main__` module (that is, ``__spec__.name`` will always be ``__main__``). ``__file__``, ``__cached__``, ``__loader__`` and ``__package__`` will be :ref:`set as normal ` based on the module spec. A number of alterations are also made to the :mod:`sys` module. Firstly, - ``sys.path`` may be altered as described above. ``sys.argv[0]`` is updated + :data:`sys.path` may be altered as described above. ``sys.argv[0]`` is updated with the value of ``path_name`` and ``sys.modules[__name__]`` is updated with a temporary module object for the module being executed. All modifications to items in :mod:`sys` are reverted before the function @@ -152,7 +153,7 @@ The :mod:`runpy` module provides two functions: Note that, unlike :func:`run_module`, the alterations made to :mod:`sys` are not optional in this function as these adjustments are essential to - allowing the execution of sys.path entries. As the thread-safety + allowing the execution of :data:`sys.path` entries. As the thread-safety limitations still apply, use of this function in threaded code should be either serialised with the import lock or delegated to a separate process. @@ -165,7 +166,7 @@ The :mod:`runpy` module provides two functions: .. versionchanged:: 3.4 Updated to take advantage of the module spec feature added by :pep:`451`. This allows ``__cached__`` to be set correctly in the - case where ``__main__`` is imported from a valid sys.path entry rather + case where ``__main__`` is imported from a valid :data:`sys.path` entry rather than being executed directly. .. versionchanged:: 3.12 diff --git a/Doc/library/sched.rst b/Doc/library/sched.rst index a4ba2848f11ddea..01bac5afd0b9b38 100644 --- a/Doc/library/sched.rst +++ b/Doc/library/sched.rst @@ -36,7 +36,7 @@ scheduler: Example:: >>> import sched, time - >>> s = sched.scheduler(time.time, time.sleep) + >>> s = sched.scheduler(time.monotonic, time.sleep) >>> def print_time(a='default'): ... 
print("From print_time", time.time(), a) ... @@ -44,16 +44,22 @@ Example:: ... print(time.time()) ... s.enter(10, 1, print_time) ... s.enter(5, 2, print_time, argument=('positional',)) + ... # despite having higher priority, 'keyword' runs after 'positional' as enter() is relative ... s.enter(5, 1, print_time, kwargs={'a': 'keyword'}) + ... s.enterabs(1_650_000_000, 10, print_time, argument=("first enterabs",)) + ... s.enterabs(1_650_000_000, 5, print_time, argument=("second enterabs",)) ... s.run() ... print(time.time()) ... >>> print_some_times() - 930343690.257 - From print_time 930343695.274 positional - From print_time 930343695.275 keyword - From print_time 930343700.273 default - 930343700.276 + 1652342830.3640375 + From print_time 1652342830.3642538 second enterabs + From print_time 1652342830.3643398 first enterabs + From print_time 1652342835.3694863 positional + From print_time 1652342835.3696074 keyword + From print_time 1652342840.369612 default + 1652342840.3697174 + .. _scheduler-objects: @@ -109,7 +115,7 @@ Scheduler Objects .. method:: scheduler.run(blocking=True) - Run all scheduled events. This method will wait (using the :func:`delayfunc` + Run all scheduled events. This method will wait (using the *delayfunc* function passed to the constructor) for the next event, then execute it and so on until there are no more scheduled events. diff --git a/Doc/library/secrets.rst b/Doc/library/secrets.rst index dc8e5f46fb581ea..4405dfc0535973e 100644 --- a/Doc/library/secrets.rst +++ b/Doc/library/secrets.rst @@ -128,7 +128,9 @@ Other functions .. function:: compare_digest(a, b) - Return ``True`` if strings *a* and *b* are equal, otherwise ``False``, + Return ``True`` if strings or + :term:`bytes-like objects ` + *a* and *b* are equal, otherwise ``False``, using a "constant-time compare" to reduce the risk of `timing attacks `_. See :func:`hmac.compare_digest` for additional details. diff --git a/Doc/library/security_warnings.rst b/Doc/library/security_warnings.rst index 284f36583206236..a573c98f73eb0a0 100644 --- a/Doc/library/security_warnings.rst +++ b/Doc/library/security_warnings.rst @@ -9,7 +9,6 @@ The following modules have specific security considerations: * :mod:`base64`: :ref:`base64 security considerations ` in :rfc:`4648` -* :mod:`cgi`: :ref:`CGI security considerations ` * :mod:`hashlib`: :ref:`all constructors take a "usedforsecurity" keyword-only argument disabling known insecure and blocked algorithms ` diff --git a/Doc/library/select.rst b/Doc/library/select.rst index 2890706bab729c5..c2941e628d9d789 100644 --- a/Doc/library/select.rst +++ b/Doc/library/select.rst @@ -6,10 +6,10 @@ -------------- -This module provides access to the :c:func:`select` and :c:func:`poll` functions -available in most operating systems, :c:func:`devpoll` available on -Solaris and derivatives, :c:func:`epoll` available on Linux 2.5+ and -:c:func:`kqueue` available on most BSD. +This module provides access to the :c:func:`!select` and :c:func:`!poll` functions +available in most operating systems, :c:func:`!devpoll` available on +Solaris and derivatives, :c:func:`!epoll` available on Linux 2.5+ and +:c:func:`!kqueue` available on most BSD. Note that on Windows, it only works for sockets; on other operating systems, it also works for other file types (in particular, on Unix, it works on pipes). 
It cannot be used on regular files to determine whether a file has grown since @@ -41,10 +41,10 @@ The module defines the following: polling object; see section :ref:`devpoll-objects` below for the methods supported by devpoll objects. - :c:func:`devpoll` objects are linked to the number of file + :c:func:`!devpoll` objects are linked to the number of file descriptors allowed at the time of instantiation. If your program - reduces this value, :c:func:`devpoll` will fail. If your program - increases this value, :c:func:`devpoll` may return an + reduces this value, :c:func:`!devpoll` will fail. If your program + increases this value, :c:func:`!devpoll` may return an incomplete list of active file descriptors. The new file descriptor is :ref:`non-inheritable `. @@ -62,7 +62,7 @@ The module defines the following: *sizehint* informs epoll about the expected number of events to be registered. It must be positive, or ``-1`` to use the default. It is only - used on older systems where :c:func:`epoll_create1` is not available; + used on older systems where :c:func:`!epoll_create1` is not available; otherwise it has no effect (though its value is still checked). *flags* is deprecated and completely ignored. However, when supplied, its @@ -117,7 +117,7 @@ The module defines the following: .. function:: select(rlist, wlist, xlist[, timeout]) - This is a straightforward interface to the Unix :c:func:`select` system call. + This is a straightforward interface to the Unix :c:func:`!select` system call. The first three arguments are iterables of 'waitable objects': either integers representing file descriptors or objects with a parameterless method named :meth:`~io.IOBase.fileno` returning such an integer: @@ -154,7 +154,7 @@ The module defines the following: .. index:: single: WinSock File objects on Windows are not acceptable, but sockets are. On Windows, - the underlying :c:func:`select` function is provided by the WinSock + the underlying :c:func:`!select` function is provided by the WinSock library, and does not handle file descriptors that don't originate from WinSock. @@ -169,7 +169,7 @@ The module defines the following: The minimum number of bytes which can be written without blocking to a pipe when the pipe has been reported as ready for writing by :func:`~select.select`, - :func:`poll` or another interface in this module. This doesn't apply + :func:`!poll` or another interface in this module. This doesn't apply to other kind of file-like objects such as sockets. This value is guaranteed by POSIX to be at least 512. @@ -184,11 +184,11 @@ The module defines the following: ``/dev/poll`` Polling Objects ----------------------------- -Solaris and derivatives have ``/dev/poll``. While :c:func:`select` is -O(highest file descriptor) and :c:func:`poll` is O(number of file +Solaris and derivatives have ``/dev/poll``. While :c:func:`!select` is +O(highest file descriptor) and :c:func:`!poll` is O(number of file descriptors), ``/dev/poll`` is O(active file descriptors). -``/dev/poll`` behaviour is very close to the standard :c:func:`poll` +``/dev/poll`` behaviour is very close to the standard :c:func:`!poll` object. @@ -222,7 +222,7 @@ object. implement :meth:`!fileno`, so they can also be used as the argument. *eventmask* is an optional bitmask describing the type of events you want to - check for. The constants are the same that with :c:func:`poll` + check for. The constants are the same that with :c:func:`!poll` object. 
The default value is a combination of the constants :const:`POLLIN`, :const:`POLLPRI`, and :const:`POLLOUT`. @@ -231,7 +231,7 @@ object. Registering a file descriptor that's already registered is not an error, but the result is undefined. The appropriate action is to unregister or modify it first. This is an important difference - compared with :c:func:`poll`. + compared with :c:func:`!poll`. .. method:: devpoll.modify(fd[, eventmask]) @@ -376,13 +376,13 @@ Edge and Level Trigger Polling (epoll) Objects Polling Objects --------------- -The :c:func:`poll` system call, supported on most Unix systems, provides better +The :c:func:`!poll` system call, supported on most Unix systems, provides better scalability for network servers that service many, many clients at the same -time. :c:func:`poll` scales better because the system call only requires listing -the file descriptors of interest, while :c:func:`select` builds a bitmap, turns +time. :c:func:`!poll` scales better because the system call only requires listing +the file descriptors of interest, while :c:func:`!select` builds a bitmap, turns on bits for the fds of interest, and then afterward the whole bitmap has to be -linearly scanned again. :c:func:`select` is O(highest file descriptor), while -:c:func:`poll` is O(number of file descriptors). +linearly scanned again. :c:func:`!select` is O(highest file descriptor), while +:c:func:`!poll` is O(number of file descriptors). .. method:: poll.register(fd[, eventmask]) @@ -505,7 +505,7 @@ Kqueue Objects Kevent Objects -------------- -https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 +https://man.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 .. attribute:: kevent.ident diff --git a/Doc/library/selectors.rst b/Doc/library/selectors.rst index 0deb15cf4c50376..76cbf91412f7633 100644 --- a/Doc/library/selectors.rst +++ b/Doc/library/selectors.rst @@ -21,7 +21,7 @@ It defines a :class:`BaseSelector` abstract base class, along with several concrete implementations (:class:`KqueueSelector`, :class:`EpollSelector`...), that can be used to wait for I/O readiness notification on multiple file objects. In the following, "file object" refers to any object with a -:meth:`fileno()` method, or a raw file descriptor. See :term:`file object`. +:meth:`~io.IOBase.fileno` method, or a raw file descriptor. See :term:`file object`. :class:`DefaultSelector` is an alias to the most efficient implementation available on the current platform: this should be the default choice for most @@ -60,9 +60,9 @@ constants below: +-----------------------+-----------------------------------------------+ | Constant | Meaning | +=======================+===============================================+ - | :const:`EVENT_READ` | Available for read | + | .. data:: EVENT_READ | Available for read | +-----------------------+-----------------------------------------------+ - | :const:`EVENT_WRITE` | Available for write | + | .. data:: EVENT_WRITE | Available for write | +-----------------------+-----------------------------------------------+ @@ -132,8 +132,8 @@ constants below: Change a registered file object's monitored events or attached data. - This is equivalent to :meth:`BaseSelector.unregister(fileobj)` followed - by :meth:`BaseSelector.register(fileobj, events, data)`, except that it + This is equivalent to ``BaseSelector.unregister(fileobj)`` followed + by ``BaseSelector.register(fileobj, events, data)``, except that it can be implemented more efficiently. 
This returns a new :class:`SelectorKey` instance, or raises a diff --git a/Doc/library/shelve.rst b/Doc/library/shelve.rst index a50fc6f0bf77b20..219219af6fd87f1 100644 --- a/Doc/library/shelve.rst +++ b/Doc/library/shelve.rst @@ -6,7 +6,7 @@ **Source code:** :source:`Lib/shelve.py` -.. index:: module: pickle +.. index:: pair: module; pickle -------------- @@ -25,7 +25,7 @@ lots of shared sub-objects. The keys are ordinary strings. database file is opened for reading and writing. The optional *flag* parameter has the same interpretation as the *flag* parameter of :func:`dbm.open`. - By default, pickles created with :data:`pickle.DEFAULT_PROTOCOL` are used + By default, pickles created with :const:`pickle.DEFAULT_PROTOCOL` are used to serialize values. The version of the pickle protocol can be specified with the *protocol* parameter. @@ -42,7 +42,7 @@ lots of shared sub-objects. The keys are ordinary strings. mutated). .. versionchanged:: 3.10 - :data:`pickle.DEFAULT_PROTOCOL` is now used as the default pickle + :const:`pickle.DEFAULT_PROTOCOL` is now used as the default pickle protocol. .. versionchanged:: 3.11 @@ -94,9 +94,9 @@ Two additional methods are supported: Restrictions ------------ - .. index:: - module: dbm.ndbm - module: dbm.gnu +.. index:: + pair: module; dbm.ndbm + pair: module; dbm.gnu * The choice of which database package will be used (such as :mod:`dbm.ndbm` or :mod:`dbm.gnu`) depends on which interface is available. Therefore it is not @@ -119,7 +119,7 @@ Restrictions A subclass of :class:`collections.abc.MutableMapping` which stores pickled values in the *dict* object. - By default, pickles created with :data:`pickle.DEFAULT_PROTOCOL` are used + By default, pickles created with :const:`pickle.DEFAULT_PROTOCOL` are used to serialize values. The version of the pickle protocol can be specified with the *protocol* parameter. See the :mod:`pickle` documentation for a discussion of the pickle protocols. @@ -143,7 +143,7 @@ Restrictions Added context manager support. .. versionchanged:: 3.10 - :data:`pickle.DEFAULT_PROTOCOL` is now used as the default pickle + :const:`pickle.DEFAULT_PROTOCOL` is now used as the default pickle protocol. diff --git a/Doc/library/shlex.rst b/Doc/library/shlex.rst index 0bad51833aae131..f94833ad5331a94 100644 --- a/Doc/library/shlex.rst +++ b/Doc/library/shlex.rst @@ -30,12 +30,6 @@ The :mod:`shlex` module defines the following functions: in POSIX mode by default, but uses non-POSIX mode if the *posix* argument is false. - .. note:: - - Since the :func:`split` function instantiates a :class:`~shlex.shlex` - instance, passing ``None`` for *s* will read the string to split from - standard input. - .. versionchanged:: 3.12 Passing ``None`` for *s* argument now raises an exception, rather than reading :data:`sys.stdin`. diff --git a/Doc/library/shutil.rst b/Doc/library/shutil.rst index b33dbe21b1fa197..d1949d698f56144 100644 --- a/Doc/library/shutil.rst +++ b/Doc/library/shutil.rst @@ -292,15 +292,15 @@ Directory and files operations .. versionadded:: 3.8 The *dirs_exist_ok* parameter. -.. function:: rmtree(path, ignore_errors=False, onerror=None, *, dir_fd=None) +.. function:: rmtree(path, ignore_errors=False, onerror=None, *, onexc=None, dir_fd=None) .. index:: single: directory; deleting Delete an entire directory tree; *path* must point to a directory (but not a symbolic link to a directory). 
If *ignore_errors* is true, errors resulting from failed removals will be ignored; if false or omitted, such errors are - handled by calling a handler specified by *onerror* or, if that is omitted, - they raise an exception. + handled by calling a handler specified by *onexc* or *onerror* or, if both + are omitted, exceptions are propagated to the caller. This function can support :ref:`paths relative to directory descriptors `. @@ -315,14 +315,17 @@ Directory and files operations otherwise. Applications can use the :data:`rmtree.avoids_symlink_attacks` function attribute to determine which case applies. - If *onerror* is provided, it must be a callable that accepts three - parameters: *function*, *path*, and *excinfo*. + If *onexc* is provided, it must be a callable that accepts three parameters: + *function*, *path*, and *excinfo*. The first parameter, *function*, is the function which raised the exception; it depends on the platform and implementation. The second parameter, *path*, will be the path name passed to *function*. The third parameter, - *excinfo*, will be the exception information returned by - :func:`sys.exc_info`. Exceptions raised by *onerror* will not be caught. + *excinfo*, is the exception that was raised. Exceptions raised by *onexc* + will not be caught. + + The deprecated *onerror* is similar to *onexc*, except that the third + parameter it receives is the tuple returned from :func:`sys.exc_info`. .. audit-event:: shutil.rmtree path,dir_fd shutil.rmtree @@ -337,6 +340,9 @@ Directory and files operations .. versionchanged:: 3.11 The *dir_fd* parameter. + .. versionchanged:: 3.12 + Added the *onexc* parameter, deprecated *onerror*. + .. attribute:: rmtree.avoids_symlink_attacks Indicates whether the current platform and implementation provides a @@ -363,7 +369,7 @@ Directory and files operations If *copy_function* is given, it must be a callable that takes two arguments *src* and *dst*, and will be used to copy *src* to *dst* if :func:`os.rename` cannot be used. If the source is a directory, - :func:`copytree` is called, passing it the :func:`copy_function`. The + :func:`copytree` is called, passing it the *copy_function*. The default *copy_function* is :func:`copy2`. Using :func:`~shutil.copy` as the *copy_function* allows the move to succeed when it is not possible to also copy the metadata, at the expense of not copying any of the metadata. @@ -393,6 +399,12 @@ Directory and files operations total, used and free space, in bytes. *path* may be a file or a directory. + .. note:: + + On Unix filesystems, *path* must point to a path within a **mounted** + filesystem partition. On those platforms, CPython doesn't attempt to + retrieve disk usage information from non-mounted filesystems. + .. versionadded:: 3.3 .. versionchanged:: 3.8 @@ -425,25 +437,51 @@ Directory and files operations determining if the file exists and executable. When no *path* is specified, the results of :func:`os.environ` are used, - returning either the "PATH" value or a fallback of :attr:`os.defpath`. + returning either the "PATH" value or a fallback of :data:`os.defpath`. - On Windows, the current directory is always prepended to the *path* whether - or not you use the default or provide your own, which is the behavior the - command shell uses when finding executables. Additionally, when finding the - *cmd* in the *path*, the ``PATHEXT`` environment variable is checked. 
For - example, if you call ``shutil.which("python")``, :func:`which` will search - ``PATHEXT`` to know that it should look for ``python.exe`` within the *path* - directories. For example, on Windows:: + On Windows, the current directory is prepended to the *path* if *mode* does + not include ``os.X_OK``. When the *mode* does include ``os.X_OK``, the + Windows API ``NeedCurrentDirectoryForExePathW`` will be consulted to + determine if the current directory should be prepended to *path*. To avoid + consulting the current working directory for executables: set the environment + variable ``NoDefaultCurrentDirectoryInExePath``. + + Also on Windows, the ``PATHEXT`` variable is used to resolve commands + that may not already include an extension. For example, if you call + ``shutil.which("python")``, :func:`which` will search ``PATHEXT`` + to know that it should look for ``python.exe`` within the *path* + directories. For example, on Windows:: >>> shutil.which("python") 'C:\\Python33\\python.EXE' + This is also applied when *cmd* is a path that contains a directory + component:: + + >> shutil.which("C:\\Python33\\python") + 'C:\\Python33\\python.EXE' + .. versionadded:: 3.3 .. versionchanged:: 3.8 The :class:`bytes` type is now accepted. If *cmd* type is :class:`bytes`, the result type is also :class:`bytes`. + .. versionchanged:: 3.12 + On Windows, the current directory is no longer prepended to the search + path if *mode* includes ``os.X_OK`` and WinAPI + ``NeedCurrentDirectoryForExePathW(cmd)`` is false, else the current + directory is prepended even if it is already in the search path; + ``PATHEXT`` is used now even when *cmd* includes a directory component + or ends with an extension that is in ``PATHEXT``; and filenames that + have no extension can now be found. + + .. versionchanged:: 3.12.1 + On Windows, if *mode* includes ``os.X_OK``, executables with an + extension in ``PATHEXT`` will be preferred over executables without a + matching extension. + This brings behavior closer to that of Python 3.11. + .. exception:: Error This exception collects exceptions that are raised during a multi-file @@ -509,7 +547,7 @@ rmtree example ~~~~~~~~~~~~~~ This example shows how to remove a directory tree on Windows where some -of the files have their read-only bit set. It uses the onerror callback +of the files have their read-only bit set. It uses the onexc callback to clear the readonly bit and reattempt the remove. Any subsequent failure will propagate. :: @@ -521,7 +559,7 @@ will propagate. :: os.chmod(path, stat.S_IWRITE) func(path) - shutil.rmtree(directory, onerror=remove_readonly) + shutil.rmtree(directory, onexc=remove_readonly) .. _archiving-operations: @@ -636,7 +674,7 @@ provided. They rely on the :mod:`zipfile` and :mod:`tarfile` modules. Remove the archive format *name* from the list of supported formats. -.. function:: unpack_archive(filename[, extract_dir[, format]]) +.. function:: unpack_archive(filename[, extract_dir[, format[, filter]]]) Unpack an archive. *filename* is the full path of the archive. @@ -650,6 +688,14 @@ provided. They rely on the :mod:`zipfile` and :mod:`tarfile` modules. registered for that extension. In case none is found, a :exc:`ValueError` is raised. + The keyword-only *filter* argument is passed to the underlying unpacking + function. For zip files, *filter* is not accepted. + For tar files, it is recommended to set it to ``'data'``, + unless using features specific to tar and UNIX-like filesystems. + (See :ref:`tarfile-extraction-filter` for details.) 
+ The ``'data'`` filter will become the default for tar files + in Python 3.14. + .. audit-event:: shutil.unpack_archive filename,extract_dir,format shutil.unpack_archive .. warning:: @@ -662,6 +708,9 @@ provided. They rely on the :mod:`zipfile` and :mod:`tarfile` modules. .. versionchanged:: 3.7 Accepts a :term:`path-like object` for *filename* and *extract_dir*. + .. versionchanged:: 3.12 + Added the *filter* argument. + .. function:: register_unpack_format(name, extensions, function[, extra_args[, description]]) Registers an unpack format. *name* is the name of the format and @@ -669,11 +718,14 @@ provided. They rely on the :mod:`zipfile` and :mod:`tarfile` modules. ``.zip`` for Zip files. *function* is the callable that will be used to unpack archives. The - callable will receive the path of the archive, followed by the directory - the archive must be extracted to. - - When provided, *extra_args* is a sequence of ``(name, value)`` tuples that - will be passed as keywords arguments to the callable. + callable will receive: + + - the path of the archive, as a positional argument; + - the directory the archive must be extracted to, as a positional argument; + - possibly a *filter* keyword argument, if it was given to + :func:`unpack_archive`; + - additional keyword arguments, specified by *extra_args* as a sequence + of ``(name, value)`` tuples. *description* can be provided to describe the format, and will be returned by the :func:`get_unpack_formats` function. diff --git a/Doc/library/signal.rst b/Doc/library/signal.rst index 2269f50cbaff991..7ee5ece88598250 100644 --- a/Doc/library/signal.rst +++ b/Doc/library/signal.rst @@ -4,6 +4,8 @@ .. module:: signal :synopsis: Set handlers for asynchronous events. +**Source code:** :source:`Lib/signal.py` + -------------- This module provides mechanisms to use signal handlers in Python. @@ -362,9 +364,9 @@ The :mod:`signal` module defines the following functions: .. function:: strsignal(signalnum) - Return the system description of the signal *signalnum*, such as - "Interrupt", "Segmentation fault", etc. Returns :const:`None` if the signal - is not recognized. + Returns the description of signal *signalnum*, such as "Interrupt" + for :const:`SIGINT`. Returns :const:`None` if *signalnum* has no + description. Raises :exc:`ValueError` if *signalnum* is invalid. .. versionadded:: 3.8 @@ -560,7 +562,7 @@ The :mod:`signal` module defines the following functions: Note that installing a signal handler with :func:`signal` will reset the restart behaviour to interruptible by implicitly calling - :c:func:`siginterrupt` with a true *flag* value for the given signal. + :c:func:`!siginterrupt` with a true *flag* value for the given signal. .. function:: signal(signalnum, handler) @@ -654,7 +656,7 @@ The :mod:`signal` module defines the following functions: .. function:: sigtimedwait(sigset, timeout) Like :func:`sigwaitinfo`, but takes an additional *timeout* argument - specifying a timeout. If *timeout* is specified as :const:`0`, a poll is + specifying a timeout. If *timeout* is specified as ``0``, a poll is performed. Returns :const:`None` if a timeout occurs. .. availability:: Unix. diff --git a/Doc/library/site.rst b/Doc/library/site.rst index 5941739ee6942f7..2dc9fb09d727e2c 100644 --- a/Doc/library/site.rst +++ b/Doc/library/site.rst @@ -19,7 +19,7 @@ Importing this module will append site-specific paths to the module search path and add a few builtins, unless :option:`-S` was used. 
In that case, this module can be safely imported with no automatic modifications to the module search path or additions to the builtins. To explicitly trigger the usual site-specific -additions, call the :func:`site.main` function. +additions, call the :func:`main` function. .. versionchanged:: 3.3 Importing the module used to trigger paths manipulation even when using @@ -51,7 +51,7 @@ searched for site-packages; otherwise they will. .. index:: single: # (hash); comment - statement: import + pair: statement; import A path configuration file is a file whose name has the form :file:`{name}.pth` and exists in one of the four directories mentioned above; its contents are @@ -109,32 +109,40 @@ directory precedes the :file:`foo` directory because :file:`bar.pth` comes alphabetically before :file:`foo.pth`; and :file:`spam` is omitted because it is not mentioned in either path configuration file. -.. index:: module: sitecustomize +:mod:`sitecustomize` +-------------------- + +.. module:: sitecustomize After these path manipulations, an attempt is made to import a module named :mod:`sitecustomize`, which can perform arbitrary site-specific customizations. It is typically created by a system administrator in the site-packages directory. If this import fails with an :exc:`ImportError` or its subclass -exception, and the exception's :attr:`name` attribute equals to ``'sitecustomize'``, +exception, and the exception's :attr:`~ImportError.name` +attribute equals to ``'sitecustomize'``, it is silently ignored. If Python is started without output streams available, as with :file:`pythonw.exe` on Windows (which is used by default to start IDLE), attempted output from :mod:`sitecustomize` is ignored. Any other exception causes a silent and perhaps mysterious failure of the process. -.. index:: module: usercustomize +:mod:`usercustomize` +-------------------- + +.. module:: usercustomize After this, an attempt is made to import a module named :mod:`usercustomize`, which can perform arbitrary user-specific customizations, if -:data:`ENABLE_USER_SITE` is true. This file is intended to be created in the +:data:`~site.ENABLE_USER_SITE` is true. This file is intended to be created in the user site-packages directory (see below), which is part of ``sys.path`` unless disabled by :option:`-s`. If this import fails with an :exc:`ImportError` or -its subclass exception, and the exception's :attr:`name` attribute equals to -``'usercustomize'``, it is silently ignored. +its subclass exception, and the exception's :attr:`~ImportError.name` +attribute equals to ``'usercustomize'``, it is silently ignored. Note that for some non-Unix systems, ``sys.prefix`` and ``sys.exec_prefix`` are empty, and the path manipulations are skipped; however the import of :mod:`sitecustomize` and :mod:`usercustomize` is still attempted. +.. currentmodule:: site .. _rlcompleter-config: @@ -189,9 +197,9 @@ Module contents :func:`getuserbase` hasn't been called yet. Default value is :file:`~/.local` for UNIX and macOS non-framework builds, :file:`~/Library/Python/{X.Y}` for macOS framework builds, and - :file:`{%APPDATA%}\\Python` for Windows. This value is used by Distutils to + :file:`{%APPDATA%}\\Python` for Windows. This value is used to compute the installation directories for scripts, data files, Python modules, - etc. for the :ref:`user installation scheme `. + etc. for the :ref:`user installation scheme `. See also :envvar:`PYTHONUSERBASE`. @@ -250,19 +258,19 @@ command line: .. 
code-block:: shell-session - $ python3 -m site --user-site - /home/user/.local/lib/python3.3/site-packages + $ python -m site --user-site + /home/user/.local/lib/python3.11/site-packages If it is called without arguments, it will print the contents of :data:`sys.path` on the standard output, followed by the value of :data:`USER_BASE` and whether the directory exists, then the same thing for :data:`USER_SITE`, and finally the value of :data:`ENABLE_USER_SITE`. -.. cmdoption:: --user-base +.. option:: --user-base Print the path to the user base directory. -.. cmdoption:: --user-site +.. option:: --user-site Print the path to the user site-packages directory. diff --git a/Doc/library/smtplib.rst b/Doc/library/smtplib.rst index 4e50ad1568ed097..aaec2aa1ef1dbe4 100644 --- a/Doc/library/smtplib.rst +++ b/Doc/library/smtplib.rst @@ -25,7 +25,7 @@ Protocol) and :rfc:`1869` (SMTP Service Extensions). An :class:`SMTP` instance encapsulates an SMTP connection. It has methods that support a full repertoire of SMTP and ESMTP operations. If the optional - host and port parameters are given, the SMTP :meth:`connect` method is + *host* and *port* parameters are given, the SMTP :meth:`connect` method is called with those parameters during initialization. If specified, *local_hostname* is used as the FQDN of the local host in the HELO/EHLO command. Otherwise, the local hostname is found using @@ -34,12 +34,12 @@ Protocol) and :rfc:`1869` (SMTP Service Extensions). *timeout* parameter specifies a timeout in seconds for blocking operations like the connection attempt (if not specified, the global default timeout setting will be used). If the timeout expires, :exc:`TimeoutError` is - raised. The optional source_address parameter allows binding + raised. The optional *source_address* parameter allows binding to some specific source address in a machine with multiple network interfaces, and/or to some specific source TCP port. It takes a 2-tuple - (host, port), for the socket to bind to as its source address before - connecting. If omitted (or if host or port are ``''`` and/or 0 respectively) - the OS default behavior will be used. + ``(host, port)``, for the socket to bind to as its source address before + connecting. If omitted (or if *host* or *port* are ``''`` and/or ``0`` + respectively) the OS default behavior will be used. For normal use, you should only require the initialization/connect, :meth:`sendmail`, and :meth:`SMTP.quit` methods. @@ -66,18 +66,17 @@ Protocol) and :rfc:`1869` (SMTP Service Extensions). Support for the :keyword:`with` statement was added. .. versionchanged:: 3.3 - source_address argument was added. + *source_address* argument was added. .. versionadded:: 3.5 The SMTPUTF8 extension (:rfc:`6531`) is now supported. .. versionchanged:: 3.9 If the *timeout* parameter is set to be zero, it will raise a - :class:`ValueError` to prevent the creation of a non-blocking socket + :class:`ValueError` to prevent the creation of a non-blocking socket. -.. class:: SMTP_SSL(host='', port=0, local_hostname=None, keyfile=None, \ - certfile=None [, timeout], context=None, \ - source_address=None) +.. class:: SMTP_SSL(host='', port=0, local_hostname=None, * [, timeout], \ + context=None, source_address=None) An :class:`SMTP_SSL` instance behaves exactly the same as instances of :class:`SMTP`. :class:`SMTP_SSL` should be used for situations where SSL is @@ -90,39 +89,31 @@ Protocol) and :rfc:`1869` (SMTP Service Extensions). aspects of the secure connection. 
Please read :ref:`ssl-security` for best practices. - *keyfile* and *certfile* are a legacy alternative to *context*, and can - point to a PEM formatted private key and certificate chain file for the - SSL connection. - .. versionchanged:: 3.3 *context* was added. .. versionchanged:: 3.3 - source_address argument was added. + The *source_address* argument was added. .. versionchanged:: 3.4 The class now supports hostname check with :attr:`ssl.SSLContext.check_hostname` and *Server Name Indication* (see - :data:`ssl.HAS_SNI`). - - .. deprecated:: 3.6 - - *keyfile* and *certfile* are deprecated in favor of *context*. - Please use :meth:`ssl.SSLContext.load_cert_chain` instead, or let - :func:`ssl.create_default_context` select the system's trusted CA - certificates for you. + :const:`ssl.HAS_SNI`). .. versionchanged:: 3.9 If the *timeout* parameter is set to be zero, it will raise a :class:`ValueError` to prevent the creation of a non-blocking socket + .. versionchanged:: 3.12 + The deprecated *keyfile* and *certfile* parameters have been removed. + .. class:: LMTP(host='', port=LMTP_PORT, local_hostname=None, \ source_address=None[, timeout]) The LMTP protocol, which is very similar to ESMTP, is heavily based on the standard SMTP client. It's common to use Unix sockets for LMTP, so our :meth:`connect` method must support that as well as a regular host:port - server. The optional arguments local_hostname and source_address have the + server. The optional arguments *local_hostname* and *source_address* have the same meaning as they do in the :class:`SMTP` class. To specify a Unix socket, you must use an absolute path for *host*, starting with a '/'. @@ -360,7 +351,7 @@ An :class:`SMTP` instance has the following methods: be used as argument to the ``AUTH`` command; the valid values are those listed in the ``auth`` element of :attr:`esmtp_features`. - *authobject* must be a callable object taking an optional single argument: + *authobject* must be a callable object taking an optional single argument:: data = authobject(challenge=None) @@ -393,7 +384,7 @@ An :class:`SMTP` instance has the following methods: .. versionadded:: 3.5 -.. method:: SMTP.starttls(keyfile=None, certfile=None, context=None) +.. method:: SMTP.starttls(*, context=None) Put the SMTP connection in TLS (Transport Layer Security) mode. All SMTP commands that follow will be encrypted. You should then call :meth:`ehlo` @@ -409,12 +400,8 @@ An :class:`SMTP` instance has the following methods: If there has been no previous ``EHLO`` or ``HELO`` command this session, this method tries ESMTP ``EHLO`` first. - .. deprecated:: 3.6 - - *keyfile* and *certfile* are deprecated in favor of *context*. - Please use :meth:`ssl.SSLContext.load_cert_chain` instead, or let - :func:`ssl.create_default_context` select the system's trusted CA - certificates for you. + .. versionchanged:: 3.12 + The deprecated *keyfile* and *certfile* parameters have been removed. :exc:`SMTPHeloError` The server didn't reply properly to the ``HELO`` greeting. @@ -431,7 +418,7 @@ An :class:`SMTP` instance has the following methods: .. versionchanged:: 3.4 The method now supports hostname check with :attr:`SSLContext.check_hostname` and *Server Name Indicator* (see - :data:`~ssl.HAS_SNI`). + :const:`~ssl.HAS_SNI`). .. 
versionchanged:: 3.5 The error raised for lack of STARTTLS support is now the diff --git a/Doc/library/sndhdr.rst b/Doc/library/sndhdr.rst deleted file mode 100644 index fa9323e18dc3483..000000000000000 --- a/Doc/library/sndhdr.rst +++ /dev/null @@ -1,104 +0,0 @@ -:mod:`sndhdr` --- Determine type of sound file -============================================== - -.. module:: sndhdr - :synopsis: Determine type of a sound file. - :deprecated: - -.. sectionauthor:: Fred L. Drake, Jr. -.. Based on comments in the module source file. - -**Source code:** :source:`Lib/sndhdr.py` - -.. index:: - single: A-LAW - single: u-LAW - -.. deprecated-removed:: 3.11 3.13 - The :mod:`sndhdr` module is deprecated - (see :pep:`PEP 594 <594#sndhdr>` for details and alternatives). - --------------- - -The :mod:`sndhdr` provides utility functions which attempt to determine the type -of sound data which is in a file. When these functions are able to determine -what type of sound data is stored in a file, they return a -:func:`~collections.namedtuple`, containing five attributes: (``filetype``, -``framerate``, ``nchannels``, ``nframes``, ``sampwidth``). The value for *type* -indicates the data type and will be one of the strings ``'aifc'``, ``'aiff'``, -``'au'``, ``'hcom'``, ``'sndr'``, ``'sndt'``, ``'voc'``, ``'wav'``, ``'8svx'``, -``'sb'``, ``'ub'``, or ``'ul'``. The *sampling_rate* will be either the actual -value or ``0`` if unknown or difficult to decode. Similarly, *channels* will be -either the number of channels or ``0`` if it cannot be determined or if the -value is difficult to decode. The value for *frames* will be either the number -of frames or ``-1``. The last item in the tuple, *bits_per_sample*, will either -be the sample size in bits or ``'A'`` for A-LAW or ``'U'`` for u-LAW. - - -.. function:: what(filename) - - Determines the type of sound data stored in the file *filename* using - :func:`whathdr`. If it succeeds, returns a namedtuple as described above, otherwise - ``None`` is returned. - - .. versionchanged:: 3.5 - Result changed from a tuple to a namedtuple. - - -.. function:: whathdr(filename) - - Determines the type of sound data stored in a file based on the file header. - The name of the file is given by *filename*. This function returns a namedtuple as - described above on success, or ``None``. - - .. versionchanged:: 3.5 - Result changed from a tuple to a namedtuple. 
- -The following sound header types are recognized, as listed below with the return value -from :func:`whathdr`: and :func:`what`: - -+------------+------------------------------------+ -| Value | Sound header format | -+============+====================================+ -| ``'aifc'`` | Compressed Audio Interchange Files | -+------------+------------------------------------+ -| ``'aiff'`` | Audio Interchange Files | -+------------+------------------------------------+ -| ``'au'`` | Au Files | -+------------+------------------------------------+ -| ``'hcom'`` | HCOM Files | -+------------+------------------------------------+ -| ``'sndt'`` | Sndtool Sound Files | -+------------+------------------------------------+ -| ``'voc'`` | Creative Labs Audio Files | -+------------+------------------------------------+ -| ``'wav'`` | Waveform Audio File Format Files | -+------------+------------------------------------+ -| ``'8svx'`` | 8-Bit Sampled Voice Files | -+------------+------------------------------------+ -| ``'sb'`` | Signed Byte Audio Data Files | -+------------+------------------------------------+ -| ``'ub'`` | UB Files | -+------------+------------------------------------+ -| ``'ul'`` | uLAW Audio Files | -+------------+------------------------------------+ - -.. data:: tests - - A list of functions performing the individual tests. Each function takes two - arguments: the byte-stream and an open file-like object. When :func:`what` is - called with a byte-stream, the file-like object will be ``None``. - - The test function should return a string describing the image type if the test - succeeded, or ``None`` if it failed. - -Example: - -.. code-block:: pycon - - >>> import sndhdr - >>> imghdr.what('bass.wav') - 'wav' - >>> imghdr.whathdr('bass.wav') - 'wav' - diff --git a/Doc/library/socket.rst b/Doc/library/socket.rst index 3f6cb4803716173..e0a75304ef16064 100644 --- a/Doc/library/socket.rst +++ b/Doc/library/socket.rst @@ -19,11 +19,11 @@ all modern Unix systems, Windows, MacOS, and probably additional platforms. .. include:: ../includes/wasm-notavail.rst -.. index:: object: socket +.. index:: pair: object; socket The Python interface is a straightforward transliteration of the Unix system call and library interface for sockets to Python's object-oriented style: the -:func:`.socket` function returns a :dfn:`socket object` whose methods implement +:func:`~socket.socket` function returns a :dfn:`socket object` whose methods implement the various socket system calls. Parameter types are somewhat higher-level than in the C interface: as with :meth:`read` and :meth:`write` operations on Python files, buffer allocation on receive operations is automatic, and buffer length @@ -185,12 +185,14 @@ created. Socket addresses are represented as follows: .. versionadded:: 3.7 - :const:`AF_PACKET` is a low-level interface directly to network devices. - The packets are represented by the tuple + The addresses are represented by the tuple ``(ifname, proto[, pkttype[, hatype[, addr]]])`` where: - *ifname* - String specifying the device name. - - *proto* - An in network-byte-order integer specifying the Ethernet - protocol number. + - *proto* - The Ethernet protocol number. + May be :data:`ETH_P_ALL` to capture all protocols, + one of the :ref:`ETHERTYPE_* constants ` + or any other Ethernet protocol number. - *pkttype* - Optional integer specifying the packet type: - ``PACKET_HOST`` (the default) - Packet addressed to the local host. @@ -204,14 +206,14 @@ created. 
Socket addresses are represented as follows: - *addr* - Optional bytes-like object specifying the hardware physical address, whose interpretation depends on the device. - .. availability:: Linux >= 2.2. + .. availability:: Linux >= 2.2. - :const:`AF_QIPCRTR` is a Linux-only socket based interface for communicating with services running on co-processors in Qualcomm platforms. The address family is represented as a ``(node, port)`` tuple where the *node* and *port* are non-negative integers. - .. availability:: Linux >= 4.7. + .. availability:: Linux >= 4.7. .. versionadded:: 3.8 @@ -345,10 +347,15 @@ Constants AF_INET6 These constants represent the address (and protocol) families, used for the - first argument to :func:`.socket`. If the :const:`AF_UNIX` constant is not + first argument to :func:`~socket.socket`. If the :const:`AF_UNIX` constant is not defined then this protocol is unsupported. More constants may be available depending on the system. +.. data:: AF_UNSPEC + + :const:`AF_UNSPEC` means that + :func:`getaddrinfo` should return socket addresses for any + address family (either IPv4, IPv6, or any other) that can be used. .. data:: SOCK_STREAM SOCK_DGRAM @@ -357,7 +364,7 @@ Constants SOCK_SEQPACKET These constants represent the socket types, used for the second argument to - :func:`.socket`. More constants may be available depending on the system. + :func:`~socket.socket`. More constants may be available depending on the system. (Only :const:`SOCK_STREAM` and :const:`SOCK_DGRAM` appear to be generally useful.) @@ -377,6 +384,8 @@ Constants .. versionadded:: 3.2 +.. _socket-unix-constants: + .. data:: SO_* SOMAXCONN MSG_* @@ -394,7 +403,7 @@ Constants Many constants of these forms, documented in the Unix documentation on sockets and/or the IP protocol, are also defined in the socket module. They are - generally used in arguments to the :meth:`setsockopt` and :meth:`getsockopt` + generally used in arguments to the :meth:`~socket.setsockopt` and :meth:`~socket.getsockopt` methods of socket objects. In most cases, only those symbols that are defined in the Unix header files are defined; for a few symbols, default values are provided. @@ -425,7 +434,16 @@ Constants .. versionchanged:: 3.12 Added ``SO_RTABLE`` and ``SO_USER_COOKIE``. On OpenBSD and FreeBSD respectively those constants can be used in the same way that - ``SO_MARK`` is used on Linux. + ``SO_MARK`` is used on Linux. Also added missing TCP socket options from + Linux: ``TCP_MD5SIG``, ``TCP_THIN_LINEAR_TIMEOUTS``, ``TCP_THIN_DUPACK``, + ``TCP_REPAIR``, ``TCP_REPAIR_QUEUE``, ``TCP_QUEUE_SEQ``, + ``TCP_REPAIR_OPTIONS``, ``TCP_TIMESTAMP``, ``TCP_CC_INFO``, + ``TCP_SAVE_SYN``, ``TCP_SAVED_SYN``, ``TCP_REPAIR_WINDOW``, + ``TCP_FASTOPEN_CONNECT``, ``TCP_ULP``, ``TCP_MD5SIG_EXT``, + ``TCP_FASTOPEN_KEY``, ``TCP_FASTOPEN_NO_COOKIE``, + ``TCP_ZEROCOPY_RECEIVE``, ``TCP_INQ``, ``TCP_TX_DELAY``. + Added ``IP_PKTINFO``, ``IP_UNBLOCK_SOURCE``, ``IP_BLOCK_SOURCE``, + ``IP_ADD_SOURCE_MEMBERSHIP``, ``IP_DROP_SOURCE_MEMBERSHIP``. .. data:: AF_CAN PF_CAN @@ -498,6 +516,17 @@ Constants .. versionadded:: 3.9 +.. data:: AF_DIVERT + PF_DIVERT + + These two constants, documented in the FreeBSD divert(4) manual page, are + also defined in the socket module. + + .. availability:: FreeBSD >= 14.0. + + .. versionadded:: 3.12 + + .. data:: AF_PACKET PF_PACKET PACKET_* @@ -508,6 +537,19 @@ Constants .. availability:: Linux >= 2.2. +.. 
data:: ETH_P_ALL + + :data:`!ETH_P_ALL` can be used in the :class:`~socket.socket` + constructor as *proto* for the :const:`AF_PACKET` family in order to + capture every packet, regardless of protocol. + + For more information, see the :manpage:`packet(7)` manpage. + + .. availability:: Linux. + + .. versionadded:: 3.12 + + .. data:: AF_RDS PF_RDS SOL_RDS @@ -630,7 +672,7 @@ Constants HV_GUID_BROADCAST HV_GUID_CHILDREN HV_GUID_LOOPBACK - HV_GUID_LOOPBACK + HV_GUID_PARENT Constants for Windows Hyper-V sockets for host/guest communications. @@ -638,6 +680,22 @@ Constants .. versionadded:: 3.12 +.. _socket-ethernet-types: + +.. data:: ETHERTYPE_ARP + ETHERTYPE_IP + ETHERTYPE_IPV6 + ETHERTYPE_VLAN + + `IEEE 802.3 protocol number + `_. + constants. + + .. availability:: Linux, FreeBSD, macOS. + + .. versionadded:: 3.12 + + Functions ^^^^^^^^^ @@ -711,7 +769,7 @@ The following functions all create :ref:`socket objects `. Build a pair of connected socket objects using the given address family, socket type, and protocol number. Address family, socket type, and protocol number are - as for the :func:`.socket` function above. The default family is :const:`AF_UNIX` + as for the :func:`~socket.socket` function above. The default family is :const:`AF_UNIX` if defined on the platform; otherwise, the default is :const:`AF_INET`. The newly created sockets are :ref:`non-inheritable `. @@ -764,8 +822,8 @@ The following functions all create :ref:`socket objects `. ``(host, port)``) and returns the socket object. *family* should be either :data:`AF_INET` or :data:`AF_INET6`. - *backlog* is the queue size passed to :meth:`socket.listen`; when ``0`` - a default reasonable value is chosen. + *backlog* is the queue size passed to :meth:`socket.listen`; if not specified + , a default reasonable value is chosen. *reuse_port* dictates whether to set the :data:`SO_REUSEPORT` socket option. If *dualstack_ipv6* is true and the platform supports it the socket will @@ -806,8 +864,8 @@ The following functions all create :ref:`socket objects `. .. function:: fromfd(fd, family, type, proto=0) Duplicate the file descriptor *fd* (an integer as returned by a file object's - :meth:`fileno` method) and build a socket object from the result. Address - family, socket type and protocol number are as for the :func:`.socket` function + :meth:`~io.IOBase.fileno` method) and build a socket object from the result. Address + family, socket type and protocol number are as for the :func:`~socket.socket` function above. The file descriptor should refer to a socket, but this is not checked --- subsequent operations on the object may fail if the file descriptor is invalid. This function is rarely needed, but can be used to get or set socket options on @@ -872,7 +930,7 @@ The :mod:`socket` module also offers various network-related services: ``(family, type, proto, canonname, sockaddr)`` In these tuples, *family*, *type*, *proto* are all integers and are - meant to be passed to the :func:`.socket` function. *canonname* will be + meant to be passed to the :func:`~socket.socket` function. *canonname* will be a string representing the canonical name of the *host* if :const:`AI_CANONNAME` is part of the *flags* argument; else *canonname* will be empty. *sockaddr* is a tuple describing a socket address, whose @@ -927,7 +985,7 @@ The :mod:`socket` module also offers various network-related services: .. function:: gethostbyname_ex(hostname) Translate a host name to IPv4 address format, extended interface. 
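A short sketch of the :func:`create_server` behaviour described above, leaving *backlog* unspecified so a reasonable default is chosen (the port number is arbitrary)::

    import socket

    if socket.has_dualstack_ipv6():
        # One listening socket that accepts both IPv4 and IPv6 clients.
        srv = socket.create_server(("", 8080), family=socket.AF_INET6,
                                   dualstack_ipv6=True)
    else:
        srv = socket.create_server(("", 8080))
    conn, addr = srv.accept()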
Return a - triple ``(hostname, aliaslist, ipaddrlist)`` where *hostname* is the host's + 3-tuple ``(hostname, aliaslist, ipaddrlist)`` where *hostname* is the host's primary host name, *aliaslist* is a (possibly empty) list of alternative host names for the same address, and *ipaddrlist* is a list of IPv4 addresses for the same interface on the same host (often but not @@ -955,7 +1013,7 @@ The :mod:`socket` module also offers various network-related services: .. function:: gethostbyaddr(ip_address) - Return a triple ``(hostname, aliaslist, ipaddrlist)`` where *hostname* is the + Return a 3-tuple ``(hostname, aliaslist, ipaddrlist)`` where *hostname* is the primary host name responding to the given *ip_address*, *aliaslist* is a (possibly empty) list of alternative host names for the same address, and *ipaddrlist* is a list of IPv4/v6 addresses for the same interface on the same @@ -988,7 +1046,7 @@ The :mod:`socket` module also offers various network-related services: .. function:: getprotobyname(protocolname) Translate an internet protocol name (for example, ``'icmp'``) to a constant - suitable for passing as the (optional) third argument to the :func:`.socket` + suitable for passing as the (optional) third argument to the :func:`~socket.socket` function. This is usually only needed for sockets opened in "raw" mode (:const:`SOCK_RAW`); for the normal socket modes, the correct protocol is chosen automatically if the protocol is omitted or zero. @@ -1272,7 +1330,7 @@ The :mod:`socket` module also offers various network-related services: Send the list of file descriptors *fds* over an :const:`AF_UNIX` socket *sock*. The *fds* parameter is a sequence of file descriptors. - Consult :meth:`sendmsg` for the documentation of these parameters. + Consult :meth:`~socket.sendmsg` for the documentation of these parameters. .. availability:: Unix, Windows, not Emscripten, not WASI. @@ -1286,7 +1344,7 @@ The :mod:`socket` module also offers various network-related services: Receive up to *maxfds* file descriptors from an :const:`AF_UNIX` socket *sock*. Return ``(msg, list(fds), flags, addr)``. - Consult :meth:`recvmsg` for the documentation of these parameters. + Consult :meth:`~socket.recvmsg` for the documentation of these parameters. .. availability:: Unix, Windows, not Emscripten, not WASI. @@ -1475,7 +1533,7 @@ to sockets. Return ``True`` if socket is in blocking mode, ``False`` if in non-blocking. - This is equivalent to checking ``socket.gettimeout() == 0``. + This is equivalent to checking ``socket.gettimeout() != 0``. .. versionadded:: 3.7 @@ -1735,7 +1793,7 @@ to sockets. much data, if any, was successfully sent. .. versionchanged:: 3.5 - The socket timeout is no more reset each time data is sent successfully. + The socket timeout is no longer reset each time data is sent successfully. The socket timeout is now the maximum total duration to send all data. .. versionchanged:: 3.5 @@ -1876,7 +1934,7 @@ to sockets. .. method:: socket.setsockopt(level, optname, None, optlen: int) :noindex: - .. index:: module: struct + .. index:: pair: module; struct Set the value of the given socket option (see the Unix manual page :manpage:`setsockopt(2)`). The needed symbolic constants are defined in the @@ -1958,8 +2016,8 @@ can be changed by calling :func:`setdefaulttimeout`. 
* In *non-blocking mode*, operations fail (with an error that is unfortunately system-dependent) if they cannot be completed immediately: functions from the - :mod:`select` can be used to know when and whether a socket is available for - reading or writing. + :mod:`select` module can be used to know when and whether a socket is available + for reading or writing. * In *timeout mode*, operations fail if they cannot be completed within the timeout specified for the socket (they raise a :exc:`timeout` exception) @@ -2005,10 +2063,10 @@ Example Here are four minimal example programs using the TCP/IP protocol: a server that echoes all data that it receives back (servicing only one client), and a client -using it. Note that a server must perform the sequence :func:`.socket`, +using it. Note that a server must perform the sequence :func:`~socket.socket`, :meth:`~socket.bind`, :meth:`~socket.listen`, :meth:`~socket.accept` (possibly repeating the :meth:`~socket.accept` to service more than one client), while a -client only needs the sequence :func:`.socket`, :meth:`~socket.connect`. Also +client only needs the sequence :func:`~socket.socket`, :meth:`~socket.connect`. Also note that the server does not :meth:`~socket.sendall`/:meth:`~socket.recv` on the socket it is listening on but on the new socket returned by :meth:`~socket.accept`. @@ -2048,7 +2106,7 @@ The next two examples are identical to the above two, but support both IPv4 and IPv6. The server side will listen to the first address family available (it should listen to both instead). On most of IPv6-ready systems, IPv6 will take precedence and the server may not accept IPv4 traffic. The client side will try -to connect to the all addresses returned as a result of the name resolution, and +to connect to all the addresses returned as a result of the name resolution, and sends traffic to the first one connected successfully. :: # Echo server program @@ -2148,7 +2206,7 @@ manager protocol instead, open a socket with:: socket.socket(socket.AF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) After binding (:const:`CAN_RAW`) or connecting (:const:`CAN_BCM`) the socket, you -can use the :meth:`socket.send`, and the :meth:`socket.recv` operations (and +can use the :meth:`socket.send` and :meth:`socket.recv` operations (and their counterparts) on the socket object as usual. This last example might require special privileges:: @@ -2200,7 +2258,7 @@ This is because the previous execution has left the socket in a ``TIME_WAIT`` state, and can't be immediately reused. There is a :mod:`socket` flag to set, in order to prevent this, -:data:`socket.SO_REUSEADDR`:: +:const:`socket.SO_REUSEADDR`:: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) diff --git a/Doc/library/socketserver.rst b/Doc/library/socketserver.rst index 70d56a1dad94f20..5fd213fa613c8d4 100644 --- a/Doc/library/socketserver.rst +++ b/Doc/library/socketserver.rst @@ -96,8 +96,7 @@ synchronous servers of four types:: Note that :class:`UnixDatagramServer` derives from :class:`UDPServer`, not from :class:`UnixStreamServer` --- the only difference between an IP and a Unix -stream server is the address family, which is simply repeated in both Unix -server classes. +server is the address family. .. class:: ForkingMixIn @@ -117,23 +116,28 @@ server classes. :class:`ForkingMixIn` and the Forking classes mentioned below are only available on POSIX platforms that support :func:`~os.fork`. 
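The blocking, non-blocking and timeout modes summarised above are selected per socket with :meth:`~socket.socket.setblocking` and :meth:`~socket.socket.settimeout`; a small sketch::

    import socket

    s = socket.socket()
    s.setblocking(False)    # non-blocking mode, gettimeout() == 0.0
    s.settimeout(5.0)       # timeout mode, slow operations raise TimeoutError
    s.settimeout(None)      # back to blocking mode (the default)
    print(s.getblocking())  # True, i.e. gettimeout() != 0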
- :meth:`socketserver.ForkingMixIn.server_close` waits until all child - processes complete, except if - :attr:`socketserver.ForkingMixIn.block_on_close` attribute is false. + .. attribute:: block_on_close - :meth:`socketserver.ThreadingMixIn.server_close` waits until all non-daemon - threads complete, except if - :attr:`socketserver.ThreadingMixIn.block_on_close` attribute is false. Use - daemonic threads by setting - :data:`ThreadingMixIn.daemon_threads` to ``True`` to not wait until threads - complete. + :meth:`ForkingMixIn.server_close ` + waits until all child processes complete, except if + :attr:`block_on_close` attribute is ``False``. + + :meth:`ThreadingMixIn.server_close ` + waits until all non-daemon threads complete, except if + :attr:`block_on_close` attribute is ``False``. + + .. attribute:: daemon_threads + + For :class:`ThreadingMixIn` use daemonic threads by setting + :data:`ThreadingMixIn.daemon_threads ` + to ``True`` to not wait until threads complete. .. versionchanged:: 3.7 - :meth:`socketserver.ForkingMixIn.server_close` and - :meth:`socketserver.ThreadingMixIn.server_close` now waits until all + :meth:`ForkingMixIn.server_close ` and + :meth:`ThreadingMixIn.server_close ` now waits until all child processes and non-daemonic threads complete. - Add a new :attr:`socketserver.ForkingMixIn.block_on_close` class + Add a new :attr:`ForkingMixIn.block_on_close ` class attribute to opt-in for the pre-3.7 behaviour. @@ -141,9 +145,16 @@ server classes. ForkingUDPServer ThreadingTCPServer ThreadingUDPServer + ForkingUnixStreamServer + ForkingUnixDatagramServer + ThreadingUnixStreamServer + ThreadingUnixDatagramServer These classes are pre-defined using the mix-in classes. +.. versionadded:: 3.12 + The ``ForkingUnixStreamServer`` and ``ForkingUnixDatagramServer`` classes + were added. To implement a service, you must derive a class from :class:`BaseRequestHandler` and redefine its :meth:`~BaseRequestHandler.handle` method. @@ -177,8 +188,7 @@ expensive or inappropriate for the service) is to maintain an explicit table of partially finished requests and to use :mod:`selectors` to decide which request to work on next (or whether to handle a new incoming request). This is particularly important for stream services where each client can potentially be -connected for a long time (if threads or subprocesses cannot be used). See -:mod:`asyncore` for another way to manage this. +connected for a long time (if threads or subprocesses cannot be used). .. XXX should data and methods be intermingled, or separate? how should the distinction between class and instance variables be drawn? @@ -407,13 +417,13 @@ Request Handler Objects This function must do all the work required to service a request. The default implementation does nothing. Several instance attributes are - available to it; the request is available as :attr:`self.request`; the client - address as :attr:`self.client_address`; and the server instance as - :attr:`self.server`, in case it needs access to per-server information. + available to it; the request is available as :attr:`request`; the client + address as :attr:`client_address`; and the server instance as + :attr:`server`, in case it needs access to per-server information. - The type of :attr:`self.request` is different for datagram or stream - services. For stream services, :attr:`self.request` is a socket object; for - datagram services, :attr:`self.request` is a pair of string and socket. + The type of :attr:`request` is different for datagram or stream + services. 
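A minimal stream service using the attributes just described; the port number is arbitrary::

    import socketserver

    class EchoHandler(socketserver.BaseRequestHandler):
        def handle(self):
            # For stream services, self.request is the connected socket.
            data = self.request.recv(1024)
            print("request from", self.client_address)
            self.request.sendall(data)

    with socketserver.ThreadingTCPServer(("localhost", 9999), EchoHandler) as server:
        server.serve_forever()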
For stream services, :attr:`request` is a socket object; for + datagram services, :attr:`request` is a pair of string and socket. .. method:: finish() @@ -423,23 +433,42 @@ Request Handler Objects raises an exception, this function will not be called. + .. attribute:: request + + The *new* :class:`socket.socket` object + to be used to communicate with the client. + + + .. attribute:: client_address + + Client address returned by :meth:`BaseServer.get_request`. + + + .. attribute:: server + + :class:`BaseServer` object used for handling the request. + + .. class:: StreamRequestHandler DatagramRequestHandler These :class:`BaseRequestHandler` subclasses override the :meth:`~BaseRequestHandler.setup` and :meth:`~BaseRequestHandler.finish` - methods, and provide :attr:`self.rfile` and :attr:`self.wfile` attributes. - The :attr:`self.rfile` and :attr:`self.wfile` attributes can be - read or written, respectively, to get the request data or return data - to the client. + methods, and provide :attr:`rfile` and :attr:`wfile` attributes. + + .. attribute:: rfile + + A file object from which receives the request is read. + Support the :class:`io.BufferedIOBase` readable interface. + + .. attribute:: wfile + + A file object to which the reply is written. + Support the :class:`io.BufferedIOBase` writable interface - The :attr:`rfile` attributes of both classes support the - :class:`io.BufferedIOBase` readable interface, and - :attr:`DatagramRequestHandler.wfile` supports the - :class:`io.BufferedIOBase` writable interface. .. versionchanged:: 3.6 - :attr:`StreamRequestHandler.wfile` also supports the + :attr:`wfile` also supports the :class:`io.BufferedIOBase` writable interface. diff --git a/Doc/library/spwd.rst b/Doc/library/spwd.rst deleted file mode 100644 index d1693ea67f0ceb7..000000000000000 --- a/Doc/library/spwd.rst +++ /dev/null @@ -1,82 +0,0 @@ -:mod:`spwd` --- The shadow password database -============================================ - -.. module:: spwd - :platform: Unix - :synopsis: The shadow password database (getspnam() and friends). - :deprecated: - -.. deprecated-removed:: 3.11 3.13 - The :mod:`spwd` module is deprecated - (see :pep:`PEP 594 <594#spwd>` for details and alternatives). - --------------- - -This module provides access to the Unix shadow password database. It is -available on various Unix versions. - -.. include:: ../includes/wasm-notavail.rst - -You must have enough privileges to access the shadow password database (this -usually means you have to be root). 
- -Shadow password database entries are reported as a tuple-like object, whose -attributes correspond to the members of the ``spwd`` structure (Attribute field -below, see ````): - -+-------+---------------+---------------------------------+ -| Index | Attribute | Meaning | -+=======+===============+=================================+ -| 0 | ``sp_namp`` | Login name | -+-------+---------------+---------------------------------+ -| 1 | ``sp_pwdp`` | Encrypted password | -+-------+---------------+---------------------------------+ -| 2 | ``sp_lstchg`` | Date of last change | -+-------+---------------+---------------------------------+ -| 3 | ``sp_min`` | Minimal number of days between | -| | | changes | -+-------+---------------+---------------------------------+ -| 4 | ``sp_max`` | Maximum number of days between | -| | | changes | -+-------+---------------+---------------------------------+ -| 5 | ``sp_warn`` | Number of days before password | -| | | expires to warn user about it | -+-------+---------------+---------------------------------+ -| 6 | ``sp_inact`` | Number of days after password | -| | | expires until account is | -| | | disabled | -+-------+---------------+---------------------------------+ -| 7 | ``sp_expire`` | Number of days since 1970-01-01 | -| | | when account expires | -+-------+---------------+---------------------------------+ -| 8 | ``sp_flag`` | Reserved | -+-------+---------------+---------------------------------+ - -The sp_namp and sp_pwdp items are strings, all others are integers. -:exc:`KeyError` is raised if the entry asked for cannot be found. - -The following functions are defined: - - -.. function:: getspnam(name) - - Return the shadow password database entry for the given user name. - - .. versionchanged:: 3.6 - Raises a :exc:`PermissionError` instead of :exc:`KeyError` if the user - doesn't have privileges. - -.. function:: getspall() - - Return a list of all available shadow password database entries, in arbitrary - order. - - -.. seealso:: - - Module :mod:`grp` - An interface to the group database, similar to this. - - Module :mod:`pwd` - An interface to the normal password database, similar to this. - diff --git a/Doc/library/sqlite3.rst b/Doc/library/sqlite3.rst index a9bab9add763cc1..6dbb34a84a4c406 100644 --- a/Doc/library/sqlite3.rst +++ b/Doc/library/sqlite3.rst @@ -29,7 +29,7 @@ PostgreSQL or Oracle. The :mod:`!sqlite3` module was written by Gerhard Häring. It provides an SQL interface compliant with the DB-API 2.0 specification described by :pep:`249`, and -requires SQLite 3.7.15 or newer. +requires SQLite 3.15.2 or newer. This document includes four main sections: @@ -72,7 +72,7 @@ including `cursors`_ and `transactions`_. First, we need to create a new database and open a database connection to allow :mod:`!sqlite3` to work with it. -Call :func:`sqlite3.connect` to to create a connection to +Call :func:`sqlite3.connect` to create a connection to the database :file:`tutorial.db` in the current working directory, implicitly creating it if it does not exist: @@ -235,10 +235,11 @@ inserted data and retrieved values from it in multiple ways. 
* :ref:`sqlite3-howtos` for further reading: - * :ref:`sqlite3-placeholders` - * :ref:`sqlite3-adapters` - * :ref:`sqlite3-converters` - * :ref:`sqlite3-connection-context-manager` + * :ref:`sqlite3-placeholders` + * :ref:`sqlite3-adapters` + * :ref:`sqlite3-converters` + * :ref:`sqlite3-connection-context-manager` + * :ref:`sqlite3-howto-row-factory` * :ref:`sqlite3-explanation` for in-depth background on transaction control. @@ -258,21 +259,23 @@ Module functions .. function:: connect(database, timeout=5.0, detect_types=0, \ isolation_level="DEFERRED", check_same_thread=True, \ factory=sqlite3.Connection, cached_statements=128, \ - uri=False) + uri=False, *, \ + autocommit=sqlite3.LEGACY_TRANSACTION_CONTROL) Open a connection to an SQLite database. :param database: The path to the database file to be opened. - Pass ``":memory:"`` to open a connection to a database that is - in RAM instead of on disk. + You can pass ``":memory:"`` to create an `SQLite database existing only + in memory `_, and open a connection + to it. :type database: :term:`path-like object` :param float timeout: How many seconds the connection should wait before raising - an exception, if the database is locked by another connection. - If another connection opens a transaction to modify the database, - it will be locked until that transaction is committed. + an :exc:`OperationalError` when a table is locked. + If another connection opens a transaction to modify a table, + that table will be locked until the transaction is committed. Default five seconds. :param int detect_types: @@ -290,20 +293,25 @@ Module functions By default (``0``), type detection is disabled. :param isolation_level: - The :attr:`~Connection.isolation_level` of the connection, - controlling whether and how transactions are implicitly opened. + Control legacy transaction handling behaviour. + See :attr:`Connection.isolation_level` and + :ref:`sqlite3-transaction-control-isolation-level` for more information. Can be ``"DEFERRED"`` (default), ``"EXCLUSIVE"`` or ``"IMMEDIATE"``; or ``None`` to disable opening transactions implicitly. - See :ref:`sqlite3-controlling-transactions` for more. + Has no effect unless :attr:`Connection.autocommit` is set to + :const:`~sqlite3.LEGACY_TRANSACTION_CONTROL` (the default). :type isolation_level: str | None :param bool check_same_thread: - If ``True`` (default), only the creating thread may use the connection. - If ``False``, the connection may be shared across multiple threads; - if so, write operations should be serialized by the user to avoid data - corruption. - - :param Connection factory: + If ``True`` (default), :exc:`ProgrammingError` will be raised + if the database connection is used by a thread + other than the one that created it. + If ``False``, the connection may be accessed in multiple threads; + write operations may need to be serialized by the user + to avoid data corruption. + See :attr:`threadsafety` for more information. + + :param ~sqlite3.Connection factory: A custom subclass of :class:`Connection` to create the connection with, if not the default :class:`Connection` class. @@ -321,7 +329,16 @@ Module functions The query string allows passing parameters to SQLite, enabling various :ref:`sqlite3-uri-tricks`. - :rtype: Connection + :param autocommit: + Control :pep:`249` transaction handling behaviour. + See :attr:`Connection.autocommit` and + :ref:`sqlite3-transaction-control-autocommit` for more information. + *autocommit* currently defaults to + :const:`~sqlite3.LEGACY_TRANSACTION_CONTROL`. 
+ The default will change to ``False`` in a future Python release. + :type autocommit: bool + + :rtype: ~sqlite3.Connection .. audit-event:: sqlite3.connect database sqlite3.connect .. audit-event:: sqlite3.connect/handle connection_handle sqlite3.connect @@ -335,6 +352,15 @@ Module functions .. versionadded:: 3.10 The ``sqlite3.connect/handle`` auditing event. + .. versionadded:: 3.12 + The *autocommit* parameter. + + .. versionchanged:: 3.13 + Positional use of the parameters *timeout*, *detect_types*, + *isolation_level*, *check_same_thread*, *factory*, *cached_statements*, + and *uri* is deprecated. + They will become keyword-only parameters in Python 3.15. + .. function:: complete_statement(statement) Return ``True`` if the string *statement* appears to contain @@ -381,6 +407,7 @@ Module functions >>> con = sqlite3.connect(":memory:") >>> def evil_trace(stmt): ... 5/0 + ... >>> con.set_trace_callback(evil_trace) >>> def debug(unraisable): ... print(f"{unraisable.exc_value!r} in callback {unraisable.object.__name__}") @@ -393,15 +420,15 @@ Module functions .. function:: register_adapter(type, adapter, /) - Register an *adapter* callable to adapt the Python type *type* into an - SQLite type. + Register an *adapter* :term:`callable` to adapt the Python type *type* + into an SQLite type. The adapter is called with a Python object of type *type* as its sole argument, and must return a value of a :ref:`type that SQLite natively understands `. .. function:: register_converter(typename, converter, /) - Register the *converter* callable to convert SQLite objects of type + Register the *converter* :term:`callable` to convert SQLite objects of type *typename* into a Python object of a specific type. The converter is invoked for all SQLite values of type *typename*; it is passed a :class:`bytes` object and should return an object of the @@ -418,6 +445,12 @@ Module functions Module constants ^^^^^^^^^^^^^^^^ +.. data:: LEGACY_TRANSACTION_CONTROL + + Set :attr:`~Connection.autocommit` to this constant to select + old style (pre-Python 3.12) transaction control behaviour. + See :ref:`sqlite3-transaction-control-isolation-level` for more information. + .. data:: PARSE_COLNAMES Pass this flag value to the *detect_types* parameter of @@ -458,7 +491,7 @@ Module constants SQLITE_DENY SQLITE_IGNORE - Flags that should be returned by the *authorizer_callback* callable + Flags that should be returned by the *authorizer_callback* :term:`callable` passed to :meth:`Connection.set_authorizer`, to indicate whether: * Access is allowed (:const:`!SQLITE_OK`), @@ -478,10 +511,7 @@ Module constants .. note:: - The :mod:`!sqlite3` module supports both ``qmark`` and ``numeric`` DB-API - parameter styles, because that is what the underlying SQLite library - supports. However, the DB-API does not allow multiple values for - the ``paramstyle`` attribute. + The ``named`` DB-API parameter style is also supported. .. data:: sqlite_version @@ -499,13 +529,13 @@ Module constants the default `threading mode `_ the underlying SQLite library is compiled with. The SQLite threading modes are: - 1. **Single-thread**: In this mode, all mutexes are disabled and SQLite is - unsafe to use in more than a single thread at once. - 2. **Multi-thread**: In this mode, SQLite can be safely used by multiple - threads provided that no single database connection is used - simultaneously in two or more threads. - 3. **Serialized**: In serialized mode, SQLite can be safely used by - multiple threads with no restriction. + 1. 
**Single-thread**: In this mode, all mutexes are disabled and SQLite is + unsafe to use in more than a single thread at once. + 2. **Multi-thread**: In this mode, SQLite can be safely used by multiple + threads provided that no single database connection is used + simultaneously in two or more threads. + 3. **Serialized**: In serialized mode, SQLite can be safely used by + multiple threads with no restriction. The mappings from SQLite threading modes to DB-API 2.0 threadsafety levels are as follows: @@ -550,6 +580,38 @@ Module constants package, a third-party library which used to upstream changes to :mod:`!sqlite3`. Today, it carries no meaning or practical value. +.. _sqlite3-dbconfig-constants: + +.. data:: SQLITE_DBCONFIG_DEFENSIVE + SQLITE_DBCONFIG_DQS_DDL + SQLITE_DBCONFIG_DQS_DML + SQLITE_DBCONFIG_ENABLE_FKEY + SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER + SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION + SQLITE_DBCONFIG_ENABLE_QPSG + SQLITE_DBCONFIG_ENABLE_TRIGGER + SQLITE_DBCONFIG_ENABLE_VIEW + SQLITE_DBCONFIG_LEGACY_ALTER_TABLE + SQLITE_DBCONFIG_LEGACY_FILE_FORMAT + SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE + SQLITE_DBCONFIG_RESET_DATABASE + SQLITE_DBCONFIG_TRIGGER_EQP + SQLITE_DBCONFIG_TRUSTED_SCHEMA + SQLITE_DBCONFIG_WRITABLE_SCHEMA + + These constants are used for the :meth:`Connection.setconfig` + and :meth:`~Connection.getconfig` methods. + + The availability of these constants varies depending on the version of SQLite + Python was compiled with. + + .. versionadded:: 3.12 + + .. seealso:: + + https://www.sqlite.org/c3ref/c_dbconfig_defensive.html + SQLite docs: Database Connection Configuration Options + .. _sqlite3-connection-objects: @@ -568,14 +630,20 @@ Connection objects * :ref:`sqlite3-connection-shortcuts` * :ref:`sqlite3-connection-context-manager` + + .. versionchanged:: 3.13 + + A :exc:`ResourceWarning` is emitted if :meth:`close` is not called before + a :class:`!Connection` object is deleted. + An SQLite database connection has the following attributes and methods: .. method:: cursor(factory=Cursor) Create and return a :class:`Cursor` object. The cursor method accepts a single optional parameter *factory*. If - supplied, this must be a callable returning an instance of :class:`Cursor` - or its subclasses. + supplied, this must be a :term:`callable` returning + an instance of :class:`Cursor` or its subclasses. .. method:: blobopen(table, column, row, /, *, readonly=False, name="main") @@ -615,18 +683,27 @@ Connection objects .. method:: commit() Commit any pending transaction to the database. - If there is no open transaction, this method is a no-op. + If :attr:`autocommit` is ``True``, or there is no open transaction, + this method does nothing. + If :attr:`!autocommit` is ``False``, a new transaction is implicitly + opened if a pending transaction was committed by this method. .. method:: rollback() Roll back to the start of any pending transaction. - If there is no open transaction, this method is a no-op. + If :attr:`autocommit` is ``True``, or there is no open transaction, + this method does nothing. + If :attr:`!autocommit` is ``False``, a new transaction is implicitly + opened if a pending transaction was rolled back by this method. .. method:: close() Close the database connection. - Any pending transaction is not committed implicitly; - make sure to :meth:`commit` before closing + If :attr:`autocommit` is ``False``, + any pending transaction is implicitly rolled back. 
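A sketch of how :meth:`!commit`, :meth:`!rollback` and :meth:`!close` interact under the new *autocommit* parameter (the file name and table are illustrative)::

    import sqlite3

    con = sqlite3.connect("example.db", autocommit=False)
    con.execute("CREATE TABLE IF NOT EXISTS kv(key TEXT, value TEXT)")
    con.execute("INSERT INTO kv VALUES(?, ?)", ("lang", "python"))
    con.commit()   # persist the insert; a new transaction is implicitly opened
    con.execute("INSERT INTO kv VALUES(?, ?)", ("tmp", "discard me"))
    con.close()    # with autocommit=False, the pending insert is rolled back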
+ If :attr:`!autocommit` is ``True`` or :data:`LEGACY_TRANSACTION_CONTROL`, + no implicit transaction control is executed. + Make sure to :meth:`commit` before closing to avoid losing pending changes. .. method:: execute(sql, parameters=(), /) @@ -659,7 +736,7 @@ Connection objects If ``-1``, it may take any number of arguments. :param func: - A callable that is called when the SQL function is invoked. + A :term:`callable` that is called when the SQL function is invoked. The callable must return :ref:`a type natively supported by SQLite `. Set to ``None`` to remove an existing SQL function. @@ -670,9 +747,6 @@ Connection objects `deterministic `_, which allows SQLite to perform additional optimizations. - :raises NotSupportedError: - If *deterministic* is used with SQLite versions older than 3.8.3. - .. versionadded:: 3.8 The *deterministic* parameter. @@ -689,8 +763,13 @@ Connection objects ... print(row) ('acbd18db4cc2f85cedef654fccc4a4d8',) + .. versionchanged:: 3.13 + + Passing *name*, *narg*, and *func* as keyword arguments is deprecated. + These parameters will become positional-only in Python 3.15. + - .. method:: create_aggregate(name, /, n_arg, aggregate_class) + .. method:: create_aggregate(name, n_arg, aggregate_class) Create or remove a user-defined SQL aggregate function. @@ -743,6 +822,11 @@ Connection objects 3 + .. versionchanged:: 3.13 + + Passing *name*, *n_arg*, and *aggregate_class* as keyword arguments is deprecated. + These parameters will become positional-only in Python 3.15. + .. method:: create_window_function(name, num_params, aggregate_class, /) @@ -830,7 +914,7 @@ Connection objects [('a', 9), ('b', 12), ('c', 16), ('d', 12), ('e', 9)] - .. method:: create_collation(name, callable) + .. method:: create_collation(name, callable, /) Create a collation named *name* using the collating function *callable*. *callable* is passed two :class:`string ` arguments, @@ -879,14 +963,15 @@ Connection objects Call this method from a different thread to abort any queries that might be executing on the connection. - Aborted queries will raise an exception. + Aborted queries will raise an :exc:`OperationalError`. .. method:: set_authorizer(authorizer_callback) - Register callable *authorizer_callback* to be invoked for each attempt to - access a column of a table in the database. The callback should return - one of :const:`SQLITE_OK`, :const:`SQLITE_DENY`, or :const:`SQLITE_IGNORE` + Register :term:`callable` *authorizer_callback* to be invoked + for each attempt to access a column of a table in the database. + The callback should return one of :const:`SQLITE_OK`, + :const:`SQLITE_DENY`, or :const:`SQLITE_IGNORE` to signal how access to the column should be handled by the underlying SQLite library. @@ -906,10 +991,14 @@ Connection objects .. versionchanged:: 3.11 Added support for disabling the authorizer using ``None``. + .. versionchanged:: 3.13 + Passing *authorizer_callback* as a keyword argument is deprecated. + The parameter will become positional-only in Python 3.15. + .. method:: set_progress_handler(progress_handler, n) - Register callable *progress_handler* to be invoked for every *n* + Register :term:`callable` *progress_handler* to be invoked for every *n* instructions of the SQLite virtual machine. This is useful if you want to get called from SQLite during long-running operations, for example to update a GUI. @@ -921,11 +1010,15 @@ Connection objects currently executing query and cause it to raise a :exc:`DatabaseError` exception. + .. 
versionchanged:: 3.13 + Passing *progress_handler* as a keyword argument is deprecated. + The parameter will become positional-only in Python 3.15. + .. method:: set_trace_callback(trace_callback) - Register callable *trace_callback* to be invoked for each SQL statement - that is actually executed by the SQLite backend. + Register :term:`callable` *trace_callback* to be invoked + for each SQL statement that is actually executed by the SQLite backend. The only argument passed to the callback is the statement (as :class:`str`) that is being executed. The return value of the callback is @@ -945,6 +1038,10 @@ Connection objects .. versionadded:: 3.3 + .. versionchanged:: 3.13 + Passing *trace_callback* as a keyword argument is deprecated. + The parameter will become positional-only in Python 3.15. + .. method:: enable_load_extension(enabled, /) @@ -1009,12 +1106,25 @@ Connection objects (2, 'broccoli pie', 'broccoli cheese onions flour') (3, 'pumpkin pie', 'pumpkin sugar flour butter') - .. method:: load_extension(path, /) + .. method:: load_extension(path, /, *, entrypoint=None) - Load an SQLite extension from a shared library located at *path*. + Load an SQLite extension from a shared library. Enable extension loading with :meth:`enable_load_extension` before calling this method. + :param str path: + + The path to the SQLite extension. + + :param entrypoint: + + Entry point name. + If ``None`` (the default), + SQLite will come up with an entry point name of its own; + see the SQLite docs `Loading an Extension`_ for details. + + :type entrypoint: str | None + .. audit-event:: sqlite3.load_extension connection,path sqlite3.Connection.load_extension .. versionadded:: 3.2 @@ -1022,6 +1132,11 @@ Connection objects .. versionchanged:: 3.10 Added the ``sqlite3.load_extension`` auditing event. + .. versionadded:: 3.12 + The *entrypoint* parameter. + + .. _Loading an Extension: https://www.sqlite.org/loadext.html#loading_an_extension_ + .. method:: iterdump Return an :term:`iterator` to dump the database as SQL source code. @@ -1039,6 +1154,10 @@ Connection objects f.write('%s\n' % line) con.close() + .. seealso:: + + :ref:`sqlite3-howto-encoding` + .. method:: backup(target, *, pages=-1, progress=None, name="main", sleep=0.250) @@ -1047,7 +1166,7 @@ Connection objects Works even if the database is being accessed by other clients or concurrently by the same connection. - :param Connection target: + :param ~sqlite3.Connection target: The database connection to save the backup to. :param int pages: @@ -1057,8 +1176,8 @@ Connection objects Defaults to ``-1``. :param progress: - If set to a callable, it is invoked with three integer arguments for - every backup iteration: + If set to a :term:`callable`, + it is invoked with three integer arguments for every backup iteration: the *status* of the last iteration, the *remaining* number of pages still to be copied, and the *total* number of pages. @@ -1105,6 +1224,10 @@ Connection objects .. versionadded:: 3.7 + .. seealso:: + + :ref:`sqlite3-howto-encoding` + .. method:: getlimit(category, /) Get a connection runtime limit. @@ -1169,6 +1292,30 @@ Connection objects .. _SQLite limit category: https://www.sqlite.org/c3ref/c_limit_attached.html + .. method:: getconfig(op, /) + + Query a boolean connection configuration option. + + :param int op: + A :ref:`SQLITE_DBCONFIG code `. + + :rtype: bool + + .. versionadded:: 3.12 + + .. method:: setconfig(op, enable=True, /) + + Set a boolean connection configuration option. 
+ + :param int op: + A :ref:`SQLITE_DBCONFIG code `. + + :param bool enable: + ``True`` if the configuration option should be enabled (default); + ``False`` if it should be disabled. + + .. versionadded:: 3.12 + .. method:: serialize(*, name="main") Serialize a database into a :class:`bytes` object. For an @@ -1223,6 +1370,38 @@ Connection objects .. versionadded:: 3.11 + .. attribute:: autocommit + + This attribute controls :pep:`249`-compliant transaction behaviour. + :attr:`!autocommit` has three allowed values: + + * ``False``: Select :pep:`249`-compliant transaction behaviour, + implying that :mod:`!sqlite3` ensures a transaction is always open. + Use :meth:`commit` and :meth:`rollback` to close transactions. + + This is the recommended value of :attr:`!autocommit`. + + * ``True``: Use SQLite's `autocommit mode`_. + :meth:`commit` and :meth:`rollback` have no effect in this mode. + + * :data:`LEGACY_TRANSACTION_CONTROL`: + Pre-Python 3.12 (non-:pep:`249`-compliant) transaction control. + See :attr:`isolation_level` for more details. + + This is currently the default value of :attr:`!autocommit`. + + Changing :attr:`!autocommit` to ``False`` will open a new transaction, + and changing it to ``True`` will commit any pending transaction. + + See :ref:`sqlite3-transaction-control-autocommit` for more details. + + .. note:: + + The :attr:`isolation_level` attribute has no effect unless + :attr:`autocommit` is :data:`LEGACY_TRANSACTION_CONTROL`. + + .. versionadded:: 3.12 + .. attribute:: in_transaction This read-only attribute corresponds to the low-level SQLite @@ -1235,84 +1414,43 @@ Connection objects .. attribute:: isolation_level - This attribute controls the :ref:`transaction handling - ` performed by :mod:`!sqlite3`. + Controls the :ref:`legacy transaction handling mode + ` of :mod:`!sqlite3`. If set to ``None``, transactions are never implicitly opened. If set to one of ``"DEFERRED"``, ``"IMMEDIATE"``, or ``"EXCLUSIVE"``, corresponding to the underlying `SQLite transaction behaviour`_, - implicit :ref:`transaction management - ` is performed. + :ref:`implicit transaction management + ` is performed. If not overridden by the *isolation_level* parameter of :func:`connect`, the default is ``""``, which is an alias for ``"DEFERRED"``. - .. attribute:: row_factory - - A callable that accepts two arguments, - a :class:`Cursor` object and the raw row results as a :class:`tuple`, - and returns a custom object representing an SQLite row. - - Example: + .. note:: - .. doctest:: + Using :attr:`autocommit` to control transaction handling is + recommended over using :attr:`!isolation_level`. + :attr:`!isolation_level` has no effect unless :attr:`autocommit` is + set to :data:`LEGACY_TRANSACTION_CONTROL` (the default). - >>> def dict_factory(cursor, row): - ... col_names = [col[0] for col in cursor.description] - ... return {key: value for key, value in zip(col_names, row)} - >>> con = sqlite3.connect(":memory:") - >>> con.row_factory = dict_factory - >>> for row in con.execute("SELECT 1 AS a, 2 AS b"): - ... print(row) - {'a': 1, 'b': 2} + .. attribute:: row_factory - If returning a tuple doesn't suffice and you want name-based access to - columns, you should consider setting :attr:`row_factory` to the - highly optimized :class:`sqlite3.Row` type. :class:`Row` provides both - index-based and case-insensitive name-based access to columns with almost no - memory overhead. It will probably be better than your own custom - dictionary-based approach or even a db_row based solution. 
+ The initial :attr:`~Cursor.row_factory` + for :class:`Cursor` objects created from this connection. + Assigning to this attribute does not affect the :attr:`!row_factory` + of existing cursors belonging to this connection, only new ones. + Is ``None`` by default, + meaning each row is returned as a :class:`tuple`. - .. XXX what's a db_row-based solution? + See :ref:`sqlite3-howto-row-factory` for more details. .. attribute:: text_factory - A callable that accepts a :class:`bytes` parameter and returns a text - representation of it. + A :term:`callable` that accepts a :class:`bytes` parameter + and returns a text representation of it. The callable is invoked for SQLite values with the ``TEXT`` data type. By default, this attribute is set to :class:`str`. - If you want to return ``bytes`` instead, set *text_factory* to ``bytes``. - - Example: - .. testcode:: - - con = sqlite3.connect(":memory:") - cur = con.cursor() - - AUSTRIA = "Österreich" - - # by default, rows are returned as str - cur.execute("SELECT ?", (AUSTRIA,)) - row = cur.fetchone() - assert row[0] == AUSTRIA - - # but we can make sqlite3 always return bytestrings ... - con.text_factory = bytes - cur.execute("SELECT ?", (AUSTRIA,)) - row = cur.fetchone() - assert type(row[0]) is bytes - # the bytestrings will be encoded in UTF-8, unless you stored garbage in the - # database ... - assert row[0] == AUSTRIA.encode("utf-8") - - # we can also implement a custom text_factory ... - # here we implement one that appends "foo" to all strings - con.text_factory = lambda x: x.decode("utf-8") + "foo" - cur.execute("SELECT ?", ("bar",)) - row = cur.fetchone() - assert row[0] == "barfoo" - - con.close() + See :ref:`sqlite3-howto-encoding` for more details. .. attribute:: total_changes @@ -1364,30 +1502,61 @@ Cursor objects .. method:: execute(sql, parameters=(), /) - Execute SQL statement *sql*. - Bind values to the statement using :ref:`placeholders - ` that map to the :term:`sequence` or :class:`dict` - *parameters*. + Execute a single SQL statement, + optionally binding Python values using + :ref:`placeholders `. + + :param str sql: + A single SQL statement. - :meth:`execute` will only execute a single SQL statement. If you try to execute - more than one statement with it, it will raise a :exc:`ProgrammingError`. Use - :meth:`executescript` if you want to execute multiple SQL statements with one - call. + :param parameters: + Python values to bind to placeholders in *sql*. + A :class:`!dict` if named placeholders are used. + A :term:`!sequence` if unnamed placeholders are used. + See :ref:`sqlite3-placeholders`. + :type parameters: :class:`dict` | :term:`sequence` - If :attr:`~Connection.isolation_level` is not ``None``, + :raises ProgrammingError: + If *sql* contains more than one SQL statement. + + If :attr:`~Connection.autocommit` is + :data:`LEGACY_TRANSACTION_CONTROL`, + :attr:`~Connection.isolation_level` is not ``None``, *sql* is an ``INSERT``, ``UPDATE``, ``DELETE``, or ``REPLACE`` statement, and there is no open transaction, a transaction is implicitly opened before executing *sql*. + .. deprecated-removed:: 3.12 3.14 + + :exc:`DeprecationWarning` is emitted if + :ref:`named placeholders ` are used + and *parameters* is a sequence instead of a :class:`dict`. + Starting with Python 3.14, :exc:`ProgrammingError` will + be raised instead. + + Use :meth:`executescript` to execute multiple SQL statements. .. 
method:: executemany(sql, parameters, /) - Execute :ref:`parameterized ` SQL statement *sql* - against all parameter sequences or mappings found in the sequence - *parameters*. It is also possible to use an - :term:`iterator` yielding parameters instead of a sequence. + For every item in *parameters*, + repeatedly execute the :ref:`parameterized ` + :abbr:`DML (Data Manipulation Language)` SQL statement *sql*. + Uses the same implicit transaction handling as :meth:`~Cursor.execute`. + :param str sql: + A single SQL DML statement. + + :param parameters: + An :term:`!iterable` of parameters to bind with + the placeholders in *sql*. + See :ref:`sqlite3-placeholders`. + :type parameters: :term:`iterable` + + :raises ProgrammingError: + If *sql* contains more than one SQL statement, + or is not a DML statement. + Example: .. testcode:: sqlite3.cursor @@ -1399,10 +1568,28 @@ Cursor objects # cur is an sqlite3.Cursor object cur.executemany("INSERT INTO data VALUES(?)", rows) + .. note:: + + Any resulting rows are discarded, + including DML statements with `RETURNING clauses`_. + + .. _RETURNING clauses: https://www.sqlite.org/lang_returning.html + + .. deprecated-removed:: 3.12 3.14 + + :exc:`DeprecationWarning` is emitted if + :ref:`named placeholders ` are used + and the items in *parameters* are sequences + instead of :class:`dict`\s. + Starting with Python 3.14, :exc:`ProgrammingError` will + be raised instead. + .. method:: executescript(sql_script, /) Execute the SQL statements in *sql_script*. - If there is a pending transaction, + If the :attr:`~Connection.autocommit` is + :data:`LEGACY_TRANSACTION_CONTROL` + and there is a pending transaction, an implicit ``COMMIT`` statement is executed first. No other implicit transaction control is performed; any transaction control must be added to *sql_script*. @@ -1422,10 +1609,9 @@ Cursor objects COMMIT; """) - .. method:: fetchone() - If :attr:`~Connection.row_factory` is ``None``, + If :attr:`~Cursor.row_factory` is ``None``, return the next row query result set as a :class:`tuple`. Else, pass it to the row factory and return its result. Return ``None`` if no more data is available. @@ -1517,7 +1703,26 @@ Cursor objects ``INSERT``, ``UPDATE``, ``DELETE``, and ``REPLACE`` statements; is ``-1`` for other statements, including :abbr:`CTE (Common Table Expression)` queries. - It is only updated by the :meth:`execute` and :meth:`executemany` methods. + It is only updated by the :meth:`execute` and :meth:`executemany` methods, + after the statement has run to completion. + This means that any resulting rows must be fetched in order for + :attr:`!rowcount` to be updated. + + .. attribute:: row_factory + + Control how a row fetched from this :class:`!Cursor` is represented. + If ``None``, a row is represented as a :class:`tuple`. + Can be set to the included :class:`sqlite3.Row`; + or a :term:`callable` that accepts two arguments, + a :class:`Cursor` object and the :class:`!tuple` of row values, + and returns a custom object representing an SQLite row. + + Defaults to what :attr:`Connection.row_factory` was set to + when the :class:`!Cursor` was created. + Assigning to this attribute does not affect + :attr:`Connection.row_factory` of the parent connection. + + See :ref:`sqlite3-howto-row-factory` for more details. .. The sqlite3.Row example used to be a how-to. It has now been incorporated @@ -1537,7 +1742,10 @@ Row objects It supports iteration, equality testing, :func:`len`, and :term:`mapping` access by column name and index. 
- Two row objects compare equal if have equal columns and equal members. + Two :class:`!Row` objects compare equal + if they have identical column names and values. + + See :ref:`sqlite3-howto-row-factory` for more details. .. method:: keys @@ -1548,21 +1756,6 @@ Row objects .. versionchanged:: 3.5 Added support of slicing. - Example: - - .. doctest:: - - >>> con = sqlite3.connect(":memory:") - >>> con.row_factory = sqlite3.Row - >>> res = con.execute("SELECT 'Earth' AS name, 6378 AS radius") - >>> row = res.fetchone() - >>> row.keys() - ['name', 'radius'] - >>> row[0], row["name"] # Access by index and name. - ('Earth', 'Earth') - >>> row["RADIUS"] # Column names are case-insensitive. - 6378 - .. _sqlite3-blob-objects: @@ -1635,9 +1828,9 @@ Blob objects .. method:: seek(offset, origin=os.SEEK_SET, /) Set the current access position of the blob to *offset*. The *origin* - argument defaults to :data:`os.SEEK_SET` (absolute blob positioning). - Other values for *origin* are :data:`os.SEEK_CUR` (seek relative to the - current position) and :data:`os.SEEK_END` (seek relative to the blob’s + argument defaults to :const:`os.SEEK_SET` (absolute blob positioning). + Other values for *origin* are :const:`os.SEEK_CUR` (seek relative to the + current position) and :const:`os.SEEK_END` (seek relative to the blob’s end). @@ -1833,8 +2026,13 @@ The deprecated default adapters and converters consist of: Command-line interface ^^^^^^^^^^^^^^^^^^^^^^ -The :mod:`!sqlite3` module can be invoked as a script +The :mod:`!sqlite3` module can be invoked as a script, +using the interpreter's :option:`-m` switch, in order to provide a simple SQLite shell. +The argument signature is as follows:: + + python -m sqlite3 [-h] [-v] [filename] [sql] + Type ``.quit`` or CTRL-D to exit the shell. .. program:: python -m sqlite3 [-h] [-v] [filename] [sql] @@ -1862,44 +2060,50 @@ How to use placeholders to bind values in SQL queries SQL operations usually need to use values from Python variables. However, beware of using Python's string operations to assemble queries, as they -are vulnerable to `SQL injection attacks`_ (see the `xkcd webcomic -`_ for a humorous example of what can go wrong):: +are vulnerable to `SQL injection attacks`_. For example, an attacker can simply +close the single quote and inject ``OR TRUE`` to select all rows:: - # Never do this -- insecure! - symbol = 'RHAT' - cur.execute("SELECT * FROM stocks WHERE symbol = '%s'" % symbol) + >>> # Never do this -- insecure! + >>> symbol = input() + ' OR TRUE; -- + >>> sql = "SELECT * FROM stocks WHERE symbol = '%s'" % symbol + >>> print(sql) + SELECT * FROM stocks WHERE symbol = '' OR TRUE; --' + >>> cur.execute(sql) Instead, use the DB-API's parameter substitution. To insert a variable into a query string, use a placeholder in the string, and substitute the actual values into the query by providing them as a :class:`tuple` of values to the second -argument of the cursor's :meth:`~Cursor.execute` method. An SQL statement may -use one of two kinds of placeholders: question marks (qmark style) or named -placeholders (named style). For the qmark style, ``parameters`` must be a -:term:`sequence `. For the named style, it can be either a -:term:`sequence ` or :class:`dict` instance. The length of the -:term:`sequence ` must match the number of placeholders, or a -:exc:`ProgrammingError` is raised. If a :class:`dict` is given, it must contain -keys for all named parameters. Any extra items are ignored. 
Here's an example of -both styles: +argument of the cursor's :meth:`~Cursor.execute` method. + +An SQL statement may use one of two kinds of placeholders: +question marks (qmark style) or named placeholders (named style). +For the qmark style, *parameters* must be a +:term:`sequence` whose length must match the number of placeholders, +or a :exc:`ProgrammingError` is raised. +For the named style, *parameters* must be +an instance of a :class:`dict` (or a subclass), +which must contain keys for all named parameters; +any extra items are ignored. +Here's an example of both styles: .. testcode:: con = sqlite3.connect(":memory:") cur = con.execute("CREATE TABLE lang(name, first_appeared)") - # This is the qmark style: - cur.execute("INSERT INTO lang VALUES(?, ?)", ("C", 1972)) - - # The qmark style used with executemany(): - lang_list = [ - ("Fortran", 1957), - ("Python", 1991), - ("Go", 2009), - ] - cur.executemany("INSERT INTO lang VALUES(?, ?)", lang_list) - - # And this is the named style: - cur.execute("SELECT * FROM lang WHERE first_appeared = :year", {"year": 1972}) + # This is the named style used with executemany(): + data = ( + {"name": "C", "year": 1972}, + {"name": "Fortran", "year": 1957}, + {"name": "Python", "year": 1991}, + {"name": "Go", "year": 2009}, + ) + cur.executemany("INSERT INTO lang VALUES(:name, :year)", data) + + # This is the qmark style used in a SELECT query: + params = (1972,) + cur.execute("SELECT * FROM lang WHERE first_appeared = ?", params) print(cur.fetchall()) .. testoutput:: @@ -1907,6 +2111,11 @@ both styles: [('C', 1972)] +.. note:: + + :pep:`249` numeric placeholders are *not* supported. + If used, they will be interpreted as named placeholders. + .. _sqlite3-adapters: @@ -2040,7 +2249,7 @@ The following example illustrates the implicit and explicit approaches: return f"Point({self.x}, {self.y})" def adapt_point(point): - return f"{point.x};{point.y}".encode("utf-8") + return f"{point.x};{point.y}" def convert_point(s): x, y = list(map(float, s.split(b";"))) @@ -2106,20 +2315,39 @@ This section shows recipes for common adapters and converters. def convert_date(val): """Convert ISO 8601 date to datetime.date object.""" - return datetime.date.fromisoformat(val) + return datetime.date.fromisoformat(val.decode()) def convert_datetime(val): """Convert ISO 8601 datetime to datetime.datetime object.""" - return datetime.datetime.fromisoformat(val) + return datetime.datetime.fromisoformat(val.decode()) def convert_timestamp(val): """Convert Unix epoch timestamp to datetime.datetime object.""" - return datetime.datetime.fromtimestamp(val) + return datetime.datetime.fromtimestamp(int(val)) sqlite3.register_converter("date", convert_date) sqlite3.register_converter("datetime", convert_datetime) sqlite3.register_converter("timestamp", convert_timestamp) +.. testcode:: + :hide: + + dt = datetime.datetime(2019, 5, 18, 15, 17, 8, 123456) + + assert adapt_date_iso(dt.date()) == "2019-05-18" + assert convert_date(b"2019-05-18") == dt.date() + + assert adapt_datetime_iso(dt) == "2019-05-18T15:17:08.123456" + assert convert_datetime(b"2019-05-18T15:17:08.123456") == dt + + # Using current time as fromtimestamp() returns local date/time. + # Dropping microseconds as adapt_datetime_epoch truncates fractional second part. + now = datetime.datetime.now().replace(microsecond=0) + current_timestamp = int(now.timestamp()) + + assert adapt_datetime_epoch(now) == current_timestamp + assert convert_timestamp(str(current_timestamp).encode()) == now + .. 
_sqlite3-connection-shortcuts: @@ -2177,14 +2405,17 @@ the transaction is committed. If this commit fails, or if the body of the ``with`` statement raises an uncaught exception, the transaction is rolled back. +If :attr:`~Connection.autocommit` is ``False``, +a new transaction is implicitly opened after committing or rolling back. If there is no open transaction upon leaving the body of the ``with`` statement, -the context manager is a no-op. +or if :attr:`~Connection.autocommit` is ``True``, +the context manager does nothing. .. note:: - The context manager neither implicitly opens a new transaction - nor closes the connection. + nor closes the connection. If you need a closing context manager, consider + using :meth:`contextlib.closing`. .. testcode:: @@ -2259,18 +2490,215 @@ can be found in the `SQLite URI documentation`_. .. _SQLite URI documentation: https://www.sqlite.org/uri.html +.. _sqlite3-howto-row-factory: + +How to create and use row factories +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, :mod:`!sqlite3` represents each row as a :class:`tuple`. +If a :class:`!tuple` does not suit your needs, +you can use the :class:`sqlite3.Row` class +or a custom :attr:`~Cursor.row_factory`. + +While :attr:`!row_factory` exists as an attribute both on the +:class:`Cursor` and the :class:`Connection`, +it is recommended to set :class:`Connection.row_factory`, +so all cursors created from the connection will use the same row factory. + +:class:`!Row` provides indexed and case-insensitive named access to columns, +with minimal memory overhead and performance impact over a :class:`!tuple`. +To use :class:`!Row` as a row factory, +assign it to the :attr:`!row_factory` attribute: + +.. doctest:: + + >>> con = sqlite3.connect(":memory:") + >>> con.row_factory = sqlite3.Row + +Queries now return :class:`!Row` objects: + +.. doctest:: + + >>> res = con.execute("SELECT 'Earth' AS name, 6378 AS radius") + >>> row = res.fetchone() + >>> row.keys() + ['name', 'radius'] + >>> row[0] # Access by index. + 'Earth' + >>> row["name"] # Access by name. + 'Earth' + >>> row["RADIUS"] # Column names are case-insensitive. + 6378 + +.. note:: + + The ``FROM`` clause can be omitted in the ``SELECT`` statement, as in the + above example. In such cases, SQLite returns a single row with columns + defined by expressions, e.g. literals, with the given aliases + ``expr AS alias``. + +You can create a custom :attr:`~Cursor.row_factory` +that returns each row as a :class:`dict`, with column names mapped to values: + +.. testcode:: + + def dict_factory(cursor, row): + fields = [column[0] for column in cursor.description] + return {key: value for key, value in zip(fields, row)} + +Using it, queries now return a :class:`!dict` instead of a :class:`!tuple`: + +.. doctest:: + + >>> con = sqlite3.connect(":memory:") + >>> con.row_factory = dict_factory + >>> for row in con.execute("SELECT 1 AS a, 2 AS b"): + ... print(row) + {'a': 1, 'b': 2} + +The following row factory returns a :term:`named tuple`: + +.. testcode:: + + from collections import namedtuple + + def namedtuple_factory(cursor, row): + fields = [column[0] for column in cursor.description] + cls = namedtuple("Row", fields) + return cls._make(row) + +:func:`!namedtuple_factory` can be used as follows: + +.. doctest:: + + >>> con = sqlite3.connect(":memory:") + >>> con.row_factory = namedtuple_factory + >>> cur = con.execute("SELECT 1 AS a, 2 AS b") + >>> row = cur.fetchone() + >>> row + Row(a=1, b=2) + >>> row[0] # Indexed access. + 1 + >>> row.b # Attribute access. 
+ 2 + +With some adjustments, the above recipe can be adapted to use a +:class:`~dataclasses.dataclass`, or any other custom class, +instead of a :class:`~collections.namedtuple`. + + +.. _sqlite3-howto-encoding: + +How to handle non-UTF-8 text encodings +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, :mod:`!sqlite3` uses :class:`str` to adapt SQLite values +with the ``TEXT`` data type. +This works well for UTF-8 encoded text, but it might fail for other encodings +and invalid UTF-8. +You can use a custom :attr:`~Connection.text_factory` to handle such cases. + +Because of SQLite's `flexible typing`_, it is not uncommon to encounter table +columns with the ``TEXT`` data type containing non-UTF-8 encodings, +or even arbitrary data. +To demonstrate, let's assume we have a database with ISO-8859-2 (Latin-2) +encoded text, for example a table of Czech-English dictionary entries. +Assuming we now have a :class:`Connection` instance :py:data:`!con` +connected to this database, +we can decode the Latin-2 encoded text using this :attr:`~Connection.text_factory`: + +.. testcode:: + + con.text_factory = lambda data: str(data, encoding="latin2") + +For invalid UTF-8 or arbitrary data in stored in ``TEXT`` table columns, +you can use the following technique, borrowed from the :ref:`unicode-howto`: + +.. testcode:: + + con.text_factory = lambda data: str(data, errors="surrogateescape") + +.. note:: + + The :mod:`!sqlite3` module API does not support strings + containing surrogates. + +.. seealso:: + + :ref:`unicode-howto` + + .. _sqlite3-explanation: Explanation ----------- +.. _sqlite3-transaction-control: .. _sqlite3-controlling-transactions: Transaction control ^^^^^^^^^^^^^^^^^^^ -The :mod:`!sqlite3` module does not adhere to the transaction handling recommended -by :pep:`249`. +:mod:`!sqlite3` offers multiple methods of controlling whether, +when and how database transactions are opened and closed. +:ref:`sqlite3-transaction-control-autocommit` is recommended, +while :ref:`sqlite3-transaction-control-isolation-level` +retains the pre-Python 3.12 behaviour. + +.. _sqlite3-transaction-control-autocommit: + +Transaction control via the ``autocommit`` attribute +"""""""""""""""""""""""""""""""""""""""""""""""""""" + +The recommended way of controlling transaction behaviour is through +the :attr:`Connection.autocommit` attribute, +which should preferably be set using the *autocommit* parameter +of :func:`connect`. + +It is suggested to set *autocommit* to ``False``, +which implies :pep:`249`-compliant transaction control. +This means: + +* :mod:`!sqlite3` ensures that a transaction is always open, + so :func:`connect`, :meth:`Connection.commit`, and :meth:`Connection.rollback` + will implicitly open a new transaction + (immediately after closing the pending one, for the latter two). + :mod:`!sqlite3` uses ``BEGIN DEFERRED`` statements when opening transactions. +* Transactions should be committed explicitly using :meth:`!commit`. +* Transactions should be rolled back explicitly using :meth:`!rollback`. +* An implicit rollback is performed if the database is + :meth:`~Connection.close`-ed with pending changes. + +Set *autocommit* to ``True`` to enable SQLite's `autocommit mode`_. +In this mode, :meth:`Connection.commit` and :meth:`Connection.rollback` +have no effect. +Note that SQLite's autocommit mode is distinct from +the :pep:`249`-compliant :attr:`Connection.autocommit` attribute; +use :attr:`Connection.in_transaction` to query +the low-level SQLite autocommit mode. 
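+
+For illustration, here is a minimal sketch of the recommended
+``autocommit=False`` setting, using a throwaway in-memory database
+(the table and its values are purely illustrative):
+
+.. testcode::
+
+   con = sqlite3.connect(":memory:", autocommit=False)
+   con.execute("CREATE TABLE point(x, y)")
+   con.execute("INSERT INTO point VALUES(1, 2)")
+   con.commit()    # Make the first row permanent.
+   con.execute("INSERT INTO point VALUES(3, 4)")
+   con.rollback()  # Discard the uncommitted second row.
+   # Only the committed row remains; a new transaction is already open.
+   assert con.execute("SELECT COUNT(*) FROM point").fetchone() == (1,)
+   con.close()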
+ +Set *autocommit* to :data:`LEGACY_TRANSACTION_CONTROL` +to leave transaction control behaviour to the +:attr:`Connection.isolation_level` attribute. +See :ref:`sqlite3-transaction-control-isolation-level` for more information. + + +.. _sqlite3-transaction-control-isolation-level: + +Transaction control via the ``isolation_level`` attribute +""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +.. note:: + + The recommended way of controlling transactions is via the + :attr:`~Connection.autocommit` attribute. + See :ref:`sqlite3-transaction-control-autocommit`. + +If :attr:`Connection.autocommit` is set to +:data:`LEGACY_TRANSACTION_CONTROL` (the default), +transaction behaviour is controlled using +the :attr:`Connection.isolation_level` attribute. +Otherwise, :attr:`!isolation_level` has no effect. If the connection attribute :attr:`~Connection.isolation_level` is not ``None``, @@ -2301,6 +2729,10 @@ regardless of the value of :attr:`~Connection.isolation_level`. :mod:`!sqlite3` used to implicitly commit an open transaction before DDL statements. This is no longer the case. +.. versionchanged:: 3.12 + The recommended way of controlling transactions is now via the + :attr:`~Connection.autocommit` attribute. + .. _autocommit mode: https://www.sqlite.org/lang_transaction.html#implicit_versus_explicit_transactions diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst index 4e6d06dc38d80c6..0db233e2dde33c0 100644 --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -43,8 +43,10 @@ This module provides a class, :class:`ssl.SSLSocket`, which is derived from the :class:`socket.socket` type, and provides a socket-like wrapper that also encrypts and decrypts the data going over the socket with SSL. It supports additional methods such as :meth:`getpeercert`, which retrieves the -certificate of the other side of the connection, and :meth:`cipher`, which -retrieves the cipher being used for the secure connection. +certificate of the other side of the connection, :meth:`cipher`, which +retrieves the cipher being used for the secure connection or +:meth:`get_verified_chain`, :meth:`get_unverified_chain` which retrieves +certificate chain. For more sophisticated applications, the :class:`ssl.SSLContext` class helps manage settings and certificates, which can then be inherited @@ -139,7 +141,7 @@ purposes. The settings are: :data:`PROTOCOL_TLS_CLIENT` or :data:`PROTOCOL_TLS_SERVER`, :data:`OP_NO_SSLv2`, and :data:`OP_NO_SSLv3` with high encryption cipher suites without RC4 and - without unauthenticated cipher suites. Passing :data:`~Purpose.SERVER_AUTH` + without unauthenticated cipher suites. Passing :const:`~Purpose.SERVER_AUTH` as *purpose* sets :data:`~SSLContext.verify_mode` to :data:`CERT_REQUIRED` and either loads CA certificates (when at least one of *cafile*, *capath* or *cadata* is given) or uses :meth:`SSLContext.load_default_certs` to load @@ -320,7 +322,7 @@ Random generation Mix the given *bytes* into the SSL pseudo-random number generator. The parameter *entropy* (a float) is a lower bound on the entropy contained in - string (so you can always use :const:`0.0`). See :rfc:`1750` for more + string (so you can always use ``0.0``). See :rfc:`1750` for more information on sources of entropy. .. versionchanged:: 3.5 @@ -807,6 +809,29 @@ Constants .. versionadded:: 3.10 +.. data:: OP_ENABLE_KTLS + + Enable the use of the kernel TLS. 
To benefit from the feature, OpenSSL must + have been compiled with support for it, and the negotiated cipher suites and + extensions must be supported by it (a list of supported ones may vary by + platform and kernel version). + + Note that with enabled kernel TLS some cryptographic operations are + performed by the kernel directly and not via any available OpenSSL + Providers. This might be undesirable if, for example, the application + requires all cryptographic operations to be performed by the FIPS provider. + + This option is only available with OpenSSL 3.0.0 and later. + + .. versionadded:: 3.12 + +.. data:: OP_LEGACY_SERVER_CONNECT + + Allow legacy insecure renegotiation between OpenSSL and unpatched servers + only. + + .. versionadded:: 3.12 + .. data:: HAS_ALPN Whether the OpenSSL library has built-in support for the *Application-Layer @@ -883,6 +908,12 @@ Constants .. versionadded:: 3.7 +.. data:: HAS_PSK + + Whether the OpenSSL library has built-in support for TLS-PSK. + + .. versionadded:: 3.13 + .. data:: CHANNEL_BINDING_TYPES List of supported TLS channel binding types. Strings in this list @@ -1031,7 +1062,7 @@ SSL Sockets .. versionchanged:: 3.5 The :meth:`shutdown` does not reset the socket timeout each time bytes - are received or sent. The socket timeout is now to maximum total duration + are received or sent. The socket timeout is now the maximum total duration of the shutdown. .. deprecated:: 3.6 @@ -1064,8 +1095,8 @@ SSL sockets also have the following additional methods and attributes: cause write operations. .. versionchanged:: 3.5 - The socket timeout is no more reset each time bytes are received or sent. - The socket timeout is now to maximum total duration to read up to *len* + The socket timeout is no longer reset each time bytes are received or sent. + The socket timeout is now the maximum total duration to read up to *len* bytes. .. deprecated:: 3.6 @@ -1083,8 +1114,8 @@ SSL sockets also have the following additional methods and attributes: also cause read operations. .. versionchanged:: 3.5 - The socket timeout is no more reset each time bytes are received or sent. - The socket timeout is now to maximum total duration to write *buf*. + The socket timeout is no longer reset each time bytes are received or sent. + The socket timeout is now the maximum total duration to write *buf*. .. deprecated:: 3.6 Use :meth:`~SSLSocket.send` instead of :meth:`~SSLSocket.write`. @@ -1111,14 +1142,14 @@ SSL sockets also have the following additional methods and attributes: :attr:`~SSLSocket.context` is true. .. versionchanged:: 3.5 - The socket timeout is no more reset each time bytes are received or sent. - The socket timeout is now to maximum total duration of the handshake. + The socket timeout is no longer reset each time bytes are received or sent. + The socket timeout is now the maximum total duration of the handshake. .. versionchanged:: 3.7 Hostname or IP address is matched by OpenSSL during handshake. The function :func:`match_hostname` is no longer used. In case OpenSSL refuses a hostname or IP address, the handshake is aborted early and - a TLS alert message is send to the peer. + a TLS alert message is sent to the peer. .. method:: SSLSocket.getpeercert(binary_form=False) @@ -1187,6 +1218,22 @@ SSL sockets also have the following additional methods and attributes: .. versionchanged:: 3.9 IPv6 address strings no longer have a trailing new line. +.. 
method:: SSLSocket.get_verified_chain() + + Returns verified certificate chain provided by the other + end of the SSL channel as a list of DER-encoded bytes. + If certificate verification was disabled method acts the same as + :meth:`~SSLSocket.get_unverified_chain`. + + .. versionadded:: 3.13 + +.. method:: SSLSocket.get_unverified_chain() + + Returns raw certificate chain provided by the other + end of the SSL channel as a list of DER-encoded bytes. + + .. versionadded:: 3.13 + .. method:: SSLSocket.cipher() Returns a three-value tuple containing the name of the cipher being used, the @@ -1195,7 +1242,7 @@ SSL sockets also have the following additional methods and attributes: .. method:: SSLSocket.shared_ciphers() - Return the list of ciphers shared by the client during the handshake. Each + Return the list of ciphers available in both the client and server. Each entry of the returned list is a three-value tuple containing the name of the cipher, the version of the SSL protocol that defines its use, and the number of secret bits the cipher uses. :meth:`~SSLSocket.shared_ciphers` returns @@ -1357,18 +1404,18 @@ to speed up repeated connections from the same clients. Here's a table showing which versions in a client (down the side) can connect to which versions in a server (along the top): - .. table:: + .. table:: - ======================== ============ ============ ============= ========= =========== =========== - *client* / **server** **SSLv2** **SSLv3** **TLS** [3]_ **TLSv1** **TLSv1.1** **TLSv1.2** - ------------------------ ------------ ------------ ------------- --------- ----------- ----------- - *SSLv2* yes no no [1]_ no no no - *SSLv3* no yes no [2]_ no no no - *TLS* (*SSLv23*) [3]_ no [1]_ no [2]_ yes yes yes yes - *TLSv1* no no yes yes no no - *TLSv1.1* no no yes no yes no - *TLSv1.2* no no yes no no yes - ======================== ============ ============ ============= ========= =========== =========== + ======================== ============ ============ ============= ========= =========== =========== + *client* / **server** **SSLv2** **SSLv3** **TLS** [3]_ **TLSv1** **TLSv1.1** **TLSv1.2** + ------------------------ ------------ ------------ ------------- --------- ----------- ----------- + *SSLv2* yes no no [1]_ no no no + *SSLv3* no yes no [2]_ no no no + *TLS* (*SSLv23*) [3]_ no [1]_ no [2]_ yes yes yes yes + *TLSv1* no no yes yes no no + *TLSv1.1* no no yes no yes no + *TLSv1.2* no no yes no no yes + ======================== ============ ============ ============= ========= =========== =========== .. rubric:: Footnotes .. [1] :class:`SSLContext` disables SSLv2 with :data:`OP_NO_SSLv2` by default. @@ -1461,9 +1508,9 @@ to speed up repeated connections from the same clients. load CA certificates from other locations, too. The *purpose* flag specifies what kind of CA certificates are loaded. The - default settings :data:`Purpose.SERVER_AUTH` loads certificates, that are + default settings :const:`Purpose.SERVER_AUTH` loads certificates, that are flagged and trusted for TLS web server authentication (client side - sockets). :data:`Purpose.CLIENT_AUTH` loads CA certificates for client + sockets). :const:`Purpose.CLIENT_AUTH` loads CA certificates for client certificate verification on the server side. .. versionadded:: 3.4 @@ -1633,8 +1680,9 @@ to speed up repeated connections from the same clients. 
Due to the early negotiation phase of the TLS connection, only limited methods and attributes are usable like :meth:`SSLSocket.selected_alpn_protocol` and :attr:`SSLSocket.context`. - The :meth:`SSLSocket.getpeercert`, - :meth:`SSLSocket.cipher` and :meth:`SSLSocket.compression` methods require that + The :meth:`SSLSocket.getpeercert`, :meth:`SSLSocket.get_verified_chain`, + :meth:`SSLSocket.get_unverified_chain` :meth:`SSLSocket.cipher` + and :meth:`SSLSocket.compression` methods require that the TLS connection has progressed beyond the TLS Client Hello and therefore will not return meaningful values nor can they be called safely. @@ -1696,7 +1744,7 @@ to speed up repeated connections from the same clients. .. versionadded:: 3.3 .. seealso:: - `SSL/TLS & Perfect Forward Secrecy `_ + `SSL/TLS & Perfect Forward Secrecy `_ Vincent Bernat. .. method:: SSLContext.wrap_socket(sock, server_side=False, \ @@ -1706,7 +1754,7 @@ to speed up repeated connections from the same clients. Wrap an existing Python socket *sock* and return an instance of :attr:`SSLContext.sslsocket_class` (default :class:`SSLSocket`). The returned SSL socket is tied to the context, its settings and certificates. - *sock* must be a :data:`~socket.SOCK_STREAM` socket; other + *sock* must be a :const:`~socket.SOCK_STREAM` socket; other socket types are unsupported. The parameter ``server_side`` is a boolean which identifies whether @@ -1964,6 +2012,100 @@ to speed up repeated connections from the same clients. >>> ssl.create_default_context().verify_mode # doctest: +SKIP +.. method:: SSLContext.set_psk_client_callback(callback) + + Enables TLS-PSK (pre-shared key) authentication on a client-side connection. + + In general, certificate based authentication should be preferred over this method. + + The parameter ``callback`` is a callable object with the signature: + ``def callback(hint: str | None) -> tuple[str | None, bytes]``. + The ``hint`` parameter is an optional identity hint sent by the server. + The return value is a tuple in the form (client-identity, psk). + Client-identity is an optional string which may be used by the server to + select a corresponding PSK for the client. The string must be less than or + equal to ``256`` octets when UTF-8 encoded. PSK is a + :term:`bytes-like object` representing the pre-shared key. Return a zero + length PSK to reject the connection. + + Setting ``callback`` to :const:`None` removes any existing callback. + + .. note:: + When using TLS 1.3: + + - the ``hint`` parameter is always :const:`None`. + - client-identity must be a non-empty string. + + Example usage:: + + context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + context.maximum_version = ssl.TLSVersion.TLSv1_2 + context.set_ciphers('PSK') + + # A simple lambda: + psk = bytes.fromhex('c0ffee') + context.set_psk_client_callback(lambda hint: (None, psk)) + + # A table using the hint from the server: + psk_table = { 'ServerId_1': bytes.fromhex('c0ffee'), + 'ServerId_2': bytes.fromhex('facade') + } + def callback(hint): + return 'ClientId_1', psk_table.get(hint, b'') + context.set_psk_client_callback(callback) + + This method will raise :exc:`NotImplementedError` if :data:`HAS_PSK` is + ``False``. + + .. versionadded:: 3.13 + +.. method:: SSLContext.set_psk_server_callback(callback, identity_hint=None) + + Enables TLS-PSK (pre-shared key) authentication on a server-side connection. 
+ + In general, certificate based authentication should be preferred over this method. + + The parameter ``callback`` is a callable object with the signature: + ``def callback(identity: str | None) -> bytes``. + The ``identity`` parameter is an optional identity sent by the client which can + be used to select a corresponding PSK. + The return value is a :term:`bytes-like object` representing the pre-shared key. + Return a zero length PSK to reject the connection. + + Setting ``callback`` to :const:`None` removes any existing callback. + + The parameter ``identity_hint`` is an optional identity hint string sent to + the client. The string must be less than or equal to ``256`` octets when + UTF-8 encoded. + + .. note:: + When using TLS 1.3 the ``identity_hint`` parameter is not sent to the client. + + Example usage:: + + context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) + context.maximum_version = ssl.TLSVersion.TLSv1_2 + context.set_ciphers('PSK') + + # A simple lambda: + psk = bytes.fromhex('c0ffee') + context.set_psk_server_callback(lambda identity: psk) + + # A table using the identity of the client: + psk_table = { 'ClientId_1': bytes.fromhex('c0ffee'), + 'ClientId_2': bytes.fromhex('facade') + } + def callback(identity): + return psk_table.get(identity, b'') + context.set_psk_server_callback(callback, 'ServerId_1') + + This method will raise :exc:`NotImplementedError` if :data:`HAS_PSK` is + ``False``. + + .. versionadded:: 3.13 + .. index:: single: certificates .. index:: single: X509 certificate @@ -2391,6 +2533,8 @@ provided. - :meth:`~SSLSocket.read` - :meth:`~SSLSocket.write` - :meth:`~SSLSocket.getpeercert` + - :meth:`~SSLSocket.get_verified_chain` + - :meth:`~SSLSocket.get_unverified_chain` - :meth:`~SSLSocket.selected_alpn_protocol` - :meth:`~SSLSocket.selected_npn_protocol` - :meth:`~SSLSocket.cipher` @@ -2569,7 +2713,7 @@ disabled by default. >>> client_context.maximum_version = ssl.TLSVersion.TLSv1_3 -The SSL context created above will only allow TLSv1.2 and later (if +The SSL context created above will only allow TLSv1.3 and later (if supported by your system) connections to a server. :const:`PROTOCOL_TLS_CLIENT` implies certificate validation and hostname checks by default. You have to load certificates into the context. diff --git a/Doc/library/stat.rst b/Doc/library/stat.rst index 083dc5e3bcfd686..77538514598a509 100644 --- a/Doc/library/stat.rst +++ b/Doc/library/stat.rst @@ -13,8 +13,8 @@ The :mod:`stat` module defines constants and functions for interpreting the results of :func:`os.stat`, :func:`os.fstat` and :func:`os.lstat` (if they -exist). For complete details about the :c:func:`stat`, :c:func:`fstat` and -:c:func:`lstat` calls, consult the documentation for your system. +exist). For complete details about the :c:func:`stat`, :c:func:`!fstat` and +:c:func:`!lstat` calls, consult the documentation for your system. .. versionchanged:: 3.4 The stat module is backed by a C implementation. @@ -89,9 +89,9 @@ mode: .. function:: S_IFMT(mode) Return the portion of the file's mode that describes the file type (used by the - :func:`S_IS\*` functions above). + :func:`!S_IS\*` functions above). -Normally, you would use the :func:`os.path.is\*` functions for testing the type +Normally, you would use the :func:`!os.path.is\*` functions for testing the type of a file; the functions here are useful when you are doing multiple tests of the same file and wish to avoid the overhead of the :c:func:`stat` system call for each test. 
These are also useful when checking for information about a file diff --git a/Doc/library/statistics.rst b/Doc/library/statistics.rst index 88a887960edb587..5c8ad3a7dd73803 100644 --- a/Doc/library/statistics.rst +++ b/Doc/library/statistics.rst @@ -14,6 +14,7 @@ .. testsetup:: * from statistics import * + import math __name__ = '' -------------- @@ -22,7 +23,7 @@ This module provides functions for calculating mathematical statistics of numeric (:class:`~numbers.Real`-valued) data. The module is not intended to be a competitor to third-party libraries such -as `NumPy `_, `SciPy `_, or +as `NumPy `_, `SciPy `_, or proprietary full-featured statistics packages aimed at professional statisticians such as Minitab, SAS and Matlab. It is aimed at the level of graphing and scientific calculators. @@ -584,7 +585,7 @@ However, for reading convenience, most of the examples show sorted sequences. The *data* can be any iterable containing sample data. For meaningful results, the number of data points in *data* should be larger than *n*. - Raises :exc:`StatisticsError` if there are not at least two data points. + Raises :exc:`StatisticsError` if there is not at least one data point. The cut points are linearly interpolated from the two nearest data points. For example, if a cut point falls one-third @@ -624,6 +625,11 @@ However, for reading convenience, most of the examples show sorted sequences. .. versionadded:: 3.8 + .. versionchanged:: 3.13 + No longer raises an exception for an input with only a single data point. + This allows quantile estimates to be built up one sample point + at a time becoming gradually more refined with each new data point. + .. function:: covariance(x, y, /) Return the sample covariance of two inputs *x* and *y*. Covariance @@ -741,6 +747,24 @@ However, for reading convenience, most of the examples show sorted sequences. *y = slope \* x + noise* + Continuing the example from :func:`correlation`, we look to see + how well a model based on major planets can predict the orbital + distances for dwarf planets: + + .. doctest:: + + >>> model = linear_regression(period_squared, dist_cubed, proportional=True) + >>> slope = model.slope + + >>> # Dwarf planets: Pluto, Eris, Makemake, Haumea, Ceres + >>> orbital_periods = [90_560, 204_199, 111_845, 103_410, 1_680] # days + >>> predicted_dist = [math.cbrt(slope * (p * p)) for p in orbital_periods] + >>> list(map(round, predicted_dist)) + [5912, 10166, 6806, 6459, 414] + + >>> [5_906, 10_152, 6_796, 6_450, 414] # actual distance in million km + [5906, 10152, 6796, 6450, 414] + .. versionadded:: 3.10 .. versionchanged:: 3.11 @@ -828,6 +852,11 @@ of applications in statistics. number generator. This is useful for creating reproducible results, even in a multi-threading context. + .. versionchanged:: 3.13 + + Switched to a faster algorithm. To reproduce samples from previous + versions, use :func:`random.seed` and :func:`random.gauss`. + .. method:: NormalDist.pdf(x) Using a `probability density function (pdf) @@ -922,6 +951,10 @@ of applications in statistics. :class:`NormalDist` Examples and Recipes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Classic probability problems +**************************** + :class:`NormalDist` readily solves classic probability problems. 
For example, given `historical data for SAT exams @@ -947,6 +980,10 @@ Find the `quartiles `_ and `deciles >>> list(map(round, sat.quantiles(n=10))) [810, 896, 958, 1011, 1060, 1109, 1162, 1224, 1310] + +Monte Carlo inputs for simulations +********************************** + To estimate the distribution for a model than isn't easy to solve analytically, :class:`NormalDist` can generate input samples for a `Monte Carlo simulation `_: @@ -963,6 +1000,9 @@ Carlo simulation `_: >>> quantiles(map(model, X, Y, Z)) # doctest: +SKIP [1.4591308524824727, 1.8035946855390597, 2.175091447274739] +Approximating binomial distributions +************************************ + Normal distributions can be used to approximate `Binomial distributions `_ when the sample size is large and when the probability of a successful @@ -996,9 +1036,14 @@ probability that the Python room will stay within its capacity limits? >>> seed(8675309) >>> def trial(): ... return choices(('Python', 'Ruby'), (p, q), k=n).count('Python') + ... >>> mean(trial() <= k for i in range(10_000)) 0.8398 + +Naive bayesian classifier +************************* + Normal distributions commonly arise in machine learning problems. Wikipedia has a `nice example of a Naive Bayesian Classifier @@ -1053,6 +1098,48 @@ The final prediction goes to the largest posterior. This is known as the 'female' +Kernel density estimation +************************* + +It is possible to estimate a continuous probability density function +from a fixed number of discrete samples. + +The basic idea is to smooth the data using `a kernel function such as a +normal distribution, triangular distribution, or uniform distribution +`_. +The degree of smoothing is controlled by a single +parameter, ``h``, representing the variance of the kernel function. + +.. testcode:: + + import math + + def kde_normal(sample, h): + "Create a continuous probability density function from a sample." + # Smooth the sample with a normal distribution of variance h. + kernel_h = NormalDist(0.0, math.sqrt(h)).pdf + n = len(sample) + def pdf(x): + return sum(kernel_h(x - x_i) for x_i in sample) / n + return pdf + +`Wikipedia has an example +`_ +where we can use the ``kde_normal()`` recipe to generate and plot +a probability density function estimated from a small sample: + +.. doctest:: + + >>> sample = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2] + >>> f_hat = kde_normal(sample, h=2.25) + >>> xarr = [i/100 for i in range(-750, 1100)] + >>> yarr = [f_hat(x) for x in xarr] + +The points in ``xarr`` and ``yarr`` can be used to make a PDF plot: + +.. image:: kde_example.png + :alt: Scatter plot of the estimated probability density function. + .. # This modelines must appear within the last ten lines of the file. kate: indent-width 3; remove-trailing-space on; replace-tabs on; encoding utf-8; diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst index 6701d794b5111bc..f204b287b565eb3 100644 --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -32,8 +32,8 @@ Truth Value Testing =================== .. index:: - statement: if - statement: while + pair: statement; if + pair: statement; while pair: truth; value pair: Boolean; operations single: false @@ -44,15 +44,15 @@ Any object can be tested for truth value, for use in an :keyword:`if` or .. 
index:: single: true By default, an object is considered true unless its class defines either a -:meth:`__bool__` method that returns ``False`` or a :meth:`__len__` method that +:meth:`~object.__bool__` method that returns ``False`` or a :meth:`__len__` method that returns zero, when called with the object. [1]_ Here are most of the built-in objects considered false: - .. index:: - single: None (Built-in object) - single: False (Built-in object) +.. index:: + single: None (Built-in object) + single: False (Built-in object) -* constants defined to be false: ``None`` and ``False``. +* constants defined to be false: ``None`` and ``False`` * zero of any numeric type: ``0``, ``0.0``, ``0j``, ``Decimal(0)``, ``Fraction(0, 1)`` @@ -61,8 +61,8 @@ objects considered false: ``range(0)`` .. index:: - operator: or - operator: and + pair: operator; or + pair: operator; and single: False single: True @@ -84,8 +84,8 @@ These are the Boolean operations, ordered by ascending priority: +-------------+---------------------------------+-------+ | Operation | Result | Notes | +=============+=================================+=======+ -| ``x or y`` | if *x* is false, then *y*, else | \(1) | -| | *x* | | +| ``x or y`` | if *x* is true, then *x*, else | \(1) | +| | *y* | | +-------------+---------------------------------+-------+ | ``x and y`` | if *x* is false, then *x*, else | \(2) | | | *y* | | @@ -95,9 +95,9 @@ These are the Boolean operations, ordered by ascending priority: +-------------+---------------------------------+-------+ .. index:: - operator: and - operator: or - operator: not + pair: operator; and + pair: operator; or + pair: operator; not Notes: @@ -122,14 +122,14 @@ Comparisons .. index:: pair: chaining; comparisons pair: operator; comparison - operator: == - operator: < (less) - operator: <= - operator: > (greater) - operator: >= - operator: != - operator: is - operator: is not + pair: operator; == + pair: operator; < (less) + pair: operator; <= + pair: operator; > (greater) + pair: operator; >= + pair: operator; != + pair: operator; is + pair: operator; is not There are eight comparison operations in Python. They all have the same priority (which is higher than that of the Boolean operations). Comparisons can @@ -192,8 +192,8 @@ customized; also they can be applied to any two objects and never raise an exception. .. index:: - operator: in - operator: not in + pair: operator; in + pair: operator; not in Two more operations with the same syntactic priority, :keyword:`in` and :keyword:`not in`, are supported by types that are :term:`iterable` or @@ -205,11 +205,11 @@ Numeric Types --- :class:`int`, :class:`float`, :class:`complex` ================================================================ .. index:: - object: numeric - object: Boolean - object: integer - object: floating point - object: complex number + pair: object; numeric + pair: object; Boolean + pair: object; integer + pair: object; floating point + pair: object; complex number pair: C; language There are three distinct numeric types: :dfn:`integers`, :dfn:`floating @@ -244,20 +244,20 @@ and imaginary parts. .. 
index:: single: arithmetic - builtin: int - builtin: float - builtin: complex + pair: built-in function; int + pair: built-in function; float + pair: built-in function; complex single: operator; + (plus) single: + (plus); unary operator single: + (plus); binary operator single: operator; - (minus) single: - (minus); unary operator single: - (minus); binary operator - operator: * (asterisk) - operator: / (slash) - operator: // - operator: % (percent) - operator: ** + pair: operator; * (asterisk) + pair: operator; / (slash) + pair: operator; // + pair: operator; % (percent) + pair: operator; ** Python fully supports mixed arithmetic: when a binary arithmetic operator has operands of different numeric types, the operand with the "narrower" type is @@ -282,7 +282,7 @@ the operations, see :ref:`operator-summary`): +---------------------+---------------------------------+---------+--------------------+ | ``x / y`` | quotient of *x* and *y* | | | +---------------------+---------------------------------+---------+--------------------+ -| ``x // y`` | floored quotient of *x* and | \(1) | | +| ``x // y`` | floored quotient of *x* and | \(1)\(2)| | | | *y* | | | +---------------------+---------------------------------+---------+--------------------+ | ``x % y`` | remainder of ``x / y`` | \(2) | | @@ -319,8 +319,10 @@ the operations, see :ref:`operator-summary`): Notes: (1) - Also referred to as integer division. The resultant value is a whole - integer, though the result's type is not necessarily int. The result is + Also referred to as integer division. For operands of type :class:`int`, + the result has type :class:`int`. For operands of type :class:`float`, + the result has type :class:`float`. In general, the result is a whole + integer, though the result's type is not necessarily :class:`int`. The result is always rounded towards minus infinity: ``1//2`` is ``0``, ``(-1)//2`` is ``-1``, ``1//(-2)`` is ``-1``, and ``(-1)//(-2)`` is ``0``. @@ -330,16 +332,15 @@ Notes: (3) .. index:: - module: math + pair: module; math single: floor() (in module math) single: ceil() (in module math) single: trunc() (in module math) pair: numeric; conversions - pair: C; language - Conversion from floating point to integer may round or truncate - as in C; see functions :func:`math.floor` and :func:`math.ceil` for - well-defined conversions. + Conversion from :class:`float` to :class:`int` truncates, discarding the + fractional part. See functions :func:`math.floor` and :func:`math.ceil` for + alternative conversions. (4) float also accepts the strings "nan" and "inf" with an optional prefix "+" @@ -353,7 +354,7 @@ Notes: The numeric literals accepted include the digits ``0`` to ``9`` or any Unicode equivalent (code points with the ``Nd`` property). - See https://www.unicode.org/Public/15.0.0/ucd/extracted/DerivedNumericType.txt + See `the Unicode Standard `_ for a complete list of code points with the ``Nd`` property. @@ -393,12 +394,12 @@ Bitwise Operations on Integer Types pair: bitwise; operations pair: shifting; operations pair: masking; operations - operator: | (vertical bar) - operator: ^ (caret) - operator: & (ampersand) - operator: << - operator: >> - operator: ~ (tilde) + pair: operator; | (vertical bar) + pair: operator; ^ (caret) + pair: operator; & (ampersand) + pair: operator; << + pair: operator; >> + pair: operator; ~ (tilde) Bitwise operations only make sense for integers. The result of bitwise operations is calculated as though carried out in two's complement with an @@ -530,12 +531,14 @@ class`. 
In addition, it provides a few more methods: is ``False``. The default values can be used to conveniently turn an integer into a - single byte object. However, when using the default arguments, don't try - to convert a value greater than 255 or you'll get an :exc:`OverflowError`:: + single byte object:: >>> (65).to_bytes() b'A' + However, when using the default arguments, don't try + to convert a value greater than 255 or you'll get an :exc:`OverflowError`. + Equivalent to:: def to_bytes(n, length=1, byteorder='big', signed=False): @@ -602,13 +605,19 @@ class`. In addition, it provides a few more methods: .. method:: int.as_integer_ratio() - Return a pair of integers whose ratio is exactly equal to the original - integer and with a positive denominator. The integer ratio of integers + Return a pair of integers whose ratio is equal to the original + integer and has a positive denominator. The integer ratio of integers (whole numbers) is always the integer as the numerator and ``1`` as the denominator. .. versionadded:: 3.8 +.. method:: int.is_integer() + + Returns ``True``. Exists for duck type compatibility with :meth:`float.is_integer`. + + .. versionadded:: 3.12 + Additional Methods on Float --------------------------- @@ -618,7 +627,7 @@ class`. float also has the following additional methods. .. method:: float.as_integer_ratio() Return a pair of integers whose ratio is exactly equal to the - original float and with a positive denominator. Raises + original float. The ratio is in lowest terms and has a positive denominator. Raises :exc:`OverflowError` on infinities and a :exc:`ValueError` on NaNs. @@ -795,6 +804,40 @@ number, :class:`float`, or :class:`complex`:: hash_value = -2 return hash_value +.. _bltin-boolean-values: +.. _typebool: + +Boolean Type - :class:`bool` +============================ + +Booleans represent truth values. The :class:`bool` type has exactly two +constant instances: ``True`` and ``False``. + +.. index:: + single: False + single: True + pair: Boolean; values + +The built-in function :func:`bool` converts any value to a boolean, if the +value can be interpreted as a truth value (see section :ref:`truth` above). + +For logical operations, use the :ref:`boolean operators ` ``and``, +``or`` and ``not``. +When applying the bitwise operators ``&``, ``|``, ``^`` to two booleans, they +return a bool equivalent to the logical operations "and", "or", "xor". However, +the logical operators ``and``, ``or`` and ``!=`` should be preferred +over ``&``, ``|`` and ``^``. + +.. deprecated:: 3.12 + + The use of the bitwise inversion operator ``~`` is deprecated and will + raise an error in Python 3.14. + +:class:`bool` is a subclass of :class:`int` (see :ref:`typesnumeric`). In +many numeric contexts, ``False`` and ``True`` behave like the integers 0 and 1, respectively. +However, relying on this is discouraged; explicitly convert using :func:`int` +instead. + .. _typeiter: Iterator Types @@ -887,7 +930,7 @@ described in dedicated sections. Common Sequence Operations -------------------------- -.. index:: object: sequence +.. index:: pair: object; sequence The operations in the following table are supported by most sequence types, both mutable and immutable. The :class:`collections.abc.Sequence` ABC is @@ -905,15 +948,15 @@ operations have the same priority as the corresponding numeric operations. [3]_ .. 
index:: triple: operations on; sequence; types - builtin: len - builtin: min - builtin: max + pair: built-in function; len + pair: built-in function; min + pair: built-in function; max pair: concatenation; operation pair: repetition; operation pair: subscript; operation pair: slice; operation - operator: in - operator: not in + pair: operator; in + pair: operator; not in single: count() (sequence method) single: index() (sequence method) @@ -1072,8 +1115,8 @@ Immutable Sequence Types .. index:: triple: immutable; sequence; types - object: tuple - builtin: hash + pair: object; tuple + pair: built-in function; hash The only operation that immutable sequence types generally implement that is not also implemented by mutable sequence types is support for the :func:`hash` @@ -1094,8 +1137,8 @@ Mutable Sequence Types .. index:: triple: mutable; sequence; types - object: list - object: bytearray + pair: object; list + pair: object; bytearray The operations in the following table are defined on mutable sequence types. The :class:`collections.abc.MutableSequence` ABC is provided to make it @@ -1112,7 +1155,7 @@ accepts integers that meet the value restriction ``0 <= x <= 255``). triple: operations on; list; type pair: subscript; assignment pair: slice; assignment - statement: del + pair: statement; del single: append() (sequence method) single: clear() (sequence method) single: copy() (sequence method) @@ -1212,7 +1255,7 @@ Notes: Lists ----- -.. index:: object: list +.. index:: pair: object; list Lists are mutable sequences, typically used to store collections of homogeneous items (where the precise degree of similarity will vary by @@ -1291,7 +1334,7 @@ application). Tuples ------ -.. index:: object: tuple +.. index:: pair: object; tuple Tuples are immutable sequences, typically used to store collections of heterogeneous data (such as the 2-tuples produced by the :func:`enumerate` @@ -1335,7 +1378,7 @@ choice than a simple tuple object. Ranges ------ -.. index:: object: range +.. index:: pair: object; range The :class:`range` type represents an immutable sequence of numbers and is commonly used for looping a specific number of times in :keyword:`for` @@ -1460,7 +1503,7 @@ objects that compare equal might have different :attr:`~range.start`, .. index:: single: string; text sequence type single: str (built-in class); (see also string) - object: string + pair: object; string .. _textseq: @@ -1494,7 +1537,7 @@ Since there is no separate "character" type, indexing a string produces strings of length 1. That is, for a non-empty string *s*, ``s[0] == s[0:1]``. .. index:: - object: io.StringIO + pair: object; io.StringIO There is also no mutable string type, but :meth:`str.join` or :class:`io.StringIO` can be used to efficiently construct strings from @@ -1522,7 +1565,7 @@ multiple fragments. printable string representation of *object*. For string objects, this is the string itself. If *object* does not have a :meth:`~object.__str__` method, then :func:`str` falls back to returning - :meth:`repr(object) `. + :func:`repr(object) `. .. index:: single: buffer protocol; str (built-in class) @@ -1560,7 +1603,7 @@ String Methods -------------- .. index:: - module: re + pair: module; re Strings implement all of the :ref:`common ` sequence operations, along with the additional methods described below. @@ -1597,8 +1640,9 @@ expression support in the :mod:`re` module). lowercase, :meth:`lower` would do nothing to ``'ß'``; :meth:`casefold` converts it to ``"ss"``. 
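+
+   For example, contrasting it with :meth:`lower` on the character mentioned
+   above::
+
+      >>> "ß".casefold()
+      'ss'
+      >>> "ß".lower()
+      'ß'
+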
- The casefolding algorithm is described in section 3.13 of the Unicode - Standard. + The casefolding algorithm is + `described in section 3.13 'Default Case Folding' of the Unicode Standard + `__. .. versionadded:: 3.3 @@ -1617,28 +1661,34 @@ expression support in the :mod:`re` module). range [*start*, *end*]. Optional arguments *start* and *end* are interpreted as in slice notation. + If *sub* is empty, returns the number of empty strings between characters + which is the length of the string plus one. + .. method:: str.encode(encoding="utf-8", errors="strict") - Return an encoded version of the string as a bytes object. Default encoding - is ``'utf-8'``. *errors* may be given to set a different error handling scheme. - The default for *errors* is ``'strict'``, meaning that encoding errors raise - a :exc:`UnicodeError`. Other possible - values are ``'ignore'``, ``'replace'``, ``'xmlcharrefreplace'``, - ``'backslashreplace'`` and any other name registered via - :func:`codecs.register_error`, see section :ref:`error-handlers`. For a - list of possible encodings, see section :ref:`standard-encodings`. + Return the string encoded to :class:`bytes`. + + *encoding* defaults to ``'utf-8'``; + see :ref:`standard-encodings` for possible values. - By default, the *errors* argument is not checked for best performances, but - only used at the first encoding error. Enable the :ref:`Python Development - Mode `, or use a :ref:`debug build ` to check - *errors*. + *errors* controls how encoding errors are handled. + If ``'strict'`` (the default), a :exc:`UnicodeError` exception is raised. + Other possible values are ``'ignore'``, + ``'replace'``, ``'xmlcharrefreplace'``, ``'backslashreplace'`` and any + other name registered via :func:`codecs.register_error`. + See :ref:`error-handlers` for details. + + For performance reasons, the value of *errors* is not checked for validity + unless an encoding error actually occurs, + :ref:`devmode` is enabled + or a :ref:`debug build ` is used. .. versionchanged:: 3.1 - Support for keyword arguments added. + Added support for keyword arguments. .. versionchanged:: 3.9 - The *errors* is now checked in development mode and + The value of the *errors* argument is now checked in :ref:`devmode` and in :ref:`debug mode `. @@ -1754,7 +1804,9 @@ expression support in the :mod:`re` module). one character, ``False`` otherwise. Alphabetic characters are those characters defined in the Unicode character database as "Letter", i.e., those with general category property being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note that this is different - from the "Alphabetic" property defined in the Unicode Standard. + from the `Alphabetic property defined in the section 4.10 'Letters, Alphabetic, and + Ideographic' of the Unicode Standard + `_. .. method:: str.isascii() @@ -1791,7 +1843,7 @@ expression support in the :mod:`re` module). Return ``True`` if the string is a valid identifier according to the language definition, section :ref:`identifiers`. - Call :func:`keyword.iskeyword` to test whether string ``s`` is a reserved + :func:`keyword.iskeyword` can be used to test whether string ``s`` is a reserved identifier, such as :keyword:`def` and :keyword:`class`. Example: @@ -1888,8 +1940,9 @@ expression support in the :mod:`re` module). Return a copy of the string with all the cased characters [4]_ converted to lowercase. - The lowercasing algorithm used is described in section 3.13 of the Unicode - Standard. 
+ The lowercasing algorithm used is + `described in section 3.13 'Default Case Folding' of the Unicode Standard + `__. .. method:: str.lstrip([chars]) @@ -1964,11 +2017,14 @@ expression support in the :mod:`re` module). .. versionadded:: 3.9 -.. method:: str.replace(old, new[, count]) +.. method:: str.replace(old, new, count=-1) Return a copy of the string with all occurrences of substring *old* replaced by - *new*. If the optional argument *count* is given, only the first *count* - occurrences are replaced. + *new*. If *count* is given, only the first *count* occurrences are replaced. + If *count* is not specified or ``-1``, then all occurrences are replaced. + + .. versionchanged:: 3.13 + *count* is now supported as a keyword argument. .. method:: str.rfind(sub[, start[, end]]) @@ -2211,7 +2267,7 @@ expression support in the :mod:`re` module). Return a copy of the string in which each character has been mapped through the given translation table. The table must be an object that implements - indexing via :meth:`__getitem__`, typically a :term:`mapping` or + indexing via :meth:`~object.__getitem__`, typically a :term:`mapping` or :term:`sequence`. When indexed by a Unicode ordinal (an integer), the table object can do any of the following: return a Unicode ordinal or a string, to map the character to one or more other characters; return @@ -2233,8 +2289,9 @@ expression support in the :mod:`re` module). character(s) is not "Lu" (Letter, uppercase), but e.g. "Lt" (Letter, titlecase). - The uppercasing algorithm used is described in section 3.13 of the Unicode - Standard. + The uppercasing algorithm used is + `described in section 3.13 'Default Case Folding' of the Unicode Standard + `__. .. method:: str.zfill(width) @@ -2457,10 +2514,10 @@ Binary Sequence Types --- :class:`bytes`, :class:`bytearray`, :class:`memoryview ================================================================================= .. index:: - object: bytes - object: bytearray - object: memoryview - module: array + pair: object; bytes + pair: object; bytearray + pair: object; memoryview + pair: module; array The core built-in types for manipulating binary data are :class:`bytes` and :class:`bytearray`. They are supported by :class:`memoryview` which uses @@ -2475,7 +2532,7 @@ The :mod:`array` module supports efficient storage of basic data types like Bytes Objects ------------- -.. index:: object: bytes +.. index:: pair: object; bytes Bytes objects are immutable sequences of single bytes. Since many major binary protocols are based on the ASCII text encoding, bytes objects offer @@ -2582,7 +2639,7 @@ always convert a bytes object into a list of integers using ``list(b)``. Bytearray Objects ----------------- -.. index:: object: bytearray +.. index:: pair: object; bytearray :class:`bytearray` objects are a mutable counterpart to :class:`bytes` objects. @@ -2698,6 +2755,9 @@ arbitrary binary data. The subsequence to search for may be any :term:`bytes-like object` or an integer in the range 0 to 255. + If *sub* is empty, returns the number of empty slices between characters + which is the length of the bytes object plus one. + .. versionchanged:: 3.3 Also accept an integer in the range 0 to 255 as the subsequence. @@ -2749,29 +2809,32 @@ arbitrary binary data. .. method:: bytes.decode(encoding="utf-8", errors="strict") bytearray.decode(encoding="utf-8", errors="strict") - Return a string decoded from the given bytes. Default encoding is - ``'utf-8'``. *errors* may be given to set a different - error handling scheme. 
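   As a rough sketch of the error handling discussed here (the byte strings are
   arbitrary examples)::

      >>> b'caf\xc3\xa9'.decode()                       # UTF-8 is the default
      'café'
      >>> b'caf\xe9'.decode('utf-8', errors='replace')  # not valid UTF-8
      'caf�'
      >>> b'caf\xe9'.decode('latin-1')                  # correct codec, no errors
      'café'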
The default for *errors* is ``'strict'``, meaning - that encoding errors raise a :exc:`UnicodeError`. Other possible values are - ``'ignore'``, ``'replace'`` and any other name registered via - :func:`codecs.register_error`, see section :ref:`error-handlers`. For a - list of possible encodings, see section :ref:`standard-encodings`. + Return the bytes decoded to a :class:`str`. - By default, the *errors* argument is not checked for best performances, but - only used at the first decoding error. Enable the :ref:`Python Development - Mode `, or use a :ref:`debug build ` to check *errors*. + *encoding* defaults to ``'utf-8'``; + see :ref:`standard-encodings` for possible values. + + *errors* controls how decoding errors are handled. + If ``'strict'`` (the default), a :exc:`UnicodeError` exception is raised. + Other possible values are ``'ignore'``, ``'replace'``, + and any other name registered via :func:`codecs.register_error`. + See :ref:`error-handlers` for details. + + For performance reasons, the value of *errors* is not checked for validity + unless a decoding error actually occurs, + :ref:`devmode` is enabled or a :ref:`debug build ` is used. .. note:: Passing the *encoding* argument to :class:`str` allows decoding any :term:`bytes-like object` directly, without needing to make a temporary - bytes or bytearray object. + :class:`!bytes` or :class:`!bytearray` object. .. versionchanged:: 3.1 Added support for keyword arguments. .. versionchanged:: 3.9 - The *errors* is now checked in development mode and + The value of the *errors* argument is now checked in :ref:`devmode` and in :ref:`debug mode `. @@ -3691,12 +3754,15 @@ copying. types such as :class:`bytes` and :class:`bytearray`, an element is a single byte, but other types such as :class:`array.array` may have bigger elements. - ``len(view)`` is equal to the length of :class:`~memoryview.tolist`. - If ``view.ndim = 0``, the length is 1. If ``view.ndim = 1``, the length - is equal to the number of elements in the view. For higher dimensions, - the length is equal to the length of the nested list representation of - the view. The :class:`~memoryview.itemsize` attribute will give you the - number of bytes in a single element. + ``len(view)`` is equal to the length of :class:`~memoryview.tolist`, which + is the nested list representation of the view. If ``view.ndim = 1``, + this is equal to the number of elements in the view. + + .. versionchanged:: 3.12 + If ``view.ndim == 0``, ``len(view)`` now raises :exc:`TypeError` instead of returning 1. + + The :class:`~memoryview.itemsize` attribute will give you the number of + bytes in a single element. A :class:`memoryview` supports slicing and indexing to expose its data. One-dimensional slicing will result in a subview:: @@ -3753,7 +3819,7 @@ copying. >>> data bytearray(b'z1spam') - One-dimensional memoryviews of hashable (read-only) types with formats + One-dimensional memoryviews of :term:`hashable` (read-only) types with formats 'B', 'b' or 'c' are also hashable. The hash is defined as ``hash(m) == hash(m.tobytes())``:: @@ -3767,7 +3833,7 @@ copying. .. versionchanged:: 3.3 One-dimensional memoryviews can now be sliced. - One-dimensional memoryviews with formats 'B', 'b' or 'c' are now hashable. + One-dimensional memoryviews with formats 'B', 'b' or 'c' are now :term:`hashable`. .. versionchanged:: 3.4 memoryview is now registered automatically with @@ -3890,7 +3956,7 @@ copying. 
>>> m = memoryview(bytearray(b'abc')) >>> mm = m.toreadonly() >>> mm.tolist() - [89, 98, 99] + [97, 98, 99] >>> mm[0] = 42 Traceback (most recent call last): File "", line 1, in @@ -3946,6 +4012,7 @@ copying. :mod:`struct` syntax. One of the formats must be a byte format ('B', 'b' or 'c'). The byte length of the result must be the same as the original length. + Note that all byte lengths may depend on the operating system. Cast 1D/long to 1D/unsigned bytes:: @@ -3976,8 +4043,8 @@ copying. >>> x = memoryview(b) >>> x[0] = b'a' Traceback (most recent call last): - File "", line 1, in - ValueError: memoryview: invalid value for format "B" + ... + TypeError: memoryview: invalid type for format 'B' >>> y = x.cast('c') >>> y[0] = b'a' >>> b @@ -4152,7 +4219,7 @@ copying. Set Types --- :class:`set`, :class:`frozenset` ============================================== -.. index:: object: set +.. index:: pair: object; set A :dfn:`set` object is an unordered collection of distinct :term:`hashable` objects. Common uses include membership testing, removing duplicates from a sequence, and @@ -4354,12 +4421,12 @@ Mapping Types --- :class:`dict` =============================== .. index:: - object: mapping - object: dictionary + pair: object; mapping + pair: object; dictionary triple: operations on; mapping; types triple: operations on; dictionary; type - statement: del - builtin: len + pair: statement; del + pair: built-in function; len A :term:`mapping` object maps :term:`hashable` values to arbitrary objects. Mappings are mutable objects. There is currently only one standard mapping @@ -4449,6 +4516,7 @@ can be used interchangeably to index the same dictionary entry. >>> class Counter(dict): ... def __missing__(self, key): ... return 0 + ... >>> c = Counter() >>> c['red'] 0 @@ -4687,14 +4755,17 @@ support membership tests: .. versionadded:: 3.10 -Keys views are set-like since their entries are unique and hashable. If all -values are hashable, so that ``(key, value)`` pairs are unique and hashable, -then the items view is also set-like. (Values views are not treated as set-like +Keys views are set-like since their entries are unique and :term:`hashable`. +Items views also have set-like operations since the (key, value) pairs +are unique and the keys are hashable. +If all values in an items view are hashable as well, +then the items view can interoperate with other sets. +(Values views are not treated as set-like since the entries are generally not unique.) For set-like views, all of the operations defined for the abstract base class :class:`collections.abc.Set` are available (for example, ``==``, ``<``, or ``^``). While using set operators, -set-like views accept any iterable as the other operand, unlike sets which only -accept sets as the input. +set-like views accept any iterable as the other operand, +unlike sets which only accept sets as the input. An example of dictionary view usage:: @@ -4706,6 +4777,7 @@ An example of dictionary view usage:: >>> n = 0 >>> for val in values: ... n += val + ... 
>>> print(n) 504 @@ -4724,14 +4796,14 @@ An example of dictionary view usage:: >>> # set operations >>> keys & {'eggs', 'bacon', 'salad'} {'bacon'} - >>> keys ^ {'sausage', 'juice'} - {'juice', 'sausage', 'bacon', 'spam'} - >>> keys | ['juice', 'juice', 'juice'] - {'juice', 'sausage', 'bacon', 'spam', 'eggs'} + >>> keys ^ {'sausage', 'juice'} == {'juice', 'sausage', 'bacon', 'spam'} + True + >>> keys | ['juice', 'juice', 'juice'] == {'bacon', 'spam', 'juice'} + True >>> # get back a read-only proxy for the original dictionary >>> values.mapping - mappingproxy({'eggs': 2, 'sausage': 1, 'bacon': 1, 'spam': 500}) + mappingproxy({'bacon': 1, 'spam': 500}) >>> values.mapping['spam'] 500 @@ -4788,7 +4860,7 @@ before the statement body is executed and exited when the statement ends: The exception passed in should never be reraised explicitly - instead, this method should return a false value to indicate that the method completed successfully and does not want to suppress the raised exception. This allows - context management code to easily detect whether or not an :meth:`__exit__` + context management code to easily detect whether or not an :meth:`~object.__exit__` method has actually failed. Python defines several context managers to support easy thread synchronisation, @@ -4827,7 +4899,7 @@ Generic Alias Type ------------------ .. index:: - object: GenericAlias + pair: object; GenericAlias pair: Generic; Alias ``GenericAlias`` objects are generally created by @@ -4934,8 +5006,8 @@ exception to disallow mistakes like ``dict[str][str]``:: >>> dict[str][str] Traceback (most recent call last): - File "", line 1, in - TypeError: There are no type variables left in dict[str] + ... + TypeError: dict[str] is not a generic class However, such expressions are valid when :ref:`type variables ` are used. The index must have as many elements as there are type variable items @@ -5082,7 +5154,7 @@ Union Type ---------- .. index:: - object: Union + pair: object; Union pair: union; type A union object holds the value of the ``|`` (bitwise or) operation on @@ -5100,6 +5172,14 @@ enables cleaner type hinting syntax compared to :data:`typing.Union`. def square(number: int | float) -> int | float: return number ** 2 + .. note:: + + The ``|`` operand cannot be used at runtime to define unions where one or + more members is a forward reference. For example, ``int | "Foo"``, where + ``"Foo"`` is a reference to a class not yet defined, will fail at + runtime. For unions which include forward references, present the + whole expression as a string, e.g. ``"int | Foo"``. + .. describe:: union_object == other Union objects can be tested for equality with other union objects. Details: @@ -5133,13 +5213,15 @@ enables cleaner type hinting syntax compared to :data:`typing.Union`. >>> isinstance("", int | str) True - However, union objects containing :ref:`parameterized generics - ` cannot be used:: + However, :ref:`parameterized generics ` in + union objects cannot be checked:: - >>> isinstance(1, int | list[int]) + >>> isinstance(1, int | list[int]) # short-circuit evaluation + True + >>> isinstance([1], int | list[int]) Traceback (most recent call last): - File "", line 1, in - TypeError: isinstance() argument 2 cannot contain a parameterized generic + ... + TypeError: isinstance() argument 2 cannot be a parameterized generic The user-exposed type for the union object can be accessed from :data:`types.UnionType` and used for :func:`isinstance` checks. 
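   For instance (Python 3.10 or later)::

      >>> import types
      >>> isinstance(int | str, types.UnionType)
      True
      >>> type(int | str) is types.UnionType
      True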
An object cannot be @@ -5239,7 +5321,7 @@ See :ref:`function` for more information. Methods ------- -.. index:: object: method +.. index:: pair: object; method Methods are functions that are called using the attribute notation. There are two flavors: built-in methods (such as :meth:`append` on lists) and class @@ -5286,7 +5368,7 @@ Code Objects ------------ .. index:: - builtin: compile + pair: built-in function; compile single: __code__ (function object attribute) Code objects are used by the implementation to represent "pseudo-compiled" @@ -5300,8 +5382,8 @@ Accessing ``__code__`` raises an :ref:`auditing event ` ``object.__getattr__`` with arguments ``obj`` and ``"__code__"``. .. index:: - builtin: exec - builtin: eval + pair: built-in function; exec + pair: built-in function; eval A code object can be executed or evaluated by passing it (instead of a source string) to the :func:`exec` or :func:`eval` built-in functions. @@ -5315,8 +5397,8 @@ Type Objects ------------ .. index:: - builtin: type - module: types + pair: built-in function; type + pair: module; types Type objects represent the various object types. An object's type is accessed by the built-in function :func:`type`. There are no special operations on @@ -5365,27 +5447,6 @@ information. There is exactly one ``NotImplemented`` object. It is written as ``NotImplemented``. -.. _bltin-boolean-values: - -Boolean Values --------------- - -Boolean values are the two constant objects ``False`` and ``True``. They are -used to represent truth values (although other values can also be considered -false or true). In numeric contexts (for example when used as the argument to -an arithmetic operator), they behave like the integers 0 and 1, respectively. -The built-in function :func:`bool` can be used to convert any value to a -Boolean, if the value can be interpreted as a truth value (see section -:ref:`truth` above). - -.. index:: - single: False - single: True - pair: Boolean; values - -They are written as ``False`` and ``True``, respectively. - - .. _typesinternal: Internal Objects @@ -5435,6 +5496,14 @@ types, where they are relevant. Some of these are not reported by the .. versionadded:: 3.3 +.. attribute:: definition.__type_params__ + + The :ref:`type parameters ` of generic classes, functions, + and :ref:`type aliases `. + + .. versionadded:: 3.12 + + .. attribute:: class.__mro__ This attribute is a tuple of classes that are considered when looking for @@ -5455,7 +5524,7 @@ types, where they are relevant. Some of these are not reported by the definition order. Example:: >>> int.__subclasses__() - [] + [, , , ] .. _int_max_str_digits: @@ -5468,7 +5537,7 @@ to mitigate denial of service attacks. This limit *only* applies to decimal or other non-power-of-two number bases. Hexadecimal, octal, and binary conversions are unlimited. The limit can be configured. -The :class:`int` type in CPython is an abitrary length number stored in binary +The :class:`int` type in CPython is an arbitrary length number stored in binary form (commonly known as a "bignum"). There exists no algorithm that can convert a string to a binary integer or a binary integer to a string in linear time, *unless* the base is a power of 2. Even the best known algorithms for base 10 @@ -5491,7 +5560,7 @@ When an operation would exceed the limit, a :exc:`ValueError` is raised: >>> _ = int('2' * 5432) Traceback (most recent call last): ... 
- ValueError: Exceeds the limit (4300) for integer string conversion: value has 5432 digits; use sys.set_int_max_str_digits() to increase the limit. + ValueError: Exceeds the limit (4300 digits) for integer string conversion: value has 5432 digits; use sys.set_int_max_str_digits() to increase the limit >>> i = int('2' * 4300) >>> len(str(i)) 4300 @@ -5499,7 +5568,7 @@ When an operation would exceed the limit, a :exc:`ValueError` is raised: >>> len(str(i_squared)) Traceback (most recent call last): ... - ValueError: Exceeds the limit (4300) for integer string conversion: value has 8599 digits; use sys.set_int_max_str_digits() to increase the limit. + ValueError: Exceeds the limit (4300 digits) for integer string conversion; use sys.set_int_max_str_digits() to increase the limit >>> len(hex(i_squared)) 7144 >>> assert int(hex(i_squared), base=16) == i*i # Hexadecimal is unlimited. @@ -5532,7 +5601,7 @@ and :class:`str` or :class:`bytes`: * ``int(string)`` with default base 10. * ``int(string, base)`` for all bases that are not a power of 2. * ``str(integer)``. -* ``repr(integer)`` +* ``repr(integer)``. * any other string conversion to base 10, for example ``f"{integer}"``, ``"{}".format(integer)``, or ``b"%d" % integer``. @@ -5560,7 +5629,7 @@ command line flag to configure the limit: :envvar:`PYTHONINTMAXSTRDIGITS` or :option:`-X int_max_str_digits <-X>`. If both the env var and the ``-X`` option are set, the ``-X`` option takes precedence. A value of *-1* indicates that both were unset, thus a value of - :data:`sys.int_info.default_max_str_digits` was used during initilization. + :data:`sys.int_info.default_max_str_digits` was used during initialization. From code, you can inspect the current limit and set a new one using these :mod:`sys` APIs: @@ -5569,7 +5638,7 @@ From code, you can inspect the current limit and set a new one using these a getter and setter for the interpreter-wide limit. Subinterpreters have their own limit. -Information about the default and minimum can be found in :attr:`sys.int_info`: +Information about the default and minimum can be found in :data:`sys.int_info`: * :data:`sys.int_info.default_max_str_digits ` is the compiled-in default limit. diff --git a/Doc/library/string.rst b/Doc/library/string.rst index 3b96813e683864e..262b785bbcbfc17 100644 --- a/Doc/library/string.rst +++ b/Doc/library/string.rst @@ -206,15 +206,15 @@ literal text, it can be escaped by doubling: ``{{`` and ``}}``. The grammar for a replacement field is as follows: - .. productionlist:: format-string - replacement_field: "{" [`field_name`] ["!" `conversion`] [":" `format_spec`] "}" - field_name: arg_name ("." `attribute_name` | "[" `element_index` "]")* - arg_name: [`identifier` | `digit`+] - attribute_name: `identifier` - element_index: `digit`+ | `index_string` - index_string: + - conversion: "r" | "s" | "a" - format_spec: +.. productionlist:: format-string + replacement_field: "{" [`field_name`] ["!" `conversion`] [":" `format_spec`] "}" + field_name: arg_name ("." `attribute_name` | "[" `element_index` "]")* + arg_name: [`identifier` | `digit`+] + attribute_name: `identifier` + element_index: `digit`+ | `index_string` + index_string: + + conversion: "r" | "s" | "a" + format_spec: In less formal terms, the replacement field can start with a *field_name* that specifies the object whose value is to be formatted and inserted @@ -227,7 +227,9 @@ See also the :ref:`formatspec` section. The *field_name* itself begins with an *arg_name* that is either a number or a keyword. 
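For instance, a numbered field and a keyword field side by side::

   >>> '{0} and {food}'.format('spam', food='eggs')
   'spam and eggs'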
If it's a number, it refers to a positional argument, and if it's a keyword, -it refers to a named keyword argument. If the numerical arg_names in a format string +it refers to a named keyword argument. An *arg_name* is treated as a number if +a call to :meth:`str.isdecimal` on the string would return true. +If the numerical arg_names in a format string are 0, 1, 2, ... in sequence, they can all be omitted (not just some) and the numbers 0, 1, 2, ... will be automatically inserted in that order. Because *arg_name* is not quote-delimited, it is not possible to specify arbitrary @@ -235,7 +237,7 @@ dictionary keys (e.g., the strings ``'10'`` or ``':-]'``) within a format string The *arg_name* can be followed by any number of index or attribute expressions. An expression of the form ``'.name'`` selects the named attribute using :func:`getattr`, while an expression of the form ``'[index]'`` -does an index lookup using :func:`__getitem__`. +does an index lookup using :meth:`~object.__getitem__`. .. versionchanged:: 3.1 The positional argument specifiers can be omitted for :meth:`str.format`, @@ -254,10 +256,10 @@ Some simple format string examples:: "Units destroyed: {players[0]}" # First element of keyword argument 'players'. The *conversion* field causes a type coercion before formatting. Normally, the -job of formatting a value is done by the :meth:`__format__` method of the value +job of formatting a value is done by the :meth:`~object.__format__` method of the value itself. However, in some cases it is desirable to force a type to be formatted as a string, overriding its own definition of formatting. By converting the -value to a string before calling :meth:`__format__`, the normal formatting logic +value to a string before calling :meth:`~object.__format__`, the normal formatting logic is bypassed. Three conversion flags are currently supported: ``'!s'`` which calls :func:`str` @@ -310,7 +312,7 @@ non-empty format specification typically modifies the result. The general form of a *standard format specifier* is: .. productionlist:: format-spec - format_spec: [[`fill`]`align`][`sign`][z][#][0][`width`][`grouping_option`][.`precision`][`type`] + format_spec: [[`fill`]`align`][`sign`]["z"]["#"]["0"][`width`][`grouping_option`]["." `precision`][`type`] fill: align: "<" | ">" | "=" | "^" sign: "+" | "-" | " " @@ -330,30 +332,30 @@ affect the :func:`format` function. The meaning of the various alignment options is as follows: - .. index:: - single: < (less); in string formatting - single: > (greater); in string formatting - single: = (equals); in string formatting - single: ^ (caret); in string formatting - - +---------+----------------------------------------------------------+ - | Option | Meaning | - +=========+==========================================================+ - | ``'<'`` | Forces the field to be left-aligned within the available | - | | space (this is the default for most objects). | - +---------+----------------------------------------------------------+ - | ``'>'`` | Forces the field to be right-aligned within the | - | | available space (this is the default for numbers). | - +---------+----------------------------------------------------------+ - | ``'='`` | Forces the padding to be placed after the sign (if any) | - | | but before the digits. This is used for printing fields | - | | in the form '+000000120'. This alignment option is only | - | | valid for numeric types. It becomes the default for | - | | numbers when '0' immediately precedes the field width. 
| - +---------+----------------------------------------------------------+ - | ``'^'`` | Forces the field to be centered within the available | - | | space. | - +---------+----------------------------------------------------------+ +.. index:: + single: < (less); in string formatting + single: > (greater); in string formatting + single: = (equals); in string formatting + single: ^ (caret); in string formatting + ++---------+----------------------------------------------------------+ +| Option | Meaning | ++=========+==========================================================+ +| ``'<'`` | Forces the field to be left-aligned within the available | +| | space (this is the default for most objects). | ++---------+----------------------------------------------------------+ +| ``'>'`` | Forces the field to be right-aligned within the | +| | available space (this is the default for numbers). | ++---------+----------------------------------------------------------+ +| ``'='`` | Forces the padding to be placed after the sign (if any) | +| | but before the digits. This is used for printing fields | +| | in the form '+000000120'. This alignment option is only | +| | valid for numeric types. It becomes the default for | +| | numbers when '0' immediately precedes the field width. | ++---------+----------------------------------------------------------+ +| ``'^'`` | Forces the field to be centered within the available | +| | space. | ++---------+----------------------------------------------------------+ Note that unless a minimum field width is defined, the field width will always be the same size as the data to fill it, so that the alignment option has no @@ -362,23 +364,23 @@ meaning in this case. The *sign* option is only valid for number types, and can be one of the following: - .. index:: - single: + (plus); in string formatting - single: - (minus); in string formatting - single: space; in string formatting - - +---------+----------------------------------------------------------+ - | Option | Meaning | - +=========+==========================================================+ - | ``'+'`` | indicates that a sign should be used for both | - | | positive as well as negative numbers. | - +---------+----------------------------------------------------------+ - | ``'-'`` | indicates that a sign should be used only for negative | - | | numbers (this is the default behavior). | - +---------+----------------------------------------------------------+ - | space | indicates that a leading space should be used on | - | | positive numbers, and a minus sign on negative numbers. | - +---------+----------------------------------------------------------+ +.. index:: + single: + (plus); in string formatting + single: - (minus); in string formatting + single: space; in string formatting + ++---------+----------------------------------------------------------+ +| Option | Meaning | ++=========+==========================================================+ +| ``'+'`` | indicates that a sign should be used for both | +| | positive as well as negative numbers. | ++---------+----------------------------------------------------------+ +| ``'-'`` | indicates that a sign should be used only for negative | +| | numbers (this is the default behavior). | ++---------+----------------------------------------------------------+ +| space | indicates that a leading space should be used on | +| | positive numbers, and a minus sign on negative numbers. | ++---------+----------------------------------------------------------+ .. 
index:: single: z; in string formatting diff --git a/Doc/library/stringprep.rst b/Doc/library/stringprep.rst index 5cfb533d802db43..c6d78a356d97bcd 100644 --- a/Doc/library/stringprep.rst +++ b/Doc/library/stringprep.rst @@ -27,7 +27,7 @@ procedure are part of the profile. One example of a ``stringprep`` profile is ``nameprep``, which is used for internationalized domain names. The module :mod:`stringprep` only exposes the tables from :rfc:`3454`. As these -tables would be very large to represent them as dictionaries or lists, the +tables would be very large to represent as dictionaries or lists, the module uses the Unicode character database internally. The module source code itself was generated using the ``mkstringprep.py`` utility. diff --git a/Doc/library/struct.rst b/Doc/library/struct.rst index 620f50376beb62e..e2e6fc542e3e671 100644 --- a/Doc/library/struct.rst +++ b/Doc/library/struct.rst @@ -1,6 +1,10 @@ :mod:`struct` --- Interpret bytes as packed binary data ======================================================= +.. testsetup:: * + + from struct import * + .. module:: struct :synopsis: Interpret bytes as packed binary data. @@ -12,21 +16,25 @@ -------------- -This module performs conversions between Python values and C structs represented -as Python :class:`bytes` objects. This can be used in handling binary data -stored in files or from network connections, among other sources. It uses -:ref:`struct-format-strings` as compact descriptions of the layout of the C -structs and the intended conversion to/from Python values. +This module converts between Python values and C structs represented +as Python :class:`bytes` objects. Compact :ref:`format strings ` +describe the intended conversions to/from Python values. +The module's functions and objects can be used for two largely +distinct applications, data exchange with external sources (files or +network connections), or data transfer between the Python application +and the C layer. .. note:: - By default, the result of packing a given C struct includes pad bytes in - order to maintain proper alignment for the C types involved; similarly, - alignment is taken into account when unpacking. This behavior is chosen so - that the bytes of a packed struct correspond exactly to the layout in memory - of the corresponding C struct. To handle platform-independent data formats - or omit implicit pad bytes, use ``standard`` size and alignment instead of - ``native`` size and alignment: see :ref:`struct-alignment` for details. + When no prefix character is given, native mode is the default. It + packs or unpacks data based on the platform and compiler on which + the Python interpreter was built. + The result of packing a given C struct includes pad bytes which + maintain proper alignment for the C types involved; similarly, + alignment is taken into account when unpacking. In contrast, when + communicating data between external sources, the programmer is + responsible for defining byte ordering and padding between elements. + See :ref:`struct-alignment` for details. Several :mod:`struct` functions (and methods of :class:`Struct`) take a *buffer* argument. This refers to objects that implement the :ref:`bufferobjects` and @@ -102,10 +110,13 @@ The module defines the following exception and functions: Format Strings -------------- -Format strings are the mechanism used to specify the expected layout when -packing and unpacking data. They are built up from :ref:`format-characters`, -which specify the type of data being packed/unpacked. 
In addition, there are -special characters for controlling the :ref:`struct-alignment`. +Format strings describe the data layout when +packing and unpacking data. They are built up from :ref:`format characters`, +which specify the type of data being packed/unpacked. In addition, +special characters control the :ref:`byte order, size and alignment`. +Each format string consists of an optional prefix character which +describes the overall properties of the data and one or more format +characters which describe the actual data values and padding. .. _struct-alignment: @@ -116,6 +127,11 @@ Byte Order, Size, and Alignment By default, C types are represented in the machine's native format and byte order, and properly aligned by skipping pad bytes if necessary (according to the rules used by the C compiler). +This behavior is chosen so +that the bytes of a packed struct correspond exactly to the memory layout +of the corresponding C struct. +Whether to use native byte ordering +and padding or standard formats depends on the application. .. index:: single: @ (at); in struct format strings @@ -144,12 +160,10 @@ following table: If the first character is not one of these, ``'@'`` is assumed. -Native byte order is big-endian or little-endian, depending on the host -system. For example, Intel x86 and AMD64 (x86-64) are little-endian; -IBM z and most legacy architectures are big-endian; -and ARM, RISC-V and IBM Power feature switchable endianness -(bi-endian, though the former two are nearly always little-endian in practice). -Use ``sys.byteorder`` to check the endianness of your system. +Native byte order is big-endian or little-endian, depending on the +host system. For example, Intel x86, AMD64 (x86-64), and Apple M1 are +little-endian; IBM z and many legacy architectures are big-endian. +Use :data:`sys.byteorder` to check the endianness of your system. Native size and alignment are determined using the C compiler's ``sizeof`` expression. This is always combined with native byte order. @@ -221,9 +235,9 @@ platform-dependent. | ``Q`` | :c:expr:`unsigned long | integer | 8 | \(2) | | | long` | | | | +--------+--------------------------+--------------------+----------------+------------+ -| ``n`` | :c:expr:`ssize_t` | integer | | \(3) | +| ``n`` | :c:type:`ssize_t` | integer | | \(3) | +--------+--------------------------+--------------------+----------------+------------+ -| ``N`` | :c:expr:`size_t` | integer | | \(3) | +| ``N`` | :c:type:`size_t` | integer | | \(3) | +--------+--------------------------+--------------------+----------------+------------+ | ``e`` | \(6) | float | 2 | \(4) | +--------+--------------------------+--------------------+----------------+------------+ @@ -231,9 +245,9 @@ platform-dependent. 
+--------+--------------------------+--------------------+----------------+------------+ | ``d`` | :c:expr:`double` | float | 8 | \(4) | +--------+--------------------------+--------------------+----------------+------------+ -| ``s`` | :c:expr:`char[]` | bytes | | | +| ``s`` | :c:expr:`char[]` | bytes | | \(9) | +--------+--------------------------+--------------------+----------------+------------+ -| ``p`` | :c:expr:`char[]` | bytes | | | +| ``p`` | :c:expr:`char[]` | bytes | | \(8) | +--------+--------------------------+--------------------+----------------+------------+ | ``P`` | :c:expr:`void \*` | integer | | \(5) | +--------+--------------------------+--------------------+----------------+------------+ @@ -256,11 +270,11 @@ Notes: (2) When attempting to pack a non-integer using any of the integer conversion - codes, if the non-integer has a :meth:`__index__` method then that method is + codes, if the non-integer has a :meth:`~object.__index__` method then that method is called to convert the argument to an integer before packing. .. versionchanged:: 3.2 - Added use of the :meth:`__index__` method for non-integers. + Added use of the :meth:`~object.__index__` method for non-integers. (3) The ``'n'`` and ``'N'`` conversion codes are only available for the native @@ -292,8 +306,33 @@ Notes: format `_ for more information. (7) - For padding, ``x`` inserts null bytes. - + When packing, ``'x'`` inserts one NUL byte. + +(8) + The ``'p'`` format character encodes a "Pascal string", meaning a short + variable-length string stored in a *fixed number of bytes*, given by the count. + The first byte stored is the length of the string, or 255, whichever is + smaller. The bytes of the string follow. If the string passed in to + :func:`pack` is too long (longer than the count minus 1), only the leading + ``count-1`` bytes of the string are stored. If the string is shorter than + ``count-1``, it is padded with null bytes so that exactly count bytes in all + are used. Note that for :func:`unpack`, the ``'p'`` format character consumes + ``count`` bytes, but that the string returned can never contain more than 255 + bytes. + +(9) + For the ``'s'`` format character, the count is interpreted as the length of the + bytes, not a repeat count like for the other format characters; for example, + ``'10s'`` means a single 10-byte string mapping to or from a single + Python byte string, while ``'10c'`` means 10 + separate one byte character elements (e.g., ``cccccccccc``) mapping + to or from ten different Python byte objects. (See :ref:`struct-examples` + for a concrete demonstration of the difference.) + If a count is not given, it defaults to 1. For packing, the string is + truncated or padded with null bytes as appropriate to make it fit. For + unpacking, the resulting bytes object always has exactly the specified number + of bytes. As a special case, ``'0s'`` means a single, empty string (while + ``'0c'`` means 0 characters). A format character may be preceded by an integral repeat count. For example, the format string ``'4h'`` means exactly the same as ``'hhhh'``. @@ -301,15 +340,6 @@ the format string ``'4h'`` means exactly the same as ``'hhhh'``. Whitespace characters between formats are ignored; a count and its format must not contain whitespace though. -For the ``'s'`` format character, the count is interpreted as the length of the -bytes, not a repeat count like for the other format characters; for example, -``'10s'`` means a single 10-byte string, while ``'10c'`` means 10 characters. 
-If a count is not given, it defaults to 1. For packing, the string is -truncated or padded with null bytes as appropriate to make it fit. For -unpacking, the resulting bytes object always has exactly the specified number -of bytes. As a special case, ``'0s'`` means a single, empty string (while -``'0c'`` means 0 characters). - When packing a value ``x`` using one of the integer formats (``'b'``, ``'B'``, ``'h'``, ``'H'``, ``'i'``, ``'I'``, ``'l'``, ``'L'``, ``'q'``, ``'Q'``), if ``x`` is outside the valid range for that format @@ -319,17 +349,6 @@ then :exc:`struct.error` is raised. Previously, some of the integer formats wrapped out-of-range values and raised :exc:`DeprecationWarning` instead of :exc:`struct.error`. -The ``'p'`` format character encodes a "Pascal string", meaning a short -variable-length string stored in a *fixed number of bytes*, given by the count. -The first byte stored is the length of the string, or 255, whichever is -smaller. The bytes of the string follow. If the string passed in to -:func:`pack` is too long (longer than the count minus 1), only the leading -``count-1`` bytes of the string are stored. If the string is shorter than -``count-1``, it is padded with null bytes so that exactly count bytes in all -are used. Note that for :func:`unpack`, the ``'p'`` format character consumes -``count`` bytes, but that the string returned can never contain more than 255 -bytes. - .. index:: single: ? (question mark); in struct format strings For the ``'?'`` format character, the return value is either :const:`True` or @@ -345,18 +364,36 @@ Examples ^^^^^^^^ .. note:: - All examples assume a native byte order, size, and alignment with a - big-endian machine. + Native byte order examples (designated by the ``'@'`` format prefix or + lack of any prefix character) may not match what the reader's + machine produces as + that depends on the platform and compiler. + +Pack and unpack integers of three different sizes, using big endian +ordering:: + + >>> from struct import * + >>> pack(">bhl", 1, 2, 3) + b'\x01\x00\x02\x00\x00\x00\x03' + >>> unpack('>bhl', b'\x01\x00\x02\x00\x00\x00\x03') + (1, 2, 3) + >>> calcsize('>bhl') + 7 + +Attempt to pack an integer which is too large for the defined field:: + + >>> pack(">h", 99999) + Traceback (most recent call last): + File "", line 1, in + struct.error: 'h' format requires -32768 <= number <= 32767 -A basic example of packing/unpacking three integers:: +Demonstrate the difference between ``'s'`` and ``'c'`` format +characters:: - >>> from struct import * - >>> pack('hhl', 1, 2, 3) - b'\x00\x01\x00\x02\x00\x00\x00\x03' - >>> unpack('hhl', b'\x00\x01\x00\x02\x00\x00\x00\x03') - (1, 2, 3) - >>> calcsize('hhl') - 8 + >>> pack("@ccc", b'1', b'2', b'3') + b'123' + >>> pack("@3s", b'123') + b'123' Unpacked fields can be named by assigning them to variables or by wrapping the result in a named tuple:: @@ -369,35 +406,132 @@ the result in a named tuple:: >>> Student._make(unpack('<10sHHb', record)) Student(name=b'raymond ', serialnum=4658, school=264, gradelevel=8) -The ordering of format characters may have an impact on size since the padding -needed to satisfy alignment requirements is different:: - - >>> pack('ci', b'*', 0x12131415) - b'*\x00\x00\x00\x12\x13\x14\x15' - >>> pack('ic', 0x12131415, b'*') - b'\x12\x13\x14\x15*' - >>> calcsize('ci') +The ordering of format characters may have an impact on size in native +mode since padding is implicit. In standard mode, the user is +responsible for inserting any desired padding. 
+Note in +the first ``pack`` call below that three NUL bytes were added after the +packed ``'#'`` to align the following integer on a four-byte boundary. +In this example, the output was produced on a little endian machine:: + + >>> pack('@ci', b'#', 0x12131415) + b'#\x00\x00\x00\x15\x14\x13\x12' + >>> pack('@ic', 0x12131415, b'#') + b'\x15\x14\x13\x12#' + >>> calcsize('@ci') 8 - >>> calcsize('ic') + >>> calcsize('@ic') 5 -The following format ``'llh0l'`` specifies two pad bytes at the end, assuming -longs are aligned on 4-byte boundaries:: +The following format ``'llh0l'`` results in two pad bytes being added +at the end, assuming the platform's longs are aligned on 4-byte boundaries:: - >>> pack('llh0l', 1, 2, 3) + >>> pack('@llh0l', 1, 2, 3) b'\x00\x00\x00\x01\x00\x00\x00\x02\x00\x03\x00\x00' -This only works when native size and alignment are in effect; standard size and -alignment does not enforce any alignment. - .. seealso:: Module :mod:`array` Packed binary storage of homogeneous data. - Module :mod:`xdrlib` - Packing and unpacking of XDR data. + Module :mod:`json` + JSON encoder and decoder. + + Module :mod:`pickle` + Python object serialization. + + +.. _applications: + +Applications +------------ + +Two main applications for the :mod:`struct` module exist, data +interchange between Python and C code within an application or another +application compiled using the same compiler (:ref:`native formats`), and +data interchange between applications using agreed upon data layout +(:ref:`standard formats`). Generally speaking, the format strings +constructed for these two domains are distinct. + + +.. _struct-native-formats: + +Native Formats +^^^^^^^^^^^^^^ + +When constructing format strings which mimic native layouts, the +compiler and machine architecture determine byte ordering and padding. +In such cases, the ``@`` format character should be used to specify +native byte ordering and data sizes. Internal pad bytes are normally inserted +automatically. It is possible that a zero-repeat format code will be +needed at the end of a format string to round up to the correct +byte boundary for proper alignment of consecutive chunks of data. + +Consider these two simple examples (on a 64-bit, little-endian +machine):: + + >>> calcsize('@lhl') + 24 + >>> calcsize('@llh') + 18 + +Data is not padded to an 8-byte boundary at the end of the second +format string without the use of extra padding. A zero-repeat format +code solves that problem:: + + >>> calcsize('@llh0l') + 24 + +The ``'x'`` format code can be used to specify the repeat, but for +native formats it is better to use a zero-repeat format like ``'0l'``. + +By default, native byte ordering and alignment is used, but it is +better to be explicit and use the ``'@'`` prefix character. + + +.. _struct-standard-formats: + +Standard Formats +^^^^^^^^^^^^^^^^ + +When exchanging data beyond your process such as networking or storage, +be precise. Specify the exact byte order, size, and alignment. Do +not assume they match the native order of a particular machine. +For example, network byte order is big-endian, while many popular CPUs +are little-endian. By defining this explicitly, the user need not +care about the specifics of the platform their code is running on. +The first character should typically be ``<`` or ``>`` +(or ``!``). Padding is the responsibility of the programmer. The +zero-repeat format character won't work. Instead, the user must +explicitly add ``'x'`` pad bytes where needed. 
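For example, a minimal sketch of inserting a single explicit pad byte in a
standard-size format::

   >>> from struct import calcsize, pack
   >>> calcsize('<bxh')            # one explicit pad byte after the 'b'
   4
   >>> pack('<bxh', 1, 2)
   b'\x01\x00\x02\x00'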
Revisiting the +examples from the previous section, we have:: + + >>> calcsize('>> pack('>> calcsize('@llh') + 18 + >>> pack('@llh', 1, 2, 3) == pack('>> calcsize('>> calcsize('@llh0l') + 24 + >>> pack('@llh0l', 1, 2, 3) == pack('>> calcsize('>> calcsize('@llh0l') + 12 + >>> pack('@llh0l', 1, 2, 3) == pack('>> Struct('i') + Struct('i') .. _half precision format: https://en.wikipedia.org/wiki/Half-precision_floating-point_format .. _ieee 754 standard: https://en.wikipedia.org/wiki/IEEE_754-2008_revision -.. _IETF RFC 1700: https://tools.ietf.org/html/rfc1700 +.. _IETF RFC 1700: https://datatracker.ietf.org/doc/html/rfc1700 diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst index 51b9e38b7b6ce91..d6b892a7ed957d4 100644 --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -57,10 +57,13 @@ underlying :class:`Popen` interface can be used directly. and combine both streams into one, use ``stdout=PIPE`` and ``stderr=STDOUT`` instead of *capture_output*. - The *timeout* argument is passed to :meth:`Popen.communicate`. If the timeout - expires, the child process will be killed and waited for. The - :exc:`TimeoutExpired` exception will be re-raised after the child process - has terminated. + A *timeout* may be specified in seconds, it is internally passed on to + :meth:`Popen.communicate`. If the timeout expires, the child process will be + killed and waited for. The :exc:`TimeoutExpired` exception will be + re-raised after the child process has terminated. The initial process + creation itself cannot be interrupted on many platform APIs so you are not + guaranteed to see a timeout exception until at least after however long + process creation takes. The *input* argument is passed to :meth:`Popen.communicate` and thus to the subprocess's stdin. If used it must be a byte sequence, or a string if @@ -81,8 +84,10 @@ underlying :class:`Popen` interface can be used directly. If *env* is not ``None``, it must be a mapping that defines the environment variables for the new process; these are used instead of the default - behavior of inheriting the current process' environment. It is passed directly - to :class:`Popen`. + behavior of inheriting the current process' environment. It is passed + directly to :class:`Popen`. This mapping can be str to str on any platform + or bytes to bytes on POSIX platforms much like :data:`os.environ` or + :data:`os.environb`. Examples:: @@ -109,6 +114,14 @@ underlying :class:`Popen` interface can be used directly. Added the *text* parameter, as a more understandable alias of *universal_newlines*. Added the *capture_output* parameter. + .. versionchanged:: 3.12 + + Changed Windows shell search order for ``shell=True``. The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + .. class:: CompletedProcess The return value from :func:`run`, representing a process that has finished. @@ -268,15 +281,14 @@ default values. The arguments that are most commonly needed are: *stdin*, *stdout* and *stderr* specify the executed program's standard input, standard output and standard error file handles, respectively. Valid values - are :data:`PIPE`, :data:`DEVNULL`, an existing file descriptor (a positive - integer), an existing file object with a valid file descriptor, and ``None``. - :data:`PIPE` indicates that a new pipe to the child should be created. 
- :data:`DEVNULL` indicates that the special file :data:`os.devnull` will - be used. With the default settings of ``None``, no redirection will occur; - the child's file handles will be inherited from the parent. - Additionally, *stderr* can be :data:`STDOUT`, which indicates that the - stderr data from the child process should be captured into the same file - handle as for *stdout*. + are ``None``, :data:`PIPE`, :data:`DEVNULL`, an existing file descriptor (a + positive integer), and an existing :term:`file object` with a valid file + descriptor. With the default settings of ``None``, no redirection will + occur. :data:`PIPE` indicates that a new pipe to the child should be + created. :data:`DEVNULL` indicates that the special file :data:`os.devnull` + will be used. Additionally, *stderr* can be :data:`STDOUT`, which indicates + that the stderr data from the child process should be captured into the same + file handle as for *stdout*. .. index:: single: universal newlines; subprocess module @@ -453,10 +465,10 @@ functions. :func:`open` function when creating the stdin/stdout/stderr pipe file objects: - - :const:`0` means unbuffered (read and write are one + - ``0`` means unbuffered (read and write are one system call and can return short) - - :const:`1` means line buffered - (only usable if ``universal_newlines=True`` i.e., in a text mode) + - ``1`` means line buffered + (only usable if ``text=True`` or ``universal_newlines=True``) - any other positive value means use a buffer of approximately that size - negative bufsize (the default) means the system default of @@ -465,7 +477,7 @@ functions. .. versionchanged:: 3.3.1 *bufsize* now defaults to -1 to enable buffering by default to match the behavior that most code expects. In versions prior to Python 3.2.4 and - 3.3.1 it incorrectly defaulted to :const:`0` which was unbuffered + 3.3.1 it incorrectly defaulted to ``0`` which was unbuffered and allowed short reads. This was unintentional and did not match the behavior of Python 2 as most code expected. @@ -486,17 +498,24 @@ functions. *executable* parameter accepts a bytes and :term:`path-like object` on Windows. + .. versionchanged:: 3.12 + + Changed Windows shell search order for ``shell=True``. The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + *stdin*, *stdout* and *stderr* specify the executed program's standard input, standard output and standard error file handles, respectively. Valid values - are :data:`PIPE`, :data:`DEVNULL`, an existing file descriptor (a positive - integer), an existing :term:`file object` with a valid file descriptor, - and ``None``. :data:`PIPE` indicates that a new pipe to the child should - be created. :data:`DEVNULL` indicates that the special file - :data:`os.devnull` will be used. With the default settings of ``None``, - no redirection will occur; the child's file handles will be inherited from - the parent. Additionally, *stderr* can be :data:`STDOUT`, which indicates + are ``None``, :data:`PIPE`, :data:`DEVNULL`, an existing file descriptor (a + positive integer), and an existing :term:`file object` with a valid file + descriptor. With the default settings of ``None``, no redirection will + occur. :data:`PIPE` indicates that a new pipe to the child should be + created. :data:`DEVNULL` indicates that the special file :data:`os.devnull` + will be used. 
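   As a rough sketch of these redirection values (this assumes a POSIX system
   where an ``echo`` command is available)::

      >>> import subprocess
      >>> p = subprocess.Popen(['echo', 'hello'],
      ...                      stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
      >>> out, err = p.communicate()
      >>> out
      b'hello\n'
      >>> err is None                  # stderr was not captured
      True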
Additionally, *stderr* can be :data:`STDOUT`, which indicates that the stderr data from the applications should be captured into the same - file handle as for stdout. + file handle as for *stdout*. If *preexec_fn* is set to a callable object, this object will be called in the child process just before the child is executed. @@ -522,8 +541,8 @@ functions. :exc:`RuntimeError`. The new restriction may affect applications that are deployed in mod_wsgi, uWSGI, and other embedded environments. - If *close_fds* is true, all file descriptors except :const:`0`, :const:`1` and - :const:`2` will be closed before the child process is executed. Otherwise + If *close_fds* is true, all file descriptors except ``0``, ``1`` and + ``2`` will be closed before the child process is executed. Otherwise when *close_fds* is false, file descriptors obey their inheritable flag as described in :ref:`fd_inheritance`. @@ -619,7 +638,9 @@ functions. If *env* is not ``None``, it must be a mapping that defines the environment variables for the new process; these are used instead of the default - behavior of inheriting the current process' environment. + behavior of inheriting the current process' environment. This mapping can be + str to str on any platform or bytes to bytes on POSIX platforms much like + :data:`os.environ` or :data:`os.environb`. .. note:: @@ -645,18 +666,18 @@ functions. passed to the underlying ``CreateProcess`` function. *creationflags*, if given, can be one or more of the following flags: - * :data:`CREATE_NEW_CONSOLE` - * :data:`CREATE_NEW_PROCESS_GROUP` - * :data:`ABOVE_NORMAL_PRIORITY_CLASS` - * :data:`BELOW_NORMAL_PRIORITY_CLASS` - * :data:`HIGH_PRIORITY_CLASS` - * :data:`IDLE_PRIORITY_CLASS` - * :data:`NORMAL_PRIORITY_CLASS` - * :data:`REALTIME_PRIORITY_CLASS` - * :data:`CREATE_NO_WINDOW` - * :data:`DETACHED_PROCESS` - * :data:`CREATE_DEFAULT_ERROR_MODE` - * :data:`CREATE_BREAKAWAY_FROM_JOB` + * :data:`CREATE_NEW_CONSOLE` + * :data:`CREATE_NEW_PROCESS_GROUP` + * :data:`ABOVE_NORMAL_PRIORITY_CLASS` + * :data:`BELOW_NORMAL_PRIORITY_CLASS` + * :data:`HIGH_PRIORITY_CLASS` + * :data:`IDLE_PRIORITY_CLASS` + * :data:`NORMAL_PRIORITY_CLASS` + * :data:`REALTIME_PRIORITY_CLASS` + * :data:`CREATE_NO_WINDOW` + * :data:`DETACHED_PROCESS` + * :data:`CREATE_DEFAULT_ERROR_MODE` + * :data:`CREATE_BREAKAWAY_FROM_JOB` *pipesize* can be used to change the size of the pipe when :data:`PIPE` is used for *stdin*, *stdout* or *stderr*. The size of the pipe @@ -716,13 +737,13 @@ arguments. code. All of the functions and methods that accept a *timeout* parameter, such as -:func:`call` and :meth:`Popen.communicate` will raise :exc:`TimeoutExpired` if +:func:`run` and :meth:`Popen.communicate` will raise :exc:`TimeoutExpired` if the timeout expires before the process exits. Exceptions defined in this module all inherit from :exc:`SubprocessError`. - .. versionadded:: 3.3 - The :exc:`SubprocessError` base class was added. +.. versionadded:: 3.3 + The :exc:`SubprocessError` base class was added. .. _subprocess-security: @@ -770,9 +791,10 @@ Instances of the :class:`Popen` class have the following methods: .. note:: - The function is implemented using a busy loop (non-blocking call and - short sleeps). Use the :mod:`asyncio` module for an asynchronous wait: - see :class:`asyncio.create_subprocess_exec`. + When the ``timeout`` parameter is not ``None``, then (on POSIX) the + function is implemented using a busy loop (non-blocking call and short + sleeps). 
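      A small sketch of handling that timeout (this assumes a POSIX system
      where a ``sleep`` command is available)::

         import subprocess

         p = subprocess.Popen(['sleep', '10'])   # any long-running command
         try:
             p.wait(timeout=1)                   # give the child one second
         except subprocess.TimeoutExpired:
             p.kill()                            # child is still running; stop it
             p.wait()                            # reap it to avoid a zombie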
Use the :mod:`asyncio` module for an asynchronous wait: see + :class:`asyncio.create_subprocess_exec`. .. versionchanged:: 3.3 *timeout* was added. @@ -845,7 +867,8 @@ Instances of the :class:`Popen` class have the following methods: On Windows :meth:`kill` is an alias for :meth:`terminate`. -The following attributes are also available: +The following attributes are also set by the class for you to access. +Reassigning them to new values is unsupported: .. attribute:: Popen.args @@ -858,9 +881,9 @@ The following attributes are also available: If the *stdin* argument was :data:`PIPE`, this attribute is a writeable stream object as returned by :func:`open`. If the *encoding* or *errors* - arguments were specified or the *universal_newlines* argument was ``True``, - the stream is a text stream, otherwise it is a byte stream. If the *stdin* - argument was not :data:`PIPE`, this attribute is ``None``. + arguments were specified or the *text* or *universal_newlines* argument + was ``True``, the stream is a text stream, otherwise it is a byte stream. + If the *stdin* argument was not :data:`PIPE`, this attribute is ``None``. .. attribute:: Popen.stdout @@ -868,9 +891,9 @@ The following attributes are also available: If the *stdout* argument was :data:`PIPE`, this attribute is a readable stream object as returned by :func:`open`. Reading from the stream provides output from the child process. If the *encoding* or *errors* arguments were - specified or the *universal_newlines* argument was ``True``, the stream is a - text stream, otherwise it is a byte stream. If the *stdout* argument was not - :data:`PIPE`, this attribute is ``None``. + specified or the *text* or *universal_newlines* argument was ``True``, the + stream is a text stream, otherwise it is a byte stream. If the *stdout* + argument was not :data:`PIPE`, this attribute is ``None``. .. attribute:: Popen.stderr @@ -878,9 +901,9 @@ The following attributes are also available: If the *stderr* argument was :data:`PIPE`, this attribute is a readable stream object as returned by :func:`open`. Reading from the stream provides error output from the child process. If the *encoding* or *errors* arguments - were specified or the *universal_newlines* argument was ``True``, the stream - is a text stream, otherwise it is a byte stream. If the *stderr* argument was - not :data:`PIPE`, this attribute is ``None``. + were specified or the *text* or *universal_newlines* argument was ``True``, the + stream is a text stream, otherwise it is a byte stream. If the *stderr* argument + was not :data:`PIPE`, this attribute is ``None``. .. warning:: @@ -900,9 +923,12 @@ The following attributes are also available: .. attribute:: Popen.returncode - The child return code, set by :meth:`poll` and :meth:`wait` (and indirectly - by :meth:`communicate`). A ``None`` value indicates that the process - hasn't terminated yet. + The child return code. Initially ``None``, :attr:`returncode` is set by + a call to the :meth:`poll`, :meth:`wait`, or :meth:`communicate` methods + if they detect that the process has terminated. + + A ``None`` value indicates that the process hadn't yet terminated at the + time of the last method call. A negative value ``-N`` indicates that the child was terminated by signal ``N`` (POSIX only). @@ -1155,6 +1181,14 @@ calls these functions. .. versionchanged:: 3.3 *timeout* was added. + .. versionchanged:: 3.12 + + Changed Windows shell search order for ``shell=True``. 
The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + .. function:: check_call(args, *, stdin=None, stdout=None, stderr=None, \ shell=False, cwd=None, timeout=None, \ **other_popen_kwargs) @@ -1187,6 +1221,14 @@ calls these functions. .. versionchanged:: 3.3 *timeout* was added. + .. versionchanged:: 3.12 + + Changed Windows shell search order for ``shell=True``. The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + .. function:: check_output(args, *, stdin=None, stderr=None, shell=False, \ cwd=None, encoding=None, errors=None, \ @@ -1242,6 +1284,14 @@ calls these functions. .. versionadded:: 3.7 *text* was added as a more readable alias for *universal_newlines*. + .. versionchanged:: 3.12 + + Changed Windows shell search order for ``shell=True``. The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + .. _subprocess-replacements: @@ -1561,21 +1611,25 @@ improves performance. If you ever encounter a presumed highly unusual situation where you need to prevent ``vfork()`` from being used by Python, you can set the -:attr:`subprocess._USE_VFORK` attribute to a false value. +:const:`subprocess._USE_VFORK` attribute to a false value. + +:: subprocess._USE_VFORK = False # See CPython issue gh-NNNNNN. Setting this has no impact on use of ``posix_spawn()`` which could use ``vfork()`` internally within its libc implementation. There is a similar -:attr:`subprocess._USE_POSIX_SPAWN` attribute if you need to prevent use of +:const:`subprocess._USE_POSIX_SPAWN` attribute if you need to prevent use of that. +:: + subprocess._USE_POSIX_SPAWN = False # See CPython issue gh-NNNNNN. It is safe to set these to false on any Python version. They will have no effect on older versions when unsupported. Do not assume the attributes are available to read. Despite their names, a true value does not indicate that the -corresponding function will be used, only that that it may be. +corresponding function will be used, only that it may be. Please file issues any time you have to use these private knobs with a way to reproduce the issue you were seeing. Link to that issue from a comment in your diff --git a/Doc/library/sunau.rst b/Doc/library/sunau.rst deleted file mode 100644 index c7a38d96ade1310..000000000000000 --- a/Doc/library/sunau.rst +++ /dev/null @@ -1,274 +0,0 @@ -:mod:`sunau` --- Read and write Sun AU files -============================================ - -.. module:: sunau - :synopsis: Provide an interface to the Sun AU sound format. - :deprecated: - -.. sectionauthor:: Moshe Zadka - -**Source code:** :source:`Lib/sunau.py` - -.. deprecated-removed:: 3.11 3.13 - The :mod:`sunau` module is deprecated - (see :pep:`PEP 594 <594#sunau>` for details). - --------------- - -The :mod:`sunau` module provides a convenient interface to the Sun AU sound -format. Note that this module is interface-compatible with the modules -:mod:`aifc` and :mod:`wave`. - -An audio file consists of a header followed by the data. 
The fields of the -header are: - -+---------------+-----------------------------------------------+ -| Field | Contents | -+===============+===============================================+ -| magic word | The four bytes ``.snd``. | -+---------------+-----------------------------------------------+ -| header size | Size of the header, including info, in bytes. | -+---------------+-----------------------------------------------+ -| data size | Physical size of the data, in bytes. | -+---------------+-----------------------------------------------+ -| encoding | Indicates how the audio samples are encoded. | -+---------------+-----------------------------------------------+ -| sample rate | The sampling rate. | -+---------------+-----------------------------------------------+ -| # of channels | The number of channels in the samples. | -+---------------+-----------------------------------------------+ -| info | ASCII string giving a description of the | -| | audio file (padded with null bytes). | -+---------------+-----------------------------------------------+ - -Apart from the info field, all header fields are 4 bytes in size. They are all -32-bit unsigned integers encoded in big-endian byte order. - -The :mod:`sunau` module defines the following functions: - - -.. function:: open(file, mode) - - If *file* is a string, open the file by that name, otherwise treat it as a - seekable file-like object. *mode* can be any of - - ``'r'`` - Read only mode. - - ``'w'`` - Write only mode. - - Note that it does not allow read/write files. - - A *mode* of ``'r'`` returns an :class:`AU_read` object, while a *mode* of ``'w'`` - or ``'wb'`` returns an :class:`AU_write` object. - - -The :mod:`sunau` module defines the following exception: - -.. exception:: Error - - An error raised when something is impossible because of Sun AU specs or - implementation deficiency. - - -The :mod:`sunau` module defines the following data items: - -.. data:: AUDIO_FILE_MAGIC - - An integer every valid Sun AU file begins with, stored in big-endian form. This - is the string ``.snd`` interpreted as an integer. - - -.. data:: AUDIO_FILE_ENCODING_MULAW_8 - AUDIO_FILE_ENCODING_LINEAR_8 - AUDIO_FILE_ENCODING_LINEAR_16 - AUDIO_FILE_ENCODING_LINEAR_24 - AUDIO_FILE_ENCODING_LINEAR_32 - AUDIO_FILE_ENCODING_ALAW_8 - - Values of the encoding field from the AU header which are supported by this - module. - - -.. data:: AUDIO_FILE_ENCODING_FLOAT - AUDIO_FILE_ENCODING_DOUBLE - AUDIO_FILE_ENCODING_ADPCM_G721 - AUDIO_FILE_ENCODING_ADPCM_G722 - AUDIO_FILE_ENCODING_ADPCM_G723_3 - AUDIO_FILE_ENCODING_ADPCM_G723_5 - - Additional known values of the encoding field from the AU header, but which are - not supported by this module. - - -.. _au-read-objects: - -AU_read Objects ---------------- - -AU_read objects, as returned by :func:`.open` above, have the following methods: - - -.. method:: AU_read.close() - - Close the stream, and make the instance unusable. (This is called automatically - on deletion.) - - -.. method:: AU_read.getnchannels() - - Returns number of audio channels (1 for mono, 2 for stereo). - - -.. method:: AU_read.getsampwidth() - - Returns sample width in bytes. - - -.. method:: AU_read.getframerate() - - Returns sampling frequency. - - -.. method:: AU_read.getnframes() - - Returns number of audio frames. - - -.. method:: AU_read.getcomptype() - - Returns compression type. Supported compression types are ``'ULAW'``, ``'ALAW'`` - and ``'NONE'``. - - -.. 
method:: AU_read.getcompname() - - Human-readable version of :meth:`getcomptype`. The supported types have the - respective names ``'CCITT G.711 u-law'``, ``'CCITT G.711 A-law'`` and ``'not - compressed'``. - - -.. method:: AU_read.getparams() - - Returns a :func:`~collections.namedtuple` ``(nchannels, sampwidth, - framerate, nframes, comptype, compname)``, equivalent to output of the - :meth:`get\*` methods. - - -.. method:: AU_read.readframes(n) - - Reads and returns at most *n* frames of audio, as a :class:`bytes` object. The data - will be returned in linear format. If the original data is in u-LAW format, it - will be converted. - - -.. method:: AU_read.rewind() - - Rewind the file pointer to the beginning of the audio stream. - -The following two methods define a term "position" which is compatible between -them, and is otherwise implementation dependent. - - -.. method:: AU_read.setpos(pos) - - Set the file pointer to the specified position. Only values returned from - :meth:`tell` should be used for *pos*. - - -.. method:: AU_read.tell() - - Return current file pointer position. Note that the returned value has nothing - to do with the actual position in the file. - -The following two functions are defined for compatibility with the :mod:`aifc`, -and don't do anything interesting. - - -.. method:: AU_read.getmarkers() - - Returns ``None``. - - -.. method:: AU_read.getmark(id) - - Raise an error. - - -.. _au-write-objects: - -AU_write Objects ----------------- - -AU_write objects, as returned by :func:`.open` above, have the following methods: - - -.. method:: AU_write.setnchannels(n) - - Set the number of channels. - - -.. method:: AU_write.setsampwidth(n) - - Set the sample width (in bytes.) - - .. versionchanged:: 3.4 - Added support for 24-bit samples. - - -.. method:: AU_write.setframerate(n) - - Set the frame rate. - - -.. method:: AU_write.setnframes(n) - - Set the number of frames. This can be later changed, when and if more frames - are written. - - -.. method:: AU_write.setcomptype(type, name) - - Set the compression type and description. Only ``'NONE'`` and ``'ULAW'`` are - supported on output. - - -.. method:: AU_write.setparams(tuple) - - The *tuple* should be ``(nchannels, sampwidth, framerate, nframes, comptype, - compname)``, with values valid for the :meth:`set\*` methods. Set all - parameters. - - -.. method:: AU_write.tell() - - Return current position in the file, with the same disclaimer for the - :meth:`AU_read.tell` and :meth:`AU_read.setpos` methods. - - -.. method:: AU_write.writeframesraw(data) - - Write audio frames, without correcting *nframes*. - - .. versionchanged:: 3.4 - Any :term:`bytes-like object` is now accepted. - - -.. method:: AU_write.writeframes(data) - - Write audio frames and make sure *nframes* is correct. - - .. versionchanged:: 3.4 - Any :term:`bytes-like object` is now accepted. - - -.. method:: AU_write.close() - - Make sure *nframes* is correct, and close the file. - - This method is called upon deletion. - -Note that it is invalid to set any parameters after calling :meth:`writeframes` -or :meth:`writeframesraw`. 
- diff --git a/Doc/library/superseded.rst b/Doc/library/superseded.rst index 57ef9638d058d41..17bfa66f0433022 100644 --- a/Doc/library/superseded.rst +++ b/Doc/library/superseded.rst @@ -4,32 +4,12 @@ Superseded Modules ****************** -The modules described in this chapter are deprecated and only kept for +The modules described in this chapter are deprecated or :term:`soft deprecated` and only kept for backwards compatibility. They have been superseded by other modules. .. toctree:: + :maxdepth: 1 - aifc.rst - asynchat.rst - asyncore.rst - audioop.rst - cgi.rst - cgitb.rst - chunk.rst - crypt.rst - imghdr.rst - imp.rst - mailcap.rst - msilib.rst - nis.rst - nntplib.rst + getopt.rst optparse.rst - ossaudiodev.rst - pipes.rst - sndhdr.rst - spwd.rst - sunau.rst - telnetlib.rst - uu.rst - xdrlib.rst diff --git a/Doc/library/symtable.rst b/Doc/library/symtable.rst index 65ff5bfe7abd618..46159dcef940e7c 100644 --- a/Doc/library/symtable.rst +++ b/Doc/library/symtable.rst @@ -38,7 +38,13 @@ Examining Symbol Tables .. method:: get_type() Return the type of the symbol table. Possible values are ``'class'``, - ``'module'``, and ``'function'``. + ``'module'``, ``'function'``, ``'annotation'``, ``'TypeVar bound'``, + ``'type alias'``, and ``'type parameter'``. The latter four refer to + different flavors of :ref:`annotation scopes `. + + .. versionchanged:: 3.12 + Added ``'annotation'``, ``'TypeVar bound'``, ``'type alias'``, + and ``'type parameter'`` as possible return values. .. method:: get_id() @@ -49,6 +55,10 @@ Examining Symbol Tables Return the table's name. This is the name of the class if the table is for a class, the name of the function if the table is for a function, or ``'top'`` if the table is global (:meth:`get_type` returns ``'module'``). + For type parameter scopes (which are used for generic classes, functions, + and type aliases), it is the name of the underlying class, function, or + type alias. For type alias scopes, it is the name of the type alias. + For :class:`~typing.TypeVar` bound scopes, it is the name of the ``TypeVar``. .. method:: get_lineno() @@ -197,3 +207,21 @@ Examining Symbol Tables Return the namespace bound to this name. If more than one or no namespace is bound to this name, a :exc:`ValueError` is raised. + + +.. _symtable-cli: + +Command-Line Usage +------------------ + +.. versionadded:: 3.13 + +The :mod:`symtable` module can be executed as a script from the command line. + +.. code-block:: sh + + python -m symtable [infile...] + +Symbol tables are generated for the specified Python source files and +dumped to stdout. +If no input file is specified, the content is read from stdin. diff --git a/Doc/library/sys.monitoring.rst b/Doc/library/sys.monitoring.rst new file mode 100644 index 000000000000000..762581b7eda7f1d --- /dev/null +++ b/Doc/library/sys.monitoring.rst @@ -0,0 +1,368 @@ +:mod:`sys.monitoring` --- Execution event monitoring +==================================================== + +.. module:: sys.monitoring + :synopsis: Access and control event monitoring + +.. versionadded:: 3.12 + +----------------- + +.. note:: + + :mod:`sys.monitoring` is a namespace within the :mod:`sys` module, + not an independent module, so there is no need to + ``import sys.monitoring``, simply ``import sys`` and then use + ``sys.monitoring``. + + +This namespace provides access to the functions and constants necessary to +activate and control event monitoring. + +As programs execute, events occur that might be of interest to tools that +monitor execution. 
The :mod:`sys.monitoring` namespace provides means to +receive callbacks when events of interest occur. + +The monitoring API consists of three components: + +* `Tool identifiers`_ +* `Events`_ +* :ref:`Callbacks ` + +Tool identifiers +---------------- + +A tool identifier is an integer and the associated name. +Tool identifiers are used to discourage tools from interfering with each +other and to allow multiple tools to operate at the same time. +Currently tools are completely independent and cannot be used to +monitor each other. This restriction may be lifted in the future. + +Before registering or activating events, a tool should choose an identifier. +Identifiers are integers in the range 0 to 5 inclusive. + +Registering and using tools +''''''''''''''''''''''''''' + +.. function:: use_tool_id(tool_id: int, name: str, /) -> None + + Must be called before *tool_id* can be used. + *tool_id* must be in the range 0 to 5 inclusive. + Raises a :exc:`ValueError` if *tool_id* is in use. + +.. function:: free_tool_id(tool_id: int, /) -> None + + Should be called once a tool no longer requires *tool_id*. + +.. note:: + + :func:`free_tool_id` will not disable global or local events associated + with *tool_id*, nor will it unregister any callback functions. This + function is only intended to be used to notify the VM that the + particular *tool_id* is no longer in use. + +.. function:: get_tool(tool_id: int, /) -> str | None + + Returns the name of the tool if *tool_id* is in use, + otherwise it returns ``None``. + *tool_id* must be in the range 0 to 5 inclusive. + +All IDs are treated the same by the VM with regard to events, but the +following IDs are pre-defined to make co-operation of tools easier:: + + sys.monitoring.DEBUGGER_ID = 0 + sys.monitoring.COVERAGE_ID = 1 + sys.monitoring.PROFILER_ID = 2 + sys.monitoring.OPTIMIZER_ID = 5 + +There is no obligation to set an ID, nor is there anything preventing a tool +from using an ID even it is already in use. +However, tools are encouraged to use a unique ID and respect other tools. + +Events +------ + +The following events are supported: + +.. monitoring-event:: BRANCH + + A conditional branch is taken (or not). + +.. monitoring-event:: CALL + + A call in Python code (event occurs before the call). + +.. monitoring-event:: C_RAISE + + An exception raised from any callable, except for Python functions (event occurs after the exit). + +.. monitoring-event:: C_RETURN + + Return from any callable, except for Python functions (event occurs after the return). + +.. monitoring-event:: EXCEPTION_HANDLED + + An exception is handled. + +.. monitoring-event:: INSTRUCTION + + A VM instruction is about to be executed. + +.. monitoring-event:: JUMP + + An unconditional jump in the control flow graph is made. + +.. monitoring-event:: LINE + + An instruction is about to be executed that has a different line number from the preceding instruction. + +.. monitoring-event:: PY_RESUME + + Resumption of a Python function (for generator and coroutine functions), except for ``throw()`` calls. + +.. monitoring-event:: PY_RETURN + + Return from a Python function (occurs immediately before the return, the callee's frame will be on the stack). + +.. monitoring-event:: PY_START + + Start of a Python function (occurs immediately after the call, the callee's frame will be on the stack) + +.. monitoring-event:: PY_THROW + + A Python function is resumed by a ``throw()`` call. + +.. monitoring-event:: PY_UNWIND + + Exit from a Python function during exception unwinding. + +.. 
monitoring-event:: PY_YIELD + + Yield from a Python function (occurs immediately before the yield, the callee's frame will be on the stack). + +.. monitoring-event:: RAISE + + An exception is raised, except those that cause a :monitoring-event:`STOP_ITERATION` event. + +.. monitoring-event:: RERAISE + + An exception is re-raised, for example at the end of a :keyword:`finally` block. + +.. monitoring-event:: STOP_ITERATION + + An artificial :exc:`StopIteration` is raised; see `the STOP_ITERATION event`_. + + +More events may be added in the future. + +These events are attributes of the :mod:`!sys.monitoring.events` namespace. +Each event is represented as a power-of-2 integer constant. +To define a set of events, simply bitwise or the individual events together. +For example, to specify both :monitoring-event:`PY_RETURN` and :monitoring-event:`PY_START` +events, use the expression ``PY_RETURN | PY_START``. + +.. monitoring-event:: NO_EVENTS + + An alias for ``0`` so users can do explicit comparisons like:: + + if get_events(DEBUGGER_ID) == NO_EVENTS: + ... + +Events are divided into three groups: + +Local events +'''''''''''' + +Local events are associated with normal execution of the program and happen +at clearly defined locations. All local events can be disabled. +The local events are: + +* :monitoring-event:`PY_START` +* :monitoring-event:`PY_RESUME` +* :monitoring-event:`PY_RETURN` +* :monitoring-event:`PY_YIELD` +* :monitoring-event:`CALL` +* :monitoring-event:`LINE` +* :monitoring-event:`INSTRUCTION` +* :monitoring-event:`JUMP` +* :monitoring-event:`BRANCH` +* :monitoring-event:`STOP_ITERATION` + +Ancillary events +'''''''''''''''' + +Ancillary events can be monitored like other events, but are controlled +by another event: + +* :monitoring-event:`C_RAISE` +* :monitoring-event:`C_RETURN` + +The :monitoring-event:`C_RETURN` and :monitoring-event:`C_RAISE` events +are controlled by the :monitoring-event:`CALL` event. +:monitoring-event:`C_RETURN` and :monitoring-event:`C_RAISE` events will only be seen if the +corresponding :monitoring-event:`CALL` event is being monitored. + +Other events +'''''''''''' + +Other events are not necessarily tied to a specific location in the +program and cannot be individually disabled. + +The other events that can be monitored are: + +* :monitoring-event:`PY_THROW` +* :monitoring-event:`PY_UNWIND` +* :monitoring-event:`RAISE` +* :monitoring-event:`EXCEPTION_HANDLED` + + +The STOP_ITERATION event +'''''''''''''''''''''''' + +:pep:`PEP 380 <380#use-of-stopiteration-to-return-values>` +specifies that a :exc:`StopIteration` exception is raised when returning a value +from a generator or coroutine. However, this is a very inefficient way to +return a value, so some Python implementations, notably CPython 3.12+, do not +raise an exception unless it would be visible to other code. + +To allow tools to monitor for real exceptions without slowing down generators +and coroutines, the :monitoring-event:`STOP_ITERATION` event is provided. +:monitoring-event:`STOP_ITERATION` can be locally disabled, unlike :monitoring-event:`RAISE`. + + +Turning events on and off +------------------------- + +In order to monitor an event, it must be turned on and a corresponding callback +must be registered. +Events can be turned on or off by setting the events either globally or +for a particular code object. + + +Setting events globally +''''''''''''''''''''''' + +Events can be controlled globally by modifying the set of events being monitored. + +..
function:: get_events(tool_id: int, /) -> int + + Returns the ``int`` representing all the active events. + +.. function:: set_events(tool_id: int, event_set: int, /) -> None + + Activates all events which are set in *event_set*. + Raises a :exc:`ValueError` if *tool_id* is not in use. + +No events are active by default. + +Per code object events +'''''''''''''''''''''' + +Events can also be controlled on a per code object basis. + +.. function:: get_local_events(tool_id: int, code: CodeType, /) -> int + + Returns all the local events for *code* + +.. function:: set_local_events(tool_id: int, code: CodeType, event_set: int, /) -> None + + Activates all the local events for *code* which are set in *event_set*. + Raises a :exc:`ValueError` if *tool_id* is not in use. + +Local events add to global events, but do not mask them. +In other words, all global events will trigger for a code object, +regardless of the local events. + + +Disabling events +'''''''''''''''' + +.. data:: DISABLE + + A special value that can be returned from a callback function to disable + events for the current code location. + +Local events can be disabled for a specific code location by returning +:data:`sys.monitoring.DISABLE` from a callback function. This does not change +which events are set, or any other code locations for the same event. + +Disabling events for specific locations is very important for high +performance monitoring. For example, a program can be run under a +debugger with no overhead if the debugger disables all monitoring +except for a few breakpoints. + +.. function:: restart_events() -> None + + Enable all the events that were disabled by :data:`sys.monitoring.DISABLE` + for all tools. + + +.. _callbacks: + +Registering callback functions +------------------------------ + +To register a callable for events call + +.. function:: register_callback(tool_id: int, event: int, func: Callable | None, /) -> Callable | None + + Registers the callable *func* for the *event* with the given *tool_id* + + If another callback was registered for the given *tool_id* and *event*, + it is unregistered and returned. + Otherwise :func:`register_callback` returns ``None``. + + +Functions can be unregistered by calling +``sys.monitoring.register_callback(tool_id, event, None)``. + +Callback functions can be registered and unregistered at any time. + +Registering or unregistering a callback function will generate a :func:`sys.audit` event. + + +Callback function arguments +''''''''''''''''''''''''''' + +.. data:: MISSING + + A special value that is passed to a callback function to indicate + that there are no arguments to the call. + +When an active event occurs, the registered callback function is called. +Different events will provide the callback function with different arguments, as follows: + +* :monitoring-event:`PY_START` and :monitoring-event:`PY_RESUME`:: + + func(code: CodeType, instruction_offset: int) -> DISABLE | Any + +* :monitoring-event:`PY_RETURN` and :monitoring-event:`PY_YIELD`:: + + func(code: CodeType, instruction_offset: int, retval: object) -> DISABLE | Any + +* :monitoring-event:`CALL`, :monitoring-event:`C_RAISE` and :monitoring-event:`C_RETURN`:: + + func(code: CodeType, instruction_offset: int, callable: object, arg0: object | MISSING) -> DISABLE | Any + + If there are no arguments, *arg0* is set to :data:`sys.monitoring.MISSING`. 
+ +* :monitoring-event:`RAISE`, :monitoring-event:`RERAISE`, :monitoring-event:`EXCEPTION_HANDLED`, + :monitoring-event:`PY_UNWIND`, :monitoring-event:`PY_THROW` and :monitoring-event:`STOP_ITERATION`:: + + func(code: CodeType, instruction_offset: int, exception: BaseException) -> DISABLE | Any + +* :monitoring-event:`LINE`:: + + func(code: CodeType, line_number: int) -> DISABLE | Any + +* :monitoring-event:`BRANCH` and :monitoring-event:`JUMP`:: + + func(code: CodeType, instruction_offset: int, destination_offset: int) -> DISABLE | Any + + Note that the *destination_offset* is where the code will next execute. + For an untaken branch this will be the offset of the instruction following + the branch. + +* :monitoring-event:`INSTRUCTION`:: + + func(code: CodeType, instruction_offset: int) -> DISABLE | Any diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst index f3fd16c4de75370..bf9aaca2a696ded 100644 --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -22,6 +22,8 @@ always available. .. versionadded:: 3.2 + .. availability:: Unix. + .. function:: addaudithook(hook) @@ -35,6 +37,15 @@ always available. can then log the event, raise an exception to abort the operation, or terminate the process entirely. + Note that audit hooks are primarily for collecting information about internal + or otherwise unobservable actions, whether by Python or libraries written in + Python. They are not suitable for implementing a "sandbox". In particular, + malicious code can trivially disable or bypass hooks added using this + function. At a minimum, any security-sensitive hooks must be added using the + C API :c:func:`PySys_AddAuditHook` before initialising the runtime, and any + modules allowing arbitrary memory modification (such as :mod:`ctypes`) should + be completely removed or closely monitored. + .. audit-event:: sys.addaudithook "" sys.addaudithook Calling :func:`sys.addaudithook` will itself raise an auditing event @@ -157,14 +168,18 @@ always available. Python interpreter. (This information is not available in any other way --- ``modules.keys()`` only lists the imported modules.) - See also the :attr:`sys.stdlib_module_names` list. + See also the :data:`sys.stdlib_module_names` list. .. function:: call_tracing(func, args) Call ``func(*args)``, while tracing is enabled. The tracing state is saved, and restored afterwards. This is intended to be called from a debugger from - a checkpoint, to recursively debug some other code. + a checkpoint, to recursively debug or profile some other code. + + Tracing is suspended while calling a tracing function set by + :func:`settrace` or :func:`setprofile` to avoid infinite recursion. + :func:`!call_tracing` enables explicit recursion of the tracing function. .. data:: copyright @@ -211,6 +226,10 @@ always available. .. audit-event:: sys._current_exceptions "" sys._current_exceptions + .. versionchanged:: 3.12 + Each value in the dictionary is now a single exception instance, rather + than a 3-tuple as returned from ``sys.exc_info()``. + .. function:: breakpointhook() This hook function is called by built-in :func:`breakpoint`. By default, @@ -320,23 +339,21 @@ always available. *wasm32-emscripten* platform. The named tuple is provisional and may change in the future. - .. 
tabularcolumns:: |l|L| - - +-----------------------------+----------------------------------------------+ - | Attribute | Explanation | - +=============================+==============================================+ - | :const:`emscripten_version` | Emscripten version as tuple of ints | - | | (major, minor, micro), e.g. ``(3, 1, 8)``. | - +-----------------------------+----------------------------------------------+ - | :const:`runtime` | Runtime string, e.g. browser user agent, | - | | ``'Node.js v14.18.2'``, or ``'UNKNOWN'``. | - +-----------------------------+----------------------------------------------+ - | :const:`pthreads` | ``True`` if Python is compiled with | - | | Emscripten pthreads support. | - +-----------------------------+----------------------------------------------+ - | :const:`shared_memory` | ``True`` if Python is compiled with shared | - | | memory support. | - +-----------------------------+----------------------------------------------+ + .. attribute:: _emscripten_info.emscripten_version + + Emscripten version as tuple of ints (major, minor, micro), e.g. ``(3, 1, 8)``. + + .. attribute:: _emscripten_info.runtime + + Runtime string, e.g. browser user agent, ``'Node.js v14.18.2'``, or ``'UNKNOWN'``. + + .. attribute:: _emscripten_info.pthreads + + ``True`` if Python is compiled with Emscripten pthreads support. + + .. attribute:: _emscripten_info.shared_memory + + ``True`` if Python is compiled with shared memory support. .. availability:: Emscripten. @@ -367,7 +384,7 @@ always available. This function prints out a given traceback and exception to ``sys.stderr``. - When an exception is raised and uncaught, the interpreter calls + When an exception other than :exc:`SystemExit` is raised and uncaught, the interpreter calls ``sys.excepthook`` with three arguments, the exception class, exception instance, and a traceback object. In an interactive session this happens just before control is returned to the prompt; in a Python program this happens just @@ -431,7 +448,7 @@ always available. object ` which typically encapsulates the call stack at the point where the exception last occurred. - .. index:: object: traceback + .. index:: pair: object; traceback If no exception is being handled anywhere on the stack, this function return a tuple containing three ``None`` values. @@ -502,27 +519,62 @@ always available. The :term:`named tuple` *flags* exposes the status of command line flags. The attributes are read only. 
- ============================= ============================================================================================================== - attribute flag - ============================= ============================================================================================================== - :const:`debug` :option:`-d` - :const:`inspect` :option:`-i` - :const:`interactive` :option:`-i` - :const:`isolated` :option:`-I` - :const:`optimize` :option:`-O` or :option:`-OO` - :const:`dont_write_bytecode` :option:`-B` - :const:`no_user_site` :option:`-s` - :const:`no_site` :option:`-S` - :const:`ignore_environment` :option:`-E` - :const:`verbose` :option:`-v` - :const:`bytes_warning` :option:`-b` - :const:`quiet` :option:`-q` - :const:`hash_randomization` :option:`-R` - :const:`dev_mode` :option:`-X dev <-X>` (:ref:`Python Development Mode `) - :const:`utf8_mode` :option:`-X utf8 <-X>` - :const:`safe_path` :option:`-P` - :const:`int_max_str_digits` :option:`-X int_max_str_digits <-X>` (:ref:`integer string conversion length limitation `) - ============================= ============================================================================================================== + .. list-table:: + + * - .. attribute:: flags.debug + - :option:`-d` + + * - .. attribute:: flags.inspect + - :option:`-i` + + * - .. attribute:: flags.interactive + - :option:`-i` + + * - .. attribute:: flags.isolated + - :option:`-I` + + * - .. attribute:: flags.optimize + - :option:`-O` or :option:`-OO` + + * - .. attribute:: flags.dont_write_bytecode + - :option:`-B` + + * - .. attribute:: flags.no_user_site + - :option:`-s` + + * - .. attribute:: flags.no_site + - :option:`-S` + + * - .. attribute:: flags.ignore_environment + - :option:`-E` + + * - .. attribute:: flags.verbose + - :option:`-v` + + * - .. attribute:: flags.bytes_warning + - :option:`-b` + + * - .. attribute:: flags.quiet + - :option:`-q` + + * - .. attribute:: flags.hash_randomization + - :option:`-R` + + * - .. attribute:: flags.dev_mode + - :option:`-X dev <-X>` (:ref:`Python Development Mode `) + + * - .. attribute:: flags.utf8_mode + - :option:`-X utf8 <-X>` + + * - .. attribute:: flags.safe_path + - :option:`-P` + + * - .. attribute:: flags.int_max_str_digits + - :option:`-X int_max_str_digits <-X>` + (:ref:`integer string conversion length limitation `) + + * - .. attribute:: flags.warn_default_encoding + - :option:`-X warn_default_encoding <-X>` .. versionchanged:: 3.2 Added ``quiet`` attribute for the new :option:`-q` flag. @@ -541,6 +593,9 @@ always available. Mode ` and the ``utf8_mode`` attribute for the new :option:`-X` ``utf8`` flag. + .. versionchanged:: 3.10 + Added ``warn_default_encoding`` attribute for :option:`-X` ``warn_default_encoding`` flag. + .. versionchanged:: 3.11 Added the ``safe_path`` attribute for :option:`-P` option. @@ -557,55 +612,82 @@ always available. programming language; see section 5.2.4.2.2 of the 1999 ISO/IEC C standard [C99]_, 'Characteristics of floating types', for details. - .. tabularcolumns:: |l|l|L| - - +---------------------+----------------+--------------------------------------------------+ - | attribute | float.h macro | explanation | - +=====================+================+==================================================+ - | :const:`epsilon` | DBL_EPSILON | difference between 1.0 and the least value | - | | | greater than 1.0 that is representable as a float| - | | | | - | | | See also :func:`math.ulp`. 
| - +---------------------+----------------+--------------------------------------------------+ - | :const:`dig` | DBL_DIG | maximum number of decimal digits that can be | - | | | faithfully represented in a float; see below | - +---------------------+----------------+--------------------------------------------------+ - | :const:`mant_dig` | DBL_MANT_DIG | float precision: the number of base-``radix`` | - | | | digits in the significand of a float | - +---------------------+----------------+--------------------------------------------------+ - | :const:`max` | DBL_MAX | maximum representable positive finite float | - +---------------------+----------------+--------------------------------------------------+ - | :const:`max_exp` | DBL_MAX_EXP | maximum integer *e* such that ``radix**(e-1)`` is| - | | | a representable finite float | - +---------------------+----------------+--------------------------------------------------+ - | :const:`max_10_exp` | DBL_MAX_10_EXP | maximum integer *e* such that ``10**e`` is in the| - | | | range of representable finite floats | - +---------------------+----------------+--------------------------------------------------+ - | :const:`min` | DBL_MIN | minimum representable positive *normalized* float| - | | | | - | | | Use :func:`math.ulp(0.0) ` to get the | - | | | smallest positive *denormalized* representable | - | | | float. | - +---------------------+----------------+--------------------------------------------------+ - | :const:`min_exp` | DBL_MIN_EXP | minimum integer *e* such that ``radix**(e-1)`` is| - | | | a normalized float | - +---------------------+----------------+--------------------------------------------------+ - | :const:`min_10_exp` | DBL_MIN_10_EXP | minimum integer *e* such that ``10**e`` is a | - | | | normalized float | - +---------------------+----------------+--------------------------------------------------+ - | :const:`radix` | FLT_RADIX | radix of exponent representation | - +---------------------+----------------+--------------------------------------------------+ - | :const:`rounds` | FLT_ROUNDS | integer constant representing the rounding mode | - | | | used for arithmetic operations. This reflects | - | | | the value of the system FLT_ROUNDS macro at | - | | | interpreter startup time. See section 5.2.4.2.2 | - | | | of the C99 standard for an explanation of the | - | | | possible values and their meanings. | - +---------------------+----------------+--------------------------------------------------+ + .. list-table:: Attributes of the :data:`!float_info` :term:`named tuple` + :header-rows: 1 + + * - attribute + - float.h macro + - explanation + + * - .. attribute:: float_info.epsilon + - :c:macro:`!DBL_EPSILON` + - difference between 1.0 and the least value greater than 1.0 that is + representable as a float. + + See also :func:`math.ulp`. + + * - .. attribute:: float_info.dig + - :c:macro:`!DBL_DIG` + - The maximum number of decimal digits that can be faithfully + represented in a float; see below. + + * - .. attribute:: float_info.mant_dig + - :c:macro:`!DBL_MANT_DIG` + - Float precision: the number of base-``radix`` digits in the + significand of a float. + + * - .. attribute:: float_info.max + - :c:macro:`!DBL_MAX` + - The maximum representable positive finite float. + + * - .. attribute:: float_info.max_exp + - :c:macro:`!DBL_MAX_EXP` + - The maximum integer *e* such that ``radix**(e-1)`` is a representable + finite float. + + * - .. 
attribute:: float_info.max_10_exp + - :c:macro:`!DBL_MAX_10_EXP` + - The maximum integer *e* such that ``10**e`` is in the range of + representable finite floats. + + * - .. attribute:: float_info.min + - :c:macro:`!DBL_MIN` + - The minimum representable positive *normalized* float. + + Use :func:`math.ulp(0.0) ` to get the smallest positive + *denormalized* representable float. + + * - .. attribute:: float_info.min_exp + - :c:macro:`!DBL_MIN_EXP` + - The minimum integer *e* such that ``radix**(e-1)`` is a normalized + float. + + * - .. attribute:: float_info.min_10_exp + - :c:macro:`!DBL_MIN_10_EXP` + - The minimum integer *e* such that ``10**e`` is a normalized float. + + * - .. attribute:: float_info.radix + - :c:macro:`!FLT_RADIX` + - The radix of exponent representation. + + * - .. attribute:: float_info.rounds + - :c:macro:`!FLT_ROUNDS` + - An integer representing the rounding mode for floating-point arithmetic. + This reflects the value of the system :c:macro:`!FLT_ROUNDS` macro + at interpreter startup time: + + * ``-1``: indeterminable + * ``0``: toward zero + * ``1``: to nearest + * ``2``: toward positive infinity + * ``3``: toward negative infinity + + All other values for :c:macro:`!FLT_ROUNDS` characterize + implementation-defined rounding behavior. The attribute :attr:`sys.float_info.dig` needs further explanation. If ``s`` is any string representing a decimal number with at most - :attr:`sys.float_info.dig` significant digits, then converting ``s`` to a + :attr:`!sys.float_info.dig` significant digits, then converting ``s`` to a float and back again will recover a string representing the same decimal value:: @@ -651,6 +733,13 @@ always available. .. versionadded:: 3.4 +.. function:: getunicodeinternedsize() + + Return the number of unicode objects that have been interned. + + .. versionadded:: 3.12 + + .. function:: getandroidapilevel() Return the build time API version of Android as an integer. @@ -670,8 +759,8 @@ always available. Return the current value of the flags that are used for :c:func:`dlopen` calls. Symbolic names for the flag values can be - found in the :mod:`os` module (``RTLD_xxx`` constants, e.g. - :data:`os.RTLD_LAZY`). + found in the :mod:`os` module (:samp:`RTLD_{xxx}` constants, e.g. + :const:`os.RTLD_LAZY`). .. availability:: Unix. @@ -682,7 +771,7 @@ always available. the encoding used with the :term:`filesystem error handler ` to convert between Unicode filenames and bytes filenames. The filesystem error handler is returned from - :func:`getfilesystemencoding`. + :func:`getfilesystemencodeerrors`. For best compatibility, str should be used for filenames in all cases, although representing filenames as bytes is also supported. Functions @@ -740,6 +829,15 @@ always available. higher than you might expect, because it includes the (temporary) reference as an argument to :func:`getrefcount`. + Note that the returned value may not actually reflect how many + references to the object are actually held. For example, some + objects are :term:`immortal` and have a very high refcount that does not + reflect the actual number of references. Consequently, do not rely + on the returned value to be accurate, other than a value of 0 or 1. + + .. versionchanged:: 3.12 + Immortal objects have very large refcounts that do not match + the actual number of references to the object. .. function:: getrecursionlimit() @@ -766,7 +864,7 @@ always available. additional garbage collector overhead if the object is managed by the garbage collector. 
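+   For example, the size reported for a container does not include the sizes
+   of the objects it refers to (the numbers below are illustrative and vary
+   by platform and Python version)::
+
+      >>> import sys
+      >>> sys.getsizeof([])               # the empty list object itself
+      56
+      >>> sys.getsizeof(['x' * 1000])     # one reference; the string is not counted
+      64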
- See `recursive sizeof recipe `_ + See `recursive sizeof recipe `_ for an example of using :func:`getsizeof` recursively to find the size of containers and all their contents. @@ -793,6 +891,22 @@ always available. It is not guaranteed to exist in all implementations of Python. +.. function:: _getframemodulename([depth]) + + Return the name of a module from the call stack. If optional integer *depth* + is given, return the module that many calls below the top of the stack. If + that is deeper than the call stack, or if the module is unidentifiable, + ``None`` is returned. The default for *depth* is zero, returning the + module at the top of the call stack. + + .. audit-event:: sys._getframemodulename depth sys._getframemodulename + + .. impl-detail:: + + This function should be used for internal and specialized purposes only. + It is not guaranteed to exist in all implementations of Python. + + .. function:: getprofile() .. index:: @@ -831,24 +945,24 @@ always available. ``sys.getwindowsversion().major``. For compatibility with prior versions, only the first 5 elements are retrievable by indexing. - *platform* will be :const:`2 (VER_PLATFORM_WIN32_NT)`. + *platform* will be ``2`` (VER_PLATFORM_WIN32_NT). *product_type* may be one of the following values: +---------------------------------------+---------------------------------+ | Constant | Meaning | +=======================================+=================================+ - | :const:`1 (VER_NT_WORKSTATION)` | The system is a workstation. | + | ``1`` (VER_NT_WORKSTATION) | The system is a workstation. | +---------------------------------------+---------------------------------+ - | :const:`2 (VER_NT_DOMAIN_CONTROLLER)` | The system is a domain | + | ``2`` (VER_NT_DOMAIN_CONTROLLER) | The system is a domain | | | controller. | +---------------------------------------+---------------------------------+ - | :const:`3 (VER_NT_SERVER)` | The system is a server, but not | + | ``3`` (VER_NT_SERVER) | The system is a server, but not | | | a domain controller. | +---------------------------------------+---------------------------------+ - This function wraps the Win32 :c:func:`GetVersionEx` function; see the - Microsoft documentation on :c:func:`OSVERSIONINFOEX` for more information + This function wraps the Win32 :c:func:`!GetVersionEx` function; see the + Microsoft documentation on :c:func:`!OSVERSIONINFOEX` for more information about these fields. *platform_version* returns the major version, minor version and @@ -906,28 +1020,37 @@ always available. implementation. For more details about hashing of numeric types, see :ref:`numeric-hash`. 
- +---------------------+--------------------------------------------------+ - | attribute | explanation | - +=====================+==================================================+ - | :const:`width` | width in bits used for hash values | - +---------------------+--------------------------------------------------+ - | :const:`modulus` | prime modulus P used for numeric hash scheme | - +---------------------+--------------------------------------------------+ - | :const:`inf` | hash value returned for a positive infinity | - +---------------------+--------------------------------------------------+ - | :const:`nan` | (this attribute is no longer used) | - +---------------------+--------------------------------------------------+ - | :const:`imag` | multiplier used for the imaginary part of a | - | | complex number | - +---------------------+--------------------------------------------------+ - | :const:`algorithm` | name of the algorithm for hashing of str, bytes, | - | | and memoryview | - +---------------------+--------------------------------------------------+ - | :const:`hash_bits` | internal output size of the hash algorithm | - +---------------------+--------------------------------------------------+ - | :const:`seed_bits` | size of the seed key of the hash algorithm | - +---------------------+--------------------------------------------------+ + .. attribute:: hash_info.width + + The width in bits used for hash values + + .. attribute:: hash_info.modulus + + The prime modulus P used for numeric hash scheme + + .. attribute:: hash_info.inf + + The hash value returned for a positive infinity + + .. attribute:: hash_info.nan + + (This attribute is no longer used) + .. attribute:: hash_info.imag + + The multiplier used for the imaginary part of a complex number + + .. attribute:: hash_info.algorithm + + The name of the algorithm for hashing of str, bytes, and memoryview + + .. attribute:: hash_info.hash_bits + + The internal output size of the hash algorithm + + .. attribute:: hash_info.seed_bits + + The size of the seed key of the hash algorithm .. versionadded:: 3.2 @@ -1005,32 +1128,31 @@ always available. A :term:`named tuple` that holds information about Python's internal representation of integers. The attributes are read only. - .. tabularcolumns:: |l|L| - - +----------------------------------------+-----------------------------------------------+ - | Attribute | Explanation | - +========================================+===============================================+ - | :const:`bits_per_digit` | number of bits held in each digit. Python | - | | integers are stored internally in base | - | | ``2**int_info.bits_per_digit`` | - +----------------------------------------+-----------------------------------------------+ - | :const:`sizeof_digit` | size in bytes of the C type used to | - | | represent a digit | - +----------------------------------------+-----------------------------------------------+ - | :const:`default_max_str_digits` | default value for | - | | :func:`sys.get_int_max_str_digits` when it | - | | is not otherwise explicitly configured. | - +----------------------------------------+-----------------------------------------------+ - | :const:`str_digits_check_threshold` | minimum non-zero value for | - | | :func:`sys.set_int_max_str_digits`, | - | | :envvar:`PYTHONINTMAXSTRDIGITS`, or | - | | :option:`-X int_max_str_digits <-X>`. | - +----------------------------------------+-----------------------------------------------+ + .. 
attribute:: int_info.bits_per_digit + + The number of bits held in each digit. + Python integers are stored internally in base ``2**int_info.bits_per_digit``. + + .. attribute:: int_info.sizeof_digit + + The size in bytes of the C type used to represent a digit. + + .. attribute:: int_info.default_max_str_digits + + The default value for :func:`sys.get_int_max_str_digits` + when it is not otherwise explicitly configured. + + .. attribute:: int_info.str_digits_check_threshold + + The minimum non-zero value for :func:`sys.set_int_max_str_digits`, + :envvar:`PYTHONINTMAXSTRDIGITS`, or :option:`-X int_max_str_digits <-X>`. .. versionadded:: 3.1 .. versionchanged:: 3.11 - Added ``default_max_str_digits`` and ``str_digits_check_threshold``. + + Added :attr:`~int_info.default_max_str_digits` and + :attr:`~int_info.str_digits_check_threshold`. .. data:: __interactivehook__ @@ -1060,33 +1182,36 @@ always available. names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. - Interned strings are not immortal; you must keep a reference to the return - value of :func:`intern` around to benefit from it. + Interned strings are not :term:`immortal`; you must keep a reference to the + return value of :func:`intern` around to benefit from it. .. function:: is_finalizing() - Return :const:`True` if the Python interpreter is - :term:`shutting down `, :const:`False` otherwise. + Return :const:`True` if the main Python interpreter is + :term:`shutting down `. Return :const:`False` otherwise. .. versionadded:: 3.5 +.. data:: last_exc + + This variable is not always defined; it is set to the exception instance + when an exception is not handled and the interpreter prints an error message + and a stack traceback. Its intended use is to allow an interactive user to + import a debugger module and engage in post-mortem debugging without having + to re-execute the command that caused the error. (Typical use is + ``import pdb; pdb.pm()`` to enter the post-mortem debugger; see :mod:`pdb` + module for more information.) + + .. versionadded:: 3.12 .. data:: last_type last_value last_traceback - These three variables are not always defined; they are set when an exception is - not handled and the interpreter prints an error message and a stack traceback. - Their intended use is to allow an interactive user to import a debugger module - and engage in post-mortem debugging without having to re-execute the command - that caused the error. (Typical use is ``import pdb; pdb.pm()`` to enter the - post-mortem debugger; see :mod:`pdb` module for - more information.) - - The meaning of the variables is the same as that of the return values from - :func:`exc_info` above. - + These three variables are deprecated; use :data:`sys.last_exc` instead. + They hold the legacy representation of ``sys.last_exc``, as returned + from :func:`exc_info` above. .. data:: maxsize @@ -1132,7 +1257,7 @@ always available. :term:`Module specs ` were introduced in Python 3.4, by :pep:`451`. Earlier versions of Python looked for a method called - :meth:`~importlib.abc.MetaPathFinder.find_module`. + :meth:`!find_module`. This is still called as a fallback if a :data:`meta_path` entry doesn't have a :meth:`~importlib.abc.MetaPathFinder.find_spec` method. @@ -1208,10 +1333,6 @@ always available. Originally specified in :pep:`302`. - .. versionchanged:: 3.3 - ``None`` is stored instead of :class:`imp.NullImporter` when no finder - is found. - .. 
data:: platform @@ -1246,20 +1367,20 @@ always available. ================ =========================== .. versionchanged:: 3.3 - On Linux, :attr:`sys.platform` doesn't contain the major version anymore. + On Linux, :data:`sys.platform` doesn't contain the major version anymore. It is always ``'linux'``, instead of ``'linux2'`` or ``'linux3'``. Since older Python versions include the version number, it is recommended to always use the ``startswith`` idiom presented above. .. versionchanged:: 3.8 - On AIX, :attr:`sys.platform` doesn't contain the major version anymore. + On AIX, :data:`sys.platform` doesn't contain the major version anymore. It is always ``'aix'``, instead of ``'aix5'`` or ``'aix7'``. Since older Python versions include the version number, it is recommended to always use the ``startswith`` idiom presented above. .. seealso:: - :attr:`os.name` has a coarser granularity. :func:`os.uname` gives + :data:`os.name` has a coarser granularity. :func:`os.uname` gives system-dependent version information. The :mod:`platform` module provides detailed checks for the @@ -1292,7 +1413,7 @@ always available. A string giving the site-specific directory prefix where the platform independent Python files are installed; on Unix, the default is - ``'/usr/local'``. This can be set at build time with the ``--prefix`` + :file:`/usr/local`. This can be set at build time with the :option:`--prefix` argument to the :program:`configure` script. See :ref:`installation_paths` for derived paths. @@ -1326,8 +1447,8 @@ always available. lazy resolving of symbols when importing a module, if called as ``sys.setdlopenflags(0)``. To share symbols across extension modules, call as ``sys.setdlopenflags(os.RTLD_GLOBAL)``. Symbolic names for the flag values - can be found in the :mod:`os` module (``RTLD_xxx`` constants, e.g. - :data:`os.RTLD_LAZY`). + can be found in the :mod:`os` module (:samp:`RTLD_{xxx}` constants, e.g. + :const:`os.RTLD_LAZY`). .. availability:: Unix. @@ -1356,13 +1477,16 @@ always available. its return value is not used, so it can simply return ``None``. Error in the profile function will cause itself unset. + .. note:: + The same tracing mechanism is used for :func:`!setprofile` as :func:`settrace`. + To trace calls with :func:`!setprofile` inside a tracing function + (e.g. in a debugger breakpoint), see :func:`call_tracing`. + Profile functions should have three arguments: *frame*, *event*, and *arg*. *frame* is the current stack frame. *event* is a string: ``'call'``, ``'return'``, ``'c_call'``, ``'c_return'``, or ``'c_exception'``. *arg* depends on the event type. - .. audit-event:: sys.setprofile "" sys.setprofile - The events have the following meaning: ``'call'`` @@ -1384,6 +1508,9 @@ always available. ``'c_exception'`` A C function has raised an exception. *arg* is the C function object. + .. audit-event:: sys.setprofile "" sys.setprofile + + .. function:: setrecursionlimit(limit) Set the maximum depth of the Python interpreter stack to *limit*. This limit @@ -1437,13 +1564,16 @@ always available. function to be used for the new scope, or ``None`` if the scope shouldn't be traced. - The local trace function should return a reference to itself (or to another - function for further tracing in that scope), or ``None`` to turn off tracing - in that scope. + The local trace function should return a reference to itself, or to another + function which would then be used as the local trace function for the scope. 
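+   For example, a minimal tracer that reports each call and keeps itself
+   installed as the local trace function for the new scope could look like
+   this (a sketch for illustration only)::
+
+      import sys
+
+      def tracer(frame, event, arg):
+          if event == 'call':
+              print(f"calling {frame.f_code.co_name}")
+          # Returning the function itself keeps tracing the new scope.
+          return tracer
+
+      sys.settrace(tracer)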
If there is any error occurred in the trace function, it will be unset, just like ``settrace(None)`` is called. + .. note:: + Tracing is disabled while calling the trace function (e.g. a function set by + :func:`!settrace`). For recursive tracing see :func:`call_tracing`. + The events have the following meaning: ``'call'`` @@ -1458,7 +1588,7 @@ always available. :file:`Objects/lnotab_notes.txt` for a detailed explanation of how this works. Per-line events may be disabled for a frame by setting - :attr:`f_trace_lines` to :const:`False` on that frame. + :attr:`!f_trace_lines` to :const:`False` on that :ref:`frame `. ``'return'`` A function (or other code block) is about to return. The local trace @@ -1476,8 +1606,8 @@ always available. opcode details). The local trace function is called; *arg* is ``None``; the return value specifies the new local trace function. Per-opcode events are not emitted by default: they must be explicitly - requested by setting :attr:`f_trace_opcodes` to :const:`True` on the - frame. + requested by setting :attr:`!f_trace_opcodes` to :const:`True` on the + :ref:`frame `. Note that as an exception is propagated down the chain of callers, an ``'exception'`` event is generated at each level. @@ -1506,8 +1636,8 @@ always available. .. versionchanged:: 3.7 - ``'opcode'`` event type added; :attr:`f_trace_lines` and - :attr:`f_trace_opcodes` attributes added to frames + ``'opcode'`` event type added; :attr:`!f_trace_lines` and + :attr:`!f_trace_opcodes` attributes added to frames .. function:: set_asyncgen_hooks(firstiter, finalizer) @@ -1664,7 +1794,7 @@ always available. However, if you are writing a library (and do not control in which context its code will be executed), be aware that the standard streams may be replaced with file-like objects like :class:`io.StringIO` which - do not support the :attr:`~io.BufferedIOBase.buffer` attribute. + do not support the :attr:`!buffer` attribute. .. data:: __stdin__ @@ -1702,7 +1832,7 @@ always available. ``email.mime`` sub-package and the ``email.message`` sub-module are not listed. - See also the :attr:`sys.builtin_module_names` list. + See also the :data:`sys.builtin_module_names` list. .. versionadded:: 3.10 @@ -1712,29 +1842,28 @@ always available. A :term:`named tuple` holding information about the thread implementation. - .. tabularcolumns:: |l|p{0.7\linewidth}| - - +------------------+---------------------------------------------------------+ - | Attribute | Explanation | - +==================+=========================================================+ - | :const:`name` | Name of the thread implementation: | - | | | - | | * ``'nt'``: Windows threads | - | | * ``'pthread'``: POSIX threads | - | | * ``'pthread-stubs'``: stub POSIX threads | - | | (on WebAssembly platforms without threading support) | - | | * ``'solaris'``: Solaris threads | - +------------------+---------------------------------------------------------+ - | :const:`lock` | Name of the lock implementation: | - | | | - | | * ``'semaphore'``: a lock uses a semaphore | - | | * ``'mutex+cond'``: a lock uses a mutex | - | | and a condition variable | - | | * ``None`` if this information is unknown | - +------------------+---------------------------------------------------------+ - | :const:`version` | Name and version of the thread library. It is a string, | - | | or ``None`` if this information is unknown. | - +------------------+---------------------------------------------------------+ + .. 
attribute:: thread_info.name + + The name of the thread implementation: + + * ``"nt"``: Windows threads + * ``"pthread"``: POSIX threads + * ``"pthread-stubs"``: stub POSIX threads + (on WebAssembly platforms without threading support) + * ``"solaris"``: Solaris threads + + .. attribute:: thread_info.lock + + The name of the lock implementation: + + * ``"semaphore"``: a lock uses a semaphore + * ``"mutex+cond"``: a lock uses a mutex and a condition variable + * ``None`` if this information is unknown + + .. attribute:: thread_info.version + + The name and version of the thread library. + It is a string, or ``None`` if this information is unknown. .. versionadded:: 3.3 @@ -1757,35 +1886,39 @@ always available. The *unraisable* argument has the following attributes: - * *exc_type*: Exception type. - * *exc_value*: Exception value, can be ``None``. - * *exc_traceback*: Exception traceback, can be ``None``. - * *err_msg*: Error message, can be ``None``. - * *object*: Object causing the exception, can be ``None``. + * :attr:`!exc_type`: Exception type. + * :attr:`!exc_value`: Exception value, can be ``None``. + * :attr:`!exc_traceback`: Exception traceback, can be ``None``. + * :attr:`!err_msg`: Error message, can be ``None``. + * :attr:`!object`: Object causing the exception, can be ``None``. - The default hook formats *err_msg* and *object* as: + The default hook formats :attr:`!err_msg` and :attr:`!object` as: ``f'{err_msg}: {object!r}'``; use "Exception ignored in" error message - if *err_msg* is ``None``. + if :attr:`!err_msg` is ``None``. :func:`sys.unraisablehook` can be overridden to control how unraisable exceptions are handled. - Storing *exc_value* using a custom hook can create a reference cycle. It - should be cleared explicitly to break the reference cycle when the - exception is no longer needed. + .. seealso:: + + :func:`excepthook` which handles uncaught exceptions. - Storing *object* using a custom hook can resurrect it if it is set to an - object which is being finalized. Avoid storing *object* after the custom - hook completes to avoid resurrecting objects. + .. warning:: - See also :func:`excepthook` which handles uncaught exceptions. + Storing :attr:`!exc_value` using a custom hook can create a reference cycle. + It should be cleared explicitly to break the reference cycle when the + exception is no longer needed. + + Storing :attr:`!object` using a custom hook can resurrect it if it is set to an + object which is being finalized. Avoid storing :attr:`!object` after the custom + hook completes to avoid resurrecting objects. .. audit-event:: sys.unraisablehook hook,unraisable sys.unraisablehook Raise an auditing event ``sys.unraisablehook`` with arguments - ``hook``, ``unraisable`` when an exception that cannot be handled occurs. - The ``unraisable`` object is the same as what will be passed to the hook. - If no hook has been set, ``hook`` may be ``None``. + *hook*, *unraisable* when an exception that cannot be handled occurs. + The *unraisable* object is the same as what will be passed to the hook. + If no hook has been set, *hook* may be ``None``. .. versionadded:: 3.8 @@ -1828,13 +1961,20 @@ always available. The version number used to form registry keys on Windows platforms. This is stored as string resource 1000 in the Python DLL. The value is normally the - first three characters of :const:`version`. It is provided in the :mod:`sys` + major and minor versions of the running Python interpreter. 
It is provided in the :mod:`sys` module for informational purposes; modifying this value has no effect on the registry keys used by Python. .. availability:: Windows. +.. data:: monitoring + :noindex: + + Namespace containing functions and constants for register callbacks + and controlling monitoring events. + See :mod:`sys.monitoring` for details. + .. data:: _xoptions A dictionary of the various implementation-specific flags passed through diff --git a/Doc/library/sys_path_init.rst b/Doc/library/sys_path_init.rst index 72c1387344c2ad0..a87a41cf829fa89 100644 --- a/Doc/library/sys_path_init.rst +++ b/Doc/library/sys_path_init.rst @@ -99,8 +99,7 @@ Embedded Python If Python is embedded within another application :c:func:`Py_InitializeFromConfig` and the :c:type:`PyConfig` structure can be used to initialize Python. The path specific -details are described at :ref:`init-path-config`. Alternatively the older :c:func:`Py_SetPath` -can be used to bypass the initialization of the module search path. +details are described at :ref:`init-path-config`. .. seealso:: diff --git a/Doc/library/sysconfig.rst b/Doc/library/sysconfig.rst index 024988777030f84..2faab212e46eff2 100644 --- a/Doc/library/sysconfig.rst +++ b/Doc/library/sysconfig.rst @@ -9,7 +9,7 @@ .. versionadded:: 3.2 -**Source code:** :source:`Lib/sysconfig.py` +**Source code:** :source:`Lib/sysconfig` .. index:: single: configuration information @@ -20,6 +20,7 @@ The :mod:`sysconfig` module provides access to Python's configuration information like the list of installation paths and the configuration variables relevant for the current platform. + Configuration variables ----------------------- @@ -60,6 +61,7 @@ Example of usage:: >>> sysconfig.get_config_vars('AR', 'CXX') ['ar', 'g++'] + .. _installation_paths: Installation paths @@ -68,29 +70,26 @@ Installation paths Python uses an installation scheme that differs depending on the platform and on the installation options. These schemes are stored in :mod:`sysconfig` under unique identifiers based on the value returned by :const:`os.name`. - -Every new component that is installed using :mod:`distutils` or a -Distutils-based system will follow the same scheme to copy its file in the right -places. +The schemes are used by package installers to determine where to copy files to. Python currently supports nine schemes: - *posix_prefix*: scheme for POSIX platforms like Linux or macOS. This is the default scheme used when Python or a component is installed. -- *posix_home*: scheme for POSIX platforms used when a *home* option is used - upon installation. This scheme is used when a component is installed through - Distutils with a specific home prefix. -- *posix_user*: scheme for POSIX platforms used when a component is installed - through Distutils and the *user* option is used. This scheme defines paths - located under the user home directory. +- *posix_home*: scheme for POSIX platforms, when the *home* option is used. + This scheme defines paths located under a specific home prefix. +- *posix_user*: scheme for POSIX platforms, when the *user* option is used. + This scheme defines paths located under the user's home directory + (:const:`site.USER_BASE`). - *posix_venv*: scheme for :mod:`Python virtual environments ` on POSIX - platforms; by default it is the same as *posix_prefix* . -- *nt*: scheme for NT platforms like Windows. -- *nt_user*: scheme for NT platforms, when the *user* option is used. 
-- *nt_venv*: scheme for :mod:`Python virtual environments ` on NT - platforms; by default it is the same as *nt* . -- *venv*: a scheme with values from ether *posix_venv* or *nt_venv* depending - on the platform Python runs on + platforms; by default it is the same as *posix_prefix*. +- *nt*: scheme for Windows. + This is the default scheme used when Python or a component is installed. +- *nt_user*: scheme for Windows, when the *user* option is used. +- *nt_venv*: scheme for :mod:`Python virtual environments ` on Windows; + by default it is the same as *nt*. +- *venv*: a scheme with values from either *posix_venv* or *nt_venv* depending + on the platform Python runs on. - *osx_framework_user*: scheme for macOS, when the *user* option is used. Each scheme is itself composed of a series of paths and each path has a unique @@ -101,7 +100,7 @@ identifier. Python currently uses eight paths: - *platstdlib*: directory containing the standard Python library files that are platform-specific. - *platlib*: directory for site-specific, platform-specific files. -- *purelib*: directory for site-specific, non-platform-specific files. +- *purelib*: directory for site-specific, non-platform-specific files ('pure' Python). - *include*: directory for non-platform-specific header files for the Python C-API. - *platinclude*: directory for platform-specific header files for @@ -109,7 +108,157 @@ identifier. Python currently uses eight paths: - *scripts*: directory for script files. - *data*: directory for data files. -:mod:`sysconfig` provides some functions to determine these paths. + +.. _sysconfig-user-scheme: + +User scheme +--------------- + +This scheme is designed to be the most convenient solution for users that don't +have write permission to the global site-packages directory or don't want to +install into it. + +Files will be installed into subdirectories of :const:`site.USER_BASE` (written +as :file:`{userbase}` hereafter). This scheme installs pure Python modules and +extension modules in the same location (also known as :const:`site.USER_SITE`). 
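+For example, the concrete directories of the user scheme on the current
+platform can be queried with :func:`get_paths` (the paths shown below are
+illustrative; they depend on the platform, the user and the Python version)::
+
+   >>> import sysconfig
+   >>> sysconfig.get_paths(scheme='posix_user')['purelib']
+   '/home/user/.local/lib/python3.12/site-packages'
+   >>> sysconfig.get_paths(scheme='posix_user')['scripts']
+   '/home/user/.local/bin'
+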
+ +``posix_user`` +^^^^^^^^^^^^^^ + +============== =========================================================== +Path Installation directory +============== =========================================================== +*stdlib* :file:`{userbase}/lib/python{X.Y}` +*platstdlib* :file:`{userbase}/lib/python{X.Y}` +*platlib* :file:`{userbase}/lib/python{X.Y}/site-packages` +*purelib* :file:`{userbase}/lib/python{X.Y}/site-packages` +*include* :file:`{userbase}/include/python{X.Y}` +*scripts* :file:`{userbase}/bin` +*data* :file:`{userbase}` +============== =========================================================== + +``nt_user`` +^^^^^^^^^^^ + +============== =========================================================== +Path Installation directory +============== =========================================================== +*stdlib* :file:`{userbase}\\Python{XY}` +*platstdlib* :file:`{userbase}\\Python{XY}` +*platlib* :file:`{userbase}\\Python{XY}\\site-packages` +*purelib* :file:`{userbase}\\Python{XY}\\site-packages` +*include* :file:`{userbase}\\Python{XY}\\Include` +*scripts* :file:`{userbase}\\Python{XY}\\Scripts` +*data* :file:`{userbase}` +============== =========================================================== + +``osx_framework_user`` +^^^^^^^^^^^^^^^^^^^^^^ + +============== =========================================================== +Path Installation directory +============== =========================================================== +*stdlib* :file:`{userbase}/lib/python` +*platstdlib* :file:`{userbase}/lib/python` +*platlib* :file:`{userbase}/lib/python/site-packages` +*purelib* :file:`{userbase}/lib/python/site-packages` +*include* :file:`{userbase}/include/python{X.Y}` +*scripts* :file:`{userbase}/bin` +*data* :file:`{userbase}` +============== =========================================================== + + +.. _sysconfig-home-scheme: + +Home scheme +----------- + +The idea behind the "home scheme" is that you build and maintain a personal +stash of Python modules. This scheme's name is derived from the idea of a +"home" directory on Unix, since it's not unusual for a Unix user to make their +home directory have a layout similar to :file:`/usr/` or :file:`/usr/local/`. +This scheme can be used by anyone, regardless of the operating system they +are installing for. + +``posix_home`` +^^^^^^^^^^^^^^ + +============== =========================================================== +Path Installation directory +============== =========================================================== +*stdlib* :file:`{home}/lib/python` +*platstdlib* :file:`{home}/lib/python` +*platlib* :file:`{home}/lib/python` +*purelib* :file:`{home}/lib/python` +*include* :file:`{home}/include/python` +*platinclude* :file:`{home}/include/python` +*scripts* :file:`{home}/bin` +*data* :file:`{home}` +============== =========================================================== + + +.. _sysconfig-prefix-scheme: + +Prefix scheme +------------- + +The "prefix scheme" is useful when you wish to use one Python installation to +perform the build/install (i.e., to run the setup script), but install modules +into the third-party module directory of a different Python installation (or +something that looks like a different Python installation). If this sounds a +trifle unusual, it is---that's why the user and home schemes come before. However, +there are at least two known cases where the prefix scheme will be useful. 
+ +First, consider that many Linux distributions put Python in :file:`/usr`, rather +than the more traditional :file:`/usr/local`. This is entirely appropriate, +since in those cases Python is part of "the system" rather than a local add-on. +However, if you are installing Python modules from source, you probably want +them to go in :file:`/usr/local/lib/python2.{X}` rather than +:file:`/usr/lib/python2.{X}`. + +Another possibility is a network filesystem where the name used to write to a +remote directory is different from the name used to read it: for example, the +Python interpreter accessed as :file:`/usr/local/bin/python` might search for +modules in :file:`/usr/local/lib/python2.{X}`, but those modules would have to +be installed to, say, :file:`/mnt/{@server}/export/lib/python2.{X}`. + +``posix_prefix`` +^^^^^^^^^^^^^^^^ + +============== ========================================================== +Path Installation directory +============== ========================================================== +*stdlib* :file:`{prefix}/lib/python{X.Y}` +*platstdlib* :file:`{prefix}/lib/python{X.Y}` +*platlib* :file:`{prefix}/lib/python{X.Y}/site-packages` +*purelib* :file:`{prefix}/lib/python{X.Y}/site-packages` +*include* :file:`{prefix}/include/python{X.Y}` +*platinclude* :file:`{prefix}/include/python{X.Y}` +*scripts* :file:`{prefix}/bin` +*data* :file:`{prefix}` +============== ========================================================== + +``nt`` +^^^^^^ + +============== ========================================================== +Path Installation directory +============== ========================================================== +*stdlib* :file:`{prefix}\\Lib` +*platstdlib* :file:`{prefix}\\Lib` +*platlib* :file:`{prefix}\\Lib\\site-packages` +*purelib* :file:`{prefix}\\Lib\\site-packages` +*include* :file:`{prefix}\\Include` +*platinclude* :file:`{prefix}\\Include` +*scripts* :file:`{prefix}\\Scripts` +*data* :file:`{prefix}` +============== ========================================================== + + +Installation path functions +--------------------------- + +:mod:`sysconfig` provides some functions to determine these installation paths. .. function:: get_scheme_names() @@ -121,7 +270,7 @@ identifier. Python currently uses eight paths: Return the default scheme name for the current platform. - .. versionchanged:: 3.10 + .. versionadded:: 3.10 This function was previously named ``_get_default_scheme()`` and considered an implementation detail. @@ -187,7 +336,7 @@ identifier. Python currently uses eight paths: platform is used. If *vars* is provided, it must be a dictionary of variables that will update - the dictionary return by :func:`get_config_vars`. + the dictionary returned by :func:`get_config_vars`. If *expand* is set to ``False``, the path will not be expanded using the variables. @@ -278,6 +427,7 @@ Other functions Return the path of :file:`Makefile`. +.. _sysconfig-cli: Using :mod:`sysconfig` as a script ---------------------------------- diff --git a/Doc/library/syslog.rst b/Doc/library/syslog.rst index 766ff57cc66d69c..b5ab446e0096eda 100644 --- a/Doc/library/syslog.rst +++ b/Doc/library/syslog.rst @@ -11,12 +11,12 @@ This module provides an interface to the Unix ``syslog`` library routines. Refer to the Unix manual pages for a detailed description of the ``syslog`` facility. +.. availability:: Unix, not Emscripten, not WASI. + This module wraps the system ``syslog`` family of routines. 
A pure Python library that can speak to a syslog server is available in the :mod:`logging.handlers` module as :class:`SysLogHandler`. -.. include:: ../includes/wasm-notavail.rst - The module defines the following functions: @@ -40,6 +40,13 @@ The module defines the following functions: it wasn't called prior to the call to :func:`syslog`, deferring to the syslog implementation to call ``openlog()``. + .. versionchanged:: 3.12 + This function is restricted in subinterpreters. + (Only code that runs in multiple interpreters is affected and + the restriction is not relevant for most users.) + :func:`openlog` must be called in the main interpreter before :func:`syslog` may be used + in a subinterpreter. Otherwise it will raise :exc:`RuntimeError`. + .. function:: openlog([ident[, logoption[, facility]]]) @@ -60,6 +67,13 @@ The module defines the following functions: In previous versions, keyword arguments were not allowed, and *ident* was required. + .. versionchanged:: 3.12 + This function is restricted in subinterpreters. + (Only code that runs in multiple interpreters is affected and + the restriction is not relevant for most users.) + This may only be called in the main interpreter. + It will raise :exc:`RuntimeError` if called in a subinterpreter. + .. function:: closelog() @@ -72,6 +86,13 @@ The module defines the following functions: .. audit-event:: syslog.closelog "" syslog.closelog + .. versionchanged:: 3.12 + This function is restricted in subinterpreters. + (Only code that runs in multiple interpreters is affected and + the restriction is not relevant for most users.) + This may only be called in the main interpreter. + It will raise :exc:`RuntimeError` if called in a subinterpreter. + .. function:: setlogmask(maskpri) diff --git a/Doc/library/tarfile.rst b/Doc/library/tarfile.rst index 6e8baba04fb92df..3e5723a66780cad 100644 --- a/Doc/library/tarfile.rst +++ b/Doc/library/tarfile.rst @@ -36,6 +36,13 @@ Some facts and figures: .. versionchanged:: 3.3 Added support for :mod:`lzma` compression. +.. versionchanged:: 3.12 + Archives are extracted using a :ref:`filter `, + which makes it possible to either limit surprising/dangerous features, + or to acknowledge that they are expected and the archive is fully trusted. + By default, archives are fully trusted, but this default is deprecated + and slated to change in Python 3.14. + .. function:: open(name=None, mode='r', fileobj=None, bufsize=10240, **kwargs) @@ -209,6 +216,38 @@ The :mod:`tarfile` module defines the following exceptions: Is raised by :meth:`TarInfo.frombuf` if the buffer it gets is invalid. +.. exception:: FilterError + + Base class for members :ref:`refused ` by + filters. + + .. attribute:: tarinfo + + Information about the member that the filter refused to extract, + as :ref:`TarInfo `. + +.. exception:: AbsolutePathError + + Raised to refuse extracting a member with an absolute path. + +.. exception:: OutsideDestinationError + + Raised to refuse extracting a member outside the destination directory. + +.. exception:: SpecialFileError + + Raised to refuse extracting a special file (e.g. a device or pipe). + +.. exception:: AbsoluteLinkError + + Raised to refuse extracting a symbolic link with an absolute path. + +.. exception:: LinkOutsideDestinationError + + Raised to refuse extracting a symbolic link pointing outside the destination + directory. + + The following constants are available at the module level: .. data:: ENCODING @@ -279,7 +318,7 @@ be finalized; only the internally used file object will be closed. 
See the .. versionadded:: 3.2 Added support for the context management protocol. -.. class:: TarFile(name=None, mode='r', fileobj=None, format=DEFAULT_FORMAT, tarinfo=TarInfo, dereference=False, ignore_zeros=False, encoding=ENCODING, errors='surrogateescape', pax_headers=None, debug=0, errorlevel=0) +.. class:: TarFile(name=None, mode='r', fileobj=None, format=DEFAULT_FORMAT, tarinfo=TarInfo, dereference=False, ignore_zeros=False, encoding=ENCODING, errors='surrogateescape', pax_headers=None, debug=0, errorlevel=1, stream=False) All following arguments are optional and can be accessed as instance attributes as well. @@ -319,11 +358,8 @@ be finalized; only the internally used file object will be closed. See the *debug* can be set from ``0`` (no debug messages) up to ``3`` (all debug messages). The messages are written to ``sys.stderr``. - If *errorlevel* is ``0``, all errors are ignored when using :meth:`TarFile.extract`. - Nevertheless, they appear as error messages in the debug output, when debugging - is enabled. If ``1``, all *fatal* errors are raised as :exc:`OSError` - exceptions. If ``2``, all *non-fatal* errors are raised as :exc:`TarError` - exceptions as well. + *errorlevel* controls how extraction errors are handled, + see :attr:`the corresponding attribute <~TarFile.errorlevel>`. The *encoding* and *errors* arguments define the character encoding to be used for reading or writing the archive and how conversion errors are going @@ -333,6 +369,9 @@ be finalized; only the internally used file object will be closed. See the The *pax_headers* argument is an optional dictionary of strings which will be added as a pax global header if *format* is :const:`PAX_FORMAT`. + If *stream* is set to :const:`True` then while reading the archive info about files + in the archive are not cached, saving memory. + .. versionchanged:: 3.2 Use ``'surrogateescape'`` as the default for the *errors* argument. @@ -342,6 +381,8 @@ be finalized; only the internally used file object will be closed. See the .. versionchanged:: 3.6 The *name* parameter accepts a :term:`path-like object`. + .. versionchanged:: 3.13 + Add the *stream* parameter. .. classmethod:: TarFile.open(...) @@ -390,7 +431,7 @@ be finalized; only the internally used file object will be closed. See the available. -.. method:: TarFile.extractall(path=".", members=None, *, numeric_owner=False) +.. method:: TarFile.extractall(path=".", members=None, *, numeric_owner=False, filter=None) Extract all members from the archive to the current working directory or directory *path*. If optional *members* is given, it must be a subset of the @@ -404,6 +445,12 @@ be finalized; only the internally used file object will be closed. See the are used to set the owner/group for the extracted files. Otherwise, the named values from the tarfile are used. + The *filter* argument specifies how ``members`` are modified or rejected + before extraction. + See :ref:`tarfile-extraction-filter` for details. + It is recommended to set this explicitly depending on which *tar* features + you need to support. + .. warning:: Never extract archives from untrusted sources without prior inspection. @@ -411,14 +458,20 @@ be finalized; only the internally used file object will be closed. See the that have absolute filenames starting with ``"/"`` or filenames with two dots ``".."``. + Set ``filter='data'`` to prevent the most dangerous security issues, + and read the :ref:`tarfile-extraction-filter` section for details. + .. 
versionchanged:: 3.5 Added the *numeric_owner* parameter. .. versionchanged:: 3.6 The *path* parameter accepts a :term:`path-like object`. + .. versionchanged:: 3.12 + Added the *filter* parameter. + -.. method:: TarFile.extract(member, path="", set_attrs=True, *, numeric_owner=False) +.. method:: TarFile.extract(member, path="", set_attrs=True, *, numeric_owner=False, filter=None) Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. *member* @@ -426,9 +479,8 @@ be finalized; only the internally used file object will be closed. See the directory using *path*. *path* may be a :term:`path-like object`. File attributes (owner, mtime, mode) are set unless *set_attrs* is false. - If *numeric_owner* is :const:`True`, the uid and gid numbers from the tarfile - are used to set the owner/group for the extracted files. Otherwise, the named - values from the tarfile are used. + The *numeric_owner* and *filter* arguments are the same as + for :meth:`extractall`. .. note:: @@ -439,6 +491,9 @@ be finalized; only the internally used file object will be closed. See the See the warning for :meth:`extractall`. + Set ``filter='data'`` to prevent the most dangerous security issues, + and read the :ref:`tarfile-extraction-filter` section for details. + .. versionchanged:: 3.2 Added the *set_attrs* parameter. @@ -448,6 +503,9 @@ be finalized; only the internally used file object will be closed. See the .. versionchanged:: 3.6 The *path* parameter accepts a :term:`path-like object`. + .. versionchanged:: 3.12 + Added the *filter* parameter. + .. method:: TarFile.extractfile(member) @@ -460,6 +518,55 @@ be finalized; only the internally used file object will be closed. See the .. versionchanged:: 3.3 Return an :class:`io.BufferedReader` object. +.. attribute:: TarFile.errorlevel + :type: int + + If *errorlevel* is ``0``, errors are ignored when using :meth:`TarFile.extract` + and :meth:`TarFile.extractall`. + Nevertheless, they appear as error messages in the debug output when + *debug* is greater than 0. + If ``1`` (the default), all *fatal* errors are raised as :exc:`OSError` or + :exc:`FilterError` exceptions. If ``2``, all *non-fatal* errors are raised + as :exc:`TarError` exceptions as well. + + Some exceptions, e.g. ones caused by wrong argument types or data + corruption, are always raised. + + Custom :ref:`extraction filters ` + should raise :exc:`FilterError` for *fatal* errors + and :exc:`ExtractError` for *non-fatal* ones. + + Note that when an exception is raised, the archive may be partially + extracted. It is the user’s responsibility to clean up. + +.. attribute:: TarFile.extraction_filter + + .. versionadded:: 3.12 + + The :ref:`extraction filter ` used + as a default for the *filter* argument of :meth:`~TarFile.extract` + and :meth:`~TarFile.extractall`. + + The attribute may be ``None`` or a callable. + String names are not allowed for this attribute, unlike the *filter* + argument to :meth:`~TarFile.extract`. + + If ``extraction_filter`` is ``None`` (the default), + calling an extraction method without a *filter* argument will raise a + ``DeprecationWarning``, + and fall back to the :func:`fully_trusted ` filter, + whose dangerous behavior matches previous versions of Python. + + In Python 3.14+, leaving ``extraction_filter=None`` will cause + extraction methods to use the :func:`data ` filter by default. + + The attribute may be set on instances or overridden in subclasses. 
+ It also is possible to set it on the ``TarFile`` class itself to set a + global default, although, since it affects all uses of *tarfile*, + it is best practice to only do so in top-level applications or + :mod:`site configuration `. + To set a global default this way, a filter function needs to be wrapped in + :func:`staticmethod()` to prevent injection of a ``self`` argument. .. method:: TarFile.add(name, arcname=None, recursive=True, *, filter=None) @@ -535,8 +642,23 @@ permissions, owner etc.), it provides some useful methods to determine its type. It does *not* contain the file's data itself. :class:`TarInfo` objects are returned by :class:`TarFile`'s methods -:meth:`getmember`, :meth:`getmembers` and :meth:`gettarinfo`. +:meth:`~TarFile.getmember`, :meth:`~TarFile.getmembers` and +:meth:`~TarFile.gettarinfo`. + +Modifying the objects returned by :meth:`~!TarFile.getmember` or +:meth:`~!TarFile.getmembers` will affect all subsequent +operations on the archive. +For cases where this is unwanted, you can use :mod:`copy.copy() ` or +call the :meth:`~TarInfo.replace` method to create a modified copy in one step. +Several attributes can be set to ``None`` to indicate that a piece of metadata +is unused or unknown. +Different :class:`TarInfo` methods handle ``None`` differently: + +- The :meth:`~TarFile.extract` or :meth:`~TarFile.extractall` methods will + ignore the corresponding metadata, leaving it set to a default. +- :meth:`~TarFile.addfile` will fail. +- :meth:`~TarFile.list` will print a placeholder string. .. class:: TarInfo(name="") @@ -569,24 +691,39 @@ A ``TarInfo`` object has the following public data attributes: .. attribute:: TarInfo.name + :type: str Name of the archive member. .. attribute:: TarInfo.size + :type: int Size in bytes. .. attribute:: TarInfo.mtime + :type: int | float - Time of last modification. + Time of last modification in seconds since the :ref:`epoch `, + as in :attr:`os.stat_result.st_mtime`. + .. versionchanged:: 3.12 + + Can be set to ``None`` for :meth:`~TarFile.extract` and + :meth:`~TarFile.extractall`, causing extraction to skip applying this + attribute. .. attribute:: TarInfo.mode + :type: int + + Permission bits, as for :func:`os.chmod`. - Permission bits. + .. versionchanged:: 3.12 + Can be set to ``None`` for :meth:`~TarFile.extract` and + :meth:`~TarFile.extractall`, causing extraction to skip applying this + attribute. .. attribute:: TarInfo.type @@ -598,35 +735,81 @@ A ``TarInfo`` object has the following public data attributes: .. attribute:: TarInfo.linkname + :type: str Name of the target file name, which is only present in :class:`TarInfo` objects of type :const:`LNKTYPE` and :const:`SYMTYPE`. + For symbolic links (``SYMTYPE``), the *linkname* is relative to the directory + that contains the link. + For hard links (``LNKTYPE``), the *linkname* is relative to the root of + the archive. + .. attribute:: TarInfo.uid + :type: int User ID of the user who originally stored this member. + .. versionchanged:: 3.12 + + Can be set to ``None`` for :meth:`~TarFile.extract` and + :meth:`~TarFile.extractall`, causing extraction to skip applying this + attribute. .. attribute:: TarInfo.gid + :type: int Group ID of the user who originally stored this member. + .. versionchanged:: 3.12 + + Can be set to ``None`` for :meth:`~TarFile.extract` and + :meth:`~TarFile.extractall`, causing extraction to skip applying this + attribute. .. attribute:: TarInfo.uname + :type: str User name. + .. 
versionchanged:: 3.12 + + Can be set to ``None`` for :meth:`~TarFile.extract` and + :meth:`~TarFile.extractall`, causing extraction to skip applying this + attribute. .. attribute:: TarInfo.gname + :type: str Group name. + .. versionchanged:: 3.12 + + Can be set to ``None`` for :meth:`~TarFile.extract` and + :meth:`~TarFile.extractall`, causing extraction to skip applying this + attribute. .. attribute:: TarInfo.pax_headers + :type: dict A dictionary containing key-value pairs of an associated pax extended header. +.. method:: TarInfo.replace(name=..., mtime=..., mode=..., linkname=..., + uid=..., gid=..., uname=..., gname=..., + deep=True) + + .. versionadded:: 3.12 + + Return a *new* copy of the :class:`!TarInfo` object with the given attributes + changed. For example, to return a ``TarInfo`` with the group name set to + ``'staff'``, use:: + + new_tarinfo = old_tarinfo.replace(gname='staff') + + By default, a deep copy is made. + If *deep* is false, the copy is shallow, i.e. ``pax_headers`` + and any custom attributes are shared with the original ``TarInfo`` object. A :class:`TarInfo` object also provides some convenient query methods: @@ -676,9 +859,258 @@ A :class:`TarInfo` object also provides some convenient query methods: Return :const:`True` if it is one of character device, block device or FIFO. +.. _tarfile-extraction-filter: + +Extraction filters +------------------ + +.. versionadded:: 3.12 + +The *tar* format is designed to capture all details of a UNIX-like filesystem, +which makes it very powerful. +Unfortunately, the features make it easy to create tar files that have +unintended -- and possibly malicious -- effects when extracted. +For example, extracting a tar file can overwrite arbitrary files in various +ways (e.g. by using absolute paths, ``..`` path components, or symlinks that +affect later members). + +In most cases, the full functionality is not needed. +Therefore, *tarfile* supports extraction filters: a mechanism to limit +functionality, and thus mitigate some of the security issues. + +.. seealso:: + + :pep:`706` + Contains further motivation and rationale behind the design. + +The *filter* argument to :meth:`TarFile.extract` or :meth:`~TarFile.extractall` +can be: + +* the string ``'fully_trusted'``: Honor all metadata as specified in the + archive. + Should be used if the user trusts the archive completely, or implements + their own complex verification. + +* the string ``'tar'``: Honor most *tar*-specific features (i.e. features of + UNIX-like filesystems), but block features that are very likely to be + surprising or malicious. See :func:`tar_filter` for details. + +* the string ``'data'``: Ignore or block most features specific to UNIX-like + filesystems. Intended for extracting cross-platform data archives. + See :func:`data_filter` for details. + +* ``None`` (default): Use :attr:`TarFile.extraction_filter`. + + If that is also ``None`` (the default), raise a ``DeprecationWarning``, + and fall back to the ``'fully_trusted'`` filter, whose dangerous behavior + matches previous versions of Python. + + In Python 3.14, the ``'data'`` filter will become the default instead. + It's possible to switch earlier; see :attr:`TarFile.extraction_filter`. + +* A callable which will be called for each extracted member with a + :ref:`TarInfo ` describing the member and the destination + path to where the archive is extracted (i.e. 
the same path is used for all + members):: + + filter(member: TarInfo, path: str, /) -> TarInfo | None + + The callable is called just before each member is extracted, so it can + take the current state of the disk into account. + It can: + + - return a :class:`TarInfo` object which will be used instead of the metadata + in the archive, or + - return ``None``, in which case the member will be skipped, or + - raise an exception to abort the operation or skip the member, + depending on :attr:`~TarFile.errorlevel`. + Note that when extraction is aborted, :meth:`~TarFile.extractall` may leave + the archive partially extracted. It does not attempt to clean up. + +Default named filters +~~~~~~~~~~~~~~~~~~~~~ + +The pre-defined, named filters are available as functions, so they can be +reused in custom filters: + +.. function:: fully_trusted_filter(member, path) + + Return *member* unchanged. + + This implements the ``'fully_trusted'`` filter. + +.. function:: tar_filter(member, path) + + Implements the ``'tar'`` filter. + + - Strip leading slashes (``/`` and :data:`os.sep`) from filenames. + - :ref:`Refuse ` to extract files with absolute + paths (in case the name is absolute + even after stripping slashes, e.g. ``C:/foo`` on Windows). + This raises :class:`~tarfile.AbsolutePathError`. + - :ref:`Refuse ` to extract files whose absolute + path (after following symlinks) would end up outside the destination. + This raises :class:`~tarfile.OutsideDestinationError`. + - Clear high mode bits (setuid, setgid, sticky) and group/other write bits + (:const:`~stat.S_IWGRP`|:const:`~stat.S_IWOTH`). + + Return the modified ``TarInfo`` member. + +.. function:: data_filter(member, path) + + Implements the ``'data'`` filter. + In addition to what ``tar_filter`` does: + + - :ref:`Refuse ` to extract links (hard or soft) + that link to absolute paths, or ones that link outside the destination. + + This raises :class:`~tarfile.AbsoluteLinkError` or + :class:`~tarfile.LinkOutsideDestinationError`. + + Note that such files are refused even on platforms that do not support + symbolic links. + + - :ref:`Refuse ` to extract device files + (including pipes). + This raises :class:`~tarfile.SpecialFileError`. + + - For regular files, including hard links: + + - Set the owner read and write permissions + (:const:`~stat.S_IRUSR`|:const:`~stat.S_IWUSR`). + - Remove the group & other executable permission + (:const:`~stat.S_IXGRP`|:const:`~stat.S_IXOTH`) + if the owner doesn’t have it (:const:`~stat.S_IXUSR`). + + - For other files (directories), set ``mode`` to ``None``, so + that extraction methods skip applying permission bits. + - Set user and group info (``uid``, ``gid``, ``uname``, ``gname``) + to ``None``, so that extraction methods skip setting it. + + Return the modified ``TarInfo`` member. + + +.. _tarfile-extraction-refuse: + +Filter errors +~~~~~~~~~~~~~ + +When a filter refuses to extract a file, it will raise an appropriate exception, +a subclass of :class:`~tarfile.FilterError`. +This will abort the extraction if :attr:`TarFile.errorlevel` is 1 or more. +With ``errorlevel=0`` the error will be logged and the member will be skipped, +but extraction will continue. + + +Hints for further verification +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Even with ``filter='data'``, *tarfile* is not suited for extracting untrusted +files without prior inspection. +Among other issues, the pre-defined filters do not prevent denial-of-service +attacks. Users should do additional checks. 
+ +Here is an incomplete list of things to consider: + +* Extract to a :func:`new temporary directory ` + to prevent e.g. exploiting pre-existing links, and to make it easier to + clean up after a failed extraction. +* When working with untrusted data, use external (e.g. OS-level) limits on + disk, memory and CPU usage. +* Check filenames against an allow-list of characters + (to filter out control characters, confusables, foreign path separators, + etc.). +* Check that filenames have expected extensions (discouraging files that + execute when you “click on themâ€, or extension-less files like Windows special device names). +* Limit the number of extracted files, total size of extracted data, + filename length (including symlink length), and size of individual files. +* Check for files that would be shadowed on case-insensitive filesystems. + +Also note that: + +* Tar files may contain multiple versions of the same file. + Later ones are expected to overwrite any earlier ones. + This feature is crucial to allow updating tape archives, but can be abused + maliciously. +* *tarfile* does not protect against issues with “live†data, + e.g. an attacker tinkering with the destination (or source) directory while + extraction (or archiving) is in progress. + + +Supporting older Python versions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Extraction filters were added to Python 3.12, but may be backported to older +versions as security updates. +To check whether the feature is available, use e.g. +``hasattr(tarfile, 'data_filter')`` rather than checking the Python version. + +The following examples show how to support Python versions with and without +the feature. +Note that setting ``extraction_filter`` will affect any subsequent operations. + +* Fully trusted archive:: + + my_tarfile.extraction_filter = (lambda member, path: member) + my_tarfile.extractall() + +* Use the ``'data'`` filter if available, but revert to Python 3.11 behavior + (``'fully_trusted'``) if this feature is not available:: + + my_tarfile.extraction_filter = getattr(tarfile, 'data_filter', + (lambda member, path: member)) + my_tarfile.extractall() + +* Use the ``'data'`` filter; *fail* if it is not available:: + + my_tarfile.extractall(filter=tarfile.data_filter) + + or:: + + my_tarfile.extraction_filter = tarfile.data_filter + my_tarfile.extractall() + +* Use the ``'data'`` filter; *warn* if it is not available:: + + if hasattr(tarfile, 'data_filter'): + my_tarfile.extractall(filter='data') + else: + # remove this when no longer needed + warn_the_user('Extracting may be unsafe; consider updating Python') + my_tarfile.extractall() + + +Stateful extraction filter example +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +While *tarfile*'s extraction methods take a simple *filter* callable, +custom filters may be more complex objects with an internal state. +It may be useful to write these as context managers, to be used like this:: + + with StatefulFilter() as filter_func: + tar.extractall(path, filter=filter_func) + +Such a filter can be written as, for example:: + + class StatefulFilter: + def __init__(self): + self.file_count = 0 + + def __enter__(self): + return self + + def __call__(self, member, path): + self.file_count += 1 + return member + + def __exit__(self, *exc_info): + print(f'{self.file_count} files extracted') + + .. _tarfile-commandline: .. 
program:: tarfile + Command-Line Interface ---------------------- @@ -724,30 +1156,37 @@ For a list of the files in a tar archive, use the :option:`-l` option: Command-line options ~~~~~~~~~~~~~~~~~~~~ -.. cmdoption:: -l - --list +.. option:: -l + --list List files in a tarfile. -.. cmdoption:: -c ... - --create ... +.. option:: -c ... + --create ... Create tarfile from source files. -.. cmdoption:: -e [] - --extract [] +.. option:: -e [] + --extract [] Extract tarfile into the current directory if *output_dir* is not specified. -.. cmdoption:: -t - --test +.. option:: -t + --test Test whether the tarfile is valid or not. -.. cmdoption:: -v, --verbose +.. option:: -v, --verbose Verbose output. +.. option:: --filter + + Specifies the *filter* for ``--extract``. + See :ref:`tarfile-extraction-filter` for details. + Only string names are accepted (that is, ``fully_trusted``, ``tar``, + and ``data``). + .. _tar-examples: Examples @@ -757,7 +1196,7 @@ How to extract an entire tar archive to the current working directory:: import tarfile tar = tarfile.open("sample.tar.gz") - tar.extractall() + tar.extractall(filter='data') tar.close() How to extract a subset of a tar archive with :meth:`TarFile.extractall` using diff --git a/Doc/library/telnetlib.rst b/Doc/library/telnetlib.rst deleted file mode 100644 index 5a993dc42a5ab22..000000000000000 --- a/Doc/library/telnetlib.rst +++ /dev/null @@ -1,262 +0,0 @@ -:mod:`telnetlib` --- Telnet client -================================== - -.. module:: telnetlib - :synopsis: Telnet client class. - :deprecated: - -.. sectionauthor:: Skip Montanaro - -**Source code:** :source:`Lib/telnetlib.py` - -.. index:: single: protocol; Telnet - -.. deprecated-removed:: 3.11 3.13 - The :mod:`telnetlib` module is deprecated - (see :pep:`PEP 594 <594#telnetlib>` for details and alternatives). - --------------- - -The :mod:`telnetlib` module provides a :class:`Telnet` class that implements the -Telnet protocol. See :rfc:`854` for details about the protocol. In addition, it -provides symbolic constants for the protocol characters (see below), and for the -telnet options. The symbolic names of the telnet options follow the definitions -in ``arpa/telnet.h``, with the leading ``TELOPT_`` removed. For symbolic names -of options which are traditionally not included in ``arpa/telnet.h``, see the -module source itself. - -The symbolic constants for the telnet commands are: IAC, DONT, DO, WONT, WILL, -SE (Subnegotiation End), NOP (No Operation), DM (Data Mark), BRK (Break), IP -(Interrupt process), AO (Abort output), AYT (Are You There), EC (Erase -Character), EL (Erase Line), GA (Go Ahead), SB (Subnegotiation Begin). - -.. include:: ../includes/wasm-notavail.rst - -.. class:: Telnet(host=None, port=0[, timeout]) - - :class:`Telnet` represents a connection to a Telnet server. The instance is - initially not connected by default; the :meth:`~Telnet.open` method must be used to - establish a connection. Alternatively, the host name and optional port - number can be passed to the constructor too, in which case the connection to - the server will be established before the constructor returns. The optional - *timeout* parameter specifies a timeout in seconds for blocking operations - like the connection attempt (if not specified, the global default timeout - setting will be used). - - Do not reopen an already connected instance. - - This class has many :meth:`read_\*` methods. 
Note that some of them raise - :exc:`EOFError` when the end of the connection is read, because they can return - an empty string for other reasons. See the individual descriptions below. - - A :class:`Telnet` object is a context manager and can be used in a - :keyword:`with` statement. When the :keyword:`!with` block ends, the - :meth:`close` method is called:: - - >>> from telnetlib import Telnet - >>> with Telnet('localhost', 23) as tn: - ... tn.interact() - ... - - .. versionchanged:: 3.6 Context manager support added - - -.. seealso:: - - :rfc:`854` - Telnet Protocol Specification - Definition of the Telnet protocol. - - -.. _telnet-objects: - -Telnet Objects --------------- - -:class:`Telnet` instances have the following methods: - - -.. method:: Telnet.read_until(expected, timeout=None) - - Read until a given byte string, *expected*, is encountered or until *timeout* - seconds have passed. - - When no match is found, return whatever is available instead, possibly empty - bytes. Raise :exc:`EOFError` if the connection is closed and no cooked data - is available. - - -.. method:: Telnet.read_all() - - Read all data until EOF as bytes; block until connection closed. - - -.. method:: Telnet.read_some() - - Read at least one byte of cooked data unless EOF is hit. Return ``b''`` if - EOF is hit. Block if no data is immediately available. - - -.. method:: Telnet.read_very_eager() - - Read everything that can be without blocking in I/O (eager). - - Raise :exc:`EOFError` if connection closed and no cooked data available. - Return ``b''`` if no cooked data available otherwise. Do not block unless in - the midst of an IAC sequence. - - -.. method:: Telnet.read_eager() - - Read readily available data. - - Raise :exc:`EOFError` if connection closed and no cooked data available. - Return ``b''`` if no cooked data available otherwise. Do not block unless in - the midst of an IAC sequence. - - -.. method:: Telnet.read_lazy() - - Process and return data already in the queues (lazy). - - Raise :exc:`EOFError` if connection closed and no data available. Return - ``b''`` if no cooked data available otherwise. Do not block unless in the - midst of an IAC sequence. - - -.. method:: Telnet.read_very_lazy() - - Return any data available in the cooked queue (very lazy). - - Raise :exc:`EOFError` if connection closed and no data available. Return - ``b''`` if no cooked data available otherwise. This method never blocks. - - -.. method:: Telnet.read_sb_data() - - Return the data collected between a SB/SE pair (suboption begin/end). The - callback should access these data when it was invoked with a ``SE`` command. - This method never blocks. - - -.. method:: Telnet.open(host, port=0[, timeout]) - - Connect to a host. The optional second argument is the port number, which - defaults to the standard Telnet port (23). The optional *timeout* parameter - specifies a timeout in seconds for blocking operations like the connection - attempt (if not specified, the global default timeout setting will be used). - - Do not try to reopen an already connected instance. - - .. audit-event:: telnetlib.Telnet.open self,host,port telnetlib.Telnet.open - - -.. method:: Telnet.msg(msg, *args) - - Print a debug message when the debug level is ``>`` 0. If extra arguments are - present, they are substituted in the message using the standard string - formatting operator. - - -.. method:: Telnet.set_debuglevel(debuglevel) - - Set the debug level. The higher the value of *debuglevel*, the more debug - output you get (on ``sys.stdout``). 
- - -.. method:: Telnet.close() - - Close the connection. - - -.. method:: Telnet.get_socket() - - Return the socket object used internally. - - -.. method:: Telnet.fileno() - - Return the file descriptor of the socket object used internally. - - -.. method:: Telnet.write(buffer) - - Write a byte string to the socket, doubling any IAC characters. This can - block if the connection is blocked. May raise :exc:`OSError` if the - connection is closed. - - .. audit-event:: telnetlib.Telnet.write self,buffer telnetlib.Telnet.write - - .. versionchanged:: 3.3 - This method used to raise :exc:`socket.error`, which is now an alias - of :exc:`OSError`. - - -.. method:: Telnet.interact() - - Interaction function, emulates a very dumb Telnet client. - - -.. method:: Telnet.mt_interact() - - Multithreaded version of :meth:`interact`. - - -.. method:: Telnet.expect(list, timeout=None) - - Read until one from a list of a regular expressions matches. - - The first argument is a list of regular expressions, either compiled - (:ref:`regex objects `) or uncompiled (byte strings). The - optional second argument is a timeout, in seconds; the default is to block - indefinitely. - - Return a tuple of three items: the index in the list of the first regular - expression that matches; the match object returned; and the bytes read up - till and including the match. - - If end of file is found and no bytes were read, raise :exc:`EOFError`. - Otherwise, when nothing matches, return ``(-1, None, data)`` where *data* is - the bytes received so far (may be empty bytes if a timeout happened). - - If a regular expression ends with a greedy match (such as ``.*``) or if more - than one expression can match the same input, the results are - non-deterministic, and may depend on the I/O timing. - - -.. method:: Telnet.set_option_negotiation_callback(callback) - - Each time a telnet option is read on the input flow, this *callback* (if set) is - called with the following parameters: callback(telnet socket, command - (DO/DONT/WILL/WONT), option). No other action is done afterwards by telnetlib. - - -.. _telnet-example: - -Telnet Example --------------- - -.. sectionauthor:: Peter Funk - - -A simple example illustrating typical use:: - - import getpass - import telnetlib - - HOST = "localhost" - user = input("Enter your remote account: ") - password = getpass.getpass() - - tn = telnetlib.Telnet(HOST) - - tn.read_until(b"login: ") - tn.write(user.encode('ascii') + b"\n") - if password: - tn.read_until(b"Password: ") - tn.write(password.encode('ascii') + b"\n") - - tn.write(b"ls\n") - tn.write(b"exit\n") - - print(tn.read_all().decode('ascii')) - diff --git a/Doc/library/tempfile.rst b/Doc/library/tempfile.rst index b6d4f5dd05bbfcc..b68a78e8267bccb 100644 --- a/Doc/library/tempfile.rst +++ b/Doc/library/tempfile.rst @@ -59,7 +59,7 @@ The module defines the following user-callable items: platforms, it is a file-like object whose :attr:`!file` attribute is the underlying true file object. - The :py:data:`os.O_TMPFILE` flag is used if it is available and works + The :py:const:`os.O_TMPFILE` flag is used if it is available and works (Linux-specific, requires Linux kernel 3.11 or later). On platforms that are neither Posix nor Cygwin, TemporaryFile is an alias @@ -69,7 +69,7 @@ The module defines the following user-callable items: .. versionchanged:: 3.5 - The :py:data:`os.O_TMPFILE` flag is now used if available. + The :py:const:`os.O_TMPFILE` flag is now used if available. .. versionchanged:: 3.8 Added *errors* parameter. 
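+   A short usage sketch (the data written here is arbitrary)::
+
+      import tempfile
+
+      # the file has no visible name and is destroyed as soon as it is closed
+      with tempfile.TemporaryFile() as fp:
+          fp.write(b'Hello world!')
+          fp.seek(0)
+          data = fp.read()   # b'Hello world!'
+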
@@ -115,14 +115,14 @@ The module defines the following user-callable items: * On Windows, make sure that at least one of the following conditions are fulfilled: - * *delete* is false - * additional open shares delete access (e.g. by calling :func:`os.open` - with the flag ``O_TEMPORARY``) - * *delete* is true but *delete_on_close* is false. Note, that in this - case the additional opens that do not share delete access (e.g. - created via builtin :func:`open`) must be closed before exiting the - context manager, else the :func:`os.unlink` call on context manager - exit will fail with a :exc:`PermissionError`. + * *delete* is false + * additional open shares delete access (e.g. by calling :func:`os.open` + with the flag ``O_TEMPORARY``) + * *delete* is true but *delete_on_close* is false. Note, that in this + case the additional opens that do not share delete access (e.g. + created via builtin :func:`open`) must be closed before exiting the + context manager, else the :func:`os.unlink` call on context manager + exit will fail with a :exc:`PermissionError`. On Windows, if *delete_on_close* is false, and the file is created in a directory for which the user lacks delete access, then the :func:`os.unlink` @@ -147,7 +147,7 @@ The module defines the following user-callable items: This class operates exactly as :func:`TemporaryFile` does, except that data is spooled in memory until the file size exceeds *max_size*, or - until the file's :func:`fileno` method is called, at which point the + until the file's :func:`~io.IOBase.fileno` method is called, at which point the contents are written to disk and operation proceeds as with :func:`TemporaryFile`. @@ -173,7 +173,7 @@ The module defines the following user-callable items: or text *mode* was specified). -.. class:: TemporaryDirectory(suffix=None, prefix=None, dir=None, ignore_cleanup_errors=False) +.. class:: TemporaryDirectory(suffix=None, prefix=None, dir=None, ignore_cleanup_errors=False, *, delete=True) This class securely creates a temporary directory using the same rules as :func:`mkdtemp`. The resulting object can be used as a context manager (see @@ -195,6 +195,12 @@ The module defines the following user-callable items: (the :func:`cleanup` call, exiting the context manager, when the object is garbage-collected or during interpreter shutdown). + The *delete* parameter can be used to disable cleanup of the directory tree + upon exiting the context. While it may seem unusual for a context manager + to disable the action taken when exiting the context, it can be useful during + debugging or when you need your cleanup behavior to be conditional based on + other logic. + .. audit-event:: tempfile.mkdtemp fullpath tempfile.TemporaryDirectory .. versionadded:: 3.2 @@ -202,6 +208,9 @@ The module defines the following user-callable items: .. versionchanged:: 3.10 Added *ignore_cleanup_errors* parameter. + .. versionchanged:: 3.12 + Added the *delete* parameter. + .. function:: mkstemp(suffix=None, prefix=None, dir=None, text=False) @@ -283,6 +292,9 @@ The module defines the following user-callable items: .. versionchanged:: 3.6 The *dir* parameter now accepts a :term:`path-like object`. + .. versionchanged:: 3.12 + :func:`mkdtemp` now always returns an absolute path, even if *dir* is relative. + .. 
function:: gettempdir() @@ -392,13 +404,13 @@ Here are some examples of typical usage of the :mod:`tempfile` module:: # create a temporary file using a context manager # close the file, use the name to open the file again - >>> with tempfile.TemporaryFile(delete_on_close=False) as fp: - ... fp.write(b'Hello world!') - ... fp.close() - # the file is closed, but not removed - # open the file again by using its name - ... with open(fp.name) as f - ... f.read() + >>> with tempfile.NamedTemporaryFile(delete_on_close=False) as fp: + ... fp.write(b'Hello world!') + ... fp.close() + ... # the file is closed, but not removed + ... # open the file again by using its name + ... with open(fp.name, mode='rb') as f: + ... f.read() b'Hello world!' >>> # file is now removed diff --git a/Doc/library/termios.rst b/Doc/library/termios.rst index fb1ff567d49e5c8..57705ddc4e64705 100644 --- a/Doc/library/termios.rst +++ b/Doc/library/termios.rst @@ -16,6 +16,8 @@ complete description of these calls, see :manpage:`termios(3)` Unix manual page. It is only available for those Unix versions that support POSIX *termios* style tty I/O control configured during installation. +.. availability:: Unix. + All functions in this module take a file descriptor *fd* as their first argument. This can be an integer file descriptor, such as returned by ``sys.stdin.fileno()``, or a :term:`file object`, such as ``sys.stdin`` itself. @@ -43,10 +45,20 @@ The module defines the following functions: Set the tty attributes for file descriptor *fd* from the *attributes*, which is a list like the one returned by :func:`tcgetattr`. The *when* argument - determines when the attributes are changed: :const:`TCSANOW` to change - immediately, :const:`TCSADRAIN` to change after transmitting all queued output, - or :const:`TCSAFLUSH` to change after transmitting all queued output and - discarding all queued input. + determines when the attributes are changed: + + .. data:: TCSANOW + + Change attributes immediately. + + .. data:: TCSADRAIN + + Change attributes after transmitting all queued output. + + .. data:: TCSAFLUSH + + Change attributes after transmitting all queued output and + discarding all queued input. .. function:: tcsendbreak(fd, duration) diff --git a/Doc/library/test.rst b/Doc/library/test.rst index 8199a27d7d9c4eb..6cbdc39b3c024d7 100644 --- a/Doc/library/test.rst +++ b/Doc/library/test.rst @@ -159,6 +159,9 @@ guidelines to be followed: Running tests using the command-line interface ---------------------------------------------- +.. module:: test.regrtest + :synopsis: Drives the regression test suite. + The :mod:`test` package can be run as a script to drive Python's regression test suite, thanks to the :option:`-m` option: :program:`python -m test`. Under the hood, it uses :mod:`test.regrtest`; the call :program:`python -m @@ -472,7 +475,7 @@ The :mod:`test.support` module defines the following functions: .. function:: with_pymalloc() - Return :data:`_testcapi.WITH_PYMALLOC`. + Return :const:`_testcapi.WITH_PYMALLOC`. .. function:: requires(resource, msg=None) @@ -498,42 +501,11 @@ The :mod:`test.support` module defines the following functions: rather than looking directly in the path directories. -.. function:: match_test(test) - - Determine whether *test* matches the patterns set in :func:`set_match_tests`. - - -.. function:: set_match_tests(accept_patterns=None, ignore_patterns=None) - - Define match patterns on test filenames and test method names for filtering tests. - - -.. 
function:: run_unittest(*classes) - - Execute :class:`unittest.TestCase` subclasses passed to the function. The - function scans the classes for methods starting with the prefix ``test_`` - and executes the tests individually. - - It is also legal to pass strings as parameters; these should be keys in - ``sys.modules``. Each associated module will be scanned by - ``unittest.TestLoader.loadTestsFromModule()``. This is usually seen in the - following :func:`test_main` function:: - - def test_main(): - support.run_unittest(__name__) +.. function:: get_pagesize() - This will run all tests defined in the named module. + Get size of a page in bytes. - -.. function:: run_doctest(module, verbosity=None, optionflags=0) - - Run :func:`doctest.testmod` on the given *module*. Return - ``(failure_count, test_count)``. - - If *verbosity* is ``None``, :func:`doctest.testmod` is run with verbosity - set to :data:`verbose`. Otherwise, it is run with verbosity set to - ``None``. *optionflags* is passed as ``optionflags`` to - :func:`doctest.testmod`. + .. versionadded:: 3.12 .. function:: setswitchinterval(interval) @@ -796,7 +768,7 @@ The :mod:`test.support` module defines the following functions: .. decorator:: requires_limited_api - Decorator for only running the test if :ref:`Limited C API ` + Decorator for only running the test if :ref:`Limited C API ` is available. @@ -1033,10 +1005,10 @@ The :mod:`test.support` module defines the following classes: `SetErrorMode `_. On UNIX, :func:`resource.setrlimit` is used to set - :attr:`resource.RLIMIT_CORE`'s soft limit to 0 to prevent coredump file + :const:`resource.RLIMIT_CORE`'s soft limit to 0 to prevent coredump file creation. - On both platforms, the old value is restored by :meth:`__exit__`. + On both platforms, the old value is restored by :meth:`~object.__exit__`. .. class:: SaveSignals() @@ -1684,6 +1656,21 @@ The :mod:`test.support.warnings_helper` module provides support for warnings tes .. versionadded:: 3.10 +.. function:: ignore_warnings(*, category) + + Suppress warnings that are instances of *category*, + which must be :exc:`Warning` or a subclass. + Roughly equivalent to :func:`warnings.catch_warnings` + with :meth:`warnings.simplefilter('ignore', category=category) `. + For example:: + + @warning_helper.ignore_warnings(category=DeprecationWarning) + def test_suppress_warning(): + # do something + + .. versionadded:: 3.8 + + .. function:: check_no_resource_warning(testcase) Context manager to check that no :exc:`ResourceWarning` was raised. You diff --git a/Doc/library/textwrap.rst b/Doc/library/textwrap.rst index 1a9d5f98f78a7e8..7445410f91808cb 100644 --- a/Doc/library/textwrap.rst +++ b/Doc/library/textwrap.rst @@ -60,7 +60,7 @@ functions should be good enough; otherwise, you should use an instance of First the whitespace in *text* is collapsed (all whitespace is replaced by single spaces). If the result fits in the *width*, it is returned. Otherwise, enough words are dropped from the end so that the remaining words - plus the :attr:`placeholder` fit within :attr:`width`:: + plus the *placeholder* fit within *width*:: >>> textwrap.shorten("Hello world!", width=12) 'Hello world!' @@ -173,7 +173,7 @@ hyphenated words; only then will long words be broken if necessary, unless .. attribute:: expand_tabs (default: ``True``) If true, then all tab characters in *text* will be - expanded to spaces using the :meth:`expandtabs` method of *text*. + expanded to spaces using the :meth:`~str.expandtabs` method of *text*. .. 
attribute:: tabsize @@ -238,7 +238,7 @@ hyphenated words; only then will long words be broken if necessary, unless However, the sentence detection algorithm is imperfect: it assumes that a sentence ending consists of a lowercase letter followed by one of ``'.'``, ``'!'``, or ``'?'``, possibly followed by one of ``'"'`` or ``"'"``, - followed by a space. One problem with this is algorithm is that it is + followed by a space. One problem with this algorithm is that it is unable to detect the difference between "Dr." in :: [...] Dr. Frankenstein's monster [...] diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst index b352125551fa79e..b85b7f008d1594c 100644 --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -127,10 +127,13 @@ This module defines the following functions: Its value may be used to uniquely identify this particular thread system-wide (until the thread terminates, after which the value may be recycled by the OS). - .. availability:: Windows, FreeBSD, Linux, macOS, OpenBSD, NetBSD, AIX, DragonFlyBSD. + .. availability:: Windows, FreeBSD, Linux, macOS, OpenBSD, NetBSD, AIX, DragonFlyBSD, GNU/kFreeBSD. .. versionadded:: 3.8 + .. versionchanged:: 3.13 + Added support for GNU/kFreeBSD. + .. function:: enumerate() @@ -272,7 +275,7 @@ The instance's values will be different for separate threads. A class that represents thread-local data. For more details and extensive examples, see the documentation string of the - :mod:`_threading_local` module. + :mod:`!_threading_local` module: :source:`Lib/_threading_local.py`. .. _thread-objects: @@ -285,7 +288,7 @@ thread of control. There are two ways to specify the activity: by passing a callable object to the constructor, or by overriding the :meth:`~Thread.run` method in a subclass. No other methods (except for the constructor) should be overridden in a subclass. In other words, *only* override the -:meth:`~Thread.__init__` and :meth:`~Thread.run` methods of this class. +``__init__()`` and :meth:`~Thread.run` methods of this class. Once a thread object is created, its activity must be started by calling the thread's :meth:`~Thread.start` method. This invokes the :meth:`~Thread.run` @@ -337,7 +340,7 @@ since it is impossible to detect the termination of alien threads. are: *group* should be ``None``; reserved for future extension when a - :class:`ThreadGroup` class is implemented. + :class:`!ThreadGroup` class is implemented. *target* is the callable object to be invoked by the :meth:`run` method. Defaults to ``None``, meaning nothing is called. @@ -1009,7 +1012,7 @@ This class represents an action that should be run only after a certain amount of time has passed --- a timer. :class:`Timer` is a subclass of :class:`Thread` and as such also functions as an example of creating custom threads. -Timers are started, as with threads, by calling their :meth:`~Timer.start` +Timers are started, as with threads, by calling their :meth:`Timer.start ` method. The timer can be stopped (before its action has begun) by calling the :meth:`~Timer.cancel` method. 
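As a rough sketch (not taken from this patch; the ``hello`` callback and the 30-second interval are arbitrary choices for illustration), the start/cancel cycle described here looks like this::

    import threading

    def hello():
        print("hello, world")

    t = threading.Timer(30.0, hello)  # call hello() after roughly 30 seconds
    t.start()                         # the countdown runs in its own thread
    t.cancel()                        # stop the timer before its action has begun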
The interval the timer will wait before executing its action may not be exactly the same as the interval specified by @@ -1147,10 +1150,10 @@ As an example, here is a simple way to synchronize a client and server thread:: Using locks, conditions, and semaphores in the :keyword:`!with` statement ------------------------------------------------------------------------- -All of the objects provided by this module that have :meth:`acquire` and -:meth:`release` methods can be used as context managers for a :keyword:`with` -statement. The :meth:`acquire` method will be called when the block is -entered, and :meth:`release` will be called when the block is exited. Hence, +All of the objects provided by this module that have ``acquire`` and +``release`` methods can be used as context managers for a :keyword:`with` +statement. The ``acquire`` method will be called when the block is +entered, and ``release`` will be called when the block is exited. Hence, the following snippet:: with some_lock: diff --git a/Doc/library/time.rst b/Doc/library/time.rst index 9f23a6fc7d53413..577600881676b30 100644 --- a/Doc/library/time.rst +++ b/Doc/library/time.rst @@ -71,8 +71,8 @@ An explanation of some terminology and conventions is in order. * On the other hand, the precision of :func:`.time` and :func:`sleep` is better than their Unix equivalents: times are expressed as floating point numbers, :func:`.time` returns the most accurate time available (using Unix - :c:func:`gettimeofday` where available), and :func:`sleep` will accept a time - with a nonzero fraction (Unix :c:func:`select` is used to implement this, where + :c:func:`!gettimeofday` where available), and :func:`sleep` will accept a time + with a nonzero fraction (Unix :c:func:`!select` is used to implement this, where available). * The time value as returned by :func:`gmtime`, :func:`localtime`, and @@ -84,12 +84,14 @@ An explanation of some terminology and conventions is in order. See :class:`struct_time` for a description of these objects. .. versionchanged:: 3.3 - The :class:`struct_time` type was extended to provide the :attr:`tm_gmtoff` - and :attr:`tm_zone` attributes when platform supports corresponding + The :class:`struct_time` type was extended to provide + the :attr:`~struct_time.tm_gmtoff` and :attr:`~struct_time.tm_zone` + attributes when platform supports corresponding ``struct tm`` members. .. versionchanged:: 3.6 - The :class:`struct_time` attributes :attr:`tm_gmtoff` and :attr:`tm_zone` + The :class:`struct_time` attributes + :attr:`~struct_time.tm_gmtoff` and :attr:`~struct_time.tm_zone` are now available on all platforms. * Use the following functions to convert between time representations: @@ -379,6 +381,8 @@ Functions * Or use ``nanosleep()`` if available (resolution: 1 nanosecond); * Or use ``select()`` (resolution: 1 microsecond). + .. audit-event:: time.sleep secs + .. versionchanged:: 3.11 On Unix, the ``clock_nanosleep()`` and ``nanosleep()`` functions are now used if available. On Windows, a waitable timer is now used. @@ -389,6 +393,9 @@ Functions :pep:`475` for the rationale). + .. versionchanged:: 3.13 + Raises an auditing event. + .. index:: single: % (percent); datetime format @@ -496,6 +503,8 @@ Functions When used with the :func:`strptime` function, the ``%p`` directive only affects the output hour field if the ``%I`` directive is used to parse the hour. + .. 
_leap-second: + + (2) The range really is ``0`` to ``61``; value ``60`` is valid in timestamps representing `leap seconds`_ and value ``61`` is supported @@ -566,32 +575,55 @@ Functions tuple` interface: values can be accessed by index and by attribute name. The following values are present: - +-------+-------------------+---------------------------------+ - | Index | Attribute | Values | - +=======+===================+=================================+ - | 0 | :attr:`tm_year` | (for example, 1993) | - +-------+-------------------+---------------------------------+ - | 1 | :attr:`tm_mon` | range [1, 12] | - +-------+-------------------+---------------------------------+ - | 2 | :attr:`tm_mday` | range [1, 31] | - +-------+-------------------+---------------------------------+ - | 3 | :attr:`tm_hour` | range [0, 23] | - +-------+-------------------+---------------------------------+ - | 4 | :attr:`tm_min` | range [0, 59] | - +-------+-------------------+---------------------------------+ - | 5 | :attr:`tm_sec` | range [0, 61]; see **(2)** in | - | | | :func:`strftime` description | - +-------+-------------------+---------------------------------+ - | 6 | :attr:`tm_wday` | range [0, 6], Monday is 0 | - +-------+-------------------+---------------------------------+ - | 7 | :attr:`tm_yday` | range [1, 366] | - +-------+-------------------+---------------------------------+ - | 8 | :attr:`tm_isdst` | 0, 1 or -1; see below | - +-------+-------------------+---------------------------------+ - | N/A | :attr:`tm_zone` | abbreviation of timezone name | - +-------+-------------------+---------------------------------+ - | N/A | :attr:`tm_gmtoff` | offset east of UTC in seconds | - +-------+-------------------+---------------------------------+ + .. list-table:: + + * - Index + - Attribute + - Values + + * - 0 + - .. attribute:: tm_year + - (for example, 1993) + + * - 1 + - .. attribute:: tm_mon + - range [1, 12] + + * - 2 + - .. attribute:: tm_mday + - range [1, 31] + + * - 3 + - .. attribute:: tm_hour + - range [0, 23] + + * - 4 + - .. attribute:: tm_min + - range [0, 59] + + * - 5 + - .. attribute:: tm_sec + - range [0, 61]; see :ref:`Note (2) <leap-second>` in :func:`strftime` + + * - 6 + - .. attribute:: tm_wday + - range [0, 6]; Monday is 0 + + * - 7 + - .. attribute:: tm_yday + - range [1, 366] + + * - 8 + - .. attribute:: tm_isdst + - 0, 1 or -1; see below + + * - N/A + - .. attribute:: tm_zone + - abbreviation of timezone name + + * - N/A + - .. attribute:: tm_gmtoff + - offset east of UTC in seconds Note that unlike the C structure, the month value is a range of [1, 12], not [0, 11]. @@ -912,8 +944,8 @@ Timezone Constants For the above Timezone constants (:data:`altzone`, :data:`daylight`, :data:`timezone`, and :data:`tzname`), the value is determined by the timezone rules in effect at module load time or the last time :func:`tzset` is called and may be incorrect - for times in the past. It is recommended to use the :attr:`tm_gmtoff` and - :attr:`tm_zone` results from :func:`localtime` to obtain timezone information. + for times in the past. It is recommended to use the :attr:`~struct_time.tm_gmtoff` and + :attr:`~struct_time.tm_zone` results from :func:`localtime` to obtain timezone information. .. seealso:: diff --git a/Doc/library/timeit.rst b/Doc/library/timeit.rst index 660a546e721892a..616f8365b80f6c3 100644 --- a/Doc/library/timeit.rst +++ b/Doc/library/timeit.rst @@ -27,11 +27,11 @@ can be used to compare three different expressions: .. 
code-block:: shell-session - $ python3 -m timeit '"-".join(str(n) for n in range(100))' + $ python -m timeit "'-'.join(str(n) for n in range(100))" 10000 loops, best of 5: 30.2 usec per loop - $ python3 -m timeit '"-".join([str(n) for n in range(100)])' + $ python -m timeit "'-'.join([str(n) for n in range(100)])" 10000 loops, best of 5: 27.5 usec per loop - $ python3 -m timeit '"-".join(map(str, range(100)))' + $ python -m timeit "'-'.join(map(str, range(100)))" 10000 loops, best of 5: 23.2 usec per loop This can be achieved from the :ref:`python-interface` with:: @@ -86,9 +86,11 @@ The module defines three convenience functions and a public class: .. versionchanged:: 3.7 Default value of *repeat* changed from 3 to 5. + .. function:: default_timer() - The default timer, which is always :func:`time.perf_counter`. + The default timer, which is always time.perf_counter(), returns float seconds. + An alternative, time.perf_counter_ns, returns integer nanoseconds. .. versionchanged:: 3.3 :func:`time.perf_counter` is now the default timer. @@ -124,7 +126,7 @@ The module defines three convenience functions and a public class: Time *number* executions of the main statement. This executes the setup statement once, and then returns the time it takes to execute the main - statement a number of times, measured in seconds as a float. + statement a number of times. The default timer returns seconds as a float. The argument is the number of times through the loop, defaulting to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. @@ -149,7 +151,7 @@ The module defines three convenience functions and a public class: so that the total time >= 0.2 second, returning the eventual (number of loops, time taken for that number of loops). It calls :meth:`.timeit` with increasing numbers from the sequence 1, 2, 5, - 10, 20, 50, ... until the time taken is at least 0.2 second. + 10, 20, 50, ... until the time taken is at least 0.2 seconds. If *callback* is given and is not ``None``, it will be called after each trial with two arguments: ``callback(number, time_taken)``. @@ -206,42 +208,42 @@ Command-Line Interface When called as a program from the command line, the following form is used:: - python -m timeit [-n N] [-r N] [-u U] [-s S] [-h] [statement ...] + python -m timeit [-n N] [-r N] [-u U] [-s S] [-p] [-v] [-h] [statement ...] Where the following options are understood: .. program:: timeit -.. cmdoption:: -n N, --number=N +.. option:: -n N, --number=N how many times to execute 'statement' -.. cmdoption:: -r N, --repeat=N +.. option:: -r N, --repeat=N how many times to repeat the timer (default 5) -.. cmdoption:: -s S, --setup=S +.. option:: -s S, --setup=S statement to be executed once initially (default ``pass``) -.. cmdoption:: -p, --process +.. option:: -p, --process measure process time, not wallclock time, using :func:`time.process_time` instead of :func:`time.perf_counter`, which is the default .. versionadded:: 3.3 -.. cmdoption:: -u, --unit=U +.. option:: -u, --unit=U specify a time unit for timer output; can select ``nsec``, ``usec``, ``msec``, or ``sec`` .. versionadded:: 3.5 -.. cmdoption:: -v, --verbose +.. option:: -v, --verbose print raw timing results; repeat for more digits precision -.. cmdoption:: -h, --help +.. option:: -h, --help print a short usage message and exit @@ -277,9 +279,9 @@ It is possible to provide a setup statement that is executed only once at the be .. 
code-block:: shell-session - $ python -m timeit -s 'text = "sample string"; char = "g"' 'char in text' + $ python -m timeit -s "text = 'sample string'; char = 'g'" "char in text" 5000000 loops, best of 5: 0.0877 usec per loop - $ python -m timeit -s 'text = "sample string"; char = "g"' 'text.find(char)' + $ python -m timeit -s "text = 'sample string'; char = 'g'" "text.find(char)" 1000000 loops, best of 5: 0.342 usec per loop In the output, there are three fields. The loop count, which tells you how many @@ -313,14 +315,14 @@ to test for missing and present object attributes: .. code-block:: shell-session - $ python -m timeit 'try:' ' str.__bool__' 'except AttributeError:' ' pass' + $ python -m timeit "try:" " str.__bool__" "except AttributeError:" " pass" 20000 loops, best of 5: 15.7 usec per loop - $ python -m timeit 'if hasattr(str, "__bool__"): pass' + $ python -m timeit "if hasattr(str, '__bool__'): pass" 50000 loops, best of 5: 4.26 usec per loop - $ python -m timeit 'try:' ' int.__bool__' 'except AttributeError:' ' pass' + $ python -m timeit "try:" " int.__bool__" "except AttributeError:" " pass" 200000 loops, best of 5: 1.43 usec per loop - $ python -m timeit 'if hasattr(int, "__bool__"): pass' + $ python -m timeit "if hasattr(int, '__bool__'): pass" 100000 loops, best of 5: 2.23 usec per loop :: diff --git a/Doc/library/tk.rst b/Doc/library/tk.rst index 3dc2130539c2cf2..0593f8b73ea5456 100644 --- a/Doc/library/tk.rst +++ b/Doc/library/tk.rst @@ -12,8 +12,7 @@ Graphical User Interfaces with Tk Tk/Tcl has long been an integral part of Python. It provides a robust and platform independent windowing toolkit, that is available to Python programmers -using the :mod:`tkinter` package, and its extension, the :mod:`tkinter.tix` and -the :mod:`tkinter.ttk` modules. +using the :mod:`tkinter` package, and its extension, the :mod:`tkinter.ttk` module. The :mod:`tkinter` package is a thin object-oriented layer on top of Tcl/Tk. To use :mod:`tkinter`, you don't need to write Tcl code, but you will need to @@ -39,7 +38,6 @@ alternative `GUI frameworks and tools ` of the default button + for this message window (:data:`OK`, :data:`CANCEL`, and so on). + If this option is not specified, the first button in the dialog will + be made the default. + + *detail* + Specifies an auxiliary message to the main message given by the + *message* option. + The message detail will be presented beneath the main message and, + where supported by the OS, in a less emphasized font than the main + message. + + *icon* + Specifies an :ref:`icon ` to display. + If this option is not specified, then the :data:`INFO` icon will be + displayed. + + *message* + Specifies the message to display in this message box. + The default value is an empty string. + + *parent* + Makes the specified window the logical parent of the message box. + The message box is displayed on top of its parent window. + + *title* + Specifies a string to display as the title of the message box. + This option is ignored on macOS, where platform guidelines forbid + the use of a title on this kind of dialog. + + *type* + Arranges for a :ref:`predefined set of buttons ` + to be displayed. + + .. method:: show(**options) + + Display a message window and wait for the user to select one of the buttons. Then return the symbolic name of the selected button. + Keyword arguments can override options specified in the constructor. + **Information message box** -.. method:: showinfo(title=None, message=None, **options) +.. 
function:: showinfo(title=None, message=None, **options) + + Creates and displays an information message box with the specified title + and message. **Warning message boxes** -.. method:: showwarning(title=None, message=None, **options) - showerror(title=None, message=None, **options) +.. function:: showwarning(title=None, message=None, **options) + + Creates and displays a warning message box with the specified title + and message. + +.. function:: showerror(title=None, message=None, **options) + + Creates and displays an error message box with the specified title + and message. **Question message boxes** -.. method:: askquestion(title=None, message=None, **options) - askokcancel(title=None, message=None, **options) - askretrycancel(title=None, message=None, **options) - askyesno(title=None, message=None, **options) - askyesnocancel(title=None, message=None, **options) +.. function:: askquestion(title=None, message=None, *, type=YESNO, **options) + + Ask a question. By default shows buttons :data:`YES` and :data:`NO`. + Returns the symbolic name of the selected button. + +.. function:: askokcancel(title=None, message=None, **options) + + Ask if operation should proceed. Shows buttons :data:`OK` and :data:`CANCEL`. + Returns ``True`` if the answer is ok and ``False`` otherwise. + +.. function:: askretrycancel(title=None, message=None, **options) + + Ask if operation should be retried. Shows buttons :data:`RETRY` and :data:`CANCEL`. + Return ``True`` if the answer is yes and ``False`` otherwise. + +.. function:: askyesno(title=None, message=None, **options) + + Ask a question. Shows buttons :data:`YES` and :data:`NO`. + Returns ``True`` if the answer is yes and ``False`` otherwise. + +.. function:: askyesnocancel(title=None, message=None, **options) + + Ask a question. Shows buttons :data:`YES`, :data:`NO` and :data:`CANCEL`. + Return ``True`` if the answer is yes, ``None`` if cancelled, and ``False`` + otherwise. + + +.. _messagebox-buttons: + +Symbolic names of buttons: + +.. data:: ABORT + :value: 'abort' +.. data:: RETRY + :value: 'retry' +.. data:: IGNORE + :value: 'ignore' +.. data:: OK + :value: 'ok' +.. data:: CANCEL + :value: 'cancel' +.. data:: YES + :value: 'yes' +.. data:: NO + :value: 'no' + +.. _messagebox-types: + +Predefined sets of buttons: + +.. data:: ABORTRETRYIGNORE + :value: 'abortretryignore' + + Displays three buttons whose symbolic names are :data:`ABORT`, + :data:`RETRY` and :data:`IGNORE`. + +.. data:: OK + :value: 'ok' + :noindex: + + Displays one button whose symbolic name is :data:`OK`. + +.. data:: OKCANCEL + :value: 'okcancel' + + Displays two buttons whose symbolic names are :data:`OK` and + :data:`CANCEL`. + +.. data:: RETRYCANCEL + :value: 'retrycancel' + + Displays two buttons whose symbolic names are :data:`RETRY` and + :data:`CANCEL`. + +.. data:: YESNO + :value: 'yesno' + + Displays two buttons whose symbolic names are :data:`YES` and + :data:`NO`. + +.. data:: YESNOCANCEL + :value: 'yesnocancel' + + Displays three buttons whose symbolic names are :data:`YES`, + :data:`NO` and :data:`CANCEL`. + +.. _messagebox-icons: + +Icon images: + +.. data:: ERROR + :value: 'error' +.. data:: INFO + :value: 'info' +.. data:: QUESTION + :value: 'question' +.. data:: WARNING + :value: 'warning' diff --git a/Doc/library/tkinter.rst b/Doc/library/tkinter.rst index c8e4317be758793..e084d8554c7c09f 100644 --- a/Doc/library/tkinter.rst +++ b/Doc/library/tkinter.rst @@ -58,8 +58,8 @@ details that are unchanged. 
* `Modern Tkinter for Busy Python Developers `_ By Mark Roseman. (ISBN 978-1999149567) - * `Python and Tkinter Programming `_ - By Alan Moore. (ISBN 978-1788835886) + * `Python GUI programming with Tkinter `_ + By Alan D. Moore. (ISBN 978-1788835886) * `Programming Python `_ By Mark Lutz; has excellent coverage of Tkinter. (ISBN 978-0596158101) @@ -163,7 +163,7 @@ the modern themed widget set and API:: interpreter and calls :func:`exec` on the contents of :file:`.{className}.py` and :file:`.{baseName}.py`. The path for the profile files is the :envvar:`HOME` environment variable or, if that - isn't defined, then :attr:`os.curdir`. + isn't defined, then :data:`os.curdir`. .. attribute:: tk @@ -232,6 +232,9 @@ The modules that provide Tk support include: Additional modules: +.. module:: _tkinter + :synopsis: A binary module that contains the low-level interface to Tcl/Tk. + :mod:`_tkinter` A binary module that contains the low-level interface to Tcl/Tk. It is automatically imported by the main :mod:`tkinter` module, @@ -252,10 +255,6 @@ Additional modules: (experimental) Drag-and-drop support for :mod:`tkinter`. This will become deprecated when it is replaced with the Tk DND. -:mod:`tkinter.tix` - (deprecated) An older third-party Tcl/Tk package that adds several new - widgets. Better alternatives for most can be found in :mod:`tkinter.ttk`. - :mod:`turtle` Turtle graphics in a Tk window. @@ -352,7 +351,7 @@ Understanding How Tkinter Wraps Tcl/Tk When your application uses Tkinter's classes and methods, internally Tkinter is assembling strings representing Tcl/Tk commands, and executing those -commands in the Tcl interpreter attached to your applicaton's :class:`Tk` +commands in the Tcl interpreter attached to your application's :class:`Tk` instance. Whether it's trying to navigate reference documentation, trying to find @@ -533,24 +532,24 @@ interpreter will fail. A number of special cases exist: - * Tcl/Tk libraries can be built so they are not thread-aware. In this case, - :mod:`tkinter` calls the library from the originating Python thread, even - if this is different than the thread that created the Tcl interpreter. A global - lock ensures only one call occurs at a time. +* Tcl/Tk libraries can be built so they are not thread-aware. In this case, + :mod:`tkinter` calls the library from the originating Python thread, even + if this is different than the thread that created the Tcl interpreter. A global + lock ensures only one call occurs at a time. - * While :mod:`tkinter` allows you to create more than one instance of a :class:`Tk` - object (with its own interpreter), all interpreters that are part of the same - thread share a common event queue, which gets ugly fast. In practice, don't create - more than one instance of :class:`Tk` at a time. Otherwise, it's best to create - them in separate threads and ensure you're running a thread-aware Tcl/Tk build. +* While :mod:`tkinter` allows you to create more than one instance of a :class:`Tk` + object (with its own interpreter), all interpreters that are part of the same + thread share a common event queue, which gets ugly fast. In practice, don't create + more than one instance of :class:`Tk` at a time. Otherwise, it's best to create + them in separate threads and ensure you're running a thread-aware Tcl/Tk build. - * Blocking event handlers are not the only way to prevent the Tcl interpreter from - reentering the event loop. It is even possible to run multiple nested event loops - or abandon the event loop entirely. 
If you're doing anything tricky when it comes - to events or threads, be aware of these possibilities. +* Blocking event handlers are not the only way to prevent the Tcl interpreter from + reentering the event loop. It is even possible to run multiple nested event loops + or abandon the event loop entirely. If you're doing anything tricky when it comes + to events or threads, be aware of these possibilities. - * There are a few select :mod:`tkinter` functions that presently work only when - called from the thread that created the Tcl interpreter. +* There are a few select :mod:`tkinter` functions that presently work only when + called from the thread that created the Tcl interpreter. Handy Reference diff --git a/Doc/library/tkinter.tix.rst b/Doc/library/tkinter.tix.rst deleted file mode 100644 index c86fcfa6a3f46df..000000000000000 --- a/Doc/library/tkinter.tix.rst +++ /dev/null @@ -1,583 +0,0 @@ -:mod:`tkinter.tix` --- Extension widgets for Tk -=============================================== - -.. module:: tkinter.tix - :synopsis: Tk Extension Widgets for Tkinter - -.. sectionauthor:: Mike Clarkson - -**Source code:** :source:`Lib/tkinter/tix.py` - -.. index:: single: Tix - -.. deprecated:: 3.6 - This Tk extension is unmaintained and should not be used in new code. Use - :mod:`tkinter.ttk` instead. - --------------- - -The :mod:`tkinter.tix` (Tk Interface Extension) module provides an additional -rich set of widgets. Although the standard Tk library has many useful widgets, -they are far from complete. The :mod:`tkinter.tix` library provides most of the -commonly needed widgets that are missing from standard Tk: :class:`HList`, -:class:`ComboBox`, :class:`Control` (a.k.a. SpinBox) and an assortment of -scrollable widgets. -:mod:`tkinter.tix` also includes many more widgets that are generally useful in -a wide range of applications: :class:`NoteBook`, :class:`FileEntry`, -:class:`PanedWindow`, etc; there are more than 40 of them. - -With all these new widgets, you can introduce new interaction techniques into -applications, creating more useful and more intuitive user interfaces. You can -design your application by choosing the most appropriate widgets to match the -special needs of your application and users. - -.. seealso:: - - `Tix Homepage `_ - The home page for :mod:`Tix`. This includes links to additional documentation - and downloads. - - `Tix Man Pages `_ - On-line version of the man pages and reference material. - - `Tix Programming Guide `_ - On-line version of the programmer's reference material. - - `Tix Development Applications `_ - Tix applications for development of Tix and Tkinter programs. Tide applications - work under Tk or Tkinter, and include :program:`TixInspect`, an inspector to - remotely modify and debug Tix/Tk/Tkinter applications. - - -Using Tix ---------- - - -.. class:: Tk(screenName=None, baseName=None, className='Tix') - - Toplevel widget of Tix which represents mostly the main window of an - application. It has an associated Tcl interpreter. - - Classes in the :mod:`tkinter.tix` module subclasses the classes in the - :mod:`tkinter`. The former imports the latter, so to use :mod:`tkinter.tix` - with Tkinter, all you need to do is to import one module. 
In general, you - can just import :mod:`tkinter.tix`, and replace the toplevel call to - :class:`tkinter.Tk` with :class:`tix.Tk`:: - - from tkinter import tix - from tkinter.constants import * - root = tix.Tk() - -To use :mod:`tkinter.tix`, you must have the Tix widgets installed, usually -alongside your installation of the Tk widgets. To test your installation, try -the following:: - - from tkinter import tix - root = tix.Tk() - root.tk.eval('package require Tix') - - -Tix Widgets ------------ - -`Tix `_ -introduces over 40 widget classes to the :mod:`tkinter` repertoire. - - -Basic Widgets -^^^^^^^^^^^^^ - - -.. class:: Balloon() - - A `Balloon - `_ that - pops up over a widget to provide help. When the user moves the cursor inside a - widget to which a Balloon widget has been bound, a small pop-up window with a - descriptive message will be shown on the screen. - -.. Python Demo of: -.. \ulink{Balloon}{https://tix.sourceforge.net/dist/current/demos/samples/Balloon.tcl} - - -.. class:: ButtonBox() - - The `ButtonBox - `_ - widget creates a box of buttons, such as is commonly used for ``Ok Cancel``. - -.. Python Demo of: -.. \ulink{ButtonBox}{https://tix.sourceforge.net/dist/current/demos/samples/BtnBox.tcl} - - -.. class:: ComboBox() - - The `ComboBox - `_ - widget is similar to the combo box control in MS Windows. The user can select a - choice by either typing in the entry subwidget or selecting from the listbox - subwidget. - -.. Python Demo of: -.. \ulink{ComboBox}{https://tix.sourceforge.net/dist/current/demos/samples/ComboBox.tcl} - - -.. class:: Control() - - The `Control - `_ - widget is also known as the :class:`SpinBox` widget. The user can adjust the - value by pressing the two arrow buttons or by entering the value directly into - the entry. The new value will be checked against the user-defined upper and - lower limits. - -.. Python Demo of: -.. \ulink{Control}{https://tix.sourceforge.net/dist/current/demos/samples/Control.tcl} - - -.. class:: LabelEntry() - - The `LabelEntry - `_ - widget packages an entry widget and a label into one mega widget. It can - be used to simplify the creation of "entry-form" type of interface. - -.. Python Demo of: -.. \ulink{LabelEntry}{https://tix.sourceforge.net/dist/current/demos/samples/LabEntry.tcl} - - -.. class:: LabelFrame() - - The `LabelFrame - `_ - widget packages a frame widget and a label into one mega widget. To create - widgets inside a LabelFrame widget, one creates the new widgets relative to the - :attr:`frame` subwidget and manage them inside the :attr:`frame` subwidget. - -.. Python Demo of: -.. \ulink{LabelFrame}{https://tix.sourceforge.net/dist/current/demos/samples/LabFrame.tcl} - - -.. class:: Meter() - - The `Meter - `_ widget - can be used to show the progress of a background job which may take a long time - to execute. - -.. Python Demo of: -.. \ulink{Meter}{https://tix.sourceforge.net/dist/current/demos/samples/Meter.tcl} - - -.. class:: OptionMenu() - - The `OptionMenu - `_ - creates a menu button of options. - -.. Python Demo of: -.. \ulink{OptionMenu}{https://tix.sourceforge.net/dist/current/demos/samples/OptMenu.tcl} - - -.. class:: PopupMenu() - - The `PopupMenu - `_ - widget can be used as a replacement of the ``tk_popup`` command. The advantage - of the :mod:`Tix` :class:`PopupMenu` widget is it requires less application code - to manipulate. - -.. Python Demo of: -.. \ulink{PopupMenu}{https://tix.sourceforge.net/dist/current/demos/samples/PopMenu.tcl} - - -.. 
class:: Select() - - The `Select - `_ widget - is a container of button subwidgets. It can be used to provide radio-box or - check-box style of selection options for the user. - -.. Python Demo of: -.. \ulink{Select}{https://tix.sourceforge.net/dist/current/demos/samples/Select.tcl} - - -.. class:: StdButtonBox() - - The `StdButtonBox - `_ - widget is a group of standard buttons for Motif-like dialog boxes. - -.. Python Demo of: -.. \ulink{StdButtonBox}{https://tix.sourceforge.net/dist/current/demos/samples/StdBBox.tcl} - - -File Selectors -^^^^^^^^^^^^^^ - - -.. class:: DirList() - - The `DirList - `_ - widget displays a list view of a directory, its previous directories and its - sub-directories. The user can choose one of the directories displayed in the - list or change to another directory. - -.. Python Demo of: -.. \ulink{DirList}{https://tix.sourceforge.net/dist/current/demos/samples/DirList.tcl} - - -.. class:: DirTree() - - The `DirTree - `_ - widget displays a tree view of a directory, its previous directories and its - sub-directories. The user can choose one of the directories displayed in the - list or change to another directory. - -.. Python Demo of: -.. \ulink{DirTree}{https://tix.sourceforge.net/dist/current/demos/samples/DirTree.tcl} - - -.. class:: DirSelectDialog() - - The `DirSelectDialog - `_ - widget presents the directories in the file system in a dialog window. The user - can use this dialog window to navigate through the file system to select the - desired directory. - -.. Python Demo of: -.. \ulink{DirSelectDialog}{https://tix.sourceforge.net/dist/current/demos/samples/DirDlg.tcl} - - -.. class:: DirSelectBox() - - The :class:`DirSelectBox` is similar to the standard Motif(TM) - directory-selection box. It is generally used for the user to choose a - directory. DirSelectBox stores the directories mostly recently selected into - a ComboBox widget so that they can be quickly selected again. - - -.. class:: ExFileSelectBox() - - The `ExFileSelectBox - `_ - widget is usually embedded in a tixExFileSelectDialog widget. It provides a - convenient method for the user to select files. The style of the - :class:`ExFileSelectBox` widget is very similar to the standard file dialog on - MS Windows 3.1. - -.. Python Demo of: -.. \ulink{ExFileSelectDialog}{https://tix.sourceforge.net/dist/current/demos/samples/EFileDlg.tcl} - - -.. class:: FileSelectBox() - - The `FileSelectBox - `_ - is similar to the standard Motif(TM) file-selection box. It is generally used - for the user to choose a file. FileSelectBox stores the files mostly recently - selected into a :class:`ComboBox` widget so that they can be quickly selected - again. - -.. Python Demo of: -.. \ulink{FileSelectDialog}{https://tix.sourceforge.net/dist/current/demos/samples/FileDlg.tcl} - - -.. class:: FileEntry() - - The `FileEntry - `_ - widget can be used to input a filename. The user can type in the filename - manually. Alternatively, the user can press the button widget that sits next to - the entry, which will bring up a file selection dialog. - -.. Python Demo of: -.. \ulink{FileEntry}{https://tix.sourceforge.net/dist/current/demos/samples/FileEnt.tcl} - - -Hierarchical ListBox -^^^^^^^^^^^^^^^^^^^^ - - -.. class:: HList() - - The `HList - `_ widget - can be used to display any data that have a hierarchical structure, for example, - file system directory trees. The list entries are indented and connected by - branch lines according to their places in the hierarchy. - -.. Python Demo of: -.. 
\ulink{HList}{https://tix.sourceforge.net/dist/current/demos/samples/HList1.tcl} - - -.. class:: CheckList() - - The `CheckList - `_ - widget displays a list of items to be selected by the user. CheckList acts - similarly to the Tk checkbutton or radiobutton widgets, except it is capable of - handling many more items than checkbuttons or radiobuttons. - -.. Python Demo of: -.. \ulink{ CheckList}{https://tix.sourceforge.net/dist/current/demos/samples/ChkList.tcl} -.. Python Demo of: -.. \ulink{ScrolledHList (1)}{https://tix.sourceforge.net/dist/current/demos/samples/SHList.tcl} -.. Python Demo of: -.. \ulink{ScrolledHList (2)}{https://tix.sourceforge.net/dist/current/demos/samples/SHList2.tcl} - - -.. class:: Tree() - - The `Tree - `_ widget - can be used to display hierarchical data in a tree form. The user can adjust the - view of the tree by opening or closing parts of the tree. - -.. Python Demo of: -.. \ulink{Tree}{https://tix.sourceforge.net/dist/current/demos/samples/Tree.tcl} -.. Python Demo of: -.. \ulink{Tree (Dynamic)}{https://tix.sourceforge.net/dist/current/demos/samples/DynTree.tcl} - - -Tabular ListBox -^^^^^^^^^^^^^^^ - - -.. class:: TList() - - The `TList - `_ widget - can be used to display data in a tabular format. The list entries of a - :class:`TList` widget are similar to the entries in the Tk listbox widget. The - main differences are (1) the :class:`TList` widget can display the list entries - in a two dimensional format and (2) you can use graphical images as well as - multiple colors and fonts for the list entries. - -.. Python Demo of: -.. \ulink{ScrolledTList (1)}{https://tix.sourceforge.net/dist/current/demos/samples/STList1.tcl} -.. Python Demo of: -.. \ulink{ScrolledTList (2)}{https://tix.sourceforge.net/dist/current/demos/samples/STList2.tcl} -.. Grid has yet to be added to Python -.. \subsubsection{Grid Widget} -.. Python Demo of: -.. \ulink{Simple Grid}{https://tix.sourceforge.net/dist/current/demos/samples/SGrid0.tcl} -.. Python Demo of: -.. \ulink{ScrolledGrid}{https://tix.sourceforge.net/dist/current/demos/samples/SGrid1.tcl} -.. Python Demo of: -.. \ulink{Editable Grid}{https://tix.sourceforge.net/dist/current/demos/samples/EditGrid.tcl} - - -Manager Widgets -^^^^^^^^^^^^^^^ - - -.. class:: PanedWindow() - - The `PanedWindow - `_ - widget allows the user to interactively manipulate the sizes of several panes. - The panes can be arranged either vertically or horizontally. The user changes - the sizes of the panes by dragging the resize handle between two panes. - -.. Python Demo of: -.. \ulink{PanedWindow}{https://tix.sourceforge.net/dist/current/demos/samples/PanedWin.tcl} - - -.. class:: ListNoteBook() - - The `ListNoteBook - `_ - widget is very similar to the :class:`TixNoteBook` widget: it can be used to - display many windows in a limited space using a notebook metaphor. The notebook - is divided into a stack of pages (windows). At one time only one of these pages - can be shown. The user can navigate through these pages by choosing the name of - the desired page in the :attr:`hlist` subwidget. - -.. Python Demo of: -.. \ulink{ListNoteBook}{https://tix.sourceforge.net/dist/current/demos/samples/ListNBK.tcl} - - -.. class:: NoteBook() - - The `NoteBook - `_ - widget can be used to display many windows in a limited space using a notebook - metaphor. The notebook is divided into a stack of pages. At one time only one of - these pages can be shown. 
The user can navigate through these pages by choosing - the visual "tabs" at the top of the NoteBook widget. - -.. Python Demo of: -.. \ulink{NoteBook}{https://tix.sourceforge.net/dist/current/demos/samples/NoteBook.tcl} - -.. \subsubsection{Scrolled Widgets} -.. Python Demo of: -.. \ulink{ScrolledListBox}{https://tix.sourceforge.net/dist/current/demos/samples/SListBox.tcl} -.. Python Demo of: -.. \ulink{ScrolledText}{https://tix.sourceforge.net/dist/current/demos/samples/SText.tcl} -.. Python Demo of: -.. \ulink{ScrolledWindow}{https://tix.sourceforge.net/dist/current/demos/samples/SWindow.tcl} -.. Python Demo of: -.. \ulink{Canvas Object View}{https://tix.sourceforge.net/dist/current/demos/samples/CObjView.tcl} - - -Image Types -^^^^^^^^^^^ - -The :mod:`tkinter.tix` module adds: - -* `pixmap `_ - capabilities to all :mod:`tkinter.tix` and :mod:`tkinter` widgets to create - color images from XPM files. - - .. Python Demo of: - .. \ulink{XPM Image In Button}{https://tix.sourceforge.net/dist/current/demos/samples/Xpm.tcl} - .. Python Demo of: - .. \ulink{XPM Image In Menu}{https://tix.sourceforge.net/dist/current/demos/samples/Xpm1.tcl} - -* `Compound - `_ image - types can be used to create images that consists of multiple horizontal lines; - each line is composed of a series of items (texts, bitmaps, images or spaces) - arranged from left to right. For example, a compound image can be used to - display a bitmap and a text string simultaneously in a Tk :class:`Button` - widget. - - .. Python Demo of: - .. \ulink{Compound Image In Buttons}{https://tix.sourceforge.net/dist/current/demos/samples/CmpImg.tcl} - .. Python Demo of: - .. \ulink{Compound Image In NoteBook}{https://tix.sourceforge.net/dist/current/demos/samples/CmpImg2.tcl} - .. Python Demo of: - .. \ulink{Compound Image Notebook Color Tabs}{https://tix.sourceforge.net/dist/current/demos/samples/CmpImg4.tcl} - .. Python Demo of: - .. \ulink{Compound Image Icons}{https://tix.sourceforge.net/dist/current/demos/samples/CmpImg3.tcl} - - -Miscellaneous Widgets -^^^^^^^^^^^^^^^^^^^^^ - - -.. class:: InputOnly() - - The `InputOnly - `_ - widgets are to accept inputs from the user, which can be done with the ``bind`` - command (Unix only). - - -Form Geometry Manager -^^^^^^^^^^^^^^^^^^^^^ - -In addition, :mod:`tkinter.tix` augments :mod:`tkinter` by providing: - - -.. class:: Form() - - The `Form - `_ geometry - manager based on attachment rules for all Tk widgets. - - -Tix Commands ------------- - - -.. class:: tixCommand() - - The `tix commands - `_ provide - access to miscellaneous elements of :mod:`Tix`'s internal state and the - :mod:`Tix` application context. Most of the information manipulated by these - methods pertains to the application as a whole, or to a screen or display, - rather than to a particular window. - - To view the current settings, the common usage is:: - - from tkinter import tix - root = tix.Tk() - print(root.tix_configure()) - - -.. method:: tixCommand.tix_configure(cnf=None, **kw) - - Query or modify the configuration options of the Tix application context. If no - option is specified, returns a dictionary all of the available options. If - option is specified with no value, then the method returns a list describing the - one named option (this list will be identical to the corresponding sublist of - the value returned if no option is specified). 
If one or more option-value - pairs are specified, then the method modifies the given option(s) to have the - given value(s); in this case the method returns an empty string. Option may be - any of the configuration options. - - -.. method:: tixCommand.tix_cget(option) - - Returns the current value of the configuration option given by *option*. Option - may be any of the configuration options. - - -.. method:: tixCommand.tix_getbitmap(name) - - Locates a bitmap file of the name ``name.xpm`` or ``name`` in one of the bitmap - directories (see the :meth:`tix_addbitmapdir` method). By using - :meth:`tix_getbitmap`, you can avoid hard coding the pathnames of the bitmap - files in your application. When successful, it returns the complete pathname of - the bitmap file, prefixed with the character ``@``. The returned value can be - used to configure the ``bitmap`` option of the Tk and Tix widgets. - - -.. method:: tixCommand.tix_addbitmapdir(directory) - - Tix maintains a list of directories under which the :meth:`tix_getimage` and - :meth:`tix_getbitmap` methods will search for image files. The standard bitmap - directory is :file:`$TIX_LIBRARY/bitmaps`. The :meth:`tix_addbitmapdir` method - adds *directory* into this list. By using this method, the image files of an - applications can also be located using the :meth:`tix_getimage` or - :meth:`tix_getbitmap` method. - - -.. method:: tixCommand.tix_filedialog([dlgclass]) - - Returns the file selection dialog that may be shared among different calls from - this application. This method will create a file selection dialog widget when - it is called the first time. This dialog will be returned by all subsequent - calls to :meth:`tix_filedialog`. An optional dlgclass parameter can be passed - as a string to specified what type of file selection dialog widget is desired. - Possible options are ``tix``, ``FileSelectDialog`` or ``tixExFileSelectDialog``. - - -.. method:: tixCommand.tix_getimage(self, name) - - Locates an image file of the name :file:`name.xpm`, :file:`name.xbm` or - :file:`name.ppm` in one of the bitmap directories (see the - :meth:`tix_addbitmapdir` method above). If more than one file with the same name - (but different extensions) exist, then the image type is chosen according to the - depth of the X display: xbm images are chosen on monochrome displays and color - images are chosen on color displays. By using :meth:`tix_getimage`, you can - avoid hard coding the pathnames of the image files in your application. When - successful, this method returns the name of the newly created image, which can - be used to configure the ``image`` option of the Tk and Tix widgets. - - -.. method:: tixCommand.tix_option_get(name) - - Gets the options maintained by the Tix scheme mechanism. - - -.. method:: tixCommand.tix_resetoptions(newScheme, newFontSet[, newScmPrio]) - - Resets the scheme and fontset of the Tix application to *newScheme* and - *newFontSet*, respectively. This affects only those widgets created after this - call. Therefore, it is best to call the resetoptions method before the creation - of any widgets in a Tix application. - - The optional parameter *newScmPrio* can be given to reset the priority level of - the Tk options set by the Tix schemes. - - Because of the way Tk handles the X option database, after Tix has been has - imported and inited, it is not possible to reset the color schemes and font sets - using the :meth:`tix_config` method. Instead, the :meth:`tix_resetoptions` - method must be used. 
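The removed :mod:`tkinter.tix` page above directs readers to :mod:`tkinter.ttk` for replacements. As a rough sketch (the widget, its values and the callback below are invented for illustration, not taken from this patch), a tix ``ComboBox`` is typically replaced by ``ttk.Combobox``, which likewise lets the user type a value or pick one from a drop-down list::

    import tkinter as tk
    from tkinter import ttk

    root = tk.Tk()
    combo = ttk.Combobox(root, values=["red", "green", "blue"])
    combo.current(0)              # pre-select the first entry
    combo.pack(padx=10, pady=10)

    def show_choice():
        print(combo.get())        # read the current selection

    tk.Button(root, text="OK", command=show_choice).pack(pady=5)
    root.mainloop()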
diff --git a/Doc/library/tkinter.ttk.rst b/Doc/library/tkinter.ttk.rst index 4ff2b2159c36221..6e01ec7b2912552 100644 --- a/Doc/library/tkinter.ttk.rst +++ b/Doc/library/tkinter.ttk.rst @@ -102,35 +102,35 @@ themed widgets and is not supposed to be directly instantiated. Standard Options ^^^^^^^^^^^^^^^^ -All the :mod:`ttk` Widgets accepts the following options: - - .. tabularcolumns:: |l|L| - - +-----------+--------------------------------------------------------------+ - | Option | Description | - +===========+==============================================================+ - | class | Specifies the window class. The class is used when querying | - | | the option database for the window's other options, to | - | | determine the default bindtags for the window, and to select | - | | the widget's default layout and style. This option is | - | | read-only, and may only be specified when the window is | - | | created. | - +-----------+--------------------------------------------------------------+ - | cursor | Specifies the mouse cursor to be used for the widget. If set | - | | to the empty string (the default), the cursor is inherited | - | | for the parent widget. | - +-----------+--------------------------------------------------------------+ - | takefocus | Determines whether the window accepts the focus during | - | | keyboard traversal. 0, 1 or an empty string is returned. | - | | If 0 is returned, it means that the window should be skipped | - | | entirely during keyboard traversal. If 1, it means that the | - | | window should receive the input focus as long as it is | - | | viewable. And an empty string means that the traversal | - | | scripts make the decision about whether or not to focus | - | | on the window. | - +-----------+--------------------------------------------------------------+ - | style | May be used to specify a custom widget style. | - +-----------+--------------------------------------------------------------+ +All the :mod:`ttk` Widgets accept the following options: + +.. tabularcolumns:: |l|L| + ++-----------+--------------------------------------------------------------+ +| Option | Description | ++===========+==============================================================+ +| class | Specifies the window class. The class is used when querying | +| | the option database for the window's other options, to | +| | determine the default bindtags for the window, and to select | +| | the widget's default layout and style. This option is | +| | read-only, and may only be specified when the window is | +| | created. | ++-----------+--------------------------------------------------------------+ +| cursor | Specifies the mouse cursor to be used for the widget. If set | +| | to the empty string (the default), the cursor is inherited | +| | for the parent widget. | ++-----------+--------------------------------------------------------------+ +| takefocus | Determines whether the window accepts the focus during | +| | keyboard traversal. 0, 1 or an empty string is returned. | +| | If 0 is returned, it means that the window should be skipped | +| | entirely during keyboard traversal. If 1, it means that the | +| | window should receive the input focus as long as it is | +| | viewable. And an empty string means that the traversal | +| | scripts make the decision about whether or not to focus | +| | on the window. | ++-----------+--------------------------------------------------------------+ +| style | May be used to specify a custom widget style. 
| ++-----------+--------------------------------------------------------------+ Scrollable Widget Options @@ -139,24 +139,24 @@ Scrollable Widget Options The following options are supported by widgets that are controlled by a scrollbar. - .. tabularcolumns:: |l|L| - - +----------------+---------------------------------------------------------+ - | Option | Description | - +================+=========================================================+ - | xscrollcommand | Used to communicate with horizontal scrollbars. | - | | | - | | When the view in the widget's window change, the widget | - | | will generate a Tcl command based on the scrollcommand. | - | | | - | | Usually this option consists of the method | - | | :meth:`Scrollbar.set` of some scrollbar. This will cause| - | | the scrollbar to be updated whenever the view in the | - | | window changes. | - +----------------+---------------------------------------------------------+ - | yscrollcommand | Used to communicate with vertical scrollbars. | - | | For some more information, see above. | - +----------------+---------------------------------------------------------+ +.. tabularcolumns:: |l|L| + ++----------------+---------------------------------------------------------+ +| Option | Description | ++================+=========================================================+ +| xscrollcommand | Used to communicate with horizontal scrollbars. | +| | | +| | When the view in the widget's window change, the widget | +| | will generate a Tcl command based on the scrollcommand. | +| | | +| | Usually this option consists of the method | +| | :meth:`Scrollbar.set` of some scrollbar. This will cause| +| | the scrollbar to be updated whenever the view in the | +| | window changes. | ++----------------+---------------------------------------------------------+ +| yscrollcommand | Used to communicate with vertical scrollbars. | +| | For some more information, see above. | ++----------------+---------------------------------------------------------+ Label Options @@ -165,93 +165,93 @@ Label Options The following options are supported by labels, buttons and other button-like widgets. - .. tabularcolumns:: |l|p{0.7\linewidth}| - - +--------------+-----------------------------------------------------------+ - | Option | Description | - +==============+===========================================================+ - | text | Specifies a text string to be displayed inside the widget.| - +--------------+-----------------------------------------------------------+ - | textvariable | Specifies a name whose value will be used in place of the | - | | text option resource. | - +--------------+-----------------------------------------------------------+ - | underline | If set, specifies the index (0-based) of a character to | - | | underline in the text string. The underline character is | - | | used for mnemonic activation. | - +--------------+-----------------------------------------------------------+ - | image | Specifies an image to display. This is a list of 1 or more| - | | elements. The first element is the default image name. The| - | | rest of the list if a sequence of statespec/value pairs as| - | | defined by :meth:`Style.map`, specifying different images | - | | to use when the widget is in a particular state or a | - | | combination of states. All images in the list should have | - | | the same size. 
| - +--------------+-----------------------------------------------------------+ - | compound | Specifies how to display the image relative to the text, | - | | in the case both text and images options are present. | - | | Valid values are: | - | | | - | | * text: display text only | - | | * image: display image only | - | | * top, bottom, left, right: display image above, below, | - | | left of, or right of the text, respectively. | - | | * none: the default. display the image if present, | - | | otherwise the text. | - +--------------+-----------------------------------------------------------+ - | width | If greater than zero, specifies how much space, in | - | | character widths, to allocate for the text label, if less | - | | than zero, specifies a minimum width. If zero or | - | | unspecified, the natural width of the text label is used. | - +--------------+-----------------------------------------------------------+ +.. tabularcolumns:: |l|p{0.7\linewidth}| + ++--------------+-----------------------------------------------------------+ +| Option | Description | ++==============+===========================================================+ +| text | Specifies a text string to be displayed inside the widget.| ++--------------+-----------------------------------------------------------+ +| textvariable | Specifies a name whose value will be used in place of the | +| | text option resource. | ++--------------+-----------------------------------------------------------+ +| underline | If set, specifies the index (0-based) of a character to | +| | underline in the text string. The underline character is | +| | used for mnemonic activation. | ++--------------+-----------------------------------------------------------+ +| image | Specifies an image to display. This is a list of 1 or more| +| | elements. The first element is the default image name. The| +| | rest of the list if a sequence of statespec/value pairs as| +| | defined by :meth:`Style.map`, specifying different images | +| | to use when the widget is in a particular state or a | +| | combination of states. All images in the list should have | +| | the same size. | ++--------------+-----------------------------------------------------------+ +| compound | Specifies how to display the image relative to the text, | +| | in the case both text and images options are present. | +| | Valid values are: | +| | | +| | * text: display text only | +| | * image: display image only | +| | * top, bottom, left, right: display image above, below, | +| | left of, or right of the text, respectively. | +| | * none: the default. display the image if present, | +| | otherwise the text. | ++--------------+-----------------------------------------------------------+ +| width | If greater than zero, specifies how much space, in | +| | character widths, to allocate for the text label, if less | +| | than zero, specifies a minimum width. If zero or | +| | unspecified, the natural width of the text label is used. | ++--------------+-----------------------------------------------------------+ Compatibility Options ^^^^^^^^^^^^^^^^^^^^^ - .. tabularcolumns:: |l|L| +.. tabularcolumns:: |l|L| - +--------+----------------------------------------------------------------+ - | Option | Description | - +========+================================================================+ - | state | May be set to "normal" or "disabled" to control the "disabled" | - | | state bit. 
This is a write-only option: setting it changes the | - | | widget state, but the :meth:`Widget.state` method does not | - | | affect this option. | - +--------+----------------------------------------------------------------+ ++--------+----------------------------------------------------------------+ +| Option | Description | ++========+================================================================+ +| state | May be set to "normal" or "disabled" to control the "disabled" | +| | state bit. This is a write-only option: setting it changes the | +| | widget state, but the :meth:`Widget.state` method does not | +| | affect this option. | ++--------+----------------------------------------------------------------+ Widget States ^^^^^^^^^^^^^ The widget state is a bitmap of independent state flags. - .. tabularcolumns:: |l|L| - - +------------+-------------------------------------------------------------+ - | Flag | Description | - +============+=============================================================+ - | active | The mouse cursor is over the widget and pressing a mouse | - | | button will cause some action to occur | - +------------+-------------------------------------------------------------+ - | disabled | Widget is disabled under program control | - +------------+-------------------------------------------------------------+ - | focus | Widget has keyboard focus | - +------------+-------------------------------------------------------------+ - | pressed | Widget is being pressed | - +------------+-------------------------------------------------------------+ - | selected | "On", "true", or "current" for things like Checkbuttons and | - | | radiobuttons | - +------------+-------------------------------------------------------------+ - | background | Windows and Mac have a notion of an "active" or foreground | - | | window. The *background* state is set for widgets in a | - | | background window, and cleared for those in the foreground | - | | window | - +------------+-------------------------------------------------------------+ - | readonly | Widget should not allow user modification | - +------------+-------------------------------------------------------------+ - | alternate | A widget-specific alternate display format | - +------------+-------------------------------------------------------------+ - | invalid | The widget's value is invalid | - +------------+-------------------------------------------------------------+ +.. tabularcolumns:: |l|L| + ++------------+-------------------------------------------------------------+ +| Flag | Description | ++============+=============================================================+ +| active | The mouse cursor is over the widget and pressing a mouse | +| | button will cause some action to occur | ++------------+-------------------------------------------------------------+ +| disabled | Widget is disabled under program control | ++------------+-------------------------------------------------------------+ +| focus | Widget has keyboard focus | ++------------+-------------------------------------------------------------+ +| pressed | Widget is being pressed | ++------------+-------------------------------------------------------------+ +| selected | "On", "true", or "current" for things like Checkbuttons and | +| | radiobuttons | ++------------+-------------------------------------------------------------+ +| background | Windows and Mac have a notion of an "active" or foreground | +| | window. 
The *background* state is set for widgets in a | +| | background window, and cleared for those in the foreground | +| | window | ++------------+-------------------------------------------------------------+ +| readonly | Widget should not allow user modification | ++------------+-------------------------------------------------------------+ +| alternate | A widget-specific alternate display format | ++------------+-------------------------------------------------------------+ +| invalid | The widget's value is invalid | ++------------+-------------------------------------------------------------+ A state specification is a sequence of state names, optionally prefixed with an exclamation point indicating that the bit is off. @@ -311,43 +311,43 @@ Options This widget accepts the following specific options: - .. tabularcolumns:: |l|L| - - +-----------------+--------------------------------------------------------+ - | Option | Description | - +=================+========================================================+ - | exportselection | Boolean value. If set, the widget selection is linked | - | | to the Window Manager selection (which can be returned | - | | by invoking Misc.selection_get, for example). | - +-----------------+--------------------------------------------------------+ - | justify | Specifies how the text is aligned within the widget. | - | | One of "left", "center", or "right". | - +-----------------+--------------------------------------------------------+ - | height | Specifies the height of the pop-down listbox, in rows. | - +-----------------+--------------------------------------------------------+ - | postcommand | A script (possibly registered with Misc.register) that | - | | is called immediately before displaying the values. It | - | | may specify which values to display. | - +-----------------+--------------------------------------------------------+ - | state | One of "normal", "readonly", or "disabled". In the | - | | "readonly" state, the value may not be edited directly,| - | | and the user can only selection of the values from the | - | | dropdown list. In the "normal" state, the text field is| - | | directly editable. In the "disabled" state, no | - | | interaction is possible. | - +-----------------+--------------------------------------------------------+ - | textvariable | Specifies a name whose value is linked to the widget | - | | value. Whenever the value associated with that name | - | | changes, the widget value is updated, and vice versa. | - | | See :class:`tkinter.StringVar`. | - +-----------------+--------------------------------------------------------+ - | values | Specifies the list of values to display in the | - | | drop-down listbox. | - +-----------------+--------------------------------------------------------+ - | width | Specifies an integer value indicating the desired width| - | | of the entry window, in average-size characters of the | - | | widget's font. | - +-----------------+--------------------------------------------------------+ +.. tabularcolumns:: |l|L| + ++-----------------+--------------------------------------------------------+ +| Option | Description | ++=================+========================================================+ +| exportselection | Boolean value. If set, the widget selection is linked | +| | to the Window Manager selection (which can be returned | +| | by invoking Misc.selection_get, for example). 
| ++-----------------+--------------------------------------------------------+ +| justify | Specifies how the text is aligned within the widget. | +| | One of "left", "center", or "right". | ++-----------------+--------------------------------------------------------+ +| height | Specifies the height of the pop-down listbox, in rows. | ++-----------------+--------------------------------------------------------+ +| postcommand | A script (possibly registered with Misc.register) that | +| | is called immediately before displaying the values. It | +| | may specify which values to display. | ++-----------------+--------------------------------------------------------+ +| state | One of "normal", "readonly", or "disabled". In the | +| | "readonly" state, the value may not be edited directly,| +| | and the user can only selection of the values from the | +| | dropdown list. In the "normal" state, the text field is| +| | directly editable. In the "disabled" state, no | +| | interaction is possible. | ++-----------------+--------------------------------------------------------+ +| textvariable | Specifies a name whose value is linked to the widget | +| | value. Whenever the value associated with that name | +| | changes, the widget value is updated, and vice versa. | +| | See :class:`tkinter.StringVar`. | ++-----------------+--------------------------------------------------------+ +| values | Specifies the list of values to display in the | +| | drop-down listbox. | ++-----------------+--------------------------------------------------------+ +| width | Specifies an integer value indicating the desired width| +| | of the entry window, in average-size characters of the | +| | widget's font. | ++-----------------+--------------------------------------------------------+ Virtual events @@ -397,7 +397,7 @@ Options This widget accepts the following specific options: - .. tabularcolumns:: |l|L| +.. tabularcolumns:: |l|L| +----------------------+------------------------------------------------------+ | Option | Description | @@ -473,25 +473,25 @@ Options This widget accepts the following specific options: - .. tabularcolumns:: |l|L| - - +---------+----------------------------------------------------------------+ - | Option | Description | - +=========+================================================================+ - | height | If present and greater than zero, specifies the desired height | - | | of the pane area (not including internal padding or tabs). | - | | Otherwise, the maximum height of all panes is used. | - +---------+----------------------------------------------------------------+ - | padding | Specifies the amount of extra space to add around the outside | - | | of the notebook. The padding is a list up to four length | - | | specifications left top right bottom. If fewer than four | - | | elements are specified, bottom defaults to top, right defaults | - | | to left, and top defaults to left. | - +---------+----------------------------------------------------------------+ - | width | If present and greater than zero, specified the desired width | - | | of the pane area (not including internal padding). Otherwise, | - | | the maximum width of all panes is used. | - +---------+----------------------------------------------------------------+ +.. 
tabularcolumns:: |l|L| + ++---------+----------------------------------------------------------------+ +| Option | Description | ++=========+================================================================+ +| height | If present and greater than zero, specifies the desired height | +| | of the pane area (not including internal padding or tabs). | +| | Otherwise, the maximum height of all panes is used. | ++---------+----------------------------------------------------------------+ +| padding | Specifies the amount of extra space to add around the outside | +| | of the notebook. The padding is a list up to four length | +| | specifications left top right bottom. If fewer than four | +| | elements are specified, bottom defaults to top, right defaults | +| | to left, and top defaults to left. | ++---------+----------------------------------------------------------------+ +| width | If present and greater than zero, specified the desired width | +| | of the pane area (not including internal padding). Otherwise, | +| | the maximum width of all panes is used. | ++---------+----------------------------------------------------------------+ Tab Options @@ -499,39 +499,39 @@ Tab Options There are also specific options for tabs: - .. tabularcolumns:: |l|L| - - +-----------+--------------------------------------------------------------+ - | Option | Description | - +===========+==============================================================+ - | state | Either "normal", "disabled" or "hidden". If "disabled", then | - | | the tab is not selectable. If "hidden", then the tab is not | - | | shown. | - +-----------+--------------------------------------------------------------+ - | sticky | Specifies how the child window is positioned within the pane | - | | area. Value is a string containing zero or more of the | - | | characters "n", "s", "e" or "w". Each letter refers to a | - | | side (north, south, east or west) that the child window will | - | | stick to, as per the :meth:`grid` geometry manager. | - +-----------+--------------------------------------------------------------+ - | padding | Specifies the amount of extra space to add between the | - | | notebook and this pane. Syntax is the same as for the option | - | | padding used by this widget. | - +-----------+--------------------------------------------------------------+ - | text | Specifies a text to be displayed in the tab. | - +-----------+--------------------------------------------------------------+ - | image | Specifies an image to display in the tab. See the option | - | | image described in :class:`Widget`. | - +-----------+--------------------------------------------------------------+ - | compound | Specifies how to display the image relative to the text, in | - | | the case both options text and image are present. See | - | | `Label Options`_ for legal values. | - +-----------+--------------------------------------------------------------+ - | underline | Specifies the index (0-based) of a character to underline in | - | | the text string. The underlined character is used for | - | | mnemonic activation if :meth:`Notebook.enable_traversal` is | - | | called. | - +-----------+--------------------------------------------------------------+ +.. tabularcolumns:: |l|L| + ++-----------+--------------------------------------------------------------+ +| Option | Description | ++===========+==============================================================+ +| state | Either "normal", "disabled" or "hidden". 
If "disabled", then | +| | the tab is not selectable. If "hidden", then the tab is not | +| | shown. | ++-----------+--------------------------------------------------------------+ +| sticky | Specifies how the child window is positioned within the pane | +| | area. Value is a string containing zero or more of the | +| | characters "n", "s", "e" or "w". Each letter refers to a | +| | side (north, south, east or west) that the child window will | +| | stick to, as per the :meth:`grid` geometry manager. | ++-----------+--------------------------------------------------------------+ +| padding | Specifies the amount of extra space to add between the | +| | notebook and this pane. Syntax is the same as for the option | +| | padding used by this widget. | ++-----------+--------------------------------------------------------------+ +| text | Specifies a text to be displayed in the tab. | ++-----------+--------------------------------------------------------------+ +| image | Specifies an image to display in the tab. See the option | +| | image described in :class:`Widget`. | ++-----------+--------------------------------------------------------------+ +| compound | Specifies how to display the image relative to the text, in | +| | the case both options text and image are present. See | +| | `Label Options`_ for legal values. | ++-----------+--------------------------------------------------------------+ +| underline | Specifies the index (0-based) of a character to underline in | +| | the text string. The underlined character is used for | +| | mnemonic activation if :meth:`Notebook.enable_traversal` is | +| | called. | ++-----------+--------------------------------------------------------------+ Tab Identifiers @@ -663,36 +663,36 @@ Options This widget accepts the following specific options: - .. tabularcolumns:: |l|L| - - +----------+---------------------------------------------------------------+ - | Option | Description | - +==========+===============================================================+ - | orient | One of "horizontal" or "vertical". Specifies the orientation | - | | of the progress bar. | - +----------+---------------------------------------------------------------+ - | length | Specifies the length of the long axis of the progress bar | - | | (width if horizontal, height if vertical). | - +----------+---------------------------------------------------------------+ - | mode | One of "determinate" or "indeterminate". | - +----------+---------------------------------------------------------------+ - | maximum | A number specifying the maximum value. Defaults to 100. | - +----------+---------------------------------------------------------------+ - | value | The current value of the progress bar. In "determinate" mode, | - | | this represents the amount of work completed. In | - | | "indeterminate" mode, it is interpreted as modulo *maximum*; | - | | that is, the progress bar completes one "cycle" when its value| - | | increases by *maximum*. | - +----------+---------------------------------------------------------------+ - | variable | A name which is linked to the option value. If specified, the | - | | value of the progress bar is automatically set to the value of| - | | this name whenever the latter is modified. | - +----------+---------------------------------------------------------------+ - | phase | Read-only option. The widget periodically increments the value| - | | of this option whenever its value is greater than 0 and, in | - | | determinate mode, less than maximum. 
This option may be used | - | | by the current theme to provide additional animation effects. | - +----------+---------------------------------------------------------------+ +.. tabularcolumns:: |l|L| + ++----------+---------------------------------------------------------------+ +| Option | Description | ++==========+===============================================================+ +| orient | One of "horizontal" or "vertical". Specifies the orientation | +| | of the progress bar. | ++----------+---------------------------------------------------------------+ +| length | Specifies the length of the long axis of the progress bar | +| | (width if horizontal, height if vertical). | ++----------+---------------------------------------------------------------+ +| mode | One of "determinate" or "indeterminate". | ++----------+---------------------------------------------------------------+ +| maximum | A number specifying the maximum value. Defaults to 100. | ++----------+---------------------------------------------------------------+ +| value | The current value of the progress bar. In "determinate" mode, | +| | this represents the amount of work completed. In | +| | "indeterminate" mode, it is interpreted as modulo *maximum*; | +| | that is, the progress bar completes one "cycle" when its value| +| | increases by *maximum*. | ++----------+---------------------------------------------------------------+ +| variable | A name which is linked to the option value. If specified, the | +| | value of the progress bar is automatically set to the value of| +| | this name whenever the latter is modified. | ++----------+---------------------------------------------------------------+ +| phase | Read-only option. The widget periodically increments the value| +| | of this option whenever its value is greater than 0 and, in | +| | determinate mode, less than maximum. This option may be used | +| | by the current theme to provide additional animation effects. | ++----------+---------------------------------------------------------------+ ttk.Progressbar @@ -734,14 +734,14 @@ Options This widget accepts the following specific option: - .. tabularcolumns:: |l|L| +.. tabularcolumns:: |l|L| - +--------+----------------------------------------------------------------+ - | Option | Description | - +========+================================================================+ - | orient | One of "horizontal" or "vertical". Specifies the orientation of| - | | the separator. | - +--------+----------------------------------------------------------------+ ++--------+----------------------------------------------------------------+ +| Option | Description | ++========+================================================================+ +| orient | One of "horizontal" or "vertical". Specifies the orientation of| +| | the separator. | ++--------+----------------------------------------------------------------+ Sizegrip @@ -802,49 +802,49 @@ Options This widget accepts the following specific options: - .. tabularcolumns:: |l|p{0.7\linewidth}| - - +----------------+--------------------------------------------------------+ - | Option | Description | - +================+========================================================+ - | columns | A list of column identifiers, specifying the number of | - | | columns and their names. 
| - +----------------+--------------------------------------------------------+ - | displaycolumns | A list of column identifiers (either symbolic or | - | | integer indices) specifying which data columns are | - | | displayed and the order in which they appear, or the | - | | string "#all". | - +----------------+--------------------------------------------------------+ - | height | Specifies the number of rows which should be visible. | - | | Note: the requested width is determined from the sum | - | | of the column widths. | - +----------------+--------------------------------------------------------+ - | padding | Specifies the internal padding for the widget. The | - | | padding is a list of up to four length specifications. | - +----------------+--------------------------------------------------------+ - | selectmode | Controls how the built-in class bindings manage the | - | | selection. One of "extended", "browse" or "none". | - | | If set to "extended" (the default), multiple items may | - | | be selected. If "browse", only a single item will be | - | | selected at a time. If "none", the selection will not | - | | be changed. | - | | | - | | Note that the application code and tag bindings can set| - | | the selection however they wish, regardless of the | - | | value of this option. | - +----------------+--------------------------------------------------------+ - | show | A list containing zero or more of the following values,| - | | specifying which elements of the tree to display. | - | | | - | | * tree: display tree labels in column #0. | - | | * headings: display the heading row. | - | | | - | | The default is "tree headings", i.e., show all | - | | elements. | - | | | - | | **Note**: Column #0 always refers to the tree column, | - | | even if show="tree" is not specified. | - +----------------+--------------------------------------------------------+ +.. tabularcolumns:: |l|p{0.7\linewidth}| + ++----------------+--------------------------------------------------------+ +| Option | Description | ++================+========================================================+ +| columns | A list of column identifiers, specifying the number of | +| | columns and their names. | ++----------------+--------------------------------------------------------+ +| displaycolumns | A list of column identifiers (either symbolic or | +| | integer indices) specifying which data columns are | +| | displayed and the order in which they appear, or the | +| | string "#all". | ++----------------+--------------------------------------------------------+ +| height | Specifies the number of rows which should be visible. | +| | Note: the requested width is determined from the sum | +| | of the column widths. | ++----------------+--------------------------------------------------------+ +| padding | Specifies the internal padding for the widget. The | +| | padding is a list of up to four length specifications. | ++----------------+--------------------------------------------------------+ +| selectmode | Controls how the built-in class bindings manage the | +| | selection. One of "extended", "browse" or "none". | +| | If set to "extended" (the default), multiple items may | +| | be selected. If "browse", only a single item will be | +| | selected at a time. If "none", the selection will not | +| | be changed. | +| | | +| | Note that the application code and tag bindings can set| +| | the selection however they wish, regardless of the | +| | value of this option. 
| ++----------------+--------------------------------------------------------+ +| show | A list containing zero or more of the following values,| +| | specifying which elements of the tree to display. | +| | | +| | * tree: display tree labels in column #0. | +| | * headings: display the heading row. | +| | | +| | The default is "tree headings", i.e., show all | +| | elements. | +| | | +| | **Note**: Column #0 always refers to the tree column, | +| | even if show="tree" is not specified. | ++----------------+--------------------------------------------------------+ Item Options @@ -853,27 +853,27 @@ Item Options The following item options may be specified for items in the insert and item widget commands. - .. tabularcolumns:: |l|L| - - +--------+---------------------------------------------------------------+ - | Option | Description | - +========+===============================================================+ - | text | The textual label to display for the item. | - +--------+---------------------------------------------------------------+ - | image | A Tk Image, displayed to the left of the label. | - +--------+---------------------------------------------------------------+ - | values | The list of values associated with the item. | - | | | - | | Each item should have the same number of values as the widget | - | | option columns. If there are fewer values than columns, the | - | | remaining values are assumed empty. If there are more values | - | | than columns, the extra values are ignored. | - +--------+---------------------------------------------------------------+ - | open | ``True``/``False`` value indicating whether the item's | - | | children should be displayed or hidden. | - +--------+---------------------------------------------------------------+ - | tags | A list of tags associated with this item. | - +--------+---------------------------------------------------------------+ +.. tabularcolumns:: |l|L| + ++--------+---------------------------------------------------------------+ +| Option | Description | ++========+===============================================================+ +| text | The textual label to display for the item. | ++--------+---------------------------------------------------------------+ +| image | A Tk Image, displayed to the left of the label. | ++--------+---------------------------------------------------------------+ +| values | The list of values associated with the item. | +| | | +| | Each item should have the same number of values as the widget | +| | option columns. If there are fewer values than columns, the | +| | remaining values are assumed empty. If there are more values | +| | than columns, the extra values are ignored. | ++--------+---------------------------------------------------------------+ +| open | ``True``/``False`` value indicating whether the item's | +| | children should be displayed or hidden. | ++--------+---------------------------------------------------------------+ +| tags | A list of tags associated with this item. | ++--------+---------------------------------------------------------------+ Tag Options @@ -881,20 +881,20 @@ Tag Options The following options may be specified on tags: - .. tabularcolumns:: |l|L| +.. tabularcolumns:: |l|L| - +------------+-----------------------------------------------------------+ - | Option | Description | - +============+===========================================================+ - | foreground | Specifies the text foreground color. 
| - +------------+-----------------------------------------------------------+ - | background | Specifies the cell or item background color. | - +------------+-----------------------------------------------------------+ - | font | Specifies the font to use when drawing text. | - +------------+-----------------------------------------------------------+ - | image | Specifies the item image, in case the item's image option | - | | is empty. | - +------------+-----------------------------------------------------------+ ++------------+-----------------------------------------------------------+ +| Option | Description | ++============+===========================================================+ +| foreground | Specifies the text foreground color. | ++------------+-----------------------------------------------------------+ +| background | Specifies the cell or item background color. | ++------------+-----------------------------------------------------------+ +| font | Specifies the font to use when drawing text. | ++------------+-----------------------------------------------------------+ +| image | Specifies the item image, in case the item's image option | +| | is empty. | ++------------+-----------------------------------------------------------+ Column Identifiers @@ -926,19 +926,19 @@ Virtual Events The Treeview widget generates the following virtual events. - .. tabularcolumns:: |l|L| +.. tabularcolumns:: |l|L| - +--------------------+--------------------------------------------------+ - | Event | Description | - +====================+==================================================+ - | <> | Generated whenever the selection changes. | - +--------------------+--------------------------------------------------+ - | <> | Generated just before settings the focus item to | - | | open=True. | - +--------------------+--------------------------------------------------+ - | <> | Generated just after setting the focus item to | - | | open=False. | - +--------------------+--------------------------------------------------+ ++--------------------+--------------------------------------------------+ +| Event | Description | ++====================+==================================================+ +| <> | Generated whenever the selection changes. | ++--------------------+--------------------------------------------------+ +| <> | Generated just before settings the focus item to | +| | open=True. | ++--------------------+--------------------------------------------------+ +| <> | Generated just after setting the focus item to | +| | open=False. | ++--------------------+--------------------------------------------------+ The :meth:`Treeview.focus` and :meth:`Treeview.selection` methods can be used to determine the affected item or items. @@ -986,19 +986,19 @@ ttk.Treeview The valid options/values are: - * id + id Returns the column name. This is a read-only option. - * anchor: One of the standard Tk anchor values. + anchor: One of the standard Tk anchor values. Specifies how the text in this column should be aligned with respect to the cell. - * minwidth: width + minwidth: width The minimum width of the column in pixels. The treeview widget will not make the column any smaller than specified by this option when the widget is resized or the user drags a column. - * stretch: ``True``/``False`` + stretch: ``True``/``False`` Specifies whether the column's width should be adjusted when the widget is resized. - * width: width + width: width The width of the column in pixels. 
To configure the tree column, call this with column = "#0" @@ -1041,14 +1041,14 @@ ttk.Treeview The valid options/values are: - * text: text + text: text The text to display in the column heading. - * image: imageName + image: imageName Specifies an image to display to the right of the column heading. - * anchor: anchor + anchor: anchor Specifies how the heading text should be aligned. One of the standard Tk anchor values. - * command: callback + command: callback A callback to be invoked when the heading label is pressed. To configure the tree column heading, call this with column = "#0". @@ -1391,32 +1391,42 @@ option. If you don't know the class name of a widget, use the method .. method:: element_create(elementname, etype, *args, **kw) Create a new element in the current theme, of the given *etype* which is - expected to be either "image", "from" or "vsapi". The latter is only - available in Tk 8.6a for Windows XP and Vista and is not described here. + expected to be either "image", "from" or "vsapi". + The latter is only available in Tk 8.6 on Windows. If "image" is used, *args* should contain the default image name followed by statespec/value pairs (this is the imagespec), and *kw* may have the following options: - * border=padding - padding is a list of up to four integers, specifying the left, top, - right, and bottom borders, respectively. + border=padding + padding is a list of up to four integers, specifying the left, top, + right, and bottom borders, respectively. - * height=height - Specifies a minimum height for the element. If less than zero, the - base image's height is used as a default. + height=height + Specifies a minimum height for the element. If less than zero, the + base image's height is used as a default. - * padding=padding - Specifies the element's interior padding. Defaults to border's value - if not specified. + padding=padding + Specifies the element's interior padding. Defaults to border's value + if not specified. - * sticky=spec - Specifies how the image is placed within the final parcel. spec - contains zero or more characters "n", "s", "w", or "e". + sticky=spec + Specifies how the image is placed within the final parcel. spec + contains zero or more characters "n", "s", "w", or "e". - * width=width - Specifies a minimum width for the element. If less than zero, the - base image's width is used as a default. + width=width + Specifies a minimum width for the element. If less than zero, the + base image's width is used as a default. + + Example:: + + img1 = tkinter.PhotoImage(master=root, file='button.png') + img2 = tkinter.PhotoImage(master=root, file='button-pressed.png') + img3 = tkinter.PhotoImage(master=root, file='button-active.png') + style = ttk.Style(root) + style.element_create('Button.button', 'image', + img1, ('pressed', img2), ('active', img3), + border=(2, 4), sticky='we') If "from" is used as the value of *etype*, :meth:`element_create` will clone an existing @@ -1425,6 +1435,68 @@ option. If you don't know the class name of a widget, use the method If this element to clone from is not specified, an empty element will be used. *kw* is discarded. + Example:: + + style = ttk.Style(root) + style.element_create('plain.background', 'from', 'default') + + If "vsapi" is used as the value of *etype*, :meth:`element_create` + will create a new element in the current theme whose visual appearance + is drawn using the Microsoft Visual Styles API which is responsible + for the themed styles on Windows XP and Vista. 
+ *args* is expected to contain the Visual Styles class and part as + given in the Microsoft documentation followed by an optional sequence + of tuples of ttk states and the corresponding Visual Styles API state + value. + *kw* may have the following options: + + padding=padding + Specify the element's interior padding. + *padding* is a list of up to four integers specifying the left, + top, right and bottom padding quantities respectively. + If fewer than four elements are specified, bottom defaults to top, + right defaults to left, and top defaults to left. + In other words, a list of three numbers specify the left, vertical, + and right padding; a list of two numbers specify the horizontal + and the vertical padding; a single number specifies the same + padding all the way around the widget. + This option may not be mixed with any other options. + + margins=padding + Specifies the elements exterior padding. + *padding* is a list of up to four integers specifying the left, top, + right and bottom padding quantities respectively. + This option may not be mixed with any other options. + + width=width + Specifies the width for the element. + If this option is set then the Visual Styles API will not be queried + for the recommended size or the part. + If this option is set then *height* should also be set. + The *width* and *height* options cannot be mixed with the *padding* + or *margins* options. + + height=height + Specifies the height of the element. + See the comments for *width*. + + Example:: + + style = ttk.Style(root) + style.element_create('pin', 'vsapi', 'EXPLORERBAR', 3, [ + ('pressed', '!selected', 3), + ('active', '!selected', 2), + ('pressed', 'selected', 6), + ('active', 'selected', 5), + ('selected', 4), + ('', 1)]) + style.layout('Explorer.Pin', + [('Explorer.Pin.pin', {'sticky': 'news'})]) + pin = ttk.Checkbutton(style='Explorer.Pin') + pin.pack(expand=True, fill='both') + + .. versionchanged:: 3.13 + Added support of the "vsapi" element factory. .. method:: element_names() @@ -1504,22 +1576,22 @@ uses a simplified version of the pack geometry manager: given an initial cavity, each element is allocated a parcel. Valid options/values are: - * side: whichside - Specifies which side of the cavity to place the element; one of - top, right, bottom or left. If omitted, the element occupies the - entire cavity. +side: whichside + Specifies which side of the cavity to place the element; one of + top, right, bottom or left. If omitted, the element occupies the + entire cavity. - * sticky: nswe - Specifies where the element is placed inside its allocated parcel. +sticky: nswe + Specifies where the element is placed inside its allocated parcel. - * unit: 0 or 1 - If set to 1, causes the element and all of its descendants to be treated as - a single element for the purposes of :meth:`Widget.identify` et al. It's - used for things like scrollbar thumbs with grips. +unit: 0 or 1 + If set to 1, causes the element and all of its descendants to be treated as + a single element for the purposes of :meth:`Widget.identify` et al. It's + used for things like scrollbar thumbs with grips. - * children: [sublayout... ] - Specifies a list of elements to place inside the element. Each - element is a tuple (or other sequence type) where the first item is - the layout name, and the other is a `Layout`_. +children: [sublayout... ] + Specifies a list of elements to place inside the element. 
Each + element is a tuple (or other sequence type) where the first item is + the layout name, and the other is a `Layout`_. .. _Layout: `Layouts`_ diff --git a/Doc/library/token-list.inc b/Doc/library/token-list.inc index 2739d5bfc1dfa20..39df2927a0b7f2d 100644 --- a/Doc/library/token-list.inc +++ b/Doc/library/token-list.inc @@ -201,11 +201,11 @@ Token value for ``":="``. -.. data:: OP +.. data:: EXCLAMATION -.. data:: AWAIT + Token value for ``"!"``. -.. data:: ASYNC +.. data:: OP .. data:: TYPE_IGNORE @@ -213,6 +213,16 @@ .. data:: SOFT_KEYWORD +.. data:: FSTRING_START + +.. data:: FSTRING_MIDDLE + +.. data:: FSTRING_END + +.. data:: COMMENT + +.. data:: NL + .. data:: ERRORTOKEN .. data:: N_TOKENS diff --git a/Doc/library/token.rst b/Doc/library/token.rst index a1aceba96ce0306..e6dc37d7ad852cf 100644 --- a/Doc/library/token.rst +++ b/Doc/library/token.rst @@ -50,11 +50,13 @@ The following token type values aren't used by the C tokenizer but are needed fo the :mod:`tokenize` module. .. data:: COMMENT + :noindex: Token value used to indicate a comment. .. data:: NL + :noindex: Token value used to indicate a non-terminating newline. The :data:`NEWLINE` token indicates the end of a logical line of Python code; @@ -78,17 +80,21 @@ the :mod:`tokenize` module. .. versionchanged:: 3.5 - Added :data:`AWAIT` and :data:`ASYNC` tokens. + Added :data:`!AWAIT` and :data:`!ASYNC` tokens. .. versionchanged:: 3.7 Added :data:`COMMENT`, :data:`NL` and :data:`ENCODING` tokens. .. versionchanged:: 3.7 - Removed :data:`AWAIT` and :data:`ASYNC` tokens. "async" and "await" are + Removed :data:`!AWAIT` and :data:`!ASYNC` tokens. "async" and "await" are now tokenized as :data:`NAME` tokens. .. versionchanged:: 3.8 Added :data:`TYPE_COMMENT`, :data:`TYPE_IGNORE`, :data:`COLONEQUAL`. - Added :data:`AWAIT` and :data:`ASYNC` tokens back (they're needed + Added :data:`!AWAIT` and :data:`!ASYNC` tokens back (they're needed to support parsing older Python versions for :func:`ast.parse` with ``feature_version`` set to 6 or lower). + +.. versionchanged:: 3.13 + Removed :data:`!AWAIT` and :data:`!ASYNC` tokens again. + diff --git a/Doc/library/tokenize.rst b/Doc/library/tokenize.rst index 11f569df2e7cde7..92bdb052267a689 100644 --- a/Doc/library/tokenize.rst +++ b/Doc/library/tokenize.rst @@ -22,6 +22,15 @@ the generic :data:`~token.OP` token type. The exact type can be determined by checking the ``exact_type`` property on the :term:`named tuple` returned from :func:`tokenize.tokenize`. + +.. warning:: + + Note that the functions in this module are only designed to parse + syntactically valid Python code (code that does not raise when parsed + using :func:`ast.parse`). The behavior of the functions in this module is + **undefined** when providing invalid Python code and it can change at any + point. + Tokenizing Input ---------------- @@ -139,11 +148,6 @@ function it uses to do this is available: 2, 3 -Note that unclosed single-quoted strings do not cause an error to be -raised. They are tokenized as :data:`~token.ERRORTOKEN`, followed by the -tokenization of their contents. - - .. _tokenize-cli: Command-Line Usage @@ -162,11 +166,11 @@ The following options are accepted: .. program:: tokenize -.. cmdoption:: -h, --help +.. option:: -h, --help show this help message and exit -.. cmdoption:: -e, --exact +.. 
option:: -e, --exact display token names using the exact type diff --git a/Doc/library/trace.rst b/Doc/library/trace.rst index 40cf198f1287d78..8854905e192b45f 100644 --- a/Doc/library/trace.rst +++ b/Doc/library/trace.rst @@ -34,11 +34,11 @@ all Python modules imported during the execution into the current directory. .. program:: trace -.. cmdoption:: --help +.. option:: --help Display usage and exit. -.. cmdoption:: --version +.. option:: --version Display the version of the module and exit. @@ -56,28 +56,28 @@ the :option:`--trace <-t>` and :option:`--count <-c>` options. When .. program:: trace -.. cmdoption:: -c, --count +.. option:: -c, --count Produce a set of annotated listing files upon program completion that shows how many times each statement was executed. See also :option:`--coverdir <-C>`, :option:`--file <-f>` and :option:`--no-report <-R>` below. -.. cmdoption:: -t, --trace +.. option:: -t, --trace Display lines as they are executed. -.. cmdoption:: -l, --listfuncs +.. option:: -l, --listfuncs Display the functions executed by running the program. -.. cmdoption:: -r, --report +.. option:: -r, --report Produce an annotated list from an earlier program run that used the :option:`--count <-c>` and :option:`--file <-f>` option. This does not execute any code. -.. cmdoption:: -T, --trackcalls +.. option:: -T, --trackcalls Display the calling relationships exposed by running the program. @@ -86,33 +86,33 @@ Modifiers .. program:: trace -.. cmdoption:: -f, --file= +.. option:: -f, --file= Name of a file to accumulate counts over several tracing runs. Should be used with the :option:`--count <-c>` option. -.. cmdoption:: -C, --coverdir= +.. option:: -C, --coverdir= Directory where the report files go. The coverage report for ``package.module`` is written to file :file:`{dir}/{package}/{module}.cover`. -.. cmdoption:: -m, --missing +.. option:: -m, --missing When generating annotated listings, mark lines which were not executed with ``>>>>>>``. -.. cmdoption:: -s, --summary +.. option:: -s, --summary When using :option:`--count <-c>` or :option:`--report <-r>`, write a brief summary to stdout for each file processed. -.. cmdoption:: -R, --no-report +.. option:: -R, --no-report Do not generate annotated listings. This is useful if you intend to make several runs with :option:`--count <-c>`, and then produce a single set of annotated listings at the end. -.. cmdoption:: -g, --timing +.. option:: -g, --timing Prefix each line with the time since the program started. Only used while tracing. @@ -124,12 +124,12 @@ These options may be repeated multiple times. .. program:: trace -.. cmdoption:: --ignore-module= +.. option:: --ignore-module= Ignore each of the given module names and its submodules (if it is a package). The argument can be a list of names separated by a comma. -.. cmdoption:: --ignore-dir= +.. option:: --ignore-dir= Ignore all modules and packages in the named directory and subdirectories. The argument can be a list of directories separated by :data:`os.pathsep`. @@ -187,7 +187,8 @@ Programmatic Interface Merge in data from another :class:`CoverageResults` object. - .. method:: write_results(show_missing=True, summary=False, coverdir=None) + .. method:: write_results(show_missing=True, summary=False, coverdir=None,\ + *, ignore_missing_files=False) Write coverage results. Set *show_missing* to show lines that had no hits. Set *summary* to include in the output the coverage summary per @@ -195,6 +196,13 @@ Programmatic Interface result files will be output. 
If ``None``, the results for each source file are placed in its directory. + If *ignore_missing_files* is ``True``, coverage counts for files that no + longer exist are silently ignored. Otherwise, a missing file will + raise a :exc:`FileNotFoundError`. + + .. versionchanged:: 3.13 + Added *ignore_missing_files* parameter. + A simple example demonstrating the use of the programmatic interface:: import sys diff --git a/Doc/library/traceback.rst b/Doc/library/traceback.rst index f8c1eabadacf9f2..80dda5ec520d7a9 100644 --- a/Doc/library/traceback.rst +++ b/Doc/library/traceback.rst @@ -14,14 +14,20 @@ interpreter when it prints a stack trace. This is useful when you want to print stack traces under program control, such as in a "wrapper" around the interpreter. -.. index:: object: traceback +.. index:: pair: object; traceback -The module uses traceback objects --- this is the object type that is stored in -the :data:`sys.last_traceback` variable and returned as the third item from -:func:`sys.exc_info`. +The module uses traceback objects --- these are objects of type :class:`types.TracebackType`, +which are assigned to the ``__traceback__`` field of :class:`BaseException` instances. -The module defines the following functions: +.. seealso:: + + Module :mod:`faulthandler` + Used to dump Python tracebacks explicitly, on a fault, after a timeout, or on a user signal. + Module :mod:`pdb` + Interactive source code debugger for Python programs. + +The module defines the following functions: .. function:: print_tb(tb, limit=None, file=None) @@ -74,16 +80,15 @@ The module defines the following functions: .. function:: print_exc(limit=None, file=None, chain=True) - This is a shorthand for ``print_exception(*sys.exc_info(), limit, file, + This is a shorthand for ``print_exception(sys.exception(), limit, file, chain)``. .. function:: print_last(limit=None, file=None, chain=True) - This is a shorthand for ``print_exception(sys.last_type, sys.last_value, - sys.last_traceback, limit, file, chain)``. In general it will work only - after an exception has reached an interactive prompt (see - :data:`sys.last_type`). + This is a shorthand for ``print_exception(sys.last_exc, limit, file, + chain)``. In general it will work only after an exception has reached + an interactive prompt (see :data:`sys.last_exc`). .. function:: print_stack(f=None, limit=None, file=None) @@ -130,24 +135,34 @@ The module defines the following functions: text line is not ``None``. -.. function:: format_exception_only(exc, /[, value]) +.. function:: format_exception_only(exc, /[, value], *, show_group=False) Format the exception part of a traceback using an exception value such as given by ``sys.last_value``. The return value is a list of strings, each - ending in a newline. Normally, the list contains a single string; however, - for :exc:`SyntaxError` exceptions, it contains several lines that (when - printed) display detailed information about where the syntax error occurred. - The message indicating which exception occurred is the always last string in - the list. + ending in a newline. The list contains the exception's message, which is + normally a single string; however, for :exc:`SyntaxError` exceptions, it + contains several lines that (when printed) display detailed information + about where the syntax error occurred. Following the message, the list + contains the exception's :attr:`notes `. Since Python 3.10, instead of passing *value*, an exception object can be passed as the first argument. 
If *value* is provided, the first argument is ignored in order to provide backwards compatibility. + When *show_group* is ``True``, and the exception is an instance of + :exc:`BaseExceptionGroup`, the nested exceptions are included as + well, recursively, with indentation relative to their nesting depth. + .. versionchanged:: 3.10 The *etype* parameter has been renamed to *exc* and is now positional-only. + .. versionchanged:: 3.11 + The returned list now includes any notes attached to the exception. + + .. versionchanged:: 3.13 + *show_group* parameter was added. + .. function:: format_exception(exc, /[, value, tb], limit=None, chain=True) @@ -212,7 +227,7 @@ The module also defines the following classes: :class:`TracebackException` objects are created from actual exceptions to capture data for later printing in a lightweight fashion. -.. class:: TracebackException(exc_type, exc_value, exc_traceback, *, limit=None, lookup_lines=True, capture_locals=False, compact=False) +.. class:: TracebackException(exc_type, exc_value, exc_traceback, *, limit=None, lookup_lines=True, capture_locals=False, compact=False, max_group_width=15, max_group_depth=10) Capture an exception for later rendering. *limit*, *lookup_lines* and *capture_locals* are as for the :class:`StackSummary` class. @@ -224,6 +239,18 @@ capture data for later printing in a lightweight fashion. Note that when locals are captured, they are also shown in the traceback. + *max_group_width* and *max_group_depth* control the formatting of exception + groups (see :exc:`BaseExceptionGroup`). The depth refers to the nesting + level of the group, and the width refers to the size of a single exception + group's exceptions array. The formatted output is truncated when either + limit is exceeded. + + .. versionchanged:: 3.10 + Added the *compact* parameter. + + .. versionchanged:: 3.11 + Added the *max_group_width* and *max_group_depth* parameters. + .. attribute:: __cause__ A :class:`TracebackException` of the original ``__cause__``. @@ -232,6 +259,14 @@ capture data for later printing in a lightweight fashion. A :class:`TracebackException` of the original ``__context__``. + .. attribute:: exceptions + + If ``self`` represents an :exc:`ExceptionGroup`, this field holds a list of + :class:`TracebackException` instances representing the nested exceptions. + Otherwise it is ``None``. + + .. versionadded:: 3.11 + .. attribute:: __suppress_context__ The ``__suppress_context__`` value from the original exception. @@ -252,6 +287,14 @@ capture data for later printing in a lightweight fashion. The class of the original traceback. + .. deprecated:: 3.13 + + .. attribute:: exc_type_str + + String display of the class of the original exception. + + .. versionadded:: 3.13 + .. attribute:: filename For syntax errors - the file name where the error occurred. @@ -260,6 +303,13 @@ capture data for later printing in a lightweight fashion. For syntax errors - the line number where the error occurred. + .. attribute:: end_lineno + + For syntax errors - the end line number where the error occurred. + Can be ``None`` if not present. + + .. versionadded:: 3.10 + .. attribute:: text For syntax errors - the text where the error occurred. @@ -268,6 +318,13 @@ capture data for later printing in a lightweight fashion. For syntax errors - the offset into the text where the error occurred. + .. attribute:: end_offset + + For syntax errors - the end offset into the text where the error occurred. + Can be ``None`` if not present. + + .. versionadded:: 3.10 + .. 
attribute:: msg For syntax errors - the compiler error message. @@ -297,25 +354,27 @@ capture data for later printing in a lightweight fashion. some containing internal newlines. :func:`~traceback.print_exception` is a wrapper around this method which just prints the lines to a file. - The message indicating which exception occurred is always the last - string in the output. - - .. method:: format_exception_only() + .. method:: format_exception_only(*, show_group=False) Format the exception part of the traceback. The return value is a generator of strings, each ending in a newline. - Normally, the generator emits a single string; however, for - :exc:`SyntaxError` exceptions, it emits several lines that (when - printed) display detailed information about where the syntax - error occurred. + When *show_group* is ``False``, the generator emits the exception's + message followed by its notes (if it has any). The exception message + is normally a single string; however, for :exc:`SyntaxError` exceptions, + it consists of several lines that (when printed) display detailed + information about where the syntax error occurred. - The message indicating which exception occurred is always the last - string in the output. + When *show_group* is ``True``, and the exception is an instance of + :exc:`BaseExceptionGroup`, the nested exceptions are included as + well, recursively, with indentation relative to their nesting depth. - .. versionchanged:: 3.10 - Added the *compact* parameter. + .. versionchanged:: 3.11 + The exception's notes are now included in the output. + + .. versionchanged:: 3.13 + Added the *show_group* parameter. :class:`StackSummary` Objects @@ -437,11 +496,11 @@ exception and traceback: try: lumberjack() except IndexError: - exc_type, exc_value, exc_traceback = sys.exc_info() + exc = sys.exception() print("*** print_tb:") - traceback.print_tb(exc_traceback, limit=1, file=sys.stdout) + traceback.print_tb(exc.__traceback__, limit=1, file=sys.stdout) print("*** print_exception:") - traceback.print_exception(exc_value, limit=2, file=sys.stdout) + traceback.print_exception(exc, limit=2, file=sys.stdout) print("*** print_exc:") traceback.print_exc(limit=2, file=sys.stdout) print("*** format_exc, first and last line:") @@ -449,12 +508,12 @@ exception and traceback: print(formatted_lines[0]) print(formatted_lines[-1]) print("*** format_exception:") - print(repr(traceback.format_exception(exc_value))) + print(repr(traceback.format_exception(exc))) print("*** extract_tb:") - print(repr(traceback.extract_tb(exc_traceback))) + print(repr(traceback.extract_tb(exc.__traceback__))) print("*** format_tb:") - print(repr(traceback.format_tb(exc_traceback))) - print("*** tb_lineno:", exc_traceback.tb_lineno) + print(repr(traceback.format_tb(exc.__traceback__))) + print("*** tb_lineno:", exc.__traceback__.tb_lineno) The output for the example would look similar to this: diff --git a/Doc/library/tty.rst b/Doc/library/tty.rst index b30bc3c7ac42e9d..20ba7d7e0a45b30 100644 --- a/Doc/library/tty.rst +++ b/Doc/library/tty.rst @@ -15,23 +15,49 @@ The :mod:`tty` module defines functions for putting the tty into cbreak and raw modes. +.. availability:: Unix. + Because it requires the :mod:`termios` module, it will work only on Unix. The :mod:`tty` module defines the following functions: +.. function:: cfmakeraw(mode) + + Convert the tty attribute list *mode*, which is a list like the one returned + by :func:`termios.tcgetattr`, to that of a tty in raw mode. + + .. versionadded:: 3.12 + + +.. 
function:: cfmakecbreak(mode) + + Convert the tty attribute list *mode*, which is a list like the one returned + by :func:`termios.tcgetattr`, to that of a tty in cbreak mode. + + .. versionadded:: 3.12 + + .. function:: setraw(fd, when=termios.TCSAFLUSH) Change the mode of the file descriptor *fd* to raw. If *when* is omitted, it defaults to :const:`termios.TCSAFLUSH`, and is passed to - :func:`termios.tcsetattr`. + :func:`termios.tcsetattr`. The return value of :func:`termios.tcgetattr` + is saved before setting *fd* to raw mode; this value is returned. + + .. versionchanged:: 3.12 + The return value is now the original tty attributes, instead of None. .. function:: setcbreak(fd, when=termios.TCSAFLUSH) Change the mode of file descriptor *fd* to cbreak. If *when* is omitted, it defaults to :const:`termios.TCSAFLUSH`, and is passed to - :func:`termios.tcsetattr`. + :func:`termios.tcsetattr`. The return value of :func:`termios.tcgetattr` + is saved before setting *fd* to cbreak mode; this value is returned. + + .. versionchanged:: 3.12 + The return value is now the original tty attributes, instead of None. .. seealso:: diff --git a/Doc/library/turtle.rst b/Doc/library/turtle.rst index 5add61c759ea8ec..88b1f09eb3c8b78 100644 --- a/Doc/library/turtle.rst +++ b/Doc/library/turtle.rst @@ -19,9 +19,14 @@ Introduction ============ -Turtle graphics is a popular way for introducing programming to kids. It was -part of the original Logo programming language developed by Wally Feurzeig, -Seymour Papert and Cynthia Solomon in 1967. +Turtle graphics is an implementation of `the popular geometric drawing tools +introduced in Logo `_, developed by Wally Feurzeig, Seymour Papert and Cynthia Solomon +in 1967. + + +Get started +=========== Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it the command ``turtle.forward(15)``, and it moves (on-screen!) 15 pixels in the @@ -36,67 +41,261 @@ direction it is facing, drawing a line as it moves. Give it the command .. image:: turtle-star.* :align: center - .. literalinclude:: ../includes/turtle-star.py +In Python, turtle graphics provides a representation of a physical "turtle" +(a little robot with a pen) that draws on a sheet of paper on the floor. + +It's an effective and well-proven way for learners to encounter +programming concepts and interaction with software, as it provides instant, +visible feedback. It also provides convenient access to graphical output +in general. + +Turtle drawing was originally created as an educational tool, to be used by +teachers in the classroom. For the programmer who needs to produce some +graphical output it can be a way to do that without the overhead of +introducing more complex or external libraries into their work. + + +.. _turtle-tutorial: + +Tutorial +======== + +New users should start here. In this tutorial we'll explore some of the +basics of turtle drawing. + + +Starting a turtle environment +----------------------------- + +In a Python shell, import all the objects of the ``turtle`` module:: + + from turtle import * + +If you run into a ``No module named '_tkinter'`` error, you'll have to +install the :mod:`Tk interface package ` on your system. + + +Basic drawing +------------- + +Send the turtle forward 100 steps:: + + forward(100) + +You should see (most likely, in a new window on your display) a line +drawn by the turtle, heading East. 
Change the direction of the turtle, +so that it turns 120 degrees left (anti-clockwise):: + + left(120) + +Let's continue by drawing a triangle:: + + forward(100) + left(120) + forward(100) + +Notice how the turtle, represented by an arrow, points in different +directions as you steer it. + +Experiment with those commands, and also with ``backward()`` and +``right()``. + + +Pen control +~~~~~~~~~~~ + +Try changing the color - for example, ``color('blue')`` - and +width of the line - for example, ``width(3)`` - and then drawing again. + +You can also move the turtle around without drawing, by lifting up the pen: +``up()`` before moving. To start drawing again, use ``down()``. + + +The turtle's position +~~~~~~~~~~~~~~~~~~~~~ + +Send your turtle back to its starting-point (useful if it has disappeared +off-screen):: + + home() + +The home position is at the center of the turtle's screen. If you ever need to +know them, get the turtle's x-y co-ordinates with:: + + pos() + +Home is at ``(0, 0)``. -By combining together these and similar commands, intricate shapes and pictures -can easily be drawn. +And after a while, it will probably help to clear the window so we can start +anew:: -The :mod:`turtle` module is an extended reimplementation of the same-named -module from the Python standard distribution up to version Python 2.5. + clearscreen() -It tries to keep the merits of the old turtle module and to be (nearly) 100% -compatible with it. This means in the first place to enable the learning -programmer to use all the commands, classes and methods interactively when using -the module from within IDLE run with the ``-n`` switch. -The turtle module provides turtle graphics primitives, in both object-oriented -and procedure-oriented ways. Because it uses :mod:`tkinter` for the underlying -graphics, it needs a version of Python installed with Tk support. +Making algorithmic patterns +--------------------------- -The object-oriented interface uses essentially two+two classes: +Using loops, it's possible to build up geometric patterns:: -1. The :class:`TurtleScreen` class defines graphics windows as a playground for - the drawing turtles. Its constructor needs a :class:`tkinter.Canvas` or a - :class:`ScrolledCanvas` as argument. It should be used when :mod:`turtle` is - used as part of some application. + for steps in range(100): + for c in ('blue', 'red', 'green'): + color(c) + forward(steps) + right(30) - The function :func:`Screen` returns a singleton object of a - :class:`TurtleScreen` subclass. This function should be used when - :mod:`turtle` is used as a standalone tool for doing graphics. - As a singleton object, inheriting from its class is not possible. - All methods of TurtleScreen/Screen also exist as functions, i.e. as part of - the procedure-oriented interface. +\ - which of course, are limited only by the imagination! -2. :class:`RawTurtle` (alias: :class:`RawPen`) defines Turtle objects which draw - on a :class:`TurtleScreen`. Its constructor needs a Canvas, ScrolledCanvas - or TurtleScreen as argument, so the RawTurtle objects know where to draw. +Let's draw the star shape at the top of this page. 
We want red lines, +filled in with yellow:: + + color('red') + fillcolor('yellow') + +Just as ``up()`` and ``down()`` determine whether lines will be drawn, +filling can be turned on and off:: + + begin_fill() + +Next we'll create a loop:: + + while True: + forward(200) + left(170) + if abs(pos()) < 1: + break + +``abs(pos()) < 1`` is a good way to know when the turtle is back at its +home position. + +Finally, complete the filling:: + + end_fill() + +(Note that filling only actually takes place when you give the +``end_fill()`` command.) + + +.. _turtle-how-to: + +How to... +========= + +This section covers some typical turtle use-cases and approaches. + + +Get started as quickly as possible +---------------------------------- + +One of the joys of turtle graphics is the immediate, visual feedback that's +available from simple commands - it's an excellent way to introduce children +to programming ideas, with a minimum of overhead (not just children, of +course). + +The turtle module makes this possible by exposing all its basic functionality +as functions, available with ``from turtle import *``. The :ref:`turtle +graphics tutorial ` covers this approach. + +It's worth noting that many of the turtle commands also have even more terse +equivalents, such as ``fd()`` for :func:`forward`. These are especially +useful when working with learners for whom typing is not a skill. + +.. _note: + + You'll need to have the :mod:`Tk interface package ` installed on + your system for turtle graphics to work. Be warned that this is not + always straightforward, so check this in advance if you're planning to + use turtle graphics with a learner. + + +Use the ``turtle`` module namespace +----------------------------------- + +Using ``from turtle import *`` is convenient - but be warned that it imports a +rather large collection of objects, and if you're doing anything but turtle +graphics you run the risk of a name conflict (this becomes even more an issue +if you're using turtle graphics in a script where other modules might be +imported). + +The solution is to use ``import turtle`` - ``fd()`` becomes +``turtle.fd()``, ``width()`` becomes ``turtle.width()`` and so on. (If typing +"turtle" over and over again becomes tedious, use for example ``import turtle +as t`` instead.) + + +Use turtle graphics in a script +------------------------------- + +It's recommended to use the ``turtle`` module namespace as described +immediately above, for example:: + + import turtle as t + from random import random + + for i in range(100): + steps = int(random() * 100) + angle = int(random() * 360) + t.right(angle) + t.fd(steps) + +Another step is also required though - as soon as the script ends, Python +will also close the turtle's window. Add:: + + t.mainloop() + +to the end of the script. The script will now wait to be dismissed and +will not exit until it is terminated, for example by closing the turtle +graphics window. + + +Use object-oriented turtle graphics +----------------------------------- - Derived from RawTurtle is the subclass :class:`Turtle` (alias: :class:`Pen`), - which draws on "the" :class:`Screen` instance which is automatically - created, if not already present. +.. seealso:: :ref:`Explanation of the object-oriented interface ` - All methods of RawTurtle/Turtle also exist as functions, i.e. part of the - procedure-oriented interface. 
+Other than for very basic introductory purposes, or for trying things out +as quickly as possible, it's more usual and much more powerful to use the +object-oriented approach to turtle graphics. For example, this allows +multiple turtles on screen at once. -The procedural interface provides functions which are derived from the methods -of the classes :class:`Screen` and :class:`Turtle`. They have the same names as -the corresponding methods. A screen object is automatically created whenever a -function derived from a Screen method is called. An (unnamed) turtle object is -automatically created whenever any of the functions derived from a Turtle method -is called. +In this approach, the various turtle commands are methods of objects (mostly of +``Turtle`` objects). You *can* use the object-oriented approach in the shell, +but it would be more typical in a Python script. -To use multiple turtles on a screen one has to use the object-oriented interface. +The example above then becomes:: + + from turtle import Turtle + from random import random + + t = Turtle() + for i in range(100): + steps = int(random() * 100) + angle = int(random() * 360) + t.right(angle) + t.fd(steps) + + t.screen.mainloop() + +Note the last line. ``t.screen`` is an instance of the :class:`Screen` +that a Turtle instance exists on; it's created automatically along with +the turtle. + +The turtle's screen can be customised, for example:: + + t.screen.title('Object-oriented turtle demo') + t.screen.bgcolor("orange") + + +Turtle graphics reference +========================= .. note:: + In the following documentation the argument list for functions is given. Methods, of course, have the additional first argument *self* which is omitted here. -Overview of available Turtle and Screen methods -================================================= - Turtle methods -------------- @@ -107,6 +306,7 @@ Turtle motion | :func:`right` | :func:`rt` | :func:`left` | :func:`lt` | :func:`goto` | :func:`setpos` | :func:`setposition` + | :func:`teleport` | :func:`setx` | :func:`sety` | :func:`setheading` | :func:`seth` @@ -165,7 +365,6 @@ Turtle state | :func:`resizemode` | :func:`shapesize` | :func:`turtlesize` | :func:`shearfactor` - | :func:`settiltangle` | :func:`tiltangle` | :func:`tilt` | :func:`shapetransform` @@ -358,18 +557,56 @@ Turtle motion .. doctest:: :skipif: _tkinter is None - >>> tp = turtle.pos() - >>> tp - (0.00,0.00) - >>> turtle.setpos(60,30) - >>> turtle.pos() - (60.00,30.00) - >>> turtle.setpos((20,80)) - >>> turtle.pos() - (20.00,80.00) - >>> turtle.setpos(tp) - >>> turtle.pos() - (0.00,0.00) + >>> tp = turtle.pos() + >>> tp + (0.00,0.00) + >>> turtle.setpos(60,30) + >>> turtle.pos() + (60.00,30.00) + >>> turtle.setpos((20,80)) + >>> turtle.pos() + (20.00,80.00) + >>> turtle.setpos(tp) + >>> turtle.pos() + (0.00,0.00) + + +.. function:: teleport(x, y=None, *, fill_gap=False) + + :param x: a number or ``None`` + :param y: a number or ``None`` + :param fill_gap: a boolean + + Move turtle to an absolute position. Unlike goto(x, y), a line will not + be drawn. The turtle's orientation does not change. If currently + filling, the polygon(s) teleported from will be filled after leaving, + and filling will begin again after teleporting. This can be disabled + with fill_gap=True, which makes the imaginary line traveled during + teleporting act as a fill barrier like in goto(x, y). + + .. doctest:: + :skipif: _tkinter is None + :hide: + + >>> turtle.goto(0, 0) + + .. 
doctest:: + :skipif: _tkinter is None + + >>> tp = turtle.pos() + >>> tp + (0.00,0.00) + >>> turtle.teleport(60) + >>> turtle.pos() + (60.00,0.00) + >>> turtle.teleport(y=10) + >>> turtle.pos() + (60.00,10.00) + >>> turtle.teleport(20, 30) + >>> turtle.pos() + (20.00,30.00) + + .. versionadded: 3.12 .. function:: setx(x) @@ -537,8 +774,7 @@ Turtle motion :skipif: _tkinter is None >>> turtle.color("blue") - >>> turtle.stamp() - 11 + >>> stamp_id = turtle.stamp() >>> turtle.fd(50) @@ -575,15 +811,8 @@ Turtle motion .. doctest:: >>> for i in range(8): - ... turtle.stamp(); turtle.fd(30) - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 + ... unused_stamp_id = turtle.stamp() + ... turtle.fd(30) >>> turtle.clearstamps(2) >>> turtle.clearstamps(-2) >>> turtle.clearstamps() @@ -919,23 +1148,23 @@ Color control .. doctest:: :skipif: _tkinter is None - >>> colormode() - 1.0 - >>> turtle.pencolor() - 'red' - >>> turtle.pencolor("brown") - >>> turtle.pencolor() - 'brown' - >>> tup = (0.2, 0.8, 0.55) - >>> turtle.pencolor(tup) - >>> turtle.pencolor() - (0.2, 0.8, 0.5490196078431373) - >>> colormode(255) - >>> turtle.pencolor() - (51.0, 204.0, 140.0) - >>> turtle.pencolor('#32c18f') - >>> turtle.pencolor() - (50.0, 193.0, 143.0) + >>> colormode() + 1.0 + >>> turtle.pencolor() + 'red' + >>> turtle.pencolor("brown") + >>> turtle.pencolor() + 'brown' + >>> tup = (0.2, 0.8, 0.55) + >>> turtle.pencolor(tup) + >>> turtle.pencolor() + (0.2, 0.8, 0.5490196078431373) + >>> colormode(255) + >>> turtle.pencolor() + (51.0, 204.0, 140.0) + >>> turtle.pencolor('#32c18f') + >>> turtle.pencolor() + (50.0, 193.0, 143.0) .. function:: fillcolor(*args) @@ -968,17 +1197,17 @@ Color control .. doctest:: :skipif: _tkinter is None - >>> turtle.fillcolor("violet") - >>> turtle.fillcolor() - 'violet' - >>> turtle.pencolor() - (50.0, 193.0, 143.0) - >>> turtle.fillcolor((50, 193, 143)) # Integers, not floats - >>> turtle.fillcolor() - (50.0, 193.0, 143.0) - >>> turtle.fillcolor('#ffffff') - >>> turtle.fillcolor() - (255.0, 255.0, 255.0) + >>> turtle.fillcolor("violet") + >>> turtle.fillcolor() + 'violet' + >>> turtle.pencolor() + (50.0, 193.0, 143.0) + >>> turtle.fillcolor((50, 193, 143)) # Integers, not floats + >>> turtle.fillcolor() + (50.0, 193.0, 143.0) + >>> turtle.fillcolor('#ffffff') + >>> turtle.fillcolor() + (255.0, 255.0, 255.0) .. function:: color(*args) @@ -1007,12 +1236,12 @@ Color control .. doctest:: :skipif: _tkinter is None - >>> turtle.color("red", "green") - >>> turtle.color() - ('red', 'green') - >>> color("#285078", "#a0c8f0") - >>> color() - ((40.0, 80.0, 120.0), (160.0, 200.0, 240.0)) + >>> turtle.color("red", "green") + >>> turtle.color() + ('red', 'green') + >>> color("#285078", "#a0c8f0") + >>> color() + ((40.0, 80.0, 120.0), (160.0, 200.0, 240.0)) See also: Screen method :func:`colormode`. @@ -1034,11 +1263,11 @@ Filling .. doctest:: :skipif: _tkinter is None - >>> turtle.begin_fill() - >>> if turtle.filling(): - ... turtle.pensize(5) - ... else: - ... turtle.pensize(3) + >>> turtle.begin_fill() + >>> if turtle.filling(): + ... turtle.pensize(5) + ... else: + ... turtle.pensize(3) @@ -1214,7 +1443,7 @@ Appearance will be displayed stretched according to its stretchfactors: *stretch_wid* is stretchfactor perpendicular to its orientation, *stretch_len* is stretchfactor in direction of its orientation, *outline* determines the width - of the shapes's outline. + of the shape's outline. .. doctest:: :skipif: _tkinter is None @@ -1244,11 +1473,11 @@ Appearance .. 
doctest:: :skipif: _tkinter is None - >>> turtle.shape("circle") - >>> turtle.shapesize(5,2) - >>> turtle.shearfactor(0.5) - >>> turtle.shearfactor() - 0.5 + >>> turtle.shape("circle") + >>> turtle.shapesize(5,2) + >>> turtle.shearfactor(0.5) + >>> turtle.shearfactor() + 0.5 .. function:: tilt(angle) @@ -1270,28 +1499,6 @@ Appearance >>> turtle.fd(50) -.. function:: settiltangle(angle) - - :param angle: a number - - Rotate the turtleshape to point in the direction specified by *angle*, - regardless of its current tilt-angle. *Do not* change the turtle's heading - (direction of movement). - - .. doctest:: - :skipif: _tkinter is None or 'always; deprecated method' - - >>> turtle.reset() - >>> turtle.shape("circle") - >>> turtle.shapesize(5,2) - >>> turtle.settiltangle(45) - >>> turtle.fd(50) - >>> turtle.settiltangle(-45) - >>> turtle.fd(50) - - .. deprecated:: 3.1 - - .. function:: tiltangle(angle=None) :param angle: a number (optional) @@ -1545,7 +1752,7 @@ below: 1. Create an empty Shape object of type "compound". 2. Add as many components to this object as desired, using the - :meth:`addcomponent` method. + :meth:`~Shape.addcomponent` method. For example: @@ -1617,11 +1824,11 @@ Window control ``"nopic"``, delete background image, if present. If *picname* is ``None``, return the filename of the current backgroundimage. :: - >>> screen.bgpic() - 'nopic' - >>> screen.bgpic("landscape.gif") - >>> screen.bgpic() - "landscape.gif" + >>> screen.bgpic() + 'nopic' + >>> screen.bgpic("landscape.gif") + >>> screen.bgpic() + "landscape.gif" .. function:: clear() @@ -2028,16 +2235,16 @@ Settings and special methods Return the height of the turtle window. :: - >>> screen.window_height() - 480 + >>> screen.window_height() + 480 .. function:: window_width() Return the width of the turtle window. :: - >>> screen.window_width() - 640 + >>> screen.window_width() + 640 .. _screenspecific: @@ -2108,7 +2315,7 @@ Public classes .. class:: RawTurtle(canvas) RawPen(canvas) - :param canvas: a :class:`tkinter.Canvas`, a :class:`ScrolledCanvas` or a + :param canvas: a :class:`!tkinter.Canvas`, a :class:`ScrolledCanvas` or a :class:`TurtleScreen` Create a turtle. The turtle has all methods described above as "methods of @@ -2123,9 +2330,9 @@ Public classes .. class:: TurtleScreen(cv) - :param cv: a :class:`tkinter.Canvas` + :param cv: a :class:`!tkinter.Canvas` - Provides screen oriented methods like :func:`setbg` etc. that are described + Provides screen oriented methods like :func:`bgcolor` etc. that are described above. .. class:: Screen() @@ -2193,6 +2400,41 @@ Public classes * ``a.rotate(angle)`` rotation +.. _turtle-explanation: + +Explanation +=========== + +A turtle object draws on a screen object, and there a number of key classes in +the turtle object-oriented interface that can be used to create them and relate +them to each other. + +A :class:`Turtle` instance will automatically create a :class:`Screen` +instance if one is not already present. + +``Turtle`` is a subclass of :class:`RawTurtle`, which *doesn't* automatically +create a drawing surface - a *canvas* will need to be provided or created for +it. The *canvas* can be a :class:`!tkinter.Canvas`, :class:`ScrolledCanvas` +or :class:`TurtleScreen`. + + +:class:`TurtleScreen` is the basic drawing surface for a +turtle. :class:`Screen` is a subclass of ``TurtleScreen``, and +includes :ref:`some additional methods ` for managing its +appearance (including size and title) and behaviour. 
``TurtleScreen``'s +constructor needs a :class:`!tkinter.Canvas` or a +:class:`ScrolledCanvas` as an argument. + +The functional interface for turtle graphics uses the various methods of +``Turtle`` and ``TurtleScreen``/``Screen``. Behind the scenes, a screen +object is automatically created whenever a function derived from a ``Screen`` +method is called. Similarly, a turtle object is automatically created +whenever any of the functions derived from a Turtle method is called. + +To use multiple turtles on a screen, the object-oriented interface must be +used. + + Help and configuration ====================== @@ -2218,12 +2460,12 @@ facilities: in the range 0..colormode or a 3-tuple of such numbers. - >>> screen.bgcolor("orange") - >>> screen.bgcolor() - "orange" - >>> screen.bgcolor(0.5,0,0.5) - >>> screen.bgcolor() - "#800080" + >>> screen.bgcolor("orange") + >>> screen.bgcolor() + "orange" + >>> screen.bgcolor(0.5,0,0.5) + >>> screen.bgcolor() + "#800080" >>> help(Turtle.penup) Help on method penup in module turtle: @@ -2315,7 +2557,9 @@ of this module or which better fits to your needs, e.g. for use in a classroom, you can prepare a configuration file ``turtle.cfg`` which will be read at import time and modify the configuration according to its settings. -The built in configuration would correspond to the following turtle.cfg:: +The built in configuration would correspond to the following ``turtle.cfg``: + +.. code-block:: ini width = 0.5 height = 0.75 @@ -2340,15 +2584,15 @@ The built in configuration would correspond to the following turtle.cfg:: Short explanation of selected entries: -- The first four lines correspond to the arguments of the :meth:`Screen.setup` +- The first four lines correspond to the arguments of the :func:`Screen.setup ` method. - Line 5 and 6 correspond to the arguments of the method - :meth:`Screen.screensize`. + :func:`Screen.screensize `. - *shape* can be any of the built-in shapes, e.g: arrow, turtle, etc. For more info try ``help(shape)``. -- If you want to use no fillcolor (i.e. make the turtle transparent), you have +- If you want to use no fill color (i.e. make the turtle transparent), you have to write ``fillcolor = ""`` (but all nonempty strings must not have quotes in - the cfg-file). + the cfg file). - If you want to reflect the turtle its state, you have to use ``resizemode = auto``. - If you set e.g. ``language = italian`` the docstringdict @@ -2398,6 +2642,8 @@ The :mod:`turtledemo` package directory contains: The demo scripts are: +.. currentmodule:: turtle + .. 
tabularcolumns:: |l|L|L| +----------------+------------------------------+-----------------------+ @@ -2444,6 +2690,9 @@ The demo scripts are: | planet_and_moon| simulation of | compound shapes, | | | gravitational system | :class:`Vec2D` | +----------------+------------------------------+-----------------------+ +| rosette | a pattern from the wikipedia | :func:`clone`, | +| | article on turtle graphics | :func:`undo` | ++----------------+------------------------------+-----------------------+ | round_dance | dancing turtles rotating | compound shapes, clone| | | pairwise in opposite | shapesize, tilt, | | | direction | get_shapepoly, update | @@ -2457,9 +2706,6 @@ The demo scripts are: | two_canvases | simple design | turtles on two | | | | canvases | +----------------+------------------------------+-----------------------+ -| wikipedia | a pattern from the wikipedia | :func:`clone`, | -| | article on turtle graphics | :func:`undo` | -+----------------+------------------------------+-----------------------+ | yinyang | another elementary example | :func:`circle` | +----------------+------------------------------+-----------------------+ @@ -2469,20 +2715,20 @@ Have fun! Changes since Python 2.6 ======================== -- The methods :meth:`Turtle.tracer`, :meth:`Turtle.window_width` and - :meth:`Turtle.window_height` have been eliminated. +- The methods :func:`Turtle.tracer `, :func:`Turtle.window_width ` and + :func:`Turtle.window_height ` have been eliminated. Methods with these names and functionality are now available only as methods of :class:`Screen`. The functions derived from these remain available. (In fact already in Python 2.6 these methods were merely duplications of the corresponding - :class:`TurtleScreen`/:class:`Screen`-methods.) + :class:`TurtleScreen`/:class:`Screen` methods.) -- The method :meth:`Turtle.fill` has been eliminated. - The behaviour of :meth:`begin_fill` and :meth:`end_fill` - have changed slightly: now every filling-process must be completed with an +- The method :func:`!Turtle.fill` has been eliminated. + The behaviour of :func:`begin_fill` and :func:`end_fill` + have changed slightly: now every filling process must be completed with an ``end_fill()`` call. -- A method :meth:`Turtle.filling` has been added. It returns a boolean +- A method :func:`Turtle.filling ` has been added. It returns a boolean value: ``True`` if a filling process is under way, ``False`` otherwise. This behaviour corresponds to a ``fill()`` call without arguments in Python 2.6. @@ -2490,23 +2736,22 @@ Changes since Python 2.6 Changes since Python 3.0 ======================== -- The methods :meth:`Turtle.shearfactor`, :meth:`Turtle.shapetransform` and - :meth:`Turtle.get_shapepoly` have been added. Thus the full range of +- The :class:`Turtle` methods :func:`shearfactor`, :func:`shapetransform` and + :func:`get_shapepoly` have been added. Thus the full range of regular linear transforms is now available for transforming turtle shapes. - :meth:`Turtle.tiltangle` has been enhanced in functionality: it now can - be used to get or set the tiltangle. :meth:`Turtle.settiltangle` has been - deprecated. + :func:`tiltangle` has been enhanced in functionality: it now can + be used to get or set the tilt angle. -- The method :meth:`Screen.onkeypress` has been added as a complement to - :meth:`Screen.onkey` which in fact binds actions to the keyrelease event. - Accordingly the latter has got an alias: :meth:`Screen.onkeyrelease`. 
+- The :class:`Screen` method :func:`onkeypress` has been added as a complement to + :func:`onkey`. As the latter binds actions to the key release event, + an alias: :func:`onkeyrelease` was also added for it. -- The method :meth:`Screen.mainloop` has been added. So when working only - with Screen and Turtle objects one must not additionally import - :func:`mainloop` anymore. +- The method :func:`Screen.mainloop ` has been added, + so there is no longer a need to use the standalone :func:`mainloop` function + when working with :class:`Screen` and :class:`Turtle` objects. -- Two input methods has been added :meth:`Screen.textinput` and - :meth:`Screen.numinput`. These popup input dialogs and return +- Two input methods have been added: :func:`Screen.textinput ` and + :func:`Screen.numinput `. These pop up input dialogs and return strings and numbers respectively. - Two example scripts :file:`tdemo_nim.py` and :file:`tdemo_round_dance.py` diff --git a/Doc/library/types.rst b/Doc/library/types.rst index cce0ad960edf970..54c3907dec98ccb 100644 --- a/Doc/library/types.rst +++ b/Doc/library/types.rst @@ -75,13 +75,53 @@ Dynamic Type Creation This function looks for items in *bases* that are not instances of :class:`type`, and returns a tuple where each such object that has - an ``__mro_entries__`` method is replaced with an unpacked result of + an :meth:`~object.__mro_entries__` method is replaced with an unpacked result of calling this method. If a *bases* item is an instance of :class:`type`, - or it doesn't have an ``__mro_entries__`` method, then it is included in + or it doesn't have an :meth:`!__mro_entries__` method, then it is included in the return tuple unchanged. .. versionadded:: 3.7 +.. function:: get_original_bases(cls, /) + + Return the tuple of objects originally given as the bases of *cls* before + the :meth:`~object.__mro_entries__` method has been called on any bases + (following the mechanisms laid out in :pep:`560`). This is useful for + introspecting :ref:`Generics `. + + For classes that have an ``__orig_bases__`` attribute, this + function returns the value of ``cls.__orig_bases__``. + For classes without the ``__orig_bases__`` attribute, ``cls.__bases__`` is + returned. + + Examples:: + + from typing import TypeVar, Generic, NamedTuple, TypedDict + + T = TypeVar("T") + class Foo(Generic[T]): ... + class Bar(Foo[int], float): ... + class Baz(list[str]): ... + Eggs = NamedTuple("Eggs", [("a", int), ("b", str)]) + Spam = TypedDict("Spam", {"a": int, "b": str}) + + assert Bar.__bases__ == (Foo, float) + assert get_original_bases(Bar) == (Foo[int], float) + + assert Baz.__bases__ == (list,) + assert get_original_bases(Baz) == (list[str],) + + assert Eggs.__bases__ == (tuple,) + assert get_original_bases(Eggs) == (NamedTuple,) + + assert Spam.__bases__ == (dict,) + assert get_original_bases(Spam) == (TypedDict,) + + assert int.__bases__ == (object,) + assert get_original_bases(int) == (object,) + + .. versionadded:: 3.12 + .. seealso:: :pep:`560` - Core support for typing module and generic types @@ -146,7 +186,7 @@ Standard names are defined for the following types: .. class:: CodeType(**kwargs) - .. index:: builtin: compile + .. index:: pair: built-in function; compile The type for code objects such as returned by :func:`compile`. @@ -160,6 +200,8 @@ Standard names are defined for the following types: Return a copy of the code object with new values for the specified fields. + Code objects are also supported by generic function :func:`copy.replace`. + .. 
versionadded:: 3.8 .. data:: CellType @@ -311,6 +353,13 @@ Standard names are defined for the following types: .. versionchanged:: 3.9.2 This type can now be subclassed. + .. seealso:: + + :ref:`Generic Alias Types` + In-depth documentation on instances of :class:`!types.GenericAlias` + + :pep:`585` - Type Hinting Generics In Standard Collections + Introducing the :class:`!types.GenericAlias` class .. class:: UnionType @@ -320,7 +369,7 @@ Standard names are defined for the following types: .. class:: TracebackType(tb_next, tb_frame, tb_lasti, tb_lineno) - The type of traceback objects such as found in ``sys.exc_info()[2]``. + The type of traceback objects such as found in ``sys.exception().__traceback__``. See :ref:`the language reference ` for details of the available attributes and operations, and guidance on creating tracebacks @@ -423,6 +472,12 @@ Standard names are defined for the following types: .. versionadded:: 3.12 +.. class:: CapsuleType + + The type of :ref:`capsule objects `. + + .. versionadded:: 3.13 + Additional Utility Classes and Functions ---------------------------------------- @@ -455,6 +510,8 @@ Additional Utility Classes and Functions However, for a structured record type use :func:`~collections.namedtuple` instead. + :class:`!SimpleNamespace` objects are supported by :func:`copy.replace`. + .. versionadded:: 3.3 .. versionchanged:: 3.9 @@ -486,7 +543,7 @@ Coroutine Utility Functions The generator-based coroutine is still a :term:`generator iterator`, but is also considered to be a :term:`coroutine` object and is :term:`awaitable`. However, it may not necessarily implement - the :meth:`__await__` method. + the :meth:`~object.__await__` method. If *gen_func* is a generator function, it will be modified in-place. diff --git a/Doc/library/typing.rst b/Doc/library/typing.rst index 0ec4499f94f5a7f..ba2845eb17ddccc 100644 --- a/Doc/library/typing.rst +++ b/Doc/library/typing.rst @@ -2,6 +2,12 @@ :mod:`typing` --- Support for type hints ======================================== +.. testsetup:: * + + import typing + from dataclasses import dataclass + from typing import * + .. module:: typing :synopsis: Support for type hints (see :pep:`484`). @@ -12,15 +18,14 @@ .. note:: The Python runtime does not enforce function and variable type annotations. - They can be used by third party tools such as type checkers, IDEs, linters, - etc. + They can be used by third party tools such as :term:`type checkers `, + IDEs, linters, etc. -------------- -This module provides runtime support for type hints. The most fundamental -support consists of the types :data:`Any`, :data:`Union`, :data:`Callable`, -:class:`TypeVar`, and :class:`Generic`. For a full specification, please see -:pep:`484`. For a simplified introduction to type hints, see :pep:`483`. +This module provides runtime support for type hints. For the original +specification of the typing system, see :pep:`484`. For a simplified +introduction to type hints, see :pep:`483`. The function below takes and returns a string and is annotated as follows:: @@ -41,9 +46,18 @@ For a summary of deprecated features and a deprecation timeline, please see .. seealso:: - The documentation at https://typing.readthedocs.io/ serves as useful reference - for type system features, useful typing related tools and typing best practices. 
+ `"Typing cheat sheet" `_ + A quick overview of type hints (hosted at the mypy docs) + "Type System Reference" section of `the mypy docs `_ + The Python typing system is standardised via PEPs, so this reference + should broadly apply to most Python type checkers. (Some parts may still + be specific to mypy.) + + `"Static Typing with Python" `_ + Type-checker-agnostic documentation written by the community detailing + type system features, useful typing related tools and typing best + practices. .. _relevant-peps: @@ -52,7 +66,12 @@ Relevant PEPs Since the initial introduction of type hints in :pep:`484` and :pep:`483`, a number of PEPs have modified and enhanced Python's framework for type -annotations. These include: +annotations: + +.. raw:: html + +
+ The full list of PEPs * :pep:`526`: Syntax for Variable Annotations *Introducing* syntax for annotating variables outside of function @@ -91,16 +110,30 @@ annotations. These include: *Introducing* :data:`LiteralString` * :pep:`681`: Data Class Transforms *Introducing* the :func:`@dataclass_transform` decorator +* :pep:`692`: Using ``TypedDict`` for more precise ``**kwargs`` typing + *Introducing* a new way of typing ``**kwargs`` with :data:`Unpack` and + :data:`TypedDict` +* :pep:`695`: Type Parameter Syntax + *Introducing* builtin syntax for creating generic functions, classes, and type aliases. +* :pep:`698`: Adding an override decorator to typing + *Introducing* the :func:`@override` decorator + +.. raw:: html + +
+
.. _type-aliases: Type aliases ============ -A type alias is defined by assigning the type to the alias. In this example, -``Vector`` and ``list[float]`` will be treated as interchangeable synonyms:: +A type alias is defined using the :keyword:`type` statement, which creates +an instance of :class:`TypeAliasType`. In this example, +``Vector`` and ``list[float]`` will be treated equivalently by static type +checkers:: - Vector = list[float] + type Vector = list[float] def scale(scalar: float, vector: Vector) -> Vector: return [scalar * num for num in vector] @@ -112,9 +145,9 @@ Type aliases are useful for simplifying complex type signatures. For example:: from collections.abc import Sequence - ConnectionOptions = dict[str, str] - Address = tuple[str, int] - Server = tuple[Address, ConnectionOptions] + type ConnectionOptions = dict[str, str] + type Address = tuple[str, int] + type Server = tuple[Address, ConnectionOptions] def broadcast_message(message: str, servers: Sequence[Server]) -> None: ... @@ -126,8 +159,17 @@ Type aliases are useful for simplifying complex type signatures. For example:: servers: Sequence[tuple[tuple[str, int], dict[str, str]]]) -> None: ... -Note that ``None`` as a type hint is a special case and is replaced by -``type(None)``. +The :keyword:`type` statement is new in Python 3.12. For backwards +compatibility, type aliases can also be created through simple assignment:: + + Vector = list[float] + +Or marked with :data:`TypeAlias` to make it explicit that this is a type alias, +not a normal variable assignment:: + + from typing import TypeAlias + + Vector: TypeAlias = list[float] .. _distinct: @@ -194,7 +236,7 @@ See :pep:`484` for more details. .. note:: Recall that the use of a type alias declares two types to be *equivalent* to - one another. Doing ``Alias = Original`` will make the static type checker + one another. Doing ``type Alias = Original`` will make the static type checker treat ``Alias`` as being *exactly equivalent* to ``Original`` in all cases. This is useful when you want to simplify complex type signatures. @@ -208,35 +250,85 @@ See :pep:`484` for more details. .. versionadded:: 3.5.2 .. versionchanged:: 3.10 - ``NewType`` is now a class rather than a function. There is some additional - runtime cost when calling ``NewType`` over a regular function. However, this - cost will be reduced in 3.11.0. + ``NewType`` is now a class rather than a function. As a result, there is + some additional runtime cost when calling ``NewType`` over a regular + function. +.. versionchanged:: 3.11 + The performance of calling ``NewType`` has been restored to its level in + Python 3.9. -Callable -======== +.. _annotating-callables: -Frameworks expecting callback functions of specific signatures might be -type hinted using ``Callable[[Arg1Type, Arg2Type], ReturnType]``. +Annotating callable objects +=========================== -For example:: +Functions -- or other :term:`callable` objects -- can be annotated using +:class:`collections.abc.Callable` or :data:`typing.Callable`. +``Callable[[int], str]`` signifies a function that takes a single parameter +of type :class:`int` and returns a :class:`str`. + +For example: - from collections.abc import Callable +.. testcode:: + + from collections.abc import Callable, Awaitable def feeder(get_next_item: Callable[[], str]) -> None: - # Body + ... # Body def async_query(on_success: Callable[[int], None], on_error: Callable[[int, Exception], None]) -> None: - # Body + ... # Body async def on_update(value: str) -> None: - # Body + ... 
# Body + callback: Callable[[str], Awaitable[None]] = on_update -It is possible to declare the return type of a callable without specifying -the call signature by substituting a literal ellipsis -for the list of arguments in the type hint: ``Callable[..., ReturnType]``. +The subscription syntax must always be used with exactly two values: the +argument list and the return type. The argument list must be a list of types, +a :class:`ParamSpec`, :data:`Concatenate`, or an ellipsis. The return type must +be a single type. + +If a literal ellipsis ``...`` is given as the argument list, it indicates that +a callable with any arbitrary parameter list would be acceptable: + +.. testcode:: + + def concat(x: str, y: str) -> str: + return x + y + + x: Callable[..., str] + x = str # OK + x = concat # Also OK + +``Callable`` cannot express complex signatures such as functions that take a +variadic number of arguments, :ref:`overloaded functions `, or +functions that have keyword-only parameters. However, these signatures can be +expressed by defining a :class:`Protocol` class with a +:meth:`~object.__call__` method: + +.. testcode:: + + from collections.abc import Iterable + from typing import Protocol + + class Combiner(Protocol): + def __call__(self, *vals: bytes, maxlen: int | None = None) -> list[bytes]: ... + + def batch_proc(data: Iterable[bytes], cb_results: Combiner) -> bytes: + for item in data: + ... + + def good_cb(*vals: bytes, maxlen: int | None = None) -> list[bytes]: + ... + def bad_cb(*vals: bytes, maxitems: int | None) -> list[bytes]: + ... + + batch_proc([], good_cb) # OK + batch_proc([], bad_cb) # Error! Argument 2 has incompatible type because of + # different name and kind in the callback Callables which take other callables as arguments may indicate that their parameter types are dependent on each other using :class:`ParamSpec`. @@ -260,28 +352,150 @@ Generics ======== Since type information about objects kept in containers cannot be statically -inferred in a generic way, abstract base classes have been extended to support -subscription to denote expected types for container elements. +inferred in a generic way, many container classes in the standard library support +subscription to denote the expected types of container elements. -:: +.. testcode:: from collections.abc import Mapping, Sequence + class Employee: ... + + # Sequence[Employee] indicates that all elements in the sequence + # must be instances of "Employee". + # Mapping[str, str] indicates that all keys and all values in the mapping + # must be strings. def notify_by_email(employees: Sequence[Employee], overrides: Mapping[str, str]) -> None: ... -Generics can be parameterized by using a factory available in typing -called :class:`TypeVar`. +Generic functions and classes can be parameterized by using +:ref:`type parameter syntax `:: -:: + from collections.abc import Sequence + + def first[T](l: Sequence[T]) -> T: # Function is generic over the TypeVar "T" + return l[0] + +Or by using the :class:`TypeVar` factory directly:: from collections.abc import Sequence from typing import TypeVar - T = TypeVar('T') # Declare type variable + U = TypeVar('U') # Declare type variable "U" - def first(l: Sequence[T]) -> T: # Generic function - return l[0] + def second(l: Sequence[U]) -> U: # Function is generic over the TypeVar "U" + return l[1] + +.. versionchanged:: 3.12 + Syntactic support for generics is new in Python 3.12. + +.. 
_annotating-tuples: + +Annotating tuples +================= + +For most containers in Python, the typing system assumes that all elements in +the container will be of the same type. For example:: + + from collections.abc import Mapping + + # Type checker will infer that all elements in ``x`` are meant to be ints + x: list[int] = [] + + # Type checker error: ``list`` only accepts a single type argument: + y: list[int, str] = [1, 'foo'] + + # Type checker will infer that all keys in ``z`` are meant to be strings, + # and that all values in ``z`` are meant to be either strings or ints + z: Mapping[str, str | int] = {} + +:class:`list` only accepts one type argument, so a type checker would emit an +error on the ``y`` assignment above. Similarly, +:class:`~collections.abc.Mapping` only accepts two type arguments: the first +indicates the type of the keys, and the second indicates the type of the +values. + +Unlike most other Python containers, however, it is common in idiomatic Python +code for tuples to have elements which are not all of the same type. For this +reason, tuples are special-cased in Python's typing system. :class:`tuple` +accepts *any number* of type arguments:: + + # OK: ``x`` is assigned to a tuple of length 1 where the sole element is an int + x: tuple[int] = (5,) + + # OK: ``y`` is assigned to a tuple of length 2; + # element 1 is an int, element 2 is a str + y: tuple[int, str] = (5, "foo") + + # Error: the type annotation indicates a tuple of length 1, + # but ``z`` has been assigned to a tuple of length 3 + z: tuple[int] = (1, 2, 3) + +To denote a tuple which could be of *any* length, and in which all elements are +of the same type ``T``, use ``tuple[T, ...]``. To denote an empty tuple, use +``tuple[()]``. Using plain ``tuple`` as an annotation is equivalent to using +``tuple[Any, ...]``:: + + x: tuple[int, ...] = (1, 2) + # These reassignments are OK: ``tuple[int, ...]`` indicates x can be of any length + x = (1, 2, 3) + x = () + # This reassignment is an error: all elements in ``x`` must be ints + x = ("foo", "bar") + + # ``y`` can only ever be assigned to an empty tuple + y: tuple[()] = () + + z: tuple = ("foo", "bar") + # These reassignments are OK: plain ``tuple`` is equivalent to ``tuple[Any, ...]`` + z = (1, 2, 3) + z = () + +.. _type-of-class-objects: + +The type of class objects +========================= + +A variable annotated with ``C`` may accept a value of type ``C``. In +contrast, a variable annotated with ``type[C]`` (or +:class:`typing.Type[C] `) may accept values that are classes +themselves -- specifically, it will accept the *class object* of ``C``. For +example:: + + a = 3 # Has type ``int`` + b = int # Has type ``type[int]`` + c = type(a) # Also has type ``type[int]`` + +Note that ``type[C]`` is covariant:: + + class User: ... + class ProUser(User): ... + class TeamUser(User): ... + + def make_new_user(user_class: type[User]) -> User: + # ... + return user_class() + + make_new_user(User) # OK + make_new_user(ProUser) # Also OK: ``type[ProUser]`` is a subtype of ``type[User]`` + make_new_user(TeamUser) # Still fine + make_new_user(User()) # Error: expected ``type[User]`` but got ``User`` + make_new_user(int) # Error: ``type[int]`` is not a subtype of ``type[User]`` + +The only legal parameters for :class:`type` are classes, :data:`Any`, +:ref:`type variables `, and unions of any of these types. +For example:: + + def new_non_team_user(user_class: type[BasicUser | ProUser]): ... 
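+    # Note: ``BasicUser`` is assumed here to be another subclass of ``User``
+    # (e.g. ``class BasicUser(User): ...``), defined alongside ``ProUser`` and
+    # ``TeamUser`` in the covariance example above; it is not shown in this patch.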
+ + new_non_team_user(BasicUser) # OK + new_non_team_user(ProUser) # OK + new_non_team_user(TeamUser) # Error: ``type[TeamUser]`` is not a subtype + # of ``type[BasicUser | ProUser]`` + new_non_team_user(User) # Also an error + +``type[Any]`` is equivalent to :class:`type`, which is the root of Python's +:ref:`metaclass hierarchy `. .. _user-defined-generics: @@ -292,12 +506,9 @@ A user-defined class can be defined as a generic class. :: - from typing import TypeVar, Generic from logging import Logger - T = TypeVar('T') - - class LoggedVar(Generic[T]): + class LoggedVar[T]: def __init__(self, value: T, name: str, logger: Logger) -> None: self.name = name self.logger = logger @@ -314,12 +525,23 @@ A user-defined class can be defined as a generic class. def log(self, message: str) -> None: self.logger.info('%s: %s', self.name, message) -``Generic[T]`` as a base class defines that the class ``LoggedVar`` takes a -single type parameter ``T`` . This also makes ``T`` valid as a type within the -class body. +This syntax indicates that the class ``LoggedVar`` is parameterised around a +single :ref:`type variable ` ``T`` . This also makes ``T`` valid as +a type within the class body. + +Generic classes implicitly inherit from :class:`Generic`. For compatibility +with Python 3.11 and lower, it is also possible to inherit explicitly from +:class:`Generic` to indicate a generic class:: + + from typing import TypeVar, Generic + + T = TypeVar('T') + + class LoggedVar(Generic[T]): + ... -The :class:`Generic` base class defines :meth:`~object.__class_getitem__` so -that ``LoggedVar[T]`` is valid as a type:: +Generic classes have :meth:`~object.__class_getitem__` methods, meaning they +can be parameterised at runtime (e.g. ``LoggedVar[int]`` below):: from collections.abc import Iterable @@ -332,11 +554,14 @@ A generic type can have any number of type variables. All varieties of from typing import TypeVar, Generic, Sequence - T = TypeVar('T', contravariant=True) - B = TypeVar('B', bound=Sequence[bytes], covariant=True) - S = TypeVar('S', int, str) + class WeirdTrio[T, B: Sequence[bytes], S: (int, str)]: + ... + + OldT = TypeVar('OldT', contravariant=True) + OldB = TypeVar('OldB', bound=Sequence[bytes], covariant=True) + OldS = TypeVar('OldS', int, str) - class WeirdTrio(Generic[T, B, S]): + class OldWeirdTrio(Generic[OldT, OldB, OldS]): ... Each type variable argument to :class:`Generic` must be distinct. @@ -345,91 +570,108 @@ This is thus invalid:: from typing import TypeVar, Generic ... + class Pair[M, M]: # SyntaxError + ... + T = TypeVar('T') class Pair(Generic[T, T]): # INVALID ... -You can use multiple inheritance with :class:`Generic`:: +Generic classes can also inherit from other classes:: from collections.abc import Sized - from typing import TypeVar, Generic - T = TypeVar('T') - - class LinkedList(Sized, Generic[T]): + class LinkedList[T](Sized): ... -When inheriting from generic classes, some type variables could be fixed:: +When inheriting from generic classes, some type parameters could be fixed:: from collections.abc import Mapping - from typing import TypeVar - T = TypeVar('T') - - class MyDict(Mapping[str, T]): + class MyDict[T](Mapping[str, T]): ... In this case ``MyDict`` has a single parameter, ``T``. Using a generic class without specifying type parameters assumes :data:`Any` for each position. In the following example, ``MyIterable`` is -not generic but implicitly inherits from ``Iterable[Any]``:: +not generic but implicitly inherits from ``Iterable[Any]``: + +.. 
testcode:: from collections.abc import Iterable class MyIterable(Iterable): # Same as Iterable[Any] + ... -User defined generic type aliases are also supported. Examples:: +User-defined generic type aliases are also supported. Examples:: from collections.abc import Iterable - from typing import TypeVar - S = TypeVar('S') - Response = Iterable[S] | int + + type Response[S] = Iterable[S] | int # Return type here is same as Iterable[str] | int def response(query: str) -> Response[str]: ... - T = TypeVar('T', int, float, complex) - Vec = Iterable[tuple[T, T]] + type Vec[T] = Iterable[tuple[T, T]] - def inproduct(v: Vec[T]) -> T: # Same as Iterable[tuple[T, T]] + def inproduct[T: (int, float, complex)](v: Vec[T]) -> T: # Same as Iterable[tuple[T, T]] return sum(x*y for x, y in v) +For backward compatibility, generic type aliases can also be created +through a simple assignment:: + + from collections.abc import Iterable + from typing import TypeVar + + S = TypeVar("S") + Response = Iterable[S] | int + .. versionchanged:: 3.7 :class:`Generic` no longer has a custom metaclass. +.. versionchanged:: 3.12 + Syntactic support for generics and type aliases is new in version 3.12. + Previously, generic classes had to explicitly inherit from :class:`Generic` + or contain a type variable in one of their bases. + User-defined generics for parameter expressions are also supported via parameter -specification variables in the form ``Generic[P]``. The behavior is consistent +specification variables in the form ``[**P]``. The behavior is consistent with type variables' described above as parameter specification variables are treated by the typing module as a specialized type variable. The one exception to this is that a list of types can be used to substitute a :class:`ParamSpec`:: - >>> from typing import Generic, ParamSpec, TypeVar - - >>> T = TypeVar('T') - >>> P = ParamSpec('P') - - >>> class Z(Generic[T, P]): ... + >>> class Z[T, **P]: ... # T is a TypeVar; P is a ParamSpec ... >>> Z[int, [dict, float]] - __main__.Z[int, (, )] + __main__.Z[int, [dict, float]] + +Classes generic over a :class:`ParamSpec` can also be created using explicit +inheritance from :class:`Generic`. In this case, ``**`` is not used:: + + from typing import ParamSpec, Generic + + P = ParamSpec('P') + class Z(Generic[P]): + ... -Furthermore, a generic with only one parameter specification variable will accept +Another difference between :class:`TypeVar` and :class:`ParamSpec` is that a +generic with only one parameter specification variable will accept parameter lists in the forms ``X[[Type1, Type2, ...]]`` and also ``X[Type1, Type2, ...]`` for aesthetic reasons. Internally, the latter is converted to the former, so the following are equivalent:: - >>> class X(Generic[P]): ... + >>> class X[**P]: ... ... >>> X[int, str] - __main__.X[(, )] + __main__.X[[int, str]] >>> X[[int, str]] - __main__.X[(, )] + __main__.X[[int, str]] -Do note that generics with :class:`ParamSpec` may not have correct +Note that generics with :class:`ParamSpec` may not have correct ``__parameters__`` after substitution in some cases because they are intended primarily for static type checking. @@ -439,7 +681,7 @@ are intended primarily for static type checking. A user-defined generic class can have ABCs as base classes without a metaclass conflict. Generic metaclasses are not supported. 
The outcome of parameterizing -generics is cached, and most types in the typing module are hashable and +generics is cached, and most types in the typing module are :term:`hashable` and comparable for equality. @@ -564,25 +806,7 @@ can define new custom protocols to fully enjoy structural subtyping Module contents =============== -The module defines the following classes, functions and decorators. - -.. note:: - - This module defines several types that are subclasses of pre-existing - standard library classes which also extend :class:`Generic` - to support type variables inside ``[]``. - These types became redundant in Python 3.9 when the - corresponding pre-existing classes were enhanced to support ``[]``. - - The redundant types are deprecated as of Python 3.9 but no - deprecation warnings will be issued by the interpreter. - It is expected that type checkers will flag the deprecated types - when the checked program targets Python 3.9 or newer. - - The deprecated types will be removed from the :mod:`typing` module - in the first Python version released 5 years after the release of Python 3.9.0. - See details in :pep:`585`—*Type Hinting Generics In Standard Collections*. - +The ``typing`` module defines the following classes, functions and decorators. Special typing primitives ------------------------- @@ -590,7 +814,8 @@ Special typing primitives Special types """"""""""""" -These can be used as types in annotations and do not support ``[]``. +These can be used as types in annotations. They do not support subscription +using ``[]``. .. data:: Any @@ -604,29 +829,78 @@ These can be used as types in annotations and do not support ``[]``. avoiding type checker errors with classes that can duck type anywhere or are highly dynamic. +.. data:: AnyStr + + A :ref:`constrained type variable `. + + Definition:: + + AnyStr = TypeVar('AnyStr', str, bytes) + + ``AnyStr`` is meant to be used for functions that may accept :class:`str` or + :class:`bytes` arguments but cannot allow the two to mix. + + For example:: + + def concat(a: AnyStr, b: AnyStr) -> AnyStr: + return a + b + + concat("foo", "bar") # OK, output has type 'str' + concat(b"foo", b"bar") # OK, output has type 'bytes' + concat("foo", b"bar") # Error, cannot mix str and bytes + + Note that, despite its name, ``AnyStr`` has nothing to do with the + :class:`Any` type, nor does it mean "any string". In particular, ``AnyStr`` + and ``str | bytes`` are different from each other and have different use + cases:: + + # Invalid use of AnyStr: + # The type variable is used only once in the function signature, + # so cannot be "solved" by the type checker + def greet_bad(cond: bool) -> AnyStr: + return "hi there!" if cond else b"greetings!" + + # The better way of annotating this function: + def greet_proper(cond: bool) -> str | bytes: + return "hi there!" if cond else b"greetings!" + + .. deprecated-removed:: 3.13 3.18 + Deprecated in favor of the new :ref:`type parameter syntax `. + Use ``class A[T: (str, bytes)]: ...`` instead of importing ``AnyStr``. See + :pep:`695` for more details. + + In Python 3.16, ``AnyStr`` will be removed from ``typing.__all__``, and + deprecation warnings will be emitted at runtime when it is accessed or + imported from ``typing``. ``AnyStr`` will be removed from ``typing`` + in Python 3.18. + .. data:: LiteralString - Special type that includes only literal strings. A string + Special type that includes only literal strings. 
+ + Any string literal is compatible with ``LiteralString``, as is another - ``LiteralString``, but an object typed as just ``str`` is not. + ``LiteralString``. However, an object typed as just ``str`` is not. A string created by composing ``LiteralString``-typed objects is also acceptable as a ``LiteralString``. - Example:: + Example: - def run_query(sql: LiteralString) -> ... + .. testcode:: + + def run_query(sql: LiteralString) -> None: ... def caller(arbitrary_string: str, literal_string: LiteralString) -> None: - run_query("SELECT * FROM students") # ok - run_query(literal_string) # ok - run_query("SELECT * FROM " + literal_string) # ok + run_query("SELECT * FROM students") # OK + run_query(literal_string) # OK + run_query("SELECT * FROM " + literal_string) # OK run_query(arbitrary_string) # type checker error run_query( # type checker error f"SELECT * FROM students WHERE name = {arbitrary_string}" ) - This is useful for sensitive APIs where arbitrary user-generated + ``LiteralString`` is useful for sensitive APIs where arbitrary user-generated strings could generate problems. For example, the two cases above that generate type checker errors could be vulnerable to an SQL injection attack. @@ -643,20 +917,20 @@ These can be used as types in annotations and do not support ``[]``. This can be used to define a function that should never be called, or a function that never returns:: - from typing import Never + from typing import Never - def never_call_me(arg: Never) -> None: - pass + def never_call_me(arg: Never) -> None: + pass - def int_or_str(arg: int | str) -> None: - never_call_me(arg) # type checker error - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _: - never_call_me(arg) # ok, arg is of type Never + def int_or_str(arg: int | str) -> None: + never_call_me(arg) # type checker error + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + never_call_me(arg) # OK, arg is of type Never .. versionadded:: 3.11 @@ -666,6 +940,7 @@ These can be used as types in annotations and do not support ``[]``. .. data:: NoReturn Special type indicating that a function never returns. + For example:: from typing import NoReturn @@ -685,15 +960,20 @@ These can be used as types in annotations and do not support ``[]``. .. data:: Self Special type to represent the current enclosed class. + For example:: - from typing import Self + from typing import Self, reveal_type class Foo: - def return_self(self) -> Self: - ... - return self + def return_self(self) -> Self: + ... + return self + class SubclassOfFoo(Foo): pass + + reveal_type(Foo().return_self()) # Revealed type is "Foo" + reveal_type(SubclassOfFoo().return_self()) # Revealed type is "SubclassOfFoo" This annotation is semantically equivalent to the following, albeit in a more succinct fashion:: @@ -703,19 +983,15 @@ These can be used as types in annotations and do not support ``[]``. Self = TypeVar("Self", bound="Foo") class Foo: - def return_self(self: Self) -> Self: - ... - return self - - In general if something currently follows the pattern of:: - - class Foo: - def return_self(self) -> "Foo": - ... - return self + def return_self(self: Self) -> Self: + ... + return self - You should use :data:`Self` as calls to ``SubclassOfFoo.return_self`` would have - ``Foo`` as the return type and not ``SubclassOfFoo``. + In general, if something returns ``self``, as in the above examples, you + should use ``Self`` as the return annotation. 
If ``Foo.return_self`` was + annotated as returning ``"Foo"``, then the type checker would infer the + object returned from ``SubclassOfFoo.return_self`` as being of type ``Foo`` + rather than ``SubclassOfFoo``. Other common use cases include: @@ -723,6 +999,17 @@ These can be used as types in annotations and do not support ``[]``. of the ``cls`` parameter. - Annotating an :meth:`~object.__enter__` method which returns self. + You should not use ``Self`` as the return annotation if the method is not + guaranteed to return an instance of a subclass when the class is + subclassed:: + + class Eggs: + # Self would be an incorrect return annotation here, + # as the object returned is always an instance of Eggs, + # even in subclasses + def returns_eggs(self) -> "Eggs": + return Eggs() + See :pep:`673` for more details. .. versionadded:: 3.11 @@ -730,38 +1017,52 @@ These can be used as types in annotations and do not support ``[]``. .. data:: TypeAlias Special annotation for explicitly declaring a :ref:`type alias `. + For example:: - from typing import TypeAlias + from typing import TypeAlias - Factors: TypeAlias = list[int] + Factors: TypeAlias = list[int] - See :pep:`613` for more details about explicit type aliases. + ``TypeAlias`` is particularly useful on older Python versions for annotating + aliases that make use of forward references, as it can be hard for type + checkers to distinguish these from normal variable assignments: - .. versionadded:: 3.10 + .. testcode:: -Special forms -""""""""""""" + from typing import Generic, TypeAlias, TypeVar + + T = TypeVar("T") -These can be used as types in annotations using ``[]``, each having a unique syntax. + # "Box" does not exist yet, + # so we have to use quotes for the forward reference on Python <3.12. + # Using ``TypeAlias`` tells the type checker that this is a type alias declaration, + # not a variable assignment to a string. + BoxOfStrings: TypeAlias = "Box[str]" -.. data:: Tuple + class Box(Generic[T]): + @classmethod + def make_box_of_strings(cls) -> BoxOfStrings: ... - Tuple type; ``Tuple[X, Y]`` is the type of a tuple of two items - with the first item of type X and the second of type Y. The type of - the empty tuple can be written as ``Tuple[()]``. + See :pep:`613` for more details. - Example: ``Tuple[T1, T2]`` is a tuple of two elements corresponding - to type variables T1 and T2. ``Tuple[int, float, str]`` is a tuple - of an int, a float and a string. + .. versionadded:: 3.10 - To specify a variable-length tuple of homogeneous type, - use literal ellipsis, e.g. ``Tuple[int, ...]``. A plain :data:`Tuple` - is equivalent to ``Tuple[Any, ...]``, and in turn to :class:`tuple`. + .. deprecated:: 3.12 + :data:`TypeAlias` is deprecated in favor of the :keyword:`type` statement, + which creates instances of :class:`TypeAliasType` + and which natively supports forward references. + Note that while :data:`TypeAlias` and :class:`TypeAliasType` serve + similar purposes and have similar names, they are distinct and the + latter is not the type of the former. + Removal of :data:`TypeAlias` is not currently planned, but users + are encouraged to migrate to :keyword:`type` statements. - .. deprecated:: 3.9 - :class:`builtins.tuple ` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. +Special forms +""""""""""""" + +These can be used as types in annotations. They all support subscription using +``[]``, but each has a unique syntax. .. 
data:: Union @@ -800,8 +1101,6 @@ These can be used as types in annotations using ``[]``, each having a unique syn .. data:: Optional - Optional type. - ``Optional[X]`` is equivalent to ``X | None`` (or ``Union[X, None]``). Note that this is not the same concept as an optional argument, @@ -823,50 +1122,16 @@ These can be used as types in annotations using ``[]``, each having a unique syn Optional can now be written as ``X | None``. See :ref:`union type expressions`. -.. data:: Callable - - Callable type; ``Callable[[int], str]`` is a function of (int) -> str. - - The subscription syntax must always be used with exactly two - values: the argument list and the return type. The argument list - must be a list of types or an ellipsis; the return type must be - a single type. - - There is no syntax to indicate optional or keyword arguments; - such function types are rarely used as callback types. - ``Callable[..., ReturnType]`` (literal ellipsis) can be used to - type hint a callable taking any number of arguments and returning - ``ReturnType``. A plain :data:`Callable` is equivalent to - ``Callable[..., Any]``, and in turn to - :class:`collections.abc.Callable`. - - Callables which take other callables as arguments may indicate that their - parameter types are dependent on each other using :class:`ParamSpec`. - Additionally, if that callable adds or removes arguments from other - callables, the :data:`Concatenate` operator may be used. They - take the form ``Callable[ParamSpecVariable, ReturnType]`` and - ``Callable[Concatenate[Arg1Type, Arg2Type, ..., ParamSpecVariable], ReturnType]`` - respectively. - - .. deprecated:: 3.9 - :class:`collections.abc.Callable` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. - - .. versionchanged:: 3.10 - ``Callable`` now supports :class:`ParamSpec` and :data:`Concatenate`. - See :pep:`612` for more details. - - .. seealso:: - The documentation for :class:`ParamSpec` and :class:`Concatenate` provide - examples of usage with ``Callable``. - .. data:: Concatenate - Used with :data:`Callable` and :class:`ParamSpec` to type annotate a higher - order callable which adds, removes, or transforms parameters of another + Special form for annotating higher-order functions. + + ``Concatenate`` can be used in conjunction with :ref:`Callable ` and + :class:`ParamSpec` to annotate a higher-order callable which adds, removes, + or transforms parameters of another callable. Usage is in the form ``Concatenate[Arg1Type, Arg2Type, ..., ParamSpecVariable]``. ``Concatenate`` - is currently only valid when used as the first argument to a :data:`Callable`. + is currently only valid when used as the first argument to a :ref:`Callable `. The last parameter to ``Concatenate`` must be a :class:`ParamSpec` or ellipsis (``...``). @@ -880,16 +1145,13 @@ These can be used as types in annotations using ``[]``, each having a unique syn from collections.abc import Callable from threading import Lock - from typing import Concatenate, ParamSpec, TypeVar - - P = ParamSpec('P') - R = TypeVar('R') + from typing import Concatenate # Use this lock to ensure that only one thread is executing a function # at any time. my_lock = Lock() - def with_lock(f: Callable[Concatenate[Lock, P], R]) -> Callable[P, R]: + def with_lock[**P, R](f: Callable[Concatenate[Lock, P], R]) -> Callable[P, R]: '''A type-safe decorator which provides a lock.''' def inner(*args: P.args, **kwargs: P.kwargs) -> R: # Provide the lock as the first argument. 
@@ -905,75 +1167,33 @@ These can be used as types in annotations using ``[]``, each having a unique syn # We don't need to pass in the lock ourselves thanks to the decorator. sum_threadsafe([1.1, 2.2, 3.3]) -.. versionadded:: 3.10 - -.. seealso:: - - * :pep:`612` -- Parameter Specification Variables (the PEP which introduced - ``ParamSpec`` and ``Concatenate``). - * :class:`ParamSpec` and :class:`Callable`. - - -.. class:: Type(Generic[CT_co]) - - A variable annotated with ``C`` may accept a value of type ``C``. In - contrast, a variable annotated with ``Type[C]`` may accept values that are - classes themselves -- specifically, it will accept the *class object* of - ``C``. For example:: - - a = 3 # Has type 'int' - b = int # Has type 'Type[int]' - c = type(a) # Also has type 'Type[int]' - - Note that ``Type[C]`` is covariant:: - - class User: ... - class BasicUser(User): ... - class ProUser(User): ... - class TeamUser(User): ... - - # Accepts User, BasicUser, ProUser, TeamUser, ... - def make_new_user(user_class: Type[User]) -> User: - # ... - return user_class() - - The fact that ``Type[C]`` is covariant implies that all subclasses of - ``C`` should implement the same constructor signature and class method - signatures as ``C``. The type checker should flag violations of this, - but should also allow constructor calls in subclasses that match the - constructor calls in the indicated base class. How the type checker is - required to handle this particular case may change in future revisions of - :pep:`484`. - - The only legal parameters for :class:`Type` are classes, :data:`Any`, - :ref:`type variables `, and unions of any of these types. - For example:: + .. versionadded:: 3.10 - def new_non_team_user(user_class: Type[BasicUser | ProUser]): ... + .. seealso:: - ``Type[Any]`` is equivalent to ``Type`` which in turn is equivalent - to ``type``, which is the root of Python's metaclass hierarchy. + * :pep:`612` -- Parameter Specification Variables (the PEP which introduced + ``ParamSpec`` and ``Concatenate``) + * :class:`ParamSpec` + * :ref:`annotating-callables` - .. versionadded:: 3.5.2 +.. data:: Literal - .. deprecated:: 3.9 - :class:`builtins.type ` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + Special typing form to define "literal types". -.. data:: Literal + ``Literal`` can be used to indicate to type checkers that the + annotated object has a value equivalent to one of the + provided literals. - A type that can be used to indicate to type checkers that the - corresponding variable or function parameter has a value equivalent to - the provided literal (or one of several literals). For example:: + For example:: def validate_simple(data: Any) -> Literal[True]: # always returns True ... - MODE = Literal['r', 'rb', 'w', 'wb'] - def open_helper(file: str, mode: MODE) -> str: + type Mode = Literal['r', 'rb', 'w', 'wb'] + def open_helper(file: str, mode: Mode) -> str: ... - open_helper('/some/path', 'r') # Passes type check + open_helper('/some/path', 'r') # Passes type check open_helper('/other/path', 'typo') # Error in type checker ``Literal[...]`` cannot be subclassed. At runtime, an arbitrary value @@ -1016,8 +1236,12 @@ These can be used as types in annotations using ``[]``, each having a unique syn .. data:: Final - A special typing construct to indicate to type checkers that a name - cannot be re-assigned or overridden in a subclass. For example:: + Special typing construct to indicate final names to type checkers. 
+ + Final names cannot be reassigned in any scope. Final names declared in class + scopes cannot be overridden in subclasses. + + For example:: MAX_SIZE: Final = 9000 MAX_SIZE += 1 # Error reported by type checker @@ -1035,10 +1259,17 @@ These can be used as types in annotations using ``[]``, each having a unique syn .. data:: Required + Special typing construct to mark a :class:`TypedDict` key as required. + + This is mainly useful for ``total=False`` TypedDicts. See :class:`TypedDict` + and :pep:`655` for more details. + + .. versionadded:: 3.11 + .. data:: NotRequired - Special typing constructs that mark individual keys of a :class:`TypedDict` - as either required or non-required respectively. + Special typing construct to mark a :class:`TypedDict` key as potentially + missing. See :class:`TypedDict` and :pep:`655` for more details. @@ -1046,92 +1277,150 @@ These can be used as types in annotations using ``[]``, each having a unique syn .. data:: Annotated - A type, introduced in :pep:`593` (``Flexible function and variable - annotations``), to decorate existing types with context-specific metadata - (possibly multiple pieces of it, as ``Annotated`` is variadic). - Specifically, a type ``T`` can be annotated with metadata ``x`` via the - typehint ``Annotated[T, x]``. This metadata can be used for either static - analysis or at runtime. If a library (or tool) encounters a typehint - ``Annotated[T, x]`` and has no special logic for metadata ``x``, it - should ignore it and simply treat the type as ``T``. Unlike the - ``no_type_check`` functionality that currently exists in the ``typing`` - module which completely disables typechecking annotations on a function - or a class, the ``Annotated`` type allows for both static typechecking - of ``T`` (which can safely ignore ``x``) - together with runtime access to ``x`` within a specific application. - - Ultimately, the responsibility of how to interpret the annotations (if - at all) is the responsibility of the tool or library encountering the - ``Annotated`` type. A tool or library encountering an ``Annotated`` type - can scan through the annotations to determine if they are of interest - (e.g., using ``isinstance()``). - - When a tool or a library does not support annotations or encounters an - unknown annotation it should just ignore it and treat annotated type as - the underlying type. - - It's up to the tool consuming the annotations to decide whether the - client is allowed to have several annotations on one type and how to - merge those annotations. - - Since the ``Annotated`` type allows you to put several annotations of - the same (or different) type(s) on any node, the tools or libraries - consuming those annotations are in charge of dealing with potential - duplicates. For example, if you are doing value range analysis you might - allow this:: - - T1 = Annotated[int, ValueRange(-10, 5)] - T2 = Annotated[T1, ValueRange(-20, 3)] - - Passing ``include_extras=True`` to :func:`get_type_hints` lets one - access the extra annotations at runtime. - - The details of the syntax: + Special typing form to add context-specific metadata to an annotation. + + Add metadata ``x`` to a given type ``T`` by using the annotation + ``Annotated[T, x]``. Metadata added using ``Annotated`` can be used by + static analysis tools or at runtime. At runtime, the metadata is stored + in a :attr:`!__metadata__` attribute. 
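+
+   For example, a hypothetical validation helper might scan the metadata
+   stored in :attr:`!__metadata__` and act only on the pieces it recognizes,
+   ignoring everything else (``ValueRange`` and ``check_value_range`` below
+   are illustrative names, not part of :mod:`typing`)::
+
+      from dataclasses import dataclass
+      from typing import Annotated
+
+      @dataclass
+      class ValueRange:
+          lo: int
+          hi: int
+
+      def check_value_range(annotation: object, value: int) -> None:
+          # Annotated aliases expose their metadata tuple via __metadata__;
+          # annotations that carry no metadata are silently ignored.
+          for metadata in getattr(annotation, "__metadata__", ()):
+              if isinstance(metadata, ValueRange):
+                  assert metadata.lo <= value <= metadata.hi
+
+      check_value_range(Annotated[int, ValueRange(0, 10)], 5)   # passes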
+ + If a library or tool encounters an annotation ``Annotated[T, x]`` and has + no special logic for the metadata, it should ignore the metadata and simply + treat the annotation as ``T``. As such, ``Annotated`` can be useful for code + that wants to use annotations for purposes outside Python's static typing + system. + + Using ``Annotated[T, x]`` as an annotation still allows for static + typechecking of ``T``, as type checkers will simply ignore the metadata ``x``. + In this way, ``Annotated`` differs from the + :func:`@no_type_check ` decorator, which can also be used for + adding annotations outside the scope of the typing system, but + completely disables typechecking for a function or class. + + The responsibility of how to interpret the metadata + lies with the tool or library encountering an + ``Annotated`` annotation. A tool or library encountering an ``Annotated`` type + can scan through the metadata elements to determine if they are of interest + (e.g., using :func:`isinstance`). + + .. describe:: Annotated[, ] + + Here is an example of how you might use ``Annotated`` to add metadata to + type annotations if you were doing range analysis: + + .. testcode:: + + @dataclass + class ValueRange: + lo: int + hi: int + + T1 = Annotated[int, ValueRange(-10, 5)] + T2 = Annotated[T1, ValueRange(-20, 3)] + + Details of the syntax: * The first argument to ``Annotated`` must be a valid type - * Multiple type annotations are supported (``Annotated`` supports variadic + * Multiple metadata elements can be supplied (``Annotated`` supports variadic arguments):: - Annotated[int, ValueRange(3, 10), ctype("char")] + @dataclass + class ctype: + kind: str - * ``Annotated`` must be called with at least two arguments ( + Annotated[int, ValueRange(3, 10), ctype("char")] + + It is up to the tool consuming the annotations to decide whether the + client is allowed to add multiple metadata elements to one annotation and how to + merge those annotations. + + * ``Annotated`` must be subscripted with at least two arguments ( ``Annotated[int]`` is not valid) - * The order of the annotations is preserved and matters for equality + * The order of the metadata elements is preserved and matters for equality checks:: - Annotated[int, ValueRange(3, 10), ctype("char")] != Annotated[ - int, ctype("char"), ValueRange(3, 10) - ] + assert Annotated[int, ValueRange(3, 10), ctype("char")] != Annotated[ + int, ctype("char"), ValueRange(3, 10) + ] + + * Nested ``Annotated`` types are flattened. The order of the metadata elements + starts with the innermost annotation:: + + assert Annotated[Annotated[int, ValueRange(3, 10)], ctype("char")] == Annotated[ + int, ValueRange(3, 10), ctype("char") + ] + + * Duplicated metadata elements are not removed:: + + assert Annotated[int, ValueRange(3, 10)] != Annotated[ + int, ValueRange(3, 10), ValueRange(3, 10) + ] + + * ``Annotated`` can be used with nested and generic aliases: + + .. 
testcode:: + + @dataclass + class MaxLen: + value: int + + type Vec[T] = Annotated[list[tuple[T, T]], MaxLen(10)] + + # When used in a type annotation, a type checker will treat "V" the same as + # ``Annotated[list[tuple[int, int]], MaxLen(10)]``: + type V = Vec[int] + + * ``Annotated`` cannot be used with an unpacked :class:`TypeVarTuple`:: - * Nested ``Annotated`` types are flattened, with metadata ordered - starting with the innermost annotation:: + type Variadic[*Ts] = Annotated[*Ts, Ann1] # NOT valid - Annotated[Annotated[int, ValueRange(3, 10)], ctype("char")] == Annotated[ - int, ValueRange(3, 10), ctype("char") - ] + This would be equivalent to:: - * Duplicated annotations are not removed:: + Annotated[T1, T2, T3, ..., Ann1] - Annotated[int, ValueRange(3, 10)] != Annotated[ - int, ValueRange(3, 10), ValueRange(3, 10) - ] + where ``T1``, ``T2``, etc. are :class:`TypeVars `. This would be + invalid: only one type should be passed to Annotated. - * ``Annotated`` can be used with nested and generic aliases:: + * By default, :func:`get_type_hints` strips the metadata from annotations. + Pass ``include_extras=True`` to have the metadata preserved: - T = TypeVar('T') - Vec = Annotated[list[tuple[T, T]], MaxLen(10)] - V = Vec[int] + .. doctest:: - V == Annotated[list[tuple[int, int]], MaxLen(10)] + >>> from typing import Annotated, get_type_hints + >>> def func(x: Annotated[int, "metadata"]) -> None: pass + ... + >>> get_type_hints(func) + {'x': , 'return': } + >>> get_type_hints(func, include_extras=True) + {'x': typing.Annotated[int, 'metadata'], 'return': } + + * At runtime, the metadata associated with an ``Annotated`` type can be + retrieved via the :attr:`!__metadata__` attribute: + + .. doctest:: + + >>> from typing import Annotated + >>> X = Annotated[int, "very", "important", "metadata"] + >>> X + typing.Annotated[int, 'very', 'important', 'metadata'] + >>> X.__metadata__ + ('very', 'important', 'metadata') + + .. seealso:: + + :pep:`593` - Flexible function and variable annotations + The PEP introducing ``Annotated`` to the standard library. .. versionadded:: 3.9 .. data:: TypeGuard - Special typing form used to annotate the return type of a user-defined + Special typing construct for marking user-defined type guard functions. + + ``TypeGuard`` can be used to annotate the return type of a user-defined type guard function. ``TypeGuard`` only accepts a single type argument. At runtime, functions marked this way should return a boolean. @@ -1196,237 +1485,379 @@ These can be used as types in annotations using ``[]``, each having a unique syn .. versionadded:: 3.10 -Building generic types -"""""""""""""""""""""" +.. data:: Unpack -These are not used in annotations. They are building blocks for creating generic types. + Typing operator to conceptually mark an object as having been unpacked. -.. class:: Generic + For example, using the unpack operator ``*`` on a + :ref:`type variable tuple ` is equivalent to using ``Unpack`` + to mark the type variable tuple as having been unpacked:: - Abstract base class for generic types. + Ts = TypeVarTuple('Ts') + tup: tuple[*Ts] + # Effectively does: + tup: tuple[Unpack[Ts]] - A generic type is typically declared by inheriting from an - instantiation of this class with one or more type variables. - For example, a generic mapping type might be defined as:: + In fact, ``Unpack`` can be used interchangeably with ``*`` in the context + of :class:`typing.TypeVarTuple ` and + :class:`builtins.tuple ` types. 
You might see ``Unpack`` being used + explicitly in older versions of Python, where ``*`` couldn't be used in + certain places:: - class Mapping(Generic[KT, VT]): - def __getitem__(self, key: KT) -> VT: - ... - # Etc. + # In older versions of Python, TypeVarTuple and Unpack + # are located in the `typing_extensions` backports package. + from typing_extensions import TypeVarTuple, Unpack - This class can then be used as follows:: + Ts = TypeVarTuple('Ts') + tup: tuple[*Ts] # Syntax error on Python <= 3.10! + tup: tuple[Unpack[Ts]] # Semantically equivalent, and backwards-compatible - X = TypeVar('X') - Y = TypeVar('Y') + ``Unpack`` can also be used along with :class:`typing.TypedDict` for typing + ``**kwargs`` in a function signature:: - def lookup_name(mapping: Mapping[X, Y], key: X, default: Y) -> Y: - try: - return mapping[key] - except KeyError: - return default + from typing import TypedDict, Unpack -.. class:: TypeVar + class Movie(TypedDict): + name: str + year: int + + # This function expects two keyword arguments - `name` of type `str` + # and `year` of type `int`. + def foo(**kwargs: Unpack[Movie]): ... + + See :pep:`692` for more details on using ``Unpack`` for ``**kwargs`` typing. + + .. versionadded:: 3.11 + +Building generic types and type aliases +""""""""""""""""""""""""""""""""""""""" + +The following classes should not be used directly as annotations. +Their intended purpose is to be building blocks +for creating generic types and type aliases. + +These objects can be created through special syntax +(:ref:`type parameter lists ` and the :keyword:`type` statement). +For compatibility with Python 3.11 and earlier, they can also be created +without the dedicated syntax, as documented below. + +.. class:: Generic + + Abstract base class for generic types. + + A generic type is typically declared by adding a list of type parameters + after the class name:: + + class Mapping[KT, VT]: + def __getitem__(self, key: KT) -> VT: + ... + # Etc. + + Such a class implicitly inherits from ``Generic``. + The runtime semantics of this syntax are discussed in the + :ref:`Language Reference `. + + This class can then be used as follows:: + + def lookup_name[X, Y](mapping: Mapping[X, Y], key: X, default: Y) -> Y: + try: + return mapping[key] + except KeyError: + return default + + Here the brackets after the function name indicate a + :ref:`generic function `. + + For backwards compatibility, generic classes can also be + declared by explicitly inheriting from + ``Generic``. In this case, the type parameters must be declared + separately:: + + KT = TypeVar('KT') + VT = TypeVar('VT') + + class Mapping(Generic[KT, VT]): + def __getitem__(self, key: KT) -> VT: + ... + # Etc. + +.. _typevar: + +.. class:: TypeVar(name, *constraints, bound=None, covariant=False, contravariant=False, infer_variance=False) + + Type variable. + + The preferred way to construct a type variable is via the dedicated syntax + for :ref:`generic functions `, + :ref:`generic classes `, and + :ref:`generic type aliases `:: + + class Sequence[T]: # T is a TypeVar + ... - Type variable. + This syntax can also be used to create bound and constrained type + variables:: - Usage:: + class StrSequence[S: str]: # S is a TypeVar bound to str + ... + + + class StrOrBytesSequence[A: (str, bytes)]: # A is a TypeVar constrained to str or bytes + ... 
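+
+   As a minimal sketch of what these declarations mean for callers
+   (``StrSequence`` and ``StrOrBytesSequence`` are the purely illustrative
+   classes defined above), a type checker accepts parameterizations that
+   satisfy the bound or the constraints, and rejects ones that do not::
+
+      class MyStr(str):
+          ...
+
+      x: StrSequence[MyStr]          # OK: MyStr is a subtype of the bound (str)
+      y: StrOrBytesSequence[bytes]   # OK: bytes is one of the listed constraints
+      z: StrOrBytesSequence[int]     # type checker error: int is neither str nor bytes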
+ + However, if desired, reusable type variables can also be constructed manually, like so:: T = TypeVar('T') # Can be anything S = TypeVar('S', bound=str) # Can be any subtype of str A = TypeVar('A', str, bytes) # Must be exactly str or bytes - Type variables exist primarily for the benefit of static type - checkers. They serve as the parameters for generic types as well - as for generic function definitions. See :class:`Generic` for more - information on generic types. Generic functions work as follows:: + Type variables exist primarily for the benefit of static type + checkers. They serve as the parameters for generic types as well + as for generic function and type alias definitions. + See :class:`Generic` for more + information on generic types. Generic functions work as follows:: - def repeat(x: T, n: int) -> Sequence[T]: - """Return a list containing n references to x.""" - return [x]*n + def repeat[T](x: T, n: int) -> Sequence[T]: + """Return a list containing n references to x.""" + return [x]*n - def print_capitalized(x: S) -> S: - """Print x capitalized, and return x.""" - print(x.capitalize()) - return x + def print_capitalized[S: str](x: S) -> S: + """Print x capitalized, and return x.""" + print(x.capitalize()) + return x - def concatenate(x: A, y: A) -> A: - """Add two strings or bytes objects together.""" - return x + y + def concatenate[A: (str, bytes)](x: A, y: A) -> A: + """Add two strings or bytes objects together.""" + return x + y - Note that type variables can be *bound*, *constrained*, or neither, but - cannot be both bound *and* constrained. + Note that type variables can be *bound*, *constrained*, or neither, but + cannot be both bound *and* constrained. - Bound type variables and constrained type variables have different - semantics in several important ways. Using a *bound* type variable means - that the ``TypeVar`` will be solved using the most specific type possible:: + The variance of type variables is inferred by type checkers when they are created + through the :ref:`type parameter syntax ` or when + ``infer_variance=True`` is passed. + Manually created type variables may be explicitly marked covariant or contravariant by passing + ``covariant=True`` or ``contravariant=True``. + By default, manually created type variables are invariant. + See :pep:`484` and :pep:`695` for more details. - x = print_capitalized('a string') - reveal_type(x) # revealed type is str + Bound type variables and constrained type variables have different + semantics in several important ways. 
Using a *bound* type variable means + that the ``TypeVar`` will be solved using the most specific type possible:: - class StringSubclass(str): - pass + x = print_capitalized('a string') + reveal_type(x) # revealed type is str - y = print_capitalized(StringSubclass('another string')) - reveal_type(y) # revealed type is StringSubclass + class StringSubclass(str): + pass - z = print_capitalized(45) # error: int is not a subtype of str + y = print_capitalized(StringSubclass('another string')) + reveal_type(y) # revealed type is StringSubclass - Type variables can be bound to concrete types, abstract types (ABCs or - protocols), and even unions of types:: + z = print_capitalized(45) # error: int is not a subtype of str - U = TypeVar('U', bound=str|bytes) # Can be any subtype of the union str|bytes - V = TypeVar('V', bound=SupportsAbs) # Can be anything with an __abs__ method + Type variables can be bound to concrete types, abstract types (ABCs or + protocols), and even unions of types:: - Using a *constrained* type variable, however, means that the ``TypeVar`` - can only ever be solved as being exactly one of the constraints given:: + # Can be anything with an __abs__ method + def print_abs[T: SupportsAbs](arg: T) -> None: + print("Absolute value:", abs(arg)) - a = concatenate('one', 'two') - reveal_type(a) # revealed type is str + U = TypeVar('U', bound=str|bytes) # Can be any subtype of the union str|bytes + V = TypeVar('V', bound=SupportsAbs) # Can be anything with an __abs__ method - b = concatenate(StringSubclass('one'), StringSubclass('two')) - reveal_type(b) # revealed type is str, despite StringSubclass being passed in + .. _typing-constrained-typevar: - c = concatenate('one', b'two') # error: type variable 'A' can be either str or bytes in a function call, but not both + Using a *constrained* type variable, however, means that the ``TypeVar`` + can only ever be solved as being exactly one of the constraints given:: - At runtime, ``isinstance(x, T)`` will raise :exc:`TypeError`. In general, - :func:`isinstance` and :func:`issubclass` should not be used with types. + a = concatenate('one', 'two') + reveal_type(a) # revealed type is str - Type variables may be marked covariant or contravariant by passing - ``covariant=True`` or ``contravariant=True``. See :pep:`484` for more - details. By default, type variables are invariant. + b = concatenate(StringSubclass('one'), StringSubclass('two')) + reveal_type(b) # revealed type is str, despite StringSubclass being passed in -.. class:: TypeVarTuple + c = concatenate('one', b'two') # error: type variable 'A' can be either str or bytes in a function call, but not both - Type variable tuple. A specialized form of :class:`type variable ` - that enables *variadic* generics. + At runtime, ``isinstance(x, T)`` will raise :exc:`TypeError`. - A normal type variable enables parameterization with a single type. A type - variable tuple, in contrast, allows parameterization with an - *arbitrary* number of types by acting like an *arbitrary* number of type - variables wrapped in a tuple. For example:: + .. attribute:: __name__ - T = TypeVar('T') - Ts = TypeVarTuple('Ts') + The name of the type variable. - def move_first_element_to_last(tup: tuple[T, *Ts]) -> tuple[*Ts, T]: - return (*tup[1:], tup[0]) + .. attribute:: __covariant__ - # T is bound to int, Ts is bound to () - # Return value is (1,), which has type tuple[int] - move_first_element_to_last(tup=(1,)) + Whether the type var has been explicitly marked as covariant. 
- # T is bound to int, Ts is bound to (str,) - # Return value is ('spam', 1), which has type tuple[str, int] - move_first_element_to_last(tup=(1, 'spam')) + .. attribute:: __contravariant__ - # T is bound to int, Ts is bound to (str, float) - # Return value is ('spam', 3.0, 1), which has type tuple[str, float, int] - move_first_element_to_last(tup=(1, 'spam', 3.0)) + Whether the type var has been explicitly marked as contravariant. - # This fails to type check (and fails at runtime) - # because tuple[()] is not compatible with tuple[T, *Ts] - # (at least one element is required) - move_first_element_to_last(tup=()) + .. attribute:: __infer_variance__ - Note the use of the unpacking operator ``*`` in ``tuple[T, *Ts]``. - Conceptually, you can think of ``Ts`` as a tuple of type variables - ``(T1, T2, ...)``. ``tuple[T, *Ts]`` would then become - ``tuple[T, *(T1, T2, ...)]``, which is equivalent to - ``tuple[T, T1, T2, ...]``. (Note that in older versions of Python, you might - see this written using :data:`Unpack ` instead, as - ``Unpack[Ts]``.) + Whether the type variable's variance should be inferred by type checkers. - Type variable tuples must *always* be unpacked. This helps distinguish type - variable types from normal type variables:: + .. versionadded:: 3.12 - x: Ts # Not valid - x: tuple[Ts] # Not valid - x: tuple[*Ts] # The correct way to to do it + .. attribute:: __bound__ - Type variable tuples can be used in the same contexts as normal type - variables. For example, in class definitions, arguments, and return types:: + The bound of the type variable, if any. - Shape = TypeVarTuple('Shape') - class Array(Generic[*Shape]): - def __getitem__(self, key: tuple[*Shape]) -> float: ... - def __abs__(self) -> "Array[*Shape]": ... - def get_shape(self) -> tuple[*Shape]: ... + .. versionchanged:: 3.12 - Type variable tuples can be happily combined with normal type variables:: + For type variables created through :ref:`type parameter syntax `, + the bound is evaluated only when the attribute is accessed, not when + the type variable is created (see :ref:`lazy-evaluation`). - DType = TypeVar('DType') + .. attribute:: __constraints__ - class Array(Generic[DType, *Shape]): # This is fine - pass + A tuple containing the constraints of the type variable, if any. - class Array2(Generic[*Shape, DType]): # This would also be fine - pass + .. versionchanged:: 3.12 - float_array_1d: Array[float, Height] = Array() # Totally fine - int_array_2d: Array[int, Height, Width] = Array() # Yup, fine too + For type variables created through :ref:`type parameter syntax `, + the constraints are evaluated only when the attribute is accessed, not when + the type variable is created (see :ref:`lazy-evaluation`). - However, note that at most one type variable tuple may appear in a single - list of type arguments or type parameters:: + .. versionchanged:: 3.12 - x: tuple[*Ts, *Ts] # Not valid - class Array(Generic[*Shape, *Shape]): # Not valid - pass + Type variables can now be declared using the + :ref:`type parameter ` syntax introduced by :pep:`695`. + The ``infer_variance`` parameter was added. - Finally, an unpacked type variable tuple can be used as the type annotation - of ``*args``:: +.. _typevartuple: - def call_soon( - callback: Callable[[*Ts], None], - *args: *Ts - ) -> None: - ... - callback(*args) +.. class:: TypeVarTuple(name) - In contrast to non-unpacked annotations of ``*args`` - e.g. 
``*args: int``, - which would specify that *all* arguments are ``int`` - ``*args: *Ts`` - enables reference to the types of the *individual* arguments in ``*args``. - Here, this allows us to ensure the types of the ``*args`` passed - to ``call_soon`` match the types of the (positional) arguments of - ``callback``. + Type variable tuple. A specialized form of :ref:`type variable ` + that enables *variadic* generics. - See :pep:`646` for more details on type variable tuples. + Type variable tuples can be declared in :ref:`type parameter lists ` + using a single asterisk (``*``) before the name:: - .. versionadded:: 3.11 + def move_first_element_to_last[T, *Ts](tup: tuple[T, *Ts]) -> tuple[*Ts, T]: + return (*tup[1:], tup[0]) -.. data:: Unpack + Or by explicitly invoking the ``TypeVarTuple`` constructor:: - A typing operator that conceptually marks an object as having been - unpacked. For example, using the unpack operator ``*`` on a - :class:`type variable tuple ` is equivalent to using ``Unpack`` - to mark the type variable tuple as having been unpacked:: + T = TypeVar("T") + Ts = TypeVarTuple("Ts") - Ts = TypeVarTuple('Ts') - tup: tuple[*Ts] - # Effectively does: - tup: tuple[Unpack[Ts]] + def move_first_element_to_last(tup: tuple[T, *Ts]) -> tuple[*Ts, T]: + return (*tup[1:], tup[0]) - In fact, ``Unpack`` can be used interchangeably with ``*`` in the context - of types. You might see ``Unpack`` being used explicitly in older versions - of Python, where ``*`` couldn't be used in certain places:: + A normal type variable enables parameterization with a single type. A type + variable tuple, in contrast, allows parameterization with an + *arbitrary* number of types by acting like an *arbitrary* number of type + variables wrapped in a tuple. For example:: - # In older versions of Python, TypeVarTuple and Unpack - # are located in the `typing_extensions` backports package. - from typing_extensions import TypeVarTuple, Unpack + # T is bound to int, Ts is bound to () + # Return value is (1,), which has type tuple[int] + move_first_element_to_last(tup=(1,)) - Ts = TypeVarTuple('Ts') - tup: tuple[*Ts] # Syntax error on Python <= 3.10! - tup: tuple[Unpack[Ts]] # Semantically equivalent, and backwards-compatible + # T is bound to int, Ts is bound to (str,) + # Return value is ('spam', 1), which has type tuple[str, int] + move_first_element_to_last(tup=(1, 'spam')) + + # T is bound to int, Ts is bound to (str, float) + # Return value is ('spam', 3.0, 1), which has type tuple[str, float, int] + move_first_element_to_last(tup=(1, 'spam', 3.0)) + + # This fails to type check (and fails at runtime) + # because tuple[()] is not compatible with tuple[T, *Ts] + # (at least one element is required) + move_first_element_to_last(tup=()) + + Note the use of the unpacking operator ``*`` in ``tuple[T, *Ts]``. + Conceptually, you can think of ``Ts`` as a tuple of type variables + ``(T1, T2, ...)``. ``tuple[T, *Ts]`` would then become + ``tuple[T, *(T1, T2, ...)]``, which is equivalent to + ``tuple[T, T1, T2, ...]``. (Note that in older versions of Python, you might + see this written using :data:`Unpack ` instead, as + ``Unpack[Ts]``.) + + Type variable tuples must *always* be unpacked. This helps distinguish type + variable tuples from normal type variables:: + + x: Ts # Not valid + x: tuple[Ts] # Not valid + x: tuple[*Ts] # The correct way to do it + + Type variable tuples can be used in the same contexts as normal type + variables. 
For example, in class definitions, arguments, and return types:: + + class Array[*Shape]: + def __getitem__(self, key: tuple[*Shape]) -> float: ... + def __abs__(self) -> "Array[*Shape]": ... + def get_shape(self) -> tuple[*Shape]: ... + + Type variable tuples can be happily combined with normal type variables: + + .. testcode:: + + class Array[DType, *Shape]: # This is fine + pass + + class Array2[*Shape, DType]: # This would also be fine + pass + + class Height: ... + class Width: ... + + float_array_1d: Array[float, Height] = Array() # Totally fine + int_array_2d: Array[int, Height, Width] = Array() # Yup, fine too + + However, note that at most one type variable tuple may appear in a single + list of type arguments or type parameters:: + + x: tuple[*Ts, *Ts] # Not valid + class Array[*Shape, *Shape]: # Not valid + pass + + Finally, an unpacked type variable tuple can be used as the type annotation + of ``*args``:: + + def call_soon[*Ts]( + callback: Callable[[*Ts], None], + *args: *Ts + ) -> None: + ... + callback(*args) + + In contrast to non-unpacked annotations of ``*args`` - e.g. ``*args: int``, + which would specify that *all* arguments are ``int`` - ``*args: *Ts`` + enables reference to the types of the *individual* arguments in ``*args``. + Here, this allows us to ensure the types of the ``*args`` passed + to ``call_soon`` match the types of the (positional) arguments of + ``callback``. + + See :pep:`646` for more details on type variable tuples. + + .. attribute:: __name__ + + The name of the type variable tuple. .. versionadded:: 3.11 + .. versionchanged:: 3.12 + + Type variable tuples can now be declared using the + :ref:`type parameter ` syntax introduced by :pep:`695`. + .. class:: ParamSpec(name, *, bound=None, covariant=False, contravariant=False) Parameter specification variable. A specialized version of - :class:`type variables `. + :ref:`type variables `. - Usage:: + In :ref:`type parameter lists `, parameter specifications + can be declared with two asterisks (``**``):: + + type IntFunc[**P] = Callable[P, int] + + For compatibility with Python 3.11 and earlier, ``ParamSpec`` objects + can also be created as follows:: P = ParamSpec('P') @@ -1443,13 +1874,9 @@ These are not used in annotations. They are building blocks for creating generic new callable returned by it have inter-dependent type parameters:: from collections.abc import Callable - from typing import TypeVar, ParamSpec import logging - T = TypeVar('T') - P = ParamSpec('P') - - def add_logging(f: Callable[P, T]) -> Callable[P, T]: + def add_logging[T, **P](f: Callable[P, T]) -> Callable[P, T]: '''A type-safe decorator to add logging to a function.''' def inner(*args: P.args, **kwargs: P.kwargs) -> T: logging.info(f'{f.__name__} was called') @@ -1484,6 +1911,10 @@ These are not used in annotations. They are building blocks for creating generic ``P.args`` and ``P.kwargs`` are instances respectively of :class:`ParamSpecArgs` and :class:`ParamSpecKwargs`. + .. attribute:: __name__ + + The name of the parameter specification. + Parameter specification variables created with ``covariant=True`` or ``contravariant=True`` can be used to declare covariant or contravariant generic types. The ``bound`` argument is also accepted, similar to @@ -1492,14 +1923,20 @@ These are not used in annotations. They are building blocks for creating generic .. versionadded:: 3.10 + .. versionchanged:: 3.12 + + Parameter specifications can now be declared using the + :ref:`type parameter ` syntax introduced by :pep:`695`. + .. 
note:: Only parameter specification variables defined in global scope can be pickled. .. seealso:: * :pep:`612` -- Parameter Specification Variables (the PEP which introduced - ``ParamSpec`` and ``Concatenate``). - * :class:`Callable` and :class:`Concatenate`. + ``ParamSpec`` and ``Concatenate``) + * :data:`Concatenate` + * :ref:`annotating-callables` .. data:: ParamSpecArgs .. data:: ParamSpecKwargs @@ -1510,94 +1947,91 @@ These are not used in annotations. They are building blocks for creating generic for runtime introspection and have no special meaning to static type checkers. Calling :func:`get_origin` on either of these objects will return the - original ``ParamSpec``:: - - P = ParamSpec("P") - get_origin(P.args) # returns P - get_origin(P.kwargs) # returns P - - .. versionadded:: 3.10 + original ``ParamSpec``: + .. doctest:: -.. data:: AnyStr + >>> from typing import ParamSpec, get_origin + >>> P = ParamSpec("P") + >>> get_origin(P.args) is P + True + >>> get_origin(P.kwargs) is P + True - ``AnyStr`` is a :class:`constrained type variable ` defined as - ``AnyStr = TypeVar('AnyStr', str, bytes)``. + .. versionadded:: 3.10 - It is meant to be used for functions that may accept any kind of string - without allowing different kinds of strings to mix. For example:: - def concat(a: AnyStr, b: AnyStr) -> AnyStr: - return a + b +.. class:: TypeAliasType(name, value, *, type_params=()) - concat(u"foo", u"bar") # Ok, output has type 'unicode' - concat(b"foo", b"bar") # Ok, output has type 'bytes' - concat(u"foo", b"bar") # Error, cannot mix unicode and bytes + The type of type aliases created through the :keyword:`type` statement. -.. class:: Protocol(Generic) + Example: - Base class for protocol classes. Protocol classes are defined like this:: + .. doctest:: - class Proto(Protocol): - def meth(self) -> int: - ... + >>> type Alias = int + >>> type(Alias) + - Such classes are primarily used with static type checkers that recognize - structural subtyping (static duck-typing), for example:: + .. versionadded:: 3.12 - class C: - def meth(self) -> int: - return 0 + .. attribute:: __name__ - def func(x: Proto) -> int: - return x.meth() + The name of the type alias: - func(C()) # Passes static type check + .. doctest:: - See :pep:`544` for more details. Protocol classes decorated with - :func:`runtime_checkable` (described later) act as simple-minded runtime - protocols that check only the presence of given attributes, ignoring their - type signatures. + >>> type Alias = int + >>> Alias.__name__ + 'Alias' - Protocol classes can be generic, for example:: + .. attribute:: __module__ - class GenProto(Protocol[T]): - def meth(self) -> T: - ... + The module in which the type alias was defined:: - .. versionadded:: 3.8 + >>> type Alias = int + >>> Alias.__module__ + '__main__' -.. decorator:: runtime_checkable + .. attribute:: __type_params__ - Mark a protocol class as a runtime protocol. + The type parameters of the type alias, or an empty tuple if the alias is + not generic: - Such a protocol can be used with :func:`isinstance` and :func:`issubclass`. - This raises :exc:`TypeError` when applied to a non-protocol class. This - allows a simple-minded structural check, very similar to "one trick ponies" - in :mod:`collections.abc` such as :class:`~collections.abc.Iterable`. For example:: + .. doctest:: - @runtime_checkable - class Closable(Protocol): - def close(self): ... 
+ >>> type ListOrSet[T] = list[T] | set[T] + >>> ListOrSet.__type_params__ + (T,) + >>> type NotGeneric = int + >>> NotGeneric.__type_params__ + () - assert isinstance(open('/some/file'), Closable) + .. attribute:: __value__ - .. note:: + The type alias's value. This is :ref:`lazily evaluated `, + so names used in the definition of the alias are not resolved until the + ``__value__`` attribute is accessed: - :func:`runtime_checkable` will check only the presence of the required - methods, not their type signatures. For example, :class:`ssl.SSLObject` - is a class, therefore it passes an :func:`issubclass` - check against :data:`Callable`. However, the - :meth:`ssl.SSLObject.__init__` method exists only to raise a - :exc:`TypeError` with a more informative message, therefore making - it impossible to call (instantiate) :class:`ssl.SSLObject`. + .. doctest:: - .. versionadded:: 3.8 + >>> type Mutually = Recursive + >>> type Recursive = Mutually + >>> Mutually + Mutually + >>> Recursive + Recursive + >>> Mutually.__value__ + Recursive + >>> Recursive.__value__ + Mutually Other special directives """""""""""""""""""""""" -These are not used in annotations. They are building blocks for declaring types. +These functions and classes should not be used directly as annotations. +Their intended purpose is to be building blocks for creating and declaring +types. .. class:: NamedTuple @@ -1642,12 +2076,18 @@ These are not used in annotations. They are building blocks for declaring types. ``NamedTuple`` subclasses can be generic:: - class Group(NamedTuple, Generic[T]): + class Group[T](NamedTuple): key: T group: list[T] Backward-compatible usage:: + # For creating a generic NamedTuple on Python 3.11 or lower + class Group(NamedTuple, Generic[T]): + key: T + group: list[T] + + # A functional syntax is also supported Employee = NamedTuple('Employee', [('name', str), ('id', int)]) .. versionchanged:: 3.6 @@ -1667,21 +2107,153 @@ These are not used in annotations. They are building blocks for declaring types. .. versionchanged:: 3.11 Added support for generic namedtuples. + .. deprecated-removed:: 3.13 3.15 + The undocumented keyword argument syntax for creating NamedTuple classes + (``NT = NamedTuple("NT", x=int)``) is deprecated, and will be disallowed + in 3.15. Use the class-based syntax or the functional syntax instead. + + .. deprecated-removed:: 3.13 3.15 + When using the functional syntax to create a NamedTuple class, failing to + pass a value to the 'fields' parameter (``NT = NamedTuple("NT")``) is + deprecated. Passing ``None`` to the 'fields' parameter + (``NT = NamedTuple("NT", None)``) is also deprecated. Both will be + disallowed in Python 3.15. To create a NamedTuple class with 0 fields, + use ``class NT(NamedTuple): pass`` or ``NT = NamedTuple("NT", [])``. + .. class:: NewType(name, tp) - A helper class to indicate a distinct type to a typechecker, - see :ref:`distinct`. At runtime it returns an object that returns - its argument when called. + Helper class to create low-overhead :ref:`distinct types `. + + A ``NewType`` is considered a distinct type by a typechecker. At runtime, + however, calling a ``NewType`` returns its argument unchanged. + Usage:: - UserId = NewType('UserId', int) - first_user = UserId(1) + UserId = NewType('UserId', int) # Declare the NewType "UserId" + first_user = UserId(1) # "UserId" returns the argument unchanged at runtime + + .. attribute:: __module__ + + The module in which the new type is defined. + + .. attribute:: __name__ + + The name of the new type. 
+ + .. attribute:: __supertype__ + + The type that the new type is based on. .. versionadded:: 3.5.2 .. versionchanged:: 3.10 ``NewType`` is now a class rather than a function. +.. class:: Protocol(Generic) + + Base class for protocol classes. + + Protocol classes are defined like this:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See :pep:`544` for more details. Protocol classes decorated with + :func:`runtime_checkable` (described later) act as simple-minded runtime + protocols that check only the presence of given attributes, ignoring their + type signatures. + + Protocol classes can be generic, for example:: + + class GenProto[T](Protocol): + def meth(self) -> T: + ... + + In code that needs to be compatible with Python 3.11 or older, generic + Protocols can be written as follows:: + + T = TypeVar("T") + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + + .. versionadded:: 3.8 + +.. decorator:: runtime_checkable + + Mark a protocol class as a runtime protocol. + + Such a protocol can be used with :func:`isinstance` and :func:`issubclass`. + This raises :exc:`TypeError` when applied to a non-protocol class. This + allows a simple-minded structural check, very similar to "one trick ponies" + in :mod:`collections.abc` such as :class:`~collections.abc.Iterable`. For example:: + + @runtime_checkable + class Closable(Protocol): + def close(self): ... + + assert isinstance(open('/some/file'), Closable) + + @runtime_checkable + class Named(Protocol): + name: str + + import threading + assert isinstance(threading.Thread(name='Bob'), Named) + + .. note:: + + :func:`!runtime_checkable` will check only the presence of the required + methods or attributes, not their type signatures or types. + For example, :class:`ssl.SSLObject` + is a class, therefore it passes an :func:`issubclass` + check against :ref:`Callable `. However, the + ``ssl.SSLObject.__init__`` method exists only to raise a + :exc:`TypeError` with a more informative message, therefore making + it impossible to call (instantiate) :class:`ssl.SSLObject`. + + .. note:: + + An :func:`isinstance` check against a runtime-checkable protocol can be + surprisingly slow compared to an ``isinstance()`` check against + a non-protocol class. Consider using alternative idioms such as + :func:`hasattr` calls for structural checks in performance-sensitive + code. + + .. versionadded:: 3.8 + + .. versionchanged:: 3.12 + The internal implementation of :func:`isinstance` checks against + runtime-checkable protocols now uses :func:`inspect.getattr_static` + to look up attributes (previously, :func:`hasattr` was used). + As a result, some objects which used to be considered instances + of a runtime-checkable protocol may no longer be considered instances + of that protocol on Python 3.12+, and vice versa. + Most users are unlikely to be affected by this change. + + .. versionchanged:: 3.12 + The members of a runtime-checkable protocol are now considered "frozen" + at runtime as soon as the class has been created. Monkey-patching + attributes onto a runtime-checkable protocol will still work, but will + have no impact on :func:`isinstance` checks comparing objects to the + protocol. See :ref:`"What's new in Python 3.12" ` + for more details. + + .. 
class:: TypedDict(dict) Special construct to add type hints to a dictionary. @@ -1703,25 +2275,14 @@ These are not used in annotations. They are building blocks for declaring types. assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') - To allow using this feature with older versions of Python that do not - support :pep:`526`, ``TypedDict`` supports two additional equivalent - syntactic forms: - - * Using a literal :class:`dict` as the second argument:: + An alternative way to create a ``TypedDict`` is by using + function-call syntax. The second argument must be a literal :class:`dict`:: Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) - * Using keyword arguments:: - - Point2D = TypedDict('Point2D', x=int, y=int, label=str) - - .. deprecated-removed:: 3.11 3.13 - The keyword-argument syntax is deprecated in 3.11 and will be removed - in 3.13. It may also be unsupported by static type checkers. - - The functional syntax should also be used when any of the keys are not valid - :ref:`identifiers `, for example because they are keywords or contain hyphens. - Example:: + This functional syntax allows defining keys which are not valid + :ref:`identifiers `, for example because they are + keywords or contain hyphens:: # raises SyntaxError class Point2D(TypedDict): @@ -1805,11 +2366,19 @@ These are not used in annotations. They are building blocks for declaring types. class XZ(X, Z): pass # raises TypeError - T = TypeVar('T') - class XT(X, Generic[T]): pass # raises TypeError - A ``TypedDict`` can be generic:: + class Group[T](TypedDict): + key: T + group: list[T] + + To create a generic ``TypedDict`` that is compatible with Python 3.11 + or lower, inherit from :class:`Generic` explicitly: + + .. testcode:: + + T = TypeVar("T") + class Group(TypedDict, Generic[T]): key: T group: list[T] @@ -1821,7 +2390,9 @@ These are not used in annotations. They are building blocks for declaring types. .. attribute:: __total__ ``Point2D.__total__`` gives the value of the ``total`` argument. - Example:: + Example: + + .. doctest:: >>> from typing import TypedDict >>> class Point2D(TypedDict): pass @@ -1834,6 +2405,13 @@ These are not used in annotations. They are building blocks for declaring types. >>> Point3D.__total__ True + This attribute reflects *only* the value of the ``total`` argument + to the current ``TypedDict`` class, not whether the class is semantically + total. For example, a ``TypedDict`` with ``__total__`` set to True may + have keys marked with :data:`NotRequired`, or it may inherit from another + ``TypedDict`` with ``total=False``. Therefore, it is generally better to use + :attr:`__required_keys__` and :attr:`__optional_keys__` for introspection. + .. attribute:: __required_keys__ .. versionadded:: 3.9 @@ -1851,7 +2429,9 @@ These are not used in annotations. They are building blocks for declaring types. non-required keys in the same ``TypedDict`` . This is done by declaring a ``TypedDict`` with one value for the ``total`` argument and then inheriting from it in another ``TypedDict`` with a different value for - ``total``:: + ``total``: + + .. doctest:: >>> class Point2D(TypedDict, total=False): ... x: int @@ -1867,6 +2447,14 @@ These are not used in annotations. They are building blocks for declaring types. .. versionadded:: 3.9 + .. note:: + + If ``from __future__ import annotations`` is used or if annotations + are given as strings, annotations are not evaluated when the + ``TypedDict`` is defined. 
Therefore, the runtime introspection that + ``__required_keys__`` and ``__optional_keys__`` rely on may not work + properly, and the values of the attributes may be incorrect. + See :pep:`589` for more examples and detailed rules of using ``TypedDict``. .. versionadded:: 3.8 @@ -1878,986 +2466,1241 @@ These are not used in annotations. They are building blocks for declaring types. .. versionchanged:: 3.11 Added support for generic ``TypedDict``\ s. -Generic concrete collections ----------------------------- + .. versionchanged:: 3.13 + Removed support for the keyword-argument method of creating ``TypedDict``\ s. -Corresponding to built-in types -""""""""""""""""""""""""""""""" + .. deprecated-removed:: 3.13 3.15 + When using the functional syntax to create a TypedDict class, failing to + pass a value to the 'fields' parameter (``TD = TypedDict("TD")``) is + deprecated. Passing ``None`` to the 'fields' parameter + (``TD = TypedDict("TD", None)``) is also deprecated. Both will be + disallowed in Python 3.15. To create a TypedDict class with 0 fields, + use ``class TD(TypedDict): pass`` or ``TD = TypedDict("TD", {})``. -.. class:: Dict(dict, MutableMapping[KT, VT]) +Protocols +--------- - A generic version of :class:`dict`. - Useful for annotating return types. To annotate arguments it is preferred - to use an abstract collection type such as :class:`Mapping`. +The following protocols are provided by the typing module. All are decorated +with :func:`@runtime_checkable `. - This type can be used as follows:: +.. class:: SupportsAbs - def count_words(text: str) -> Dict[str, int]: - ... + An ABC with one abstract method ``__abs__`` that is covariant + in its return type. - .. deprecated:: 3.9 - :class:`builtins.dict ` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. +.. class:: SupportsBytes -.. class:: List(list, MutableSequence[T]) + An ABC with one abstract method ``__bytes__``. - Generic version of :class:`list`. - Useful for annotating return types. To annotate arguments it is preferred - to use an abstract collection type such as :class:`Sequence` or - :class:`Iterable`. +.. class:: SupportsComplex - This type may be used as follows:: - - T = TypeVar('T', int, float) + An ABC with one abstract method ``__complex__``. - def vec2(x: T, y: T) -> List[T]: - return [x, y] +.. class:: SupportsFloat - def keep_positives(vector: Sequence[T]) -> List[T]: - return [item for item in vector if item > 0] + An ABC with one abstract method ``__float__``. - .. deprecated:: 3.9 - :class:`builtins.list ` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. +.. class:: SupportsIndex -.. class:: Set(set, MutableSet[T]) + An ABC with one abstract method ``__index__``. - A generic version of :class:`builtins.set `. - Useful for annotating return types. To annotate arguments it is preferred - to use an abstract collection type such as :class:`AbstractSet`. + .. versionadded:: 3.8 - .. deprecated:: 3.9 - :class:`builtins.set ` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. +.. class:: SupportsInt -.. class:: FrozenSet(frozenset, AbstractSet[T_co]) + An ABC with one abstract method ``__int__``. - A generic version of :class:`builtins.frozenset `. +.. class:: SupportsRound - .. deprecated:: 3.9 - :class:`builtins.frozenset ` - now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + An ABC with one abstract method ``__round__`` + that is covariant in its return type. -.. 
note:: :data:`Tuple` is a special form. +ABCs for working with IO +------------------------ -Corresponding to types in :mod:`collections` -"""""""""""""""""""""""""""""""""""""""""""" +.. class:: IO + TextIO + BinaryIO -.. class:: DefaultDict(collections.defaultdict, MutableMapping[KT, VT]) + Generic type ``IO[AnyStr]`` and its subclasses ``TextIO(IO[str])`` + and ``BinaryIO(IO[bytes])`` + represent the types of I/O streams such as returned by + :func:`open`. - A generic version of :class:`collections.defaultdict`. +Functions and decorators +------------------------ - .. versionadded:: 3.5.2 +.. function:: cast(typ, val) - .. deprecated:: 3.9 - :class:`collections.defaultdict` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + Cast a value to a type. -.. class:: OrderedDict(collections.OrderedDict, MutableMapping[KT, VT]) + This returns the value unchanged. To the type checker this + signals that the return value has the designated type, but at + runtime we intentionally don't check anything (we want this + to be as fast as possible). - A generic version of :class:`collections.OrderedDict`. +.. function:: assert_type(val, typ, /) - .. versionadded:: 3.7.2 + Ask a static type checker to confirm that *val* has an inferred type of *typ*. - .. deprecated:: 3.9 - :class:`collections.OrderedDict` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + At runtime this does nothing: it returns the first argument unchanged with no + checks or side effects, no matter the actual type of the argument. -.. class:: ChainMap(collections.ChainMap, MutableMapping[KT, VT]) + When a static type checker encounters a call to ``assert_type()``, it + emits an error if the value is not of the specified type:: - A generic version of :class:`collections.ChainMap`. + def greet(name: str) -> None: + assert_type(name, str) # OK, inferred type of `name` is `str` + assert_type(name, int) # type checker error - .. versionadded:: 3.5.4 - .. versionadded:: 3.6.1 + This function is useful for ensuring the type checker's understanding of a + script is in line with the developer's intentions:: - .. deprecated:: 3.9 - :class:`collections.ChainMap` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + def complex_function(arg: object): + # Do some complex type-narrowing logic, + # after which we hope the inferred type will be `int` + ... + # Test whether the type checker correctly understands our function + assert_type(arg, int) -.. class:: Counter(collections.Counter, Dict[T, int]) + .. versionadded:: 3.11 - A generic version of :class:`collections.Counter`. +.. function:: assert_never(arg, /) - .. versionadded:: 3.5.4 - .. versionadded:: 3.6.1 + Ask a static type checker to confirm that a line of code is unreachable. - .. deprecated:: 3.9 - :class:`collections.Counter` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + Example:: -.. class:: Deque(deque, MutableSequence[T]) + def int_or_str(arg: int | str) -> None: + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _ as unreachable: + assert_never(unreachable) - A generic version of :class:`collections.deque`. + Here, the annotations allow the type checker to infer that the + last case can never execute, because ``arg`` is either + an :class:`int` or a :class:`str`, and both options are covered by + earlier cases. - .. versionadded:: 3.5.4 - .. 
versionadded:: 3.6.1 + If a type checker finds that a call to ``assert_never()`` is + reachable, it will emit an error. For example, if the type annotation + for ``arg`` was instead ``int | str | float``, the type checker would + emit an error pointing out that ``unreachable`` is of type :class:`float`. + For a call to ``assert_never`` to pass type checking, the inferred type of + the argument passed in must be the bottom type, :data:`Never`, and nothing + else. - .. deprecated:: 3.9 - :class:`collections.deque` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + At runtime, this throws an exception when called. -Other concrete types -"""""""""""""""""""" + .. seealso:: + `Unreachable Code and Exhaustiveness Checking + `__ has more + information about exhaustiveness checking with static typing. -.. class:: IO - TextIO - BinaryIO + .. versionadded:: 3.11 - Generic type ``IO[AnyStr]`` and its subclasses ``TextIO(IO[str])`` - and ``BinaryIO(IO[bytes])`` - represent the types of I/O streams such as returned by - :func:`open`. +.. function:: reveal_type(obj, /) - .. deprecated-removed:: 3.8 3.13 - The ``typing.io`` namespace is deprecated and will be removed. - These types should be directly imported from ``typing`` instead. + Reveal the inferred static type of an expression. -.. class:: Pattern - Match + When a static type checker encounters a call to this function, + it emits a diagnostic with the type of the argument. For example:: - These type aliases - correspond to the return types from :func:`re.compile` and - :func:`re.match`. These types (and the corresponding functions) - are generic in ``AnyStr`` and can be made specific by writing - ``Pattern[str]``, ``Pattern[bytes]``, ``Match[str]``, or - ``Match[bytes]``. + x: int = 1 + reveal_type(x) # Revealed type is "builtins.int" - .. deprecated-removed:: 3.8 3.13 - The ``typing.re`` namespace is deprecated and will be removed. - These types should be directly imported from ``typing`` instead. + This can be useful when you want to debug how your type checker + handles a particular piece of code. - .. deprecated:: 3.9 - Classes ``Pattern`` and ``Match`` from :mod:`re` now support ``[]``. - See :pep:`585` and :ref:`types-genericalias`. + The function returns its argument unchanged, which allows using + it within an expression:: -.. class:: Text + x = reveal_type(1) # Revealed type is "builtins.int" - ``Text`` is an alias for ``str``. It is provided to supply a forward - compatible path for Python 2 code: in Python 2, ``Text`` is an alias for - ``unicode``. + Most type checkers support ``reveal_type()`` anywhere, even if the + name is not imported from ``typing``. Importing the name from + ``typing`` allows your code to run without runtime errors and + communicates intent more clearly. - Use ``Text`` to indicate that a value must contain a unicode string in - a manner that is compatible with both Python 2 and Python 3:: + At runtime, this function prints the runtime type of its argument to stderr + and returns it unchanged:: - def add_unicode_checkmark(text: Text) -> Text: - return text + u' \u2713' + x = reveal_type(1) # prints "Runtime type is int" + print(x) # prints "1" - .. versionadded:: 3.5.2 + .. versionadded:: 3.11 - .. deprecated:: 3.11 - Python 2 is no longer supported, and most type checkers also no longer - support type checking Python 2 code. Removal of the alias is not - currently planned, but users are encouraged to use - :class:`str` instead of ``Text`` wherever possible. +.. 
decorator:: dataclass_transform(*, eq_default=True, order_default=False, \ + kw_only_default=False, frozen_default=False, \ + field_specifiers=(), **kwargs) -Abstract Base Classes ---------------------- + Decorator to mark an object as providing + :func:`dataclass `-like behavior. -Corresponding to collections in :mod:`collections.abc` -"""""""""""""""""""""""""""""""""""""""""""""""""""""" + ``dataclass_transform`` may be used to + decorate a class, metaclass, or a function that is itself a decorator. + The presence of ``@dataclass_transform()`` tells a static type checker that the + decorated object performs runtime "magic" that + transforms a class in a similar way to + :func:`@dataclasses.dataclass `. -.. class:: AbstractSet(Collection[T_co]) + Example usage with a decorator function: - A generic version of :class:`collections.abc.Set`. + .. testcode:: - .. deprecated:: 3.9 - :class:`collections.abc.Set` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + @dataclass_transform() + def create_model[T](cls: type[T]) -> type[T]: + ... + return cls -.. class:: ByteString(Sequence[int]) + @create_model + class CustomerModel: + id: int + name: str - A generic version of :class:`collections.abc.ByteString`. + On a base class:: - This type represents the types :class:`bytes`, :class:`bytearray`, - and :class:`memoryview` of byte sequences. + @dataclass_transform() + class ModelBase: ... - As a shorthand for this type, :class:`bytes` can be used to - annotate arguments of any of the types mentioned above. + class CustomerModel(ModelBase): + id: int + name: str - .. deprecated:: 3.9 - :class:`collections.abc.ByteString` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + On a metaclass:: -.. class:: Collection(Sized, Iterable[T_co], Container[T_co]) + @dataclass_transform() + class ModelMeta(type): ... - A generic version of :class:`collections.abc.Collection` + class ModelBase(metaclass=ModelMeta): ... - .. versionadded:: 3.6.0 + class CustomerModel(ModelBase): + id: int + name: str - .. deprecated:: 3.9 - :class:`collections.abc.Collection` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + The ``CustomerModel`` classes defined above will + be treated by type checkers similarly to classes created with + :func:`@dataclasses.dataclass `. + For example, type checkers will assume these classes have + ``__init__`` methods that accept ``id`` and ``name``. -.. class:: Container(Generic[T_co]) + The decorated class, metaclass, or function may accept the following bool + arguments which type checkers will assume have the same effect as they + would have on the + :func:`@dataclasses.dataclass` decorator: ``init``, + ``eq``, ``order``, ``unsafe_hash``, ``frozen``, ``match_args``, + ``kw_only``, and ``slots``. It must be possible for the value of these + arguments (``True`` or ``False``) to be statically evaluated. - A generic version of :class:`collections.abc.Container`. + The arguments to the ``dataclass_transform`` decorator can be used to + customize the default behaviors of the decorated class, metaclass, or + function: - .. deprecated:: 3.9 - :class:`collections.abc.Container` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + :param bool eq_default: + Indicates whether the ``eq`` parameter is assumed to be + ``True`` or ``False`` if it is omitted by the caller. + Defaults to ``True``. -.. 
class:: ItemsView(MappingView, AbstractSet[tuple[KT_co, VT_co]]) + :param bool order_default: + Indicates whether the ``order`` parameter is + assumed to be ``True`` or ``False`` if it is omitted by the caller. + Defaults to ``False``. - A generic version of :class:`collections.abc.ItemsView`. + :param bool kw_only_default: + Indicates whether the ``kw_only`` parameter is + assumed to be ``True`` or ``False`` if it is omitted by the caller. + Defaults to ``False``. - .. deprecated:: 3.9 - :class:`collections.abc.ItemsView` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + :param bool frozen_default: + Indicates whether the ``frozen`` parameter is + assumed to be ``True`` or ``False`` if it is omitted by the caller. + Defaults to ``False``. -.. class:: KeysView(MappingView, AbstractSet[KT_co]) + .. versionadded:: 3.12 - A generic version of :class:`collections.abc.KeysView`. + :param field_specifiers: + Specifies a static list of supported classes + or functions that describe fields, similar to :func:`dataclasses.field`. + Defaults to ``()``. + :type field_specifiers: tuple[Callable[..., Any], ...] - .. deprecated:: 3.9 - :class:`collections.abc.KeysView` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + :param Any \**kwargs: + Arbitrary other keyword arguments are accepted in order to allow for + possible future extensions. -.. class:: Mapping(Collection[KT], Generic[KT, VT_co]) + Type checkers recognize the following optional parameters on field + specifiers: - A generic version of :class:`collections.abc.Mapping`. - This type can be used as follows:: + .. list-table:: **Recognised parameters for field specifiers** + :header-rows: 1 + :widths: 20 80 + + * - Parameter name + - Description + * - ``init`` + - Indicates whether the field should be included in the + synthesized ``__init__`` method. If unspecified, ``init`` defaults to + ``True``. + * - ``default`` + - Provides the default value for the field. + * - ``default_factory`` + - Provides a runtime callback that returns the + default value for the field. If neither ``default`` nor + ``default_factory`` are specified, the field is assumed to have no + default value and must be provided a value when the class is + instantiated. + * - ``factory`` + - An alias for the ``default_factory`` parameter on field specifiers. + * - ``kw_only`` + - Indicates whether the field should be marked as + keyword-only. If ``True``, the field will be keyword-only. If + ``False``, it will not be keyword-only. If unspecified, the value of + the ``kw_only`` parameter on the object decorated with + ``dataclass_transform`` will be used, or if that is unspecified, the + value of ``kw_only_default`` on ``dataclass_transform`` will be used. + * - ``alias`` + - Provides an alternative name for the field. This alternative + name is used in the synthesized ``__init__`` method. - def get_position_in_index(word_list: Mapping[str, int], word: str) -> int: - return word_list[word] + At runtime, this decorator records its arguments in the + ``__dataclass_transform__`` attribute on the decorated object. + It has no other runtime effect. - .. deprecated:: 3.9 - :class:`collections.abc.Mapping` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + See :pep:`681` for more details. -.. class:: MappingView(Sized) + .. versionadded:: 3.11 - A generic version of :class:`collections.abc.MappingView`. +.. _overload: - .. 
deprecated:: 3.9 - :class:`collections.abc.MappingView` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. +.. decorator:: overload -.. class:: MutableMapping(Mapping[KT, VT]) + Decorator for creating overloaded functions and methods. - A generic version of :class:`collections.abc.MutableMapping`. + The ``@overload`` decorator allows describing functions and methods + that support multiple different combinations of argument types. A series + of ``@overload``-decorated definitions must be followed by exactly one + non-``@overload``-decorated definition (for the same function/method). - .. deprecated:: 3.9 - :class:`collections.abc.MutableMapping` - now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + ``@overload``-decorated definitions are for the benefit of the + type checker only, since they will be overwritten by the + non-``@overload``-decorated definition. The non-``@overload``-decorated + definition, meanwhile, will be used at + runtime but should be ignored by a type checker. At runtime, calling + an ``@overload``-decorated function directly will raise + :exc:`NotImplementedError`. -.. class:: MutableSequence(Sequence[T]) + An example of overload that gives a more + precise type than can be expressed using a union or a type variable: - A generic version of :class:`collections.abc.MutableSequence`. + .. testcode:: - .. deprecated:: 3.9 - :class:`collections.abc.MutableSequence` - now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + @overload + def process(response: None) -> None: + ... + @overload + def process(response: int) -> tuple[int, str]: + ... + @overload + def process(response: bytes) -> str: + ... + def process(response): + ... # actual implementation goes here -.. class:: MutableSet(AbstractSet[T]) + See :pep:`484` for more details and comparison with other typing semantics. - A generic version of :class:`collections.abc.MutableSet`. + .. versionchanged:: 3.11 + Overloaded functions can now be introspected at runtime using + :func:`get_overloads`. - .. deprecated:: 3.9 - :class:`collections.abc.MutableSet` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. -.. class:: Sequence(Reversible[T_co], Collection[T_co]) +.. function:: get_overloads(func) - A generic version of :class:`collections.abc.Sequence`. + Return a sequence of :func:`@overload `-decorated definitions for + *func*. - .. deprecated:: 3.9 - :class:`collections.abc.Sequence` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + *func* is the function object for the implementation of the + overloaded function. For example, given the definition of ``process`` in + the documentation for :func:`@overload `, + ``get_overloads(process)`` will return a sequence of three function objects + for the three defined overloads. If called on a function with no overloads, + ``get_overloads()`` returns an empty sequence. -.. class:: ValuesView(MappingView, Collection[_VT_co]) + ``get_overloads()`` can be used for introspecting an overloaded function at + runtime. - A generic version of :class:`collections.abc.ValuesView`. + .. versionadded:: 3.11 - .. deprecated:: 3.9 - :class:`collections.abc.ValuesView` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. -Corresponding to other types in :mod:`collections.abc` -"""""""""""""""""""""""""""""""""""""""""""""""""""""" +.. function:: clear_overloads() -.. 
class:: Iterable(Generic[T_co]) + Clear all registered overloads in the internal registry. - A generic version of :class:`collections.abc.Iterable`. + This can be used to reclaim the memory used by the registry. - .. deprecated:: 3.9 - :class:`collections.abc.Iterable` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + .. versionadded:: 3.11 -.. class:: Iterator(Iterable[T_co]) - A generic version of :class:`collections.abc.Iterator`. +.. decorator:: final - .. deprecated:: 3.9 - :class:`collections.abc.Iterator` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + Decorator to indicate final methods and final classes. -.. class:: Generator(Iterator[T_co], Generic[T_co, T_contra, V_co]) + Decorating a method with ``@final`` indicates to a type checker that the + method cannot be overridden in a subclass. Decorating a class with ``@final`` + indicates that it cannot be subclassed. - A generator can be annotated by the generic type - ``Generator[YieldType, SendType, ReturnType]``. For example:: + For example:: - def echo_round() -> Generator[int, float, str]: - sent = yield 0 - while sent >= 0: - sent = yield round(sent) - return 'Done' + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... - Note that unlike many other generics in the typing module, the ``SendType`` - of :class:`Generator` behaves contravariantly, not covariantly or - invariantly. + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... - If your generator will only yield values, set the ``SendType`` and - ``ReturnType`` to ``None``:: + There is no runtime checking of these properties. See :pep:`591` for + more details. - def infinite_stream(start: int) -> Generator[int, None, None]: - while True: - yield start - start += 1 + .. versionadded:: 3.8 - Alternatively, annotate your generator as having a return type of - either ``Iterable[YieldType]`` or ``Iterator[YieldType]``:: + .. versionchanged:: 3.11 + The decorator will now attempt to set a ``__final__`` attribute to ``True`` + on the decorated object. Thus, a check like + ``if getattr(obj, "__final__", False)`` can be used at runtime + to determine whether an object ``obj`` has been marked as final. + If the decorated object does not support setting attributes, + the decorator returns the object unchanged without raising an exception. - def infinite_stream(start: int) -> Iterator[int]: - while True: - yield start - start += 1 - .. deprecated:: 3.9 - :class:`collections.abc.Generator` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. +.. decorator:: no_type_check -.. class:: Hashable + Decorator to indicate that annotations are not type hints. - An alias to :class:`collections.abc.Hashable`. + This works as a class or function :term:`decorator`. With a class, it + applies recursively to all methods and classes defined in that class + (but not to methods defined in its superclasses or subclasses). Type + checkers will ignore all annotations in a function or class with this + decorator. - .. deprecated:: 3.12 - Use :class:`collections.abc.Hashable` directly instead. + ``@no_type_check`` mutates the decorated object in place. -.. class:: Reversible(Iterable[T_co]) +.. decorator:: no_type_check_decorator - A generic version of :class:`collections.abc.Reversible`. + Decorator to give another decorator the :func:`no_type_check` effect. - .. 
deprecated:: 3.9 - :class:`collections.abc.Reversible` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + This wraps the decorator with something that wraps the decorated + function in :func:`no_type_check`. -.. class:: Sized + .. deprecated-removed:: 3.13 3.15 + No type checker ever added support for ``@no_type_check_decorator``. It + is therefore deprecated, and will be removed in Python 3.15. - An alias to :class:`collections.abc.Sized`. +.. decorator:: override - .. deprecated:: 3.12 - Use :class:`collections.abc.Sized` directly instead. + Decorator to indicate that a method in a subclass is intended to override a + method or attribute in a superclass. -Asynchronous programming -"""""""""""""""""""""""" + Type checkers should emit an error if a method decorated with ``@override`` + does not, in fact, override anything. + This helps prevent bugs that may occur when a base class is changed without + an equivalent change to a child class. -.. class:: Coroutine(Awaitable[V_co], Generic[T_co, T_contra, V_co]) + For example: - A generic version of :class:`collections.abc.Coroutine`. - The variance and order of type variables - correspond to those of :class:`Generator`, for example:: + .. testcode:: - from collections.abc import Coroutine - c: Coroutine[list[str], str, int] # Some coroutine defined elsewhere - x = c.send('hi') # Inferred type of 'x' is list[str] - async def bar() -> None: - y = await c # Inferred type of 'y' is int + class Base: + def log_status(self) -> None: + ... - .. versionadded:: 3.5.3 + class Sub(Base): + @override + def log_status(self) -> None: # Okay: overrides Base.log_status + ... - .. deprecated:: 3.9 - :class:`collections.abc.Coroutine` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + @override + def done(self) -> None: # Error reported by type checker + ... -.. class:: AsyncGenerator(AsyncIterator[T_co], Generic[T_co, T_contra]) + There is no runtime checking of this property. - An async generator can be annotated by the generic type - ``AsyncGenerator[YieldType, SendType]``. For example:: + The decorator will attempt to set an ``__override__`` attribute to ``True`` on + the decorated object. Thus, a check like + ``if getattr(obj, "__override__", False)`` can be used at runtime to determine + whether an object ``obj`` has been marked as an override. If the decorated object + does not support setting attributes, the decorator returns the object unchanged + without raising an exception. - async def echo_round() -> AsyncGenerator[int, float]: - sent = yield 0 - while sent >= 0.0: - rounded = await round(sent) - sent = yield rounded + See :pep:`698` for more details. - Unlike normal generators, async generators cannot return a value, so there - is no ``ReturnType`` type parameter. As with :class:`Generator`, the - ``SendType`` behaves contravariantly. + .. versionadded:: 3.12 - If your generator will only yield values, set the ``SendType`` to - ``None``:: - async def infinite_stream(start: int) -> AsyncGenerator[int, None]: - while True: - yield start - start = await increment(start) +.. decorator:: type_check_only - Alternatively, annotate your generator as having a return type of - either ``AsyncIterable[YieldType]`` or ``AsyncIterator[YieldType]``:: + Decorator to mark a class or function as unavailable at runtime. - async def infinite_stream(start: int) -> AsyncIterator[int]: - while True: - yield start - start = await increment(start) + This decorator is itself not available at runtime. 
It is mainly + intended to mark classes that are defined in type stub files if + an implementation returns an instance of a private class:: - .. versionadded:: 3.6.1 + @type_check_only + class Response: # private or not available at runtime + code: int + def get_header(self, name: str) -> str: ... - .. deprecated:: 3.9 - :class:`collections.abc.AsyncGenerator` - now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + def fetch_response() -> Response: ... -.. class:: AsyncIterable(Generic[T_co]) + Note that returning instances of private classes is not recommended. + It is usually preferable to make such classes public. - A generic version of :class:`collections.abc.AsyncIterable`. +Introspection helpers +--------------------- - .. versionadded:: 3.5.2 +.. function:: get_type_hints(obj, globalns=None, localns=None, include_extras=False) - .. deprecated:: 3.9 - :class:`collections.abc.AsyncIterable` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + Return a dictionary containing type hints for a function, method, module + or class object. -.. class:: AsyncIterator(AsyncIterable[T_co]) + This is often the same as ``obj.__annotations__``. In addition, + forward references encoded as string literals are handled by evaluating + them in ``globals`` and ``locals`` namespaces. For a class ``C``, return + a dictionary constructed by merging all the ``__annotations__`` along + ``C.__mro__`` in reverse order. - A generic version of :class:`collections.abc.AsyncIterator`. + The function recursively replaces all ``Annotated[T, ...]`` with ``T``, + unless ``include_extras`` is set to ``True`` (see :class:`Annotated` for + more information). For example: - .. versionadded:: 3.5.2 + .. testcode:: - .. deprecated:: 3.9 - :class:`collections.abc.AsyncIterator` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + class Student(NamedTuple): + name: Annotated[str, 'some marker'] -.. class:: Awaitable(Generic[T_co]) + assert get_type_hints(Student) == {'name': str} + assert get_type_hints(Student, include_extras=False) == {'name': str} + assert get_type_hints(Student, include_extras=True) == { + 'name': Annotated[str, 'some marker'] + } - A generic version of :class:`collections.abc.Awaitable`. + .. note:: - .. versionadded:: 3.5.2 + :func:`get_type_hints` does not work with imported + :ref:`type aliases ` that include forward references. + Enabling postponed evaluation of annotations (:pep:`563`) may remove + the need for most forward references. - .. deprecated:: 3.9 - :class:`collections.abc.Awaitable` now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + .. versionchanged:: 3.9 + Added ``include_extras`` parameter as part of :pep:`593`. + See the documentation on :data:`Annotated` for more information. + .. versionchanged:: 3.11 + Previously, ``Optional[t]`` was added for function and method annotations + if a default value equal to ``None`` was set. + Now the annotation is returned unchanged. -Context manager types -""""""""""""""""""""" +.. function:: get_origin(tp) -.. class:: ContextManager(Generic[T_co]) + Get the unsubscripted version of a type: for a typing object of the form + ``X[Y, Z, ...]`` return ``X``. - A generic version of :class:`contextlib.AbstractContextManager`. + If ``X`` is a typing-module alias for a builtin or + :mod:`collections` class, it will be normalized to the original class. 
+ If ``X`` is an instance of :class:`ParamSpecArgs` or :class:`ParamSpecKwargs`, + return the underlying :class:`ParamSpec`. + Return ``None`` for unsupported objects. - .. versionadded:: 3.5.4 - .. versionadded:: 3.6.0 + Examples: - .. deprecated:: 3.9 - :class:`contextlib.AbstractContextManager` - now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + .. testcode:: -.. class:: AsyncContextManager(Generic[T_co]) + assert get_origin(str) is None + assert get_origin(Dict[str, int]) is dict + assert get_origin(Union[int, str]) is Union + P = ParamSpec('P') + assert get_origin(P.args) is P + assert get_origin(P.kwargs) is P - A generic version of :class:`contextlib.AbstractAsyncContextManager`. + .. versionadded:: 3.8 - .. versionadded:: 3.5.4 - .. versionadded:: 3.6.2 +.. function:: get_args(tp) - .. deprecated:: 3.9 - :class:`contextlib.AbstractAsyncContextManager` - now supports subscripting (``[]``). - See :pep:`585` and :ref:`types-genericalias`. + Get type arguments with all substitutions performed: for a typing object + of the form ``X[Y, Z, ...]`` return ``(Y, Z, ...)``. -Protocols ---------- + If ``X`` is a union or :class:`Literal` contained in another + generic type, the order of ``(Y, Z, ...)`` may be different from the order + of the original arguments ``[Y, Z, ...]`` due to type caching. + Return ``()`` for unsupported objects. -These protocols are decorated with :func:`runtime_checkable`. + Examples: -.. class:: SupportsAbs + .. testcode:: - An ABC with one abstract method ``__abs__`` that is covariant - in its return type. + assert get_args(int) == () + assert get_args(Dict[int, str]) == (int, str) + assert get_args(Union[int, str]) == (int, str) -.. class:: SupportsBytes + .. versionadded:: 3.8 - An ABC with one abstract method ``__bytes__``. +.. function:: get_protocol_members(tp) -.. class:: SupportsComplex + Return the set of members defined in a :class:`Protocol`. - An ABC with one abstract method ``__complex__``. + .. doctest:: -.. class:: SupportsFloat + >>> from typing import Protocol, get_protocol_members + >>> class P(Protocol): + ... def a(self) -> str: ... + ... b: int + >>> get_protocol_members(P) == frozenset({'a', 'b'}) + True - An ABC with one abstract method ``__float__``. + Raise :exc:`TypeError` for arguments that are not Protocols. -.. class:: SupportsIndex + .. versionadded:: 3.13 - An ABC with one abstract method ``__index__``. +.. function:: is_protocol(tp) - .. versionadded:: 3.8 + Determine if a type is a :class:`Protocol`. -.. class:: SupportsInt + For example:: - An ABC with one abstract method ``__int__``. + class P(Protocol): + def a(self) -> str: ... + b: int -.. class:: SupportsRound + is_protocol(P) # => True + is_protocol(int) # => False - An ABC with one abstract method ``__round__`` - that is covariant in its return type. + .. versionadded:: 3.13 -Functions and decorators ------------------------- +.. function:: is_typeddict(tp) -.. function:: cast(typ, val) + Check if a type is a :class:`TypedDict`. - Cast a value to a type. + For example: - This returns the value unchanged. To the type checker this - signals that the return value has the designated type, but at - runtime we intentionally don't check anything (we want this - to be as fast as possible). + .. testcode:: -.. function:: assert_type(val, typ, /) + class Film(TypedDict): + title: str + year: int - Ask a static type checker to confirm that *val* has an inferred type of *typ*. 
+ assert is_typeddict(Film) + assert not is_typeddict(list | str) - When the type checker encounters a call to ``assert_type()``, it - emits an error if the value is not of the specified type:: + # TypedDict is a factory for creating typed dicts, + # not a typed dict itself + assert not is_typeddict(TypedDict) - def greet(name: str) -> None: - assert_type(name, str) # OK, inferred type of `name` is `str` - assert_type(name, int) # type checker error + .. versionadded:: 3.10 - At runtime this returns the first argument unchanged with no side effects. +.. class:: ForwardRef - This function is useful for ensuring the type checker's understanding of a - script is in line with the developer's intentions:: + Class used for internal typing representation of string forward references. - def complex_function(arg: object): - # Do some complex type-narrowing logic, - # after which we hope the inferred type will be `int` - ... - # Test whether the type checker correctly understands our function - assert_type(arg, int) + For example, ``List["SomeClass"]`` is implicitly transformed into + ``List[ForwardRef("SomeClass")]``. ``ForwardRef`` should not be instantiated by + a user, but may be used by introspection tools. - .. versionadded:: 3.11 + .. note:: + :pep:`585` generic types such as ``list["SomeClass"]`` will not be + implicitly transformed into ``list[ForwardRef("SomeClass")]`` and thus + will not automatically resolve to ``list[SomeClass]``. -.. function:: assert_never(arg, /) + .. versionadded:: 3.7.4 - Ask a static type checker to confirm that a line of code is unreachable. +Constant +-------- - Example:: +.. data:: TYPE_CHECKING - def int_or_str(arg: int | str) -> None: - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _ as unreachable: - assert_never(unreachable) + A special constant that is assumed to be ``True`` by 3rd party static + type checkers. It is ``False`` at runtime. - Here, the annotations allow the type checker to infer that the - last case can never execute, because ``arg`` is either - an :class:`int` or a :class:`str`, and both options are covered by - earlier cases. - If a type checker finds that a call to ``assert_never()`` is - reachable, it will emit an error. For example, if the type annotation - for ``arg`` was instead ``int | str | float``, the type checker would - emit an error pointing out that ``unreachable`` is of type :class:`float`. - For a call to ``assert_never`` to pass type checking, the inferred type of - the argument passed in must be the bottom type, :data:`Never`, and nothing - else. + Usage:: - At runtime, this throws an exception when called. + if TYPE_CHECKING: + import expensive_mod - .. seealso:: - `Unreachable Code and Exhaustiveness Checking - `__ has more - information about exhaustiveness checking with static typing. + def fun(arg: 'expensive_mod.SomeType') -> None: + local_var: expensive_mod.AnotherType = other_fun() - .. versionadded:: 3.11 + The first type annotation must be enclosed in quotes, making it a + "forward reference", to hide the ``expensive_mod`` reference from the + interpreter runtime. Type annotations for local variables are not + evaluated, so the second annotation does not need to be enclosed in quotes. -.. function:: reveal_type(obj, /) + .. note:: - Reveal the inferred static type of an expression. + If ``from __future__ import annotations`` is used, + annotations are not evaluated at function definition time. + Instead, they are stored as strings in ``__annotations__``. 
+ This makes it unnecessary to use quotes around the annotation + (see :pep:`563`). - When a static type checker encounters a call to this function, - it emits a diagnostic with the type of the argument. For example:: + .. versionadded:: 3.5.2 - x: int = 1 - reveal_type(x) # Revealed type is "builtins.int" +.. _generic-concrete-collections: +.. _deprecated-aliases: - This can be useful when you want to debug how your type checker - handles a particular piece of code. +Deprecated aliases +------------------ - The function returns its argument unchanged, which allows using - it within an expression:: +This module defines several deprecated aliases to pre-existing +standard library classes. These were originally included in the typing +module in order to support parameterizing these generic classes using ``[]``. +However, the aliases became redundant in Python 3.9 when the +corresponding pre-existing classes were enhanced to support ``[]`` (see +:pep:`585`). - x = reveal_type(1) # Revealed type is "builtins.int" +The redundant types are deprecated as of Python 3.9. However, while the aliases +may be removed at some point, removal of these aliases is not currently +planned. As such, no deprecation warnings are currently issued by the +interpreter for these aliases. - Most type checkers support ``reveal_type()`` anywhere, even if the - name is not imported from ``typing``. Importing the name from - ``typing`` allows your code to run without runtime errors and - communicates intent more clearly. +If at some point it is decided to remove these deprecated aliases, a +deprecation warning will be issued by the interpreter for at least two releases +prior to removal. The aliases are guaranteed to remain in the typing module +without deprecation warnings until at least Python 3.14. - At runtime, this function prints the runtime type of its argument to stderr - and returns it unchanged:: +Type checkers are encouraged to flag uses of the deprecated types if the +program they are checking targets a minimum Python version of 3.9 or newer. - x = reveal_type(1) # prints "Runtime type is int" - print(x) # prints "1" +.. _corresponding-to-built-in-types: - .. versionadded:: 3.11 +Aliases to built-in types +""""""""""""""""""""""""" -.. decorator:: dataclass_transform +.. class:: Dict(dict, MutableMapping[KT, VT]) - :data:`~typing.dataclass_transform` may be used to - decorate a class, metaclass, or a function that is itself a decorator. - The presence of ``@dataclass_transform()`` tells a static type checker that the - decorated object performs runtime "magic" that - transforms a class, giving it :func:`dataclasses.dataclass`-like behaviors. + Deprecated alias to :class:`dict`. - Example usage with a decorator function:: + Note that to annotate arguments, it is preferred + to use an abstract collection type such as :class:`Mapping` + rather than to use :class:`dict` or :class:`!typing.Dict`. - T = TypeVar("T") + This type can be used as follows:: - @dataclass_transform() - def create_model(cls: type[T]) -> type[T]: + def count_words(text: str) -> Dict[str, int]: ... - return cls - @create_model - class CustomerModel: - id: int - name: str + .. deprecated:: 3.9 + :class:`builtins.dict ` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - On a base class:: +.. class:: List(list, MutableSequence[T]) - @dataclass_transform() - class ModelBase: ... + Deprecated alias to :class:`list`. 
- class CustomerModel(ModelBase): - id: int - name: str + Note that to annotate arguments, it is preferred + to use an abstract collection type such as :class:`Sequence` or + :class:`Iterable` rather than to use :class:`list` or :class:`!typing.List`. - On a metaclass:: + This type may be used as follows:: - @dataclass_transform() - class ModelMeta(type): ... + def vec2[T: (int, float)](x: T, y: T) -> List[T]: + return [x, y] - class ModelBase(metaclass=ModelMeta): ... + def keep_positives[T: (int, float)](vector: Sequence[T]) -> List[T]: + return [item for item in vector if item > 0] - class CustomerModel(ModelBase): - id: int - name: str + .. deprecated:: 3.9 + :class:`builtins.list ` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - The ``CustomerModel`` classes defined above will - be treated by type checkers similarly to classes created with - :func:`@dataclasses.dataclass `. - For example, type checkers will assume these classes have - ``__init__`` methods that accept ``id`` and ``name``. +.. class:: Set(set, MutableSet[T]) - The decorated class, metaclass, or function may accept the following bool - arguments which type checkers will assume have the same effect as they - would have on the - :func:`@dataclasses.dataclass` decorator: ``init``, - ``eq``, ``order``, ``unsafe_hash``, ``frozen``, ``match_args``, - ``kw_only``, and ``slots``. It must be possible for the value of these - arguments (``True`` or ``False``) to be statically evaluated. + Deprecated alias to :class:`builtins.set `. - The arguments to the ``dataclass_transform`` decorator can be used to - customize the default behaviors of the decorated class, metaclass, or - function: + Note that to annotate arguments, it is preferred + to use an abstract collection type such as :class:`AbstractSet` + rather than to use :class:`set` or :class:`!typing.Set`. - * ``eq_default`` indicates whether the ``eq`` parameter is assumed to be - ``True`` or ``False`` if it is omitted by the caller. - * ``order_default`` indicates whether the ``order`` parameter is - assumed to be True or False if it is omitted by the caller. - * ``kw_only_default`` indicates whether the ``kw_only`` parameter is - assumed to be True or False if it is omitted by the caller. - * ``field_specifiers`` specifies a static list of supported classes - or functions that describe fields, similar to ``dataclasses.field()``. - * Arbitrary other keyword arguments are accepted in order to allow for - possible future extensions. - - Type checkers recognize the following optional arguments on field - specifiers: + .. deprecated:: 3.9 + :class:`builtins.set ` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - * ``init`` indicates whether the field should be included in the - synthesized ``__init__`` method. If unspecified, ``init`` defaults to - ``True``. - * ``default`` provides the default value for the field. - * ``default_factory`` provides a runtime callback that returns the - default value for the field. If neither ``default`` nor - ``default_factory`` are specified, the field is assumed to have no - default value and must be provided a value when the class is - instantiated. - * ``factory`` is an alias for ``default_factory``. - * ``kw_only`` indicates whether the field should be marked as - keyword-only. If ``True``, the field will be keyword-only. If - ``False``, it will not be keyword-only. 
If unspecified, the value of - the ``kw_only`` parameter on the object decorated with - ``dataclass_transform`` will be used, or if that is unspecified, the - value of ``kw_only_default`` on ``dataclass_transform`` will be used. - * ``alias`` provides an alternative name for the field. This alternative - name is used in the synthesized ``__init__`` method. +.. class:: FrozenSet(frozenset, AbstractSet[T_co]) - At runtime, this decorator records its arguments in the - ``__dataclass_transform__`` attribute on the decorated object. - It has no other runtime effect. + Deprecated alias to :class:`builtins.frozenset `. - See :pep:`681` for more details. + .. deprecated:: 3.9 + :class:`builtins.frozenset ` + now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - .. versionadded:: 3.11 +.. data:: Tuple -.. decorator:: overload + Deprecated alias for :class:`tuple`. - The ``@overload`` decorator allows describing functions and methods - that support multiple different combinations of argument types. A series - of ``@overload``-decorated definitions must be followed by exactly one - non-``@overload``-decorated definition (for the same function/method). - The ``@overload``-decorated definitions are for the benefit of the - type checker only, since they will be overwritten by the - non-``@overload``-decorated definition, while the latter is used at - runtime but should be ignored by a type checker. At runtime, calling - a ``@overload``-decorated function directly will raise - :exc:`NotImplementedError`. An example of overload that gives a more - precise type than can be expressed using a union or a type variable:: + :class:`tuple` and ``Tuple`` are special-cased in the type system; see + :ref:`annotating-tuples` for more details. - @overload - def process(response: None) -> None: - ... - @overload - def process(response: int) -> tuple[int, str]: - ... - @overload - def process(response: bytes) -> str: - ... - def process(response): - + .. deprecated:: 3.9 + :class:`builtins.tuple ` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - See :pep:`484` for more details and comparison with other typing semantics. +.. class:: Type(Generic[CT_co]) - .. versionchanged:: 3.11 - Overloaded functions can now be introspected at runtime using - :func:`get_overloads`. + Deprecated alias to :class:`type`. + See :ref:`type-of-class-objects` for details on using :class:`type` or + ``typing.Type`` in type annotations. -.. function:: get_overloads(func) + .. versionadded:: 3.5.2 - Return a sequence of :func:`@overload `-decorated definitions for - *func*. *func* is the function object for the implementation of the - overloaded function. For example, given the definition of ``process`` in - the documentation for :func:`@overload `, - ``get_overloads(process)`` will return a sequence of three function objects - for the three defined overloads. If called on a function with no overloads, - ``get_overloads()`` returns an empty sequence. + .. deprecated:: 3.9 + :class:`builtins.type ` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - ``get_overloads()`` can be used for introspecting an overloaded function at - runtime. +.. _corresponding-to-types-in-collections: - .. versionadded:: 3.11 +Aliases to types in :mod:`collections` +"""""""""""""""""""""""""""""""""""""" +.. class:: DefaultDict(collections.defaultdict, MutableMapping[KT, VT]) -.. function:: clear_overloads() + Deprecated alias to :class:`collections.defaultdict`. 
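+
+   Since Python 3.9 the standard class supports subscripting directly, so new
+   code can annotate with :class:`collections.defaultdict` itself. A brief
+   sketch (the function below is only an illustration, not part of the
+   original entry)::
+
+      from collections import defaultdict
+
+      def group_by_initial(words: list[str]) -> defaultdict[str, list[str]]:
+          grouped: defaultdict[str, list[str]] = defaultdict(list)
+          for word in words:
+              grouped[word[0]].append(word)  # assumes non-empty strings
+          return grouped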
- Clear all registered overloads in the internal registry. This can be used - to reclaim the memory used by the registry. + .. versionadded:: 3.5.2 - .. versionadded:: 3.11 + .. deprecated:: 3.9 + :class:`collections.defaultdict` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. +.. class:: OrderedDict(collections.OrderedDict, MutableMapping[KT, VT]) -.. decorator:: final + Deprecated alias to :class:`collections.OrderedDict`. - A decorator to indicate to type checkers that the decorated method - cannot be overridden, and the decorated class cannot be subclassed. - For example:: + .. versionadded:: 3.7.2 - class Base: - @final - def done(self) -> None: - ... - class Sub(Base): - def done(self) -> None: # Error reported by type checker - ... + .. deprecated:: 3.9 + :class:`collections.OrderedDict` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - @final - class Leaf: - ... - class Other(Leaf): # Error reported by type checker - ... +.. class:: ChainMap(collections.ChainMap, MutableMapping[KT, VT]) - There is no runtime checking of these properties. See :pep:`591` for - more details. + Deprecated alias to :class:`collections.ChainMap`. - .. versionadded:: 3.8 + .. versionadded:: 3.5.4 + .. versionadded:: 3.6.1 - .. versionchanged:: 3.11 - The decorator will now set the ``__final__`` attribute to ``True`` - on the decorated object. Thus, a check like - ``if getattr(obj, "__final__", False)`` can be used at runtime - to determine whether an object ``obj`` has been marked as final. - If the decorated object does not support setting attributes, - the decorator returns the object unchanged without raising an exception. + .. deprecated:: 3.9 + :class:`collections.ChainMap` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. +.. class:: Counter(collections.Counter, Dict[T, int]) -.. decorator:: no_type_check + Deprecated alias to :class:`collections.Counter`. - Decorator to indicate that annotations are not type hints. + .. versionadded:: 3.5.4 + .. versionadded:: 3.6.1 - This works as class or function :term:`decorator`. With a class, it - applies recursively to all methods and classes defined in that class - (but not to methods defined in its superclasses or subclasses). + .. deprecated:: 3.9 + :class:`collections.Counter` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - This mutates the function(s) in place. +.. class:: Deque(deque, MutableSequence[T]) -.. decorator:: no_type_check_decorator + Deprecated alias to :class:`collections.deque`. - Decorator to give another decorator the :func:`no_type_check` effect. + .. versionadded:: 3.5.4 + .. versionadded:: 3.6.1 - This wraps the decorator with something that wraps the decorated - function in :func:`no_type_check`. + .. deprecated:: 3.9 + :class:`collections.deque` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. -.. decorator:: type_check_only +.. _other-concrete-types: - Decorator to mark a class or function to be unavailable at runtime. +Aliases to other concrete types +""""""""""""""""""""""""""""""" - This decorator is itself not available at runtime. It is mainly - intended to mark classes that are defined in type stub files if - an implementation returns an instance of a private class:: +.. class:: Pattern + Match - @type_check_only - class Response: # private or not available at runtime - code: int - def get_header(self, name: str) -> str: ... 
+ Deprecated aliases corresponding to the return types from + :func:`re.compile` and :func:`re.match`. - def fetch_response() -> Response: ... + These types (and the corresponding functions) are generic over + :data:`AnyStr`. ``Pattern`` can be specialised as ``Pattern[str]`` or + ``Pattern[bytes]``; ``Match`` can be specialised as ``Match[str]`` or + ``Match[bytes]``. - Note that returning instances of private classes is not recommended. - It is usually preferable to make such classes public. + .. deprecated:: 3.9 + Classes ``Pattern`` and ``Match`` from :mod:`re` now support ``[]``. + See :pep:`585` and :ref:`types-genericalias`. -Introspection helpers ---------------------- +.. class:: Text -.. function:: get_type_hints(obj, globalns=None, localns=None, include_extras=False) + Deprecated alias for :class:`str`. - Return a dictionary containing type hints for a function, method, module - or class object. + ``Text`` is provided to supply a forward + compatible path for Python 2 code: in Python 2, ``Text`` is an alias for + ``unicode``. - This is often the same as ``obj.__annotations__``. In addition, - forward references encoded as string literals are handled by evaluating - them in ``globals`` and ``locals`` namespaces. For a class ``C``, return - a dictionary constructed by merging all the ``__annotations__`` along - ``C.__mro__`` in reverse order. + Use ``Text`` to indicate that a value must contain a unicode string in + a manner that is compatible with both Python 2 and Python 3:: - The function recursively replaces all ``Annotated[T, ...]`` with ``T``, - unless ``include_extras`` is set to ``True`` (see :class:`Annotated` for - more information). For example:: + def add_unicode_checkmark(text: Text) -> Text: + return text + u' \u2713' - class Student(NamedTuple): - name: Annotated[str, 'some marker'] + .. versionadded:: 3.5.2 - get_type_hints(Student) == {'name': str} - get_type_hints(Student, include_extras=False) == {'name': str} - get_type_hints(Student, include_extras=True) == { - 'name': Annotated[str, 'some marker'] - } + .. deprecated:: 3.11 + Python 2 is no longer supported, and most type checkers also no longer + support type checking Python 2 code. Removal of the alias is not + currently planned, but users are encouraged to use + :class:`str` instead of ``Text``. - .. note:: +.. _abstract-base-classes: +.. _corresponding-to-collections-in-collections-abc: - :func:`get_type_hints` does not work with imported - :ref:`type aliases ` that include forward references. - Enabling postponed evaluation of annotations (:pep:`563`) may remove - the need for most forward references. +Aliases to container ABCs in :mod:`collections.abc` +""""""""""""""""""""""""""""""""""""""""""""""""""" - .. versionchanged:: 3.9 - Added ``include_extras`` parameter as part of :pep:`593`. +.. class:: AbstractSet(Collection[T_co]) - .. versionchanged:: 3.11 - Previously, ``Optional[t]`` was added for function and method annotations - if a default value equal to ``None`` was set. - Now the annotation is returned unchanged. + Deprecated alias to :class:`collections.abc.Set`. -.. function:: get_args(tp) -.. function:: get_origin(tp) + .. deprecated:: 3.9 + :class:`collections.abc.Set` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - Provide basic introspection for generic types and special typing forms. +.. class:: ByteString(Sequence[int]) - For a typing object of the form ``X[Y, Z, ...]`` these functions return - ``X`` and ``(Y, Z, ...)``. 
If ``X`` is a generic alias for a builtin or - :mod:`collections` class, it gets normalized to the original class. - If ``X`` is a union or :class:`Literal` contained in another - generic type, the order of ``(Y, Z, ...)`` may be different from the order - of the original arguments ``[Y, Z, ...]`` due to type caching. - For unsupported objects return ``None`` and ``()`` correspondingly. - Examples:: + This type represents the types :class:`bytes`, :class:`bytearray`, + and :class:`memoryview` of byte sequences. - assert get_origin(Dict[str, int]) is dict - assert get_args(Dict[int, str]) == (int, str) + .. deprecated-removed:: 3.9 3.14 + Prefer :class:`collections.abc.Buffer`, or a union like ``bytes | bytearray | memoryview``. - assert get_origin(Union[int, str]) is Union - assert get_args(Union[int, str]) == (int, str) +.. class:: Collection(Sized, Iterable[T_co], Container[T_co]) - .. versionadded:: 3.8 + Deprecated alias to :class:`collections.abc.Collection`. -.. function:: is_typeddict(tp) + .. versionadded:: 3.6.0 - Check if a type is a :class:`TypedDict`. + .. deprecated:: 3.9 + :class:`collections.abc.Collection` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - For example:: +.. class:: Container(Generic[T_co]) - class Film(TypedDict): - title: str - year: int + Deprecated alias to :class:`collections.abc.Container`. - is_typeddict(Film) # => True - is_typeddict(list | str) # => False + .. deprecated:: 3.9 + :class:`collections.abc.Container` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - .. versionadded:: 3.10 +.. class:: ItemsView(MappingView, AbstractSet[tuple[KT_co, VT_co]]) -.. class:: ForwardRef + Deprecated alias to :class:`collections.abc.ItemsView`. - A class used for internal typing representation of string forward references. - For example, ``List["SomeClass"]`` is implicitly transformed into - ``List[ForwardRef("SomeClass")]``. This class should not be instantiated by - a user, but may be used by introspection tools. + .. deprecated:: 3.9 + :class:`collections.abc.ItemsView` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - .. note:: - :pep:`585` generic types such as ``list["SomeClass"]`` will not be - implicitly transformed into ``list[ForwardRef("SomeClass")]`` and thus - will not automatically resolve to ``list[SomeClass]``. +.. class:: KeysView(MappingView, AbstractSet[KT_co]) - .. versionadded:: 3.7.4 + Deprecated alias to :class:`collections.abc.KeysView`. -Constant --------- + .. deprecated:: 3.9 + :class:`collections.abc.KeysView` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. -.. data:: TYPE_CHECKING +.. class:: Mapping(Collection[KT], Generic[KT, VT_co]) - A special constant that is assumed to be ``True`` by 3rd party static - type checkers. It is ``False`` at runtime. Usage:: + Deprecated alias to :class:`collections.abc.Mapping`. - if TYPE_CHECKING: - import expensive_mod + This type can be used as follows:: - def fun(arg: 'expensive_mod.SomeType') -> None: - local_var: expensive_mod.AnotherType = other_fun() + def get_position_in_index(word_list: Mapping[str, int], word: str) -> int: + return word_list[word] - The first type annotation must be enclosed in quotes, making it a - "forward reference", to hide the ``expensive_mod`` reference from the - interpreter runtime. Type annotations for local variables are not - evaluated, so the second annotation does not need to be enclosed in quotes. + .. 
deprecated:: 3.9 + :class:`collections.abc.Mapping` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. - .. note:: +.. class:: MappingView(Sized) - If ``from __future__ import annotations`` is used, - annotations are not evaluated at function definition time. - Instead, they are stored as strings in ``__annotations__``. - This makes it unnecessary to use quotes around the annotation - (see :pep:`563`). + Deprecated alias to :class:`collections.abc.MappingView`. + + .. deprecated:: 3.9 + :class:`collections.abc.MappingView` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: MutableMapping(Mapping[KT, VT]) + + Deprecated alias to :class:`collections.abc.MutableMapping`. + + .. deprecated:: 3.9 + :class:`collections.abc.MutableMapping` + now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: MutableSequence(Sequence[T]) + + Deprecated alias to :class:`collections.abc.MutableSequence`. + + .. deprecated:: 3.9 + :class:`collections.abc.MutableSequence` + now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: MutableSet(AbstractSet[T]) + + Deprecated alias to :class:`collections.abc.MutableSet`. + + .. deprecated:: 3.9 + :class:`collections.abc.MutableSet` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: Sequence(Reversible[T_co], Collection[T_co]) + + Deprecated alias to :class:`collections.abc.Sequence`. + + .. deprecated:: 3.9 + :class:`collections.abc.Sequence` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: ValuesView(MappingView, Collection[_VT_co]) + + Deprecated alias to :class:`collections.abc.ValuesView`. + + .. deprecated:: 3.9 + :class:`collections.abc.ValuesView` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. _asynchronous-programming: + +Aliases to asynchronous ABCs in :mod:`collections.abc` +"""""""""""""""""""""""""""""""""""""""""""""""""""""" + +.. class:: Coroutine(Awaitable[ReturnType], Generic[YieldType, SendType, ReturnType]) + + Deprecated alias to :class:`collections.abc.Coroutine`. + + The variance and order of type variables + correspond to those of :class:`Generator`, for example:: + + from collections.abc import Coroutine + c: Coroutine[list[str], str, int] # Some coroutine defined elsewhere + x = c.send('hi') # Inferred type of 'x' is list[str] + async def bar() -> None: + y = await c # Inferred type of 'y' is int + + .. versionadded:: 3.5.3 + + .. deprecated:: 3.9 + :class:`collections.abc.Coroutine` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: AsyncGenerator(AsyncIterator[YieldType], Generic[YieldType, SendType]) + + Deprecated alias to :class:`collections.abc.AsyncGenerator`. + + An async generator can be annotated by the generic type + ``AsyncGenerator[YieldType, SendType]``. For example:: + + async def echo_round() -> AsyncGenerator[int, float]: + sent = yield 0 + while sent >= 0.0: + rounded = await round(sent) + sent = yield rounded + + Unlike normal generators, async generators cannot return a value, so there + is no ``ReturnType`` type parameter. As with :class:`Generator`, the + ``SendType`` behaves contravariantly. 
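+
+   For illustration, contravariance means that an async generator accepting a
+   wider send type may be used where one accepting a narrower send type is
+   expected. The names below are invented for this sketch and are not part of
+   the surrounding example::
+
+      def drive(gen: AsyncGenerator[int, int]) -> None:
+          ...  # only ever sends ints into the generator
+
+      async def accepts_floats() -> AsyncGenerator[int, float]:
+          sent = 0.0
+          while True:
+              sent = yield round(sent)
+
+      drive(accepts_floats())  # accepted: float sends also cover int sends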
+ + If your generator will only yield values, set the ``SendType`` to + ``None``:: + + async def infinite_stream(start: int) -> AsyncGenerator[int, None]: + while True: + yield start + start = await increment(start) + + Alternatively, annotate your generator as having a return type of + either ``AsyncIterable[YieldType]`` or ``AsyncIterator[YieldType]``:: + + async def infinite_stream(start: int) -> AsyncIterator[int]: + while True: + yield start + start = await increment(start) + + .. versionadded:: 3.6.1 + + .. deprecated:: 3.9 + :class:`collections.abc.AsyncGenerator` + now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: AsyncIterable(Generic[T_co]) + + Deprecated alias to :class:`collections.abc.AsyncIterable`. + + .. versionadded:: 3.5.2 + + .. deprecated:: 3.9 + :class:`collections.abc.AsyncIterable` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: AsyncIterator(AsyncIterable[T_co]) + + Deprecated alias to :class:`collections.abc.AsyncIterator`. .. versionadded:: 3.5.2 + .. deprecated:: 3.9 + :class:`collections.abc.AsyncIterator` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: Awaitable(Generic[T_co]) + + Deprecated alias to :class:`collections.abc.Awaitable`. + + .. versionadded:: 3.5.2 + + .. deprecated:: 3.9 + :class:`collections.abc.Awaitable` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. _corresponding-to-other-types-in-collections-abc: + +Aliases to other ABCs in :mod:`collections.abc` +""""""""""""""""""""""""""""""""""""""""""""""" + +.. class:: Iterable(Generic[T_co]) + + Deprecated alias to :class:`collections.abc.Iterable`. + + .. deprecated:: 3.9 + :class:`collections.abc.Iterable` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: Iterator(Iterable[T_co]) + + Deprecated alias to :class:`collections.abc.Iterator`. + + .. deprecated:: 3.9 + :class:`collections.abc.Iterator` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. data:: Callable + + Deprecated alias to :class:`collections.abc.Callable`. + + See :ref:`annotating-callables` for details on how to use + :class:`collections.abc.Callable` and ``typing.Callable`` in type annotations. + + .. deprecated:: 3.9 + :class:`collections.abc.Callable` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + + .. versionchanged:: 3.10 + ``Callable`` now supports :class:`ParamSpec` and :data:`Concatenate`. + See :pep:`612` for more details. + +.. class:: Generator(Iterator[YieldType], Generic[YieldType, SendType, ReturnType]) + + Deprecated alias to :class:`collections.abc.Generator`. + + A generator can be annotated by the generic type + ``Generator[YieldType, SendType, ReturnType]``. For example:: + + def echo_round() -> Generator[int, float, str]: + sent = yield 0 + while sent >= 0: + sent = yield round(sent) + return 'Done' + + Note that unlike many other generics in the typing module, the ``SendType`` + of :class:`Generator` behaves contravariantly, not covariantly or + invariantly. 
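+
+   For illustration, a caller of the ``echo_round`` generator defined above
+   sees the three type parameters as follows: ``next()`` and ``send()`` return
+   the yield type, ``send()`` accepts the send type, and the return type is
+   carried on the final :exc:`StopIteration` (a sketch, not part of the
+   original example)::
+
+      gen = echo_round()
+      print(next(gen))       # 0 -- the first value yielded (an int)
+      print(gen.send(1.7))   # 2 -- send() accepts the SendType (a float)
+      try:
+          gen.send(-1.0)     # a negative number ends the loop
+      except StopIteration as exc:
+          print(exc.value)   # 'Done' -- the ReturnType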
+ + If your generator will only yield values, set the ``SendType`` and + ``ReturnType`` to ``None``:: + + def infinite_stream(start: int) -> Generator[int, None, None]: + while True: + yield start + start += 1 + + Alternatively, annotate your generator as having a return type of + either ``Iterable[YieldType]`` or ``Iterator[YieldType]``:: + + def infinite_stream(start: int) -> Iterator[int]: + while True: + yield start + start += 1 + + .. deprecated:: 3.9 + :class:`collections.abc.Generator` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: Hashable + + Deprecated alias to :class:`collections.abc.Hashable`. + + .. deprecated:: 3.12 + Use :class:`collections.abc.Hashable` directly instead. + +.. class:: Reversible(Iterable[T_co]) + + Deprecated alias to :class:`collections.abc.Reversible`. + + .. deprecated:: 3.9 + :class:`collections.abc.Reversible` now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: Sized + + Deprecated alias to :class:`collections.abc.Sized`. + + .. deprecated:: 3.12 + Use :class:`collections.abc.Sized` directly instead. + +.. _context-manager-types: + +Aliases to :mod:`contextlib` ABCs +""""""""""""""""""""""""""""""""" + +.. class:: ContextManager(Generic[T_co]) + + Deprecated alias to :class:`contextlib.AbstractContextManager`. + + .. versionadded:: 3.5.4 + .. versionadded:: 3.6.0 + + .. deprecated:: 3.9 + :class:`contextlib.AbstractContextManager` + now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + +.. class:: AsyncContextManager(Generic[T_co]) + + Deprecated alias to :class:`contextlib.AbstractAsyncContextManager`. + + .. versionadded:: 3.5.4 + .. versionadded:: 3.6.2 + + .. deprecated:: 3.9 + :class:`contextlib.AbstractAsyncContextManager` + now supports subscripting (``[]``). + See :pep:`585` and :ref:`types-genericalias`. + Deprecation Timeline of Major Features ====================================== @@ -2865,17 +3708,38 @@ Certain features in ``typing`` are deprecated and may be removed in a future version of Python. The following table summarizes major deprecations for your convenience. This is subject to change, and not all deprecations are listed. -+----------------------------------+---------------+-------------------+----------------+ -| Feature | Deprecated in | Projected removal | PEP/issue | -+==================================+===============+===================+================+ -| ``typing.io`` and ``typing.re`` | 3.8 | 3.13 | :issue:`38291` | -| submodules | | | | -+----------------------------------+---------------+-------------------+----------------+ -| ``typing`` versions of standard | 3.9 | Undecided | :pep:`585` | -| collections | | | | -+----------------------------------+---------------+-------------------+----------------+ -| ``typing.Text`` | 3.11 | Undecided | :gh:`92332` | -+----------------------------------+---------------+-------------------+----------------+ -| ``typing.Hashable`` and | 3.12 | Undecided | :gh:`94309` | -| ``typing.Sized`` | | | | -+----------------------------------+---------------+-------------------+----------------+ +.. 
list-table:: + :header-rows: 1 + + * - Feature + - Deprecated in + - Projected removal + - PEP/issue + * - ``typing`` versions of standard collections + - 3.9 + - Undecided (see :ref:`deprecated-aliases` for more information) + - :pep:`585` + * - :class:`typing.ByteString` + - 3.9 + - 3.14 + - :gh:`91896` + * - :data:`typing.Text` + - 3.11 + - Undecided + - :gh:`92332` + * - :class:`typing.Hashable` and :class:`typing.Sized` + - 3.12 + - Undecided + - :gh:`94309` + * - :data:`typing.TypeAlias` + - 3.12 + - Undecided + - :pep:`695` + * - :func:`@typing.no_type_check_decorator ` + - 3.13 + - 3.15 + - :gh:`106309` + * - :data:`typing.AnyStr` + - 3.13 + - 3.18 + - :gh:`105578` diff --git a/Doc/library/unicodedata.rst b/Doc/library/unicodedata.rst index 3a094f9c64d4a05..7db47d48022a0ea 100644 --- a/Doc/library/unicodedata.rst +++ b/Doc/library/unicodedata.rst @@ -17,8 +17,8 @@ This module provides access to the Unicode Character Database (UCD) which defines character properties for all Unicode characters. The data contained in -this database is compiled from the `UCD version 15.0.0 -`_. +this database is compiled from the `UCD version 15.1.0 +`_. The module uses the same names and symbols as defined by Unicode Standard Annex #44, `"Unicode Character Database" @@ -175,6 +175,6 @@ Examples: .. rubric:: Footnotes -.. [#] https://www.unicode.org/Public/15.0.0/ucd/NameAliases.txt +.. [#] https://www.unicode.org/Public/15.1.0/ucd/NameAliases.txt -.. [#] https://www.unicode.org/Public/15.0.0/ucd/NamedSequences.txt +.. [#] https://www.unicode.org/Public/15.1.0/ucd/NamedSequences.txt diff --git a/Doc/library/unittest.mock-examples.rst b/Doc/library/unittest.mock-examples.rst index f9a207bad6903f4..f2bdde80bdbd644 100644 --- a/Doc/library/unittest.mock-examples.rst +++ b/Doc/library/unittest.mock-examples.rst @@ -339,7 +339,7 @@ instantiate the class in those tests. >>> mock.old_method() Traceback (most recent call last): ... - AttributeError: object has no attribute 'old_method' + AttributeError: Mock object has no attribute 'old_method'. Did you mean: 'class_method'? Using a specification also enables a smarter matching of calls made to the mock, regardless of whether some parameters were passed as positional or @@ -360,6 +360,30 @@ of arbitrary attributes as well as the getting of them then you can use *spec_set* instead of *spec*. +Using side_effect to return per file content +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:func:`mock_open` is used to patch :func:`open` method. :attr:`~Mock.side_effect` +can be used to return a new Mock object per call. This can be used to return different +contents per file stored in a dictionary:: + + DEFAULT = "default" + data_dict = {"file1": "data1", + "file2": "data2"} + + def open_side_effect(name): + return mock_open(read_data=data_dict.get(name, DEFAULT))() + + with patch("builtins.open", side_effect=open_side_effect): + with open("file1") as file1: + assert file1.read() == "data1" + + with open("file2") as file2: + assert file2.read() == "data2" + + with open("file3") as file2: + assert file2.read() == "default" + Patch Decorators ---------------- @@ -579,14 +603,14 @@ Partial mocking In some tests I wanted to mock out a call to :meth:`datetime.date.today` to return a known date, but I didn't want to prevent the code under test from creating new date objects. Unfortunately :class:`datetime.date` is written in C, and -so I couldn't just monkey-patch out the static :meth:`date.today` method. 
+so I couldn't just monkey-patch out the static :meth:`datetime.date.today` method. I found a simple way of doing this that involved effectively wrapping the date class with a mock, but passing through calls to the constructor to the real class (and returning real instances). The :func:`patch decorator ` is used here to -mock out the ``date`` class in the module under test. The :attr:`side_effect` +mock out the ``date`` class in the module under test. The :attr:`~Mock.side_effect` attribute on the mock date class is then set to a lambda function that returns a real date. When the mock date class is called a real date will be constructed and returned by ``side_effect``. :: @@ -766,15 +790,16 @@ mock has a nice API for making assertions about how your mock objects are used. >>> mock.foo_bar.assert_called_with('baz', spam='eggs') If your mock is only being called once you can use the -:meth:`assert_called_once_with` method that also asserts that the -:attr:`call_count` is one. +:meth:`~Mock.assert_called_once_with` method that also asserts that the +:attr:`~Mock.call_count` is one. >>> mock.foo_bar.assert_called_once_with('baz', spam='eggs') >>> mock.foo_bar() >>> mock.foo_bar.assert_called_once_with('baz', spam='eggs') Traceback (most recent call last): ... - AssertionError: Expected to be called once. Called 2 times. + AssertionError: Expected 'foo_bar' to be called once. Called 2 times. + Calls: [call('baz', spam='eggs'), call()]. Both ``assert_called_with`` and ``assert_called_once_with`` make assertions about the *most recent* call. If your mock is going to be called several times, and @@ -835,7 +860,7 @@ One possibility would be for mock to copy the arguments you pass in. This could then cause problems if you do assertions that rely on object identity for equality. -Here's one solution that uses the :attr:`side_effect` +Here's one solution that uses the :attr:`~Mock.side_effect` functionality. If you provide a ``side_effect`` function for a mock then ``side_effect`` will be called with the same args as the mock. This gives us an opportunity to copy the arguments and store them for later assertions. In this @@ -903,8 +928,9 @@ Here's an example implementation: >>> c.assert_called_with(arg) Traceback (most recent call last): ... - AssertionError: Expected call: mock({1}) - Actual call: mock(set()) + AssertionError: expected call not found. + Expected: mock({1}) + Actual: mock(set()) >>> c.foo @@ -971,7 +997,8 @@ We can do this with :class:`MagicMock`, which will behave like a dictionary, and using :data:`~Mock.side_effect` to delegate dictionary access to a real underlying dictionary that is under our control. -When the :meth:`__getitem__` and :meth:`__setitem__` methods of our ``MagicMock`` are called +When the :meth:`~object.__getitem__` and :meth:`~object.__setitem__` methods +of our ``MagicMock`` are called (normal dictionary access) then ``side_effect`` is called with the key (and in the case of ``__setitem__`` the value too). We can also control what is returned. @@ -1074,7 +1101,7 @@ subclass. Sometimes this is inconvenient. For example, `one user `_ is subclassing mock to created a `Twisted adaptor -`_. +`_. Having this applied to attributes too actually causes errors. ``Mock`` (in all its flavours) uses a method called ``_get_child_mock`` to create @@ -1267,8 +1294,9 @@ sufficient: >>> mock.assert_called_with(Foo(1, 2)) Traceback (most recent call last): ... 
- AssertionError: Expected: call(<__main__.Foo object at 0x...>) - Actual call: call(<__main__.Foo object at 0x...>) + AssertionError: expected call not found. + Expected: mock(<__main__.Foo object at 0x...>) + Actual: mock(<__main__.Foo object at 0x...>) A comparison function for our ``Foo`` class might look something like this: diff --git a/Doc/library/unittest.mock.rst b/Doc/library/unittest.mock.rst index b768557e6075f6e..d6ac4d09300d9fe 100644 --- a/Doc/library/unittest.mock.rst +++ b/Doc/library/unittest.mock.rst @@ -72,6 +72,7 @@ available, and then make assertions about how they have been used: :attr:`side_effect` allows you to perform side effects, including raising an exception when a mock is called: + >>> from unittest.mock import Mock >>> mock = Mock(side_effect=KeyError('foo')) >>> mock() Traceback (most recent call last): @@ -188,7 +189,7 @@ code if they are used incorrectly: >>> mock_function('wrong arguments') Traceback (most recent call last): ... - TypeError: () takes exactly 3 arguments (1 given) + TypeError: missing a required argument: 'b' :func:`create_autospec` can also be used on classes, where it copies the signature of the ``__init__`` method, and on callable objects where it copies the signature of @@ -204,8 +205,10 @@ The Mock Class import asyncio import inspect import unittest + import threading from unittest.mock import sentinel, DEFAULT, ANY from unittest.mock import patch, call, Mock, MagicMock, PropertyMock, AsyncMock + from unittest.mock import ThreadingMock from unittest.mock import mock_open :class:`Mock` is a flexible mock object intended to replace the use of stubs and @@ -312,6 +315,7 @@ the *new_callable* argument to :func:`patch`. Traceback (most recent call last): ... AssertionError: Expected 'method' to have been called once. Called 2 times. + Calls: [call(), call()]. .. versionadded:: 3.6 @@ -339,7 +343,7 @@ the *new_callable* argument to :func:`patch`. Traceback (most recent call last): ... AssertionError: Expected 'mock' to be called once. Called 2 times. - + Calls: [call('foo', bar='baz'), call('other', bar='values')]. .. method:: assert_any_call(*args, **kwargs) @@ -389,6 +393,7 @@ the *new_callable* argument to :func:`patch`. Traceback (most recent call last): ... AssertionError: Expected 'hello' to not have been called. Called 1 times. + Calls: [call()]. .. versionadded:: 3.5 @@ -406,7 +411,7 @@ the *new_callable* argument to :func:`patch`. False .. versionchanged:: 3.6 - Added two keyword only argument to the reset_mock function. + Added two keyword-only arguments to the reset_mock function. This can be useful where you want to make a series of assertions that reuse the same object. Note that :meth:`reset_mock` *doesn't* clear the @@ -416,8 +421,8 @@ the *new_callable* argument to :func:`patch`. parameter as ``True``. Child mocks and the return value mock (if any) are reset as well. - .. note:: *return_value*, and :attr:`side_effect` are keyword only - argument. + .. note:: *return_value*, and :attr:`side_effect` are keyword-only + arguments. .. method:: mock_add_spec(spec, spec_set=False) @@ -813,8 +818,8 @@ This applies to :meth:`~Mock.assert_called_with`, :meth:`~Mock.assert_any_call`. When :ref:`auto-speccing`, it will also apply to method calls on the mock object. - .. versionchanged:: 3.4 - Added signature introspection on specced and autospecced mock objects. +.. versionchanged:: 3.4 + Added signature introspection on specced and autospecced mock objects. .. 
class:: PropertyMock(*args, **kwargs) @@ -951,7 +956,7 @@ object:: >>> asyncio.run(main()) >>> mock.assert_awaited_once() >>> asyncio.run(main()) - >>> mock.method.assert_awaited_once() + >>> mock.assert_awaited_once() Traceback (most recent call last): ... AssertionError: Expected mock to have been awaited once. Awaited 2 times. @@ -969,7 +974,7 @@ object:: >>> mock.assert_awaited_with('other') Traceback (most recent call last): ... - AssertionError: expected call not found. + AssertionError: expected await not found. Expected: mock('other') Actual: mock('foo', bar='bar') @@ -1098,6 +1103,51 @@ object:: [call('foo'), call('bar')] +.. class:: ThreadingMock(spec=None, side_effect=None, return_value=DEFAULT, wraps=None, name=None, spec_set=None, unsafe=False, *, timeout=UNSET, **kwargs) + + A version of :class:`MagicMock` for multithreading tests. The + :class:`ThreadingMock` object provides extra methods to wait for a call to + be invoked, rather than assert on it immediately. + + The default timeout is specified by the ``timeout`` argument, or if unset by the + :attr:`ThreadingMock.DEFAULT_TIMEOUT` attribute, which defaults to blocking (``None``). + + You can configure the global default timeout by setting :attr:`ThreadingMock.DEFAULT_TIMEOUT`. + + .. method:: wait_until_called(*, timeout=UNSET) + + Waits until the mock is called. + + If a timeout was passed at the creation of the mock or if a timeout + argument is passed to this function, the function raises an + :exc:`AssertionError` if the call is not performed in time. + + >>> mock = ThreadingMock() + >>> thread = threading.Thread(target=mock) + >>> thread.start() + >>> mock.wait_until_called(timeout=1) + >>> thread.join() + + .. method:: wait_until_any_call_with(*args, **kwargs) + + Waits until the mock is called with the specified arguments. + + If a timeout was passed at the creation of the mock + the function raises an :exc:`AssertionError` if the call is not performed in time. + + >>> mock = ThreadingMock() + >>> thread = threading.Thread(target=mock, args=("arg1", "arg2",), kwargs={"arg": "thing"}) + >>> thread.start() + >>> mock.wait_until_any_call_with("arg1", "arg2", arg="thing") + >>> thread.join() + + .. attribute:: DEFAULT_TIMEOUT + + Global default timeout in seconds to create instances of :class:`ThreadingMock`. + + .. versionadded:: 3.13 + + Calling ~~~~~~~ @@ -1387,9 +1437,9 @@ patch .. note:: - .. versionchanged:: 3.5 - If you are patching builtins in a module then you don't - need to pass ``create=True``, it will be added by default. + .. versionchanged:: 3.5 + If you are patching builtins in a module then you don't + need to pass ``create=True``, it will be added by default. Patch can be used as a :class:`TestCase` class decorator. It works by decorating each test method in the class. This reduces the boilerplate @@ -1604,6 +1654,7 @@ decorator: >>> @patch.dict(foo, {'newkey': 'newvalue'}) ... def test(): ... assert foo == {'newkey': 'newvalue'} + ... >>> test() >>> assert foo == {} @@ -1656,7 +1707,7 @@ Keywords can be used in the :func:`patch.dict` call to set values in the diction :func:`patch.dict` can be used with dictionary like objects that aren't actually dictionaries. At the very minimum they must support item getting, setting, deleting and either iteration or membership test. 
This corresponds to the -magic methods :meth:`__getitem__`, :meth:`__setitem__`, :meth:`__delitem__` and either +magic methods :meth:`~object.__getitem__`, :meth:`__setitem__`, :meth:`__delitem__` and either :meth:`__iter__` or :meth:`__contains__`. >>> class Container: @@ -2436,7 +2487,7 @@ behaviour you can switch it off by setting the module level switch Alternatively you can just use ``vars(my_mock)`` (instance members) and ``dir(type(my_mock))`` (type members) to bypass the filtering irrespective of -:data:`mock.FILTER_DIR`. +:const:`mock.FILTER_DIR`. mock_open @@ -2480,8 +2531,8 @@ are closed properly and is becoming common:: f.write('something') The issue is that even if you mock out the call to :func:`open` it is the -*returned object* that is used as a context manager (and has :meth:`__enter__` and -:meth:`__exit__` called). +*returned object* that is used as a context manager (and has :meth:`~object.__enter__` and +:meth:`~object.__exit__` called). Mocking context managers with a :class:`MagicMock` is common enough and fiddly enough that a helper function is useful. :: diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst index 7f48146ca830f4e..02b72cb9f6b8aae 100644 --- a/Doc/library/unittest.rst +++ b/Doc/library/unittest.rst @@ -72,7 +72,7 @@ test runner a GUI tool for test discovery and execution. This is intended largely for ease of use for those new to unit testing. For production environments it is recommended that tests be driven by a continuous integration system such as - `Buildbot `_, `Jenkins `_, + `Buildbot `_, `Jenkins `_, `GitHub Actions `_, or `AppVeyor `_. @@ -206,13 +206,13 @@ Command-line options .. program:: unittest -.. cmdoption:: -b, --buffer +.. option:: -b, --buffer The standard output and standard error streams are buffered during the test run. Output during a passing test is discarded. Output is echoed normally on test fail or error and is added to the failure messages. -.. cmdoption:: -c, --catch +.. option:: -c, --catch :kbd:`Control-C` during the test run waits for the current test to end and then reports all the results so far. A second :kbd:`Control-C` raises the normal @@ -220,11 +220,11 @@ Command-line options See `Signal Handling`_ for the functions that provide this functionality. -.. cmdoption:: -f, --failfast +.. option:: -f, --failfast Stop the test run on the first error or failure. -.. cmdoption:: -k +.. option:: -k Only run test methods and classes that match the pattern or substring. This option may be used multiple times, in which case all test cases that @@ -240,10 +240,14 @@ Command-line options For example, ``-k foo`` matches ``foo_tests.SomeTest.test_something``, ``bar_tests.SomeTest.test_foo``, but not ``bar_tests.FooTest.test_something``. -.. cmdoption:: --locals +.. option:: --locals Show local variables in tracebacks. +.. option:: --durations N + + Show the N slowest test cases (N=0 for all). + .. versionadded:: 3.2 The command-line options ``-b``, ``-c`` and ``-f`` were added. @@ -253,10 +257,12 @@ Command-line options .. versionadded:: 3.7 The command-line option ``-k``. +.. versionadded:: 3.12 + The command-line option ``--durations``. + The command line can also be used for test discovery, for running all of the tests in a project or just a subset. - .. _unittest-test-discovery: Test Discovery @@ -286,19 +292,19 @@ The ``discover`` sub-command has the following options: .. program:: unittest discover -.. cmdoption:: -v, --verbose +.. option:: -v, --verbose Verbose output -.. 
cmdoption:: -s, --start-directory directory +.. option:: -s, --start-directory directory Directory to start discovery (``.`` default) -.. cmdoption:: -p, --pattern pattern +.. option:: -p, --pattern pattern Pattern to match test files (``test*.py`` default) -.. cmdoption:: -t, --top-level-directory directory +.. option:: -t, --top-level-directory directory Top level directory of project (defaults to start directory) @@ -1128,7 +1134,7 @@ Test cases If given, *level* should be either a numeric logging level or its string equivalent (for example either ``"ERROR"`` or - :attr:`logging.ERROR`). The default is :attr:`logging.INFO`. + :const:`logging.ERROR`). The default is :const:`logging.INFO`. The test passes if at least one message emitted inside the ``with`` block matches the *logger* and *level* conditions, otherwise it fails. @@ -1169,7 +1175,7 @@ Test cases If given, *level* should be either a numeric logging level or its string equivalent (for example either ``"ERROR"`` or - :attr:`logging.ERROR`). The default is :attr:`logging.INFO`. + :const:`logging.ERROR`). The default is :const:`logging.INFO`. Unlike :meth:`assertLogs`, nothing will be returned by the context manager. @@ -1565,6 +1571,14 @@ Test cases .. versionadded:: 3.8 + .. attribute:: loop_factory + + The *loop_factory* passed to :class:`asyncio.Runner`. Override + in subclasses with :class:`asyncio.EventLoop` to avoid using the + asyncio policy system. + + .. versionadded:: 3.13 + .. coroutinemethod:: asyncSetUp() Method called to prepare the test fixture. This is called after :meth:`setUp`. @@ -1947,12 +1961,12 @@ Loading and running tests .. attribute:: testNamePatterns List of Unix shell-style wildcard test name patterns that test methods - have to match to be included in test suites (see ``-v`` option). + have to match to be included in test suites (see ``-k`` option). If this attribute is not ``None`` (the default), all test methods to be included in test suites must match one of the patterns in this list. Note that matches are always performed using :meth:`fnmatch.fnmatchcase`, - so unlike patterns passed to the ``-v`` option, simple substring patterns + so unlike patterns passed to the ``-k`` option, simple substring patterns will have to be converted using ``*`` wildcards. This affects all the :meth:`loadTestsFrom\*` methods. @@ -2009,6 +2023,13 @@ Loading and running tests A list containing :class:`TestCase` instances that were marked as expected failures, but succeeded. + .. attribute:: collectedDurations + + A list containing 2-tuples of test case names and floats + representing the elapsed time of each test which was run. + + .. versionadded:: 3.12 + .. attribute:: shouldStop Set to ``True`` when the execution of tests should stop by :meth:`stop`. @@ -2160,14 +2181,23 @@ Loading and running tests .. versionadded:: 3.4 + .. method:: addDuration(test, elapsed) + + Called when the test case finishes. *elapsed* is the time represented in + seconds, and it includes the execution of cleanup functions. -.. class:: TextTestResult(stream, descriptions, verbosity) + .. versionadded:: 3.12 + +.. class:: TextTestResult(stream, descriptions, verbosity, *, durations=None) A concrete implementation of :class:`TestResult` used by the - :class:`TextTestRunner`. + :class:`TextTestRunner`. Subclasses should accept ``**kwargs`` to ensure + compatibility as the interface changes. .. versionadded:: 3.2 + .. versionadded:: 3.12 + Added *durations* keyword argument. .. 
data:: defaultTestLoader @@ -2177,7 +2207,8 @@ Loading and running tests .. class:: TextTestRunner(stream=None, descriptions=True, verbosity=1, failfast=False, \ - buffer=False, resultclass=None, warnings=None, *, tb_locals=False) + buffer=False, resultclass=None, warnings=None, *, \ + tb_locals=False, durations=None) A basic test runner implementation that outputs results to a stream. If *stream* is ``None``, the default, :data:`sys.stderr` is used as the output stream. This class @@ -2195,14 +2226,17 @@ Loading and running tests *warnings* to ``None``. .. versionchanged:: 3.2 - Added the ``warnings`` argument. + Added the *warnings* parameter. .. versionchanged:: 3.2 The default stream is set to :data:`sys.stderr` at instantiation time rather than import time. .. versionchanged:: 3.5 - Added the tb_locals parameter. + Added the *tb_locals* parameter. + + .. versionchanged:: 3.12 + Added the *durations* parameter. .. method:: _makeResult() @@ -2255,7 +2289,8 @@ Loading and running tests The *testRunner* argument can either be a test runner class or an already created instance of it. By default ``main`` calls :func:`sys.exit` with - an exit code indicating success or failure of the tests run. + an exit code indicating success (0) or failure (1) of the tests run. + An exit code of 5 indicates that no tests were run. The *testLoader* argument has to be a :class:`TestLoader` instance, and defaults to :data:`defaultTestLoader`. @@ -2291,6 +2326,8 @@ Loading and running tests test names. +.. _load_tests-protocol: + load_tests Protocol ################### diff --git a/Doc/library/urllib.error.rst b/Doc/library/urllib.error.rst index f7d47ed76aca18f..facb11f42a40c57 100644 --- a/Doc/library/urllib.error.rst +++ b/Doc/library/urllib.error.rst @@ -27,11 +27,11 @@ The following exceptions are raised by :mod:`urllib.error` as appropriate: exception instance. .. versionchanged:: 3.3 - :exc:`URLError` has been made a subclass of :exc:`OSError` instead - of :exc:`IOError`. + :exc:`URLError` used to be a subtype of :exc:`IOError`, which is now an + alias of :exc:`OSError`. -.. exception:: HTTPError +.. exception:: HTTPError(url, code, msg, hdrs, fp) Though being an exception (a subclass of :exc:`URLError`), an :exc:`HTTPError` can also function as a non-exceptional file-like return @@ -39,6 +39,11 @@ The following exceptions are raised by :mod:`urllib.error` as appropriate: is useful when handling exotic HTTP errors, such as requests for authentication. + .. attribute:: url + + Contains the request URL. + An alias for *filename* attribute. + .. attribute:: code An HTTP status code as defined in :rfc:`2616`. This numeric value corresponds @@ -48,19 +53,27 @@ The following exceptions are raised by :mod:`urllib.error` as appropriate: .. attribute:: reason This is usually a string explaining the reason for this error. + An alias for *msg* attribute. .. attribute:: headers The HTTP response headers for the HTTP request that caused the :exc:`HTTPError`. + An alias for *hdrs* attribute. .. versionadded:: 3.4 + .. attribute:: fp + + A file-like object where the HTTP error body can be read from. + .. exception:: ContentTooShortError(msg, content) This exception is raised when the :func:`~urllib.request.urlretrieve` function detects that the amount of the downloaded data is less than the expected amount (given by - the *Content-Length* header). The :attr:`content` attribute stores the - downloaded (and supposedly truncated) data. + the *Content-Length* header). + + .. 
attribute:: content + The downloaded (and supposedly truncated) data. diff --git a/Doc/library/urllib.parse.rst b/Doc/library/urllib.parse.rst index 96b396510794b4b..53e5f0395715d77 100644 --- a/Doc/library/urllib.parse.rst +++ b/Doc/library/urllib.parse.rst @@ -23,9 +23,9 @@ to an absolute URL given a "base URL." The module has been designed to match the internet RFC on Relative Uniform Resource Locators. It supports the following URL schemes: ``file``, ``ftp``, ``gopher``, ``hdl``, ``http``, ``https``, ``imap``, ``mailto``, ``mms``, -``news``, ``nntp``, ``prospero``, ``rsync``, ``rtsp``, ``rtspu``, ``sftp``, -``shttp``, ``sip``, ``sips``, ``snews``, ``svn``, ``svn+ssh``, ``telnet``, -``wais``, ``ws``, ``wss``. +``news``, ``nntp``, ``prospero``, ``rsync``, ``rtsp``, ``rtsps``, ``rtspu``, +``sftp``, ``shttp``, ``sip``, ``sips``, ``snews``, ``svn``, ``svn+ssh``, +``telnet``, ``wais``, ``ws``, ``wss``. The :mod:`urllib.parse` module defines functions that fall into two broad categories: URL parsing and URL quoting. These are covered in detail in @@ -159,6 +159,10 @@ or on combining URL components into a URL string. ParseResult(scheme='http', netloc='www.cwi.nl:80', path='/%7Eguido/Python.html', params='', query='', fragment='') + .. warning:: + + :func:`urlparse` does not perform validation. See :ref:`URL parsing + security ` for details. .. versionchanged:: 3.2 Added IPv6 URL parsing capabilities. @@ -324,8 +328,14 @@ or on combining URL components into a URL string. ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is decomposed before parsing, no error will be raised. - Following the `WHATWG spec`_ that updates RFC 3986, ASCII newline - ``\n``, ``\r`` and tab ``\t`` characters are stripped from the URL. + Following some of the `WHATWG spec`_ that updates RFC 3986, leading C0 + control and space characters are stripped from the URL. ``\n``, + ``\r`` and tab ``\t`` characters are removed from the URL at any position. + + .. warning:: + + :func:`urlsplit` does not perform validation. See :ref:`URL parsing + security ` for details. .. versionchanged:: 3.6 Out-of-range port numbers now raise :exc:`ValueError`, instead of @@ -338,6 +348,9 @@ or on combining URL components into a URL string. .. versionchanged:: 3.10 ASCII newline and tab characters are stripped from the URL. + .. versionchanged:: 3.12 + Leading WHATWG C0 control and space characters are stripped from the URL. + .. _WHATWG spec: https://url.spec.whatwg.org/#concept-basic-url-parser .. function:: urlunsplit(parts) @@ -414,6 +427,35 @@ or on combining URL components into a URL string. or ``scheme://host/path``). If *url* is not a wrapped URL, it is returned without changes. +.. _url-parsing-security: + +URL parsing security +-------------------- + +The :func:`urlsplit` and :func:`urlparse` APIs do not perform **validation** of +inputs. They may not raise errors on inputs that other applications consider +invalid. They may also succeed on some inputs that might not be considered +URLs elsewhere. Their purpose is for practical functionality rather than +purity. + +Instead of raising an exception on unusual input, they may instead return some +component parts as empty strings. Or components may contain more than perhaps +they should. + +We recommend that users of these APIs where the values may be used anywhere +with security implications code defensively. Do some verification within your +code before trusting a returned component part. Does that ``scheme`` make +sense? Is that a sensible ``path``? 
Is there anything strange about that +``hostname``? etc. + +What constitutes a URL is not universally well defined. Different applications +have different needs and desired constraints. For instance the living `WHATWG +spec`_ describes what user facing web clients such as a web browser require. +While :rfc:`3986` is more general. These functions incorporate some aspects of +both, but cannot be claimed compliant with either. The APIs and existing user +code with expectations on specific behaviors predate both standards leading us +to be very cautious about making API behavior changes. + .. _parsing-ascii-encoded-bytes: Parsing ASCII Encoded Bytes @@ -556,7 +598,7 @@ task isn't already covered by the URL parsing functions above. .. function:: quote(string, safe='/', encoding=None, errors=None) - Replace special characters in *string* using the ``%xx`` escape. Letters, + Replace special characters in *string* using the :samp:`%{xx}` escape. Letters, digits, and the characters ``'_.-~'`` are never quoted. By default, this function is intended for quoting the path section of a URL. The optional *safe* parameter specifies additional ASCII characters that should not be @@ -603,7 +645,7 @@ task isn't already covered by the URL parsing functions above. .. function:: unquote(string, encoding='utf-8', errors='replace') - Replace ``%xx`` escapes with their single-character equivalent. + Replace :samp:`%{xx}` escapes with their single-character equivalent. The optional *encoding* and *errors* parameters specify how to decode percent-encoded sequences into Unicode characters, as accepted by the :meth:`bytes.decode` method. @@ -634,7 +676,7 @@ task isn't already covered by the URL parsing functions above. .. function:: unquote_to_bytes(string) - Replace ``%xx`` escapes with their single-octet equivalent, and return a + Replace :samp:`%{xx}` escapes with their single-octet equivalent, and return a :class:`bytes` object. *string* may be either a :class:`str` or a :class:`bytes` object. diff --git a/Doc/library/urllib.request.rst b/Doc/library/urllib.request.rst index 59e1f2da828a833..bf3af1bef0714c5 100644 --- a/Doc/library/urllib.request.rst +++ b/Doc/library/urllib.request.rst @@ -26,10 +26,10 @@ authentication, redirections, cookies and more. The :mod:`urllib.request` module defines the following functions: -.. function:: urlopen(url, data=None[, timeout], *, cafile=None, capath=None, cadefault=False, context=None) +.. function:: urlopen(url, data=None[, timeout], *, context=None) - Open the URL *url*, which can be either a string or a - :class:`Request` object. + Open *url*, which can be either a string containing a valid, properly + encoded URL, or a :class:`Request` object. *data* must be an object specifying additional data to be sent to the server, or ``None`` if no such data is needed. See :class:`Request` @@ -47,14 +47,6 @@ The :mod:`urllib.request` module defines the following functions: describing the various SSL options. See :class:`~http.client.HTTPSConnection` for more details. - The optional *cafile* and *capath* parameters specify a set of trusted - CA certificates for HTTPS requests. *cafile* should point to a single - file containing a bundle of CA certificates, whereas *capath* should - point to a directory of hashed certificate files. More information can - be found in :meth:`ssl.SSLContext.load_verify_locations`. - - The *cadefault* parameter is ignored. 
- This function always returns an object which can work as a :term:`context manager` and has the properties *url*, *headers*, and *status*. See :class:`urllib.response.addinfourl` for more detail on these properties. @@ -99,7 +91,7 @@ The :mod:`urllib.request` module defines the following functions: .. versionchanged:: 3.2 HTTPS virtual hosts are now supported if possible (that is, if - :data:`ssl.HAS_SNI` is true). + :const:`ssl.HAS_SNI` is true). .. versionadded:: 3.2 *data* can be an iterable object. @@ -115,12 +107,9 @@ The :mod:`urllib.request` module defines the following functions: ``http/1.1`` when no *context* is given. Custom *context* should set ALPN protocols with :meth:`~ssl.SSLContext.set_alpn_protocol`. - .. deprecated:: 3.6 - - *cafile*, *capath* and *cadefault* are deprecated in favor of *context*. - Please use :meth:`ssl.SSLContext.load_cert_chain` instead, or let - :func:`ssl.create_default_context` select the system's trusted CA - certificates for you. + .. versionchanged:: 3.13 + Remove *cafile*, *capath* and *cadefault* parameters: use the *context* + parameter instead. .. function:: install_opener(opener) @@ -192,7 +181,7 @@ The following classes are provided: This class is an abstraction of a URL request. - *url* should be a string containing a valid URL. + *url* should be a string containing a valid, properly encoded URL. *data* must be an object specifying additional data to send to the server, or ``None`` if no such data is needed. Currently HTTP @@ -315,10 +304,10 @@ The following classes are provided: list of hostname suffixes, optionally with ``:port`` appended, for example ``cern.ch,ncsa.uiuc.edu,some.host:8080``. - .. note:: + .. note:: - ``HTTP_PROXY`` will be ignored if a variable ``REQUEST_METHOD`` is set; - see the documentation on :func:`~urllib.request.getproxies`. + ``HTTP_PROXY`` will be ignored if a variable ``REQUEST_METHOD`` is set; + see the documentation on :func:`~urllib.request.getproxies`. .. class:: HTTPPasswordMgr() @@ -1536,9 +1525,9 @@ some point in the future. :mod:`urllib.request` Restrictions ---------------------------------- - .. index:: - pair: HTTP; protocol - pair: FTP; protocol +.. index:: + pair: HTTP; protocol + pair: FTP; protocol * Currently, only the following protocols are supported: HTTP (versions 0.9 and 1.0), FTP, local files, and data URLs. @@ -1630,7 +1619,7 @@ The typical response object is a :class:`urllib.response.addinfourl` instance: .. deprecated:: 3.9 Deprecated in favor of :attr:`~addinfourl.status`. - .. method:: getstatus() + .. method:: getcode() .. deprecated:: 3.9 Deprecated in favor of :attr:`~addinfourl.status`. diff --git a/Doc/library/uu.rst b/Doc/library/uu.rst deleted file mode 100644 index 83c4aec47bbefce..000000000000000 --- a/Doc/library/uu.rst +++ /dev/null @@ -1,72 +0,0 @@ -:mod:`uu` --- Encode and decode uuencode files -============================================== - -.. module:: uu - :synopsis: Encode and decode files in uuencode format. - :deprecated: - -.. moduleauthor:: Lance Ellinghouse - -**Source code:** :source:`Lib/uu.py` - -.. deprecated-removed:: 3.11 3.13 - The :mod:`uu` module is deprecated - (see :pep:`PEP 594 <594#uu-and-the-uu-encoding>` for details). - :mod:`base64` is a modern alternative. - --------------- - -This module encodes and decodes files in uuencode format, allowing arbitrary -binary data to be transferred over ASCII-only connections. Wherever a file -argument is expected, the methods accept a file-like object. 
For backwards -compatibility, a string containing a pathname is also accepted, and the -corresponding file will be opened for reading and writing; the pathname ``'-'`` -is understood to mean the standard input or output. However, this interface is -deprecated; it's better for the caller to open the file itself, and be sure -that, when required, the mode is ``'rb'`` or ``'wb'`` on Windows. - -.. index:: - single: Jansen, Jack - single: Ellinghouse, Lance - -This code was contributed by Lance Ellinghouse, and modified by Jack Jansen. - -The :mod:`uu` module defines the following functions: - - -.. function:: encode(in_file, out_file, name=None, mode=None, *, backtick=False) - - Uuencode file *in_file* into file *out_file*. The uuencoded file will have - the header specifying *name* and *mode* as the defaults for the results of - decoding the file. The default defaults are taken from *in_file*, or ``'-'`` - and ``0o666`` respectively. If *backtick* is true, zeros are represented by - ``'`'`` instead of spaces. - - .. versionchanged:: 3.7 - Added the *backtick* parameter. - - -.. function:: decode(in_file, out_file=None, mode=None, quiet=False) - - This call decodes uuencoded file *in_file* placing the result on file - *out_file*. If *out_file* is a pathname, *mode* is used to set the permission - bits if the file must be created. Defaults for *out_file* and *mode* are taken - from the uuencode header. However, if the file specified in the header already - exists, a :exc:`uu.Error` is raised. - - :func:`decode` may print a warning to standard error if the input was produced - by an incorrect uuencoder and Python could recover from that error. Setting - *quiet* to a true value silences this warning. - - -.. exception:: Error() - - Subclass of :exc:`Exception`, this can be raised by :func:`uu.decode` under - various situations, such as described above, but also including a badly - formatted header, or truncated input file. - - -.. seealso:: - - Module :mod:`binascii` - Support module containing ASCII-to-binary and binary-to-ASCII conversions. diff --git a/Doc/library/uuid.rst b/Doc/library/uuid.rst index a71fe7abf5b5474..e2d231da38fd9a9 100644 --- a/Doc/library/uuid.rst +++ b/Doc/library/uuid.rst @@ -22,7 +22,7 @@ random UUID. Depending on support from the underlying platform, :func:`uuid1` may or may not return a "safe" UUID. A safe UUID is one which is generated using synchronization methods that ensure no two processes can obtain the same -UUID. All instances of :class:`UUID` have an :attr:`is_safe` attribute +UUID. All instances of :class:`UUID` have an :attr:`~UUID.is_safe` attribute which relays any information about the UUID's safety, using this enumeration: .. 
class:: SafeUUID @@ -95,25 +95,34 @@ which relays any information about the UUID's safety, using this enumeration: A tuple of the six integer fields of the UUID, which are also available as six individual attributes and two derived attributes: - +------------------------------+-------------------------------+ - | Field | Meaning | - +==============================+===============================+ - | :attr:`time_low` | the first 32 bits of the UUID | - +------------------------------+-------------------------------+ - | :attr:`time_mid` | the next 16 bits of the UUID | - +------------------------------+-------------------------------+ - | :attr:`time_hi_version` | the next 16 bits of the UUID | - +------------------------------+-------------------------------+ - | :attr:`clock_seq_hi_variant` | the next 8 bits of the UUID | - +------------------------------+-------------------------------+ - | :attr:`clock_seq_low` | the next 8 bits of the UUID | - +------------------------------+-------------------------------+ - | :attr:`node` | the last 48 bits of the UUID | - +------------------------------+-------------------------------+ - | :attr:`time` | the 60-bit timestamp | - +------------------------------+-------------------------------+ - | :attr:`clock_seq` | the 14-bit sequence number | - +------------------------------+-------------------------------+ +.. list-table:: + + * - Field + - Meaning + + * - .. attribute:: UUID.time_low + - The first 32 bits of the UUID. + + * - .. attribute:: UUID.time_mid + - The next 16 bits of the UUID. + + * - .. attribute:: UUID.time_hi_version + - The next 16 bits of the UUID. + + * - .. attribute:: UUID.clock_seq_hi_variant + - The next 8 bits of the UUID. + + * - .. attribute:: UUID.clock_seq_low + - The next 8 bits of the UUID. + + * - .. attribute:: UUID.node + - The last 48 bits of the UUID. + + * - .. attribute:: UUID.time + - The 60-bit timestamp. + + * - .. attribute:: UUID.clock_seq + - The 14-bit sequence number. .. attribute:: UUID.hex @@ -186,7 +195,8 @@ The :mod:`uuid` module defines the following functions: .. function:: uuid3(namespace, name) Generate a UUID based on the MD5 hash of a namespace identifier (which is a - UUID) and a name (which is a string). + UUID) and a name (which is a :class:`bytes` object or a string + that will be encoded using UTF-8). .. index:: single: uuid3 @@ -201,7 +211,8 @@ The :mod:`uuid` module defines the following functions: .. function:: uuid5(namespace, name) Generate a UUID based on the SHA-1 hash of a namespace identifier (which is a - UUID) and a name (which is a string). + UUID) and a name (which is a :class:`bytes` object or a string + that will be encoded using UTF-8). .. index:: single: uuid5 @@ -231,7 +242,7 @@ The :mod:`uuid` module defines the following namespace identifiers for use with text output format. The :mod:`uuid` module defines the following constants for the possible values -of the :attr:`variant` attribute: +of the :attr:`~UUID.variant` attribute: .. data:: RESERVED_NCS @@ -261,6 +272,47 @@ of the :attr:`variant` attribute: internal format of UUIDs, and methods of generating UUIDs. +.. _uuid-cli: + +Command-Line Usage +------------------ + +.. versionadded:: 3.12 + +The :mod:`uuid` module can be executed as a script from the command line. + +.. code-block:: sh + + python -m uuid [-h] [-u {uuid1,uuid3,uuid4,uuid5}] [-n NAMESPACE] [-N NAME] + +The following options are accepted: + +.. program:: uuid + +.. option:: -h, --help + + Show the help message and exit. + +.. 
option:: -u + --uuid + + Specify the function name to use to generate the uuid. By default :func:`uuid4` + is used. + +.. option:: -n + --namespace + + The namespace is a ``UUID``, or ``@ns`` where ``ns`` is a well-known predefined UUID + addressed by namespace name. Such as ``@dns``, ``@url``, ``@oid``, and ``@x500``. + Only required for :func:`uuid3` / :func:`uuid5` functions. + +.. option:: -N + --name + + The name used as part of generating the uuid. Only required for + :func:`uuid3` / :func:`uuid5` functions. + + .. _uuid-example: Example @@ -301,3 +353,22 @@ Here are some examples of typical usage of the :mod:`uuid` module:: >>> uuid.UUID(bytes=x.bytes) UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') + +.. _uuid-cli-example: + +Command-Line Example +-------------------- + +Here are some examples of typical usage of the :mod:`uuid` command line interface: + +.. code-block:: shell + + # generate a random uuid - by default uuid4() is used + $ python -m uuid + + # generate a uuid using uuid1() + $ python -m uuid -u uuid1 + + # generate a uuid using uuid5 + $ python -m uuid -u uuid5 -n @url -N example.com + diff --git a/Doc/library/venv.rst b/Doc/library/venv.rst index adc6cd339ac157a..da8942c554dea10 100644 --- a/Doc/library/venv.rst +++ b/Doc/library/venv.rst @@ -30,6 +30,25 @@ When used from within a virtual environment, common installation tools such as `pip`_ will install Python packages into a virtual environment without needing to be told to do so explicitly. +A virtual environment is (amongst other things): + +* Used to contain a specific Python interpreter and software libraries and + binaries which are needed to support a project (library or application). These + are by default isolated from software in other virtual environments and Python + interpreters and libraries installed in the operating system. + +* Contained in a directory, conventionally either named ``venv`` or ``.venv`` in + the project directory, or under a container directory for lots of virtual + environments, such as ``~/.virtualenvs``. + +* Not checked into source control systems such as Git. + +* Considered as disposable -- it should be simple to delete and recreate it from + scratch. You don't place any project code in the environment + +* Not considered as movable or copyable -- you just recreate the same + environment in the target location. + See :pep:`405` for more background on Python virtual environments. .. seealso:: @@ -55,13 +74,13 @@ point to the directories of the virtual environment, whereas :data:`sys.base_prefix` and :data:`sys.base_exec_prefix` point to those of the base Python used to create the environment. It is sufficient to check -``sys.prefix == sys.base_prefix`` to determine if the current interpreter is +``sys.prefix != sys.base_prefix`` to determine if the current interpreter is running from a virtual environment. A virtual environment may be "activated" using a script in its binary directory (``bin`` on POSIX; ``Scripts`` on Windows). -This will prepend that directory to your :envvar:`!PATH`, so that running -:program:`!python` will invoke the environment's Python interpreter +This will prepend that directory to your :envvar:`PATH`, so that running +:program:`python` will invoke the environment's Python interpreter and you can run installed scripts without having to use their full path. 
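+
+A minimal sketch of the interpreter check described above (the
+``in_virtual_environment`` helper name is illustrative only)::
+
+   import sys
+
+   def in_virtual_environment() -> bool:
+       # Inside a virtual environment sys.prefix points at the environment,
+       # while sys.base_prefix still points at the base installation.
+       return sys.prefix != sys.base_prefix
+
+   if in_virtual_environment():
+       print(f"running inside the virtual environment at {sys.prefix}")
+   else:
+       print("running from the base Python installation")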
The invocation of the activation script is platform-specific (:samp:`{}` must be replaced by the path to the directory @@ -84,7 +103,7 @@ containing the virtual environment): +-------------+------------+--------------------------------------------------+ .. versionadded:: 3.4 - :program:`!fish` and :program:`!csh` activation scripts. + :program:`fish` and :program:`csh` activation scripts. .. versionadded:: 3.8 PowerShell activation scripts installed under POSIX for PowerShell Core @@ -100,10 +119,10 @@ In order to achieve this, scripts installed into virtual environments have a "shebang" line which points to the environment's Python interpreter, i.e. :samp:`#!/{}/bin/python`. This means that the script will run with that interpreter regardless of the -value of :envvar:`!PATH`. On Windows, "shebang" line processing is supported if +value of :envvar:`PATH`. On Windows, "shebang" line processing is supported if you have the :ref:`launcher` installed. Thus, double-clicking an installed script in a Windows Explorer window should run it with the correct interpreter -without the environment needing to be activated or on the :envvar:`!PATH`. +without the environment needing to be activated or on the :envvar:`PATH`. When a virtual environment has been activated, the :envvar:`!VIRTUAL_ENV` environment variable is set to the path of the environment. @@ -143,7 +162,8 @@ creation according to their needs, the :class:`EnvBuilder` class. .. class:: EnvBuilder(system_site_packages=False, clear=False, \ symlinks=False, upgrade=False, with_pip=False, \ - prompt=None, upgrade_deps=False) + prompt=None, upgrade_deps=False, \ + *, scm_ignore_files=frozenset()) The :class:`EnvBuilder` class accepts the following keyword arguments on instantiation: @@ -172,6 +192,12 @@ creation according to their needs, the :class:`EnvBuilder` class. * ``upgrade_deps`` -- Update the base venv modules to the latest on PyPI + * ``scm_ignore_files`` -- Create ignore files based for the specified source + control managers (SCM) in the iterable. Support is defined by having a + method named ``create_{scm}_ignore_file``. The only value supported by + default is ``"git"`` via :meth:`create_git_ignore_file`. + + .. versionchanged:: 3.4 Added the ``with_pip`` parameter @@ -181,6 +207,9 @@ creation according to their needs, the :class:`EnvBuilder` class. .. versionadded:: 3.9 Added the ``upgrade_deps`` parameter + .. versionadded:: 3.13 + Added the ``scm_ignore_files`` parameter + Creators of third-party virtual environment tools will be free to use the provided :class:`EnvBuilder` class as a base class. @@ -284,11 +313,14 @@ creation according to their needs, the :class:`EnvBuilder` class. .. method:: upgrade_dependencies(context) - Upgrades the core venv dependency packages (currently ``pip`` and - ``setuptools``) in the environment. This is done by shelling out to the + Upgrades the core venv dependency packages (currently ``pip``) + in the environment. This is done by shelling out to the ``pip`` executable in the environment. .. versionadded:: 3.9 + .. versionchanged:: 3.12 + + ``setuptools`` is no longer a core venv dependency. .. method:: post_setup(context) @@ -336,11 +368,18 @@ creation according to their needs, the :class:`EnvBuilder` class. The directories are allowed to exist (for when an existing environment is being upgraded). + .. method:: create_git_ignore_file(context) + + Creates a ``.gitignore`` file within the virtual environment that causes + the entire directory to be ignored by the ``git`` source control manager. 
+ + .. versionadded:: 3.13 + There is also a module-level convenience function: .. function:: create(env_dir, system_site_packages=False, clear=False, \ symlinks=False, with_pip=False, prompt=None, \ - upgrade_deps=False) + upgrade_deps=False, *, scm_ignore_files=frozenset()) Create an :class:`EnvBuilder` with the given keyword arguments, and call its :meth:`~EnvBuilder.create` method with the *env_dir* argument. @@ -356,6 +395,9 @@ There is also a module-level convenience function: .. versionchanged:: 3.9 Added the ``upgrade_deps`` parameter + .. versionchanged:: 3.13 + Added the ``scm_ignore_files`` parameter + An example of extending ``EnvBuilder`` -------------------------------------- @@ -478,7 +520,7 @@ subclass which installs setuptools and pip into a created virtual environment:: :param context: The information for the virtual environment creation request being processed. """ - url = 'https://bitbucket.org/pypa/setuptools/downloads/ez_setup.py' + url = "https://bootstrap.pypa.io/ez_setup.py" self.install_script(context, 'setuptools', url) # clear up the setuptools archive which gets downloaded pred = lambda o: o.startswith('setuptools-') and o.endswith('.tar.gz') @@ -497,76 +539,68 @@ subclass which installs setuptools and pip into a created virtual environment:: url = 'https://bootstrap.pypa.io/get-pip.py' self.install_script(context, 'pip', url) + def main(args=None): - compatible = True - if sys.version_info < (3, 3): - compatible = False - elif not hasattr(sys, 'base_prefix'): - compatible = False - if not compatible: - raise ValueError('This script is only for use with ' - 'Python 3.3 or later') + import argparse + + parser = argparse.ArgumentParser(prog=__name__, + description='Creates virtual Python ' + 'environments in one or ' + 'more target ' + 'directories.') + parser.add_argument('dirs', metavar='ENV_DIR', nargs='+', + help='A directory in which to create the ' + 'virtual environment.') + parser.add_argument('--no-setuptools', default=False, + action='store_true', dest='nodist', + help="Don't install setuptools or pip in the " + "virtual environment.") + parser.add_argument('--no-pip', default=False, + action='store_true', dest='nopip', + help="Don't install pip in the virtual " + "environment.") + parser.add_argument('--system-site-packages', default=False, + action='store_true', dest='system_site', + help='Give the virtual environment access to the ' + 'system site-packages dir.') + if os.name == 'nt': + use_symlinks = False else: - import argparse - - parser = argparse.ArgumentParser(prog=__name__, - description='Creates virtual Python ' - 'environments in one or ' - 'more target ' - 'directories.') - parser.add_argument('dirs', metavar='ENV_DIR', nargs='+', - help='A directory in which to create the ' - 'virtual environment.') - parser.add_argument('--no-setuptools', default=False, - action='store_true', dest='nodist', - help="Don't install setuptools or pip in the " - "virtual environment.") - parser.add_argument('--no-pip', default=False, - action='store_true', dest='nopip', - help="Don't install pip in the virtual " - "environment.") - parser.add_argument('--system-site-packages', default=False, - action='store_true', dest='system_site', - help='Give the virtual environment access to the ' - 'system site-packages dir.') - if os.name == 'nt': - use_symlinks = False - else: - use_symlinks = True - parser.add_argument('--symlinks', default=use_symlinks, - action='store_true', dest='symlinks', - help='Try to use symlinks rather than copies, ' - 'when symlinks are not 
the default for ' - 'the platform.') - parser.add_argument('--clear', default=False, action='store_true', - dest='clear', help='Delete the contents of the ' - 'virtual environment ' - 'directory if it already ' - 'exists, before virtual ' - 'environment creation.') - parser.add_argument('--upgrade', default=False, action='store_true', - dest='upgrade', help='Upgrade the virtual ' - 'environment directory to ' - 'use this version of ' - 'Python, assuming Python ' - 'has been upgraded ' - 'in-place.') - parser.add_argument('--verbose', default=False, action='store_true', - dest='verbose', help='Display the output ' - 'from the scripts which ' - 'install setuptools and pip.') - options = parser.parse_args(args) - if options.upgrade and options.clear: - raise ValueError('you cannot supply --upgrade and --clear together.') - builder = ExtendedEnvBuilder(system_site_packages=options.system_site, - clear=options.clear, - symlinks=options.symlinks, - upgrade=options.upgrade, - nodist=options.nodist, - nopip=options.nopip, - verbose=options.verbose) - for d in options.dirs: - builder.create(d) + use_symlinks = True + parser.add_argument('--symlinks', default=use_symlinks, + action='store_true', dest='symlinks', + help='Try to use symlinks rather than copies, ' + 'when symlinks are not the default for ' + 'the platform.') + parser.add_argument('--clear', default=False, action='store_true', + dest='clear', help='Delete the contents of the ' + 'virtual environment ' + 'directory if it already ' + 'exists, before virtual ' + 'environment creation.') + parser.add_argument('--upgrade', default=False, action='store_true', + dest='upgrade', help='Upgrade the virtual ' + 'environment directory to ' + 'use this version of ' + 'Python, assuming Python ' + 'has been upgraded ' + 'in-place.') + parser.add_argument('--verbose', default=False, action='store_true', + dest='verbose', help='Display the output ' + 'from the scripts which ' + 'install setuptools and pip.') + options = parser.parse_args(args) + if options.upgrade and options.clear: + raise ValueError('you cannot supply --upgrade and --clear together.') + builder = ExtendedEnvBuilder(system_site_packages=options.system_site, + clear=options.clear, + symlinks=options.symlinks, + upgrade=options.upgrade, + nodist=options.nodist, + nopip=options.nopip, + verbose=options.verbose) + for d in options.dirs: + builder.create(d) if __name__ == '__main__': rc = 1 diff --git a/Doc/library/warnings.rst b/Doc/library/warnings.rst index 28579ce8df4a622..884de08eab1b16a 100644 --- a/Doc/library/warnings.rst +++ b/Doc/library/warnings.rst @@ -396,7 +396,7 @@ Available Functions ------------------- -.. function:: warn(message, category=None, stacklevel=1, source=None) +.. function:: warn(message, category=None, stacklevel=1, source=None, \*, skip_file_prefixes=None) Issue a warning, or maybe ignore it or raise an exception. The *category* argument, if given, must be a :ref:`warning category class `; it @@ -407,12 +407,39 @@ Available Functions :ref:`warnings filter `. The *stacklevel* argument can be used by wrapper functions written in Python, like this:: - def deprecation(message): + def deprecated_api(message): warnings.warn(message, DeprecationWarning, stacklevel=2) - This makes the warning refer to :func:`deprecation`'s caller, rather than to the - source of :func:`deprecation` itself (since the latter would defeat the purpose - of the warning message). 
+ This makes the warning refer to ``deprecated_api``'s caller, rather than to + the source of ``deprecated_api`` itself (since the latter would defeat the + purpose of the warning message). + + The *skip_file_prefixes* keyword argument can be used to indicate which + stack frames are ignored when counting stack levels. This can be useful when + you want the warning to always appear at call sites outside of a package + when a constant *stacklevel* does not fit all call paths or is otherwise + challenging to maintain. If supplied, it must be a tuple of strings. When + prefixes are supplied, stacklevel is implicitly overridden to be ``max(2, + stacklevel)``. To cause a warning to be attributed to the caller from + outside of the current package you might write:: + + # example/lower.py + _warn_skips = (os.path.dirname(__file__),) + + def one_way(r_luxury_yacht=None, t_wobbler_mangrove=None): + if r_luxury_yacht: + warnings.warn("Please migrate to t_wobbler_mangrove=.", + skip_file_prefixes=_warn_skips) + + # example/higher.py + from . import lower + + def another_way(**kw): + lower.one_way(**kw) + + This makes the warning refer to both the ``example.lower.one_way()`` and + ``package.higher.another_way()`` call sites only from calling code living + outside of ``example`` package. *source*, if supplied, is the destroyed object which emitted a :exc:`ResourceWarning`. @@ -420,6 +447,9 @@ Available Functions .. versionchanged:: 3.6 Added *source* parameter. + .. versionchanged:: 3.12 + Added *skip_file_prefixes*. + .. function:: warn_explicit(message, category, filename, lineno, module=None, registry=None, module_globals=None, source=None) diff --git a/Doc/library/wave.rst b/Doc/library/wave.rst index 04a28d97d619eb9..55b029bc742b245 100644 --- a/Doc/library/wave.rst +++ b/Doc/library/wave.rst @@ -11,8 +11,9 @@ -------------- -The :mod:`wave` module provides a convenient interface to the WAV sound format. -Only PCM encoded wave files are supported. +The :mod:`wave` module provides a convenient interface to the Waveform Audio +"WAVE" (or "WAV") file format. Only uncompressed PCM encoded wave files are +supported. .. versionchanged:: 3.12 @@ -41,13 +42,12 @@ The :mod:`wave` module defines the following function and exception: value for *mode*. If you pass in a file-like object, the wave object will not close it when its - :meth:`close` method is called; it is the caller's responsibility to close + ``close()`` method is called; it is the caller's responsibility to close the file object. The :func:`.open` function may be used in a :keyword:`with` statement. When - the :keyword:`!with` block completes, the :meth:`Wave_read.close() - ` or :meth:`Wave_write.close() - ` method is called. + the :keyword:`!with` block completes, the :meth:`Wave_read.close()` or + :meth:`Wave_write.close()` method is called. .. versionchanged:: 3.4 Added support for unseekable files. @@ -63,87 +63,99 @@ The :mod:`wave` module defines the following function and exception: Wave_read Objects ----------------- -Wave_read objects, as returned by :func:`.open`, have the following methods: +.. class:: Wave_read + Read a WAV file. -.. method:: Wave_read.close() + Wave_read objects, as returned by :func:`.open`, have the following methods: - Close the stream if it was opened by :mod:`wave`, and make the instance - unusable. This is called automatically on object collection. + .. method:: close() -.. method:: Wave_read.getnchannels() + Close the stream if it was opened by :mod:`wave`, and make the instance + unusable. 
This is called automatically on object collection. - Returns number of audio channels (``1`` for mono, ``2`` for stereo). + .. method:: getnchannels() -.. method:: Wave_read.getsampwidth() + Returns number of audio channels (``1`` for mono, ``2`` for stereo). - Returns sample width in bytes. + .. method:: getsampwidth() -.. method:: Wave_read.getframerate() + Returns sample width in bytes. - Returns sampling frequency. + .. method:: getframerate() -.. method:: Wave_read.getnframes() + Returns sampling frequency. - Returns number of audio frames. + .. method:: getnframes() -.. method:: Wave_read.getcomptype() + Returns number of audio frames. - Returns compression type (``'NONE'`` is the only supported type). + .. method:: getcomptype() -.. method:: Wave_read.getcompname() + Returns compression type (``'NONE'`` is the only supported type). - Human-readable version of :meth:`getcomptype`. Usually ``'not compressed'`` - parallels ``'NONE'``. + .. method:: getcompname() -.. method:: Wave_read.getparams() + Human-readable version of :meth:`getcomptype`. Usually ``'not compressed'`` + parallels ``'NONE'``. - Returns a :func:`~collections.namedtuple` ``(nchannels, sampwidth, - framerate, nframes, comptype, compname)``, equivalent to output of the - :meth:`get\*` methods. + .. method:: getparams() -.. method:: Wave_read.readframes(n) + Returns a :func:`~collections.namedtuple` ``(nchannels, sampwidth, + framerate, nframes, comptype, compname)``, equivalent to output of the + ``get*()`` methods. - Reads and returns at most *n* frames of audio, as a :class:`bytes` object. + .. method:: readframes(n) -.. method:: Wave_read.rewind() + Reads and returns at most *n* frames of audio, as a :class:`bytes` object. - Rewind the file pointer to the beginning of the audio stream. -The following two methods are defined for compatibility with the :mod:`aifc` -module, and don't do anything interesting. + .. method:: rewind() + Rewind the file pointer to the beginning of the audio stream. -.. method:: Wave_read.getmarkers() + The following two methods are defined for compatibility with the old :mod:`!aifc` + module, and don't do anything interesting. - Returns ``None``. + .. method:: getmarkers() -.. method:: Wave_read.getmark(id) + Returns ``None``. - Raise an error. + .. deprecated-removed:: 3.13 3.15 + The method only existed for compatibility with the :mod:`!aifc` module + which has been removed in Python 3.13. -The following two methods define a term "position" which is compatible between -them, and is otherwise implementation dependent. + .. method:: getmark(id) -.. method:: Wave_read.setpos(pos) + Raise an error. - Set the file pointer to the specified position. + .. deprecated-removed:: 3.13 3.15 + The method only existed for compatibility with the :mod:`!aifc` module + which has been removed in Python 3.13. + The following two methods define a term "position" which is compatible between + them, and is otherwise implementation dependent. -.. method:: Wave_read.tell() - Return current file pointer position. + .. method:: setpos(pos) + + Set the file pointer to the specified position. + + + .. method:: tell() + + Return current file pointer position. .. _wave-write-objects: @@ -151,97 +163,100 @@ them, and is otherwise implementation dependent. Wave_write Objects ------------------ -For seekable output streams, the ``wave`` header will automatically be updated -to reflect the number of frames actually written. For unseekable streams, the -*nframes* value must be accurate when the first frame data is written. 
An -accurate *nframes* value can be achieved either by calling -:meth:`~Wave_write.setnframes` or :meth:`~Wave_write.setparams` with the number -of frames that will be written before :meth:`~Wave_write.close` is called and -then using :meth:`~Wave_write.writeframesraw` to write the frame data, or by -calling :meth:`~Wave_write.writeframes` with all of the frame data to be -written. In the latter case :meth:`~Wave_write.writeframes` will calculate -the number of frames in the data and set *nframes* accordingly before writing -the frame data. +.. class:: Wave_write -Wave_write objects, as returned by :func:`.open`, have the following methods: + Write a WAV file. -.. versionchanged:: 3.4 - Added support for unseekable files. + Wave_write objects, as returned by :func:`.open`. + For seekable output streams, the ``wave`` header will automatically be updated + to reflect the number of frames actually written. For unseekable streams, the + *nframes* value must be accurate when the first frame data is written. An + accurate *nframes* value can be achieved either by calling + :meth:`setnframes` or :meth:`setparams` with the number + of frames that will be written before :meth:`close` is called and + then using :meth:`writeframesraw` to write the frame data, or by + calling :meth:`writeframes` with all of the frame data to be + written. In the latter case :meth:`writeframes` will calculate + the number of frames in the data and set *nframes* accordingly before writing + the frame data. -.. method:: Wave_write.close() + .. versionchanged:: 3.4 + Added support for unseekable files. - Make sure *nframes* is correct, and close the file if it was opened by - :mod:`wave`. This method is called upon object collection. It will raise - an exception if the output stream is not seekable and *nframes* does not - match the number of frames actually written. + Wave_write objects have the following methods: + .. method:: close() -.. method:: Wave_write.setnchannels(n) + Make sure *nframes* is correct, and close the file if it was opened by + :mod:`wave`. This method is called upon object collection. It will raise + an exception if the output stream is not seekable and *nframes* does not + match the number of frames actually written. - Set the number of channels. + .. method:: setnchannels(n) -.. method:: Wave_write.setsampwidth(n) + Set the number of channels. - Set the sample width to *n* bytes. + .. method:: setsampwidth(n) -.. method:: Wave_write.setframerate(n) + Set the sample width to *n* bytes. - Set the frame rate to *n*. - .. versionchanged:: 3.2 - A non-integral input to this method is rounded to the nearest - integer. + .. method:: setframerate(n) + Set the frame rate to *n*. -.. method:: Wave_write.setnframes(n) + .. versionchanged:: 3.2 + A non-integral input to this method is rounded to the nearest + integer. - Set the number of frames to *n*. This will be changed later if the number - of frames actually written is different (this update attempt will - raise an error if the output stream is not seekable). + .. method:: setnframes(n) -.. method:: Wave_write.setcomptype(type, name) + Set the number of frames to *n*. This will be changed later if the number + of frames actually written is different (this update attempt will + raise an error if the output stream is not seekable). - Set the compression type and description. At the moment, only compression type - ``NONE`` is supported, meaning no compression. + .. method:: setcomptype(type, name) -.. 
method:: Wave_write.setparams(tuple) + Set the compression type and description. At the moment, only compression type + ``NONE`` is supported, meaning no compression. - The *tuple* should be ``(nchannels, sampwidth, framerate, nframes, comptype, - compname)``, with values valid for the :meth:`set\*` methods. Sets all - parameters. + .. method:: setparams(tuple) -.. method:: Wave_write.tell() + The *tuple* should be ``(nchannels, sampwidth, framerate, nframes, comptype, + compname)``, with values valid for the ``set*()`` methods. Sets all + parameters. - Return current position in the file, with the same disclaimer for the - :meth:`Wave_read.tell` and :meth:`Wave_read.setpos` methods. + .. method:: tell() -.. method:: Wave_write.writeframesraw(data) + Return current position in the file, with the same disclaimer for the + :meth:`Wave_read.tell` and :meth:`Wave_read.setpos` methods. - Write audio frames, without correcting *nframes*. - .. versionchanged:: 3.4 - Any :term:`bytes-like object` is now accepted. + .. method:: writeframesraw(data) + Write audio frames, without correcting *nframes*. -.. method:: Wave_write.writeframes(data) + .. versionchanged:: 3.4 + Any :term:`bytes-like object` is now accepted. - Write audio frames and make sure *nframes* is correct. It will raise an - error if the output stream is not seekable and the total number of frames - that have been written after *data* has been written does not match the - previously set value for *nframes*. - .. versionchanged:: 3.4 - Any :term:`bytes-like object` is now accepted. + .. method:: writeframes(data) + Write audio frames and make sure *nframes* is correct. It will raise an + error if the output stream is not seekable and the total number of frames + that have been written after *data* has been written does not match the + previously set value for *nframes*. -Note that it is invalid to set any parameters after calling :meth:`writeframes` -or :meth:`writeframesraw`, and any attempt to do so will raise -:exc:`wave.Error`. + .. versionchanged:: 3.4 + Any :term:`bytes-like object` is now accepted. + Note that it is invalid to set any parameters after calling :meth:`writeframes` + or :meth:`writeframesraw`, and any attempt to do so will raise + :exc:`wave.Error`. diff --git a/Doc/library/weakref.rst b/Doc/library/weakref.rst index a1e542b1e927e49..d6e062df945c64c 100644 --- a/Doc/library/weakref.rst +++ b/Doc/library/weakref.rst @@ -111,7 +111,7 @@ See :ref:`__slots__ documentation ` for details. Exceptions raised by the callback will be noted on the standard error output, but cannot be propagated; they are handled in exactly the same way as exceptions - raised from an object's :meth:`__del__` method. + raised from an object's :meth:`~object.__del__` method. Weak references are :term:`hashable` if the *object* is hashable. They will maintain their hash value even after the *object* was deleted. If @@ -143,7 +143,7 @@ See :ref:`__slots__ documentation ` for details. ``ProxyType`` or ``CallableProxyType``, depending on whether *object* is callable. Proxy objects are not :term:`hashable` regardless of the referent; this avoids a number of problems related to their fundamentally mutable nature, and - prevent their use as dictionary keys. *callback* is the same as the parameter + prevents their use as dictionary keys. *callback* is the same as the parameter of the same name to the :func:`ref` function. 
Accessing an attribute of the proxy object after the referent is @@ -172,6 +172,30 @@ See :ref:`__slots__ documentation ` for details. application without adding attributes to those objects. This can be especially useful with objects that override attribute accesses. + Note that when a key with equal value to an existing key (but not equal identity) + is inserted into the dictionary, it replaces the value but does not replace the + existing key. Due to this, when the reference to the original key is deleted, it + also deletes the entry in the dictionary:: + + >>> class T(str): pass + ... + >>> k1, k2 = T(), T() + >>> d = weakref.WeakKeyDictionary() + >>> d[k1] = 1 # d = {k1: 1} + >>> d[k2] = 2 # d = {k1: 2} + >>> del k1 # d = {} + + A workaround would be to remove the key prior to reassignment:: + + >>> class T(str): pass + ... + >>> k1, k2 = T(), T() + >>> d = weakref.WeakKeyDictionary() + >>> d[k1] = 1 # d = {k1: 1} + >>> del d[k1] + >>> d[k2] = 2 # d = {k2: 2} + >>> del k1 # d = {k2: 2} + .. versionchanged:: 3.9 Added support for ``|`` and ``|=`` operators, specified in :pep:`584`. @@ -197,8 +221,7 @@ than needed. Added support for ``|`` and ``|=`` operators, as specified in :pep:`584`. :class:`WeakValueDictionary` objects have an additional method that has the -same issues as the :meth:`keyrefs` method of :class:`WeakKeyDictionary` -objects. +same issues as the :meth:`WeakKeyDictionary.keyrefs` method. .. method:: WeakValueDictionary.valuerefs() @@ -212,7 +235,7 @@ objects. discarded when no strong reference to it exists any more. -.. class:: WeakMethod(method) +.. class:: WeakMethod(method[, callback]) A custom :class:`ref` subclass which simulates a weak reference to a bound method (i.e., a method defined on a class and looked up on an instance). @@ -238,6 +261,8 @@ objects. >>> r() >>> + *callback* is the same as the parameter of the same name to the :func:`ref` function. + .. versionadded:: 3.4 .. class:: finalize(obj, func, /, *args, **kwargs) @@ -255,7 +280,7 @@ objects. Exceptions raised by finalizer callbacks during garbage collection will be shown on the standard error output, but cannot be propagated. They are handled in the same way as exceptions raised - from an object's :meth:`__del__` method or a weak reference's + from an object's :meth:`~object.__del__` method or a weak reference's callback. When the program exits, each remaining live finalizer is called @@ -497,18 +522,18 @@ is still alive. For instance obj dead or exiting -Comparing finalizers with :meth:`__del__` methods -------------------------------------------------- +Comparing finalizers with :meth:`~object.__del__` methods +--------------------------------------------------------- Suppose we want to create a class whose instances represent temporary directories. The directories should be deleted with their contents when the first of the following events occurs: * the object is garbage collected, -* the object's :meth:`remove` method is called, or +* the object's :meth:`!remove` method is called, or * the program exits. 
-We might try to implement the class using a :meth:`__del__` method as +We might try to implement the class using a :meth:`~object.__del__` method as follows:: class TempDir: @@ -527,12 +552,12 @@ follows:: def __del__(self): self.remove() -Starting with Python 3.4, :meth:`__del__` methods no longer prevent +Starting with Python 3.4, :meth:`~object.__del__` methods no longer prevent reference cycles from being garbage collected, and module globals are no longer forced to :const:`None` during :term:`interpreter shutdown`. So this code should work without any issues on CPython. -However, handling of :meth:`__del__` methods is notoriously implementation +However, handling of :meth:`~object.__del__` methods is notoriously implementation specific, since it depends on internal details of the interpreter's garbage collector implementation. diff --git a/Doc/library/webbrowser.rst b/Doc/library/webbrowser.rst index 734b6321e5a7e7e..4667b81e38ada23 100644 --- a/Doc/library/webbrowser.rst +++ b/Doc/library/webbrowser.rst @@ -111,51 +111,41 @@ for the controller classes, all defined in this module. +------------------------+-----------------------------------------+-------+ | Type Name | Class Name | Notes | +========================+=========================================+=======+ -| ``'mozilla'`` | :class:`Mozilla('mozilla')` | | +| ``'mozilla'`` | ``Mozilla('mozilla')`` | | +------------------------+-----------------------------------------+-------+ -| ``'firefox'`` | :class:`Mozilla('mozilla')` | | +| ``'firefox'`` | ``Mozilla('mozilla')`` | | +------------------------+-----------------------------------------+-------+ -| ``'netscape'`` | :class:`Mozilla('netscape')` | | +| ``'epiphany'`` | ``Epiphany('epiphany')`` | | +------------------------+-----------------------------------------+-------+ -| ``'galeon'`` | :class:`Galeon('galeon')` | | +| ``'kfmclient'`` | ``Konqueror()`` | \(1) | +------------------------+-----------------------------------------+-------+ -| ``'epiphany'`` | :class:`Galeon('epiphany')` | | +| ``'konqueror'`` | ``Konqueror()`` | \(1) | +------------------------+-----------------------------------------+-------+ -| ``'skipstone'`` | :class:`BackgroundBrowser('skipstone')` | | +| ``'kfm'`` | ``Konqueror()`` | \(1) | +------------------------+-----------------------------------------+-------+ -| ``'kfmclient'`` | :class:`Konqueror()` | \(1) | +| ``'opera'`` | ``Opera()`` | | +------------------------+-----------------------------------------+-------+ -| ``'konqueror'`` | :class:`Konqueror()` | \(1) | +| ``'links'`` | ``GenericBrowser('links')`` | | +------------------------+-----------------------------------------+-------+ -| ``'kfm'`` | :class:`Konqueror()` | \(1) | +| ``'elinks'`` | ``Elinks('elinks')`` | | +------------------------+-----------------------------------------+-------+ -| ``'mosaic'`` | :class:`BackgroundBrowser('mosaic')` | | +| ``'lynx'`` | ``GenericBrowser('lynx')`` | | +------------------------+-----------------------------------------+-------+ -| ``'opera'`` | :class:`Opera()` | | +| ``'w3m'`` | ``GenericBrowser('w3m')`` | | +------------------------+-----------------------------------------+-------+ -| ``'grail'`` | :class:`Grail()` | | +| ``'windows-default'`` | ``WindowsDefault`` | \(2) | +------------------------+-----------------------------------------+-------+ -| ``'links'`` | :class:`GenericBrowser('links')` | | +| ``'macosx'`` | ``MacOSXOSAScript('default')`` | \(3) | +------------------------+-----------------------------------------+-------+ 
-| ``'elinks'`` | :class:`Elinks('elinks')` | | +| ``'safari'`` | ``MacOSXOSAScript('safari')`` | \(3) | +------------------------+-----------------------------------------+-------+ -| ``'lynx'`` | :class:`GenericBrowser('lynx')` | | +| ``'google-chrome'`` | ``Chrome('google-chrome')`` | | +------------------------+-----------------------------------------+-------+ -| ``'w3m'`` | :class:`GenericBrowser('w3m')` | | +| ``'chrome'`` | ``Chrome('chrome')`` | | +------------------------+-----------------------------------------+-------+ -| ``'windows-default'`` | :class:`WindowsDefault` | \(2) | +| ``'chromium'`` | ``Chromium('chromium')`` | | +------------------------+-----------------------------------------+-------+ -| ``'macosx'`` | :class:`MacOSXOSAScript('default')` | \(3) | -+------------------------+-----------------------------------------+-------+ -| ``'safari'`` | :class:`MacOSXOSAScript('safari')` | \(3) | -+------------------------+-----------------------------------------+-------+ -| ``'google-chrome'`` | :class:`Chrome('google-chrome')` | | -+------------------------+-----------------------------------------+-------+ -| ``'chrome'`` | :class:`Chrome('chrome')` | | -+------------------------+-----------------------------------------+-------+ -| ``'chromium'`` | :class:`Chromium('chromium')` | | -+------------------------+-----------------------------------------+-------+ -| ``'chromium-browser'`` | :class:`Chromium('chromium-browser')` | | +| ``'chromium-browser'`` | ``Chromium('chromium-browser')`` | | +------------------------+-----------------------------------------+-------+ Notes: @@ -163,7 +153,7 @@ Notes: (1) "Konqueror" is the file manager for the KDE desktop environment for Unix, and only makes sense to use if KDE is running. Some way of reliably detecting KDE - would be nice; the :envvar:`KDEDIR` variable is not sufficient. Note also that + would be nice; the :envvar:`!KDEDIR` variable is not sufficient. Note also that the name "kfm" is used even when using the :program:`konqueror` command with KDE 2 --- the implementation selects the best strategy for running Konqueror. @@ -173,11 +163,18 @@ Notes: (3) Only on macOS platform. +.. versionadded:: 3.2 + A new :class:`!MacOSXOSAScript` class has been added + and is used on Mac instead of the previous :class:`!MacOSX` class. + This adds support for opening browsers not currently set as the OS default. + .. versionadded:: 3.3 Support for Chrome/Chromium has been added. -.. deprecated-removed:: 3.11 3.13 - :class:`MacOSX` is deprecated, use :class:`MacOSXOSAScript` instead. +.. versionchanged:: 3.12 + Support for several obsolete browsers has been removed. + Removed browsers include Grail, Mosaic, Netscape, Galeon, + Skipstone, Iceape, and Firefox versions 35 and below. Here are some simple examples:: @@ -199,7 +196,7 @@ Browser controllers provide these methods which parallel three of the module-level convenience functions: -.. attribute:: name +.. attribute:: controller.name System-dependent name for the browser. diff --git a/Doc/library/winreg.rst b/Doc/library/winreg.rst index 4ab671817710dd7..06bd4d87eb03c6e 100644 --- a/Doc/library/winreg.rst +++ b/Doc/library/winreg.rst @@ -288,7 +288,7 @@ This module offers the following functions: table (FAT) file system, the filename may not have an extension. A call to :func:`LoadKey` fails if the calling process does not have the - :const:`SE_RESTORE_PRIVILEGE` privilege. Note that privileges are different + :c:data:`!SE_RESTORE_PRIVILEGE` privilege. 
Note that privileges are different from permissions -- see the `RegLoadKey documentation `__ for more details. @@ -414,7 +414,7 @@ This module offers the following functions: If *key* represents a key on a remote computer, the path described by *file_name* is relative to the remote computer. The caller of this method must - possess the :const:`SeBackupPrivilege` security privilege. Note that + possess the **SeBackupPrivilege** security privilege. Note that privileges are different than permissions -- see the `Conflicts Between User Rights and Permissions documentation `__ @@ -536,7 +536,7 @@ This module offers the following functions: Constants ------------------ -The following constants are defined for use in many :mod:`_winreg` functions. +The following constants are defined for use in many :mod:`winreg` functions. .. _hkey-constants: @@ -745,7 +745,7 @@ All registry functions in this module return one of these objects. All registry functions in this module which accept a handle object also accept an integer, however, use of the handle object is encouraged. -Handle objects provide semantics for :meth:`__bool__` -- thus :: +Handle objects provide semantics for :meth:`~object.__bool__` -- thus :: if handle: print("Yes") diff --git a/Doc/library/winsound.rst b/Doc/library/winsound.rst index 372f792a0f938e1..370c5216652ba7d 100644 --- a/Doc/library/winsound.rst +++ b/Doc/library/winsound.rst @@ -24,7 +24,7 @@ provided by Windows platforms. It includes functions and several constants. .. function:: PlaySound(sound, flags) - Call the underlying :c:func:`PlaySound` function from the Platform API. The + Call the underlying :c:func:`!PlaySound` function from the Platform API. The *sound* parameter may be a filename, a system sound alias, audio data as a :term:`bytes-like object`, or ``None``. Its interpretation depends on the value of *flags*, which can be a bitwise ORed @@ -35,7 +35,7 @@ provided by Windows platforms. It includes functions and several constants. .. function:: MessageBeep(type=MB_OK) - Call the underlying :c:func:`MessageBeep` function from the Platform API. This + Call the underlying :c:func:`!MessageBeep` function from the Platform API. This plays a sound as specified in the registry. The *type* argument specifies which sound to play; possible values are ``-1``, ``MB_ICONASTERISK``, ``MB_ICONEXCLAMATION``, ``MB_ICONHAND``, ``MB_ICONQUESTION``, and ``MB_OK``, all diff --git a/Doc/library/wsgiref.rst b/Doc/library/wsgiref.rst index 06223e667a450a5..be9e56b04c1fbf1 100644 --- a/Doc/library/wsgiref.rst +++ b/Doc/library/wsgiref.rst @@ -7,6 +7,8 @@ .. moduleauthor:: Phillip J. Eby .. sectionauthor:: Phillip J. Eby +**Source code:** :source:`Lib/wsgiref` + -------------- The Web Server Gateway Interface (WSGI) is a standard interface between web @@ -178,7 +180,7 @@ also provides these miscellaneous utilities: print(chunk) .. versionchanged:: 3.11 - Support for :meth:`__getitem__` method has been removed. + Support for :meth:`~object.__getitem__` method has been removed. :mod:`wsgiref.headers` -- WSGI response header tools @@ -199,7 +201,7 @@ manipulation of WSGI response headers using a mapping-like interface. an empty list. :class:`Headers` objects support typical mapping operations including - :meth:`__getitem__`, :meth:`get`, :meth:`__setitem__`, :meth:`setdefault`, + :meth:`~object.__getitem__`, :meth:`get`, :meth:`__setitem__`, :meth:`setdefault`, :meth:`__delitem__` and :meth:`__contains__`. 
For each of these methods, the key is the header name (treated case-insensitively), and the value is the first value associated with that header name. Setting a header @@ -672,7 +674,7 @@ input, output, and error streams. This method is a WSGI application to generate an error page for the user. It is only invoked if an error occurs before headers are sent to the client. - This method can access the current error information using ``sys.exc_info()``, + This method can access the current error using ``sys.exception()``, and should pass that information to *start_response* when calling it (as described in the "Error Handling" section of :pep:`3333`). diff --git a/Doc/library/xdrlib.rst b/Doc/library/xdrlib.rst deleted file mode 100644 index 39e75573260c50d..000000000000000 --- a/Doc/library/xdrlib.rst +++ /dev/null @@ -1,283 +0,0 @@ -:mod:`xdrlib` --- Encode and decode XDR data -============================================ - -.. module:: xdrlib - :synopsis: Encoders and decoders for the External Data Representation (XDR). - :deprecated: - -**Source code:** :source:`Lib/xdrlib.py` - -.. index:: - single: XDR - single: External Data Representation - -.. deprecated-removed:: 3.11 3.13 - The :mod:`xdrlib` module is deprecated - (see :pep:`PEP 594 <594#xdrlib>` for details). - --------------- - -The :mod:`xdrlib` module supports the External Data Representation Standard as -described in :rfc:`1014`, written by Sun Microsystems, Inc. June 1987. It -supports most of the data types described in the RFC. - -The :mod:`xdrlib` module defines two classes, one for packing variables into XDR -representation, and another for unpacking from XDR representation. There are -also two exception classes. - - -.. class:: Packer() - - :class:`Packer` is the class for packing data into XDR representation. The - :class:`Packer` class is instantiated with no arguments. - - -.. class:: Unpacker(data) - - ``Unpacker`` is the complementary class which unpacks XDR data values from a - string buffer. The input buffer is given as *data*. - - -.. seealso:: - - :rfc:`1014` - XDR: External Data Representation Standard - This RFC defined the encoding of data which was XDR at the time this module was - originally written. It has apparently been obsoleted by :rfc:`1832`. - - :rfc:`1832` - XDR: External Data Representation Standard - Newer RFC that provides a revised definition of XDR. - - -.. _xdr-packer-objects: - -Packer Objects --------------- - -:class:`Packer` instances have the following methods: - - -.. method:: Packer.get_buffer() - - Returns the current pack buffer as a string. - - -.. method:: Packer.reset() - - Resets the pack buffer to the empty string. - -In general, you can pack any of the most common XDR data types by calling the -appropriate ``pack_type()`` method. Each method takes a single argument, the -value to pack. The following simple data type packing methods are supported: -:meth:`pack_uint`, :meth:`pack_int`, :meth:`pack_enum`, :meth:`pack_bool`, -:meth:`pack_uhyper`, and :meth:`pack_hyper`. - - -.. method:: Packer.pack_float(value) - - Packs the single-precision floating point number *value*. - - -.. method:: Packer.pack_double(value) - - Packs the double-precision floating point number *value*. - -The following methods support packing strings, bytes, and opaque data: - - -.. method:: Packer.pack_fstring(n, s) - - Packs a fixed length string, *s*. *n* is the length of the string but it is - *not* packed into the data buffer. 
The string is padded with null bytes if - necessary to guaranteed 4 byte alignment. - - -.. method:: Packer.pack_fopaque(n, data) - - Packs a fixed length opaque data stream, similarly to :meth:`pack_fstring`. - - -.. method:: Packer.pack_string(s) - - Packs a variable length string, *s*. The length of the string is first packed - as an unsigned integer, then the string data is packed with - :meth:`pack_fstring`. - - -.. method:: Packer.pack_opaque(data) - - Packs a variable length opaque data string, similarly to :meth:`pack_string`. - - -.. method:: Packer.pack_bytes(bytes) - - Packs a variable length byte stream, similarly to :meth:`pack_string`. - -The following methods support packing arrays and lists: - - -.. method:: Packer.pack_list(list, pack_item) - - Packs a *list* of homogeneous items. This method is useful for lists with an - indeterminate size; i.e. the size is not available until the entire list has - been walked. For each item in the list, an unsigned integer ``1`` is packed - first, followed by the data value from the list. *pack_item* is the function - that is called to pack the individual item. At the end of the list, an unsigned - integer ``0`` is packed. - - For example, to pack a list of integers, the code might appear like this:: - - import xdrlib - p = xdrlib.Packer() - p.pack_list([1, 2, 3], p.pack_int) - - -.. method:: Packer.pack_farray(n, array, pack_item) - - Packs a fixed length list (*array*) of homogeneous items. *n* is the length of - the list; it is *not* packed into the buffer, but a :exc:`ValueError` exception - is raised if ``len(array)`` is not equal to *n*. As above, *pack_item* is the - function used to pack each element. - - -.. method:: Packer.pack_array(list, pack_item) - - Packs a variable length *list* of homogeneous items. First, the length of the - list is packed as an unsigned integer, then each element is packed as in - :meth:`pack_farray` above. - - -.. _xdr-unpacker-objects: - -Unpacker Objects ----------------- - -The :class:`Unpacker` class offers the following methods: - - -.. method:: Unpacker.reset(data) - - Resets the string buffer with the given *data*. - - -.. method:: Unpacker.get_position() - - Returns the current unpack position in the data buffer. - - -.. method:: Unpacker.set_position(position) - - Sets the data buffer unpack position to *position*. You should be careful about - using :meth:`get_position` and :meth:`set_position`. - - -.. method:: Unpacker.get_buffer() - - Returns the current unpack data buffer as a string. - - -.. method:: Unpacker.done() - - Indicates unpack completion. Raises an :exc:`Error` exception if all of the - data has not been unpacked. - -In addition, every data type that can be packed with a :class:`Packer`, can be -unpacked with an :class:`Unpacker`. Unpacking methods are of the form -``unpack_type()``, and take no arguments. They return the unpacked object. - - -.. method:: Unpacker.unpack_float() - - Unpacks a single-precision floating point number. - - -.. method:: Unpacker.unpack_double() - - Unpacks a double-precision floating point number, similarly to - :meth:`unpack_float`. - -In addition, the following methods unpack strings, bytes, and opaque data: - - -.. method:: Unpacker.unpack_fstring(n) - - Unpacks and returns a fixed length string. *n* is the number of characters - expected. Padding with null bytes to guaranteed 4 byte alignment is assumed. - - -.. method:: Unpacker.unpack_fopaque(n) - - Unpacks and returns a fixed length opaque data stream, similarly to - :meth:`unpack_fstring`. 
- - -.. method:: Unpacker.unpack_string() - - Unpacks and returns a variable length string. The length of the string is first - unpacked as an unsigned integer, then the string data is unpacked with - :meth:`unpack_fstring`. - - -.. method:: Unpacker.unpack_opaque() - - Unpacks and returns a variable length opaque data string, similarly to - :meth:`unpack_string`. - - -.. method:: Unpacker.unpack_bytes() - - Unpacks and returns a variable length byte stream, similarly to - :meth:`unpack_string`. - -The following methods support unpacking arrays and lists: - - -.. method:: Unpacker.unpack_list(unpack_item) - - Unpacks and returns a list of homogeneous items. The list is unpacked one - element at a time by first unpacking an unsigned integer flag. If the flag is - ``1``, then the item is unpacked and appended to the list. A flag of ``0`` - indicates the end of the list. *unpack_item* is the function that is called to - unpack the items. - - -.. method:: Unpacker.unpack_farray(n, unpack_item) - - Unpacks and returns (as a list) a fixed length array of homogeneous items. *n* - is number of list elements to expect in the buffer. As above, *unpack_item* is - the function used to unpack each element. - - -.. method:: Unpacker.unpack_array(unpack_item) - - Unpacks and returns a variable length *list* of homogeneous items. First, the - length of the list is unpacked as an unsigned integer, then each element is - unpacked as in :meth:`unpack_farray` above. - - -.. _xdr-exceptions: - -Exceptions ----------- - -Exceptions in this module are coded as class instances: - - -.. exception:: Error - - The base exception class. :exc:`Error` has a single public attribute - :attr:`msg` containing the description of the error. - - -.. exception:: ConversionError - - Class derived from :exc:`Error`. Contains no additional instance variables. - -Here is an example of how you would catch one of these exceptions:: - - import xdrlib - p = xdrlib.Packer() - try: - p.pack_double(8.01) - except xdrlib.ConversionError as instance: - print('packing the double failed:', instance.msg) - diff --git a/Doc/library/xml.dom.pulldom.rst b/Doc/library/xml.dom.pulldom.rst index d1df465a598e532..843c2fd7fdb937a 100644 --- a/Doc/library/xml.dom.pulldom.rst +++ b/Doc/library/xml.dom.pulldom.rst @@ -115,7 +115,7 @@ DOMEventStream Objects .. class:: DOMEventStream(stream, parser, bufsize) .. versionchanged:: 3.11 - Support for :meth:`__getitem__` method has been removed. + Support for :meth:`~object.__getitem__` method has been removed. .. method:: getEvent() diff --git a/Doc/library/xml.etree.elementtree.rst b/Doc/library/xml.etree.elementtree.rst index 2fe0d2e082fb3a0..57cfbb8d92244be 100644 --- a/Doc/library/xml.etree.elementtree.rst +++ b/Doc/library/xml.etree.elementtree.rst @@ -17,7 +17,7 @@ for parsing and creating XML data. This module will use a fast implementation whenever available. .. deprecated:: 3.3 - The :mod:`xml.etree.cElementTree` module is deprecated. + The :mod:`!xml.etree.cElementTree` module is deprecated. .. warning:: @@ -154,6 +154,7 @@ elements, call :meth:`XMLPullParser.read_events`. Here is an example:: ... print(elem.tag, 'text=', elem.text) ... end + mytag text= sometext more text The obvious use case is applications that operate in a non-blocking fashion where the XML data is being received from a socket or read incrementally from @@ -621,7 +622,9 @@ Functions *parser* is an optional parser instance. If not given, the standard :class:`XMLParser` parser is used. 
*parser* must be a subclass of :class:`XMLParser` and can only use the default :class:`TreeBuilder` as a - target. Returns an :term:`iterator` providing ``(event, elem)`` pairs. + target. Returns an :term:`iterator` providing ``(event, elem)`` pairs; + it has a ``root`` attribute that references the root element of the + resulting XML tree once *source* is fully read. Note that while :func:`iterparse` builds the tree incrementally, it issues blocking reads on *source* (or the file it names). As such, it's unsuitable @@ -825,6 +828,8 @@ Reference Functions ^^^^^^^^^ +.. module:: xml.etree.ElementInclude + .. function:: xml.etree.ElementInclude.default_loader( href, parse, encoding=None) :module: @@ -862,6 +867,9 @@ Functions Element Objects ^^^^^^^^^^^^^^^ +.. module:: xml.etree.ElementTree + :noindex: + .. class:: Element(tag, attrib={}, **extra) Element class. This class defines the Element interface, and provides a @@ -1045,9 +1053,9 @@ Element Objects :meth:`~object.__getitem__`, :meth:`~object.__setitem__`, :meth:`~object.__len__`. - Caution: Elements with no subelements will test as ``False``. This behavior - will change in future versions. Use specific ``len(elem)`` or ``elem is - None`` test instead. :: + Caution: Elements with no subelements will test as ``False``. Testing the + truth value of an Element is deprecated and will raise an exception in + Python 3.14. Use specific ``len(elem)`` or ``elem is None`` test instead.:: element = root.find('foo') @@ -1057,6 +1065,9 @@ Element Objects if element is None: print("element not found") + .. versionchanged:: 3.12 + Testing the truth value of an Element emits :exc:`DeprecationWarning`. + Prior to Python 3.8, the serialisation order of the XML attributes of elements was artificially made predictable by sorting the attributes by their name. Based on the now guaranteed ordering of dicts, this arbitrary @@ -1212,6 +1223,7 @@ Example of changing the attribute "target" of every link in first paragraph:: [, ] >>> for i in links: # Iterates through all found links ... i.attrib["target"] = "blank" + ... >>> tree.write("output.xhtml") .. _elementtree-qname-objects: diff --git a/Doc/library/xml.rst b/Doc/library/xml.rst index 20b0905bb1093a9..909022ea4ba6a4b 100644 --- a/Doc/library/xml.rst +++ b/Doc/library/xml.rst @@ -73,12 +73,12 @@ decompression bomb Safe Safe Safe 1. Expat 2.4.1 and newer is not vulnerable to the "billion laughs" and "quadratic blowup" vulnerabilities. Items still listed as vulnerable due to potential reliance on system-provided libraries. Check - :data:`pyexpat.EXPAT_VERSION`. + :const:`!pyexpat.EXPAT_VERSION`. 2. :mod:`xml.etree.ElementTree` doesn't expand external entities and raises a - :exc:`ParserError` when an entity occurs. + :exc:`~xml.etree.ElementTree.ParseError` when an entity occurs. 3. :mod:`xml.dom.minidom` doesn't expand external entities and simply returns the unexpanded entity verbatim. -4. :mod:`xmlrpclib` doesn't expand external entities and omits them. +4. :mod:`xmlrpc.client` doesn't expand external entities and omits them. 5. Since Python 3.7.1, external general entities are no longer processed by default. @@ -119,8 +119,8 @@ all known attack vectors with examples and references. .. _defusedxml-package: -The :mod:`defusedxml` Package ------------------------------------------------------- +The :mod:`!defusedxml` Package +------------------------------ `defusedxml`_ is a pure Python package with modified subclasses of all stdlib XML parsers that prevent any potentially malicious operation. 
Use of this diff --git a/Doc/library/xml.sax.handler.rst b/Doc/library/xml.sax.handler.rst index 719ce5ab1bcf658..e2f28e3244cb096 100644 --- a/Doc/library/xml.sax.handler.rst +++ b/Doc/library/xml.sax.handler.rst @@ -393,7 +393,7 @@ implements this interface, then register the object with your :class:`~xml.sax.xmlreader.XMLReader`, the parser will call the methods in your object to report all warnings and errors. There are three levels of errors available: warnings, (possibly) recoverable errors, -and unrecoverable errors. All methods take a :exc:`SAXParseException` as the +and unrecoverable errors. All methods take a :exc:`~xml.sax.SAXParseException` as the only parameter. Errors and warnings may be converted to an exception by raising the passed-in exception object. diff --git a/Doc/library/xml.sax.utils.rst b/Doc/library/xml.sax.utils.rst index ab4606bcf9fe6ca..e57e76dcac7820b 100644 --- a/Doc/library/xml.sax.utils.rst +++ b/Doc/library/xml.sax.utils.rst @@ -92,5 +92,5 @@ or as base classes. reading. The input source can be given as a string, a file-like object, or an :class:`~xml.sax.xmlreader.InputSource` object; parsers will use this function to implement the polymorphic *source* argument to their - :meth:`parse` method. + :meth:`~xml.sax.xmlreader.XMLReader.parse` method. diff --git a/Doc/library/xmlrpc.client.rst b/Doc/library/xmlrpc.client.rst index bd2c49a6edab7fa..146c4fd768233bb 100644 --- a/Doc/library/xmlrpc.client.rst +++ b/Doc/library/xmlrpc.client.rst @@ -161,7 +161,7 @@ between conformable Python objects and XML on the wire. .. seealso:: - `XML-RPC HOWTO `_ + `XML-RPC HOWTO `_ A good description of XML-RPC operation and client software in several languages. Contains pretty much everything an XML-RPC client developer needs to know. diff --git a/Doc/library/xmlrpc.rst b/Doc/library/xmlrpc.rst index ae68157b0f63c14..5f0a2cf68d01f9c 100644 --- a/Doc/library/xmlrpc.rst +++ b/Doc/library/xmlrpc.rst @@ -1,5 +1,5 @@ -:mod:`xmlrpc` --- XMLRPC server and client modules -================================================== +:mod:`!xmlrpc` --- XMLRPC server and client modules +=================================================== XML-RPC is a Remote Procedure Call method that uses XML passed via HTTP as a transport. With it, a client can call methods with parameters on a remote diff --git a/Doc/library/zipapp.rst b/Doc/library/zipapp.rst index fb40a2b3e964e4d..104afca23a20b42 100644 --- a/Doc/library/zipapp.rst +++ b/Doc/library/zipapp.rst @@ -54,7 +54,7 @@ The following options are understood: .. program:: zipapp -.. cmdoption:: -o , --output= +.. option:: -o , --output= Write the output to a file named *output*. If this option is not specified, the output filename will be the same as the input *source*, with the @@ -64,13 +64,13 @@ The following options are understood: An output filename must be specified if the *source* is an archive (and in that case, *output* must not be the same as *source*). -.. cmdoption:: -p , --python= +.. option:: -p , --python= Add a ``#!`` line to the archive specifying *interpreter* as the command to run. Also, on POSIX, make the archive executable. The default is to write no ``#!`` line, and not make the file executable. -.. cmdoption:: -m , --main= +.. option:: -m , --main= Write a ``__main__.py`` file to the archive that executes *mainfn*. The *mainfn* argument should have the form "pkg.mod:fn", where "pkg.mod" is a @@ -79,7 +79,7 @@ The following options are understood: :option:`--main` cannot be specified when copying an archive. -.. 
cmdoption:: -c, --compress +.. option:: -c, --compress Compress files with the deflate method, reducing the size of the output file. By default, files are stored uncompressed in the archive. @@ -88,13 +88,13 @@ The following options are understood: .. versionadded:: 3.7 -.. cmdoption:: --info +.. option:: --info Display the interpreter embedded in the archive, for diagnostic purposes. In this case, any other options are ignored and SOURCE must be an archive, not a directory. -.. cmdoption:: -h, --help +.. option:: -h, --help Print a short usage message and exit. @@ -215,7 +215,7 @@ using the :func:`create_archive` function:: >>> import zipapp >>> zipapp.create_archive('old_archive.pyz', 'new_archive.pyz', '/usr/bin/python3') -To update the file in place, do the replacement in memory using a :class:`BytesIO` +To update the file in place, do the replacement in memory using a :class:`~io.BytesIO` object, and then overwrite the source afterwards. Note that there is a risk when overwriting a file in place that an error will result in the loss of the original file. This code does not protect against such errors, but @@ -281,12 +281,7 @@ The steps to create a standalone archive are as follows: file - if not, you can just list the dependencies manually on the pip command line). -3. Optionally, delete the ``.dist-info`` directories created by pip in the - ``myapp`` directory. These hold metadata for pip to manage the packages, and - as you won't be making any further use of pip they aren't required - - although it won't do any harm if you leave them. - -4. Package the application using: +3. Package the application using: .. code-block:: shell-session @@ -303,115 +298,18 @@ the Python interpreter registers the ``.pyz`` and ``.pyzw`` file extensions when installed. -Making a Windows executable -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On Windows, registration of the ``.pyz`` extension is optional, and -furthermore, there are certain places that don't recognise registered -extensions "transparently" (the simplest example is that -``subprocess.run(['myapp'])`` won't find your application - you need to -explicitly specify the extension). - -On Windows, therefore, it is often preferable to create an executable from the -zipapp. This is relatively easy, although it does require a C compiler. The -basic approach relies on the fact that zipfiles can have arbitrary data -prepended, and Windows exe files can have arbitrary data appended. So by -creating a suitable launcher and tacking the ``.pyz`` file onto the end of it, -you end up with a single-file executable that runs your application. - -A suitable launcher can be as simple as the following:: - - #define Py_LIMITED_API 1 - #include "Python.h" - - #define WIN32_LEAN_AND_MEAN - #include - - #ifdef WINDOWS - int WINAPI wWinMain( - HINSTANCE hInstance, /* handle to current instance */ - HINSTANCE hPrevInstance, /* handle to previous instance */ - LPWSTR lpCmdLine, /* pointer to command line */ - int nCmdShow /* show state of window */ - ) - #else - int wmain() - #endif - { - wchar_t **myargv = _alloca((__argc + 1) * sizeof(wchar_t*)); - myargv[0] = __wargv[0]; - memcpy(myargv + 1, __wargv, __argc * sizeof(wchar_t *)); - return Py_Main(__argc+1, myargv); - } - -If you define the ``WINDOWS`` preprocessor symbol, this will generate a -GUI executable, and without it, a console executable. 
- -To compile the executable, you can either just use the standard MSVC -command line tools, or you can take advantage of the fact that distutils -knows how to compile Python source:: - - >>> from distutils.ccompiler import new_compiler - >>> import distutils.sysconfig - >>> import sys - >>> import os - >>> from pathlib import Path - - >>> def compile(src): - >>> src = Path(src) - >>> cc = new_compiler() - >>> exe = src.stem - >>> cc.add_include_dir(distutils.sysconfig.get_python_inc()) - >>> cc.add_library_dir(os.path.join(sys.base_exec_prefix, 'libs')) - >>> # First the CLI executable - >>> objs = cc.compile([str(src)]) - >>> cc.link_executable(objs, exe) - >>> # Now the GUI executable - >>> cc.define_macro('WINDOWS') - >>> objs = cc.compile([str(src)]) - >>> cc.link_executable(objs, exe + 'w') - - >>> if __name__ == "__main__": - >>> compile("zastub.c") - -The resulting launcher uses the "Limited ABI", so it will run unchanged with -any version of Python 3.x. All it needs is for Python (``python3.dll``) to be -on the user's ``PATH``. - -For a fully standalone distribution, you can distribute the launcher with your -application appended, bundled with the Python "embedded" distribution. This -will run on any PC with the appropriate architecture (32 bit or 64 bit). - - Caveats ~~~~~~~ -There are some limitations to the process of bundling your application into -a single file. In most, if not all, cases they can be addressed without -needing major changes to your application. - -1. If your application depends on a package that includes a C extension, that - package cannot be run from a zip file (this is an OS limitation, as executable - code must be present in the filesystem for the OS loader to load it). In this - case, you can exclude that dependency from the zipfile, and either require - your users to have it installed, or ship it alongside your zipfile and add code - to your ``__main__.py`` to include the directory containing the unzipped - module in ``sys.path``. In this case, you will need to make sure to ship - appropriate binaries for your target architecture(s) (and potentially pick the - correct version to add to ``sys.path`` at runtime, based on the user's machine). - -2. If you are shipping a Windows executable as described above, you either need to - ensure that your users have ``python3.dll`` on their PATH (which is not the - default behaviour of the installer) or you should bundle your application with - the embedded distribution. - -3. The suggested launcher above uses the Python embedding API. This means that in - your application, ``sys.executable`` will be your application, and *not* a - conventional Python interpreter. Your code and its dependencies need to be - prepared for this possibility. For example, if your application uses the - :mod:`multiprocessing` module, it will need to call - :func:`multiprocessing.set_executable` to let the module know where to find the - standard Python interpreter. +If your application depends on a package that includes a C extension, that +package cannot be run from a zip file (this is an OS limitation, as executable +code must be present in the filesystem for the OS loader to load it). In this +case, you can exclude that dependency from the zipfile, and either require +your users to have it installed, or ship it alongside your zipfile and add code +to your ``__main__.py`` to include the directory containing the unzipped +module in ``sys.path``. 
In this case, you will need to make sure to ship +appropriate binaries for your target architecture(s) (and potentially pick the +correct version to add to ``sys.path`` at runtime, based on the user's machine). The Python Zip Application Archive Format diff --git a/Doc/library/zipfile.rst b/Doc/library/zipfile.rst index 4dd9fa961a8d983..a77e49a76438267 100644 --- a/Doc/library/zipfile.rst +++ b/Doc/library/zipfile.rst @@ -7,7 +7,7 @@ .. moduleauthor:: James C. Ahlstrom .. sectionauthor:: James C. Ahlstrom -**Source code:** :source:`Lib/zipfile.py` +**Source code:** :source:`Lib/zipfile/` -------------- @@ -55,8 +55,9 @@ The module defines the following items: .. class:: Path :noindex: - A pathlib-compatible wrapper for zip files. See section - :ref:`path-objects` for details. + Class that implements a subset of the interface provided by + :class:`pathlib.Path`, including the full + :class:`importlib.resources.abc.Traversable` interface. .. versionadded:: 3.8 @@ -127,7 +128,7 @@ The module defines the following items: Documentation on the ZIP file format by Phil Katz, the creator of the format and algorithms used. - `Info-ZIP Home Page `_ + `Info-ZIP Home Page `_ Information about the Info-ZIP project's ZIP archive programs and development libraries. @@ -273,7 +274,8 @@ ZipFile Objects Access a member of the archive as a binary file-like object. *name* can be either the name of a file within the archive or a :class:`ZipInfo` object. The *mode* parameter, if included, must be ``'r'`` (the default) - or ``'w'``. *pwd* is the password used to decrypt encrypted ZIP files. + or ``'w'``. *pwd* is the password used to decrypt encrypted ZIP files as a + :class:`bytes` object. :meth:`~ZipFile.open` is also a context manager and therefore supports the :keyword:`with` statement:: @@ -286,7 +288,7 @@ ZipFile Objects (``ZipExtFile``) is read-only and provides the following methods: :meth:`~io.BufferedIOBase.read`, :meth:`~io.IOBase.readline`, :meth:`~io.IOBase.readlines`, :meth:`~io.IOBase.seek`, - :meth:`~io.IOBase.tell`, :meth:`__iter__`, :meth:`~iterator.__next__`. + :meth:`~io.IOBase.tell`, :meth:`~container.__iter__`, :meth:`~iterator.__next__`. These objects can operate independently of the ZipFile. With ``mode='w'``, a writable file handle is returned, which supports the @@ -325,7 +327,7 @@ ZipFile Objects must be its full name or a :class:`ZipInfo` object. Its file information is extracted as accurately as possible. *path* specifies a different directory to extract to. *member* can be a filename or a :class:`ZipInfo` object. - *pwd* is the password used for encrypted files. + *pwd* is the password used for encrypted files as a :class:`bytes` object. Returns the normalized path created (a directory or new file). @@ -352,7 +354,7 @@ ZipFile Objects Extract all members from the archive to the current working directory. *path* specifies a different directory to extract to. *members* is optional and must be a subset of the list returned by :meth:`namelist`. *pwd* is the password - used for encrypted files. + used for encrypted files as a :class:`bytes` object. .. warning:: @@ -377,16 +379,16 @@ ZipFile Objects .. method:: ZipFile.setpassword(pwd) - Set *pwd* as default password to extract encrypted files. + Set *pwd* (a :class:`bytes` object) as default password to extract encrypted files. .. method:: ZipFile.read(name, pwd=None) Return the bytes of the file *name* in the archive. *name* is the name of the file in the archive, or a :class:`ZipInfo` object. 
The archive must be open for - read or append. *pwd* is the password used for encrypted files and, if specified, - it will override the default password set with :meth:`setpassword`. Calling - :meth:`read` on a ZipFile that uses a compression method other than + read or append. *pwd* is the password used for encrypted files as a :class:`bytes` + object and, if specified, overrides the default password set with :meth:`setpassword`. + Calling :meth:`read` on a ZipFile that uses a compression method other than :const:`ZIP_STORED`, :const:`ZIP_DEFLATED`, :const:`ZIP_BZIP2` or :const:`ZIP_LZMA` will raise a :exc:`NotImplementedError`. An error will also be raised if the corresponding compression module is not available. @@ -550,6 +552,12 @@ Path objects are traversable using the ``/`` operator or ``joinpath``. Added support for text and binary modes for open. Default mode is now text. + .. versionchanged:: 3.11.2 + The ``encoding`` parameter can be supplied as a positional argument + without causing a :exc:`TypeError`. As it could in 3.9. Code needing to + be compatible with unpatched 3.10 and 3.11 versions must pass all + :class:`io.TextIOWrapper` arguments, ``encoding`` included, as keywords. + .. method:: Path.iterdir() Enumerate the children of the current directory. @@ -569,7 +577,8 @@ Path objects are traversable using the ``/`` operator or ``joinpath``. .. data:: Path.suffix - The file extension of the final component. + The last dot-separated portion of the final component, if any. + This is commonly called the file extension. .. versionadded:: 3.11 Added :data:`Path.suffix` property. @@ -583,7 +592,7 @@ Path objects are traversable using the ``/`` operator or ``joinpath``. .. data:: Path.suffixes - A list of the path’s file extensions. + A list of the path’s suffixes, commonly called file extensions. .. versionadded:: 3.11 Added :data:`Path.suffixes` property. @@ -595,6 +604,12 @@ Path objects are traversable using the ``/`` operator or ``joinpath``. :class:`io.TextIOWrapper` (except ``buffer``, which is implied by the context). + .. versionchanged:: 3.11.2 + The ``encoding`` parameter can be supplied as a positional argument + without causing a :exc:`TypeError`. As it could in 3.9. Code needing to + be compatible with unpatched 3.10 and 3.11 versions must pass all + :class:`io.TextIOWrapper` arguments, ``encoding`` included, as keywords. + .. method:: Path.read_bytes() Read the current file as bytes. @@ -672,6 +687,7 @@ The :class:`PyZipFile` constructor takes the same parameters as the >>> def notests(s): ... fn = os.path.basename(s) ... return (not (fn == 'test' or fn.startswith('test_'))) + ... >>> zf.writepy('myprog', filterfunc=notests) The :meth:`writepy` method makes archives with file names like @@ -890,27 +906,27 @@ For a list of the files in a ZIP archive, use the :option:`-l` option: Command-line options ~~~~~~~~~~~~~~~~~~~~ -.. cmdoption:: -l - --list +.. option:: -l + --list List files in a zipfile. -.. cmdoption:: -c ... - --create ... +.. option:: -c ... + --create ... Create zipfile from source files. -.. cmdoption:: -e - --extract +.. option:: -e + --extract Extract zipfile into target directory. -.. cmdoption:: -t - --test +.. option:: -t + --test Test whether the zipfile is valid or not. -.. cmdoption:: --metadata-encoding +.. option:: --metadata-encoding Specify encoding of member names for :option:`-l`, :option:`-e` and :option:`-t`. 
diff --git a/Doc/library/zipimport.rst b/Doc/library/zipimport.rst index fe1adcae163c23f..47c81f0e63603dc 100644 --- a/Doc/library/zipimport.rst +++ b/Doc/library/zipimport.rst @@ -74,6 +74,11 @@ zipimporter Objects :exc:`ZipImportError` is raised if *archivepath* doesn't point to a valid ZIP archive. + .. versionchanged:: 3.12 + + Methods ``find_loader()`` and ``find_module()``, deprecated in 3.10 are + now removed. Use :meth:`find_spec` instead. + .. method:: create_module(spec) Implementation of :meth:`importlib.abc.Loader.create_module` that returns @@ -89,28 +94,6 @@ zipimporter Objects .. versionadded:: 3.10 - .. method:: find_loader(fullname, path=None) - - An implementation of :meth:`importlib.abc.PathEntryFinder.find_loader`. - - .. deprecated:: 3.10 - - Use :meth:`find_spec` instead. - - - .. method:: find_module(fullname, path=None) - - Search for a module specified by *fullname*. *fullname* must be the fully - qualified (dotted) module name. It returns the zipimporter instance itself - if the module was found, or :const:`None` if it wasn't. The optional - *path* argument is ignored---it's there for compatibility with the - importer protocol. - - .. deprecated:: 3.10 - - Use :meth:`find_spec` instead. - - .. method:: find_spec(fullname, target=None) An implementation of :meth:`importlib.abc.PathEntryFinder.find_spec`. @@ -130,7 +113,7 @@ zipimporter Objects file wasn't found. .. versionchanged:: 3.3 - :exc:`IOError` used to be raised instead of :exc:`OSError`. + :exc:`IOError` used to be raised, it is now an alias of :exc:`OSError`. .. method:: get_filename(fullname) diff --git a/Doc/library/zoneinfo.rst b/Doc/library/zoneinfo.rst index 2f1879dc056a881..f8624da6e51dbb4 100644 --- a/Doc/library/zoneinfo.rst +++ b/Doc/library/zoneinfo.rst @@ -9,6 +9,8 @@ .. moduleauthor:: Paul Ganssle .. sectionauthor:: Paul Ganssle +**Source code:** :source:`Lib/zoneinfo` + -------------- The :mod:`zoneinfo` module provides a concrete time zone implementation to @@ -239,7 +241,7 @@ The following class methods are also available: .. warning:: Invoking this function may change the semantics of datetimes using - ``ZoneInfo`` in surprising ways; this modifies process-wide global state + ``ZoneInfo`` in surprising ways; this modifies module state and thus may have wide-ranging effects. Only use it if you know that you need to. diff --git a/Doc/license.rst b/Doc/license.rst index 92059b91effd9bc..8aad93062a5a887 100644 --- a/Doc/license.rst +++ b/Doc/license.rst @@ -100,7 +100,7 @@ PSF LICENSE AGREEMENT FOR PYTHON |release| analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python |release| alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of - copyright, i.e., "Copyright © 2001-2022 Python Software Foundation; All Rights + copyright, i.e., "Copyright © 2001-2023 Python Software Foundation; All Rights Reserved" are retained in Python |release| alone or in any derivative version prepared by Licensee. @@ -352,8 +352,8 @@ the verbatim comments from the original code:: Sockets ------- -The :mod:`socket` module uses the functions, :func:`getaddrinfo`, and -:func:`getnameinfo`, which are coded in separate source files from the WIDE +The :mod:`socket` module uses the functions, :c:func:`!getaddrinfo`, and +:c:func:`!getnameinfo`, which are coded in separate source files from the WIDE Project, https://www.wide.ad.jp/. :: Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 
@@ -387,7 +387,8 @@ Project, https://www.wide.ad.jp/. :: Asynchronous socket services ---------------------------- -The :mod:`asynchat` and :mod:`asyncore` modules contain the following notice:: +The :mod:`!test.support.asynchat` and :mod:`!test.support.asyncore` +modules contain the following notice:: Copyright 1996 by Sam Rushing @@ -475,7 +476,7 @@ The :mod:`trace` module contains the following notice:: UUencode and UUdecode functions ------------------------------- -The :mod:`uu` module contains the following notice:: +The ``uu`` codec contains the following notice:: Copyright 1994 by Lance Ellinghouse Cathedral City, California Republic, United States of America. @@ -538,7 +539,7 @@ The :mod:`xmlrpc.client` module contains the following notice:: test_epoll ---------- -The :mod:`test_epoll` module contains the following notice:: +The :mod:`!test.test_epoll` module contains the following notice:: Copyright (c) 2001-2006 Twisted Matrix Laboratories. @@ -654,144 +655,196 @@ copyright and licensing notice:: OpenSSL ------- -The modules :mod:`hashlib`, :mod:`posix`, :mod:`ssl`, :mod:`crypt` use +The modules :mod:`hashlib`, :mod:`posix` and :mod:`ssl` use the OpenSSL library for added performance if made available by the operating system. Additionally, the Windows and macOS installers for Python may include a copy of the OpenSSL libraries, so we include a copy -of the OpenSSL license here:: - - - LICENSE ISSUES - ============== - - The OpenSSL toolkit stays under a dual license, i.e. both the conditions of - the OpenSSL License and the original SSLeay license apply to the toolkit. - See below for the actual license texts. Actually both licenses are BSD-style - Open Source licenses. In case of any license issues related to OpenSSL - please contact openssl-core@openssl.org. - - OpenSSL License - --------------- - - /* ==================================================================== - * Copyright (c) 1998-2008 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. 
Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ - - Original SSLeay License - ----------------------- - - /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. 
If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ +of the OpenSSL license here. For the OpenSSL 3.0 release, +and later releases derived from that, the Apache License v2 applies:: + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS expat ----- -The :mod:`pyexpat` extension is built using an included copy of the expat +The :mod:`pyexpat ` extension is built using an included copy of the expat sources unless the build is configured ``--with-system-expat``:: Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd @@ -988,27 +1041,28 @@ https://www.w3.org/TR/xml-c14n2-testcases/ and is distributed under the (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Audioop -------- -The audioop module uses the code base in g771.c file of the SoX project:: - Programming the AdLib/Sound Blaster - FM Music Chips - Version 2.0 (24 Feb 1992) - Copyright (c) 1991, 1992 by Jeffrey S. Lee - jlee@smylex.uucp - Warranty and Copyright Policy - This document is provided on an "as-is" basis, and its author makes - no warranty or representation, express or implied, with respect to - its quality performance or fitness for a particular purpose. In no - event will the author of this document be liable for direct, indirect, - special, incidental, or consequential damages arising out of the use - or inability to use the information contained within. Use of this - document is at your own risk. - This file may be used and copied freely so long as the applicable - copyright notices are retained, and no modifications are made to the - text of the document. No money shall be charged for its distribution - beyond reasonable shipping, handling and duplication costs, nor shall - proprietary changes be made to this document so that it cannot be - distributed freely. This document may not be included in published - material or commercial packages without the written consent of its - author. 
+ +mimalloc +-------- + +MIT License + +Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Doc/reference/compound_stmts.rst b/Doc/reference/compound_stmts.rst index 9e09515f50d12f9..8f6481339837a05 100644 --- a/Doc/reference/compound_stmts.rst +++ b/Doc/reference/compound_stmts.rst @@ -84,9 +84,9 @@ The :keyword:`!if` statement ============================ .. index:: - ! statement: if - keyword: elif - keyword: else + ! pair: statement; if + pair: keyword; elif + pair: keyword; else single: : (colon); compound statement The :keyword:`if` statement is used for conditional execution: @@ -109,8 +109,8 @@ The :keyword:`!while` statement =============================== .. index:: - ! statement: while - keyword: else + ! pair: statement; while + pair: keyword; else pair: loop; statement single: : (colon); compound statement @@ -127,8 +127,8 @@ suite of the :keyword:`!else` clause, if present, is executed and the loop terminates. .. index:: - statement: break - statement: continue + pair: statement; break + pair: statement; continue A :keyword:`break` statement executed in the first suite terminates the loop without executing the :keyword:`!else` clause's suite. A :keyword:`continue` @@ -142,12 +142,12 @@ The :keyword:`!for` statement ============================= .. index:: - ! statement: for - keyword: in - keyword: else + ! pair: statement; for + pair: keyword; in + pair: keyword; else pair: target; list pair: loop; statement - object: sequence + pair: object; sequence single: : (colon); compound statement The :keyword:`for` statement is used to iterate over the elements of a sequence @@ -167,8 +167,8 @@ the suite in the :keyword:`!else` clause, if present, is executed, and the loop terminates. .. index:: - statement: break - statement: continue + pair: statement; break + pair: statement; continue A :keyword:`break` statement executed in the first suite terminates the loop without executing the :keyword:`!else` clause's suite. A :keyword:`continue` @@ -188,13 +188,12 @@ those made in the suite of the for-loop:: .. index:: - builtin: range + pair: built-in function; range Names in the target list are not deleted when the loop is finished, but if the sequence is empty, they will not have been assigned to at all by the loop. Hint: -the built-in function :func:`range` returns an iterator of integers suitable to -emulate the effect of Pascal's ``for i := a to b do``; e.g., ``list(range(3))`` -returns the list ``[0, 1, 2]``. 
+the built-in type :func:`range` represents immutable arithmetic sequences of integers. +For instance, iterating ``range(3)`` successively yields 0, 1, and then 2. .. versionchanged:: 3.11 Starred elements are now allowed in the expression list. @@ -206,11 +205,11 @@ The :keyword:`!try` statement ============================= .. index:: - ! statement: try - keyword: except - keyword: finally - keyword: else - keyword: as + ! pair: statement; try + pair: keyword; except + pair: keyword; finally + pair: keyword; else + pair: keyword; as single: : (colon); compound statement The :keyword:`!try` statement specifies exception handlers and/or cleanup code @@ -298,39 +297,36 @@ traceback attached to them, they form a reference cycle with the stack frame, keeping all locals in that frame alive until the next garbage collection occurs. .. index:: - module: sys - object: traceback + pair: module; sys + pair: object; traceback Before an :keyword:`!except` clause's suite is executed, -details about the exception are -stored in the :mod:`sys` module and can be accessed via :func:`sys.exc_info`. -:func:`sys.exc_info` returns a 3-tuple consisting of the exception class, the -exception instance and a traceback object (see section :ref:`types`) identifying -the point in the program where the exception occurred. The details about the -exception accessed via :func:`sys.exc_info` are restored to their previous values -when leaving an exception handler:: - - >>> print(sys.exc_info()) - (None, None, None) +the exception is stored in the :mod:`sys` module, where it can be accessed +from within the body of the :keyword:`!except` clause by calling +:func:`sys.exception`. When leaving an exception handler, the exception +stored in the :mod:`sys` module is reset to its previous value:: + + >>> print(sys.exception()) + None >>> try: ... raise TypeError ... except: - ... print(sys.exc_info()) + ... print(repr(sys.exception())) ... try: ... raise ValueError ... except: - ... print(sys.exc_info()) - ... print(sys.exc_info()) + ... print(repr(sys.exception())) + ... print(repr(sys.exception())) ... - (, TypeError(), ) - (, ValueError(), ) - (, TypeError(), ) - >>> print(sys.exc_info()) - (None, None, None) + TypeError() + ValueError() + TypeError() + >>> print(sys.exception()) + None .. index:: - keyword: except_star + pair: keyword; except_star .. _except_star: @@ -366,8 +362,10 @@ one :keyword:`!except*` clause, the first that matches it. :: Any remaining exceptions that were not handled by any :keyword:`!except*` -clause are re-raised at the end, combined into an exception group along with -all exceptions that were raised from within :keyword:`!except*` clauses. +clause are re-raised at the end, along with all exceptions that were +raised from within the :keyword:`!except*` clauses. If this list contains +more than one exception to reraise, they are combined into an exception +group. If the raised exception is not an exception group and its type matches one of the :keyword:`!except*` clauses, it is caught and wrapped by an @@ -389,10 +387,10 @@ cannot appear in an :keyword:`!except*` clause. .. index:: - keyword: else - statement: return - statement: break - statement: continue + pair: keyword; else + pair: statement; return + pair: statement; break + pair: statement; continue .. _except_else: @@ -406,7 +404,7 @@ the :keyword:`!else` clause are not handled by the preceding :keyword:`except` clauses. -.. index:: keyword: finally +.. index:: pair: keyword; finally .. 
_finally: @@ -436,9 +434,9 @@ The exception information is not available to the program during execution of the :keyword:`!finally` clause. .. index:: - statement: return - statement: break - statement: continue + pair: statement; return + pair: statement; break + pair: statement; continue When a :keyword:`return`, :keyword:`break` or :keyword:`continue` statement is executed in the :keyword:`try` suite of a :keyword:`!try`...\ :keyword:`!finally` @@ -470,8 +468,8 @@ The :keyword:`!with` statement ============================== .. index:: - ! statement: with - keyword: as + ! pair: statement; with + pair: keyword; as single: as; with statement single: , (comma); with statement single: : (colon); compound statement @@ -491,37 +489,37 @@ The execution of the :keyword:`with` statement with one "item" proceeds as follo #. The context expression (the expression given in the :token:`~python-grammar:with_item`) is evaluated to obtain a context manager. -#. The context manager's :meth:`__enter__` is loaded for later use. +#. The context manager's :meth:`~object.__enter__` is loaded for later use. -#. The context manager's :meth:`__exit__` is loaded for later use. +#. The context manager's :meth:`~object.__exit__` is loaded for later use. -#. The context manager's :meth:`__enter__` method is invoked. +#. The context manager's :meth:`~object.__enter__` method is invoked. #. If a target was included in the :keyword:`with` statement, the return value - from :meth:`__enter__` is assigned to it. + from :meth:`~object.__enter__` is assigned to it. .. note:: - The :keyword:`with` statement guarantees that if the :meth:`__enter__` - method returns without an error, then :meth:`__exit__` will always be + The :keyword:`with` statement guarantees that if the :meth:`~object.__enter__` + method returns without an error, then :meth:`~object.__exit__` will always be called. Thus, if an error occurs during the assignment to the target list, it will be treated the same as an error occurring within the suite would - be. See step 6 below. + be. See step 7 below. #. The suite is executed. -#. The context manager's :meth:`__exit__` method is invoked. If an exception +#. The context manager's :meth:`~object.__exit__` method is invoked. If an exception caused the suite to be exited, its type, value, and traceback are passed as - arguments to :meth:`__exit__`. Otherwise, three :const:`None` arguments are + arguments to :meth:`~object.__exit__`. Otherwise, three :const:`None` arguments are supplied. If the suite was exited due to an exception, and the return value from the - :meth:`__exit__` method was false, the exception is reraised. If the return + :meth:`~object.__exit__` method was false, the exception is reraised. If the return value was true, the exception is suppressed, and execution continues with the statement following the :keyword:`with` statement. If the suite was exited for any reason other than an exception, the return - value from :meth:`__exit__` is ignored, and execution proceeds at the normal + value from :meth:`~object.__exit__` is ignored, and execution proceeds at the normal location for the kind of exit that was taken. The following code:: @@ -587,11 +585,11 @@ The :keyword:`!match` statement =============================== .. index:: - ! statement: match - ! keyword: case + ! pair: statement; match + ! pair: keyword; case ! 
single: pattern matching - keyword: if - keyword: as + pair: keyword; if + pair: keyword; as pair: match; case single: as; match statement single: : (colon); compound statement @@ -644,14 +642,14 @@ Here's an overview of the logical flow of a match statement: specified below. **Name bindings made during a successful pattern match outlive the executed block and can be used after the match statement**. - .. note:: + .. note:: - During failed pattern matches, some subpatterns may succeed. Do not - rely on bindings being made for a failed match. Conversely, do not - rely on variables remaining unchanged after a failed match. The exact - behavior is dependent on implementation and may vary. This is an - intentional decision made to allow different implementations to add - optimizations. + During failed pattern matches, some subpatterns may succeed. Do not + rely on bindings being made for a failed match. Conversely, do not + rely on variables remaining unchanged after a failed match. The exact + behavior is dependent on implementation and may vary. This is an + intentional decision made to allow different implementations to add + optimizations. #. If the pattern succeeds, the corresponding guard (if present) is evaluated. In this case all name bindings are guaranteed to have happened. @@ -820,7 +818,7 @@ keyword against a subject. Syntax: If the OR pattern fails, the AS pattern fails. Otherwise, the AS pattern binds the subject to the name on the right of the as keyword and succeeds. -``capture_pattern`` cannot be a a ``_``. +``capture_pattern`` cannot be a ``_``. In simple terms ``P as NAME`` will match with ``P``, and on success it will set ``NAME = ``. @@ -1060,7 +1058,7 @@ subject value: .. note:: Key-value pairs are matched using the two-argument form of the mapping subject's ``get()`` method. Matched key-value pairs must already be present in the mapping, and not created on-the-fly via :meth:`__missing__` or - :meth:`__getitem__`. + :meth:`~object.__getitem__`. In simple terms ``{KEY1: P1, KEY2: P2, ... }`` matches only if all the following happens: @@ -1172,8 +1170,10 @@ In simple terms ``CLS(P1, attr=P2)`` matches only if the following happens: * ``isinstance(, CLS)`` * convert ``P1`` to a keyword pattern using ``CLS.__match_args__`` * For each keyword argument ``attr=P2``: - * ``hasattr(, "attr")`` - * ``P2`` matches ``.attr`` + + * ``hasattr(, "attr")`` + * ``P2`` matches ``.attr`` + * ... and so on for the corresponding keyword argument/pattern pair. .. seealso:: @@ -1192,12 +1192,12 @@ Function definitions ==================== .. index:: - statement: def + pair: statement; def pair: function; definition pair: function; name pair: name; binding - object: user-defined function - object: function + pair: object; user-defined function + pair: object; function pair: function; name pair: name; binding single: () (parentheses); function definition @@ -1208,7 +1208,7 @@ A function definition defines a user-defined function object (see section :ref:`types`): .. productionlist:: python-grammar - funcdef: [`decorators`] "def" `funcname` "(" [`parameter_list`] ")" + funcdef: [`decorators`] "def" `funcname` [`type_params`] "(" [`parameter_list`] ")" : ["->" `expression`] ":" `suite` decorators: `decorator`+ decorator: "@" `assignment_expression` NEWLINE @@ -1258,6 +1258,15 @@ except that the original function is not temporarily bound to the name ``func``. :token:`~python-grammar:assignment_expression`. Previously, the grammar was much more restrictive; see :pep:`614` for details. 
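[Editor's note] A brief, hedged illustration of the relaxed decorator grammar noted above (any ``assignment_expression`` has been accepted since Python 3.9, per :pep:`614`): the sketch below subscripts a dictionary on the decorator line, which the earlier, more restrictive grammar rejected; the ``decorators`` mapping is purely illustrative::

   import functools

   decorators = {"cache": functools.cache}

   @decorators["cache"]   # arbitrary expression; invalid under the pre-3.9 grammar
   def fib(n):
       return n if n < 2 else fib(n - 1) + fib(n - 2)
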
+A list of :ref:`type parameters ` may be given in square brackets +between the function's name and the opening parenthesis for its parameter list. +This indicates to static type checkers that the function is generic. At runtime, +the type parameters can be retrieved from the function's ``__type_params__`` +attribute. See :ref:`generic-functions` for more. + +.. versionchanged:: 3.12 + Type parameter lists are new in Python 3.12. + .. index:: triple: default; parameter; value single: argument; function definition @@ -1365,8 +1374,8 @@ Class definitions ================= .. index:: - object: class - statement: class + pair: object; class + pair: statement; class pair: class; definition pair: class; name pair: name; binding @@ -1380,7 +1389,7 @@ Class definitions A class definition defines a class object (see section :ref:`types`): .. productionlist:: python-grammar - classdef: [`decorators`] "class" `classname` [`inheritance`] ":" `suite` + classdef: [`decorators`] "class" `classname` [`type_params`] [`inheritance`] ":" `suite` inheritance: "(" [`argument_list`] ")" classname: `identifier` @@ -1436,6 +1445,15 @@ decorators. The result is then bound to the class name. :token:`~python-grammar:assignment_expression`. Previously, the grammar was much more restrictive; see :pep:`614` for details. +A list of :ref:`type parameters ` may be given in square brackets +immediately after the class's name. +This indicates to static type checkers that the class is generic. At runtime, +the type parameters can be retrieved from the class's ``__type_params__`` +attribute. See :ref:`generic-classes` for more. + +.. versionchanged:: 3.12 + Type parameter lists are new in Python 3.12. + **Programmer's note:** Variables defined in the class definition are class attributes; they are shared by instances. Instance attributes can be set in a method with ``self.name = value``. Both class and instance attributes are @@ -1465,7 +1483,7 @@ Coroutines .. versionadded:: 3.5 -.. index:: statement: async def +.. index:: pair: statement; async def .. _`async def`: Coroutine function definition @@ -1476,8 +1494,8 @@ Coroutine function definition : ["->" `expression`] ":" `suite` .. index:: - keyword: async - keyword: await + pair: keyword; async + pair: keyword; await Execution of Python coroutines can be suspended and resumed at many points (see :term:`coroutine`). :keyword:`await` expressions, :keyword:`async for` and @@ -1499,7 +1517,7 @@ An example of a coroutine function:: ``await`` and ``async`` are now keywords; previously they were only treated as such inside the body of a coroutine function. -.. index:: statement: async for +.. index:: pair: statement; async for .. _`async for`: The :keyword:`!async for` statement @@ -1544,7 +1562,7 @@ It is a :exc:`SyntaxError` to use an ``async for`` statement outside the body of a coroutine function. -.. index:: statement: async with +.. index:: pair: statement; async with .. _`async with`: The :keyword:`!async with` statement @@ -1591,6 +1609,228 @@ body of a coroutine function. The proposal that made coroutines a proper standalone concept in Python, and added supporting syntax. +.. _type-params: + +Type parameter lists +==================== + +.. versionadded:: 3.12 + +.. index:: + single: type parameters + +.. productionlist:: python-grammar + type_params: "[" `type_param` ("," `type_param`)* "]" + type_param: `typevar` | `typevartuple` | `paramspec` + typevar: `identifier` (":" `expression`)? 
+ typevartuple: "*" `identifier` + paramspec: "**" `identifier` + +:ref:`Functions ` (including :ref:`coroutines `), +:ref:`classes ` and :ref:`type aliases ` may +contain a type parameter list:: + + def max[T](args: list[T]) -> T: + ... + + async def amax[T](args: list[T]) -> T: + ... + + class Bag[T]: + def __iter__(self) -> Iterator[T]: + ... + + def add(self, arg: T) -> None: + ... + + type ListOrSet[T] = list[T] | set[T] + +Semantically, this indicates that the function, class, or type alias is +generic over a type variable. This information is primarily used by static +type checkers, and at runtime, generic objects behave much like their +non-generic counterparts. + +Type parameters are declared in square brackets (``[]``) immediately +after the name of the function, class, or type alias. The type parameters +are accessible within the scope of the generic object, but not elsewhere. +Thus, after a declaration ``def func[T](): pass``, the name ``T`` is not available in +the module scope. Below, the semantics of generic objects are described +with more precision. The scope of type parameters is modeled with a special +function (technically, an :ref:`annotation scope `) that +wraps the creation of the generic object. + +Generic functions, classes, and type aliases have a :attr:`!__type_params__` +attribute listing their type parameters. + +Type parameters come in three kinds: + +* :data:`typing.TypeVar`, introduced by a plain name (e.g., ``T``). Semantically, this + represents a single type to a type checker. +* :data:`typing.TypeVarTuple`, introduced by a name prefixed with a single + asterisk (e.g., ``*Ts``). Semantically, this stands for a tuple of any + number of types. +* :data:`typing.ParamSpec`, introduced by a name prefixed with two asterisks + (e.g., ``**P``). Semantically, this stands for the parameters of a callable. + +:data:`typing.TypeVar` declarations can define *bounds* and *constraints* with +a colon (``:``) followed by an expression. A single expression after the colon +indicates a bound (e.g. ``T: int``). Semantically, this means +that the :data:`!typing.TypeVar` can only represent types that are a subtype of +this bound. A parenthesized tuple of expressions after the colon indicates a +set of constraints (e.g. ``T: (str, bytes)``). Each member of the tuple should be a +type (again, this is not enforced at runtime). Constrained type variables can only +take on one of the types in the list of constraints. + +For :data:`!typing.TypeVar`\ s declared using the type parameter list syntax, +the bound and constraints are not evaluated when the generic object is created, +but only when the value is explicitly accessed through the attributes ``__bound__`` +and ``__constraints__``. To accomplish this, the bounds or constraints are +evaluated in a separate :ref:`annotation scope `. + +:data:`typing.TypeVarTuple`\ s and :data:`typing.ParamSpec`\ s cannot have bounds +or constraints. + +The following example indicates the full set of allowed type parameter declarations:: + + def overly_generic[ + SimpleTypeVar, + TypeVarWithBound: int, + TypeVarWithConstraints: (str, bytes), + *SimpleTypeVarTuple, + **SimpleParamSpec, + ]( + a: SimpleTypeVar, + b: TypeVarWithBound, + c: Callable[SimpleParamSpec, TypeVarWithConstraints], + *d: SimpleTypeVarTuple, + ): ... + +.. _generic-functions: + +Generic functions +----------------- + +Generic functions are declared as follows:: + + def func[T](arg: T): ... 
+ +This syntax is equivalent to:: + + annotation-def TYPE_PARAMS_OF_func(): + T = typing.TypeVar("T") + def func(arg: T): ... + func.__type_params__ = (T,) + return func + func = TYPE_PARAMS_OF_func() + +Here ``annotation-def`` indicates an :ref:`annotation scope `, +which is not actually bound to any name at runtime. (One +other liberty is taken in the translation: the syntax does not go through +attribute access on the :mod:`typing` module, but creates an instance of +:data:`typing.TypeVar` directly.) + +The annotations of generic functions are evaluated within the annotation scope +used for declaring the type parameters, but the function's defaults and +decorators are not. + +The following example illustrates the scoping rules for these cases, +as well as for additional flavors of type parameters:: + + @decorator + def func[T: int, *Ts, **P](*args: *Ts, arg: Callable[P, T] = some_default): + ... + +Except for the :ref:`lazy evaluation ` of the +:class:`~typing.TypeVar` bound, this is equivalent to:: + + DEFAULT_OF_arg = some_default + + annotation-def TYPE_PARAMS_OF_func(): + + annotation-def BOUND_OF_T(): + return int + # In reality, BOUND_OF_T() is evaluated only on demand. + T = typing.TypeVar("T", bound=BOUND_OF_T()) + + Ts = typing.TypeVarTuple("Ts") + P = typing.ParamSpec("P") + + def func(*args: *Ts, arg: Callable[P, T] = DEFAULT_OF_arg): + ... + + func.__type_params__ = (T, Ts, P) + return func + func = decorator(TYPE_PARAMS_OF_func()) + +The capitalized names like ``DEFAULT_OF_arg`` are not actually +bound at runtime. + +.. _generic-classes: + +Generic classes +--------------- + +Generic classes are declared as follows:: + + class Bag[T]: ... + +This syntax is equivalent to:: + + annotation-def TYPE_PARAMS_OF_Bag(): + T = typing.TypeVar("T") + class Bag(typing.Generic[T]): + __type_params__ = (T,) + ... + return Bag + Bag = TYPE_PARAMS_OF_Bag() + +Here again ``annotation-def`` (not a real keyword) indicates an +:ref:`annotation scope `, and the name +``TYPE_PARAMS_OF_Bag`` is not actually bound at runtime. + +Generic classes implicitly inherit from :data:`typing.Generic`. +The base classes and keyword arguments of generic classes are +evaluated within the type scope for the type parameters, +and decorators are evaluated outside that scope. This is illustrated +by this example:: + + @decorator + class Bag(Base[T], arg=T): ... + +This is equivalent to:: + + annotation-def TYPE_PARAMS_OF_Bag(): + T = typing.TypeVar("T") + class Bag(Base[T], typing.Generic[T], arg=T): + __type_params__ = (T,) + ... + return Bag + Bag = decorator(TYPE_PARAMS_OF_Bag()) + +.. _generic-type-aliases: + +Generic type aliases +-------------------- + +The :keyword:`type` statement can also be used to create a generic type alias:: + + type ListOrSet[T] = list[T] | set[T] + +Except for the :ref:`lazy evaluation ` of the value, +this is equivalent to:: + + annotation-def TYPE_PARAMS_OF_ListOrSet(): + T = typing.TypeVar("T") + + annotation-def VALUE_OF_ListOrSet(): + return list[T] | set[T] + # In reality, the value is lazily evaluated + return typing.TypeAliasType("ListOrSet", VALUE_OF_ListOrSet(), type_params=(T,)) + ListOrSet = TYPE_PARAMS_OF_ListOrSet() + +Here, ``annotation-def`` (not a real keyword) indicates an +:ref:`annotation scope `. The capitalized names +like ``TYPE_PARAMS_OF_ListOrSet`` are not actually bound at runtime. .. rubric:: Footnotes @@ -1600,29 +1840,29 @@ body of a coroutine function. .. 
[#] In pattern matching, a sequence is defined as one of the following: - * a class that inherits from :class:`collections.abc.Sequence` - * a Python class that has been registered as :class:`collections.abc.Sequence` - * a builtin class that has its (CPython) :data:`Py_TPFLAGS_SEQUENCE` bit set - * a class that inherits from any of the above + * a class that inherits from :class:`collections.abc.Sequence` + * a Python class that has been registered as :class:`collections.abc.Sequence` + * a builtin class that has its (CPython) :c:macro:`Py_TPFLAGS_SEQUENCE` bit set + * a class that inherits from any of the above The following standard library classes are sequences: - * :class:`array.array` - * :class:`collections.deque` - * :class:`list` - * :class:`memoryview` - * :class:`range` - * :class:`tuple` + * :class:`array.array` + * :class:`collections.deque` + * :class:`list` + * :class:`memoryview` + * :class:`range` + * :class:`tuple` .. note:: Subject values of type ``str``, ``bytes``, and ``bytearray`` do not match sequence patterns. .. [#] In pattern matching, a mapping is defined as one of the following: - * a class that inherits from :class:`collections.abc.Mapping` - * a Python class that has been registered as :class:`collections.abc.Mapping` - * a builtin class that has its (CPython) :data:`Py_TPFLAGS_MAPPING` bit set - * a class that inherits from any of the above + * a class that inherits from :class:`collections.abc.Mapping` + * a Python class that has been registered as :class:`collections.abc.Mapping` + * a builtin class that has its (CPython) :c:macro:`Py_TPFLAGS_MAPPING` bit set + * a class that inherits from any of the above The standard library classes :class:`dict` and :class:`types.MappingProxyType` are mappings. diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst index 301f41f3952c961..f7d3d2d0bbec23a 100644 --- a/Doc/reference/datamodel.rst +++ b/Doc/reference/datamodel.rst @@ -21,8 +21,8 @@ conformance to Von Neumann's model of a "stored program computer", code is also represented by objects.) .. index:: - builtin: id - builtin: type + pair: built-in function; id + pair: built-in function; type single: identity of an object single: value of an object single: type of an object @@ -141,1081 +141,1206 @@ Some of the type descriptions below contain a paragraph listing 'special attributes.' These are attributes that provide access to the implementation and are not intended for general use. Their definition may change in the future. + None - .. index:: object: None +---- + +.. index:: pair: object; None + +This type has a single value. There is a single object with this value. This +object is accessed through the built-in name ``None``. It is used to signify the +absence of a value in many situations, e.g., it is returned from functions that +don't explicitly return anything. Its truth value is false. - This type has a single value. There is a single object with this value. This - object is accessed through the built-in name ``None``. It is used to signify the - absence of a value in many situations, e.g., it is returned from functions that - don't explicitly return anything. Its truth value is false. NotImplemented - .. index:: object: NotImplemented +-------------- + +.. index:: pair: object; NotImplemented - This type has a single value. There is a single object with this value. This - object is accessed through the built-in name ``NotImplemented``. 
Numeric methods - and rich comparison methods should return this value if they do not implement the - operation for the operands provided. (The interpreter will then try the - reflected operation, or some other fallback, depending on the operator.) It - should not be evaluated in a boolean context. +This type has a single value. There is a single object with this value. This +object is accessed through the built-in name ``NotImplemented``. Numeric methods +and rich comparison methods should return this value if they do not implement the +operation for the operands provided. (The interpreter will then try the +reflected operation, or some other fallback, depending on the operator.) It +should not be evaluated in a boolean context. - See - :ref:`implementing-the-arithmetic-operations` - for more details. +See +:ref:`implementing-the-arithmetic-operations` +for more details. - .. versionchanged:: 3.9 - Evaluating ``NotImplemented`` in a boolean context is deprecated. While - it currently evaluates as true, it will emit a :exc:`DeprecationWarning`. - It will raise a :exc:`TypeError` in a future version of Python. +.. versionchanged:: 3.9 + Evaluating ``NotImplemented`` in a boolean context is deprecated. While + it currently evaluates as true, it will emit a :exc:`DeprecationWarning`. + It will raise a :exc:`TypeError` in a future version of Python. Ellipsis - .. index:: - object: Ellipsis - single: ...; ellipsis literal +-------- +.. index:: + pair: object; Ellipsis + single: ...; ellipsis literal + +This type has a single value. There is a single object with this value. This +object is accessed through the literal ``...`` or the built-in name +``Ellipsis``. Its truth value is true. - This type has a single value. There is a single object with this value. This - object is accessed through the literal ``...`` or the built-in name - ``Ellipsis``. Its truth value is true. :class:`numbers.Number` - .. index:: object: numeric +----------------------- + +.. index:: pair: object; numeric + +These are created by numeric literals and returned as results by arithmetic +operators and arithmetic built-in functions. Numeric objects are immutable; +once created their value never changes. Python numbers are of course strongly +related to mathematical numbers, but subject to the limitations of numerical +representation in computers. + +The string representations of the numeric classes, computed by +:meth:`~object.__repr__` and :meth:`~object.__str__`, have the following +properties: + +* They are valid numeric literals which, when passed to their + class constructor, produce an object having the value of the + original numeric. + +* The representation is in base 10, when possible. + +* Leading zeros, possibly excepting a single zero before a + decimal point, are not shown. + +* Trailing zeros, possibly excepting a single zero after a + decimal point, are not shown. - These are created by numeric literals and returned as results by arithmetic - operators and arithmetic built-in functions. Numeric objects are immutable; - once created their value never changes. Python numbers are of course strongly - related to mathematical numbers, but subject to the limitations of numerical - representation in computers. +* A sign is shown only when the number is negative. 
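[Editor's note] A short doctest-style sketch of the round-trip and formatting properties listed above; the values are chosen only for illustration::

   >>> repr(2.5)
   '2.5'
   >>> float(repr(2.5)) == 2.5     # a valid literal that round-trips
   True
   >>> repr(3.0)                   # one trailing zero after the point is kept
   '3.0'
   >>> repr(-0.25)                 # a sign appears only for negative numbers
   '-0.25'
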
- The string representations of the numeric classes, computed by - :meth:`~object.__repr__` and :meth:`~object.__str__`, have the following - properties: +Python distinguishes between integers, floating point numbers, and complex +numbers: - * They are valid numeric literals which, when passed to their - class constructor, produce an object having the value of the - original numeric. - * The representation is in base 10, when possible. +:class:`numbers.Integral` +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. index:: pair: object; integer - * Leading zeros, possibly excepting a single zero before a - decimal point, are not shown. +These represent elements from the mathematical set of integers (positive and +negative). - * Trailing zeros, possibly excepting a single zero after a - decimal point, are not shown. +.. note:: + .. index:: pair: integer; representation - * A sign is shown only when the number is negative. + The rules for integer representation are intended to give the most meaningful + interpretation of shift and mask operations involving negative integers. - Python distinguishes between integers, floating point numbers, and complex - numbers: +There are two types of integers: - :class:`numbers.Integral` - .. index:: object: integer +Integers (:class:`int`) + These represent numbers in an unlimited range, subject to available (virtual) + memory only. For the purpose of shift and mask operations, a binary + representation is assumed, and negative numbers are represented in a variant of + 2's complement which gives the illusion of an infinite string of sign bits + extending to the left. - These represent elements from the mathematical set of integers (positive and - negative). +Booleans (:class:`bool`) + .. index:: + pair: object; Boolean + single: False + single: True - There are two types of integers: + These represent the truth values False and True. The two objects representing + the values ``False`` and ``True`` are the only Boolean objects. The Boolean type is a + subtype of the integer type, and Boolean values behave like the values 0 and 1, + respectively, in almost all contexts, the exception being that when converted to + a string, the strings ``"False"`` or ``"True"`` are returned, respectively. - Integers (:class:`int`) - These represent numbers in an unlimited range, subject to available (virtual) - memory only. For the purpose of shift and mask operations, a binary - representation is assumed, and negative numbers are represented in a variant of - 2's complement which gives the illusion of an infinite string of sign bits - extending to the left. - Booleans (:class:`bool`) - .. index:: - object: Boolean - single: False - single: True +:class:`numbers.Real` (:class:`float`) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - These represent the truth values False and True. The two objects representing - the values ``False`` and ``True`` are the only Boolean objects. The Boolean type is a - subtype of the integer type, and Boolean values behave like the values 0 and 1, - respectively, in almost all contexts, the exception being that when converted to - a string, the strings ``"False"`` or ``"True"`` are returned, respectively. +.. index:: + pair: object; floating point + pair: floating point; number + pair: C; language + pair: Java; language - .. index:: pair: integer; representation +These represent machine-level double precision floating point numbers. You are +at the mercy of the underlying machine architecture (and C or Java +implementation) for the accepted range and handling of overflow. 
Python does not +support single-precision floating point numbers; the savings in processor and +memory usage that are usually the reason for using these are dwarfed by the +overhead of using objects in Python, so there is no reason to complicate the +language with two kinds of floating point numbers. - The rules for integer representation are intended to give the most meaningful - interpretation of shift and mask operations involving negative integers. - :class:`numbers.Real` (:class:`float`) - .. index:: - object: floating point - pair: floating point; number - pair: C; language - pair: Java; language +:class:`numbers.Complex` (:class:`complex`) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - These represent machine-level double precision floating point numbers. You are - at the mercy of the underlying machine architecture (and C or Java - implementation) for the accepted range and handling of overflow. Python does not - support single-precision floating point numbers; the savings in processor and - memory usage that are usually the reason for using these are dwarfed by the - overhead of using objects in Python, so there is no reason to complicate the - language with two kinds of floating point numbers. +.. index:: + pair: object; complex + pair: complex; number - :class:`numbers.Complex` (:class:`complex`) - .. index:: - object: complex - pair: complex; number +These represent complex numbers as a pair of machine-level double precision +floating point numbers. The same caveats apply as for floating point numbers. +The real and imaginary parts of a complex number ``z`` can be retrieved through +the read-only attributes ``z.real`` and ``z.imag``. - These represent complex numbers as a pair of machine-level double precision - floating point numbers. The same caveats apply as for floating point numbers. - The real and imaginary parts of a complex number ``z`` can be retrieved through - the read-only attributes ``z.real`` and ``z.imag``. Sequences +--------- + +.. index:: + pair: built-in function; len + pair: object; sequence + single: index operation + single: item selection + single: subscription + +These represent finite ordered sets indexed by non-negative numbers. The +built-in function :func:`len` returns the number of items of a sequence. When +the length of a sequence is *n*, the index set contains the numbers 0, 1, +..., *n*-1. Item *i* of sequence *a* is selected by ``a[i]``. + +.. index:: single: slicing + +Sequences also support slicing: ``a[i:j]`` selects all items with index *k* such +that *i* ``<=`` *k* ``<`` *j*. When used as an expression, a slice is a +sequence of the same type. This implies that the index set is renumbered so +that it starts at 0. + +Some sequences also support "extended slicing" with a third "step" parameter: +``a[i:j:k]`` selects all items of *a* with index *x* where ``x = i + n*k``, *n* +``>=`` ``0`` and *i* ``<=`` *x* ``<`` *j*. + +Sequences are distinguished according to their mutability: + + +Immutable sequences +^^^^^^^^^^^^^^^^^^^ + +.. index:: + pair: object; immutable sequence + pair: object; immutable + +An object of an immutable sequence type cannot change once it is created. (If +the object contains references to other objects, these other objects may be +mutable and may be changed; however, the collection of objects directly +referenced by an immutable object cannot change.) + +The following types are immutable sequences: + +.. index:: + single: string; immutable sequences + +Strings .. 
index:: - builtin: len - object: sequence - single: index operation - single: item selection - single: subscription - - These represent finite ordered sets indexed by non-negative numbers. The - built-in function :func:`len` returns the number of items of a sequence. When - the length of a sequence is *n*, the index set contains the numbers 0, 1, - ..., *n*-1. Item *i* of sequence *a* is selected by ``a[i]``. - - .. index:: single: slicing - - Sequences also support slicing: ``a[i:j]`` selects all items with index *k* such - that *i* ``<=`` *k* ``<`` *j*. When used as an expression, a slice is a - sequence of the same type. This implies that the index set is renumbered so - that it starts at 0. - - Some sequences also support "extended slicing" with a third "step" parameter: - ``a[i:j:k]`` selects all items of *a* with index *x* where ``x = i + n*k``, *n* - ``>=`` ``0`` and *i* ``<=`` *x* ``<`` *j*. - - Sequences are distinguished according to their mutability: - - Immutable sequences - .. index:: - object: immutable sequence - object: immutable - - An object of an immutable sequence type cannot change once it is created. (If - the object contains references to other objects, these other objects may be - mutable and may be changed; however, the collection of objects directly - referenced by an immutable object cannot change.) - - The following types are immutable sequences: - - .. index:: - single: string; immutable sequences - - Strings - .. index:: - builtin: chr - builtin: ord - single: character - single: integer - single: Unicode - - A string is a sequence of values that represent Unicode code points. - All the code points in the range ``U+0000 - U+10FFFF`` can be - represented in a string. Python doesn't have a :c:expr:`char` type; - instead, every code point in the string is represented as a string - object with length ``1``. The built-in function :func:`ord` - converts a code point from its string form to an integer in the - range ``0 - 10FFFF``; :func:`chr` converts an integer in the range - ``0 - 10FFFF`` to the corresponding length ``1`` string object. - :meth:`str.encode` can be used to convert a :class:`str` to - :class:`bytes` using the given text encoding, and - :meth:`bytes.decode` can be used to achieve the opposite. - - Tuples - .. index:: - object: tuple - pair: singleton; tuple - pair: empty; tuple - - The items of a tuple are arbitrary Python objects. Tuples of two or - more items are formed by comma-separated lists of expressions. A tuple - of one item (a 'singleton') can be formed by affixing a comma to an - expression (an expression by itself does not create a tuple, since - parentheses must be usable for grouping of expressions). An empty - tuple can be formed by an empty pair of parentheses. - - Bytes - .. index:: bytes, byte - - A bytes object is an immutable array. The items are 8-bit bytes, - represented by integers in the range 0 <= x < 256. Bytes literals - (like ``b'abc'``) and the built-in :func:`bytes()` constructor - can be used to create bytes objects. Also, bytes objects can be - decoded to strings via the :meth:`~bytes.decode` method. - - Mutable sequences - .. index:: - object: mutable sequence - object: mutable - pair: assignment; statement - single: subscription - single: slicing - - Mutable sequences can be changed after they are created. The subscription and - slicing notations can be used as the target of assignment and :keyword:`del` - (delete) statements. - - There are currently two intrinsic mutable sequence types: - - Lists - .. 
index:: object: list - - The items of a list are arbitrary Python objects. Lists are formed by - placing a comma-separated list of expressions in square brackets. (Note - that there are no special cases needed to form lists of length 0 or 1.) - - Byte Arrays - .. index:: bytearray - - A bytearray object is a mutable array. They are created by the built-in - :func:`bytearray` constructor. Aside from being mutable - (and hence unhashable), byte arrays otherwise provide the same interface - and functionality as immutable :class:`bytes` objects. - - .. index:: module: array - - The extension module :mod:`array` provides an additional example of a - mutable sequence type, as does the :mod:`collections` module. + pair: built-in function; chr + pair: built-in function; ord + single: character + single: integer + single: Unicode + + A string is a sequence of values that represent Unicode code points. + All the code points in the range ``U+0000 - U+10FFFF`` can be + represented in a string. Python doesn't have a :c:expr:`char` type; + instead, every code point in the string is represented as a string + object with length ``1``. The built-in function :func:`ord` + converts a code point from its string form to an integer in the + range ``0 - 10FFFF``; :func:`chr` converts an integer in the range + ``0 - 10FFFF`` to the corresponding length ``1`` string object. + :meth:`str.encode` can be used to convert a :class:`str` to + :class:`bytes` using the given text encoding, and + :meth:`bytes.decode` can be used to achieve the opposite. + +Tuples + .. index:: + pair: object; tuple + pair: singleton; tuple + pair: empty; tuple + + The items of a tuple are arbitrary Python objects. Tuples of two or + more items are formed by comma-separated lists of expressions. A tuple + of one item (a 'singleton') can be formed by affixing a comma to an + expression (an expression by itself does not create a tuple, since + parentheses must be usable for grouping of expressions). An empty + tuple can be formed by an empty pair of parentheses. + +Bytes + .. index:: bytes, byte + + A bytes object is an immutable array. The items are 8-bit bytes, + represented by integers in the range 0 <= x < 256. Bytes literals + (like ``b'abc'``) and the built-in :func:`bytes()` constructor + can be used to create bytes objects. Also, bytes objects can be + decoded to strings via the :meth:`~bytes.decode` method. + + +Mutable sequences +^^^^^^^^^^^^^^^^^ + +.. index:: + pair: object; mutable sequence + pair: object; mutable + pair: assignment; statement + single: subscription + single: slicing + +Mutable sequences can be changed after they are created. The subscription and +slicing notations can be used as the target of assignment and :keyword:`del` +(delete) statements. + +.. note:: + .. index:: pair: module; array + .. index:: pair: module; collections + + The :mod:`collections` and :mod:`array` module provide + additional examples of mutable sequence types. + +There are currently two intrinsic mutable sequence types: + +Lists + .. index:: pair: object; list + + The items of a list are arbitrary Python objects. Lists are formed by + placing a comma-separated list of expressions in square brackets. (Note + that there are no special cases needed to form lists of length 0 or 1.) + +Byte Arrays + .. index:: bytearray + + A bytearray object is a mutable array. They are created by the built-in + :func:`bytearray` constructor. 
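A few of the sequence behaviours described above, shown interactively; the values are chosen only for illustration::

    >>> a = [0, 1, 2, 3, 4, 5]
    >>> a[1:4], a[::2]              # slicing and extended slicing
    ([1, 2, 3], [0, 2, 4])
    >>> ord('a'), chr(97)           # strings are sequences of code points
    (97, 'a')
    >>> b = bytearray(b'abc')       # mutable counterpart of bytes
    >>> b[0] = 65
    >>> bytes(b)
    b'Abc'
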
Aside from being mutable + (and hence unhashable), byte arrays otherwise provide the same interface + and functionality as immutable :class:`bytes` objects. + Set types - .. index:: - builtin: len - object: set type +--------- + +.. index:: + pair: built-in function; len + pair: object; set type + +These represent unordered, finite sets of unique, immutable objects. As such, +they cannot be indexed by any subscript. However, they can be iterated over, and +the built-in function :func:`len` returns the number of items in a set. Common +uses for sets are fast membership testing, removing duplicates from a sequence, +and computing mathematical operations such as intersection, union, difference, +and symmetric difference. - These represent unordered, finite sets of unique, immutable objects. As such, - they cannot be indexed by any subscript. However, they can be iterated over, and - the built-in function :func:`len` returns the number of items in a set. Common - uses for sets are fast membership testing, removing duplicates from a sequence, - and computing mathematical operations such as intersection, union, difference, - and symmetric difference. +For set elements, the same immutability rules apply as for dictionary keys. Note +that numeric types obey the normal rules for numeric comparison: if two numbers +compare equal (e.g., ``1`` and ``1.0``), only one of them can be contained in a +set. - For set elements, the same immutability rules apply as for dictionary keys. Note - that numeric types obey the normal rules for numeric comparison: if two numbers - compare equal (e.g., ``1`` and ``1.0``), only one of them can be contained in a - set. +There are currently two intrinsic set types: - There are currently two intrinsic set types: - Sets - .. index:: object: set +Sets + .. index:: pair: object; set - These represent a mutable set. They are created by the built-in :func:`set` - constructor and can be modified afterwards by several methods, such as - :meth:`~set.add`. + These represent a mutable set. They are created by the built-in :func:`set` + constructor and can be modified afterwards by several methods, such as + :meth:`~set.add`. - Frozen sets - .. index:: object: frozenset - These represent an immutable set. They are created by the built-in - :func:`frozenset` constructor. As a frozenset is immutable and - :term:`hashable`, it can be used again as an element of another set, or as - a dictionary key. +Frozen sets + .. index:: pair: object; frozenset + + These represent an immutable set. They are created by the built-in + :func:`frozenset` constructor. As a frozenset is immutable and + :term:`hashable`, it can be used again as an element of another set, or as + a dictionary key. + Mappings - .. index:: - builtin: len - single: subscription - object: mapping - - These represent finite sets of objects indexed by arbitrary index sets. The - subscript notation ``a[k]`` selects the item indexed by ``k`` from the mapping - ``a``; this can be used in expressions and as the target of assignments or - :keyword:`del` statements. The built-in function :func:`len` returns the number - of items in a mapping. - - There is currently a single intrinsic mapping type: - - Dictionaries - .. index:: object: dictionary - - These represent finite sets of objects indexed by nearly arbitrary values. 
The - only types of values not acceptable as keys are values containing lists or - dictionaries or other mutable types that are compared by value rather than by - object identity, the reason being that the efficient implementation of - dictionaries requires a key's hash value to remain constant. Numeric types used - for keys obey the normal rules for numeric comparison: if two numbers compare - equal (e.g., ``1`` and ``1.0``) then they can be used interchangeably to index - the same dictionary entry. - - Dictionaries preserve insertion order, meaning that keys will be produced - in the same order they were added sequentially over the dictionary. - Replacing an existing key does not change the order, however removing a key - and re-inserting it will add it to the end instead of keeping its old place. - - Dictionaries are mutable; they can be created by the ``{...}`` notation (see - section :ref:`dict`). - - .. index:: - module: dbm.ndbm - module: dbm.gnu - - The extension modules :mod:`dbm.ndbm` and :mod:`dbm.gnu` provide - additional examples of mapping types, as does the :mod:`collections` - module. - - .. versionchanged:: 3.7 - Dictionaries did not preserve insertion order in versions of Python before 3.6. - In CPython 3.6, insertion order was preserved, but it was considered - an implementation detail at that time rather than a language guarantee. +-------- + +.. index:: + pair: built-in function; len + single: subscription + pair: object; mapping + +These represent finite sets of objects indexed by arbitrary index sets. The +subscript notation ``a[k]`` selects the item indexed by ``k`` from the mapping +``a``; this can be used in expressions and as the target of assignments or +:keyword:`del` statements. The built-in function :func:`len` returns the number +of items in a mapping. + +There is currently a single intrinsic mapping type: + + +Dictionaries +^^^^^^^^^^^^ + +.. index:: pair: object; dictionary + +These represent finite sets of objects indexed by nearly arbitrary values. The +only types of values not acceptable as keys are values containing lists or +dictionaries or other mutable types that are compared by value rather than by +object identity, the reason being that the efficient implementation of +dictionaries requires a key's hash value to remain constant. Numeric types used +for keys obey the normal rules for numeric comparison: if two numbers compare +equal (e.g., ``1`` and ``1.0``) then they can be used interchangeably to index +the same dictionary entry. + +Dictionaries preserve insertion order, meaning that keys will be produced +in the same order they were added sequentially over the dictionary. +Replacing an existing key does not change the order, however removing a key +and re-inserting it will add it to the end instead of keeping its old place. + +Dictionaries are mutable; they can be created by the ``{...}`` notation (see +section :ref:`dict`). + +.. index:: + pair: module; dbm.ndbm + pair: module; dbm.gnu + +The extension modules :mod:`dbm.ndbm` and :mod:`dbm.gnu` provide +additional examples of mapping types, as does the :mod:`collections` +module. + +.. versionchanged:: 3.7 + Dictionaries did not preserve insertion order in versions of Python before 3.6. + In CPython 3.6, insertion order was preserved, but it was considered + an implementation detail at that time rather than a language guarantee. + Callable types - .. 
index:: - object: callable - pair: function; call - single: invocation - pair: function; argument - - These are the types to which the function call operation (see section - :ref:`calls`) can be applied: - - User-defined functions - .. index:: - pair: user-defined; function - object: function - object: user-defined function - - A user-defined function object is created by a function definition (see - section :ref:`function`). It should be called with an argument list - containing the same number of items as the function's formal parameter - list. - - Special attributes: - - .. tabularcolumns:: |l|L|l| - - .. index:: - single: __doc__ (function attribute) - single: __name__ (function attribute) - single: __module__ (function attribute) - single: __dict__ (function attribute) - single: __defaults__ (function attribute) - single: __closure__ (function attribute) - single: __code__ (function attribute) - single: __globals__ (function attribute) - single: __annotations__ (function attribute) - single: __kwdefaults__ (function attribute) - pair: global; namespace - - +-------------------------+-------------------------------+-----------+ - | Attribute | Meaning | | - +=========================+===============================+===========+ - | :attr:`__doc__` | The function's documentation | Writable | - | | string, or ``None`` if | | - | | unavailable; not inherited by | | - | | subclasses. | | - +-------------------------+-------------------------------+-----------+ - | :attr:`~definition.\ | The function's name. | Writable | - | __name__` | | | - +-------------------------+-------------------------------+-----------+ - | :attr:`~definition.\ | The function's | Writable | - | __qualname__` | :term:`qualified name`. | | - | | | | - | | .. versionadded:: 3.3 | | - +-------------------------+-------------------------------+-----------+ - | :attr:`__module__` | The name of the module the | Writable | - | | function was defined in, or | | - | | ``None`` if unavailable. | | - +-------------------------+-------------------------------+-----------+ - | :attr:`__defaults__` | A tuple containing default | Writable | - | | argument values for those | | - | | arguments that have defaults, | | - | | or ``None`` if no arguments | | - | | have a default value. | | - +-------------------------+-------------------------------+-----------+ - | :attr:`__code__` | The code object representing | Writable | - | | the compiled function body. | | - +-------------------------+-------------------------------+-----------+ - | :attr:`__globals__` | A reference to the dictionary | Read-only | - | | that holds the function's | | - | | global variables --- the | | - | | global namespace of the | | - | | module in which the function | | - | | was defined. | | - +-------------------------+-------------------------------+-----------+ - | :attr:`~object.__dict__`| The namespace supporting | Writable | - | | arbitrary function | | - | | attributes. | | - +-------------------------+-------------------------------+-----------+ - | :attr:`__closure__` | ``None`` or a tuple of cells | Read-only | - | | that contain bindings for the | | - | | function's free variables. | | - | | See below for information on | | - | | the ``cell_contents`` | | - | | attribute. | | - +-------------------------+-------------------------------+-----------+ - | :attr:`__annotations__` | A dict containing annotations | Writable | - | | of parameters. 
The keys of | | - | | the dict are the parameter | | - | | names, and ``'return'`` for | | - | | the return annotation, if | | - | | provided. For more | | - | | information on working with | | - | | this attribute, see | | - | | :ref:`annotations-howto`. | | - +-------------------------+-------------------------------+-----------+ - | :attr:`__kwdefaults__` | A dict containing defaults | Writable | - | | for keyword-only parameters. | | - +-------------------------+-------------------------------+-----------+ - - Most of the attributes labelled "Writable" check the type of the assigned value. - - Function objects also support getting and setting arbitrary attributes, which - can be used, for example, to attach metadata to functions. Regular attribute - dot-notation is used to get and set such attributes. *Note that the current - implementation only supports function attributes on user-defined functions. - Function attributes on built-in functions may be supported in the future.* - - A cell object has the attribute ``cell_contents``. This can be used to get - the value of the cell, as well as set the value. - - Additional information about a function's definition can be retrieved from its - code object; see the description of internal types below. The - :data:`cell ` type can be accessed in the :mod:`types` - module. - - Instance methods - .. index:: - object: method - object: user-defined method - pair: user-defined; method - - An instance method object combines a class, a class instance and any - callable object (normally a user-defined function). - - .. index:: - single: __func__ (method attribute) - single: __self__ (method attribute) - single: __doc__ (method attribute) - single: __name__ (method attribute) - single: __module__ (method attribute) - - Special read-only attributes: :attr:`__self__` is the class instance object, - :attr:`__func__` is the function object; :attr:`__doc__` is the method's - documentation (same as ``__func__.__doc__``); :attr:`~definition.__name__` is the - method name (same as ``__func__.__name__``); :attr:`__module__` is the - name of the module the method was defined in, or ``None`` if unavailable. - - Methods also support accessing (but not setting) the arbitrary function - attributes on the underlying function object. - - User-defined method objects may be created when getting an attribute of a - class (perhaps via an instance of that class), if that attribute is a - user-defined function object or a class method object. - - When an instance method object is created by retrieving a user-defined - function object from a class via one of its instances, its - :attr:`__self__` attribute is the instance, and the method object is said - to be bound. The new method's :attr:`__func__` attribute is the original - function object. - - When an instance method object is created by retrieving a class method - object from a class or instance, its :attr:`__self__` attribute is the - class itself, and its :attr:`__func__` attribute is the function object - underlying the class method. - - When an instance method object is called, the underlying function - (:attr:`__func__`) is called, inserting the class instance - (:attr:`__self__`) in front of the argument list. For instance, when - :class:`C` is a class which contains a definition for a function - :meth:`f`, and ``x`` is an instance of :class:`C`, calling ``x.f(1)`` is - equivalent to calling ``C.f(x, 1)``. 
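A small sketch of the bound-method behaviour just described, reusing the ``C``/``f``/``x`` names from the surrounding text::

    >>> class C:
    ...     def f(self, arg):
    ...         return (self, arg)
    ...
    >>> x = C()
    >>> x.f(1) == C.f(x, 1)          # the instance is inserted as the first argument
    True
    >>> x.f.__self__ is x            # the bound instance
    True
    >>> x.f.__func__ is C.__dict__['f']   # the original function object
    True
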
- - When an instance method object is derived from a class method object, the - "class instance" stored in :attr:`__self__` will actually be the class - itself, so that calling either ``x.f(1)`` or ``C.f(1)`` is equivalent to - calling ``f(C,1)`` where ``f`` is the underlying function. - - Note that the transformation from function object to instance method - object happens each time the attribute is retrieved from the instance. In - some cases, a fruitful optimization is to assign the attribute to a local - variable and call that local variable. Also notice that this - transformation only happens for user-defined functions; other callable - objects (and all non-callable objects) are retrieved without - transformation. It is also important to note that user-defined functions - which are attributes of a class instance are not converted to bound - methods; this *only* happens when the function is an attribute of the - class. - - Generator functions - .. index:: - single: generator; function - single: generator; iterator - - A function or method which uses the :keyword:`yield` statement (see section - :ref:`yield`) is called a :dfn:`generator function`. Such a function, when - called, always returns an :term:`iterator` object which can be used to - execute the body of the function: calling the iterator's - :meth:`iterator.__next__` method will cause the function to execute until - it provides a value using the :keyword:`!yield` statement. When the - function executes a :keyword:`return` statement or falls off the end, a - :exc:`StopIteration` exception is raised and the iterator will have - reached the end of the set of values to be returned. - - Coroutine functions - .. index:: - single: coroutine; function - - A function or method which is defined using :keyword:`async def` is called - a :dfn:`coroutine function`. Such a function, when called, returns a - :term:`coroutine` object. It may contain :keyword:`await` expressions, - as well as :keyword:`async with` and :keyword:`async for` statements. See - also the :ref:`coroutine-objects` section. - - Asynchronous generator functions - .. index:: - single: asynchronous generator; function - single: asynchronous generator; asynchronous iterator - - A function or method which is defined using :keyword:`async def` and - which uses the :keyword:`yield` statement is called a - :dfn:`asynchronous generator function`. Such a function, when called, - returns an :term:`asynchronous iterator` object which can be used in an - :keyword:`async for` statement to execute the body of the function. - - Calling the asynchronous iterator's - :meth:`aiterator.__anext__ ` method - will return an :term:`awaitable` which when awaited - will execute until it provides a value using the :keyword:`yield` - expression. When the function executes an empty :keyword:`return` - statement or falls off the end, a :exc:`StopAsyncIteration` exception - is raised and the asynchronous iterator will have reached the end of - the set of values to be yielded. - - Built-in functions - .. index:: - object: built-in function - object: function - pair: C; language - - A built-in function object is a wrapper around a C function. Examples of - built-in functions are :func:`len` and :func:`math.sin` (:mod:`math` is a - standard built-in module). The number and type of the arguments are - determined by the C function. 
Special read-only attributes: - :attr:`__doc__` is the function's documentation string, or ``None`` if - unavailable; :attr:`~definition.__name__` is the function's name; :attr:`__self__` is - set to ``None`` (but see the next item); :attr:`__module__` is the name of - the module the function was defined in or ``None`` if unavailable. - - Built-in methods - .. index:: - object: built-in method - object: method - pair: built-in; method - - This is really a different disguise of a built-in function, this time containing - an object passed to the C function as an implicit extra argument. An example of - a built-in method is ``alist.append()``, assuming *alist* is a list object. In - this case, the special read-only attribute :attr:`__self__` is set to the object - denoted by *alist*. - - Classes - Classes are callable. These objects normally act as factories for new - instances of themselves, but variations are possible for class types that - override :meth:`~object.__new__`. The arguments of the call are passed to - :meth:`__new__` and, in the typical case, to :meth:`~object.__init__` to - initialize the new instance. - - Class Instances - Instances of arbitrary classes can be made callable by defining a - :meth:`~object.__call__` method in their class. +-------------- + +.. index:: + pair: object; callable + pair: function; call + single: invocation + pair: function; argument + +These are the types to which the function call operation (see section +:ref:`calls`) can be applied: + + +User-defined functions +^^^^^^^^^^^^^^^^^^^^^^ + +.. index:: + pair: user-defined; function + pair: object; function + pair: object; user-defined function + +A user-defined function object is created by a function definition (see +section :ref:`function`). It should be called with an argument list +containing the same number of items as the function's formal parameter +list. + +Special attributes: + +.. tabularcolumns:: |l|L|l| + +.. index:: + single: __doc__ (function attribute) + single: __name__ (function attribute) + single: __module__ (function attribute) + single: __dict__ (function attribute) + single: __defaults__ (function attribute) + single: __closure__ (function attribute) + single: __code__ (function attribute) + single: __globals__ (function attribute) + single: __annotations__ (function attribute) + single: __kwdefaults__ (function attribute) + single: __type_params__ (function attribute) + pair: global; namespace + ++-------------------------+-------------------------------+-----------+ +| Attribute | Meaning | | ++=========================+===============================+===========+ +| :attr:`__doc__` | The function's documentation | Writable | +| | string, or ``None`` if | | +| | unavailable; not inherited by | | +| | subclasses. | | ++-------------------------+-------------------------------+-----------+ +| :attr:`~definition.\ | The function's name. | Writable | +| __name__` | | | ++-------------------------+-------------------------------+-----------+ +| :attr:`~definition.\ | The function's | Writable | +| __qualname__` | :term:`qualified name`. | | +| | | | +| | .. versionadded:: 3.3 | | ++-------------------------+-------------------------------+-----------+ +| :attr:`__module__` | The name of the module the | Writable | +| | function was defined in, or | | +| | ``None`` if unavailable. 
| | ++-------------------------+-------------------------------+-----------+ +| :attr:`__defaults__` | A tuple containing default | Writable | +| | argument values for those | | +| | arguments that have defaults, | | +| | or ``None`` if no arguments | | +| | have a default value. | | ++-------------------------+-------------------------------+-----------+ +| :attr:`__code__` | The code object representing | Writable | +| | the compiled function body. | | ++-------------------------+-------------------------------+-----------+ +| :attr:`__globals__` | A reference to the dictionary | Read-only | +| | that holds the function's | | +| | global variables --- the | | +| | global namespace of the | | +| | module in which the function | | +| | was defined. | | ++-------------------------+-------------------------------+-----------+ +| :attr:`~object.__dict__`| The namespace supporting | Writable | +| | arbitrary function | | +| | attributes. | | ++-------------------------+-------------------------------+-----------+ +| :attr:`__closure__` | ``None`` or a tuple of cells | Read-only | +| | that contain bindings for the | | +| | function's free variables. | | +| | See below for information on | | +| | the ``cell_contents`` | | +| | attribute. | | ++-------------------------+-------------------------------+-----------+ +| :attr:`__annotations__` | A dict containing annotations | Writable | +| | of parameters. The keys of | | +| | the dict are the parameter | | +| | names, and ``'return'`` for | | +| | the return annotation, if | | +| | provided. For more | | +| | information on working with | | +| | this attribute, see | | +| | :ref:`annotations-howto`. | | ++-------------------------+-------------------------------+-----------+ +| :attr:`__kwdefaults__` | A dict containing defaults | Writable | +| | for keyword-only parameters. | | ++-------------------------+-------------------------------+-----------+ +| :attr:`__type_params__` | A tuple containing the | Writable | +| | :ref:`type parameters | | +| | ` of a | | +| | :ref:`generic function | | +| | `. | | ++-------------------------+-------------------------------+-----------+ + +Most of the attributes labelled "Writable" check the type of the assigned value. + +Function objects also support getting and setting arbitrary attributes, which +can be used, for example, to attach metadata to functions. Regular attribute +dot-notation is used to get and set such attributes. *Note that the current +implementation only supports function attributes on user-defined functions. +Function attributes on built-in functions may be supported in the future.* + +A cell object has the attribute ``cell_contents``. This can be used to get +the value of the cell, as well as set the value. + +Additional information about a function's definition can be retrieved from its +code object; see the description of internal types below. The +:data:`cell ` type can be accessed in the :mod:`types` +module. + + +Instance methods +^^^^^^^^^^^^^^^^ + +.. index:: + pair: object; method + pair: object; user-defined method + pair: user-defined; method + +An instance method object combines a class, a class instance and any +callable object (normally a user-defined function). + +.. 
index:: + single: __func__ (method attribute) + single: __self__ (method attribute) + single: __doc__ (method attribute) + single: __name__ (method attribute) + single: __module__ (method attribute) + +Special read-only attributes: :attr:`__self__` is the class instance object, +:attr:`__func__` is the function object; :attr:`__doc__` is the method's +documentation (same as ``__func__.__doc__``); :attr:`~definition.__name__` is the +method name (same as ``__func__.__name__``); :attr:`__module__` is the +name of the module the method was defined in, or ``None`` if unavailable. + +Methods also support accessing (but not setting) the arbitrary function +attributes on the underlying function object. + +User-defined method objects may be created when getting an attribute of a +class (perhaps via an instance of that class), if that attribute is a +user-defined function object or a class method object. + +When an instance method object is created by retrieving a user-defined +function object from a class via one of its instances, its +:attr:`__self__` attribute is the instance, and the method object is said +to be bound. The new method's :attr:`__func__` attribute is the original +function object. + +When an instance method object is created by retrieving a class method +object from a class or instance, its :attr:`__self__` attribute is the +class itself, and its :attr:`__func__` attribute is the function object +underlying the class method. + +When an instance method object is called, the underlying function +(:attr:`__func__`) is called, inserting the class instance +(:attr:`__self__`) in front of the argument list. For instance, when +:class:`C` is a class which contains a definition for a function +:meth:`f`, and ``x`` is an instance of :class:`C`, calling ``x.f(1)`` is +equivalent to calling ``C.f(x, 1)``. + +When an instance method object is derived from a class method object, the +"class instance" stored in :attr:`__self__` will actually be the class +itself, so that calling either ``x.f(1)`` or ``C.f(1)`` is equivalent to +calling ``f(C,1)`` where ``f`` is the underlying function. + +Note that the transformation from function object to instance method +object happens each time the attribute is retrieved from the instance. In +some cases, a fruitful optimization is to assign the attribute to a local +variable and call that local variable. Also notice that this +transformation only happens for user-defined functions; other callable +objects (and all non-callable objects) are retrieved without +transformation. It is also important to note that user-defined functions +which are attributes of a class instance are not converted to bound +methods; this *only* happens when the function is an attribute of the +class. + + +Generator functions +^^^^^^^^^^^^^^^^^^^ + +.. index:: + single: generator; function + single: generator; iterator + +A function or method which uses the :keyword:`yield` statement (see section +:ref:`yield`) is called a :dfn:`generator function`. Such a function, when +called, always returns an :term:`iterator` object which can be used to +execute the body of the function: calling the iterator's +:meth:`iterator.__next__` method will cause the function to execute until +it provides a value using the :keyword:`!yield` statement. When the +function executes a :keyword:`return` statement or falls off the end, a +:exc:`StopIteration` exception is raised and the iterator will have +reached the end of the set of values to be returned. 
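For example, calling a generator function returns an iterator that runs the body lazily; the ``countdown`` function here is purely illustrative::

    def countdown(n):
        while n > 0:
            yield n
            n -= 1

    it = countdown(3)
    print(next(it))        # 3
    print(list(it))        # [2, 1]; a further next() would raise StopIteration
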
+ + +Coroutine functions +^^^^^^^^^^^^^^^^^^^ + +.. index:: + single: coroutine; function + +A function or method which is defined using :keyword:`async def` is called +a :dfn:`coroutine function`. Such a function, when called, returns a +:term:`coroutine` object. It may contain :keyword:`await` expressions, +as well as :keyword:`async with` and :keyword:`async for` statements. See +also the :ref:`coroutine-objects` section. + + +Asynchronous generator functions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. index:: + single: asynchronous generator; function + single: asynchronous generator; asynchronous iterator + +A function or method which is defined using :keyword:`async def` and +which uses the :keyword:`yield` statement is called a +:dfn:`asynchronous generator function`. Such a function, when called, +returns an :term:`asynchronous iterator` object which can be used in an +:keyword:`async for` statement to execute the body of the function. + +Calling the asynchronous iterator's +:meth:`aiterator.__anext__ ` method +will return an :term:`awaitable` which when awaited +will execute until it provides a value using the :keyword:`yield` +expression. When the function executes an empty :keyword:`return` +statement or falls off the end, a :exc:`StopAsyncIteration` exception +is raised and the asynchronous iterator will have reached the end of +the set of values to be yielded. + + +Built-in functions +^^^^^^^^^^^^^^^^^^ + +.. index:: + pair: object; built-in function + pair: object; function + pair: C; language + +A built-in function object is a wrapper around a C function. Examples of +built-in functions are :func:`len` and :func:`math.sin` (:mod:`math` is a +standard built-in module). The number and type of the arguments are +determined by the C function. Special read-only attributes: +:attr:`__doc__` is the function's documentation string, or ``None`` if +unavailable; :attr:`~definition.__name__` is the function's name; :attr:`__self__` is +set to ``None`` (but see the next item); :attr:`__module__` is the name of +the module the function was defined in or ``None`` if unavailable. + + +Built-in methods +^^^^^^^^^^^^^^^^ + +.. index:: + pair: object; built-in method + pair: object; method + pair: built-in; method + +This is really a different disguise of a built-in function, this time containing +an object passed to the C function as an implicit extra argument. An example of +a built-in method is ``alist.append()``, assuming *alist* is a list object. In +this case, the special read-only attribute :attr:`__self__` is set to the object +denoted by *alist*. + + +Classes +^^^^^^^ + +Classes are callable. These objects normally act as factories for new +instances of themselves, but variations are possible for class types that +override :meth:`~object.__new__`. The arguments of the call are passed to +:meth:`__new__` and, in the typical case, to :meth:`~object.__init__` to +initialize the new instance. + + +Class Instances +^^^^^^^^^^^^^^^ + +Instances of arbitrary classes can be made callable by defining a +:meth:`~object.__call__` method in their class. Modules - .. index:: - statement: import - object: module - - Modules are a basic organizational unit of Python code, and are created by - the :ref:`import system ` as invoked either by the - :keyword:`import` statement, or by calling - functions such as :func:`importlib.import_module` and built-in - :func:`__import__`. 
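For instance, the import mechanisms mentioned above all yield the same module object (an illustrative check, using :mod:`math` only as a convenient example)::

    >>> import math
    >>> import importlib
    >>> importlib.import_module('math') is math
    True
    >>> __import__('math') is math
    True
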
A module object has a namespace implemented by a - dictionary object (this is the dictionary referenced by the ``__globals__`` - attribute of functions defined in the module). Attribute references are - translated to lookups in this dictionary, e.g., ``m.x`` is equivalent to - ``m.__dict__["x"]``. A module object does not contain the code object used - to initialize the module (since it isn't needed once the initialization is - done). - - Attribute assignment updates the module's namespace dictionary, e.g., - ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``. +------- - .. index:: - single: __name__ (module attribute) - single: __doc__ (module attribute) - single: __file__ (module attribute) - single: __annotations__ (module attribute) - pair: module; namespace +.. index:: + pair: statement; import + pair: object; module + +Modules are a basic organizational unit of Python code, and are created by +the :ref:`import system ` as invoked either by the +:keyword:`import` statement, or by calling +functions such as :func:`importlib.import_module` and built-in +:func:`__import__`. A module object has a namespace implemented by a +dictionary object (this is the dictionary referenced by the ``__globals__`` +attribute of functions defined in the module). Attribute references are +translated to lookups in this dictionary, e.g., ``m.x`` is equivalent to +``m.__dict__["x"]``. A module object does not contain the code object used +to initialize the module (since it isn't needed once the initialization is +done). + +Attribute assignment updates the module's namespace dictionary, e.g., +``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``. - Predefined (writable) attributes: +.. index:: + single: __name__ (module attribute) + single: __doc__ (module attribute) + single: __file__ (module attribute) + single: __annotations__ (module attribute) + pair: module; namespace - :attr:`__name__` - The module's name. +Predefined (writable) attributes: - :attr:`__doc__` - The module's documentation string, or ``None`` if - unavailable. + :attr:`__name__` + The module's name. - :attr:`__file__` - The pathname of the file from which the - module was loaded, if it was loaded from a file. - The :attr:`__file__` - attribute may be missing for certain types of modules, such as C modules - that are statically linked into the interpreter. For extension modules - loaded dynamically from a shared library, it's the pathname of the shared - library file. + :attr:`__doc__` + The module's documentation string, or ``None`` if + unavailable. - :attr:`__annotations__` - A dictionary containing - :term:`variable annotations ` collected during - module body execution. For best practices on working - with :attr:`__annotations__`, please see :ref:`annotations-howto`. + :attr:`__file__` + The pathname of the file from which the + module was loaded, if it was loaded from a file. + The :attr:`__file__` + attribute may be missing for certain types of modules, such as C modules + that are statically linked into the interpreter. For extension modules + loaded dynamically from a shared library, it's the pathname of the shared + library file. - .. index:: single: __dict__ (module attribute) + :attr:`__annotations__` + A dictionary containing + :term:`variable annotations ` collected during + module body execution. For best practices on working + with :attr:`__annotations__`, please see :ref:`annotations-howto`. - Special read-only attribute: :attr:`~object.__dict__` is the module's - namespace as a dictionary object. +.. 
index:: single: __dict__ (module attribute) - .. impl-detail:: +Special read-only attribute: :attr:`~object.__dict__` is the module's +namespace as a dictionary object. + +.. impl-detail:: + + Because of the way CPython clears module dictionaries, the module + dictionary will be cleared when the module falls out of scope even if the + dictionary still has live references. To avoid this, copy the dictionary + or keep the module around while using its dictionary directly. - Because of the way CPython clears module dictionaries, the module - dictionary will be cleared when the module falls out of scope even if the - dictionary still has live references. To avoid this, copy the dictionary - or keep the module around while using its dictionary directly. Custom classes - Custom class types are typically created by class definitions (see section - :ref:`class`). A class has a namespace implemented by a dictionary object. - Class attribute references are translated to lookups in this dictionary, e.g., - ``C.x`` is translated to ``C.__dict__["x"]`` (although there are a number of - hooks which allow for other means of locating attributes). When the attribute - name is not found there, the attribute search continues in the base classes. - This search of the base classes uses the C3 method resolution order which - behaves correctly even in the presence of 'diamond' inheritance structures - where there are multiple inheritance paths leading back to a common ancestor. - Additional details on the C3 MRO used by Python can be found in the - documentation accompanying the 2.3 release at - https://www.python.org/download/releases/2.3/mro/. - - .. XXX: Could we add that MRO doc as an appendix to the language ref? +-------------- + +Custom class types are typically created by class definitions (see section +:ref:`class`). A class has a namespace implemented by a dictionary object. +Class attribute references are translated to lookups in this dictionary, e.g., +``C.x`` is translated to ``C.__dict__["x"]`` (although there are a number of +hooks which allow for other means of locating attributes). When the attribute +name is not found there, the attribute search continues in the base classes. +This search of the base classes uses the C3 method resolution order which +behaves correctly even in the presence of 'diamond' inheritance structures +where there are multiple inheritance paths leading back to a common ancestor. +Additional details on the C3 MRO used by Python can be found in the +documentation accompanying the 2.3 release at +https://www.python.org/download/releases/2.3/mro/. + +.. XXX: Could we add that MRO doc as an appendix to the language ref? - .. index:: - object: class - object: class instance - object: instance - pair: class object; call - single: container - object: dictionary - pair: class; attribute +.. index:: + pair: object; class + pair: object; class instance + pair: object; instance + pair: class object; call + single: container + pair: object; dictionary + pair: class; attribute - When a class attribute reference (for class :class:`C`, say) would yield a - class method object, it is transformed into an instance method object whose - :attr:`__self__` attribute is :class:`C`. When it would yield a static - method object, it is transformed into the object wrapped by the static method - object. See section :ref:`descriptors` for another way in which attributes - retrieved from a class may differ from those actually contained in its - :attr:`~object.__dict__`. 
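A brief sketch of the class-attribute transformations just described; the class and method names are invented for illustration::

    >>> class C:
    ...     @classmethod
    ...     def cm(cls):
    ...         return cls
    ...     @staticmethod
    ...     def sm():
    ...         return "plain function result"
    ...
    >>> C.cm.__self__ is C           # class method bound to the class itself
    True
    >>> C.sm()                       # static method: the wrapped function is returned
    'plain function result'
    >>> type(C.__dict__['sm'])       # the wrapper actually stored in the class dict
    <class 'staticmethod'>
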
+When a class attribute reference (for class :class:`C`, say) would yield a +class method object, it is transformed into an instance method object whose +:attr:`__self__` attribute is :class:`C`. When it would yield a static +method object, it is transformed into the object wrapped by the static method +object. See section :ref:`descriptors` for another way in which attributes +retrieved from a class may differ from those actually contained in its +:attr:`~object.__dict__`. - .. index:: triple: class; attribute; assignment +.. index:: triple: class; attribute; assignment - Class attribute assignments update the class's dictionary, never the dictionary - of a base class. +Class attribute assignments update the class's dictionary, never the dictionary +of a base class. - .. index:: pair: class object; call +.. index:: pair: class object; call - A class object can be called (see above) to yield a class instance (see below). +A class object can be called (see above) to yield a class instance (see below). - .. index:: - single: __name__ (class attribute) - single: __module__ (class attribute) - single: __dict__ (class attribute) - single: __bases__ (class attribute) - single: __doc__ (class attribute) - single: __annotations__ (class attribute) +.. index:: + single: __name__ (class attribute) + single: __module__ (class attribute) + single: __dict__ (class attribute) + single: __bases__ (class attribute) + single: __doc__ (class attribute) + single: __annotations__ (class attribute) + single: __type_params__ (class attribute) + +Special attributes: - Special attributes: + :attr:`~definition.__name__` + The class name. - :attr:`~definition.__name__` - The class name. + :attr:`__module__` + The name of the module in which the class was defined. + + :attr:`~object.__dict__` + The dictionary containing the class's namespace. - :attr:`__module__` - The name of the module in which the class was defined. + :attr:`~class.__bases__` + A tuple containing the base classes, in the order of + their occurrence in the base class list. - :attr:`~object.__dict__` - The dictionary containing the class's namespace. + :attr:`__doc__` + The class's documentation string, or ``None`` if undefined. - :attr:`~class.__bases__` - A tuple containing the base classes, in the order of - their occurrence in the base class list. + :attr:`__annotations__` + A dictionary containing + :term:`variable annotations ` + collected during class body execution. For best practices on + working with :attr:`__annotations__`, please see + :ref:`annotations-howto`. - :attr:`__doc__` - The class's documentation string, or ``None`` if undefined. + :attr:`__type_params__` + A tuple containing the :ref:`type parameters ` of + a :ref:`generic class `. - :attr:`__annotations__` - A dictionary containing - :term:`variable annotations ` - collected during class body execution. For best practices on - working with :attr:`__annotations__`, please see - :ref:`annotations-howto`. Class instances - .. index:: - object: class instance - object: instance - pair: class; instance - pair: class instance; attribute - - A class instance is created by calling a class object (see above). A class - instance has a namespace implemented as a dictionary which is the first place - in which attribute references are searched. When an attribute is not found - there, and the instance's class has an attribute by that name, the search - continues with the class attributes. 
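A minimal illustration of the instance attribute search described here, with an invented class attribute ``greeting``::

    >>> class C:
    ...     greeting = 'hello'        # class attribute
    ...
    >>> x = C()
    >>> x.greeting                    # not in x.__dict__; found on the class
    'hello'
    >>> x.greeting = 'hi'             # assignment creates an instance attribute
    >>> x.__dict__
    {'greeting': 'hi'}
    >>> C.greeting                    # the class's dictionary is untouched
    'hello'
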
If a class attribute is found that is a - user-defined function object, it is transformed into an instance method - object whose :attr:`__self__` attribute is the instance. Static method and - class method objects are also transformed; see above under "Classes". See - section :ref:`descriptors` for another way in which attributes of a class - retrieved via its instances may differ from the objects actually stored in - the class's :attr:`~object.__dict__`. If no class attribute is found, and the - object's class has a :meth:`~object.__getattr__` method, that is called to satisfy - the lookup. - - .. index:: triple: class instance; attribute; assignment - - Attribute assignments and deletions update the instance's dictionary, never a - class's dictionary. If the class has a :meth:`~object.__setattr__` or - :meth:`~object.__delattr__` method, this is called instead of updating the instance - dictionary directly. +--------------- - .. index:: - object: numeric - object: sequence - object: mapping +.. index:: + pair: object; class instance + pair: object; instance + pair: class; instance + pair: class instance; attribute + +A class instance is created by calling a class object (see above). A class +instance has a namespace implemented as a dictionary which is the first place +in which attribute references are searched. When an attribute is not found +there, and the instance's class has an attribute by that name, the search +continues with the class attributes. If a class attribute is found that is a +user-defined function object, it is transformed into an instance method +object whose :attr:`__self__` attribute is the instance. Static method and +class method objects are also transformed; see above under "Classes". See +section :ref:`descriptors` for another way in which attributes of a class +retrieved via its instances may differ from the objects actually stored in +the class's :attr:`~object.__dict__`. If no class attribute is found, and the +object's class has a :meth:`~object.__getattr__` method, that is called to satisfy +the lookup. + +.. index:: triple: class instance; attribute; assignment + +Attribute assignments and deletions update the instance's dictionary, never a +class's dictionary. If the class has a :meth:`~object.__setattr__` or +:meth:`~object.__delattr__` method, this is called instead of updating the instance +dictionary directly. - Class instances can pretend to be numbers, sequences, or mappings if they have - methods with certain special names. See section :ref:`specialnames`. +.. index:: + pair: object; numeric + pair: object; sequence + pair: object; mapping - .. index:: - single: __dict__ (instance attribute) - single: __class__ (instance attribute) +Class instances can pretend to be numbers, sequences, or mappings if they have +methods with certain special names. See section :ref:`specialnames`. + +.. index:: + single: __dict__ (instance attribute) + single: __class__ (instance attribute) + +Special attributes: :attr:`~object.__dict__` is the attribute dictionary; +:attr:`~instance.__class__` is the instance's class. - Special attributes: :attr:`~object.__dict__` is the attribute dictionary; - :attr:`~instance.__class__` is the instance's class. I/O objects (also known as file objects) - .. 
index:: - builtin: open - module: io - single: popen() (in module os) - single: makefile() (socket method) - single: sys.stdin - single: sys.stdout - single: sys.stderr - single: stdio - single: stdin (in module sys) - single: stdout (in module sys) - single: stderr (in module sys) - - A :term:`file object` represents an open file. Various shortcuts are - available to create file objects: the :func:`open` built-in function, and - also :func:`os.popen`, :func:`os.fdopen`, and the - :meth:`~socket.socket.makefile` method of socket objects (and perhaps by - other functions or methods provided by extension modules). - - The objects ``sys.stdin``, ``sys.stdout`` and ``sys.stderr`` are - initialized to file objects corresponding to the interpreter's standard - input, output and error streams; they are all open in text mode and - therefore follow the interface defined by the :class:`io.TextIOBase` - abstract class. +---------------------------------------- + +.. index:: + pair: built-in function; open + pair: module; io + single: popen() (in module os) + single: makefile() (socket method) + single: sys.stdin + single: sys.stdout + single: sys.stderr + single: stdio + single: stdin (in module sys) + single: stdout (in module sys) + single: stderr (in module sys) + +A :term:`file object` represents an open file. Various shortcuts are +available to create file objects: the :func:`open` built-in function, and +also :func:`os.popen`, :func:`os.fdopen`, and the +:meth:`~socket.socket.makefile` method of socket objects (and perhaps by +other functions or methods provided by extension modules). + +The objects ``sys.stdin``, ``sys.stdout`` and ``sys.stderr`` are +initialized to file objects corresponding to the interpreter's standard +input, output and error streams; they are all open in text mode and +therefore follow the interface defined by the :class:`io.TextIOBase` +abstract class. + Internal types - .. index:: - single: internal type - single: types, internal - - A few types used internally by the interpreter are exposed to the user. Their - definitions may change with future versions of the interpreter, but they are - mentioned here for completeness. - - .. index:: bytecode, object; code, code object - - Code objects - Code objects represent *byte-compiled* executable Python code, or :term:`bytecode`. - The difference between a code object and a function object is that the function - object contains an explicit reference to the function's globals (the module in - which it was defined), while a code object contains no context; also the default - argument values are stored in the function object, not in the code object - (because they represent values calculated at run-time). Unlike function - objects, code objects are immutable and contain no references (directly or - indirectly) to mutable objects. - - .. 
index:: - single: co_argcount (code object attribute) - single: co_posonlyargcount (code object attribute) - single: co_kwonlyargcount (code object attribute) - single: co_code (code object attribute) - single: co_consts (code object attribute) - single: co_filename (code object attribute) - single: co_firstlineno (code object attribute) - single: co_flags (code object attribute) - single: co_lnotab (code object attribute) - single: co_name (code object attribute) - single: co_names (code object attribute) - single: co_nlocals (code object attribute) - single: co_stacksize (code object attribute) - single: co_varnames (code object attribute) - single: co_cellvars (code object attribute) - single: co_freevars (code object attribute) - single: co_qualname (code object attribute) - - Special read-only attributes: :attr:`co_name` gives the function name; - :attr:`co_qualname` gives the fully qualified function name; - :attr:`co_argcount` is the total number of positional arguments - (including positional-only arguments and arguments with default values); - :attr:`co_posonlyargcount` is the number of positional-only arguments - (including arguments with default values); :attr:`co_kwonlyargcount` is - the number of keyword-only arguments (including arguments with default - values); :attr:`co_nlocals` is the number of local variables used by the - function (including arguments); :attr:`co_varnames` is a tuple containing - the names of the local variables (starting with the argument names); - :attr:`co_cellvars` is a tuple containing the names of local variables - that are referenced by nested functions; :attr:`co_freevars` is a tuple - containing the names of free variables; :attr:`co_code` is a string - representing the sequence of bytecode instructions; :attr:`co_consts` is - a tuple containing the literals used by the bytecode; :attr:`co_names` is - a tuple containing the names used by the bytecode; :attr:`co_filename` is - the filename from which the code was compiled; :attr:`co_firstlineno` is - the first line number of the function; :attr:`co_lnotab` is a string - encoding the mapping from bytecode offsets to line numbers (for details - see the source code of the interpreter); :attr:`co_stacksize` is the - required stack size; :attr:`co_flags` is an integer encoding a number - of flags for the interpreter. - - .. index:: object: generator - - The following flag bits are defined for :attr:`co_flags`: bit ``0x04`` is set if - the function uses the ``*arguments`` syntax to accept an arbitrary number of - positional arguments; bit ``0x08`` is set if the function uses the - ``**keywords`` syntax to accept arbitrary keyword arguments; bit ``0x20`` is set - if the function is a generator. - - Future feature declarations (``from __future__ import division``) also use bits - in :attr:`co_flags` to indicate whether a code object was compiled with a - particular feature enabled: bit ``0x2000`` is set if the function was compiled - with future division enabled; bits ``0x10`` and ``0x1000`` were used in earlier - versions of Python. - - Other bits in :attr:`co_flags` are reserved for internal use. - - .. index:: single: documentation string - - If a code object represents a function, the first item in :attr:`co_consts` is - the documentation string of the function, or ``None`` if undefined. - - .. method:: codeobject.co_positions() - - Returns an iterable over the source code positions of each bytecode - instruction in the code object. 
- - The iterator returns tuples containing the ``(start_line, end_line, - start_column, end_column)``. The *i-th* tuple corresponds to the - position of the source code that compiled to the *i-th* instruction. - Column information is 0-indexed utf-8 byte offsets on the given source - line. - - This positional information can be missing. A non-exhaustive lists of - cases where this may happen: - - - Running the interpreter with :option:`-X` ``no_debug_ranges``. - - Loading a pyc file compiled while using :option:`-X` ``no_debug_ranges``. - - Position tuples corresponding to artificial instructions. - - Line and column numbers that can't be represented due to - implementation specific limitations. - - When this occurs, some or all of the tuple elements can be - :const:`None`. - - .. versionadded:: 3.11 - - .. note:: - This feature requires storing column positions in code objects which may - result in a small increase of disk usage of compiled Python files or - interpreter memory usage. To avoid storing the extra information and/or - deactivate printing the extra traceback information, the - :option:`-X` ``no_debug_ranges`` command line flag or the :envvar:`PYTHONNODEBUGRANGES` - environment variable can be used. - - .. _frame-objects: - - Frame objects - .. index:: object: frame - - Frame objects represent execution frames. They may occur in traceback objects - (see below), and are also passed to registered trace functions. - - .. index:: - single: f_back (frame attribute) - single: f_code (frame attribute) - single: f_globals (frame attribute) - single: f_locals (frame attribute) - single: f_lasti (frame attribute) - single: f_builtins (frame attribute) - - Special read-only attributes: :attr:`f_back` is to the previous stack frame - (towards the caller), or ``None`` if this is the bottom stack frame; - :attr:`f_code` is the code object being executed in this frame; :attr:`f_locals` - is the dictionary used to look up local variables; :attr:`f_globals` is used for - global variables; :attr:`f_builtins` is used for built-in (intrinsic) names; - :attr:`f_lasti` gives the precise instruction (this is an index into the - bytecode string of the code object). - - Accessing ``f_code`` raises an :ref:`auditing event ` - ``object.__getattr__`` with arguments ``obj`` and ``"f_code"``. - - .. index:: - single: f_trace (frame attribute) - single: f_trace_lines (frame attribute) - single: f_trace_opcodes (frame attribute) - single: f_lineno (frame attribute) - - Special writable attributes: :attr:`f_trace`, if not ``None``, is a function - called for various events during code execution (this is used by the debugger). - Normally an event is triggered for each new source line - this can be - disabled by setting :attr:`f_trace_lines` to :const:`False`. - - Implementations *may* allow per-opcode events to be requested by setting - :attr:`f_trace_opcodes` to :const:`True`. Note that this may lead to - undefined interpreter behaviour if exceptions raised by the trace - function escape to the function being traced. - - :attr:`f_lineno` is the current line number of the frame --- writing to this - from within a trace function jumps to the given line (only for the bottom-most - frame). A debugger can implement a Jump command (aka Set Next Statement) - by writing to f_lineno. - - Frame objects support one method: - - .. method:: frame.clear() - - This method clears all references to local variables held by the - frame. Also, if the frame belonged to a generator, the generator - is finalized. 
This helps break reference cycles involving frame - objects (for example when catching an exception and storing its - traceback for later use). - - :exc:`RuntimeError` is raised if the frame is currently executing. - - .. versionadded:: 3.4 - - .. _traceback-objects: - - Traceback objects - .. index:: - object: traceback - pair: stack; trace - pair: exception; handler - pair: execution; stack - single: exc_info (in module sys) - single: last_traceback (in module sys) - single: sys.exc_info - single: sys.last_traceback - - Traceback objects represent a stack trace of an exception. A traceback object - is implicitly created when an exception occurs, and may also be explicitly - created by calling :class:`types.TracebackType`. - - For implicitly created tracebacks, when the search for an exception handler - unwinds the execution stack, at each unwound level a traceback object is - inserted in front of the current traceback. When an exception handler is - entered, the stack trace is made available to the program. (See section - :ref:`try`.) It is accessible as the third item of the - tuple returned by ``sys.exc_info()``, and as the ``__traceback__`` attribute - of the caught exception. - - When the program contains no suitable - handler, the stack trace is written (nicely formatted) to the standard error - stream; if the interpreter is interactive, it is also made available to the user - as ``sys.last_traceback``. - - For explicitly created tracebacks, it is up to the creator of the traceback - to determine how the ``tb_next`` attributes should be linked to form a - full stack trace. - - .. index:: - single: tb_frame (traceback attribute) - single: tb_lineno (traceback attribute) - single: tb_lasti (traceback attribute) - statement: try - - Special read-only attributes: - :attr:`tb_frame` points to the execution frame of the current level; - :attr:`tb_lineno` gives the line number where the exception occurred; - :attr:`tb_lasti` indicates the precise instruction. - The line number and last instruction in the traceback may differ from the - line number of its frame object if the exception occurred in a - :keyword:`try` statement with no matching except clause or with a - finally clause. - - Accessing ``tb_frame`` raises an :ref:`auditing event ` - ``object.__getattr__`` with arguments ``obj`` and ``"tb_frame"``. - - .. index:: - single: tb_next (traceback attribute) - - Special writable attribute: :attr:`tb_next` is the next level in the stack - trace (towards the frame where the exception occurred), or ``None`` if - there is no next level. - - .. versionchanged:: 3.7 - Traceback objects can now be explicitly instantiated from Python code, - and the ``tb_next`` attribute of existing instances can be updated. - - Slice objects - .. index:: builtin: slice - - Slice objects are used to represent slices for - :meth:`~object.__getitem__` - methods. They are also created by the built-in :func:`slice` function. - - .. index:: - single: start (slice object attribute) - single: stop (slice object attribute) - single: step (slice object attribute) - - Special read-only attributes: :attr:`~slice.start` is the lower bound; - :attr:`~slice.stop` is the upper bound; :attr:`~slice.step` is the step - value; each is ``None`` if omitted. These attributes can have any type. - - Slice objects support one method: - - .. 
method:: slice.indices(self, length) - - This method takes a single integer argument *length* and computes - information about the slice that the slice object would describe if - applied to a sequence of *length* items. It returns a tuple of three - integers; respectively these are the *start* and *stop* indices and the - *step* or stride length of the slice. Missing or out-of-bounds indices - are handled in a manner consistent with regular slices. - - Static method objects - Static method objects provide a way of defeating the transformation of function - objects to method objects described above. A static method object is a wrapper - around any other object, usually a user-defined method object. When a static - method object is retrieved from a class or a class instance, the object actually - returned is the wrapped object, which is not subject to any further - transformation. Static method objects are also callable. Static method - objects are created by the built-in :func:`staticmethod` constructor. - - Class method objects - A class method object, like a static method object, is a wrapper around another - object that alters the way in which that object is retrieved from classes and - class instances. The behaviour of class method objects upon such retrieval is - described above, under "User-defined methods". Class method objects are created - by the built-in :func:`classmethod` constructor. +-------------- + +.. index:: + single: internal type + single: types, internal + +A few types used internally by the interpreter are exposed to the user. Their +definitions may change with future versions of the interpreter, but they are +mentioned here for completeness. + + +.. _code-objects: + +Code objects +^^^^^^^^^^^^ + +.. index:: bytecode, object; code, code object + +Code objects represent *byte-compiled* executable Python code, or :term:`bytecode`. +The difference between a code object and a function object is that the function +object contains an explicit reference to the function's globals (the module in +which it was defined), while a code object contains no context; also the default +argument values are stored in the function object, not in the code object +(because they represent values calculated at run-time). Unlike function +objects, code objects are immutable and contain no references (directly or +indirectly) to mutable objects. + +.. 
index:: + single: co_argcount (code object attribute) + single: co_posonlyargcount (code object attribute) + single: co_kwonlyargcount (code object attribute) + single: co_code (code object attribute) + single: co_consts (code object attribute) + single: co_filename (code object attribute) + single: co_firstlineno (code object attribute) + single: co_flags (code object attribute) + single: co_lnotab (code object attribute) + single: co_name (code object attribute) + single: co_names (code object attribute) + single: co_nlocals (code object attribute) + single: co_stacksize (code object attribute) + single: co_varnames (code object attribute) + single: co_cellvars (code object attribute) + single: co_freevars (code object attribute) + single: co_qualname (code object attribute) + +Special read-only attributes: :attr:`co_name` gives the function name; +:attr:`co_qualname` gives the fully qualified function name; +:attr:`co_argcount` is the total number of positional arguments +(including positional-only arguments and arguments with default values); +:attr:`co_posonlyargcount` is the number of positional-only arguments +(including arguments with default values); :attr:`co_kwonlyargcount` is +the number of keyword-only arguments (including arguments with default +values); :attr:`co_nlocals` is the number of local variables used by the +function (including arguments); :attr:`co_varnames` is a tuple containing +the names of the local variables (starting with the argument names); +:attr:`co_cellvars` is a tuple containing the names of local variables +that are referenced by nested functions; :attr:`co_freevars` is a tuple +containing the names of free variables; :attr:`co_code` is a string +representing the sequence of bytecode instructions; :attr:`co_consts` is +a tuple containing the literals used by the bytecode; :attr:`co_names` is +a tuple containing the names used by the bytecode; :attr:`co_filename` is +the filename from which the code was compiled; :attr:`co_firstlineno` is +the first line number of the function; :attr:`co_lnotab` is a string +encoding the mapping from bytecode offsets to line numbers (for details +see the source code of the interpreter, is deprecated since 3.12 +and may be removed in 3.14); :attr:`co_stacksize` is the +required stack size; :attr:`co_flags` is an integer encoding a number +of flags for the interpreter. + +.. index:: pair: object; generator + +The following flag bits are defined for :attr:`co_flags`: bit ``0x04`` is set if +the function uses the ``*arguments`` syntax to accept an arbitrary number of +positional arguments; bit ``0x08`` is set if the function uses the +``**keywords`` syntax to accept arbitrary keyword arguments; bit ``0x20`` is set +if the function is a generator. + +Future feature declarations (``from __future__ import division``) also use bits +in :attr:`co_flags` to indicate whether a code object was compiled with a +particular feature enabled: bit ``0x2000`` is set if the function was compiled +with future division enabled; bits ``0x10`` and ``0x1000`` were used in earlier +versions of Python. + +Other bits in :attr:`co_flags` are reserved for internal use. + +.. index:: single: documentation string + +If a code object represents a function, the first item in :attr:`co_consts` is +the documentation string of the function, or ``None`` if undefined. + +.. method:: codeobject.co_positions() + + Returns an iterable over the source code positions of each bytecode + instruction in the code object. 
+ + The iterator returns tuples containing the ``(start_line, end_line, + start_column, end_column)``. The *i-th* tuple corresponds to the + position of the source code that compiled to the *i-th* instruction. + Column information is 0-indexed utf-8 byte offsets on the given source + line. + + This positional information can be missing. A non-exhaustive lists of + cases where this may happen: + + - Running the interpreter with :option:`-X` ``no_debug_ranges``. + - Loading a pyc file compiled while using :option:`-X` ``no_debug_ranges``. + - Position tuples corresponding to artificial instructions. + - Line and column numbers that can't be represented due to + implementation specific limitations. + + When this occurs, some or all of the tuple elements can be + :const:`None`. + + .. versionadded:: 3.11 + + .. note:: + This feature requires storing column positions in code objects which may + result in a small increase of disk usage of compiled Python files or + interpreter memory usage. To avoid storing the extra information and/or + deactivate printing the extra traceback information, the + :option:`-X` ``no_debug_ranges`` command line flag or the :envvar:`PYTHONNODEBUGRANGES` + environment variable can be used. + + +.. _frame-objects: + +Frame objects +^^^^^^^^^^^^^ + +.. index:: pair: object; frame + +Frame objects represent execution frames. They may occur in traceback objects +(see below), and are also passed to registered trace functions. + +.. index:: + single: f_back (frame attribute) + single: f_code (frame attribute) + single: f_globals (frame attribute) + single: f_locals (frame attribute) + single: f_lasti (frame attribute) + single: f_builtins (frame attribute) + +Special read-only attributes: :attr:`f_back` is to the previous stack frame +(towards the caller), or ``None`` if this is the bottom stack frame; +:attr:`f_code` is the code object being executed in this frame; :attr:`f_locals` +is the dictionary used to look up local variables; :attr:`f_globals` is used for +global variables; :attr:`f_builtins` is used for built-in (intrinsic) names; +:attr:`f_lasti` gives the precise instruction (this is an index into the +bytecode string of the code object). + +Accessing ``f_code`` raises an :ref:`auditing event ` +``object.__getattr__`` with arguments ``obj`` and ``"f_code"``. + +.. index:: + single: f_trace (frame attribute) + single: f_trace_lines (frame attribute) + single: f_trace_opcodes (frame attribute) + single: f_lineno (frame attribute) + +Special writable attributes: :attr:`f_trace`, if not ``None``, is a function +called for various events during code execution (this is used by the debugger). +Normally an event is triggered for each new source line - this can be +disabled by setting :attr:`f_trace_lines` to :const:`False`. + +Implementations *may* allow per-opcode events to be requested by setting +:attr:`f_trace_opcodes` to :const:`True`. Note that this may lead to +undefined interpreter behaviour if exceptions raised by the trace +function escape to the function being traced. + +:attr:`f_lineno` is the current line number of the frame --- writing to this +from within a trace function jumps to the given line (only for the bottom-most +frame). A debugger can implement a Jump command (aka Set Next Statement) +by writing to f_lineno. + +Frame objects support one method: + +.. method:: frame.clear() + + This method clears all references to local variables held by the + frame. Also, if the frame belonged to a generator, the generator + is finalized. 
This helps break reference cycles involving frame + objects (for example when catching an exception and storing its + traceback for later use). + + :exc:`RuntimeError` is raised if the frame is currently executing + or suspended. + + .. versionadded:: 3.4 + + .. versionchanged:: 3.13 + Attempting to clear a suspended frame raises :exc:`RuntimeError` + (as has always been the case for executing frames). + + +.. _traceback-objects: + +Traceback objects +^^^^^^^^^^^^^^^^^ + +.. index:: + pair: object; traceback + pair: stack; trace + pair: exception; handler + pair: execution; stack + single: exc_info (in module sys) + single: last_traceback (in module sys) + single: sys.exc_info + single: sys.exception + single: sys.last_traceback + +Traceback objects represent a stack trace of an exception. A traceback object +is implicitly created when an exception occurs, and may also be explicitly +created by calling :class:`types.TracebackType`. + +For implicitly created tracebacks, when the search for an exception handler +unwinds the execution stack, at each unwound level a traceback object is +inserted in front of the current traceback. When an exception handler is +entered, the stack trace is made available to the program. (See section +:ref:`try`.) It is accessible as the third item of the +tuple returned by ``sys.exc_info()``, and as the ``__traceback__`` attribute +of the caught exception. + +When the program contains no suitable +handler, the stack trace is written (nicely formatted) to the standard error +stream; if the interpreter is interactive, it is also made available to the user +as ``sys.last_traceback``. + +For explicitly created tracebacks, it is up to the creator of the traceback +to determine how the ``tb_next`` attributes should be linked to form a +full stack trace. + +.. index:: + single: tb_frame (traceback attribute) + single: tb_lineno (traceback attribute) + single: tb_lasti (traceback attribute) + pair: statement; try + +Special read-only attributes: +:attr:`tb_frame` points to the execution frame of the current level; +:attr:`tb_lineno` gives the line number where the exception occurred; +:attr:`tb_lasti` indicates the precise instruction. +The line number and last instruction in the traceback may differ from the +line number of its frame object if the exception occurred in a +:keyword:`try` statement with no matching except clause or with a +finally clause. + +Accessing ``tb_frame`` raises an :ref:`auditing event ` +``object.__getattr__`` with arguments ``obj`` and ``"tb_frame"``. + +.. index:: + single: tb_next (traceback attribute) + +Special writable attribute: :attr:`tb_next` is the next level in the stack +trace (towards the frame where the exception occurred), or ``None`` if +there is no next level. + +.. versionchanged:: 3.7 + Traceback objects can now be explicitly instantiated from Python code, + and the ``tb_next`` attribute of existing instances can be updated. + + +Slice objects +^^^^^^^^^^^^^ + +.. index:: pair: built-in function; slice + +Slice objects are used to represent slices for +:meth:`~object.__getitem__` +methods. They are also created by the built-in :func:`slice` function. + +.. index:: + single: start (slice object attribute) + single: stop (slice object attribute) + single: step (slice object attribute) + +Special read-only attributes: :attr:`~slice.start` is the lower bound; +:attr:`~slice.stop` is the upper bound; :attr:`~slice.step` is the step +value; each is ``None`` if omitted. These attributes can have any type. 
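As a minimal interactive sketch of the slice attributes just described (the
specific bounds here are arbitrary)::

    >>> s = slice(2, 10, 3)
    >>> s.start, s.stop, s.step
    (2, 10, 3)
    >>> slice(5).start is None, slice(5).step is None
    (True, True)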
+ +Slice objects support one method: + +.. method:: slice.indices(self, length) + + This method takes a single integer argument *length* and computes + information about the slice that the slice object would describe if + applied to a sequence of *length* items. It returns a tuple of three + integers; respectively these are the *start* and *stop* indices and the + *step* or stride length of the slice. Missing or out-of-bounds indices + are handled in a manner consistent with regular slices. + + +Static method objects +^^^^^^^^^^^^^^^^^^^^^ + +Static method objects provide a way of defeating the transformation of function +objects to method objects described above. A static method object is a wrapper +around any other object, usually a user-defined method object. When a static +method object is retrieved from a class or a class instance, the object actually +returned is the wrapped object, which is not subject to any further +transformation. Static method objects are also callable. Static method +objects are created by the built-in :func:`staticmethod` constructor. + + +Class method objects +^^^^^^^^^^^^^^^^^^^^ + +A class method object, like a static method object, is a wrapper around another +object that alters the way in which that object is retrieved from classes and +class instances. The behaviour of class method objects upon such retrieval is +described above, under "User-defined methods". Class method objects are created +by the built-in :func:`classmethod` constructor. .. _specialnames: @@ -1308,7 +1433,7 @@ Basic customization .. index:: single: destructor single: finalizer - statement: del + pair: statement; del Called when the instance is about to be destroyed. This is also called a finalizer or (improperly) a destructor. If a base class has a @@ -1409,7 +1534,7 @@ Basic customization .. method:: object.__bytes__(self) - .. index:: builtin: bytes + .. index:: pair: built-in function; bytes Called by :ref:`bytes ` to compute a byte-string representation of an object. This should return a :class:`bytes` object. @@ -1417,7 +1542,7 @@ Basic customization .. index:: single: string; __format__() (object method) pair: string; conversion - builtin: print + pair: built-in function; print .. method:: object.__format__(self, format_spec) @@ -1496,8 +1621,8 @@ Basic customization .. method:: object.__hash__(self) .. index:: - object: dictionary - builtin: hash + pair: object; dictionary + pair: built-in function; hash Called by built-in function :func:`hash` and for operations on members of hashed collections including :class:`set`, :class:`frozenset`, and @@ -1525,7 +1650,7 @@ Basic customization :meth:`__hash__`, its instances will not be usable as items in hashable collections. If a class defines mutable objects and implements an :meth:`__eq__` method, it should not implement :meth:`__hash__`, since the - implementation of hashable collections requires that a key's hash value is + implementation of :term:`hashable` collections requires that a key's hash value is immutable (if the object's hash value changes, it will be in the wrong hash bucket). @@ -1562,7 +1687,7 @@ Basic customization This is intended to provide protection against a denial-of-service caused by carefully chosen inputs that exploit the worst case performance of a dict insertion, O(n\ :sup:`2`) complexity. See - http://www.ocert.org/advisories/ocert-2011-003.html for details. + http://ocert.org/advisories/ocert-2011-003.html for details. Changing hash values affects the iteration order of sets. 
Python has never made guarantees about this ordering @@ -1580,9 +1705,9 @@ Basic customization Called to implement truth value testing and the built-in operation ``bool()``; should return ``False`` or ``True``. When this method is not - defined, :meth:`__len__` is called, if it is defined, and the object is + defined, :meth:`~object.__len__` is called, if it is defined, and the object is considered true if its result is nonzero. If a class defines neither - :meth:`__len__` nor :meth:`__bool__`, all its instances are considered + :meth:`!__len__` nor :meth:`!__bool__`, all its instances are considered true. @@ -1611,7 +1736,7 @@ access (use of, assignment to, or deletion of ``x.name``) for class instances. :meth:`__getattr__` and :meth:`__setattr__`.) This is done both for efficiency reasons and because otherwise :meth:`__getattr__` would have no way to access other attributes of the instance. Note that at least for instance variables, - you can fake total control by not inserting any values in the instance attribute + you can take total control by not inserting any values in the instance attribute dictionary (but instead inserting them in another object). See the :meth:`__getattribute__` method below for a way to actually get total control over attribute access. @@ -1906,8 +2031,7 @@ Attribute lookup speed can be significantly improved as well. .. _datamodel-note-slots: -Notes on using *__slots__* -"""""""""""""""""""""""""" +Notes on using *__slots__*: * When inheriting from a class without *__slots__*, the :attr:`~object.__dict__` and @@ -1943,8 +2067,10 @@ Notes on using *__slots__* descriptor directly from the base class). This renders the meaning of the program undefined. In the future, a check may be added to prevent this. -* Nonempty *__slots__* does not work for classes derived from "variable-length" - built-in types such as :class:`int`, :class:`bytes` and :class:`tuple`. +* :exc:`TypeError` will be raised if nonempty *__slots__* are defined for a + class derived from a + :c:member:`"variable-length" built-in type ` such as + :class:`int`, :class:`bytes`, and :class:`tuple`. * Any non-string :term:`iterable` may be assigned to *__slots__*. @@ -2046,7 +2172,7 @@ Metaclasses .. index:: single: metaclass - builtin: type + pair: built-in function; type single: = (equals); class definition By default, classes are constructed using :func:`type`. The class body is @@ -2082,15 +2208,28 @@ When a class definition is executed, the following steps occur: Resolving MRO entries ^^^^^^^^^^^^^^^^^^^^^ -If a base that appears in class definition is not an instance of :class:`type`, -then an ``__mro_entries__`` method is searched on it. If found, it is called -with the original bases tuple. This method must return a tuple of classes that -will be used instead of this base. The tuple may be empty, in such case -the original base is ignored. +.. method:: object.__mro_entries__(self, bases) + + If a base that appears in a class definition is not an instance of + :class:`type`, then an :meth:`!__mro_entries__` method is searched on the base. + If an :meth:`!__mro_entries__` method is found, the base is substituted with the + result of a call to :meth:`!__mro_entries__` when creating the class. + The method is called with the original bases tuple + passed to the *bases* parameter, and must return a tuple + of classes that will be used instead of the base. The returned tuple may be + empty: in these cases, the original base is ignored. .. 
seealso:: - :pep:`560` - Core support for typing module and generic types + :func:`types.resolve_bases` + Dynamically resolve bases that are not instances of :class:`type`. + + :func:`types.get_original_bases` + Retrieve a class's "original bases" prior to modifications by + :meth:`~object.__mro_entries__`. + + :pep:`560` + Core support for typing module and generic types. Determining the appropriate metaclass @@ -2460,21 +2599,21 @@ through the object's keys; for sequences, it should iterate through the values. .. method:: object.__len__(self) .. index:: - builtin: len + pair: built-in function; len single: __bool__() (object method) Called to implement the built-in function :func:`len`. Should return the length of the object, an integer ``>=`` 0. Also, an object that doesn't define a - :meth:`__bool__` method and whose :meth:`__len__` method returns zero is + :meth:`~object.__bool__` method and whose :meth:`!__len__` method returns zero is considered to be false in a Boolean context. .. impl-detail:: - In CPython, the length is required to be at most :attr:`sys.maxsize`. - If the length is larger than :attr:`!sys.maxsize` some features (such as + In CPython, the length is required to be at most :data:`sys.maxsize`. + If the length is larger than :data:`!sys.maxsize` some features (such as :func:`len`) may raise :exc:`OverflowError`. To prevent raising :exc:`!OverflowError` by truth value testing, an object must define a - :meth:`__bool__` method. + :meth:`~object.__bool__` method. .. method:: object.__length_hint__(self) @@ -2489,7 +2628,7 @@ through the object's keys; for sequences, it should iterate through the values. .. versionadded:: 3.4 -.. index:: object: slice +.. index:: pair: object; slice .. note:: @@ -2618,9 +2757,9 @@ left undefined. object.__or__(self, other) .. index:: - builtin: divmod - builtin: pow - builtin: pow + pair: built-in function; divmod + pair: built-in function; pow + pair: built-in function; pow These methods are called to implement the binary arithmetic operations (``+``, ``-``, ``*``, ``@``, ``/``, ``//``, ``%``, :func:`divmod`, @@ -2653,8 +2792,8 @@ left undefined. object.__ror__(self, other) .. index:: - builtin: divmod - builtin: pow + pair: built-in function; divmod + pair: built-in function; pow These methods are called to implement the binary arithmetic operations (``+``, ``-``, ``*``, ``@``, ``/``, ``//``, ``%``, :func:`divmod`, @@ -2666,7 +2805,7 @@ left undefined. ``type(y).__rsub__(y, x)`` is called if ``type(x).__sub__(x, y)`` returns *NotImplemented*. - .. index:: builtin: pow + .. index:: pair: built-in function; pow Note that ternary :func:`pow` will not try calling :meth:`__rpow__` (the coercion rules would become too complicated). @@ -2713,7 +2852,7 @@ left undefined. object.__abs__(self) object.__invert__(self) - .. index:: builtin: abs + .. index:: pair: built-in function; abs Called to implement the unary arithmetic operations (``-``, ``+``, :func:`abs` and ``~``). @@ -2724,9 +2863,9 @@ left undefined. object.__float__(self) .. index:: - builtin: complex - builtin: int - builtin: float + pair: built-in function; complex + pair: built-in function; int + pair: built-in function; float Called to implement the built-in functions :func:`complex`, :func:`int` and :func:`float`. Should return a value @@ -2751,7 +2890,7 @@ left undefined. object.__floor__(self) object.__ceil__(self) - .. index:: builtin: round + .. 
index:: pair: built-in function; round Called to implement the built-in function :func:`round` and :mod:`math` functions :func:`~math.trunc`, :func:`~math.floor` and :func:`~math.ceil`. @@ -2779,7 +2918,7 @@ execution of the block of code. Context managers are normally invoked using the used by directly invoking their methods. .. index:: - statement: with + pair: statement; with single: context manager Typical uses of context managers include saving and restoring various kinds of @@ -2805,7 +2944,7 @@ For more information on context managers, see :ref:`typecontextmanager`. (i.e., prevent it from being propagated), it should return a true value. Otherwise, the exception will be processed normally upon exit from this method. - Note that :meth:`__exit__` methods should not reraise the passed-in exception; + Note that :meth:`~object.__exit__` methods should not reraise the passed-in exception; this is the caller's responsibility. @@ -2823,7 +2962,7 @@ Customizing positional arguments in class pattern matching When using a class name in a pattern, positional arguments in the pattern are not allowed by default, i.e. ``case MyClass(x, y)`` is typically invalid without special -support in ``MyClass``. To be able to use that kind of patterns, the class needs to +support in ``MyClass``. To be able to use that kind of pattern, the class needs to define a *__match_args__* attribute. .. data:: object.__match_args__ @@ -2848,6 +2987,47 @@ a :exc:`TypeError`. The specification for the Python ``match`` statement. +.. _python-buffer-protocol: + +Emulating buffer types +---------------------- + +The :ref:`buffer protocol ` provides a way for Python +objects to expose efficient access to a low-level memory array. This protocol +is implemented by builtin types such as :class:`bytes` and :class:`memoryview`, +and third-party libraries may define additional buffer types. + +While buffer types are usually implemented in C, it is also possible to +implement the protocol in Python. + +.. method:: object.__buffer__(self, flags) + + Called when a buffer is requested from *self* (for example, by the + :class:`memoryview` constructor). The *flags* argument is an integer + representing the kind of buffer requested, affecting for example whether + the returned buffer is read-only or writable. :class:`inspect.BufferFlags` + provides a convenient way to interpret the flags. The method must return + a :class:`memoryview` object. + +.. method:: object.__release_buffer__(self, buffer) + + Called when a buffer is no longer needed. The *buffer* argument is a + :class:`memoryview` object that was previously returned by + :meth:`~object.__buffer__`. The method must release any resources associated + with the buffer. This method should return ``None``. + Buffer objects that do not need to perform any cleanup are not required + to implement this method. + +.. versionadded:: 3.12 + +.. seealso:: + + :pep:`688` - Making the buffer protocol accessible in Python + Introduces the Python ``__buffer__`` and ``__release_buffer__`` methods. + + :class:`collections.abc.Buffer` + ABC for buffer types. + .. _special-lookup: Special method lookup @@ -2950,6 +3130,14 @@ are awaitable. :term:`awaitable` objects. For instance, :class:`asyncio.Future` implements this method to be compatible with the :keyword:`await` expression. + .. 
note:: + + The language doesn't place any restriction on the type or value of the + objects yielded by the iterator returned by ``__await__``, as this is + specific to the implementation of the asynchronous execution framework + (e.g. :mod:`asyncio`) that will be managing the :term:`awaitable` object. + + .. versionadded:: 3.5 .. seealso:: :pep:`492` for additional information about awaitable objects. @@ -3074,12 +3262,12 @@ Asynchronous context managers can be used in an :keyword:`async with` statement. .. method:: object.__aenter__(self) - Semantically similar to :meth:`__enter__`, the only + Semantically similar to :meth:`~object.__enter__`, the only difference being that it must return an *awaitable*. .. method:: object.__aexit__(self, exc_type, exc_value, traceback) - Semantically similar to :meth:`__exit__`, the only + Semantically similar to :meth:`~object.__exit__`, the only difference being that it must return an *awaitable*. An example of an asynchronous context manager class:: @@ -3101,8 +3289,9 @@ An example of an asynchronous context manager class:: lead to some very strange behaviour if it is handled incorrectly. .. [#] The :meth:`~object.__hash__`, :meth:`~object.__iter__`, - :meth:`~object.__reversed__`, and :meth:`~object.__contains__` methods have - special handling for this; others + :meth:`~object.__reversed__`, :meth:`~object.__contains__`, + :meth:`~object.__class_getitem__` and :meth:`~os.PathLike.__fspath__` + methods have special handling for this. Others will still raise a :exc:`TypeError`, but may do so by relying on the behavior that ``None`` is not callable. diff --git a/Doc/reference/executionmodel.rst b/Doc/reference/executionmodel.rst index 3f01180e13f7766..cea3a56ba516447 100644 --- a/Doc/reference/executionmodel.rst +++ b/Doc/reference/executionmodel.rst @@ -71,6 +71,8 @@ The following constructs bind names: + in a capture pattern in structural pattern matching * :keyword:`import` statements. +* :keyword:`type` statements. +* :ref:`type parameter lists `. The :keyword:`!import` statement of the form ``from ... import *`` binds all names defined in the imported module, except those beginning with an underscore. @@ -128,6 +130,8 @@ lead to errors when a name is used within a block before it is bound. This rule is subtle. Python lacks declarations and allows name binding operations to occur anywhere within a code block. The local variables of a code block can be determined by scanning the entire text of the block for name binding operations. +See :ref:`the FAQ entry on UnboundLocalError ` +for examples. If the :keyword:`global` statement occurs within a block, all uses of the names specified in the statement refer to the bindings of those names in the top-level @@ -147,9 +151,10 @@ a global statement, the free variable is treated as a global. The :keyword:`nonlocal` statement causes corresponding names to refer to previously bound variables in the nearest enclosing function scope. :exc:`SyntaxError` is raised at compile time if the given name does not -exist in any enclosing function scope. +exist in any enclosing function scope. :ref:`Type parameters ` +cannot be rebound with the :keyword:`!nonlocal` statement. -.. index:: module: __main__ +.. index:: pair: module; __main__ The namespace for a module is automatically created the first time a module is imported. The main module for a script is always called :mod:`__main__`. 
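A small sketch of the :keyword:`nonlocal` behaviour described in the hunk
above (the ``counter`` and ``bump`` names are purely illustrative)::

    >>> def counter():
    ...     n = 0
    ...     def bump():
    ...         nonlocal n      # rebinds n in the nearest enclosing function scope
    ...         n += 1
    ...         return n
    ...     return bump
    >>> c = counter()
    >>> c(), c(), c()
    (1, 2, 3)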
@@ -161,14 +166,119 @@ These references follow the normal rules for name resolution with an exception that unbound local variables are looked up in the global namespace. The namespace of the class definition becomes the attribute dictionary of the class. The scope of names defined in a class block is limited to the -class block; it does not extend to the code blocks of methods -- this includes -comprehensions and generator expressions since they are implemented using a -function scope. This means that the following will fail:: +class block; it does not extend to the code blocks of methods. This includes +comprehensions and generator expressions, but it does not include +:ref:`annotation scopes `, +which have access to their enclosing class scopes. +This means that the following will fail:: class A: a = 42 b = list(a + i for i in range(10)) +However, the following will succeed:: + + class A: + type Alias = Nested + class Nested: pass + + print(A.Alias.__value__) # + +.. _annotation-scopes: + +Annotation scopes +----------------- + +:ref:`Type parameter lists ` and :keyword:`type` statements +introduce *annotation scopes*, which behave mostly like function scopes, +but with some exceptions discussed below. :term:`Annotations ` +currently do not use annotation scopes, but they are expected to use +annotation scopes in Python 3.13 when :pep:`649` is implemented. + +Annotation scopes are used in the following contexts: + +* Type parameter lists for :ref:`generic type aliases `. +* Type parameter lists for :ref:`generic functions `. + A generic function's annotations are + executed within the annotation scope, but its defaults and decorators are not. +* Type parameter lists for :ref:`generic classes `. + A generic class's base classes and + keyword arguments are executed within the annotation scope, but its decorators are not. +* The bounds and constraints for type variables + (:ref:`lazily evaluated `). +* The value of type aliases (:ref:`lazily evaluated `). + +Annotation scopes differ from function scopes in the following ways: + +* Annotation scopes have access to their enclosing class namespace. + If an annotation scope is immediately within a class scope, or within another + annotation scope that is immediately within a class scope, the code in the + annotation scope can use names defined in the class scope as if it were + executed directly within the class body. This contrasts with regular + functions defined within classes, which cannot access names defined in the class scope. +* Expressions in annotation scopes cannot contain :keyword:`yield`, ``yield from``, + :keyword:`await`, or :token:`:= ` + expressions. (These expressions are allowed in other scopes contained within the + annotation scope.) +* Names defined in annotation scopes cannot be rebound with :keyword:`nonlocal` + statements in inner scopes. This includes only type parameters, as no other + syntactic elements that can appear within annotation scopes can introduce new names. +* While annotation scopes have an internal name, that name is not reflected in the + :term:`__qualname__ ` of objects defined within the scope. + Instead, the :attr:`!__qualname__` + of such objects is as if the object were defined in the enclosing scope. + +.. versionadded:: 3.12 + Annotation scopes were introduced in Python 3.12 as part of :pep:`695`. + +.. _lazy-evaluation: + +Lazy evaluation +--------------- + +The values of type aliases created through the :keyword:`type` statement are +*lazily evaluated*. 
The same applies to the bounds and constraints of type +variables created through the :ref:`type parameter syntax `. +This means that they are not evaluated when the type alias or type variable is +created. Instead, they are only evaluated when doing so is necessary to resolve +an attribute access. + +Example: + +.. doctest:: + + >>> type Alias = 1/0 + >>> Alias.__value__ + Traceback (most recent call last): + ... + ZeroDivisionError: division by zero + >>> def func[T: 1/0](): pass + >>> T = func.__type_params__[0] + >>> T.__bound__ + Traceback (most recent call last): + ... + ZeroDivisionError: division by zero + +Here the exception is raised only when the ``__value__`` attribute +of the type alias or the ``__bound__`` attribute of the type variable +is accessed. + +This behavior is primarily useful for references to types that have not +yet been defined when the type alias or type variable is created. For example, +lazy evaluation enables creation of mutually recursive type aliases:: + + from typing import Literal + + type SimpleExpr = int | Parenthesized + type Parenthesized = tuple[Literal["("], Expr, Literal[")"]] + type Expr = SimpleExpr | tuple[SimpleExpr, Literal["+", "-"], Expr] + +Lazily evaluated values are evaluated in :ref:`annotation scope `, +which means that names that appear inside the lazily evaluated value are looked up +as if they were used in the immediately enclosing scope. + +.. versionadded:: 3.12 + .. _restrict_exec: Builtins and restricted execution diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst index 920e4d19b82d8d6..3f6d5bfafee9d14 100644 --- a/Doc/reference/expressions.rst +++ b/Doc/reference/expressions.rst @@ -71,7 +71,7 @@ An identifier occurring as an atom is a name. See section :ref:`identifiers` for lexical definition and section :ref:`naming` for documentation of naming and binding. -.. index:: exception: NameError +.. index:: pair: exception; NameError When the name is bound to an object, evaluation of the atom yields that object. When a name is not bound, an attempt to evaluate it raises a :exc:`NameError` @@ -245,7 +245,7 @@ List displays pair: list; display pair: list; comprehensions pair: empty; list - object: list + pair: object; list single: [] (square brackets); list expression single: , (comma); expression list @@ -270,7 +270,7 @@ Set displays .. index:: pair: set; display pair: set; comprehensions - object: set + pair: object; set single: {} (curly brackets); set expression single: , (comma); expression list @@ -298,27 +298,27 @@ Dictionary displays .. index:: pair: dictionary; display pair: dictionary; comprehensions - key, datum, key/datum pair - object: dictionary + key, value, key/value pair + pair: object; dictionary single: {} (curly brackets); dictionary expression single: : (colon); in dictionary expressions single: , (comma); in dictionary displays -A dictionary display is a possibly empty series of key/datum pairs enclosed in -curly braces: +A dictionary display is a possibly empty series of dict items (key/value pairs) +enclosed in curly braces: .. 
productionlist:: python-grammar - dict_display: "{" [`key_datum_list` | `dict_comprehension`] "}" - key_datum_list: `key_datum` ("," `key_datum`)* [","] - key_datum: `expression` ":" `expression` | "**" `or_expr` + dict_display: "{" [`dict_item_list` | `dict_comprehension`] "}" + dict_item_list: `dict_item` ("," `dict_item`)* [","] + dict_item: `expression` ":" `expression` | "**" `or_expr` dict_comprehension: `expression` ":" `expression` `comp_for` A dictionary display yields a new dictionary object. -If a comma-separated sequence of key/datum pairs is given, they are evaluated +If a comma-separated sequence of dict items is given, they are evaluated from left to right to define the entries of the dictionary: each key object is -used as a key into the dictionary to store the corresponding datum. This means -that you can specify the same key multiple times in the key/datum list, and the +used as a key into the dictionary to store the corresponding value. This means +that you can specify the same key multiple times in the dict item list, and the final dictionary's value for that key will be the last one given. .. index:: @@ -328,7 +328,7 @@ final dictionary's value for that key will be the last one given. A double asterisk ``**`` denotes :dfn:`dictionary unpacking`. Its operand must be a :term:`mapping`. Each mapping item is added to the new dictionary. Later values replace values already set by -earlier key/datum pairs and earlier dictionary unpackings. +earlier dict items and earlier dictionary unpackings. .. versionadded:: 3.5 Unpacking into dictionary displays, originally proposed by :pep:`448`. @@ -344,7 +344,7 @@ in the new dictionary in the order they are produced. Restrictions on the types of the key values are listed earlier in section :ref:`types`. (To summarize, the key type should be :term:`hashable`, which excludes all mutable objects.) Clashes between duplicate keys are not detected; the last -datum (textually rightmost in the display) stored for a given key value +value (textually rightmost in the display) stored for a given key value prevails. .. versionchanged:: 3.8 @@ -361,7 +361,7 @@ Generator expressions .. index:: pair: generator; expression - object: generator + pair: object; generator single: () (parentheses); generator expression A generator expression is a compact generator notation in parentheses: @@ -415,8 +415,8 @@ Yield expressions ----------------- .. index:: - keyword: yield - keyword: from + pair: keyword; yield + pair: keyword; from pair: yield; expression pair: generator; function @@ -454,7 +454,9 @@ generator. That generator then controls the execution of the generator function. The execution starts when one of the generator's methods is called. At that time, the execution proceeds to the first yield expression, where it is suspended again, returning the value of :token:`~python-grammar:expression_list` -to the generator's caller. By suspended, we mean that all local state is +to the generator's caller, +or ``None`` if :token:`~python-grammar:expression_list` is omitted. +By suspended, we mean that all local state is retained, including the current bindings of local variables, the instruction pointer, the internal evaluation stack, and the state of any exception handling. When the execution is resumed by calling one of the generator's methods, the @@ -497,8 +499,8 @@ the yield expression. It can be either set explicitly when raising :exc:`StopIteration`, or automatically when the subiterator is a generator (by returning a value from the subgenerator). 
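For example, a short sketch of how the value of a ``yield from`` expression is
taken from the subgenerator's ``return`` value (the generator names are made up
for illustration)::

    >>> def inner():
    ...     yield 1
    ...     return "finished"
    >>> def outer():
    ...     result = yield from inner()   # receives the subgenerator's return value
    ...     yield result
    >>> list(outer())
    [1, 'finished']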
- .. versionchanged:: 3.3 - Added ``yield from `` to delegate control flow to a subiterator. +.. versionchanged:: 3.3 + Added ``yield from `` to delegate control flow to a subiterator. The parentheses may be omitted when the yield expression is the sole expression on the right hand side of an assignment statement. @@ -520,7 +522,7 @@ on the right hand side of an assignment statement. The proposal that expanded on :pep:`492` by adding generator capabilities to coroutine functions. -.. index:: object: generator +.. index:: pair: object; generator .. _generator-methods: Generator-iterator methods @@ -532,7 +534,7 @@ be used to control the execution of a generator function. Note that calling any of the generator methods below when the generator is already executing raises a :exc:`ValueError` exception. -.. index:: exception: StopIteration +.. index:: pair: exception; StopIteration .. method:: generator.__next__() @@ -587,18 +589,25 @@ is already executing raises a :exc:`ValueError` exception. The second signature \(type\[, value\[, traceback\]\]\) is deprecated and may be removed in a future version of Python. -.. index:: exception: GeneratorExit +.. index:: pair: exception; GeneratorExit .. method:: generator.close() Raises a :exc:`GeneratorExit` at the point where the generator function was - paused. If the generator function then exits gracefully, is already closed, - or raises :exc:`GeneratorExit` (by not catching the exception), close - returns to its caller. If the generator yields a value, a - :exc:`RuntimeError` is raised. If the generator raises any other exception, - it is propagated to the caller. :meth:`close` does nothing if the generator - has already exited due to an exception or normal exit. + paused. If the generator function catches the exception and returns a + value, this value is returned from :meth:`close`. If the generator function + is already closed, or raises :exc:`GeneratorExit` (by not catching the + exception), :meth:`close` returns :const:`None`. If the generator yields a + value, a :exc:`RuntimeError` is raised. If the generator raises any other + exception, it is propagated to the caller. If the generator has already + exited due to an exception or normal exit, :meth:`close` returns + :const:`None` and has no other effect. + + .. versionchanged:: 3.13 + + If a generator returns a value upon being closed, the value is returned + by :meth:`close`. .. index:: single: yield; examples @@ -699,7 +708,7 @@ of a *finalizer* method see the implementation of The expression ``yield from `` is a syntax error when used in an asynchronous generator function. -.. index:: object: asynchronous-generator +.. index:: pair: object; asynchronous-generator .. _asynchronous-generator-methods: Asynchronous generator-iterator methods @@ -709,7 +718,7 @@ This subsection describes the methods of an asynchronous generator iterator, which are used to control the execution of a generator function. -.. index:: exception: StopAsyncIteration +.. index:: pair: exception; StopAsyncIteration .. coroutinemethod:: agen.__anext__() @@ -761,7 +770,7 @@ which are used to control the execution of a generator function. The second signature \(type\[, value\[, traceback\]\]\) is deprecated and may be removed in a future version of Python. -.. index:: exception: GeneratorExit +.. index:: pair: exception; GeneratorExit .. coroutinemethod:: agen.aclose() @@ -808,18 +817,24 @@ An attribute reference is a primary followed by a period and a name: attributeref: `primary` "." `identifier` .. 
index:: - exception: AttributeError - object: module - object: list + pair: exception; AttributeError + pair: object; module + pair: object; list The primary must evaluate to an object of a type that supports attribute references, which most objects do. This object is then asked to produce the -attribute whose name is the identifier. This production can be customized by -overriding the :meth:`__getattr__` method. If this attribute is not available, -the exception :exc:`AttributeError` is raised. Otherwise, the type and value of -the object produced is determined by the object. Multiple evaluations of the -same attribute reference may yield different objects. +attribute whose name is the identifier. The type and value produced is +determined by the object. Multiple evaluations of the same attribute +reference may yield different objects. + +This production can be customized by overriding the +:meth:`~object.__getattribute__` method or the :meth:`~object.__getattr__` +method. The :meth:`!__getattribute__` method is called first and either +returns a value or raises :exc:`AttributeError` if the attribute is not +available. +If an :exc:`AttributeError` is raised and the object has a :meth:`!__getattr__` +method, that method is called as a fallback. .. _subscriptions: @@ -831,12 +846,12 @@ Subscriptions single: [] (square brackets); subscription .. index:: - object: sequence - object: mapping - object: string - object: tuple - object: list - object: dictionary + pair: object; sequence + pair: object; mapping + pair: object; string + pair: object; tuple + pair: object; list + pair: object; dictionary pair: sequence; item The subscription of an instance of a :ref:`container class ` @@ -880,7 +895,7 @@ to the index so that, for example, ``x[-1]`` selects the last item of ``x``. The resulting value must be a nonnegative integer less than the number of items in the sequence, and the subscription selects the item whose index is that value (counting from zero). Since the support for negative indices and slicing -occurs in the object's :meth:`__getitem__` method, subclasses overriding +occurs in the object's :meth:`~object.__getitem__` method, subclasses overriding this method will need to explicitly add that support. .. index:: @@ -904,10 +919,10 @@ Slicings single: , (comma); slicing .. index:: - object: sequence - object: string - object: tuple - object: list + pair: object; sequence + pair: object; string + pair: object; tuple + pair: object; list A slicing selects a range of items in a sequence object (e.g., a string, tuple or list). Slicings may be used as expressions or as targets in assignment or @@ -935,7 +950,7 @@ slice list contains no proper slice). single: step (slice object attribute) The semantics for a slicing are as follows. The primary is indexed (using the -same :meth:`__getitem__` method as +same :meth:`~object.__getitem__` method as normal subscription) with a key that is constructed from the slice list, as follows. If the slice list contains at least one comma, the key is a tuple containing the conversion of the slice items; otherwise, the conversion of the @@ -948,7 +963,7 @@ substituting ``None`` for missing expressions. .. index:: - object: callable + pair: object; callable single: call single: argument; call semantics single: () (parentheses); call @@ -1098,8 +1113,8 @@ a user-defined function: .. 
index:: pair: function; call triple: user-defined; function; call - object: user-defined function - object: function + pair: object; user-defined function + pair: object; function The code block for the function is executed, passing it the argument list. The first thing the code block will do is bind the formal parameters to the @@ -1113,25 +1128,25 @@ a built-in function or method: pair: built-in function; call pair: method; call pair: built-in method; call - object: built-in method - object: built-in function - object: method - object: function + pair: object; built-in method + pair: object; built-in function + pair: object; method + pair: object; function The result is up to the interpreter; see :ref:`built-in-funcs` for the descriptions of built-in functions and methods. a class object: .. index:: - object: class + pair: object; class pair: class object; call A new instance of that class is returned. a class instance method: .. index:: - object: class instance - object: instance + pair: object; class instance + pair: object; instance pair: class instance; call The corresponding user-defined function is called, with an argument list that is @@ -1147,7 +1162,7 @@ a class instance: if that method was called. -.. index:: keyword: await +.. index:: pair: keyword; await .. _await: Await expression @@ -1169,7 +1184,7 @@ The power operator .. index:: pair: power; operation - operator: ** + pair: operator; ** The power operator binds more tightly than unary operators on its left; it binds less tightly than unary operators on its right. The syntax is: @@ -1230,7 +1245,7 @@ operation can be overridden with the :meth:`__pos__` special method. .. index:: single: inversion - operator: ~ (tilde) + pair: operator; ~ (tilde) The unary ``~`` (invert) operator yields the bitwise inversion of its integer argument. The bitwise inversion of ``x`` is defined as ``-(x+1)``. It only @@ -1239,7 +1254,7 @@ applies to integral numbers or to custom objects that override the -.. index:: exception: TypeError +.. index:: pair: exception; TypeError In all three cases, if the argument does not have the proper type, a :exc:`TypeError` exception is raised. @@ -1265,7 +1280,7 @@ operators and one for additive operators: .. index:: single: multiplication - operator: * (asterisk) + pair: operator; * (asterisk) The ``*`` (multiplication) operator yields the product of its arguments. The arguments must either both be numbers, or one argument must be an integer and @@ -1278,7 +1293,7 @@ This operation can be customized using the special :meth:`__mul__` and .. index:: single: matrix multiplication - operator: @ (at) + pair: operator; @ (at) The ``@`` (at) operator is intended to be used for matrix multiplication. No builtin Python types implement this operator. @@ -1286,10 +1301,10 @@ builtin Python types implement this operator. .. versionadded:: 3.5 .. index:: - exception: ZeroDivisionError + pair: exception; ZeroDivisionError single: division - operator: / (slash) - operator: // + pair: operator; / (slash) + pair: operator; // The ``/`` (division) and ``//`` (floor division) operators yield the quotient of their arguments. The numeric arguments are first converted to a common type. @@ -1303,7 +1318,7 @@ This operation can be customized using the special :meth:`__truediv__` and .. index:: single: modulo - operator: % (percent) + pair: operator; % (percent) The ``%`` (modulo) operator yields the remainder from the division of the first argument by the second. 
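For readers skimming the hunk above, a short doctest-style reminder of how ``//`` and ``%`` relate (ordinary CPython behavior, shown purely as an illustration and not part of the patch)::

    >>> 17 // 5, 17 % 5
    (3, 2)
    >>> (17 // 5) * 5 + 17 % 5 == 17   # x == (x // y) * y + x % y
    True
    >>> -17 // 5, -17 % 5              # floor division rounds down; the
    (-4, 3)                            # remainder takes the sign of the divisor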
The numeric arguments are first converted to a common @@ -1361,8 +1376,8 @@ Shifting operations .. index:: pair: shifting; operation - operator: << - operator: >> + pair: operator; << + pair: operator; >> The shifting operations have lower priority than the arithmetic operations: @@ -1375,7 +1390,7 @@ the left or right by the number of bits given by the second argument. This operation can be customized using the special :meth:`__lshift__` and :meth:`__rshift__` methods. -.. index:: exception: ValueError +.. index:: pair: exception; ValueError A right shift by *n* bits is defined as floor division by ``pow(2,n)``. A left shift by *n* bits is defined as multiplication with ``pow(2,n)``. @@ -1397,7 +1412,7 @@ Each of the three bitwise operations has a different priority level: .. index:: pair: bitwise; and - operator: & (ampersand) + pair: operator; & (ampersand) The ``&`` operator yields the bitwise AND of its arguments, which must be integers or one of them must be a custom object overriding :meth:`__and__` or @@ -1406,7 +1421,7 @@ integers or one of them must be a custom object overriding :meth:`__and__` or .. index:: pair: bitwise; xor pair: exclusive; or - operator: ^ (caret) + pair: operator; ^ (caret) The ``^`` operator yields the bitwise XOR (exclusive OR) of its arguments, which must be integers or one of them must be a custom object overriding :meth:`__xor__` or @@ -1415,7 +1430,7 @@ must be integers or one of them must be a custom object overriding :meth:`__xor_ .. index:: pair: bitwise; or pair: inclusive; or - operator: | (vertical bar) + pair: operator; | (vertical bar) The ``|`` operator yields the bitwise (inclusive) OR of its arguments, which must be integers or one of them must be a custom object overriding :meth:`__or__` or @@ -1430,12 +1445,12 @@ Comparisons .. index:: single: comparison pair: C; language - operator: < (less) - operator: > (greater) - operator: <= - operator: >= - operator: == - operator: != + pair: operator; < (less) + pair: operator; > (greater) + pair: operator; <= + pair: operator; >= + pair: operator; == + pair: operator; != Unlike C, all comparison operations in Python have the same priority, which is lower than that of any arithmetic, shifting or bitwise operation. Also unlike @@ -1661,23 +1676,23 @@ If an exception is raised during the iteration, it is as if :keyword:`in` raised that exception. Lastly, the old-style iteration protocol is tried: if a class defines -:meth:`__getitem__`, ``x in y`` is ``True`` if and only if there is a non-negative +:meth:`~object.__getitem__`, ``x in y`` is ``True`` if and only if there is a non-negative integer index *i* such that ``x is y[i] or x == y[i]``, and no lower integer index raises the :exc:`IndexError` exception. (If any other exception is raised, it is as if :keyword:`in` raised that exception). .. index:: - operator: in - operator: not in + pair: operator; in + pair: operator; not in pair: membership; test - object: sequence + pair: object; sequence The operator :keyword:`not in` is defined to have the inverse truth value of :keyword:`in`. .. index:: - operator: is - operator: is not + pair: operator; is + pair: operator; is not pair: identity; test @@ -1715,19 +1730,19 @@ control flow statements, the following values are interpreted as false: ``False``, ``None``, numeric zero of all types, and empty strings and containers (including strings, tuples, lists, dictionaries, sets and frozensets). All other values are interpreted as true. 
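A quick illustration of the default truth values listed above (standard behavior, not introduced by this change); the paragraph on customizing truth values continues just after this sketch::

    >>> [bool(v) for v in (False, None, 0, 0.0, "", (), [], {}, set(), frozenset())]
    [False, False, False, False, False, False, False, False, False, False]
    >>> bool("0"), bool([None]), bool(object())   # everything else is true
    (True, True, True)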
User-defined objects can customize their -truth value by providing a :meth:`__bool__` method. +truth value by providing a :meth:`~object.__bool__` method. -.. index:: operator: not +.. index:: pair: operator; not The operator :keyword:`not` yields ``True`` if its argument is false, ``False`` otherwise. -.. index:: operator: and +.. index:: pair: operator; and The expression ``x and y`` first evaluates *x*; if *x* is false, its value is returned; otherwise, *y* is evaluated and the resulting value is returned. -.. index:: operator: or +.. index:: pair: operator; or The expression ``x or y`` first evaluates *x*; if *x* is true, its value is returned; otherwise, *y* is evaluated and the resulting value is returned. @@ -1772,10 +1787,11 @@ Or, when processing a file stream in chunks: while chunk := file.read(9000): process(chunk) -Assignment expressions must be surrounded by parentheses when used -as sub-expressions in slicing, conditional, lambda, -keyword-argument, and comprehension-if expressions -and in ``assert`` and ``with`` statements. +Assignment expressions must be surrounded by parentheses when +used as expression statements and when used as sub-expressions in +slicing, conditional, lambda, +keyword-argument, and comprehension-if expressions and +in ``assert``, ``with``, and ``assignment`` statements. In all other places where they can be used, parentheses are not required, including in ``if`` and ``while`` statements. @@ -1852,7 +1868,7 @@ Expression lists starred_expression: `expression` | (`starred_item` ",")* [`starred_item`] starred_item: `assignment_expression` | "*" `or_expr` -.. index:: object: tuple +.. index:: pair: object; tuple Except when part of a list or set display, an expression list containing at least one comma yields a tuple. The length of diff --git a/Doc/reference/grammar.rst b/Doc/reference/grammar.rst index 59b45005836a763..bc1db7b039cd5a9 100644 --- a/Doc/reference/grammar.rst +++ b/Doc/reference/grammar.rst @@ -12,7 +12,7 @@ and `PEG `_. In particular, ``&`` followed by a symbol, token or parenthesized group indicates a positive lookahead (i.e., is required to match but not consumed), while ``!`` indicates a negative lookahead (i.e., is -required _not_ to match). We use the ``|`` separator to mean PEG's +required *not* to match). We use the ``|`` separator to mean PEG's "ordered choice" (written as ``/`` in traditional PEG grammars). See :pep:`617` for more details on the grammar's syntax. diff --git a/Doc/reference/import.rst b/Doc/reference/import.rst index b22b5251f1de469..a7beeea29b45567 100644 --- a/Doc/reference/import.rst +++ b/Doc/reference/import.rst @@ -324,15 +324,18 @@ modules, and one that knows how to import modules from an :term:`import path` .. versionchanged:: 3.4 The :meth:`~importlib.abc.MetaPathFinder.find_spec` method of meta path - finders replaced :meth:`~importlib.abc.MetaPathFinder.find_module`, which + finders replaced :meth:`!find_module`, which is now deprecated. While it will continue to work without change, the import machinery will try it only if the finder does not implement ``find_spec()``. .. versionchanged:: 3.10 - Use of :meth:`~importlib.abc.MetaPathFinder.find_module` by the import system + Use of :meth:`!find_module` by the import system now raises :exc:`ImportWarning`. +.. versionchanged:: 3.12 + ``find_module()`` has been removed. Use :meth:`find_spec` instead. 
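Since ``find_module()`` is removed in 3.12, a meta path finder now only needs ``find_spec()``. A minimal illustrative sketch of that protocol (the ``BlockedFinder`` name and its blocking policy are invented for this example, not part of the patch)::

    import sys
    from importlib.abc import MetaPathFinder

    class BlockedFinder(MetaPathFinder):
        """Hypothetical finder that refuses to import anything under 'blocked'."""

        def find_spec(self, fullname, path, target=None):
            if fullname == "blocked" or fullname.startswith("blocked."):
                raise ModuleNotFoundError(f"import of {fullname!r} is blocked")
            return None  # defer to the remaining finders on sys.meta_path

    sys.meta_path.insert(0, BlockedFinder())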
+ Loading ======= @@ -372,32 +375,32 @@ of what happens during the loading portion of import:: Note the following details: - * If there is an existing module object with the given name in - :data:`sys.modules`, import will have already returned it. +* If there is an existing module object with the given name in + :data:`sys.modules`, import will have already returned it. - * The module will exist in :data:`sys.modules` before the loader - executes the module code. This is crucial because the module code may - (directly or indirectly) import itself; adding it to :data:`sys.modules` - beforehand prevents unbounded recursion in the worst case and multiple - loading in the best. +* The module will exist in :data:`sys.modules` before the loader + executes the module code. This is crucial because the module code may + (directly or indirectly) import itself; adding it to :data:`sys.modules` + beforehand prevents unbounded recursion in the worst case and multiple + loading in the best. - * If loading fails, the failing module -- and only the failing module -- - gets removed from :data:`sys.modules`. Any module already in the - :data:`sys.modules` cache, and any module that was successfully loaded - as a side-effect, must remain in the cache. This contrasts with - reloading where even the failing module is left in :data:`sys.modules`. +* If loading fails, the failing module -- and only the failing module -- + gets removed from :data:`sys.modules`. Any module already in the + :data:`sys.modules` cache, and any module that was successfully loaded + as a side-effect, must remain in the cache. This contrasts with + reloading where even the failing module is left in :data:`sys.modules`. - * After the module is created but before execution, the import machinery - sets the import-related module attributes ("_init_module_attrs" in - the pseudo-code example above), as summarized in a - :ref:`later section `. +* After the module is created but before execution, the import machinery + sets the import-related module attributes ("_init_module_attrs" in + the pseudo-code example above), as summarized in a + :ref:`later section `. - * Module execution is the key moment of loading in which the module's - namespace gets populated. Execution is entirely delegated to the - loader, which gets to decide what gets populated and how. +* Module execution is the key moment of loading in which the module's + namespace gets populated. Execution is entirely delegated to the + loader, which gets to decide what gets populated and how. - * The module created during loading and passed to exec_module() may - not be the one returned at the end of import [#fnlo]_. +* The module created during loading and passed to exec_module() may + not be the one returned at the end of import [#fnlo]_. .. versionchanged:: 3.4 The import system has taken over the boilerplate responsibilities of @@ -414,13 +417,13 @@ returned from :meth:`~importlib.abc.Loader.exec_module` is ignored. Loaders must satisfy the following requirements: - * If the module is a Python module (as opposed to a built-in module or a - dynamically loaded extension), the loader should execute the module's code - in the module's global name space (``module.__dict__``). +* If the module is a Python module (as opposed to a built-in module or a + dynamically loaded extension), the loader should execute the module's code + in the module's global name space (``module.__dict__``). 
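As an aside, a hedged sketch of a loader meeting the first requirement above (the in-memory ``sources`` mapping is hypothetical); the second loader requirement continues right after this sketch::

    import importlib.abc

    class StringLoader(importlib.abc.Loader):
        """Hypothetical loader that executes source code held in a dict."""

        def __init__(self, sources):
            self.sources = sources  # assumed mapping of {module name: source text}

        def create_module(self, spec):
            return None  # request default module creation

        def exec_module(self, module):
            try:
                source = self.sources[module.__name__]
            except KeyError:
                raise ImportError(f"no source available for {module.__name__!r}")
            # Run the code in the module's own global namespace, as required.
            exec(compile(source, module.__name__, "exec"), module.__dict__)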
- * If the loader cannot execute the module, it should raise an - :exc:`ImportError`, although any other exception raised during - :meth:`~importlib.abc.Loader.exec_module` will be propagated. +* If the loader cannot execute the module, it should raise an + :exc:`ImportError`, although any other exception raised during + :meth:`~importlib.abc.Loader.exec_module` will be propagated. In many cases, the finder and loader can be the same object; in such cases the :meth:`~importlib.abc.MetaPathFinder.find_spec` method would just return a @@ -450,20 +453,20 @@ import machinery will create the new module itself. functionality described above in addition to executing the module. All the same constraints apply, with some additional clarification: - * If there is an existing module object with the given name in - :data:`sys.modules`, the loader must use that existing module. - (Otherwise, :func:`importlib.reload` will not work correctly.) If the - named module does not exist in :data:`sys.modules`, the loader - must create a new module object and add it to :data:`sys.modules`. + * If there is an existing module object with the given name in + :data:`sys.modules`, the loader must use that existing module. + (Otherwise, :func:`importlib.reload` will not work correctly.) If the + named module does not exist in :data:`sys.modules`, the loader + must create a new module object and add it to :data:`sys.modules`. - * The module *must* exist in :data:`sys.modules` before the loader - executes the module code, to prevent unbounded recursion or multiple - loading. + * The module *must* exist in :data:`sys.modules` before the loader + executes the module code, to prevent unbounded recursion or multiple + loading. - * If loading fails, the loader must remove any modules it has inserted - into :data:`sys.modules`, but it must remove **only** the failing - module(s), and only if the loader itself has loaded the module(s) - explicitly. + * If loading fails, the loader must remove any modules it has inserted + into :data:`sys.modules`, but it must remove **only** the failing + module(s), and only if the loader itself has loaded the module(s) + explicitly. .. versionchanged:: 3.5 A :exc:`DeprecationWarning` is raised when ``exec_module()`` is defined but @@ -556,7 +559,7 @@ listed below. functionality, for example getting data associated with a loader. It is **strongly** recommended that you rely on :attr:`__spec__` - instead instead of this attribute. + instead of this attribute. .. versionchanged:: 3.12 The value of ``__loader__`` is expected to be the same as @@ -577,7 +580,7 @@ listed below. relative imports for main modules, as defined in :pep:`366`. It is **strongly** recommended that you rely on :attr:`__spec__` - instead instead of this attribute. + instead of this attribute. .. versionchanged:: 3.6 The value of ``__package__`` is expected to be the same as @@ -647,7 +650,7 @@ listed below. from a file, that atypical scenario may be appropriate. It is **strongly** recommended that you rely on :attr:`__spec__` - instead instead of ``__cached__``. + instead of ``__cached__``. .. _package-path-rules: @@ -690,20 +693,20 @@ with defaults for whatever information is missing. Here are the exact rules used: - * If the module has a ``__spec__`` attribute, the information in the spec - is used to generate the repr. The "name", "loader", "origin", and - "has_location" attributes are consulted. +* If the module has a ``__spec__`` attribute, the information in the spec + is used to generate the repr. 
The "name", "loader", "origin", and + "has_location" attributes are consulted. - * If the module has a ``__file__`` attribute, this is used as part of the - module's repr. +* If the module has a ``__file__`` attribute, this is used as part of the + module's repr. - * If the module has no ``__file__`` but does have a ``__loader__`` that is not - ``None``, then the loader's repr is used as part of the module's repr. +* If the module has no ``__file__`` but does have a ``__loader__`` that is not + ``None``, then the loader's repr is used as part of the module's repr. - * Otherwise, just use the module's ``__name__`` in the repr. +* Otherwise, just use the module's ``__name__`` in the repr. .. versionchanged:: 3.12 - Use of :meth:`module_repr`, having been deprecated since Python 3.4, was + Use of :meth:`!module_repr`, having been deprecated since Python 3.4, was removed in Python 3.12 and is no longer called during the resolution of a module's repr. @@ -837,7 +840,7 @@ stores finder objects rather than being limited to :term:`importer` objects). In this way, the expensive search for a particular :term:`path entry` location's :term:`path entry finder` need only be done once. User code is free to remove cache entries from :data:`sys.path_importer_cache` forcing -the path based finder to perform the path entry search again [#fnpic]_. +the path based finder to perform the path entry search again. If the path entry is not present in the cache, the path based finder iterates over every callable in :data:`sys.path_hooks`. Each of the :term:`path entry @@ -887,13 +890,13 @@ module. ``find_spec()`` returns a fully populated spec for the module. This spec will always have "loader" set (with one exception). To indicate to the import machinery that the spec represents a namespace -:term:`portion`, the path entry finder sets "submodule_search_locations" to +:term:`portion`, the path entry finder sets ``submodule_search_locations`` to a list containing the portion. .. versionchanged:: 3.4 :meth:`~importlib.abc.PathEntryFinder.find_spec` replaced - :meth:`~importlib.abc.PathEntryFinder.find_loader` and - :meth:`~importlib.abc.PathEntryFinder.find_module`, both of which + :meth:`!find_loader` and + :meth:`!find_module`, both of which are now deprecated, but will be used if ``find_spec()`` is not defined. Older path entry finders may implement one of these two deprecated methods @@ -901,7 +904,7 @@ a list containing the portion. sake of backward compatibility. However, if ``find_spec()`` is implemented on the path entry finder, the legacy methods are ignored. - :meth:`~importlib.abc.PathEntryFinder.find_loader` takes one argument, the + :meth:`!find_loader` takes one argument, the fully qualified name of the module being imported. ``find_loader()`` returns a 2-tuple where the first item is the loader and the second item is a namespace :term:`portion`. @@ -920,10 +923,13 @@ a list containing the portion. ``find_loader()`` in preference to ``find_module()``. .. versionchanged:: 3.10 - Calls to :meth:`~importlib.abc.PathEntryFinder.find_module` and - :meth:`~importlib.abc.PathEntryFinder.find_loader` by the import + Calls to :meth:`!find_module` and + :meth:`!find_loader` by the import system will raise :exc:`ImportWarning`. +.. versionchanged:: 3.12 + ``find_module()`` and ``find_loader()`` have been removed. 
+ Replacing the standard import system ==================================== @@ -1045,8 +1051,8 @@ The original specification for :data:`sys.meta_path` was :pep:`302`, with subsequent extension in :pep:`420`. :pep:`420` introduced :term:`namespace packages ` for -Python 3.3. :pep:`420` also introduced the :meth:`find_loader` protocol as an -alternative to :meth:`find_module`. +Python 3.3. :pep:`420` also introduced the :meth:`!find_loader` protocol as an +alternative to :meth:`!find_module`. :pep:`366` describes the addition of the ``__package__`` attribute for explicit relative imports in main modules. @@ -1073,8 +1079,3 @@ methods to finders and loaders. module may replace itself in :data:`sys.modules`. This is implementation-specific behavior that is not guaranteed to work in other Python implementations. - -.. [#fnpic] In legacy code, it is possible to find instances of - :class:`imp.NullImporter` in the :data:`sys.path_importer_cache`. It - is recommended that code be changed to use ``None`` instead. See - :ref:`portingpythoncode` for more details. diff --git a/Doc/reference/introduction.rst b/Doc/reference/introduction.rst index 914a11556c94e69..cf186705e6e987b 100644 --- a/Doc/reference/introduction.rst +++ b/Doc/reference/introduction.rst @@ -74,7 +74,7 @@ PyPy and a Just in Time compiler. One of the goals of the project is to encourage experimentation with the language itself by making it easier to modify the interpreter (since it is written in Python). Additional information is - available on `the PyPy project's home page `_. + available on `the PyPy project's home page `_. Each of these implementations varies in some way from the language as documented in this manual, or introduces specific information beyond what's covered in the @@ -90,7 +90,8 @@ Notation .. index:: BNF, grammar, syntax, notation -The descriptions of lexical analysis and syntax use a modified BNF grammar +The descriptions of lexical analysis and syntax use a modified +`Backus–Naur form (BNF) `_ grammar notation. This uses the following style of definition: .. productionlist:: notation diff --git a/Doc/reference/lexical_analysis.rst b/Doc/reference/lexical_analysis.rst index 4ab6e90a6234495..3e07d16068a6279 100644 --- a/Doc/reference/lexical_analysis.rst +++ b/Doc/reference/lexical_analysis.rst @@ -315,7 +315,7 @@ The Unicode category codes mentioned above stand for: * *Nd* - decimal numbers * *Pc* - connector punctuations * *Other_ID_Start* - explicit list of characters in `PropList.txt - `_ to support backwards + `_ to support backwards compatibility * *Other_ID_Continue* - likewise @@ -323,8 +323,8 @@ All identifiers are converted into the normal form NFKC while parsing; compariso of identifiers is based on NFKC. A non-normative HTML file listing all valid identifier characters for Unicode -15.0.0 can be found at -https://www.unicode.org/Public/15.0.0/ucd/DerivedCoreProperties.txt +15.1.0 can be found at +https://www.unicode.org/Public/15.1.0/ucd/DerivedCoreProperties.txt .. _keywords: @@ -361,15 +361,19 @@ Soft Keywords .. versionadded:: 3.10 Some identifiers are only reserved under specific contexts. These are known as -*soft keywords*. The identifiers ``match``, ``case`` and ``_`` can -syntactically act as keywords in contexts related to the pattern matching -statement, but this distinction is done at the parser level, not when -tokenizing. +*soft keywords*. 
The identifiers ``match``, ``case``, ``type`` and ``_`` can +syntactically act as keywords in certain contexts, +but this distinction is done at the parser level, not when tokenizing. -As soft keywords, their use with pattern matching is possible while still -preserving compatibility with existing code that uses ``match``, ``case`` and ``_`` as +As soft keywords, their use in the grammar is possible while still +preserving compatibility with existing code that uses these names as identifier names. +``match``, ``case``, and ``_`` are used in the :keyword:`match` statement. +``type`` is used in the :keyword:`type` statement. + +.. versionchanged:: 3.12 + ``type`` is now a soft keyword. .. index:: single: _, identifiers @@ -545,55 +549,59 @@ retained), except that three unescaped quotes in a row terminate the literal. ( .. _escape-sequences: + +Escape sequences +^^^^^^^^^^^^^^^^ + Unless an ``'r'`` or ``'R'`` prefix is present, escape sequences in string and bytes literals are interpreted according to rules similar to those used by Standard C. The recognized escape sequences are: -+-----------------+---------------------------------+-------+ -| Escape Sequence | Meaning | Notes | -+=================+=================================+=======+ -| ``\``\ | Backslash and newline ignored | \(1) | -+-----------------+---------------------------------+-------+ -| ``\\`` | Backslash (``\``) | | -+-----------------+---------------------------------+-------+ -| ``\'`` | Single quote (``'``) | | -+-----------------+---------------------------------+-------+ -| ``\"`` | Double quote (``"``) | | -+-----------------+---------------------------------+-------+ -| ``\a`` | ASCII Bell (BEL) | | -+-----------------+---------------------------------+-------+ -| ``\b`` | ASCII Backspace (BS) | | -+-----------------+---------------------------------+-------+ -| ``\f`` | ASCII Formfeed (FF) | | -+-----------------+---------------------------------+-------+ -| ``\n`` | ASCII Linefeed (LF) | | -+-----------------+---------------------------------+-------+ -| ``\r`` | ASCII Carriage Return (CR) | | -+-----------------+---------------------------------+-------+ -| ``\t`` | ASCII Horizontal Tab (TAB) | | -+-----------------+---------------------------------+-------+ -| ``\v`` | ASCII Vertical Tab (VT) | | -+-----------------+---------------------------------+-------+ -| ``\ooo`` | Character with octal value | (2,4) | -| | *ooo* | | -+-----------------+---------------------------------+-------+ -| ``\xhh`` | Character with hex value *hh* | (3,4) | -+-----------------+---------------------------------+-------+ ++-------------------------+---------------------------------+-------+ +| Escape Sequence | Meaning | Notes | ++=========================+=================================+=======+ +| ``\``\ | Backslash and newline ignored | \(1) | ++-------------------------+---------------------------------+-------+ +| ``\\`` | Backslash (``\``) | | ++-------------------------+---------------------------------+-------+ +| ``\'`` | Single quote (``'``) | | ++-------------------------+---------------------------------+-------+ +| ``\"`` | Double quote (``"``) | | ++-------------------------+---------------------------------+-------+ +| ``\a`` | ASCII Bell (BEL) | | ++-------------------------+---------------------------------+-------+ +| ``\b`` | ASCII Backspace (BS) | | ++-------------------------+---------------------------------+-------+ +| ``\f`` | ASCII Formfeed (FF) | | 
++-------------------------+---------------------------------+-------+ +| ``\n`` | ASCII Linefeed (LF) | | ++-------------------------+---------------------------------+-------+ +| ``\r`` | ASCII Carriage Return (CR) | | ++-------------------------+---------------------------------+-------+ +| ``\t`` | ASCII Horizontal Tab (TAB) | | ++-------------------------+---------------------------------+-------+ +| ``\v`` | ASCII Vertical Tab (VT) | | ++-------------------------+---------------------------------+-------+ +| :samp:`\\\\{ooo}` | Character with octal value | (2,4) | +| | *ooo* | | ++-------------------------+---------------------------------+-------+ +| :samp:`\\x{hh}` | Character with hex value *hh* | (3,4) | ++-------------------------+---------------------------------+-------+ Escape sequences only recognized in string literals are: -+-----------------+---------------------------------+-------+ -| Escape Sequence | Meaning | Notes | -+=================+=================================+=======+ -| ``\N{name}`` | Character named *name* in the | \(5) | -| | Unicode database | | -+-----------------+---------------------------------+-------+ -| ``\uxxxx`` | Character with 16-bit hex value | \(6) | -| | *xxxx* | | -+-----------------+---------------------------------+-------+ -| ``\Uxxxxxxxx`` | Character with 32-bit hex value | \(7) | -| | *xxxxxxxx* | | -+-----------------+---------------------------------+-------+ ++-------------------------+---------------------------------+-------+ +| Escape Sequence | Meaning | Notes | ++=========================+=================================+=======+ +| :samp:`\\N\\{{name}\\}` | Character named *name* in the | \(5) | +| | Unicode database | | ++-------------------------+---------------------------------+-------+ +| :samp:`\\u{xxxx}` | Character with 16-bit hex value | \(6) | +| | *xxxx* | | ++-------------------------+---------------------------------+-------+ +| :samp:`\\U{xxxxxxxx}` | Character with 32-bit hex value | \(7) | +| | *xxxxxxxx* | | ++-------------------------+---------------------------------+-------+ Notes: @@ -612,9 +620,13 @@ Notes: As in Standard C, up to three octal digits are accepted. .. versionchanged:: 3.11 - Octal escapes with value larger than ``0o377`` produce a :exc:`DeprecationWarning`. - In a future Python version they will be a :exc:`SyntaxWarning` and - eventually a :exc:`SyntaxError`. + Octal escapes with value larger than ``0o377`` produce a + :exc:`DeprecationWarning`. + + .. versionchanged:: 3.12 + Octal escapes with value larger than ``0o377`` produce a + :exc:`SyntaxWarning`. In a future Python version they will be eventually + a :exc:`SyntaxError`. (3) Unlike in Standard C, exactly two hex digits are required. @@ -645,10 +657,12 @@ is more easily recognized as broken.) It is also important to note that the escape sequences only recognized in string literals fall into the category of unrecognized escapes for bytes literals. - .. versionchanged:: 3.6 - Unrecognized escape sequences produce a :exc:`DeprecationWarning`. In - a future Python version they will be a :exc:`SyntaxWarning` and - eventually a :exc:`SyntaxError`. +.. versionchanged:: 3.6 + Unrecognized escape sequences produce a :exc:`DeprecationWarning`. + +.. versionchanged:: 3.12 + Unrecognized escape sequences produce a :exc:`SyntaxWarning`. In a future + Python version they will be eventually a :exc:`SyntaxError`. 
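A few doctest-style examples of the escape forms tabulated above (long-standing behavior; only the warning category changes in 3.12)::

    >>> '\x41', '\101', '\N{LATIN SMALL LETTER A}'
    ('A', 'A', 'a')
    >>> '\u0416'                  # 16-bit hex escape
    'Ж'
    >>> len('\t'), len(r'\t')     # raw strings keep the backslash
    (1, 2)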
Even in a raw literal, quotes can be escaped with a backslash, but the backslash remains in the result; for example, ``r"\""`` is a valid string @@ -735,16 +749,28 @@ Expressions in formatted string literals are treated like regular Python expressions surrounded by parentheses, with a few exceptions. An empty expression is not allowed, and both :keyword:`lambda` and assignment expressions ``:=`` must be surrounded by explicit parentheses. -Replacement expressions can contain line breaks (e.g. in triple-quoted -strings), but they cannot contain comments. Each expression is evaluated -in the context where the formatted string literal appears, in order from -left to right. +Each expression is evaluated in the context where the formatted string literal +appears, in order from left to right. Replacement expressions can contain +newlines in both single-quoted and triple-quoted f-strings and they can contain +comments. Everything that comes after a ``#`` inside a replacement field +is a comment (even closing braces and quotes). In that case, replacement fields +must be closed in a different line. + +.. code-block:: text + + >>> f"abc{a # This is a comment }" + ... + 3}" + 'abc5' .. versionchanged:: 3.7 Prior to Python 3.7, an :keyword:`await` expression and comprehensions containing an :keyword:`async for` clause were illegal in the expressions in formatted string literals due to a problem with the implementation. +.. versionchanged:: 3.12 + Prior to Python 3.12, comments were not allowed inside f-string replacement + fields. + When the equal sign ``'='`` is provided, the output will have the expression text, the ``'='`` and the evaluated value. Spaces after the opening brace ``'{'``, within the expression and after the ``'='`` are all retained in the @@ -761,7 +787,7 @@ is converted before formatting. Conversion ``'!s'`` calls :func:`str` on the result, ``'!r'`` calls :func:`repr`, and ``'!a'`` calls :func:`ascii`. The result is then formatted using the :func:`format` protocol. The -format specifier is passed to the :meth:`__format__` method of the +format specifier is passed to the :meth:`~object.__format__` method of the expression or conversion result. An empty string is passed when the format specifier is omitted. The formatted result is then included in the final value of the whole string. @@ -807,24 +833,30 @@ Some examples of formatted string literals:: 'line = "The mill\'s closed" ' -A consequence of sharing the same syntax as regular string literals is -that characters in the replacement fields must not conflict with the -quoting used in the outer formatted string literal:: +Reusing the outer f-string quoting type inside a replacement field is +permitted:: - f"abc {a["x"]} def" # error: outer string literal ended prematurely - f"abc {a['x']} def" # workaround: use different quoting + >>> a = dict(x=2) + >>> f"abc {a["x"]} def" + 'abc 2 def' -Backslashes are not allowed in format expressions and will raise -an error:: +.. versionchanged:: 3.12 + Prior to Python 3.12, reuse of the same quoting type of the outer f-string + inside a replacement field was not possible. - f"newline: {ord('\n')}" # raises SyntaxError +Backslashes are also allowed in replacement fields and are evaluated the same +way as in any other context:: -To include a value in which a backslash escape is required, create -a temporary variable. + >>> a = ["a", "b", "c"] + >>> print(f"List a contains:\n{"\n".join(a)}") + List a contains: + a + b + c - >>> newline = ord('\n') - >>> f"newline: {newline}" - 'newline: 10' +.. 
versionchanged:: 3.12 + Prior to Python 3.12, backslashes were not permitted inside an f-string + replacement field. Formatted string literals cannot be used as docstrings, even if they do not include expressions. @@ -1013,4 +1045,4 @@ occurrence outside string literals and comments is an unconditional error: .. rubric:: Footnotes -.. [#] https://www.unicode.org/Public/15.0.0/ucd/NameAliases.txt +.. [#] https://www.unicode.org/Public/15.1.0/ucd/NameAliases.txt diff --git a/Doc/reference/simple_stmts.rst b/Doc/reference/simple_stmts.rst index 5c9937fb5b6d72f..a9e65be1eda3403 100644 --- a/Doc/reference/simple_stmts.rst +++ b/Doc/reference/simple_stmts.rst @@ -28,6 +28,7 @@ simple statements is: : | `future_stmt` : | `global_stmt` : | `nonlocal_stmt` + : | `type_stmt` .. _exprstmts: @@ -53,8 +54,8 @@ An expression statement evaluates the expression list (which may be a single expression). .. index:: - builtin: repr - object: None + pair: built-in function; repr + pair: object; None pair: string; conversion single: output pair: standard; output @@ -76,7 +77,7 @@ Assignment statements pair: assignment; statement pair: binding; name pair: rebinding; name - object: mutable + pair: object; mutable pair: attribute; assignment Assignment statements are used to (re)bind names to values and to modify @@ -185,7 +186,7 @@ Assignment of an object to a single target is recursively defined as follows. .. index:: pair: subscription; assignment - object: mutable + pair: object; mutable * If the target is a subscription: The primary expression in the reference is evaluated. It should yield either a mutable sequence object (such as a list) @@ -193,8 +194,8 @@ Assignment of an object to a single target is recursively defined as follows. evaluated. .. index:: - object: sequence - object: list + pair: object; sequence + pair: object; list If the primary is a mutable sequence object (such as a list), the subscript must yield an integer. If it is negative, the sequence's length is added to @@ -204,12 +205,12 @@ Assignment of an object to a single target is recursively defined as follows. raised (assignment to a subscripted sequence cannot add new items to a list). .. index:: - object: mapping - object: dictionary + pair: object; mapping + pair: object; dictionary If the primary is a mapping object (such as a dictionary), the subscript must have a type compatible with the mapping's key type, and the mapping is then - asked to create a key/datum pair which maps the subscript to the assigned + asked to create a key/value pair which maps the subscript to the assigned object. This can either replace an existing key/value pair with the same key value, or insert a new key/value pair (if no key with the same value existed). @@ -330,7 +331,7 @@ statement, of a variable or attribute annotation and an optional assignment stat annotated_assignment_stmt: `augtarget` ":" `expression` : ["=" (`starred_expression` | `yield_expression`)] -The difference from normal :ref:`assignment` is that only single target is allowed. +The difference from normal :ref:`assignment` is that only a single target is allowed. For simple names as assignment targets, if in class or module scope, the annotations are evaluated and stored in a special class or module @@ -365,8 +366,8 @@ target, then the interpreter evaluates the target except for the last IDEs. .. versionchanged:: 3.8 - Now annotated assignments allow same expressions in the right hand side as - the regular assignments. 
Previously, some expressions (like un-parenthesized + Now annotated assignments allow the same expressions in the right hand side as + regular assignments. Previously, some expressions (like un-parenthesized tuple expressions) caused a syntax error. @@ -376,7 +377,7 @@ The :keyword:`!assert` statement ================================ .. index:: - ! statement: assert + ! pair: statement; assert pair: debugging; assertions single: , (comma); expression list @@ -398,7 +399,7 @@ The extended form, ``assert expression1, expression2``, is equivalent to :: .. index:: single: __debug__ - exception: AssertionError + pair: exception; AssertionError These equivalences assume that :const:`__debug__` and :exc:`AssertionError` refer to the built-in variables with those names. In the current implementation, the @@ -419,7 +420,7 @@ The :keyword:`!pass` statement ============================== .. index:: - statement: pass + pair: statement; pass pair: null; operation pair: null; operation @@ -441,7 +442,7 @@ The :keyword:`!del` statement ============================= .. index:: - ! statement: del + ! pair: statement; del pair: deletion; target triple: deletion; target; list @@ -454,7 +455,7 @@ Rather than spelling it out in full details, here are some hints. Deletion of a target list recursively deletes each target, from left to right. .. index:: - statement: global + pair: statement; global pair: unbinding; name Deletion of a name removes the binding of that name from the local or global @@ -480,7 +481,7 @@ The :keyword:`!return` statement ================================ .. index:: - ! statement: return + ! pair: statement; return pair: function; definition pair: class; definition @@ -495,7 +496,7 @@ If an expression list is present, it is evaluated, else ``None`` is substituted. :keyword:`return` leaves the current function call with the expression list (or ``None``) as return value. -.. index:: keyword: finally +.. index:: pair: keyword; finally When :keyword:`return` passes control out of a :keyword:`try` statement with a :keyword:`finally` clause, that :keyword:`!finally` clause is executed before @@ -517,11 +518,11 @@ The :keyword:`!yield` statement =============================== .. index:: - statement: yield + pair: statement; yield single: generator; function single: generator; iterator single: function; generator - exception: StopIteration + pair: exception; StopIteration .. productionlist:: python-grammar yield_stmt: `yield_expression` @@ -553,7 +554,7 @@ The :keyword:`!raise` statement =============================== .. index:: - ! statement: raise + ! pair: statement; raise single: exception pair: raising; exception single: __traceback__ (exception attribute) @@ -574,7 +575,7 @@ instantiating the class with no arguments. The :dfn:`type` of the exception is the exception instance's class, the :dfn:`value` is the instance itself. -.. index:: object: traceback +.. index:: pair: object; traceback A traceback object is normally created automatically when an exception is raised and attached to it as the :attr:`__traceback__` attribute, which is writable. @@ -667,9 +668,9 @@ The :keyword:`!break` statement =============================== .. index:: - ! statement: break - statement: for - statement: while + ! pair: statement; break + pair: statement; for + pair: statement; while pair: loop; statement .. productionlist:: python-grammar @@ -679,7 +680,7 @@ The :keyword:`!break` statement :keyword:`while` loop, but not nested in a function or class definition within that loop. -.. 
index:: keyword: else +.. index:: pair: keyword; else pair: loop control; target It terminates the nearest enclosing loop, skipping the optional :keyword:`!else` @@ -688,7 +689,7 @@ clause if the loop has one. If a :keyword:`for` loop is terminated by :keyword:`break`, the loop control target keeps its current value. -.. index:: keyword: finally +.. index:: pair: keyword; finally When :keyword:`break` passes control out of a :keyword:`try` statement with a :keyword:`finally` clause, that :keyword:`!finally` clause is executed before @@ -701,11 +702,11 @@ The :keyword:`!continue` statement ================================== .. index:: - ! statement: continue - statement: for - statement: while + ! pair: statement; continue + pair: statement; for + pair: statement; while pair: loop; statement - keyword: finally + pair: keyword; finally .. productionlist:: python-grammar continue_stmt: "continue" @@ -726,12 +727,12 @@ The :keyword:`!import` statement ================================ .. index:: - ! statement: import + ! pair: statement; import single: module; importing pair: name; binding - keyword: from - keyword: as - exception: ImportError + pair: keyword; from + pair: keyword; as + pair: exception; ImportError single: , (comma); import statement .. productionlist:: python-grammar @@ -756,7 +757,7 @@ commas) the two steps are carried out separately for each clause, just as though the clauses had been separated out into individual import statements. -The details of the first step, finding and loading modules are described in +The details of the first step, finding and loading modules, are described in greater detail in the section on the :ref:`import system `, which also describes the various types of packages and modules that can be imported, as well as all the hooks that can be used to customize @@ -942,7 +943,7 @@ The :keyword:`!global` statement ================================ .. index:: - ! statement: global + ! pair: statement; global triple: global; name; binding single: , (comma); identifier list @@ -970,9 +971,9 @@ annotation. them or silently change the meaning of the program. .. index:: - builtin: exec - builtin: eval - builtin: compile + pair: built-in function; exec + pair: built-in function; eval + pair: built-in function; compile **Programmer's note:** :keyword:`global` is a directive to the parser. It applies only to code parsed at the same time as the :keyword:`!global` statement. @@ -988,7 +989,7 @@ call. The same applies to the :func:`eval` and :func:`compile` functions. The :keyword:`!nonlocal` statement ================================== -.. index:: statement: nonlocal +.. index:: pair: statement; nonlocal single: , (comma); identifier list .. productionlist:: python-grammar @@ -1012,3 +1013,48 @@ pre-existing bindings in the local scope. :pep:`3104` - Access to Names in Outer Scopes The specification for the :keyword:`nonlocal` statement. + +.. _type: + +The :keyword:`!type` statement +============================== + +.. index:: pair: statement; type + +.. productionlist:: python-grammar + type_stmt: 'type' `identifier` [`type_params`] "=" `expression` + +The :keyword:`!type` statement declares a type alias, which is an instance +of :class:`typing.TypeAliasType`. 
+ +For example, the following statement creates a type alias:: + + type Point = tuple[float, float] + +This code is roughly equivalent to:: + + annotation-def VALUE_OF_Point(): + return tuple[float, float] + Point = typing.TypeAliasType("Point", VALUE_OF_Point()) + +``annotation-def`` indicates an :ref:`annotation scope `, which behaves +mostly like a function, but with several small differences. + +The value of the +type alias is evaluated in the annotation scope. It is not evaluated when the +type alias is created, but only when the value is accessed through the type alias's +:attr:`!__value__` attribute (see :ref:`lazy-evaluation`). +This allows the type alias to refer to names that are not yet defined. + +Type aliases may be made generic by adding a :ref:`type parameter list ` +after the name. See :ref:`generic-type-aliases` for more. + +:keyword:`!type` is a :ref:`soft keyword `. + +.. versionadded:: 3.12 + +.. seealso:: + + :pep:`695` - Type Parameter Syntax + Introduced the :keyword:`!type` statement and syntax for + generic classes and functions. diff --git a/Doc/reference/toplevel_components.rst b/Doc/reference/toplevel_components.rst index 319c9de484241e2..dd3d3d6878e2892 100644 --- a/Doc/reference/toplevel_components.rst +++ b/Doc/reference/toplevel_components.rst @@ -21,9 +21,9 @@ Complete Python programs .. index:: single: program .. index:: - module: sys - module: __main__ - module: builtins + pair: module; sys + pair: module; __main__ + pair: module; builtins While a language specification need not prescribe how the language interpreter is invoked, it is useful to have a notion of a complete Python program. A @@ -38,7 +38,7 @@ the next section. .. index:: single: interactive mode - module: __main__ + pair: module; __main__ The interpreter may also be invoked in interactive mode; in this case, it does not read and execute a complete program but reads and executes one statement @@ -98,7 +98,7 @@ Expression input ================ .. index:: single: input -.. index:: builtin: eval +.. index:: pair: built-in function; eval :func:`eval` is used for expression input. It ignores leading whitespace. The string argument to :func:`eval` must have the following form: diff --git a/Doc/requirements-oldest-sphinx.txt b/Doc/requirements-oldest-sphinx.txt new file mode 100644 index 000000000000000..597341d99ffeaab --- /dev/null +++ b/Doc/requirements-oldest-sphinx.txt @@ -0,0 +1,35 @@ +# Requirements to build the Python documentation, for the oldest supported +# Sphinx version. +# +# We pin Sphinx and all of its dependencies to ensure a consistent environment. + +blurb +python-docs-theme>=2022.1 + +# Generated from: +# pip install "Sphinx~=4.2.0" +# pip freeze +# +# Sphinx 4.2 comes from ``needs_sphinx = '4.2'`` in ``Doc/conf.py``. 
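Returning briefly to the ``type`` statement documented earlier: a doctest-style sketch of the lazy evaluation it describes (the ``Later`` name is invented for the example; requires Python 3.12 or later)::

    >>> type Alias = Later        # 'Later' need not exist yet
    >>> class Later: pass
    >>> Alias.__value__           # evaluated only now, on access
    <class '__main__.Later'>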
+ +alabaster==0.7.13 +Babel==2.13.0 +certifi==2023.7.22 +charset-normalizer==3.3.0 +docutils==0.17.1 +idna==3.4 +imagesize==1.4.1 +Jinja2==3.1.2 +MarkupSafe==2.1.3 +packaging==23.2 +Pygments==2.16.1 +requests==2.31.0 +snowballstemmer==2.2.0 +Sphinx==4.2.0 +sphinxcontrib-applehelp==1.0.4 +sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-htmlhelp==2.0.1 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-serializinghtml==1.1.5 +urllib3==2.0.7 diff --git a/Doc/requirements.txt b/Doc/requirements.txt index 958665db69e2279..ce87be2d3928245 100644 --- a/Doc/requirements.txt +++ b/Doc/requirements.txt @@ -1,14 +1,21 @@ # Requirements to build the Python documentation +# +# Note that when updating this file, you will likely also have to update +# the Doc/constraints.txt file. # Sphinx version is pinned so that new versions that introduce new warnings # won't suddenly cause build failures. Updating the version is fine as long # as no warnings are raised by doing so. -sphinx==4.5.0 +# PR #104777: Sphinx 6.2 no longer uses imghdr, removed in Python 3.13. +sphinx==6.2.1 blurb -sphinx-lint==0.6.7 +sphinx-autobuild +sphinxext-opengraph==0.7.5 # The theme used by the documentation is stored separately, so we need # to install that as well. -python-docs-theme>=2022.1 +python-docs-theme>=2023.3.1,!=2023.7 + +-c constraints.txt diff --git a/Doc/tools/.nitignore b/Doc/tools/.nitignore new file mode 100644 index 000000000000000..1e3e367460147ad --- /dev/null +++ b/Doc/tools/.nitignore @@ -0,0 +1,151 @@ +# All RST files under Doc/ -- except these -- must pass Sphinx nit-picky mode, +# as tested on the CI via check-warnings.py in reusable-docs.yml. +# Keep lines sorted lexicographically to help avoid merge conflicts. + +Doc/c-api/descriptor.rst +Doc/c-api/exceptions.rst +Doc/c-api/file.rst +Doc/c-api/float.rst +Doc/c-api/gcsupport.rst +Doc/c-api/init.rst +Doc/c-api/init_config.rst +Doc/c-api/intro.rst +Doc/c-api/memory.rst +Doc/c-api/memoryview.rst +Doc/c-api/module.rst +Doc/c-api/object.rst +Doc/c-api/stable.rst +Doc/c-api/structures.rst +Doc/c-api/sys.rst +Doc/c-api/type.rst +Doc/c-api/typeobj.rst +Doc/extending/extending.rst +Doc/glossary.rst +Doc/howto/descriptor.rst +Doc/howto/enum.rst +Doc/howto/isolating-extensions.rst +Doc/howto/logging.rst +Doc/howto/urllib2.rst +Doc/library/abc.rst +Doc/library/ast.rst +Doc/library/asyncio-extending.rst +Doc/library/asyncio-policy.rst +Doc/library/asyncio-subprocess.rst +Doc/library/asyncio-task.rst +Doc/library/bdb.rst +Doc/library/bisect.rst +Doc/library/calendar.rst +Doc/library/cmd.rst +Doc/library/collections.abc.rst +Doc/library/collections.rst +Doc/library/concurrent.futures.rst +Doc/library/configparser.rst +Doc/library/contextlib.rst +Doc/library/csv.rst +Doc/library/datetime.rst +Doc/library/dbm.rst +Doc/library/decimal.rst +Doc/library/email.charset.rst +Doc/library/email.compat32-message.rst +Doc/library/email.errors.rst +Doc/library/email.mime.rst +Doc/library/email.parser.rst +Doc/library/email.policy.rst +Doc/library/enum.rst +Doc/library/exceptions.rst +Doc/library/faulthandler.rst +Doc/library/fcntl.rst +Doc/library/ftplib.rst +Doc/library/functions.rst +Doc/library/functools.rst +Doc/library/gettext.rst +Doc/library/http.client.rst +Doc/library/http.cookiejar.rst +Doc/library/http.cookies.rst +Doc/library/http.server.rst +Doc/library/importlib.rst +Doc/library/inspect.rst +Doc/library/locale.rst +Doc/library/logging.config.rst +Doc/library/logging.handlers.rst +Doc/library/lzma.rst +Doc/library/mailbox.rst +Doc/library/mmap.rst 
+Doc/library/multiprocessing.rst +Doc/library/multiprocessing.shared_memory.rst +Doc/library/numbers.rst +Doc/library/optparse.rst +Doc/library/os.path.rst +Doc/library/os.rst +Doc/library/pickle.rst +Doc/library/pickletools.rst +Doc/library/platform.rst +Doc/library/plistlib.rst +Doc/library/profile.rst +Doc/library/pyclbr.rst +Doc/library/pydoc.rst +Doc/library/pyexpat.rst +Doc/library/random.rst +Doc/library/readline.rst +Doc/library/reprlib.rst +Doc/library/resource.rst +Doc/library/rlcompleter.rst +Doc/library/select.rst +Doc/library/shelve.rst +Doc/library/signal.rst +Doc/library/smtplib.rst +Doc/library/socket.rst +Doc/library/ssl.rst +Doc/library/stdtypes.rst +Doc/library/string.rst +Doc/library/subprocess.rst +Doc/library/syslog.rst +Doc/library/tarfile.rst +Doc/library/tempfile.rst +Doc/library/termios.rst +Doc/library/test.rst +Doc/library/tkinter.rst +Doc/library/tkinter.scrolledtext.rst +Doc/library/tkinter.ttk.rst +Doc/library/traceback.rst +Doc/library/unittest.mock.rst +Doc/library/unittest.rst +Doc/library/urllib.parse.rst +Doc/library/urllib.request.rst +Doc/library/wsgiref.rst +Doc/library/xml.dom.minidom.rst +Doc/library/xml.dom.pulldom.rst +Doc/library/xml.dom.rst +Doc/library/xml.sax.handler.rst +Doc/library/xml.sax.reader.rst +Doc/library/xml.sax.rst +Doc/library/xmlrpc.client.rst +Doc/library/xmlrpc.server.rst +Doc/library/zlib.rst +Doc/reference/compound_stmts.rst +Doc/reference/datamodel.rst +Doc/reference/expressions.rst +Doc/reference/import.rst +Doc/reference/simple_stmts.rst +Doc/tutorial/datastructures.rst +Doc/using/windows.rst +Doc/whatsnew/2.0.rst +Doc/whatsnew/2.1.rst +Doc/whatsnew/2.2.rst +Doc/whatsnew/2.3.rst +Doc/whatsnew/2.4.rst +Doc/whatsnew/2.5.rst +Doc/whatsnew/2.6.rst +Doc/whatsnew/2.7.rst +Doc/whatsnew/3.0.rst +Doc/whatsnew/3.1.rst +Doc/whatsnew/3.2.rst +Doc/whatsnew/3.3.rst +Doc/whatsnew/3.4.rst +Doc/whatsnew/3.5.rst +Doc/whatsnew/3.6.rst +Doc/whatsnew/3.7.rst +Doc/whatsnew/3.8.rst +Doc/whatsnew/3.9.rst +Doc/whatsnew/3.10.rst +Doc/whatsnew/3.11.rst diff --git a/Doc/tools/check-warnings.py b/Doc/tools/check-warnings.py new file mode 100644 index 000000000000000..809a8d63087e12e --- /dev/null +++ b/Doc/tools/check-warnings.py @@ -0,0 +1,311 @@ +#!/usr/bin/env python3 +""" +Check the output of running Sphinx in nit-picky mode (missing references). +""" +from __future__ import annotations + +import argparse +import itertools +import os +import re +import subprocess +import sys +from pathlib import Path +from typing import TextIO + +# Exclude these whether they're dirty or clean, +# because they trigger a rebuild of dirty files. +EXCLUDE_FILES = { + "Doc/whatsnew/changelog.rst", +} + +# Subdirectories of Doc/ to exclude. +EXCLUDE_SUBDIRS = { + ".env", + ".venv", + "env", + "includes", + "venv", +} + +# Regex pattern to match the parts of a Sphinx warning +WARNING_PATTERN = re.compile( + r"(?P([A-Za-z]:[\\/])?[^:]+):(?P\d+): WARNING: (?P.+)" +) + +# Regex pattern to match the line numbers in a Git unified diff +DIFF_PATTERN = re.compile( + r"^@@ -(?P\d+)(?:,(?P\d+))? \+(?P\d+)(?:,(?P\d+))? 
@@", + flags=re.MULTILINE, +) + + +def get_diff_files(ref_a: str, ref_b: str, filter_mode: str = "") -> set[Path]: + """List the files changed between two Git refs, filtered by change type.""" + added_files_result = subprocess.run( + [ + "git", + "diff", + f"--diff-filter={filter_mode}", + "--name-only", + f"{ref_a}...{ref_b}", + "--", + ], + stdout=subprocess.PIPE, + check=True, + text=True, + encoding="UTF-8", + ) + + added_files = added_files_result.stdout.strip().split("\n") + return {Path(file.strip()) for file in added_files if file.strip()} + + +def get_diff_lines(ref_a: str, ref_b: str, file: Path) -> list[int]: + """List the lines changed between two Git refs for a specific file.""" + diff_output = subprocess.run( + [ + "git", + "diff", + "--unified=0", + f"{ref_a}...{ref_b}", + "--", + str(file), + ], + stdout=subprocess.PIPE, + check=True, + text=True, + encoding="UTF-8", + ) + + # Scrape line offsets + lengths from diff and convert to line numbers + line_matches = DIFF_PATTERN.finditer(diff_output.stdout) + # Removed and added line counts are 1 if not printed + line_match_values = [ + line_match.groupdict(default=1) for line_match in line_matches + ] + line_ints = [ + (int(match_value["lineb"]), int(match_value["added"])) + for match_value in line_match_values + ] + line_ranges = [ + range(line_b, line_b + added) for line_b, added in line_ints + ] + line_numbers = list(itertools.chain(*line_ranges)) + + return line_numbers + + +def get_para_line_numbers(file_obj: TextIO) -> list[list[int]]: + """Get the line numbers of text in a file object, grouped by paragraph.""" + paragraphs = [] + prev_line = None + for lineno, line in enumerate(file_obj): + lineno = lineno + 1 + if prev_line is None or (line.strip() and not prev_line.strip()): + paragraph = [lineno - 1] + paragraphs.append(paragraph) + paragraph.append(lineno) + prev_line = line + return paragraphs + + +def filter_and_parse_warnings( + warnings: list[str], files: set[Path] +) -> list[re.Match[str]]: + """Get the warnings matching passed files and parse them with regex.""" + filtered_warnings = [ + warning + for warning in warnings + if any(str(file) in warning for file in files) + ] + warning_matches = [ + WARNING_PATTERN.fullmatch(warning.strip()) + for warning in filtered_warnings + ] + non_null_matches = [warning for warning in warning_matches if warning] + return non_null_matches + + +def filter_warnings_by_diff( + warnings: list[re.Match[str]], ref_a: str, ref_b: str, file: Path +) -> list[re.Match[str]]: + """Filter the passed per-file warnings to just those on changed lines.""" + diff_lines = get_diff_lines(ref_a, ref_b, file) + with file.open(encoding="UTF-8") as file_obj: + paragraphs = get_para_line_numbers(file_obj) + touched_paras = [ + para_lines + for para_lines in paragraphs + if set(diff_lines) & set(para_lines) + ] + touched_para_lines = set(itertools.chain(*touched_paras)) + warnings_infile = [ + warning for warning in warnings if str(file) in warning["file"] + ] + warnings_touched = [ + warning + for warning in warnings_infile + if int(warning["line"]) in touched_para_lines + ] + return warnings_touched + + +def process_touched_warnings( + warnings: list[str], ref_a: str, ref_b: str +) -> list[re.Match[str]]: + """Filter a list of Sphinx warnings to those affecting touched lines.""" + added_files, modified_files = tuple( + get_diff_files(ref_a, ref_b, filter_mode=mode) for mode in ("A", "M") + ) + + warnings_added = filter_and_parse_warnings(warnings, added_files) + warnings_modified = 
filter_and_parse_warnings(warnings, modified_files) + + modified_files_warned = { + file + for file in modified_files + if any(str(file) in warning["file"] for warning in warnings_modified) + } + + warnings_modified_touched = [ + filter_warnings_by_diff(warnings_modified, ref_a, ref_b, file) + for file in modified_files_warned + ] + warnings_touched = warnings_added + list( + itertools.chain(*warnings_modified_touched) + ) + + return warnings_touched + + +def annotate_diff( + warnings: list[str], ref_a: str = "main", ref_b: str = "HEAD" +) -> None: + """ + Convert Sphinx warning messages to GitHub Actions for changed paragraphs. + + Converts lines like: + .../Doc/library/cgi.rst:98: WARNING: reference target not found + to: + ::warning file=.../Doc/library/cgi.rst,line=98::reference target not found + + See: + https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-warning-message + """ + warnings_touched = process_touched_warnings(warnings, ref_a, ref_b) + print("Emitting doc warnings matching modified lines:") + for warning in warnings_touched: + print("::warning file={file},line={line}::{msg}".format_map(warning)) + print(warning[0]) + if not warnings_touched: + print("None") + + +def fail_if_regression( + warnings: list[str], files_with_expected_nits: set[str], files_with_nits: set[str] +) -> int: + """ + Ensure some files always pass Sphinx nit-picky mode (no missing references). + These are files which are *not* in .nitignore. + """ + all_rst = { + str(rst) + for rst in Path("Doc/").rglob("*.rst") + if rst.parts[1] not in EXCLUDE_SUBDIRS + } + should_be_clean = all_rst - files_with_expected_nits - EXCLUDE_FILES + problem_files = sorted(should_be_clean & files_with_nits) + if problem_files: + print("\nError: must not contain warnings:\n") + for filename in problem_files: + print(filename) + for warning in warnings: + if filename in warning: + if match := WARNING_PATTERN.fullmatch(warning): + print(" {line}: {msg}".format_map(match)) + return -1 + return 0 + + +def fail_if_improved( + files_with_expected_nits: set[str], files_with_nits: set[str] +) -> int: + """ + We may have fixed warnings in some files so that the files are now completely clean. + Good news! Let's add them to .nitignore to prevent regression. + """ + files_with_no_nits = files_with_expected_nits - files_with_nits + if files_with_no_nits: + print("\nCongratulations! 
You improved:\n") + for filename in sorted(files_with_no_nits): + print(filename) + print("\nPlease remove from Doc/tools/.nitignore\n") + return -1 + return 0 + + +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser() + parser.add_argument( + "--annotate-diff", + nargs="*", + metavar=("BASE_REF", "HEAD_REF"), + help="Add GitHub Actions annotations on the diff for warnings on " + "lines changed between the given refs (main and HEAD, by default)", + ) + parser.add_argument( + "--fail-if-regression", + action="store_true", + help="Fail if known-good files have warnings", + ) + parser.add_argument( + "--fail-if-improved", + action="store_true", + help="Fail if new files with no nits are found", + ) + + args = parser.parse_args(argv) + if args.annotate_diff is not None and len(args.annotate_diff) > 2: + parser.error( + "--annotate-diff takes between 0 and 2 ref args, not " + f"{len(args.annotate_diff)} {tuple(args.annotate_diff)}" + ) + exit_code = 0 + + wrong_directory_msg = "Must run this script from the repo root" + assert Path("Doc").exists() and Path("Doc").is_dir(), wrong_directory_msg + + with Path("Doc/sphinx-warnings.txt").open(encoding="UTF-8") as f: + warnings = f.read().splitlines() + + cwd = str(Path.cwd()) + os.path.sep + files_with_nits = { + warning.removeprefix(cwd).split(":")[0] + for warning in warnings + if "Doc/" in warning + } + + with Path("Doc/tools/.nitignore").open(encoding="UTF-8") as clean_files: + files_with_expected_nits = { + filename.strip() + for filename in clean_files + if filename.strip() and not filename.startswith("#") + } + + if args.annotate_diff is not None: + annotate_diff(warnings, *args.annotate_diff) + + if args.fail_if_regression: + exit_code += fail_if_regression( + warnings, files_with_expected_nits, files_with_nits + ) + + if args.fail_if_improved: + exit_code += fail_if_improved(files_with_expected_nits, files_with_nits) + + return exit_code + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/Doc/tools/extensions/c_annotations.py b/Doc/tools/extensions/c_annotations.py index 9defb24a0331ef5..3551bfa4c0f1333 100644 --- a/Doc/tools/extensions/c_annotations.py +++ b/Doc/tools/extensions/c_annotations.py @@ -20,10 +20,12 @@ """ from os import path +import docutils from docutils import nodes from docutils.parsers.rst import directives from docutils.parsers.rst import Directive from docutils.statemachine import StringList +from sphinx.locale import _ as sphinx_gettext import csv from sphinx import addnodes @@ -40,6 +42,16 @@ } +# Monkeypatch nodes.Node.findall for forwards compatability +# This patch can be dropped when the minimum Sphinx version is 4.4.0 +# or the minimum Docutils version is 0.18.1. +if docutils.__version_info__ < (0, 18, 1): + def findall(self, *args, **kwargs): + return iter(self.traverse(*args, **kwargs)) + + nodes.Node.findall = findall + + class RCEntry: def __init__(self, name): self.name = name @@ -86,7 +98,7 @@ def __init__(self, refcount_filename, stable_abi_file): self.stable_abi_data[name] = record def add_annotations(self, app, doctree): - for node in doctree.traverse(addnodes.desc_content): + for node in doctree.findall(addnodes.desc_content): par = node.parent if par['domain'] != 'c': continue @@ -143,6 +155,22 @@ def add_annotations(self, app, doctree): ' (Only some members are part of the stable ABI.)') node.insert(0, emph_node) + # Unstable API annotation. 
+ if name.startswith('PyUnstable'): + warn_node = nodes.admonition( + classes=['unstable-c-api', 'warning']) + message = 'This is ' + emph_node = nodes.emphasis(message, message) + ref_node = addnodes.pending_xref( + 'Unstable API', refdomain="std", + reftarget='unstable-c-api', + reftype='ref', refexplicit="False") + ref_node += nodes.Text('Unstable API') + emph_node += ref_node + emph_node += nodes.Text('. It may change without warning in minor releases.') + warn_node += emph_node + node.insert(0, warn_node) + # Return value annotation if objtype != 'function': continue @@ -152,11 +180,11 @@ def add_annotations(self, app, doctree): elif not entry.result_type.endswith("Object*"): continue if entry.result_refs is None: - rc = 'Return value: Always NULL.' + rc = sphinx_gettext('Return value: Always NULL.') elif entry.result_refs: - rc = 'Return value: New reference.' + rc = sphinx_gettext('Return value: New reference.') else: - rc = 'Return value: Borrowed reference.' + rc = sphinx_gettext('Return value: Borrowed reference.') node.insert(0, nodes.emphasis(rc, rc, classes=['refcount'])) diff --git a/Doc/tools/extensions/pyspecific.py b/Doc/tools/extensions/pyspecific.py index 3b9f7442f75bccc..31c2544caf601c0 100644 --- a/Doc/tools/extensions/pyspecific.py +++ b/Doc/tools/extensions/pyspecific.py @@ -14,28 +14,27 @@ from os import getenv, path from time import asctime from pprint import pformat + +from docutils import nodes, utils from docutils.io import StringOutput from docutils.parsers.rst import Directive from docutils.utils import new_document - -from docutils import nodes, utils - from sphinx import addnodes from sphinx.builders import Builder -try: - from sphinx.errors import NoUri -except ImportError: - from sphinx.environment import NoUri -from sphinx.locale import translators -from sphinx.util import status_iterator, logging +from sphinx.domains.python import PyFunction, PyMethod +from sphinx.errors import NoUri +from sphinx.locale import _ as sphinx_gettext +from sphinx.util import logging +from sphinx.util.docutils import SphinxDirective from sphinx.util.nodes import split_explicit_title from sphinx.writers.text import TextWriter, TextTranslator try: - from sphinx.domains.python import PyFunction, PyMethod + # Sphinx 6+ + from sphinx.util.display import status_iterator except ImportError: - from sphinx.domains.python import PyClassmember as PyMethod - from sphinx.domains.python import PyModulelevel as PyFunction + # Deprecated in Sphinx 6.1, will be removed in Sphinx 8 + from sphinx.util import status_iterator ISSUE_URI = 'https://bugs.python.org/issue?@action=redirect&bpo=%s' @@ -99,14 +98,13 @@ class ImplementationDetail(Directive): final_argument_whitespace = True # This text is copied to templates/dummy.html - label_text = 'CPython implementation detail:' + label_text = sphinx_gettext('CPython implementation detail:') def run(self): self.assert_has_content() pnode = nodes.compound(classes=['impl-detail']) - label = translators['sphinx'].gettext(self.label_text) content = self.content - add_text = nodes.strong(label, label) + add_text = nodes.strong(self.label_text, self.label_text) self.state.nested_parse(content, self.content_offset, pnode) content = nodes.inline(pnode[0].rawsource, translatable=True) content.source = pnode[0].source @@ -119,7 +117,7 @@ def run(self): # Support for documenting platform availability -class Availability(Directive): +class Availability(SphinxDirective): has_content = True required_arguments = 1 @@ -129,8 +127,8 @@ class Availability(Directive): 
# known platform, libc, and threading implementations known_platforms = frozenset({ "AIX", "Android", "BSD", "DragonFlyBSD", "Emscripten", "FreeBSD", - "Linux", "NetBSD", "OpenBSD", "POSIX", "Solaris", "Unix", "VxWorks", - "WASI", "Windows", "macOS", + "GNU/kFreeBSD", "Linux", "NetBSD", "OpenBSD", "POSIX", "Solaris", + "Unix", "VxWorks", "WASI", "Windows", "macOS", # libc "BSD libc", "glibc", "musl", # POSIX platforms with pthreads @@ -139,18 +137,19 @@ class Availability(Directive): def run(self): availability_ref = ':ref:`Availability `: ' + avail_nodes, avail_msgs = self.state.inline_text( + availability_ref + self.arguments[0], + self.lineno) pnode = nodes.paragraph(availability_ref + self.arguments[0], - classes=["availability"],) - n, m = self.state.inline_text(availability_ref, self.lineno) - pnode.extend(n + m) - n, m = self.state.inline_text(self.arguments[0], self.lineno) - pnode.extend(n + m) + '', *avail_nodes, *avail_msgs) + self.set_source_info(pnode) + cnode = nodes.container("", pnode, classes=["availability"]) + self.set_source_info(cnode) if self.content: - self.state.nested_parse(self.content, self.content_offset, pnode) - + self.state.nested_parse(self.content, self.content_offset, cnode) self.parse_platforms() - return [pnode] + return [cnode] def parse_platforms(self): """Parse platform information from arguments @@ -180,7 +179,7 @@ def parse_platforms(self): if unknown: cls = type(self) logger = logging.getLogger(cls.__qualname__) - logger.warn( + logger.warning( f"Unknown platform(s) or syntax '{' '.join(sorted(unknown))}' " f"in '.. availability:: {self.arguments[0]}', see " f"{__file__}:{cls.__qualname__}.known_platforms for a set " @@ -234,9 +233,9 @@ class AuditEvent(Directive): final_argument_whitespace = True _label = [ - "Raises an :ref:`auditing event ` {name} with no arguments.", - "Raises an :ref:`auditing event ` {name} with argument {args}.", - "Raises an :ref:`auditing event ` {name} with arguments {args}.", + sphinx_gettext("Raises an :ref:`auditing event ` {name} with no arguments."), + sphinx_gettext("Raises an :ref:`auditing event ` {name} with argument {args}."), + sphinx_gettext("Raises an :ref:`auditing event ` {name} with arguments {args}."), ] @property @@ -252,7 +251,7 @@ def run(self): else: args = [] - label = translators['sphinx'].gettext(self._label[min(2, len(args))]) + label = self._label[min(2, len(args))] text = label.format(name="``{}``".format(name), args=", ".join("``{}``".format(a) for a in args if a)) @@ -267,7 +266,7 @@ def run(self): info = env.all_audit_events.setdefault(name, new_info) if info is not new_info: if not self._do_args_match(info['args'], new_info['args']): - self.logger.warn( + self.logger.warning( "Mismatched arguments for audit-event {}: {!r} != {!r}" .format(name, info['args'], new_info['args']) ) @@ -414,8 +413,8 @@ class DeprecatedRemoved(Directive): final_argument_whitespace = True option_spec = {} - _deprecated_label = 'Deprecated since version {deprecated}, will be removed in version {removed}' - _removed_label = 'Deprecated since version {deprecated}, removed in version {removed}' + _deprecated_label = sphinx_gettext('Deprecated since version {deprecated}, will be removed in version {removed}') + _removed_label = sphinx_gettext('Deprecated since version {deprecated}, removed in version {removed}') def run(self): node = addnodes.versionmodified() @@ -431,7 +430,6 @@ def run(self): else: label = self._removed_label - label = translators['sphinx'].gettext(label) text = 
label.format(deprecated=self.arguments[0], removed=self.arguments[1]) if len(self.arguments) == 3: inodes, messages = self.state.inline_text(self.arguments[2], @@ -544,7 +542,7 @@ def write(self, *ignored): 'building topics... ', length=len(pydoc_topic_labels)): if label not in self.env.domaindata['std']['labels']: - self.env.logger.warn('label %r not in documentation' % label) + self.env.logger.warning(f'label {label!r} not in documentation') continue docname, labelid, sectname = self.env.domaindata['std']['labels'][label] doctree = self.env.get_and_resolve_doctree(docname, self) @@ -559,6 +557,7 @@ def finish(self): try: f.write('# -*- coding: utf-8 -*-\n'.encode('utf-8')) f.write(('# Autogenerated by Sphinx on %s\n' % asctime()).encode('utf-8')) + f.write('# as part of the release process.\n'.encode('utf-8')) f.write(('topics = ' + pformat(self.topics) + '\n').encode('utf-8')) finally: f.close() @@ -608,6 +607,13 @@ def parse_pdb_command(env, sig, signode): return fullname +def parse_monitoring_event(env, sig, signode): + """Transform a monitoring event signature into RST nodes.""" + signode += addnodes.desc_addname('sys.monitoring.events.', 'sys.monitoring.events.') + signode += addnodes.desc_name(sig, sig) + return sig + + def process_audit_events(app, doctree, fromdocname): for node in doctree.traverse(audit_event_list): break @@ -672,6 +678,30 @@ def process_audit_events(app, doctree, fromdocname): node.replace_self(table) +def patch_pairindextypes(app, _env) -> None: + """Remove all entries from ``pairindextypes`` before writing POT files. + + We want to run this just before writing output files, as the check to + circumvent is in ``I18nBuilder.write_doc()``. + As such, we link this to ``env-check-consistency``, even though it has + nothing to do with the environment consistency check. + """ + if app.builder.name != 'gettext': + return + + # allow translating deprecated index entries + try: + from sphinx.domains.python import pairindextypes + except ImportError: + pass + else: + # Sphinx checks if a 'pair' type entry on an index directive is one of + # the Sphinx-translated pairindextypes values. As we intend to move + # away from this, we need Sphinx to believe that these values don't + # exist, by deleting them when using the gettext builder. 
+ pairindextypes.clear() + + def setup(app): app.add_role('issue', issue_role) app.add_role('gh', gh_issue_role) @@ -684,7 +714,7 @@ def setup(app): app.add_builder(PydocTopicsBuilder) app.add_object_type('opcode', 'opcode', '%s (opcode)', parse_opcode_signature) app.add_object_type('pdbcommand', 'pdbcmd', '%s (pdb command)', parse_pdb_command) - app.add_object_type('2to3fixer', '2to3fixer', '%s (2to3 fixer)') + app.add_object_type('monitoring-event', 'monitoring-event', '%s (monitoring event)', parse_monitoring_event) app.add_directive_to_domain('py', 'decorator', PyDecoratorFunction) app.add_directive_to_domain('py', 'decoratormethod', PyDecoratorMethod) app.add_directive_to_domain('py', 'coroutinefunction', PyCoroutineFunction) @@ -693,6 +723,7 @@ def setup(app): app.add_directive_to_domain('py', 'awaitablemethod', PyAwaitableMethod) app.add_directive_to_domain('py', 'abstractmethod', PyAbstractMethod) app.add_directive('miscnews', MiscNews) + app.connect('env-check-consistency', patch_pairindextypes) app.connect('doctree-resolved', process_audit_events) app.connect('env-merge-info', audit_events_merge) app.connect('env-purge-doc', audit_events_purge) diff --git a/Doc/tools/templates/download.html b/Doc/tools/templates/download.html index 7920e0619f9337f..b5353d6fb77ab46 100644 --- a/Doc/tools/templates/download.html +++ b/Doc/tools/templates/download.html @@ -49,12 +49,12 @@

Unpacking

Unix users should download the .tar.bz2 archives; these are bzipped tar archives and can be handled in the usual way using tar and the bzip2
-program. The InfoZIP unzip program can be
+program. The Info-ZIP unzip program can be
used to handle the ZIP archives if desired. The .tar.bz2 archives provide the best compression and fastest download times.

Windows users can use the ZIP archives since those are customary on that
-platform. These are created on Unix using the InfoZIP zip program.
+platform. These are created on Unix using the Info-ZIP zip program.
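
(As an illustrative aside, not part of the template change itself: both archive formats can also be unpacked from Python with the standard tarfile and zipfile modules; the archive names below are hypothetical.)

    import tarfile
    import zipfile

    # Unpack a bzipped tar archive of the docs (hypothetical file name).
    with tarfile.open("python-docs-text.tar.bz2", "r:bz2") as archive:
        archive.extractall("docs-text")

    # Unpack the corresponding ZIP archive (hypothetical file name).
    with zipfile.ZipFile("python-docs-text.zip") as archive:
        archive.extractall("docs-text")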

Problems

diff --git a/Doc/tools/templates/dummy.html b/Doc/tools/templates/dummy.html index 3438b44377fcb96..bab4aaeb4604b8f 100644 --- a/Doc/tools/templates/dummy.html +++ b/Doc/tools/templates/dummy.html @@ -7,6 +7,11 @@ {% trans %}Deprecated since version {deprecated}, will be removed in version {removed}{% endtrans %} {% trans %}Deprecated since version {deprecated}, removed in version {removed}{% endtrans %} +In extensions/c_annotations.py: + +{% trans %}Return value: Always NULL.{% endtrans %} +{% trans %}Return value: New reference.{% endtrans %} +{% trans %}Return value: Borrowed reference.{% endtrans %} In docsbuild-scripts, when rewriting indexsidebar.html with actual versions: diff --git a/Doc/tools/templates/indexcontent.html b/Doc/tools/templates/indexcontent.html index a96746b69fd41b7..1e3ab7cfe02feeb 100644 --- a/Doc/tools/templates/indexcontent.html +++ b/Doc/tools/templates/indexcontent.html @@ -62,6 +62,7 @@

{{ docstitle|e }}

+ {% endblock %} diff --git a/Doc/tools/templates/layout.html b/Doc/tools/templates/layout.html index 98ccf4224804b29..9498b2ccc5af929 100644 --- a/Doc/tools/templates/layout.html +++ b/Doc/tools/templates/layout.html @@ -4,8 +4,16 @@ {%- if outdated %}
{% trans %}This document is for an old version of Python that is no longer supported.
- You should upgrade, and read the {% endtrans %}
- {% trans %} Python documentation for the current stable release{% endtrans %}.
+ You should upgrade, and read the{% endtrans %}
+ {% trans %}Python documentation for the current stable release{% endtrans %}.
+
+{%- endif %}
+
+{%- if is_deployment_preview %}
+
+ {% trans %}This is a deploy preview created from a pull request.
+ For authoritative documentation, see{% endtrans %}
+ {% trans %}the current stable release{% endtrans %}.
{%- endif %} {% endblock %} diff --git a/Doc/tools/templates/search.html b/Doc/tools/templates/search.html index f2ac2ea0f098731..852974461380f2d 100644 --- a/Doc/tools/templates/search.html +++ b/Doc/tools/templates/search.html @@ -1,48 +1,62 @@ {% extends "!search.html" %} {% block extrahead %} {{ super() }} + -{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/Doc/tutorial/appendix.rst b/Doc/tutorial/appendix.rst index 241a812037469e2..588591fcdb726f7 100644 --- a/Doc/tutorial/appendix.rst +++ b/Doc/tutorial/appendix.rst @@ -101,8 +101,8 @@ in the script:: The Customization Modules ------------------------- -Python provides two hooks to let you customize it: :mod:`sitecustomize` and -:mod:`usercustomize`. To see how it works, you need first to find the location +Python provides two hooks to let you customize it: :index:`sitecustomize` and +:index:`usercustomize`. To see how it works, you need first to find the location of your user site-packages directory. Start Python and run this code:: >>> import site @@ -113,9 +113,9 @@ Now you can create a file named :file:`usercustomize.py` in that directory and put anything you want in it. It will affect every invocation of Python, unless it is started with the :option:`-s` option to disable the automatic import. -:mod:`sitecustomize` works in the same way, but is typically created by an +:index:`sitecustomize` works in the same way, but is typically created by an administrator of the computer in the global site-packages directory, and is -imported before :mod:`usercustomize`. See the documentation of the :mod:`site` +imported before :index:`usercustomize`. See the documentation of the :mod:`site` module for more details. diff --git a/Doc/tutorial/classes.rst b/Doc/tutorial/classes.rst index 9ecbf8b87efbf1b..7b92e1a51b6e677 100644 --- a/Doc/tutorial/classes.rst +++ b/Doc/tutorial/classes.rst @@ -91,7 +91,7 @@ Attributes may be read-only or writable. In the latter case, assignment to attributes is possible. Module attributes are writable: you can write ``modname.the_answer = 42``. Writable attributes may also be deleted with the :keyword:`del` statement. For example, ``del modname.the_answer`` will remove -the attribute :attr:`the_answer` from the object named by ``modname``. +the attribute :attr:`!the_answer` from the object named by ``modname``. Namespaces are created at different moments and have different lifetimes. The namespace containing the built-in names is created when the Python interpreter @@ -119,12 +119,12 @@ directly accessible: * the innermost scope, which is searched first, contains the local names * the scopes of any enclosing functions, which are searched starting with the - nearest enclosing scope, contains non-local, but also non-global names + nearest enclosing scope, contain non-local, but also non-global names * the next-to-last scope contains the current module's global names * the outermost scope (searched last) is the namespace containing built-in names If a name is declared global, then all references and assignments go directly to -the middle scope containing the module's global names. To rebind variables +the next-to-last scope containing the module's global names. To rebind variables found outside of the innermost scope, the :keyword:`nonlocal` statement can be used; if not declared nonlocal, those variables are read-only (an attempt to write to such a variable will simply create a *new* local variable in the @@ -249,7 +249,7 @@ created. 
This is basically a wrapper around the contents of the namespace created by the class definition; we'll learn more about class objects in the next section. The original local scope (the one in effect just before the class definition was entered) is reinstated, and the class object is bound here to the -class name given in the class definition header (:class:`ClassName` in the +class name given in the class definition header (:class:`!ClassName` in the example). @@ -276,7 +276,7 @@ definition looked like this:: then ``MyClass.i`` and ``MyClass.f`` are valid attribute references, returning an integer and a function object, respectively. Class attributes can also be assigned to, so you can change the value of ``MyClass.i`` by assignment. -:attr:`__doc__` is also a valid attribute, returning the docstring belonging to +:attr:`!__doc__` is also a valid attribute, returning the docstring belonging to the class: ``"A simple example class"``. Class *instantiation* uses function notation. Just pretend that the class @@ -291,20 +291,20 @@ variable ``x``. The instantiation operation ("calling" a class object) creates an empty object. Many classes like to create objects with instances customized to a specific initial state. Therefore a class may define a special method named -:meth:`__init__`, like this:: +:meth:`~object.__init__`, like this:: def __init__(self): self.data = [] -When a class defines an :meth:`__init__` method, class instantiation -automatically invokes :meth:`__init__` for the newly created class instance. So +When a class defines an :meth:`~object.__init__` method, class instantiation +automatically invokes :meth:`!__init__` for the newly created class instance. So in this example, a new, initialized instance can be obtained by:: x = MyClass() -Of course, the :meth:`__init__` method may have arguments for greater +Of course, the :meth:`~object.__init__` method may have arguments for greater flexibility. In that case, arguments given to the class instantiation operator -are passed on to :meth:`__init__`. For example, :: +are passed on to :meth:`!__init__`. For example, :: >>> class Complex: ... def __init__(self, realpart, imagpart): @@ -328,7 +328,7 @@ attribute names: data attributes and methods. *data attributes* correspond to "instance variables" in Smalltalk, and to "data members" in C++. Data attributes need not be declared; like local variables, they spring into existence when they are first assigned to. For example, if -``x`` is the instance of :class:`MyClass` created above, the following piece of +``x`` is the instance of :class:`!MyClass` created above, the following piece of code will print the value ``16``, without leaving a trace:: x.counter = 1 @@ -344,7 +344,7 @@ list objects have methods called append, insert, remove, sort, and so on. However, in the following discussion, we'll use the term method exclusively to mean methods of class instance objects, unless explicitly stated otherwise.) -.. index:: object: method +.. index:: pair: object; method Valid method names of an instance object depend on its class. By definition, all attributes of a class that are function objects define corresponding @@ -363,7 +363,7 @@ Usually, a method is called right after it is bound:: x.f() -In the :class:`MyClass` example, this will return the string ``'hello world'``. +In the :class:`!MyClass` example, this will return the string ``'hello world'``. However, it is not necessary to call a method right away: ``x.f`` is a method object, and can be stored away and called at a later time. 
For example:: @@ -375,7 +375,7 @@ will continue to print ``hello world`` until the end of time. What exactly happens when a method is called? You may have noticed that ``x.f()`` was called without an argument above, even though the function -definition for :meth:`f` specified an argument. What happened to the argument? +definition for :meth:`!f` specified an argument. What happened to the argument? Surely Python raises an exception when a function that requires an argument is called without any --- even if the argument isn't actually used... @@ -532,9 +532,9 @@ variable in the class is also ok. For example:: h = g -Now ``f``, ``g`` and ``h`` are all attributes of class :class:`C` that refer to +Now ``f``, ``g`` and ``h`` are all attributes of class :class:`!C` that refer to function objects, and consequently they are all methods of instances of -:class:`C` --- ``h`` being exactly equivalent to ``g``. Note that this practice +:class:`!C` --- ``h`` being exactly equivalent to ``g``. Note that this practice usually only serves to confuse the reader of a program. Methods may call other methods by using method attributes of the ``self`` @@ -581,7 +581,7 @@ this:: . -The name :class:`BaseClassName` must be defined in a +The name :class:`!BaseClassName` must be defined in a namespace accessible from the scope containing the derived class definition. In place of a base class name, other arbitrary expressions are also allowed. This can be useful, for example, when the base @@ -645,9 +645,9 @@ multiple base classes looks like this:: For most purposes, in the simplest cases, you can think of the search for attributes inherited from a parent class as depth-first, left-to-right, not searching twice in the same class where there is an overlap in the hierarchy. -Thus, if an attribute is not found in :class:`DerivedClassName`, it is searched -for in :class:`Base1`, then (recursively) in the base classes of :class:`Base1`, -and if it was not found there, it was searched for in :class:`Base2`, and so on. +Thus, if an attribute is not found in :class:`!DerivedClassName`, it is searched +for in :class:`!Base1`, then (recursively) in the base classes of :class:`!Base1`, +and if it was not found there, it was searched for in :class:`!Base2`, and so on. In fact, it is slightly more complex than that; the method resolution order changes dynamically to support cooperative calls to :func:`super`. This @@ -738,23 +738,30 @@ Odds and Ends ============= Sometimes it is useful to have a data type similar to the Pascal "record" or C -"struct", bundling together a few named data items. An empty class definition -will do nicely:: +"struct", bundling together a few named data items. The idiomatic approach +is to use :mod:`dataclasses` for this purpose:: - class Employee: - pass + from dataclasses import dataclass - john = Employee() # Create an empty employee record + @dataclass + class Employee: + name: str + dept: str + salary: int - # Fill the fields of the record - john.name = 'John Doe' - john.dept = 'computer lab' - john.salary = 1000 +:: + + >>> john = Employee('john', 'computer lab', 1000) + >>> john.dept + 'computer lab' + >>> john.salary + 1000 A piece of Python code that expects a particular abstract data type can often be passed a class that emulates the methods of that data type instead. 
For instance, if you have a function that formats some data from a file object, you -can define a class with methods :meth:`read` and :meth:`!readline` that get the +can define a class with methods :meth:`~io.TextIOBase.read` and +:meth:`~io.TextIOBase.readline` that get the data from a string buffer instead, and pass it as an argument. .. (Unfortunately, this technique has its limitations: a class can't define @@ -763,7 +770,7 @@ data from a string buffer instead, and pass it as an argument. not cause the interpreter to read further input from it.) Instance method objects have attributes, too: ``m.__self__`` is the instance -object with the method :meth:`m`, and ``m.__func__`` is the function object +object with the method :meth:`!m`, and ``m.__func__`` is the function object corresponding to the method. @@ -812,9 +819,9 @@ using the :func:`next` built-in function; this example shows how it all works:: StopIteration Having seen the mechanics behind the iterator protocol, it is easy to add -iterator behavior to your classes. Define an :meth:`__iter__` method which +iterator behavior to your classes. Define an :meth:`~container.__iter__` method which returns an object with a :meth:`~iterator.__next__` method. If the class -defines :meth:`__next__`, then :meth:`__iter__` can just return ``self``:: +defines :meth:`!__next__`, then :meth:`!__iter__` can just return ``self``:: class Reverse: """Iterator for looping over a sequence backwards.""" @@ -873,7 +880,7 @@ easy to create:: Anything that can be done with generators can also be done with class-based iterators as described in the previous section. What makes generators so -compact is that the :meth:`__iter__` and :meth:`~generator.__next__` methods +compact is that the :meth:`~iterator.__iter__` and :meth:`~generator.__next__` methods are created automatically. Another key feature is that the local variables and execution state are diff --git a/Doc/tutorial/controlflow.rst b/Doc/tutorial/controlflow.rst index 52db51e84cd5fca..aa9caa101da40a4 100644 --- a/Doc/tutorial/controlflow.rst +++ b/Doc/tutorial/controlflow.rst @@ -4,8 +4,8 @@ More Control Flow Tools *********************** -Besides the :keyword:`while` statement just introduced, Python uses the usual -flow control statements known from other languages, with some twists. +As well as the :keyword:`while` statement just introduced, Python uses a few more +that we will encounter in this chapter. .. _tut-if: @@ -46,7 +46,7 @@ details see :ref:`tut-match`. ========================== .. index:: - statement: for + pair: statement; for The :keyword:`for` statement in Python differs a bit from what you may be used to in C or Pascal. Rather than always iterating over an arithmetic progression @@ -163,14 +163,21 @@ arguments. In chapter :ref:`tut-structures`, we will discuss in more detail abo :keyword:`!break` and :keyword:`!continue` Statements, and :keyword:`!else` Clauses on Loops ============================================================================================ -The :keyword:`break` statement, like in C, breaks out of the innermost enclosing +The :keyword:`break` statement breaks out of the innermost enclosing :keyword:`for` or :keyword:`while` loop. -Loop statements may have an :keyword:`!else` clause; it is executed when the loop -terminates through exhaustion of the iterable (with :keyword:`for`) or when the -condition becomes false (with :keyword:`while`), but not when the loop is -terminated by a :keyword:`break` statement. 
This is exemplified by the -following loop, which searches for prime numbers:: +A :keyword:`!for` or :keyword:`!while` loop can include an :keyword:`!else` clause. + +In a :keyword:`for` loop, the :keyword:`!else` clause is executed +after the loop reaches its final iteration. + +In a :keyword:`while` loop, it's executed after the loop's condition becomes false. + +In either kind of loop, the :keyword:`!else` clause is **not** executed +if the loop was terminated by a :keyword:`break`. + +This is exemplified in the following :keyword:`!for` loop, +which searches for prime numbers:: >>> for n in range(2, 10): ... for x in range(2, n): @@ -307,8 +314,9 @@ you can use the class name followed by an argument list resembling a constructor, but with the ability to capture attributes into variables:: class Point: - x: int - y: int + def __init__(self, x, y): + self.x = x + self.y = y def where_is(point): match point: @@ -342,7 +350,13 @@ Dotted names (like ``foo.bar``), attribute names (the ``x=`` and ``y=`` above) o (recognized by the "(...)" next to them like ``Point`` above) are never assigned to. Patterns can be arbitrarily nested. For example, if we have a short -list of points, we could match it like this:: +list of Points, with ``__match_args__`` added, we could match it like this:: + + class Point: + __match_args__ = ('x', 'y') + def __init__(self, x, y): + self.x = x + self.y = y match points: case []: @@ -520,7 +534,7 @@ This example, as usual, demonstrates some new Python features: Different types define different methods. Methods of different types may have the same name without causing ambiguity. (It is possible to define your own object types and methods, using *classes*, see :ref:`tut-classes`) - The method :meth:`append` shown in the example is defined for list objects; it + The method :meth:`!append` shown in the example is defined for list objects; it adds a new element at the end of the list. In this example it is equivalent to ``result = result + [a]``, but more efficient. @@ -1032,7 +1046,7 @@ Function Annotations information about the types used by user-defined functions (see :pep:`3107` and :pep:`484` for more information). -:term:`Annotations ` are stored in the :attr:`__annotations__` +:term:`Annotations ` are stored in the :attr:`!__annotations__` attribute of the function as a dictionary and have no effect on any other part of the function. Parameter annotations are defined by a colon after the parameter name, followed by an expression evaluating to the value of the annotation. Return annotations are diff --git a/Doc/tutorial/datastructures.rst b/Doc/tutorial/datastructures.rst index c8e89d9b79bdddc..87614d082a1d4e0 100644 --- a/Doc/tutorial/datastructures.rst +++ b/Doc/tutorial/datastructures.rst @@ -143,8 +143,8 @@ Using Lists as Stacks The list methods make it very easy to use a list as a stack, where the last element added is the first element retrieved ("last-in, first-out"). To add an -item to the top of the stack, use :meth:`append`. To retrieve an item from the -top of the stack, use :meth:`pop` without an explicit index. For example:: +item to the top of the stack, use :meth:`~list.append`. To retrieve an item from the +top of the stack, use :meth:`~list.pop` without an explicit index. For example:: >>> stack = [3, 4, 5] >>> stack.append(6) @@ -341,7 +341,7 @@ The :keyword:`!del` statement ============================= There is a way to remove an item from a list given its index instead of its -value: the :keyword:`del` statement. 
This differs from the :meth:`pop` method +value: the :keyword:`del` statement. This differs from the :meth:`~list.pop` method which returns a value. The :keyword:`!del` statement can also be used to remove slices from a list or clear the entire list (which we did earlier by assignment of an empty list to the slice). For example:: @@ -501,8 +501,8 @@ any immutable type; strings and numbers can always be keys. Tuples can be used as keys if they contain only strings, numbers, or tuples; if a tuple contains any mutable object either directly or indirectly, it cannot be used as a key. You can't use lists as keys, since lists can be modified in place using index -assignments, slice assignments, or methods like :meth:`append` and -:meth:`extend`. +assignments, slice assignments, or methods like :meth:`~list.append` and +:meth:`~list.extend`. It is best to think of a dictionary as a set of *key: value* pairs, with the requirement that the keys are unique (within one dictionary). A pair of @@ -567,7 +567,7 @@ Looping Techniques ================== When looping through dictionaries, the key and corresponding value can be -retrieved at the same time using the :meth:`items` method. :: +retrieved at the same time using the :meth:`~dict.items` method. :: >>> knights = {'gallahad': 'the pure', 'robin': 'the brave'} >>> for k, v in knights.items(): diff --git a/Doc/tutorial/errors.rst b/Doc/tutorial/errors.rst index 67bb19556681c06..4058ebe8efdb424 100644 --- a/Doc/tutorial/errors.rst +++ b/Doc/tutorial/errors.rst @@ -108,8 +108,7 @@ The :keyword:`try` statement works as follows. * If an exception occurs which does not match the exception named in the *except clause*, it is passed on to outer :keyword:`try` statements; if no handler is - found, it is an *unhandled exception* and execution stops with a message as - shown above. + found, it is an *unhandled exception* and execution stops with an error message. A :keyword:`try` statement may have more than one *except clause*, to specify handlers for different exceptions. At most one handler will be executed. @@ -154,13 +153,13 @@ exception type. The *except clause* may specify a variable after the exception name. The variable is bound to the exception instance which typically has an ``args`` attribute that stores the arguments. For convenience, builtin exception -types define :meth:`__str__` to print all the arguments without explicitly +types define :meth:`~object.__str__` to print all the arguments without explicitly accessing ``.args``. :: >>> try: ... raise Exception('spam', 'eggs') ... except Exception as inst: - ... print(type(inst)) # the exception instance + ... print(type(inst)) # the exception type ... print(inst.args) # arguments stored in .args ... print(inst) # __str__ allows args to be printed directly, ... # but may be overridden in exception subclasses @@ -174,7 +173,7 @@ accessing ``.args``. :: x = spam y = eggs -The exception's :meth:`__str__` output is printed as the last part ('detail') +The exception's :meth:`~object.__str__` output is printed as the last part ('detail') of the message for unhandled exceptions. :exc:`BaseException` is the common base class of all exceptions. One of its @@ -496,7 +495,7 @@ Raising and Handling Multiple Unrelated Exceptions ================================================== There are situations where it is necessary to report several exceptions that -have occurred. This it often the case in concurrency frameworks, when several +have occurred. 
This is often the case in concurrency frameworks, when several tasks may have failed in parallel, but there are also other use cases where it is desirable to continue execution and collect multiple errors rather than raise the first exception. @@ -535,11 +534,20 @@ of a certain type while letting all other exceptions propagate to other clauses and eventually to be reraised. :: >>> def f(): - ... raise ExceptionGroup("group1", - ... [OSError(1), - ... SystemError(2), - ... ExceptionGroup("group2", - ... [OSError(3), RecursionError(4)])]) + ... raise ExceptionGroup( + ... "group1", + ... [ + ... OSError(1), + ... SystemError(2), + ... ExceptionGroup( + ... "group2", + ... [ + ... OSError(3), + ... RecursionError(4) + ... ] + ... ) + ... ] + ... ) ... >>> try: ... f() @@ -578,6 +586,8 @@ the following pattern:: ... +.. _tut-exception-notes: + Enriching Exceptions with Notes =============================== diff --git a/Doc/tutorial/floatingpoint.rst b/Doc/tutorial/floatingpoint.rst index e1cd7f9ece75d0e..30f3dfb6b238b4a 100644 --- a/Doc/tutorial/floatingpoint.rst +++ b/Doc/tutorial/floatingpoint.rst @@ -1,6 +1,7 @@ .. testsetup:: import math + from fractions import Fraction .. _tut-fp-issues: @@ -9,12 +10,13 @@ Floating Point Arithmetic: Issues and Limitations ************************************************** .. sectionauthor:: Tim Peters +.. sectionauthor:: Raymond Hettinger Floating-point numbers are represented in computer hardware as base 2 (binary) -fractions. For example, the **decimal** fraction ``0.125`` -has value 1/10 + 2/100 + 5/1000, and in the same way the **binary** fraction ``0.001`` -has value 0/2 + 0/4 + 1/8. These two fractions have identical values, the only +fractions. For example, the **decimal** fraction ``0.625`` +has value 6/10 + 2/100 + 5/1000, and in the same way the **binary** fraction ``0.101`` +has value 1/2 + 0/4 + 1/8. These two fractions have identical values, the only real difference being that the first is written in base 10 fractional notation, and the second in base 2. @@ -57,13 +59,15 @@ Many users are not aware of the approximation because of the way values are displayed. Python only prints a decimal approximation to the true decimal value of the binary approximation stored by the machine. On most machines, if Python were to print the true decimal value of the binary approximation stored -for 0.1, it would have to display :: +for 0.1, it would have to display:: >>> 0.1 0.1000000000000000055511151231257827021181583404541015625 That is more digits than most people find useful, so Python keeps the number -of digits manageable by displaying a rounded value instead :: +of digits manageable by displaying a rounded value instead: + +.. doctest:: >>> 1 / 10 0.1 @@ -90,7 +94,10 @@ thing in all languages that support your hardware's floating-point arithmetic (although some languages may not *display* the difference by default, or in all output modes). -For more pleasant output, you may wish to use string formatting to produce a limited number of significant digits:: +For more pleasant output, you may wish to use string formatting to produce a +limited number of significant digits: + +.. doctest:: >>> format(math.pi, '.12g') # give 12 significant digits '3.14159265359' @@ -101,33 +108,49 @@ For more pleasant output, you may wish to use string formatting to produce a lim >>> repr(math.pi) '3.141592653589793' - It's important to realize that this is, in a real sense, an illusion: you're simply rounding the *display* of the true machine value. 
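
As a quick check of that point (an illustrative doctest relying only on the
standard :mod:`decimal` module), converting the float to
:class:`~decimal.Decimal` reveals the true machine value behind the rounded
display:

.. doctest::

   >>> from decimal import Decimal
   >>> Decimal(0.1)   # the exact value stored for the literal 0.1
   Decimal('0.1000000000000000055511151231257827021181583404541015625')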
One illusion may beget another. For example, since 0.1 is not exactly 1/10, -summing three values of 0.1 may not yield exactly 0.3, either:: +summing three values of 0.1 may not yield exactly 0.3, either: + +.. doctest:: - >>> .1 + .1 + .1 == .3 + >>> 0.1 + 0.1 + 0.1 == 0.3 False Also, since the 0.1 cannot get any closer to the exact value of 1/10 and 0.3 cannot get any closer to the exact value of 3/10, then pre-rounding with -:func:`round` function cannot help:: +:func:`round` function cannot help: - >>> round(.1, 1) + round(.1, 1) + round(.1, 1) == round(.3, 1) +.. doctest:: + + >>> round(0.1, 1) + round(0.1, 1) + round(0.1, 1) == round(0.3, 1) False Though the numbers cannot be made closer to their intended exact values, -the :func:`round` function can be useful for post-rounding so that results -with inexact values become comparable to one another:: +the :func:`math.isclose` function can be useful for comparing inexact values: - >>> round(.1 + .1 + .1, 10) == round(.3, 10) - True +.. doctest:: + + >>> math.isclose(0.1 + 0.1 + 0.1, 0.3) + True + +Alternatively, the :func:`round` function can be used to compare rough +approximations: + +.. doctest:: + + >>> round(math.pi, ndigits=2) == round(22 / 7, ndigits=2) + True Binary floating-point arithmetic holds many surprises like this. The problem with "0.1" is explained in precise detail below, in the "Representation Error" -section. See `The Perils of Floating Point `_ +section. See `Examples of Floating Point Problems +`_ for +a pleasant summary of how binary floating-point works and the kinds of +problems commonly encountered in practice. Also see +`The Perils of Floating Point `_ for a more complete account of other common surprises. As that says near the end, "there are no easy answers." Still, don't be unduly @@ -151,33 +174,41 @@ Another form of exact arithmetic is supported by the :mod:`fractions` module which implements arithmetic based on rational numbers (so the numbers like 1/3 can be represented exactly). -If you are a heavy user of floating point operations you should take a look +If you are a heavy user of floating-point operations you should take a look at the NumPy package and many other packages for mathematical and statistical operations supplied by the SciPy project. See . Python provides tools that may help on those rare occasions when you really *do* want to know the exact value of a float. The :meth:`float.as_integer_ratio` method expresses the value of a float as a -fraction:: +fraction: + +.. doctest:: >>> x = 3.14159 >>> x.as_integer_ratio() (3537115888337719, 1125899906842624) Since the ratio is exact, it can be used to losslessly recreate the -original value:: +original value: + +.. doctest:: >>> x == 3537115888337719 / 1125899906842624 True The :meth:`float.hex` method expresses a float in hexadecimal (base -16), again giving the exact value stored by your computer:: +16), again giving the exact value stored by your computer: + +.. doctest:: >>> x.hex() '0x1.921f9f01b866ep+1' This precise hexadecimal representation can be used to reconstruct -the float value exactly:: +the float value exactly: + +.. doctest:: >>> x == float.fromhex('0x1.921f9f01b866ep+1') True @@ -186,17 +217,43 @@ Since the representation is exact, it is useful for reliably porting values across different versions of Python (platform independence) and exchanging data with other languages that support the same format (such as Java and C99). 
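
To illustrate the data-exchange point (a sketch using only the standard
:mod:`struct` module, applied to the same value ``x = 3.14159`` shown above),
the raw IEEE 754 binary64 bytes of a float can be read out directly:

.. doctest::

   >>> import struct
   >>> struct.pack('>d', 3.14159).hex()   # big-endian IEEE 754 double
   '400921f9f01b866e'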
-Another helpful tool is the :func:`math.fsum` function which helps mitigate -loss-of-precision during summation. It tracks "lost digits" as values are -added onto a running total. That can make a difference in overall accuracy -so that the errors do not accumulate to the point where they affect the -final total: +Another helpful tool is the :func:`sum` function which helps mitigate +loss-of-precision during summation. It uses extended precision for +intermediate rounding steps as values are added onto a running total. +That can make a difference in overall accuracy so that the errors do not +accumulate to the point where they affect the final total: - >>> sum([0.1] * 10) == 1.0 +.. doctest:: + + >>> 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 == 1.0 False - >>> math.fsum([0.1] * 10) == 1.0 + >>> sum([0.1] * 10) == 1.0 True +The :func:`math.fsum()` goes further and tracks all of the "lost digits" +as values are added onto a running total so that the result has only a +single rounding. This is slower than :func:`sum` but will be more +accurate in uncommon cases where large magnitude inputs mostly cancel +each other out leaving a final sum near zero: + +.. doctest:: + + >>> arr = [-0.10430216751806065, -266310978.67179024, 143401161448607.16, + ... -143401161400469.7, 266262841.31058735, -0.003244936839808227] + >>> float(sum(map(Fraction, arr))) # Exact summation with single rounding + 8.042173697819788e-13 + >>> math.fsum(arr) # Single rounding + 8.042173697819788e-13 + >>> sum(arr) # Multiple roundings in extended precision + 8.042178034628478e-13 + >>> total = 0.0 + >>> for x in arr: + ... total += x # Multiple roundings in standard precision + ... + >>> total # Straight addition has no correct digits! + -0.0051575902860057365 + + .. _tut-fp-error: Representation Error @@ -211,12 +268,14 @@ decimal fractions cannot be represented exactly as binary (base 2) fractions. This is the chief reason why Python (or Perl, C, C++, Java, Fortran, and many others) often won't display the exact decimal number you expect. -Why is that? 1/10 is not exactly representable as a binary fraction. Almost all -machines today (November 2000) use IEEE-754 floating point arithmetic, and -almost all platforms map Python floats to IEEE-754 "double precision". 754 -doubles contain 53 bits of precision, so on input the computer strives to -convert 0.1 to the closest fraction it can of the form *J*/2**\ *N* where *J* is -an integer containing exactly 53 bits. Rewriting :: +Why is that? 1/10 is not exactly representable as a binary fraction. Since at +least 2000, almost all machines use IEEE 754 binary floating-point arithmetic, +and almost all platforms map Python floats to IEEE 754 binary64 "double +precision" values. IEEE 754 binary64 values contain 53 bits of precision, so +on input the computer strives to convert 0.1 to the closest fraction it can of +the form *J*/2**\ *N* where *J* is an integer containing exactly 53 bits. +Rewriting +:: 1 / 10 ~= J / (2**N) @@ -225,25 +284,34 @@ as :: J ~= 2**N / 10 and recalling that *J* has exactly 53 bits (is ``>= 2**52`` but ``< 2**53``), -the best value for *N* is 56:: +the best value for *N* is 56: + +.. doctest:: >>> 2**52 <= 2**56 // 10 < 2**53 True That is, 56 is the only value for *N* that leaves *J* with exactly 53 bits. The -best possible value for *J* is then that quotient rounded:: +best possible value for *J* is then that quotient rounded: + +.. 
doctest:: >>> q, r = divmod(2**56, 10) >>> r 6 Since the remainder is more than half of 10, the best approximation is obtained -by rounding up:: +by rounding up: + +.. doctest:: + + >>> q+1 7205759403792794 -Therefore the best possible approximation to 1/10 in 754 double precision is:: +Therefore the best possible approximation to 1/10 in IEEE 754 double precision +is:: 7205759403792794 / 2 ** 56 @@ -256,13 +324,17 @@ if we had not rounded up, the quotient would have been a little bit smaller than 1/10. But in no case can it be *exactly* 1/10! So the computer never "sees" 1/10: what it sees is the exact fraction given -above, the best 754 double approximation it can get:: +above, the best IEEE 754 double approximation it can get: + +.. doctest:: >>> 0.1 * 2 ** 55 3602879701896397.0 If we multiply that fraction by 10\*\*55, we can see the value out to -55 decimal digits:: +55 decimal digits: + +.. doctest:: >>> 3602879701896397 * 10 ** 55 // 2 ** 55 1000000000000000055511151231257827021181583404541015625 @@ -270,13 +342,17 @@ If we multiply that fraction by 10\*\*55, we can see the value out to meaning that the exact number stored in the computer is equal to the decimal value 0.1000000000000000055511151231257827021181583404541015625. Instead of displaying the full decimal value, many languages (including -older versions of Python), round the result to 17 significant digits:: +older versions of Python), round the result to 17 significant digits: + +.. doctest:: >>> format(0.1, '.17f') '0.10000000000000001' The :mod:`fractions` and :mod:`decimal` modules make these calculations -easy:: +easy: + +.. doctest:: >>> from decimal import Decimal >>> from fractions import Fraction diff --git a/Doc/tutorial/inputoutput.rst b/Doc/tutorial/inputoutput.rst index 3581b3727a53eac..fe9ca9ccb9c7e0d 100644 --- a/Doc/tutorial/inputoutput.rst +++ b/Doc/tutorial/inputoutput.rst @@ -15,7 +15,7 @@ Fancier Output Formatting ========================= So far we've encountered two ways of writing values: *expression statements* and -the :func:`print` function. (A third way is using the :meth:`write` method +the :func:`print` function. (A third way is using the :meth:`~io.TextIOBase.write` method of file objects; the standard output file can be referenced as ``sys.stdout``. See the Library Reference for more information on this.) @@ -285,8 +285,8 @@ Reading and Writing Files ========================= .. index:: - builtin: open - object: file + pair: built-in function; open + pair: object; file :func:`open` returns a :term:`file object`, and is most commonly used with two positional arguments and one keyword argument: @@ -456,8 +456,8 @@ to the very file end with ``seek(0, 2)``) and the only valid *offset* values are those returned from the ``f.tell()``, or zero. Any other *offset* value produces undefined behaviour. -File objects have some additional methods, such as :meth:`~file.isatty` and -:meth:`~file.truncate` which are less frequently used; consult the Library +File objects have some additional methods, such as :meth:`~io.IOBase.isatty` and +:meth:`~io.IOBase.truncate` which are less frequently used; consult the Library Reference for a complete guide to file objects. @@ -466,10 +466,10 @@ Reference for a complete guide to file objects. Saving structured data with :mod:`json` --------------------------------------- -.. index:: module: json +.. index:: pair: module; json Strings can easily be written to and read from a file. 
Numbers take a bit more -effort, since the :meth:`read` method only returns strings, which will have to +effort, since the :meth:`~io.TextIOBase.read` method only returns strings, which will have to be passed to a function like :func:`int`, which takes a string like ``'123'`` and returns its numeric value 123. When you want to save more complex data types like nested lists and dictionaries, parsing and serializing by hand diff --git a/Doc/tutorial/interactive.rst b/Doc/tutorial/interactive.rst index c0eb1feec4eb4d1..4e054c4e6c2c326 100644 --- a/Doc/tutorial/interactive.rst +++ b/Doc/tutorial/interactive.rst @@ -23,7 +23,7 @@ Python statement names, the current local variables, and the available module names. For dotted expressions such as ``string.a``, it will evaluate the expression up to the final ``'.'`` and then suggest completions from the attributes of the resulting object. Note that this may execute -application-defined code if an object with a :meth:`__getattr__` method +application-defined code if an object with a :meth:`~object.__getattr__` method is part of the expression. The default configuration also saves your history into a file named :file:`.python_history` in your user directory. The history will be available again during the next interactive interpreter @@ -51,4 +51,4 @@ bpython_. .. _GNU Readline: https://tiswww.case.edu/php/chet/readline/rltop.html .. _IPython: https://ipython.org/ -.. _bpython: https://www.bpython-interpreter.org/ +.. _bpython: https://bpython-interpreter.org/ diff --git a/Doc/tutorial/interpreter.rst b/Doc/tutorial/interpreter.rst index b71c61089e6dc19..42ebf2b3d294a8b 100644 --- a/Doc/tutorial/interpreter.rst +++ b/Doc/tutorial/interpreter.rst @@ -10,13 +10,13 @@ Using the Python Interpreter Invoking the Interpreter ======================== -The Python interpreter is usually installed as :file:`/usr/local/bin/python3.12` +The Python interpreter is usually installed as :file:`/usr/local/bin/python3.13` on those machines where it is available; putting :file:`/usr/local/bin` in your Unix shell's search path makes it possible to start it by typing the command: .. code-block:: text - python3.12 + python3.13 to the shell. [#]_ Since the choice of the directory where the interpreter lives is an installation option, other places are possible; check with your local @@ -24,7 +24,7 @@ Python guru or system administrator. (E.g., :file:`/usr/local/python` is a popular alternative location.) On Windows machines where you have installed Python from the :ref:`Microsoft Store -`, the :file:`python3.12` command will be available. If you have +`, the :file:`python3.13` command will be available. If you have the :ref:`py.exe launcher ` installed, you can use the :file:`py` command. See :ref:`setting-envvars` for other ways to launch Python. @@ -97,8 +97,8 @@ before printing the first prompt: .. code-block:: shell-session - $ python3.12 - Python 3.12 (default, April 4 2022, 09:25:04) + $ python3.13 + Python 3.13 (default, April 4 2023, 09:25:04) [GCC 10.2.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> diff --git a/Doc/tutorial/introduction.rst b/Doc/tutorial/introduction.rst index 558b1c3eec60ede..4536ab9486d39cf 100644 --- a/Doc/tutorial/introduction.rst +++ b/Doc/tutorial/introduction.rst @@ -52,8 +52,8 @@ Numbers The interpreter acts as a simple calculator: you can type an expression at it and it will write the value. 
Expression syntax is straightforward: the -operators ``+``, ``-``, ``*`` and ``/`` work just like in most other languages -(for example, Pascal or C); parentheses (``()``) can be used for grouping. +operators ``+``, ``-``, ``*`` and ``/`` can be used to perform +arithmetic; parentheses (``()``) can be used for grouping. For example:: >>> 2 + 2 @@ -138,16 +138,25 @@ and uses the ``j`` or ``J`` suffix to indicate the imaginary part .. _tut-strings: -Strings -------- +Text +---- -Besides numbers, Python can also manipulate strings, which can be expressed -in several ways. They can be enclosed in single quotes (``'...'``) or -double quotes (``"..."``) with the same result [#]_. ``\`` can be used -to escape quotes:: +Python can manipulate text (represented by type :class:`str`, so-called +"strings") as well as numbers. This includes characters "``!``", words +"``rabbit``", names "``Paris``", sentences "``Got your back.``", etc. +"``Yay! :)``". They can be enclosed in single quotes (``'...'``) or double +quotes (``"..."``) with the same result [#]_. >>> 'spam eggs' # single quotes 'spam eggs' + >>> "Paris rabbit got your back :)! Yay!" # double quotes + 'Paris rabbit got your back :)! Yay!' + >>> '1975' # digits and numerals enclosed in quotes are also strings + '1975' + +To quote a quote, we need to "escape" it, by preceding it with ``\``. +Alternatively, we can use the other type of quotation marks:: + >>> 'doesn\'t' # use \' to escape the single quote... "doesn't" >>> "doesn't" # ...or use double quotes instead @@ -159,23 +168,14 @@ to escape quotes:: >>> '"Isn\'t," they said.' '"Isn\'t," they said.' -In the interactive interpreter, the output string is enclosed in quotes and -special characters are escaped with backslashes. While this might sometimes -look different from the input (the enclosing quotes could change), the two -strings are equivalent. The string is enclosed in double quotes if -the string contains a single quote and no double quotes, otherwise it is -enclosed in single quotes. The :func:`print` function produces a more -readable output, by omitting the enclosing quotes and by printing escaped -and special characters:: +In the Python shell, the string definition and output string can look +different. The :func:`print` function produces a more readable output, by +omitting the enclosing quotes and by printing escaped and special characters:: - >>> '"Isn\'t," they said.' - '"Isn\'t," they said.' - >>> print('"Isn\'t," they said.') - "Isn't," they said. >>> s = 'First line.\nSecond line.' # \n means newline - >>> s # without print(), \n is included in the output + >>> s # without print(), special characters are included in the string 'First line.\nSecond line.' - >>> print(s) # with print(), \n produces a new line + >>> print(s) # with print(), special characters are interpreted, so \n produces new line First line. Second line. @@ -189,6 +189,11 @@ the first quote:: >>> print(r'C:\some\name') # note the r before the quote C:\some\name +There is one subtle aspect to raw strings: a raw string may not end in +an odd number of ``\`` characters; see +:ref:`the FAQ entry ` for more information +and workarounds. + String literals can span multiple lines. One way is using triple-quotes: ``"""..."""`` or ``'''...'''``. End of lines are automatically included in the string, but it's possible to prevent this by adding a ``\`` at @@ -269,7 +274,7 @@ Indices may also be negative numbers, to start counting from the right:: Note that since -0 is the same as 0, negative indices start from -1. 
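For instance, assuming ``word = 'Python'`` as in the surrounding examples, negative indices count back from the end of the string::

   >>> word = 'Python'
   >>> word[-1]   # last character
   'n'
   >>> word[-2]   # second-last character
   'o'
   >>> word[-6]
   'P'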
In addition to indexing, *slicing* is also supported. While indexing is used -to obtain individual characters, *slicing* allows you to obtain substring:: +to obtain individual characters, *slicing* allows you to obtain a substring:: >>> word[0:2] # characters from position 0 (included) to 2 (excluded) 'Py' @@ -423,7 +428,7 @@ type, i.e. it is possible to change their content:: [1, 8, 27, 64, 125] You can also add new items at the end of the list, by using -the :meth:`~list.append` *method* (we will see more about methods later):: +the :meth:`!list.append` *method* (we will see more about methods later):: >>> cubes.append(216) # add the cube of 6 >>> cubes.append(7 ** 3) # and the cube of 7 @@ -475,7 +480,7 @@ First Steps Towards Programming Of course, we can use Python for more complicated tasks than adding two and two together. For instance, we can write an initial sub-sequence of the -`Fibonacci series `_ +`Fibonacci series `_ as follows:: >>> # Fibonacci series: diff --git a/Doc/tutorial/modules.rst b/Doc/tutorial/modules.rst index ad70d92994af49b..bf9e8e0b7b80665 100644 --- a/Doc/tutorial/modules.rst +++ b/Doc/tutorial/modules.rst @@ -183,7 +183,7 @@ The Module Search Path .. index:: triple: module; search; path -When a module named :mod:`spam` is imported, the interpreter first searches for +When a module named :mod:`!spam` is imported, the interpreter first searches for a built-in module with that name. These module names are listed in :data:`sys.builtin_module_names`. If not found, it then searches for a file named :file:`spam.py` in a list of directories given by the variable @@ -264,7 +264,7 @@ Some tips for experts: Standard Modules ================ -.. index:: module: sys +.. index:: pair: module; sys Python comes with a library of standard modules, described in a separate document, the Python Library Reference ("Library Reference" hereafter). Some @@ -345,7 +345,7 @@ Without arguments, :func:`dir` lists the names you have defined currently:: Note that it lists all types of names: variables, modules, functions, etc. -.. index:: module: builtins +.. index:: pair: module; builtins :func:`dir` does not list the names of built-in functions and variables. If you want a list of those, they are defined in the standard module @@ -389,7 +389,7 @@ Packages ======== Packages are a way of structuring Python's module namespace by using "dotted -module names". For example, the module name :mod:`A.B` designates a submodule +module names". For example, the module name :mod:`!A.B` designates a submodule named ``B`` in a package named ``A``. Just like the use of modules saves the authors of different modules from having to worry about each other's global variable names, the use of dotted module names saves the authors of multi-module @@ -438,7 +438,7 @@ When importing the package, Python searches through the directories on The :file:`__init__.py` files are required to make Python treat directories containing the file as packages. This prevents directories with a common name, -such as ``string``, unintentionally hiding valid modules that occur later +such as ``string``, from unintentionally hiding valid modules that occur later on the module search path. In the simplest case, :file:`__init__.py` can just be an empty file, but it can also execute initialization code for the package or set the ``__all__`` variable, described later. @@ -448,7 +448,7 @@ example:: import sound.effects.echo -This loads the submodule :mod:`sound.effects.echo`. 
It must be referenced with +This loads the submodule :mod:`!sound.effects.echo`. It must be referenced with its full name. :: sound.effects.echo.echofilter(input, output, delay=0.7, atten=4) @@ -457,7 +457,7 @@ An alternative way of importing the submodule is:: from sound.effects import echo -This also loads the submodule :mod:`echo`, and makes it available without its +This also loads the submodule :mod:`!echo`, and makes it available without its package prefix, so it can be used as follows:: echo.echofilter(input, output, delay=0.7, atten=4) @@ -466,8 +466,8 @@ Yet another variation is to import the desired function or variable directly:: from sound.effects.echo import echofilter -Again, this loads the submodule :mod:`echo`, but this makes its function -:func:`echofilter` directly available:: +Again, this loads the submodule :mod:`!echo`, but this makes its function +:func:`!echofilter` directly available:: echofilter(input, output, delay=0.7, atten=4) @@ -510,11 +510,27 @@ code:: __all__ = ["echo", "surround", "reverse"] This would mean that ``from sound.effects import *`` would import the three -named submodules of the :mod:`sound.effects` package. +named submodules of the :mod:`!sound.effects` package. + +Be aware that submodules might become shadowed by locally defined names. For +example, if you added a ``reverse`` function to the +:file:`sound/effects/__init__.py` file, the ``from sound.effects import *`` +would only import the two submodules ``echo`` and ``surround``, but *not* the +``reverse`` submodule, because it is shadowed by the locally defined +``reverse`` function:: + + __all__ = [ + "echo", # refers to the 'echo.py' file + "surround", # refers to the 'surround.py' file + "reverse", # !!! refers to the 'reverse' function now !!! + ] + + def reverse(msg: str): # <-- this name shadows the 'reverse.py' submodule + return msg[::-1] # in the case of a 'from sound.effects import *' If ``__all__`` is not defined, the statement ``from sound.effects import *`` -does *not* import all submodules from the package :mod:`sound.effects` into the -current namespace; it only ensures that the package :mod:`sound.effects` has +does *not* import all submodules from the package :mod:`!sound.effects` into the +current namespace; it only ensures that the package :mod:`!sound.effects` has been imported (possibly running any initialization code in :file:`__init__.py`) and then imports whatever names are defined in the package. This includes any names defined (and submodules explicitly loaded) by :file:`__init__.py`. It @@ -525,8 +541,8 @@ previous :keyword:`import` statements. Consider this code:: import sound.effects.surround from sound.effects import * -In this example, the :mod:`echo` and :mod:`surround` modules are imported in the -current namespace because they are defined in the :mod:`sound.effects` package +In this example, the :mod:`!echo` and :mod:`!surround` modules are imported in the +current namespace because they are defined in the :mod:`!sound.effects` package when the ``from...import`` statement is executed. (This also works when ``__all__`` is defined.) @@ -545,15 +561,15 @@ packages. Intra-package References ------------------------ -When packages are structured into subpackages (as with the :mod:`sound` package +When packages are structured into subpackages (as with the :mod:`!sound` package in the example), you can use absolute imports to refer to submodules of siblings -packages. 
For example, if the module :mod:`sound.filters.vocoder` needs to use -the :mod:`echo` module in the :mod:`sound.effects` package, it can use ``from +packages. For example, if the module :mod:`!sound.filters.vocoder` needs to use +the :mod:`!echo` module in the :mod:`!sound.effects` package, it can use ``from sound.effects import echo``. You can also write relative imports, with the ``from module import name`` form of import statement. These imports use leading dots to indicate the current and -parent packages involved in the relative import. From the :mod:`surround` +parent packages involved in the relative import. From the :mod:`!surround` module for example, you might use:: from . import echo diff --git a/Doc/tutorial/stdlib.rst b/Doc/tutorial/stdlib.rst index 4f5ada90eb57bc7..63f4b5e1ce02073 100644 --- a/Doc/tutorial/stdlib.rst +++ b/Doc/tutorial/stdlib.rst @@ -24,7 +24,7 @@ Be sure to use the ``import os`` style instead of ``from os import *``. This will keep :func:`os.open` from shadowing the built-in :func:`open` function which operates much differently. -.. index:: builtin: help +.. index:: pair: built-in function; help The built-in :func:`dir` and :func:`help` functions are useful as interactive aids for working with large modules like :mod:`os`:: @@ -153,7 +153,7 @@ The :mod:`random` module provides tools for making random selections:: 'apple' >>> random.sample(range(100), 10) # sampling without replacement [30, 83, 16, 4, 8, 81, 41, 50, 18, 33] - >>> random.random() # random float + >>> random.random() # random float from the interval [0.0, 1.0) 0.17970987693706186 >>> random.randrange(6) # random integer chosen from range(6) 4 diff --git a/Doc/tutorial/stdlib2.rst b/Doc/tutorial/stdlib2.rst index 0c101c1f2072350..33f311db3a24d23 100644 --- a/Doc/tutorial/stdlib2.rst +++ b/Doc/tutorial/stdlib2.rst @@ -394,7 +394,7 @@ point:: >>> sum([Decimal('0.1')]*10) == Decimal('1.0') True - >>> sum([0.1]*10) == 1.0 + >>> 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 == 1.0 False The :mod:`decimal` module provides arithmetic with as much precision as needed:: diff --git a/Doc/tutorial/venv.rst b/Doc/tutorial/venv.rst index 1fdb370b33d5af5..a6dead2eac11f68 100644 --- a/Doc/tutorial/venv.rst +++ b/Doc/tutorial/venv.rst @@ -44,7 +44,7 @@ whichever version you want. To create a virtual environment, decide upon a directory where you want to place it, and run the :mod:`venv` module as a script with the directory path:: - python3 -m venv tutorial-env + python -m venv tutorial-env This will create the ``tutorial-env`` directory if it doesn't exist, and also create directories inside it containing a copy of the Python @@ -98,8 +98,8 @@ Managing Packages with pip ========================== You can install, upgrade, and remove packages using a program called -:program:`pip`. By default ``pip`` will install packages from the Python -Package Index, . You can browse the Python +:program:`pip`. By default ``pip`` will install packages from the `Python +Package Index `_. You can browse the Python Package Index by going to it in your web browser. ``pip`` has a number of subcommands: "install", "uninstall", @@ -207,4 +207,6 @@ necessary packages with ``install -r``: ``pip`` has many more options. Consult the :ref:`installing-index` guide for complete documentation for ``pip``. When you've written a package and want to make it available on the Python Package Index, -consult the :ref:`distributing-index` guide. +consult the `Python packaging user guide`_. + +.. 
_Python Packaging User Guide: https://packaging.python.org/en/latest/tutorials/packaging-projects/ diff --git a/Doc/using/cmdline.rst b/Doc/using/cmdline.rst index 2a4d070ec057dfd..39c8d114f1e2c5e 100644 --- a/Doc/using/cmdline.rst +++ b/Doc/using/cmdline.rst @@ -59,7 +59,7 @@ all consecutive arguments will end up in :data:`sys.argv` -- note that the first element, subscript zero (``sys.argv[0]``), is a string reflecting the program's source. -.. cmdoption:: -c +.. option:: -c Execute the Python code in *command*. *command* can be one or more statements separated by newlines, with significant leading whitespace as in @@ -72,7 +72,7 @@ source. .. audit-event:: cpython.run_command command cmdoption-c -.. cmdoption:: -m +.. option:: -m Search :data:`sys.path` for the named module and execute its contents as the :mod:`__main__` module. @@ -103,13 +103,13 @@ source. :option:`-I` option can be used to run the script in isolated mode where :data:`sys.path` contains neither the current directory nor the user's - site-packages directory. All :envvar:`PYTHON*` environment variables are + site-packages directory. All ``PYTHON*`` environment variables are ignored, too. Many standard library modules contain code that is invoked on their execution as a script. An example is the :mod:`timeit` module:: - python -m timeit -s 'setup here' 'benchmarked code here' + python -m timeit -s "setup here" "benchmarked code here" python -m timeit -h # for details .. audit-event:: cpython.run_module module-name cmdoption-m @@ -161,7 +161,7 @@ source. :option:`-I` option can be used to run the script in isolated mode where :data:`sys.path` contains neither the script's directory nor the user's - site-packages directory. All :envvar:`PYTHON*` environment variables are + site-packages directory. All ``PYTHON*`` environment variables are ignored, too. .. audit-event:: cpython.run_file filename @@ -188,35 +188,35 @@ automatically enabled, if available on your platform (see Generic options ~~~~~~~~~~~~~~~ -.. cmdoption:: -? - -h - --help +.. option:: -? + -h + --help Print a short description of all command line options and corresponding environment variables and exit. -.. cmdoption:: --help-env +.. option:: --help-env Print a short description of Python-specific environment variables and exit. .. versionadded:: 3.11 -.. cmdoption:: --help-xoptions +.. option:: --help-xoptions Print a description of implementation-specific :option:`-X` options and exit. .. versionadded:: 3.11 -.. cmdoption:: --help-all +.. option:: --help-all Print complete usage information and exit. .. versionadded:: 3.11 -.. cmdoption:: -V - --version +.. option:: -V + --version Print the Python version number and exit. Example output could be: @@ -240,7 +240,7 @@ Generic options Miscellaneous options ~~~~~~~~~~~~~~~~~~~~~ -.. cmdoption:: -b +.. option:: -b Issue a warning when comparing :class:`bytes` or :class:`bytearray` with :class:`str` or :class:`bytes` with :class:`int`. Issue an error when the @@ -249,13 +249,13 @@ Miscellaneous options .. versionchanged:: 3.5 Affects comparisons of :class:`bytes` with :class:`int`. -.. cmdoption:: -B +.. option:: -B If given, Python won't try to write ``.pyc`` files on the import of source modules. See also :envvar:`PYTHONDONTWRITEBYTECODE`. -.. cmdoption:: --check-hash-based-pycs default|always|never +.. option:: --check-hash-based-pycs default|always|never Control the validation behavior of hash-based ``.pyc`` files. See :ref:`pyc-invalidation`. 
When set to ``default``, checked and unchecked @@ -269,7 +269,7 @@ Miscellaneous options option. -.. cmdoption:: -d +.. option:: -d Turn on parser debugging output (for expert only). See also the :envvar:`PYTHONDEBUG` environment variable. @@ -278,15 +278,15 @@ Miscellaneous options it's ignored. -.. cmdoption:: -E +.. option:: -E - Ignore all :envvar:`PYTHON*` environment variables, e.g. + Ignore all ``PYTHON*`` environment variables, e.g. :envvar:`PYTHONPATH` and :envvar:`PYTHONHOME`, that might be set. See also the :option:`-P` and :option:`-I` (isolated) options. -.. cmdoption:: -i +.. option:: -i When a script is passed as first argument or the :option:`-c` option is used, enter interactive mode after executing the script or the command, even when @@ -297,20 +297,20 @@ Miscellaneous options raises an exception. See also :envvar:`PYTHONINSPECT`. -.. cmdoption:: -I +.. option:: -I Run Python in isolated mode. This also implies :option:`-E`, :option:`-P` and :option:`-s` options. In isolated mode :data:`sys.path` contains neither the script's directory nor - the user's site-packages directory. All :envvar:`PYTHON*` environment + the user's site-packages directory. All ``PYTHON*`` environment variables are ignored, too. Further restrictions may be imposed to prevent the user from injecting malicious code. .. versionadded:: 3.4 -.. cmdoption:: -O +.. option:: -O Remove assert statements and any code conditional on the value of :const:`__debug__`. Augment the filename for compiled @@ -321,7 +321,7 @@ Miscellaneous options Modify ``.pyc`` filenames according to :pep:`488`. -.. cmdoption:: -OO +.. option:: -OO Do :option:`-O` and also discard docstrings. Augment the filename for compiled (:term:`bytecode`) files by adding ``.opt-2`` before the @@ -331,7 +331,7 @@ Miscellaneous options Modify ``.pyc`` filenames according to :pep:`488`. -.. cmdoption:: -P +.. option:: -P Don't prepend a potentially unsafe path to :data:`sys.path`: @@ -348,21 +348,21 @@ Miscellaneous options .. versionadded:: 3.11 -.. cmdoption:: -q +.. option:: -q Don't display the copyright and version messages even in interactive mode. .. versionadded:: 3.2 -.. cmdoption:: -R +.. option:: -R Turn on hash randomization. This option only has an effect if the :envvar:`PYTHONHASHSEED` environment variable is set to ``0``, since hash randomization is enabled by default. On previous versions of Python, this option turns on hash randomization, - so that the :meth:`__hash__` values of str and bytes objects + so that the :meth:`~object.__hash__` values of str and bytes objects are "salted" with an unpredictable random value. Although they remain constant within an individual Python process, they are not predictable between repeated invocations of Python. @@ -370,7 +370,7 @@ Miscellaneous options Hash randomization is intended to provide protection against a denial-of-service caused by carefully chosen inputs that exploit the worst case performance of a dict construction, O(n\ :sup:`2`) complexity. See - http://www.ocert.org/advisories/ocert-2011-003.html for details. + http://ocert.org/advisories/ocert-2011-003.html for details. :envvar:`PYTHONHASHSEED` allows you to set a fixed value for the hash seed secret. @@ -381,7 +381,7 @@ Miscellaneous options .. versionadded:: 3.2.3 -.. cmdoption:: -s +.. option:: -s Don't add the :data:`user site-packages directory ` to :data:`sys.path`. @@ -391,7 +391,7 @@ Miscellaneous options :pep:`370` -- Per user site-packages directory -.. cmdoption:: -S +.. 
option:: -S Disable the import of the module :mod:`site` and the site-dependent manipulations of :data:`sys.path` that it entails. Also disable these @@ -399,7 +399,7 @@ Miscellaneous options :func:`site.main` if you want them to be triggered). -.. cmdoption:: -u +.. option:: -u Force the stdout and stderr streams to be unbuffered. This option has no effect on the stdin stream. @@ -410,7 +410,7 @@ Miscellaneous options The text layer of the stdout and stderr streams now is unbuffered. -.. cmdoption:: -v +.. option:: -v Print a message each time a module is initialized, showing the place (filename or built-in module) from which it is loaded. When given twice @@ -425,7 +425,7 @@ Miscellaneous options .. _using-on-warnings: -.. cmdoption:: -W arg +.. option:: -W arg Warning control. Python's warning machinery by default prints warning messages to :data:`sys.stderr`. @@ -484,18 +484,19 @@ Miscellaneous options details. -.. cmdoption:: -x +.. option:: -x Skip the first line of the source, allowing use of non-Unix forms of ``#!cmd``. This is intended for a DOS specific hack only. -.. cmdoption:: -X +.. option:: -X Reserved for various implementation-specific options. CPython currently defines the following possible values: - * ``-X faulthandler`` to enable :mod:`faulthandler`; + * ``-X faulthandler`` to enable :mod:`faulthandler`. + See also :envvar:`PYTHONFAULTHANDLER`. * ``-X showrefcount`` to output the total reference count and number of used memory blocks when the program finishes or after each statement in the interactive interpreter. This only works on :ref:`debug builds @@ -503,8 +504,9 @@ Miscellaneous options * ``-X tracemalloc`` to start tracing Python memory allocations using the :mod:`tracemalloc` module. By default, only the most recent frame is stored in a traceback of a trace. Use ``-X tracemalloc=NFRAME`` to start - tracing with a traceback limit of *NFRAME* frames. See the - :func:`tracemalloc.start` for more information. + tracing with a traceback limit of *NFRAME* frames. + See :func:`tracemalloc.start` and :envvar:`PYTHONTRACEMALLOC` + for more information. * ``-X int_max_str_digits`` configures the :ref:`integer string conversion length limitation `. See also :envvar:`PYTHONINTMAXSTRDIGITS`. @@ -519,6 +521,7 @@ Miscellaneous options * ``-X utf8`` enables the :ref:`Python UTF-8 Mode `. ``-X utf8=0`` explicitly disables :ref:`Python UTF-8 Mode ` (even when it would otherwise activate automatically). + See also :envvar:`PYTHONUTF8`. * ``-X pycache_prefix=PATH`` enables writing ``.pyc`` files to a parallel tree rooted at the given directory instead of to the code tree. See also :envvar:`PYTHONPYCACHEPREFIX`. @@ -532,17 +535,30 @@ Miscellaneous options location indicators when the interpreter displays tracebacks. See also :envvar:`PYTHONNODEBUGRANGES`. * ``-X frozen_modules`` determines whether or not frozen modules are - ignored by the import machinery. A value of "on" means they get - imported and "off" means they are ignored. The default is "on" + ignored by the import machinery. A value of ``on`` means they get + imported and ``off`` means they are ignored. The default is ``on`` if this is an installed Python (the normal case). If it's under - development (running from the source tree) then the default is "off". - Note that the "importlib_bootstrap" and "importlib_bootstrap_external" - frozen modules are always used, even if this flag is set to "off". + development (running from the source tree) then the default is ``off``. 
+ Note that the :mod:`!importlib_bootstrap` and + :mod:`!importlib_bootstrap_external` frozen modules are always used, even + if this flag is set to ``off``. See also :envvar:`PYTHON_FROZEN_MODULES`. * ``-X perf`` enables support for the Linux ``perf`` profiler. When this option is provided, the ``perf`` profiler will be able to report Python calls. This option is only available on some platforms and will do nothing if is not supported on the current system. The default value is "off". See also :envvar:`PYTHONPERFSUPPORT` and :ref:`perf_profiling`. + * :samp:`-X cpu_count={n}` overrides :func:`os.cpu_count`, + :func:`os.process_cpu_count`, and :func:`multiprocessing.cpu_count`. + *n* must be greater than or equal to 1. + This option may be useful for users who need to limit CPU resources of a + container system. See also :envvar:`PYTHON_CPU_COUNT`. + If *n* is ``default``, nothing is overridden. + * :samp:`-X presite={package.module}` specifies a module that should be + imported before the :mod:`site` module is executed and before the + :mod:`__main__` module exists. Therefore, the imported module isn't + :mod:`__main__`. This can be used to execute code early during Python + initialization. Python needs to be :ref:`built in debug mode ` + for this option to exist. See also :envvar:`PYTHON_PRESITE`. It also allows passing arbitrary values and retrieving them through the :data:`sys._xoptions` dictionary. @@ -590,11 +606,17 @@ Miscellaneous options .. versionadded:: 3.12 The ``-X perf`` option. + .. versionadded:: 3.13 + The ``-X cpu_count`` option. + + .. versionadded:: 3.13 + The ``-X presite`` option. + Options you shouldn't use ~~~~~~~~~~~~~~~~~~~~~~~~~ -.. cmdoption:: -J +.. option:: -J Reserved for use by Jython_. @@ -808,8 +830,8 @@ conflict. Defines the :data:`user base directory `, which is used to compute the path of the :data:`user site-packages directory ` - and :ref:`Distutils installation paths ` for - ``python setup.py install --user``. + and :ref:`installation paths ` for + ``python -m pip install --user``. .. seealso:: @@ -848,9 +870,10 @@ conflict. If this environment variable is set to a non-empty string, :func:`faulthandler.enable` is called at startup: install a handler for - :const:`SIGSEGV`, :const:`SIGFPE`, :const:`SIGABRT`, :const:`SIGBUS` and - :const:`SIGILL` signals to dump the Python traceback. This is equivalent to - :option:`-X` ``faulthandler`` option. + :const:`~signal.SIGSEGV`, :const:`~signal.SIGFPE`, + :const:`~signal.SIGABRT`, :const:`~signal.SIGBUS` and + :const:`~signal.SIGILL` signals to dump the Python traceback. + This is equivalent to :option:`-X` ``faulthandler`` option. .. versionadded:: 3.3 @@ -861,7 +884,9 @@ conflict. Python memory allocations using the :mod:`tracemalloc` module. The value of the variable is the maximum number of frames stored in a traceback of a trace. For example, ``PYTHONTRACEMALLOC=1`` stores only the most recent - frame. See the :func:`tracemalloc.start` for more information. + frame. + See the :func:`tracemalloc.start` function for more information. + This is equivalent to setting the :option:`-X` ``tracemalloc`` option. .. versionadded:: 3.4 @@ -869,8 +894,8 @@ conflict. .. envvar:: PYTHONPROFILEIMPORTTIME If this environment variable is set to a non-empty string, Python will - show how long each import takes. This is exactly equivalent to setting - ``-X importtime`` on the command line. + show how long each import takes. + This is equivalent to setting the :option:`-X` ``importtime`` option. .. 
versionadded:: 3.7 @@ -892,11 +917,14 @@ conflict. * ``default``: use the :ref:`default memory allocators `. * ``malloc``: use the :c:func:`malloc` function of the C library - for all domains (:c:data:`PYMEM_DOMAIN_RAW`, :c:data:`PYMEM_DOMAIN_MEM`, - :c:data:`PYMEM_DOMAIN_OBJ`). + for all domains (:c:macro:`PYMEM_DOMAIN_RAW`, :c:macro:`PYMEM_DOMAIN_MEM`, + :c:macro:`PYMEM_DOMAIN_OBJ`). * ``pymalloc``: use the :ref:`pymalloc allocator ` for - :c:data:`PYMEM_DOMAIN_MEM` and :c:data:`PYMEM_DOMAIN_OBJ` domains and use - the :c:func:`malloc` function for the :c:data:`PYMEM_DOMAIN_RAW` domain. + :c:macro:`PYMEM_DOMAIN_MEM` and :c:macro:`PYMEM_DOMAIN_OBJ` domains and use + the :c:func:`malloc` function for the :c:macro:`PYMEM_DOMAIN_RAW` domain. + * ``mimalloc``: use the :ref:`mimalloc allocator ` for + :c:macro:`PYMEM_DOMAIN_MEM` and :c:macro:`PYMEM_DOMAIN_OBJ` domains and use + the :c:func:`malloc` function for the :c:macro:`PYMEM_DOMAIN_RAW` domain. Install :ref:`debug hooks `: @@ -904,6 +932,7 @@ conflict. allocators `. * ``malloc_debug``: same as ``malloc`` but also install debug hooks. * ``pymalloc_debug``: same as ``pymalloc`` but also install debug hooks. + * ``mimalloc_debug``: same as ``mimalloc`` but also install debug hooks. .. versionchanged:: 3.7 Added the ``"default"`` allocator. @@ -1012,6 +1041,7 @@ conflict. If this environment variable is set to a non-empty string, enable :ref:`Python Development Mode `, introducing additional runtime checks that are too expensive to be enabled by default. + This is equivalent to setting the :option:`-X` ``dev`` option. .. versionadded:: 3.7 @@ -1057,6 +1087,29 @@ conflict. .. versionadded:: 3.12 +.. envvar:: PYTHON_CPU_COUNT + + If this variable is set to a positive integer, it overrides the return + values of :func:`os.cpu_count` and :func:`os.process_cpu_count`. + + See also the :option:`-X cpu_count <-X>` command-line option. + + .. versionadded:: 3.13 + +.. envvar:: PYTHON_FROZEN_MODULES + + If this variable is set to ``on`` or ``off``, it determines whether or not + frozen modules are ignored by the import machinery. A value of ``on`` means + they get imported and ``off`` means they are ignored. The default is ``on`` + for non-debug builds (the normal case) and ``off`` for debug builds. + Note that the :mod:`!importlib_bootstrap` and + :mod:`!importlib_bootstrap_external` frozen modules are always used, even + if this flag is set to ``off``. + + See also the :option:`-X frozen_modules <-X>` command-line option. + + .. versionadded:: 3.13 + Debug-mode variables ~~~~~~~~~~~~~~~~~~~~ @@ -1066,13 +1119,33 @@ Debug-mode variables If set, Python will dump objects and reference counts still alive after shutting down the interpreter. - Need Python configured with the :option:`--with-trace-refs` build option. + Needs Python configured with the :option:`--with-trace-refs` build option. -.. envvar:: PYTHONDUMPREFSFILE=FILENAME +.. envvar:: PYTHONDUMPREFSFILE If set, Python will dump objects and reference counts still alive - after shutting down the interpreter into a file called *FILENAME*. + after shutting down the interpreter into a file under the path given + as the value to this environment variable. - Need Python configured with the :option:`--with-trace-refs` build option. + Needs Python configured with the :option:`--with-trace-refs` build option. .. versionadded:: 3.11 + +.. 
envvar:: PYTHON_PRESITE + + If this variable is set to a module, that module will be imported + early in the interpreter lifecycle, before the :mod:`site` module is + executed, and before the :mod:`__main__` module is created. + Therefore, the imported module is not treated as :mod:`__main__`. + + This can be used to execute code early during Python initialization. + + To import a submodule, use ``package.module`` as the value, like in + an import statement. + + See also the :option:`-X presite <-X>` command-line option, + which takes precedence over this variable. + + Needs Python configured with the :option:`--with-pydebug` build option. + + .. versionadded:: 3.13 diff --git a/Doc/using/configure.rst b/Doc/using/configure.rst index b99d9bdaa3f007f..b51546e072a3530 100644 --- a/Doc/using/configure.rst +++ b/Doc/using/configure.rst @@ -2,12 +2,100 @@ Configure Python **************** +Build Requirements +================== + +Features and minimum versions required to build CPython: + +* A `C11 `_ compiler. `Optional C11 + features + `_ + are not required. + +* On Windows, Microsoft Visual Studio 2017 or later is required. + +* Support for `IEEE 754 `_ floating + point numbers and `floating point Not-a-Number (NaN) + `_. + +* Support for threads. + +* OpenSSL 1.1.1 is the minimum version and OpenSSL 3.0.9 is the recommended + minimum version for the :mod:`ssl` and :mod:`hashlib` extension modules. + +* SQLite 3.15.2 for the :mod:`sqlite3` extension module. + +* Tcl/Tk 8.5.12 for the :mod:`tkinter` module. + +* Autoconf 2.71 and aclocal 1.16.4 are required to regenerate the + :file:`configure` script. + +.. versionchanged:: 3.13: + Autoconf 2.71, aclocal 1.16.4 and SQLite 3.15.2 are now required. + +.. versionchanged:: 3.11 + C11 compiler, IEEE 754 and NaN support are now required. + On Windows, Visual Studio 2017 or later is required. + Tcl/Tk version 8.5.12 is now required for the :mod:`tkinter` module. + +.. versionchanged:: 3.10 + OpenSSL 1.1.1 is now required. + Require SQLite 3.7.15. + +.. versionchanged:: 3.7 + Thread support and OpenSSL 1.0.2 are now required. + +.. versionchanged:: 3.6 + Selected C99 features are now required, like ```` and ``static + inline`` functions. + +.. versionchanged:: 3.5 + On Windows, Visual Studio 2015 or later is now required. + Tcl/Tk version 8.4 is now required. + +.. versionchanged:: 3.1 + Tcl/Tk version 8.3.1 is now required. + +See also :pep:`7` "Style Guide for C Code" and :pep:`11` "CPython platform +support". + + +Generated files +=============== + +To reduce build dependencies, Python source code contains multiple generated +files. Commands to regenerate all generated files:: + + make regen-all + make regen-stdlib-module-names + make regen-limited-abi + make regen-configure + +The ``Makefile.pre.in`` file documents generated files, their inputs, and tools used +to regenerate them. Search for ``regen-*`` make targets. + +configure script +---------------- + +The ``make regen-configure`` command regenerates the ``aclocal.m4`` file and +the ``configure`` script using the ``Tools/build/regen-configure.sh`` shell +script which uses an Ubuntu container to get the same tools versions and have a +reproducible output. + +The container is optional, the following command can be run locally:: + + autoreconf -ivf -Werror + +The generated files can change depending on the exact ``autoconf-archive``, +``aclocal`` and ``pkg-config`` versions. + + .. 
_configure-options: Configure Options ================= -List all ``./configure`` script options using:: +List all :file:`configure` script options using:: ./configure --help @@ -16,22 +104,22 @@ See also the :file:`Misc/SpecialBuilds.txt` in the Python source distribution. General Options --------------- -.. cmdoption:: --enable-loadable-sqlite-extensions +.. option:: --enable-loadable-sqlite-extensions - Support loadable extensions in the :mod:`_sqlite` extension module (default - is no). + Support loadable extensions in the :mod:`!_sqlite` extension module (default + is no) of the :mod:`sqlite3` module. See the :meth:`sqlite3.Connection.enable_load_extension` method of the :mod:`sqlite3` module. .. versionadded:: 3.6 -.. cmdoption:: --disable-ipv6 +.. option:: --disable-ipv6 Disable IPv6 support (enabled by default if supported), see the :mod:`socket` module. -.. cmdoption:: --enable-big-digits=[15|30] +.. option:: --enable-big-digits=[15|30] Define the size in bits of Python :class:`int` digits: 15 or 30 bits. @@ -41,7 +129,7 @@ General Options See :data:`sys.int_info.bits_per_digit `. -.. cmdoption:: --with-suffix=SUFFIX +.. option:: --with-suffix=SUFFIX Set the Python executable suffix to *SUFFIX*. @@ -54,9 +142,9 @@ General Options The default suffix on WASM platform is one of ``.js``, ``.html`` or ``.wasm``. -.. cmdoption:: --with-tzpath= +.. option:: --with-tzpath= - Select the default time zone search path for :data:`zoneinfo.TZPATH`. + Select the default time zone search path for :const:`zoneinfo.TZPATH`. See the :ref:`Compile-time configuration ` of the :mod:`zoneinfo` module. @@ -66,16 +154,16 @@ General Options .. versionadded:: 3.9 -.. cmdoption:: --without-decimal-contextvar +.. option:: --without-decimal-contextvar Build the ``_decimal`` extension module using a thread-local context rather than a coroutine-local context (default), see the :mod:`decimal` module. - See :data:`decimal.HAVE_CONTEXTVAR` and the :mod:`contextvars` module. + See :const:`decimal.HAVE_CONTEXTVAR` and the :mod:`contextvars` module. .. versionadded:: 3.9 -.. cmdoption:: --with-dbmliborder= +.. option:: --with-dbmliborder= Override order to check db backends for the :mod:`dbm` module @@ -85,7 +173,7 @@ General Options * ``gdbm``; * ``bdb``. -.. cmdoption:: --without-c-locale-coercion +.. option:: --without-c-locale-coercion Disable C locale coercion to a UTF-8 based locale (enabled by default). @@ -93,7 +181,13 @@ General Options See :envvar:`PYTHONCOERCECLOCALE` and the :pep:`538`. -.. cmdoption:: --with-platlibdir=DIRNAME +.. option:: --without-freelists + + Disable all freelists except the empty tuple singleton. + + .. versionadded:: 3.11 + +.. option:: --with-platlibdir=DIRNAME Python library directory name (default is ``lib``). @@ -103,7 +197,7 @@ General Options .. versionadded:: 3.9 -.. cmdoption:: --with-wheel-pkg-dir=PATH +.. option:: --with-wheel-pkg-dir=PATH Directory of wheel packages used by the :mod:`ensurepip` module (none by default). @@ -111,11 +205,11 @@ General Options Some Linux distribution packaging policies recommend against bundling dependencies. For example, Fedora installs wheel packages in the ``/usr/share/python-wheels/`` directory and don't install the - :mod:`ensurepip._bundled` package. + :mod:`!ensurepip._bundled` package. .. versionadded:: 3.10 -.. cmdoption:: --with-pkg-config=[check|yes|no] +.. option:: --with-pkg-config=[check|yes|no] Whether configure should use :program:`pkg-config` to detect build dependencies. @@ -126,21 +220,216 @@ General Options .. 
versionadded:: 3.11 -.. cmdoption:: --enable-pystats +.. option:: --enable-pystats + + Turn on internal Python performance statistics gathering. + + By default, statistics gathering is off. Use ``python3 -X pystats`` command + or set ``PYTHONSTATS=1`` environment variable to turn on statistics + gathering at Python startup. + + At Python exit, dump statistics if statistics gathering was on and not + cleared. + + Effects: + + * Add :option:`-X pystats <-X>` command line option. + * Add :envvar:`!PYTHONSTATS` environment variable. + * Define the ``Py_STATS`` macro. + * Add functions to the :mod:`sys` module: - Turn on internal statistics gathering. + * :func:`!sys._stats_on`: Turns on statistics gathering. + * :func:`!sys._stats_off`: Turns off statistics gathering. + * :func:`!sys._stats_clear`: Clears the statistics. + * :func:`!sys._stats_dump`: Dumps statistics to a file and clears the statistics. The statistics will be dumped to an arbitrary (probably unique) file in - ``/tmp/py_stats/``, or ``C:\temp\py_stats\`` on Windows. + ``/tmp/py_stats/`` (Unix) or ``C:\temp\py_stats\`` (Windows). If that + directory does not exist, results will be printed on stderr. Use ``Tools/scripts/summarize_stats.py`` to read the stats. + Statistics: + + * Opcode: + + * Specialization: success, failure, hit, deferred, miss, deopt, failures; + * Execution count; + * Pair count. + + * Call: + + * Inlined Python calls; + * PyEval calls; + * Frames pushed; + * Frame object created; + * Eval calls: vector, generator, legacy, function VECTORCALL, build class, + slot, function "ex", API, method. + + * Object: + + * incref and decref; + * interpreter incref and decref; + * allocations: all, 512 bytes, 4 kiB, big; + * free; + * to/from free lists; + * dictionary materialized/dematerialized; + * type cache; + * optimization attempts; + * optimization traces created/executed; + * uops executed. + + * Garbage collector: + + * Garbage collections; + * Objects visited; + * Objects collected. + .. versionadded:: 3.11 +.. option:: --disable-gil + + Enables **experimental** support for running Python without the + :term:`global interpreter lock` (GIL). + + See :pep:`703` "Making the Global Interpreter Lock Optional in CPython". + + .. versionadded:: 3.13 + +.. option:: PKG_CONFIG + + Path to ``pkg-config`` utility. + +.. option:: PKG_CONFIG_LIBDIR +.. option:: PKG_CONFIG_PATH + + ``pkg-config`` options. + + +C compiler options +------------------ + +.. option:: CC + + C compiler command. + +.. option:: CFLAGS + + C compiler flags. + +.. option:: CPP + + C preprocessor command. + +.. option:: CPPFLAGS + + C preprocessor flags, e.g. :samp:`-I{include_dir}`. + + +Linker options +-------------- + +.. option:: LDFLAGS + + Linker flags, e.g. :samp:`-L{library_directory}`. + +.. option:: LIBS + + Libraries to pass to the linker, e.g. :samp:`-l{library}`. + +.. option:: MACHDEP + + Name for machine-dependent library files. + + +Options for third-party dependencies +------------------------------------ + +.. versionadded:: 3.11 + +.. option:: BZIP2_CFLAGS +.. option:: BZIP2_LIBS + + C compiler and linker flags to link Python to ``libbz2``, used by :mod:`bz2` + module, overriding ``pkg-config``. + +.. option:: CURSES_CFLAGS +.. option:: CURSES_LIBS + + C compiler and linker flags for ``libncurses`` or ``libncursesw``, used by + :mod:`curses` module, overriding ``pkg-config``. + +.. option:: GDBM_CFLAGS +.. option:: GDBM_LIBS + + C compiler and linker flags for ``gdbm``. + +.. option:: LIBB2_CFLAGS +.. 
option:: LIBB2_LIBS + + C compiler and linker flags for ``libb2`` (:ref:`BLAKE2 `), + used by :mod:`hashlib` module, overriding ``pkg-config``. + +.. option:: LIBEDIT_CFLAGS +.. option:: LIBEDIT_LIBS + + C compiler and linker flags for ``libedit``, used by :mod:`readline` module, + overriding ``pkg-config``. + +.. option:: LIBFFI_CFLAGS +.. option:: LIBFFI_LIBS + + C compiler and linker flags for ``libffi``, used by :mod:`ctypes` module, + overriding ``pkg-config``. + +.. option:: LIBLZMA_CFLAGS +.. option:: LIBLZMA_LIBS + + C compiler and linker flags for ``liblzma``, used by :mod:`lzma` module, + overriding ``pkg-config``. + +.. option:: LIBREADLINE_CFLAGS +.. option:: LIBREADLINE_LIBS + + C compiler and linker flags for ``libreadline``, used by :mod:`readline` + module, overriding ``pkg-config``. + +.. option:: LIBSQLITE3_CFLAGS +.. option:: LIBSQLITE3_LIBS + + C compiler and linker flags for ``libsqlite3``, used by :mod:`sqlite3` + module, overriding ``pkg-config``. + +.. option:: LIBUUID_CFLAGS +.. option:: LIBUUID_LIBS + + C compiler and linker flags for ``libuuid``, used by :mod:`uuid` module, + overriding ``pkg-config``. + +.. option:: PANEL_CFLAGS +.. option:: PANEL_LIBS + + C compiler and linker flags for ``libpanel`` or ``libpanelw``, used by + :mod:`curses.panel` module, overriding ``pkg-config``. + +.. option:: TCLTK_CFLAGS +.. option:: TCLTK_LIBS + + C compiler and linker flags for TCLTK, overriding ``pkg-config``. + +.. option:: ZLIB_CFLAGS +.. option:: ZLIB_LIBS + + C compiler and linker flags for ``libzlib``, used by :mod:`gzip` module, + overriding ``pkg-config``. + + WebAssembly Options ------------------- -.. cmdoption:: --with-emscripten-target=[browser|node] +.. option:: --with-emscripten-target=[browser|node] Set build flavor for ``wasm32-emscripten``. @@ -149,7 +438,7 @@ WebAssembly Options .. versionadded:: 3.11 -.. cmdoption:: --enable-wasm-dynamic-linking +.. option:: --enable-wasm-dynamic-linking Turn on dynamic linking support for WASM. @@ -158,7 +447,7 @@ WebAssembly Options .. versionadded:: 3.11 -.. cmdoption:: --enable-wasm-pthreads +.. option:: --enable-wasm-pthreads Turn on pthreads support for WASM. @@ -168,14 +457,30 @@ WebAssembly Options Install Options --------------- -.. cmdoption:: --disable-test-modules +.. option:: --prefix=PREFIX + + Install architecture-independent files in PREFIX. On Unix, it + defaults to :file:`/usr/local`. + + This value can be retrieved at runtime using :data:`sys.prefix`. + + As an example, one can use ``--prefix="$HOME/.local/"`` to install + a Python in its home directory. + +.. option:: --exec-prefix=EPREFIX + + Install architecture-dependent files in EPREFIX, defaults to :option:`--prefix`. + + This value can be retrieved at runtime using :data:`sys.exec_prefix`. + +.. option:: --disable-test-modules Don't build nor install test modules, like the :mod:`test` package or the - :mod:`_testcapi` extension module (built and installed by default). + :mod:`!_testcapi` extension module (built and installed by default). .. versionadded:: 3.10 -.. cmdoption:: --with-ensurepip=[upgrade|install|no] +.. option:: --with-ensurepip=[upgrade|install|no] Select the :mod:`ensurepip` command run on Python installation: @@ -194,7 +499,7 @@ Configuring Python using ``--enable-optimizations --with-lto`` (PGO + LTO) is recommended for best performance. The experimental ``--enable-bolt`` flag can also be used to improve performance. -.. 
cmdoption:: --enable-optimizations +.. option:: --enable-optimizations Enable Profile Guided Optimization (PGO) using :envvar:`PROFILE_TASK` (disabled by default). @@ -220,7 +525,10 @@ also be used to improve performance. .. versionadded:: 3.8 -.. cmdoption:: --with-lto=[full|thin|no|yes] + .. versionchanged:: 3.13 + Task failure is no longer ignored silently. + +.. option:: --with-lto=[full|thin|no|yes] Enable Link Time Optimization (LTO) in any build (disabled by default). @@ -235,7 +543,7 @@ also be used to improve performance. .. versionchanged:: 3.12 Use ThinLTO as the default optimization policy on Clang if the compiler accepts the flag. -.. cmdoption:: --enable-bolt +.. option:: --enable-bolt Enable usage of the `BOLT post-link binary optimizer `_ (disabled by @@ -250,22 +558,49 @@ also be used to improve performance. is dependent on a combination of the build environment + the other optimization configure args + the CPU architecture, and not all combinations are supported. + BOLT versions before LLVM 16 are known to crash BOLT under some scenarios. + Use of LLVM 16 or newer for BOLT optimization is strongly encouraged. + + The :envvar:`!BOLT_INSTRUMENT_FLAGS` and :envvar:`!BOLT_APPLY_FLAGS` + :program:`configure` variables can be defined to override the default set of + arguments for :program:`llvm-bolt` to instrument and apply BOLT data to + binaries, respectively. + + .. versionadded:: 3.12 + +.. option:: BOLT_APPLY_FLAGS + + Arguments to ``llvm-bolt`` when creating a `BOLT optimized binary + `_. .. versionadded:: 3.12 -.. cmdoption:: --with-computed-gotos +.. option:: BOLT_INSTRUMENT_FLAGS + + Arguments to ``llvm-bolt`` when instrumenting binaries. + + .. versionadded:: 3.12 + +.. option:: --with-computed-gotos Enable computed gotos in evaluation loop (enabled by default on supported compilers). -.. cmdoption:: --without-pymalloc +.. option:: --without-mimalloc + + Disable the fast mimalloc allocator :ref:`mimalloc ` + (enabled by default). + + See also :envvar:`PYTHONMALLOC` environment variable. + +.. option:: --without-pymalloc Disable the specialized Python memory allocator :ref:`pymalloc ` (enabled by default). See also :envvar:`PYTHONMALLOC` environment variable. -.. cmdoption:: --without-doc-strings +.. option:: --without-doc-strings Disable static documentation strings to reduce the memory footprint (enabled by default). Documentation strings defined in Python are not affected. @@ -274,10 +609,15 @@ also be used to improve performance. See the ``PyDoc_STRVAR()`` macro. -.. cmdoption:: --enable-profiling +.. option:: --enable-profiling Enable C-level code profiling with ``gprof`` (disabled by default). +.. option:: --with-strict-overflow + + Add ``-fstrict-overflow`` to the C compiler flags (by default we add + ``-fno-strict-overflow`` instead). + .. _debug-build: @@ -292,7 +632,7 @@ Effects of a debug build: * Display all warnings by default: the list of default warning filters is empty in the :mod:`warnings` module. * Add ``d`` to :data:`sys.abiflags`. -* Add :func:`sys.gettotalrefcount` function. +* Add :func:`!sys.gettotalrefcount` function. * Add :option:`-X showrefcount <-X>` command line option. * Add :option:`-d` command line option and :envvar:`PYTHONDEBUG` environment variable to debug the parser. @@ -314,7 +654,7 @@ Effects of a debug build: * Check that deallocator functions don't change the current exception. * The garbage collector (:func:`gc.collect` function) runs some basic checks on objects consistency. 
- * The :c:macro:`Py_SAFE_DOWNCAST()` macro checks for integer underflow and + * The :c:macro:`!Py_SAFE_DOWNCAST()` macro checks for integer underflow and overflow when downcasting from wide types to narrow types. See also the :ref:`Python Development Mode ` and the @@ -323,34 +663,39 @@ See also the :ref:`Python Development Mode ` and the .. versionchanged:: 3.8 Release builds and debug builds are now ABI compatible: defining the ``Py_DEBUG`` macro no longer implies the ``Py_TRACE_REFS`` macro (see the - :option:`--with-trace-refs` option), which introduces the only ABI - incompatibility. + :option:`--with-trace-refs` option). Debug options ------------- -.. cmdoption:: --with-pydebug +.. option:: --with-pydebug :ref:`Build Python in debug mode `: define the ``Py_DEBUG`` macro (disabled by default). -.. cmdoption:: --with-trace-refs +.. option:: --with-trace-refs Enable tracing references for debugging purpose (disabled by default). Effects: * Define the ``Py_TRACE_REFS`` macro. - * Add :func:`sys.getobjects` function. + * Add :func:`!sys.getobjects` function. * Add :envvar:`PYTHONDUMPREFS` environment variable. - This build is not ABI compatible with release build (default build) or debug - build (``Py_DEBUG`` and ``Py_REF_DEBUG`` macros). + The :envvar:`PYTHONDUMPREFS` environment variable can be used to dump + objects and reference counts still alive at Python exit. + + :ref:`Statically allocated objects ` are not traced. + + .. versionchanged:: 3.13 + This build is now ABI compatible with release build and :ref:`debug build + `. .. versionadded:: 3.8 -.. cmdoption:: --with-assertions +.. option:: --with-assertions Build with C assertions enabled (default is no): ``assert(...);`` and ``_PyObject_ASSERT(...);``. @@ -363,11 +708,11 @@ Debug options .. versionadded:: 3.6 -.. cmdoption:: --with-valgrind +.. option:: --with-valgrind Enable Valgrind support (default is no). -.. cmdoption:: --with-dtrace +.. option:: --with-dtrace Enable DTrace support (default is no). @@ -376,19 +721,19 @@ Debug options .. versionadded:: 3.6 -.. cmdoption:: --with-address-sanitizer +.. option:: --with-address-sanitizer Enable AddressSanitizer memory error detector, ``asan`` (default is no). .. versionadded:: 3.6 -.. cmdoption:: --with-memory-sanitizer +.. option:: --with-memory-sanitizer Enable MemorySanitizer allocation error detector, ``msan`` (default is no). .. versionadded:: 3.6 -.. cmdoption:: --with-undefined-behavior-sanitizer +.. option:: --with-undefined-behavior-sanitizer Enable UndefinedBehaviorSanitizer undefined behaviour detector, ``ubsan`` (default is no). @@ -399,11 +744,11 @@ Debug options Linker options -------------- -.. cmdoption:: --enable-shared +.. option:: --enable-shared Enable building a shared Python library: ``libpython`` (default is no). -.. cmdoption:: --without-static-libpython +.. option:: --without-static-libpython Do not build ``libpythonMAJOR.MINOR.a`` and do not install ``python.o`` (built and enabled by default). @@ -414,36 +759,32 @@ Linker options Libraries options ----------------- -.. cmdoption:: --with-libs='lib1 ...' +.. option:: --with-libs='lib1 ...' Link against additional libraries (default is no). -.. cmdoption:: --with-system-expat +.. option:: --with-system-expat - Build the :mod:`pyexpat` module using an installed ``expat`` library + Build the :mod:`!pyexpat` module using an installed ``expat`` library (default is no). -.. 
cmdoption:: --with-system-ffi - - Build the :mod:`_ctypes` extension module using an installed ``ffi`` - library, see the :mod:`ctypes` module (default is system-dependent). - -.. cmdoption:: --with-system-libmpdec +.. option:: --with-system-libmpdec Build the ``_decimal`` extension module using an installed ``mpdec`` library, see the :mod:`decimal` module (default is no). .. versionadded:: 3.3 -.. cmdoption:: --with-readline=editline +.. option:: --with-readline=readline|editline - Use ``editline`` library for backend of the :mod:`readline` module. + Designate a backend library for the :mod:`readline` module. - Define the ``WITH_EDITLINE`` macro. + * readline: Use readline as the backend. + * editline: Use editline as the backend. .. versionadded:: 3.10 -.. cmdoption:: --without-readline +.. option:: --without-readline Don't build the :mod:`readline` module (built by default). @@ -451,21 +792,21 @@ Libraries options .. versionadded:: 3.10 -.. cmdoption:: --with-libm=STRING +.. option:: --with-libm=STRING Override ``libm`` math library to *STRING* (default is system-dependent). -.. cmdoption:: --with-libc=STRING +.. option:: --with-libc=STRING Override ``libc`` C library to *STRING* (default is system-dependent). -.. cmdoption:: --with-openssl=DIR +.. option:: --with-openssl=DIR Root of the OpenSSL directory. .. versionadded:: 3.7 -.. cmdoption:: --with-openssl-rpath=[no|auto|DIR] +.. option:: --with-openssl-rpath=[no|auto|DIR] Set runtime library directory (rpath) for OpenSSL libraries: @@ -480,7 +821,7 @@ Libraries options Security Options ---------------- -.. cmdoption:: --with-hash-algorithm=[fnv|siphash13|siphash24] +.. option:: --with-hash-algorithm=[fnv|siphash13|siphash24] Select hash algorithm for use in ``Python/pyhash.c``: @@ -493,7 +834,7 @@ Security Options .. versionadded:: 3.11 ``siphash13`` is added and it is the new default. -.. cmdoption:: --with-builtin-hashlib-hashes=md5,sha1,sha256,sha512,sha3,blake2 +.. option:: --with-builtin-hashlib-hashes=md5,sha1,sha256,sha512,sha3,blake2 Built-in hash modules: @@ -506,7 +847,7 @@ Security Options .. versionadded:: 3.9 -.. cmdoption:: --with-ssl-default-suites=[python|openssl|STRING] +.. option:: --with-ssl-default-suites=[python|openssl|STRING] Override the OpenSSL default cipher suites string: @@ -528,19 +869,19 @@ macOS Options See ``Mac/README.rst``. -.. cmdoption:: --enable-universalsdk -.. cmdoption:: --enable-universalsdk=SDKDIR +.. option:: --enable-universalsdk +.. option:: --enable-universalsdk=SDKDIR Create a universal binary build. *SDKDIR* specifies which macOS SDK should be used to perform the build (default is no). -.. cmdoption:: --enable-framework -.. cmdoption:: --enable-framework=INSTALLDIR +.. option:: --enable-framework +.. option:: --enable-framework=INSTALLDIR Create a Python.framework rather than a traditional Unix install. Optional *INSTALLDIR* specifies the installation path (default is no). -.. cmdoption:: --with-universal-archs=ARCH +.. option:: --with-universal-archs=ARCH Specify the kind of universal binary that should be created. This option is only valid when :option:`--enable-universalsdk` is set. @@ -556,7 +897,7 @@ See ``Mac/README.rst``. * ``intel-64``; * ``all``. -.. cmdoption:: --with-framework-name=FRAMEWORK +.. option:: --with-framework-name=FRAMEWORK Specify the name for the python framework on macOS only valid when :option:`--enable-framework` is set (default: ``Python``). @@ -570,21 +911,21 @@ for another CPU architecture or platform. 
Cross compiling requires a Python interpreter for the build platform. The version of the build Python must match the version of the cross compiled host Python. -.. cmdoption:: --build=BUILD +.. option:: --build=BUILD configure for building on BUILD, usually guessed by :program:`config.guess`. -.. cmdoption:: --host=HOST +.. option:: --host=HOST cross-compile to build programs to run on HOST (target platform) -.. cmdoption:: --with-build-python=path/to/python +.. option:: --with-build-python=path/to/python path to build ``python`` binary for cross compiling .. versionadded:: 3.11 -.. cmdoption:: CONFIG_SITE=file +.. option:: CONFIG_SITE=file An environment variable that points to a file with configure overrides. @@ -595,6 +936,12 @@ the version of the cross compiled host Python. ac_cv_file__dev_ptmx=yes ac_cv_file__dev_ptc=no +.. option:: HOSTRUNNER + + Program to run CPython for the host platform for cross-compilation. + + .. versionadded:: 3.11 + Cross compiling example:: @@ -615,7 +962,6 @@ Main files of the build system * :file:`pyconfig.h` (created by :file:`configure`); * :file:`Modules/Setup`: C extensions built by the Makefile using :file:`Module/makesetup` shell script; -* :file:`setup.py`: C extensions built using the ``setuptools`` package. Main build steps ---------------- @@ -624,8 +970,7 @@ Main build steps * A static ``libpython`` library (``.a``) is created from objects files. * ``python.o`` and the static ``libpython`` library are linked into the final ``python`` program. -* C extensions are built by the Makefile (see :file:`Modules/Setup`) - and ``python setup.py build``. +* C extensions are built by the Makefile (see :file:`Modules/Setup`). Main Makefile targets --------------------- @@ -637,9 +982,18 @@ Main Makefile targets You can use the configure :option:`--enable-optimizations` option to make this the default target of the ``make`` command (``make all`` or just ``make``). -* ``make buildbottest``: Build Python and run the Python test suite, the same - way than buildbots test Python. Set ``TESTTIMEOUT`` variable (in seconds) - to change the test timeout (1200 by default: 20 minutes). + +* ``make test``: Build Python and run the Python test suite with ``--fast-ci`` + option. Variables: + + * ``TESTOPTS``: additional regrtest command line options. + * ``TESTPYTHONOPTS``: additional Python command line options. + * ``TESTTIMEOUT``: timeout in seconds (default: 20 minutes). + +* ``make buildbottest``: Similar to ``make test``, but use ``--slow-ci`` + option and default timeout of 20 minutes, instead of ``--fast-ci`` option + and a default timeout of 10 minutes. + * ``make install``: Build and install Python. * ``make regen-all``: Regenerate (almost) all generated files; ``make regen-stdlib-module-names`` and ``autoconf`` must be run separately @@ -677,18 +1031,15 @@ Example on Linux x86-64:: At the beginning of the files, C extensions are built as built-in modules. Extensions defined after the ``*shared*`` marker are built as dynamic libraries. -The :file:`setup.py` script only builds C extensions as shared libraries using -the :mod:`distutils` module. 
- -The :c:macro:`PyAPI_FUNC()`, :c:macro:`PyAPI_API()` and -:c:macro:`PyMODINIT_FUNC()` macros of :file:`Include/pyport.h` are defined +The :c:macro:`!PyAPI_FUNC()`, :c:macro:`!PyAPI_DATA()` and +:c:macro:`PyMODINIT_FUNC` macros of :file:`Include/exports.h` are defined differently depending if the ``Py_BUILD_CORE_MODULE`` macro is defined: * Use ``Py_EXPORTED_SYMBOL`` if the ``Py_BUILD_CORE_MODULE`` is defined * Use ``Py_IMPORTED_SYMBOL`` otherwise. If the ``Py_BUILD_CORE_BUILTIN`` macro is used by mistake on a C extension -built as a shared library, its ``PyInit_xxx()`` function is not exported, +built as a shared library, its :samp:`PyInit_{xxx}()` function is not exported, causing an :exc:`ImportError` on import. @@ -709,11 +1060,11 @@ Preprocessor flags .. envvar:: CPPFLAGS - (Objective) C/C++ preprocessor flags, e.g. ``-I`` if you have - headers in a nonstandard directory ````. + (Objective) C/C++ preprocessor flags, e.g. :samp:`-I{include_dir}` if you have + headers in a nonstandard directory *include_dir*. Both :envvar:`CPPFLAGS` and :envvar:`LDFLAGS` need to contain the shell's - value for setup.py to be able to build extension modules using the + value to be able to build extension modules using the directories specified in the environment variables. .. envvar:: BASECPPFLAGS @@ -750,8 +1101,8 @@ Compiler flags .. envvar:: CFLAGS_NODIST :envvar:`CFLAGS_NODIST` is used for building the interpreter and stdlib C - extensions. Use it when a compiler flag should *not* be part of the - distutils :envvar:`CFLAGS` once Python is installed (:issue:`21121`). + extensions. Use it when a compiler flag should *not* be part of + :envvar:`CFLAGS` once Python is installed (:gh:`65320`). In particular, :envvar:`CFLAGS` should not contain: @@ -766,6 +1117,13 @@ Compiler flags .. versionadded:: 3.5 +.. envvar:: COMPILEALL_OPTS + + Options passed to the :mod:`compileall` command line when building PYC files + in ``make install``. Default: ``-j0``. + + .. versionadded:: 3.12 + .. envvar:: EXTRA_CFLAGS Extra C compiler flags. @@ -874,7 +1232,7 @@ Linker flags :envvar:`LDFLAGS_NODIST` is used in the same manner as :envvar:`CFLAGS_NODIST`. Use it when a linker flag should *not* be part of - the distutils :envvar:`LDFLAGS` once Python is installed (:issue:`35257`). + :envvar:`LDFLAGS` once Python is installed (:gh:`65320`). In particular, :envvar:`LDFLAGS` should not contain: @@ -892,11 +1250,11 @@ Linker flags .. envvar:: LDFLAGS - Linker flags, e.g. ``-L`` if you have libraries in a nonstandard - directory ````. + Linker flags, e.g. :samp:`-L{lib_dir}` if you have libraries in a nonstandard + directory *lib_dir*. Both :envvar:`CPPFLAGS` and :envvar:`LDFLAGS` need to contain the shell's - value for setup.py to be able to build extension modules using the + value to be able to build extension modules using the directories specified in the environment variables. .. envvar:: LIBS diff --git a/Doc/using/mac.rst b/Doc/using/mac.rst index 9ae0270eaee7aba..eb1413af2cbc3df 100644 --- a/Doc/using/mac.rst +++ b/Doc/using/mac.rst @@ -66,7 +66,7 @@ number of standard Unix command line editors, :program:`vim` and :program:`BBEdit` or :program:`TextWrangler` from Bare Bones Software (see http://www.barebones.com/products/bbedit/index.html) are good choices, as is :program:`TextMate` (see https://macromates.com/). Other editors include -:program:`Gvim` (https://macvim-dev.github.io/macvim/) and :program:`Aquamacs` +:program:`Gvim` (https://macvim.org/macvim/) and :program:`Aquamacs` (http://aquamacs.org/). 
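To make the export rule above concrete, the following is a minimal sketch of a
C extension module meant to be built as a shared library; the module name
``spam``, its methods and its constant are hypothetical, and error handling is
kept to a minimum. :c:macro:`PyMODINIT_FUNC` marks :samp:`PyInit_{xxx}()` as an
exported symbol so the import system can find it, and the method table uses the
common ``METH_NOARGS`` and ``METH_O`` calling conventions::

   #include <Python.h>

   /* A no-argument method: with METH_NOARGS the second parameter is unused. */
   static PyObject *
   spam_version(PyObject *self, PyObject *Py_UNUSED(ignored))
   {
       return PyUnicode_FromString("1.0");
   }

   /* A single-argument method: METH_O passes the argument object directly. */
   static PyObject *
   spam_double(PyObject *self, PyObject *arg)
   {
       long value = PyLong_AsLong(arg);
       if (value == -1 && PyErr_Occurred()) {
           return NULL;
       }
       return PyLong_FromLong(2 * value);
   }

   static PyMethodDef spam_methods[] = {
       {"version", spam_version, METH_NOARGS, "Return the module version."},
       {"double", spam_double, METH_O, "Double an integer."},
       {NULL, NULL, 0, NULL}
   };

   static struct PyModuleDef spam_module = {
       PyModuleDef_HEAD_INIT,
       "spam",          /* module name */
       NULL,            /* module docstring */
       -1,              /* per-interpreter state size */
       spam_methods
   };

   /* PyMODINIT_FUNC expands to an exported symbol, so the import system
      can locate PyInit_spam() in the shared library. */
   PyMODINIT_FUNC
   PyInit_spam(void)
   {
       PyObject *m = PyModule_Create(&spam_module);
       if (m == NULL) {
           return NULL;
       }
       if (PyModule_AddIntConstant(m, "ANSWER", 42) < 0) {
           Py_DECREF(m);
           return NULL;
       }
       return m;
   }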
To run your script from the Terminal window you must make sure that @@ -125,13 +125,9 @@ http://www.hashcollision.org/hkn/python/idle_intro/index.html. Installing Additional Python Packages ===================================== -There are several methods to install additional Python packages: +This section has moved to the `Python Packaging User Guide`_. -* Packages can be installed via the standard Python distutils mode (``python - setup.py install``). - -* Many packages can also be installed via the :program:`setuptools` extension - or :program:`pip` wrapper, see https://pip.pypa.io/. +.. _Python Packaging User Guide: https://packaging.python.org/en/latest/tutorials/installing-packages/ GUI Programming on the Mac @@ -144,8 +140,8 @@ the foundation of most modern Mac development. Information on PyObjC is available from https://pypi.org/project/pyobjc/. The standard Python GUI toolkit is :mod:`tkinter`, based on the cross-platform -Tk toolkit (https://www.tcl.tk). An Aqua-native version of Tk is bundled with OS -X by Apple, and the latest version can be downloaded and installed from +Tk toolkit (https://www.tcl.tk). An Aqua-native version of Tk is bundled with +macOS by Apple, and the latest version can be downloaded and installed from https://www.activestate.com; it can also be built from source. *wxPython* is another popular cross-platform GUI toolkit that runs natively on diff --git a/Doc/using/unix.rst b/Doc/using/unix.rst index 24c02c99f871d56..58838c28e6eb86b 100644 --- a/Doc/using/unix.rst +++ b/Doc/using/unix.rst @@ -30,9 +30,9 @@ following links: for Debian users https://en.opensuse.org/Portal:Packaging for OpenSuse users - https://docs-old.fedoraproject.org/en-US/Fedora_Draft_Documentation/0.1/html/RPM_Guide/ch-creating-rpms.html + https://docs.fedoraproject.org/en-US/package-maintainers/Packaging_Tutorial_GNU_Hello/ for Fedora users - http://www.slackbook.org/html/package-management-making-packages.html + https://slackbook.org/html/package-management-making-packages.html for Slackware users @@ -54,13 +54,6 @@ On FreeBSD and OpenBSD pkg_add ftp://ftp.openbsd.org/pub/OpenBSD/4.2/packages/i386/python-2.5.1p2.tgz -On OpenSolaris --------------- - -You can get Python from `OpenCSW `_. Various versions -of Python are available and can be installed with e.g. ``pkgutil -i python27``. - - .. _building-python-on-unix: Building Python @@ -93,7 +86,7 @@ Python-related paths and files ============================== These are subject to difference depending on local installation conventions; -:envvar:`prefix` (``${prefix}``) and :envvar:`exec_prefix` (``${exec_prefix}``) +:option:`prefix <--prefix>` and :option:`exec_prefix <--exec-prefix>` are installation-dependent and should be interpreted as for GNU software; they may be the same. 
diff --git a/Doc/using/venv-create.inc b/Doc/using/venv-create.inc index d535b254f05698b..1cf438b198a9afa 100644 --- a/Doc/using/venv-create.inc +++ b/Doc/using/venv-create.inc @@ -1,7 +1,7 @@ Creation of :ref:`virtual environments ` is done by executing the command ``venv``:: - python3 -m venv /path/to/new/virtual/environment + python -m venv /path/to/new/virtual/environment Running this command creates the target directory (creating any parent directories that don't exist already) and places a ``pyvenv.cfg`` file in it @@ -35,37 +35,52 @@ your :ref:`Python installation `:: The command, if run with ``-h``, will show the available options:: - usage: venv [-h] [--system-site-packages] [--symlinks | --copies] [--clear] - [--upgrade] [--without-pip] [--prompt PROMPT] [--upgrade-deps] - ENV_DIR [ENV_DIR ...] - - Creates virtual Python environments in one or more target directories. - - positional arguments: - ENV_DIR A directory to create the environment in. - - optional arguments: - -h, --help show this help message and exit - --system-site-packages - Give the virtual environment access to the system - site-packages dir. - --symlinks Try to use symlinks rather than copies, when symlinks - are not the default for the platform. - --copies Try to use copies rather than symlinks, even when - symlinks are the default for the platform. - --clear Delete the contents of the environment directory if it - already exists, before environment creation. - --upgrade Upgrade the environment directory to use this version - of Python, assuming Python has been upgraded in-place. - --without-pip Skips installing or upgrading pip in the virtual - environment (pip is bootstrapped by default) - --prompt PROMPT Provides an alternative prompt prefix for this - environment. - --upgrade-deps Upgrade core dependencies: pip setuptools to the - latest version in PyPI - - Once an environment has been created, you may wish to activate it, e.g. by - sourcing an activate script in its bin directory. + usage: venv [-h] [--system-site-packages] [--symlinks | --copies] [--clear] + [--upgrade] [--without-pip] [--prompt PROMPT] [--upgrade-deps] + [--without-scm-ignore-file] + ENV_DIR [ENV_DIR ...] + + Creates virtual Python environments in one or more target directories. + + positional arguments: + ENV_DIR A directory to create the environment in. + + options: + -h, --help show this help message and exit + --system-site-packages + Give the virtual environment access to the system + site-packages dir. + --symlinks Try to use symlinks rather than copies, when + symlinks are not the default for the platform. + --copies Try to use copies rather than symlinks, even when + symlinks are the default for the platform. + --clear Delete the contents of the environment directory if + it already exists, before environment creation. + --upgrade Upgrade the environment directory to use this + version of Python, assuming Python has been upgraded + in-place. + --without-pip Skips installing or upgrading pip in the virtual + environment (pip is bootstrapped by default) + --prompt PROMPT Provides an alternative prompt prefix for this + environment. + --upgrade-deps Upgrade core dependencies (pip) to the latest + version in PyPI + --without-scm-ignore-file + Skips adding the default SCM ignore file to the + environment directory (the default is a .gitignore + file). + + Once an environment has been created, you may wish to activate it, e.g. by + sourcing an activate script in its bin directory. + +.. 
versionchanged:: 3.13
+
+   ``--without-scm-ignore-file`` was added along with creating an ignore file
+   for ``git`` by default.
+
+.. versionchanged:: 3.12
+
+   ``setuptools`` is no longer a core venv dependency.

 .. versionchanged:: 3.9
    Add ``--upgrade-deps`` option to upgrade pip + setuptools to the latest on PyPI

@@ -104,4 +119,3 @@ invoked to bootstrap ``pip`` into the virtual environment.

 Multiple paths can be given to ``venv``, in which case an identical virtual
 environment will be created, according to the given options, at each provided
 path.
-
diff --git a/Doc/using/windows.rst b/Doc/using/windows.rst
index fdbe4c15a200360..598bf3ca9bcc04b 100644
--- a/Doc/using/windows.rst
+++ b/Doc/using/windows.rst
@@ -470,7 +470,7 @@ user's system, including environment variables, system registry settings, and
 installed packages. The standard library is included as pre-compiled and
 optimized ``.pyc`` files in a ZIP, and ``python3.dll``, ``python37.dll``,
 ``python.exe`` and ``pythonw.exe`` are all provided. Tcl/tk (including all
-dependants, such as Idle), pip and the Python documentation are not included.
+dependents, such as Idle), pip and the Python documentation are not included.

 .. note::

@@ -541,7 +541,7 @@ Besides the standard CPython distribution, there are modified packages including
 additional functionality.  The following is a list of popular versions and their
 key features:

-`ActivePython `_
+`ActivePython `_
    Installer with multi-platform compatibility, documentation, PyWin32

 `Anaconda `_
@@ -743,22 +743,47 @@ command::

    py -2

-You should find the latest version of Python 3.x starts.
-
 If you see the following error, you do not have the launcher installed::

    'py' is not recognized as an internal or external command, operable program or batch file.

-Per-user installations of Python do not add the launcher to :envvar:`PATH`
-unless the option was selected on installation.
-
 The command::

    py --list

 displays the currently installed version(s) of Python.

+The ``-x.y`` argument is the short form of the ``-V:Company/Tag`` argument,
+which allows selecting a specific Python runtime, including those that may have
+come from somewhere other than python.org. Any runtime registered by following
+:pep:`514` will be discoverable. The ``--list`` command lists all available
+runtimes using the ``-V:`` format.
+
+When using the ``-V:`` argument, specifying the Company will limit selection to
+runtimes from that provider, while specifying only the Tag will select from all
+providers. Note that omitting the slash implies a tag::
+
+    # Select any '3.*' tagged runtime
+    py -V:3
+
+    # Select any 'PythonCore' released runtime
+    py -V:PythonCore/
+
+    # Select PythonCore's latest Python 3 runtime
+    py -V:PythonCore/3
+
+The short form of the argument (``-3``) only ever selects from core Python
+releases, and not other distributions. However, the longer form (``-V:3``) will
+select from any.
+
+The Company is matched on the full string, case-insensitive. The Tag is matched
+on either the full string, or a prefix, provided the next character is a dot or a
+hyphen. This allows ``-V:3.1`` to match ``3.1-32``, but not ``3.10``. Tags are
+sorted using numerical ordering (``3.10`` is newer than ``3.1``), but are
+compared using text (``-V:3.01`` does not match ``3.1``).
+
+
 Virtual environments
 ^^^^^^^^^^^^^^^^^^^^

@@ -797,7 +822,7 @@ is printed. Now try changing the first line to be:

 Re-executing the command should now print the latest Python 3.x information.
As with the above command-line examples, you can specify a more explicit version qualifier. Assuming you have Python 3.7 installed, try changing -the first line to ``#! python3.7`` and you should find the |version| +the first line to ``#! python3.7`` and you should find the 3.7 version information printed. Note that unlike interactive use, a bare "python" will use the latest @@ -831,7 +856,7 @@ To allow shebang lines in Python scripts to be portable between Unix and Windows, this launcher supports a number of 'virtual' commands to specify which interpreter to use. The supported virtual commands are: -* ``/usr/bin/env python`` +* ``/usr/bin/env`` * ``/usr/bin/python`` * ``/usr/local/bin/python`` * ``python`` @@ -842,17 +867,18 @@ For example, if the first line of your script starts with #! /usr/bin/python -The default Python will be located and used. As many Python scripts written -to work on Unix will already have this line, you should find these scripts can -be used by the launcher without modification. If you are writing a new script -on Windows which you hope will be useful on Unix, you should use one of the -shebang lines starting with ``/usr``. +The default Python or an active virtual environment will be located and used. +As many Python scripts written to work on Unix will already have this line, +you should find these scripts can be used by the launcher without modification. +If you are writing a new script on Windows which you hope will be useful on +Unix, you should use one of the shebang lines starting with ``/usr``. Any of the above virtual commands can be suffixed with an explicit version (either just the major version, or the major and minor version). Furthermore the 32-bit version can be requested by adding "-32" after the minor version. I.e. ``/usr/bin/python3.7-32`` will request usage of the -32-bit python 3.7. +32-bit Python 3.7. If a virtual environment is active, the version will be +ignored and the environment will be used. .. versionadded:: 3.7 @@ -864,18 +890,40 @@ minor version. I.e. ``/usr/bin/python3.7-32`` will request usage of the The "-64" suffix is deprecated, and now implies "any architecture that is not provably i386/32-bit". To request a specific environment, use the new - ``-V:`` argument with the complete tag. + :samp:`-V:{TAG}` argument with the complete tag. + +.. versionchanged:: 3.13 + + Virtual commands referencing ``python`` now prefer an active virtual + environment rather than searching :envvar:`PATH`. This handles cases where + the shebang specifies ``/usr/bin/env python3`` but :file:`python3.exe` is + not present in the active environment. The ``/usr/bin/env`` form of shebang line has one further special property. Before looking for installed Python interpreters, this form will search the -executable :envvar:`PATH` for a Python executable. This corresponds to the -behaviour of the Unix ``env`` program, which performs a :envvar:`PATH` search. +executable :envvar:`PATH` for a Python executable matching the name provided +as the first argument. This corresponds to the behaviour of the Unix ``env`` +program, which performs a :envvar:`PATH` search. If an executable matching the first argument after the ``env`` command cannot -be found, it will be handled as described below. Additionally, the environment -variable :envvar:`PYLAUNCHER_NO_SEARCH_PATH` may be set (to any value) to skip -this additional search. +be found, but the argument starts with ``python``, it will be handled as +described for the other virtual commands. 
+The environment variable :envvar:`PYLAUNCHER_NO_SEARCH_PATH` may be set +(to any value) to skip this search of :envvar:`PATH`. + +Shebang lines that do not match any of these patterns are looked up in the +``[commands]`` section of the launcher's :ref:`.INI file `. +This may be used to handle certain commands in a way that makes sense for your +system. The name of the command must be a single argument (no spaces in the +shebang executable), and the value substituted is the full path to the +executable (additional arguments specified in the .INI will be quoted as part +of the filename). + +.. code-block:: ini + + [commands] + /bin/xpython=C:\Program Files\XPython\python.exe -Shebang lines that do not match any of these patterns are treated as **Windows** +Any commands not found in the .INI file are treated as **Windows** executable paths that are absolute or relative to the directory containing the script file. This is a convenience for Windows-only scripts, such as those generated by an installer, since the behavior is not compatible with Unix-style shells. @@ -898,15 +946,16 @@ Then Python will be started with the ``-v`` option Customization ------------- +.. _launcher-ini: + Customization via INI files ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Two .ini files will be searched by the launcher - ``py.ini`` in the current -user's "application data" directory (i.e. the directory returned by calling the -Windows function ``SHGetFolderPath`` with ``CSIDL_LOCAL_APPDATA``) and ``py.ini`` in the -same directory as the launcher. The same .ini files are used for both the -'console' version of the launcher (i.e. py.exe) and for the 'windows' version -(i.e. pyw.exe). +user's application data directory (``%LOCALAPPDATA%`` or ``$env:LocalAppData``) +and ``py.ini`` in the same directory as the launcher. The same .ini files are +used for both the 'console' version of the launcher (i.e. py.exe) and for the +'windows' version (i.e. pyw.exe). Customization specified in the "application directory" will have precedence over the one next to the executable, so a user, who may not have write access to the @@ -1128,8 +1177,8 @@ following advice will prevent conflicts with other installations: listed. * If you are loading :file:`python3.dll` or :file:`python37.dll` in your own - executable, explicitly call :c:func:`Py_SetPath` or (at least) - :c:func:`Py_SetProgramName` before :c:func:`Py_Initialize`. + executable, explicitly set :c:member:`PyConfig.module_search_paths` before + :c:func:`Py_InitializeFromConfig`. * Clear and/or overwrite :envvar:`PYTHONPATH` and set :envvar:`PYTHONHOME` before launching :file:`python.exe` from your application. @@ -1146,21 +1195,22 @@ Otherwise, your users may experience problems using your application. Note that the first suggestion is the best, as the others may still be susceptible to non-standard paths in the registry and user site-packages. -.. versionchanged:: - 3.6 +.. versionchanged:: 3.6 + + Add ``._pth`` file support and removes ``applocal`` option from + ``pyvenv.cfg``. + +.. versionchanged:: 3.6 - * Adds ``._pth`` file support and removes ``applocal`` option from - ``pyvenv.cfg``. - * Adds ``pythonXX.zip`` as a potential landmark when directly adjacent - to the executable. + Add :file:`python{XX}.zip` as a potential landmark when directly adjacent + to the executable. -.. deprecated:: - 3.6 +.. deprecated:: 3.6 - Modules specified in the registry under ``Modules`` (not ``PythonPath``) - may be imported by :class:`importlib.machinery.WindowsRegistryFinder`. 
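As a sketch of the embedding advice above (set
:c:member:`PyConfig.module_search_paths` and only then call
:c:func:`Py_InitializeFromConfig`): the directory names below are placeholders
for wherever the application ships its copy of Python, and error handling is
reduced to the minimum::

   #include <Python.h>

   int
   run_embedded(void)
   {
       PyStatus status;
       PyConfig config;

       PyConfig_InitPythonConfig(&config);

       /* Ignore registry settings and environment variables that could
          redirect the search path to another installation. */
       config.isolated = 1;

       /* Use only the directories this application ships with
          (placeholder paths, adjust for the real layout). */
       config.module_search_paths_set = 1;
       status = PyWideStringList_Append(&config.module_search_paths,
                                        L"C:\\MyApp\\python37.zip");
       if (PyStatus_Exception(status)) {
           goto fail;
       }
       status = PyWideStringList_Append(&config.module_search_paths,
                                        L"C:\\MyApp\\lib");
       if (PyStatus_Exception(status)) {
           goto fail;
       }

       status = Py_InitializeFromConfig(&config);
       if (PyStatus_Exception(status)) {
           goto fail;
       }
       PyConfig_Clear(&config);

       PyRun_SimpleString("import sys; print(sys.path)");
       return Py_FinalizeEx();

   fail:
       PyConfig_Clear(&config);
       Py_ExitStatusException(status);
   }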
- This finder is enabled on Windows in 3.6.0 and earlier, but may need to - be explicitly added to :attr:`sys.meta_path` in the future. + Modules specified in the registry under ``Modules`` (not ``PythonPath``) + may be imported by :class:`importlib.machinery.WindowsRegistryFinder`. + This finder is enabled on Windows in 3.6.0 and earlier, but may need to + be explicitly added to :data:`sys.meta_path` in the future. Additional modules ================== @@ -1205,8 +1255,8 @@ shipped with PyWin32. It is an embeddable IDE with a built-in debugger. cx_Freeze --------- -`cx_Freeze `_ is a ``distutils`` -extension which wraps Python scripts into executable Windows programs +`cx_Freeze `_ +wraps Python scripts into executable Windows programs (:file:`{*}.exe` files). When you have done this, you can distribute your application without requiring your users to install Python. diff --git a/Doc/whatsnew/2.0.rst b/Doc/whatsnew/2.0.rst index 4bcb2acae1e640e..6d6e51006d5bd81 100644 --- a/Doc/whatsnew/2.0.rst +++ b/Doc/whatsnew/2.0.rst @@ -153,9 +153,9 @@ Lundh. A detailed explanation of the interface was written up as :pep:`100`, significant points about the Unicode interfaces. In Python source code, Unicode strings are written as ``u"string"``. Arbitrary -Unicode characters can be written using a new escape sequence, ``\uHHHH``, where +Unicode characters can be written using a new escape sequence, :samp:`\\u{HHHH}`, where *HHHH* is a 4-digit hexadecimal number from 0000 to FFFF. The existing -``\xHHHH`` escape sequence can also be used, and octal escapes can be used for +:samp:`\\x{HH}` escape sequence can also be used, and octal escapes can be used for characters up to U+01FF, which is represented by ``\777``. Unicode strings, just like regular strings, are an immutable sequence type. @@ -664,16 +664,16 @@ extra set of parentheses to pass both values as a tuple: ``L.append( (1,2) )``. The earlier versions of these methods were more forgiving because they used an old function in Python's C interface to parse their arguments; 2.0 modernizes -them to use :func:`PyArg_ParseTuple`, the current argument parsing function, +them to use :c:func:`PyArg_ParseTuple`, the current argument parsing function, which provides more helpful error messages and treats multi-argument calls as errors. If you absolutely must use 2.0 but can't fix your code, you can edit :file:`Objects/listobject.c` and define the preprocessor symbol ``NO_STRICT_LIST_APPEND`` to preserve the old behaviour; this isn't recommended. Some of the functions in the :mod:`socket` module are still forgiving in this -way. For example, :func:`socket.connect( ('hostname', 25) )` is the correct -form, passing a tuple representing an IP address, but :func:`socket.connect( -'hostname', 25 )` also works. :func:`socket.connect_ex` and :func:`socket.bind` +way. For example, ``socket.connect( ('hostname', 25) )`` is the correct +form, passing a tuple representing an IP address, but ``socket.connect('hostname', 25)`` +also works. :meth:`socket.connect_ex ` and :meth:`socket.bind ` are similarly easy-going. 2.0alpha1 tightened these functions up, but because the documentation actually used the erroneous multiple argument form, many people wrote code which would break with the stricter checking. GvR backed out @@ -766,7 +766,7 @@ file, :file:`Include/pyport.h`. Vladimir Marangozov's long-awaited malloc restructuring was completed, to make it easy to have the Python interpreter use a custom allocator instead of C's -standard :func:`malloc`. 
For documentation, read the comments in +standard :c:func:`malloc`. For documentation, read the comments in :file:`Include/pymem.h` and :file:`Include/objimpl.h`. For the lengthy discussions during which the interface was hammered out, see the web archives of the 'patches' and 'python-dev' lists at python.org. @@ -794,15 +794,15 @@ are generating Python code would run into this limit. A patch by Charles G. Waldman raises the limit from ``2**16`` to ``2**32``. Three new convenience functions intended for adding constants to a module's -dictionary at module initialization time were added: :func:`PyModule_AddObject`, -:func:`PyModule_AddIntConstant`, and :func:`PyModule_AddStringConstant`. Each +dictionary at module initialization time were added: :c:func:`PyModule_AddObject`, +:c:func:`PyModule_AddIntConstant`, and :c:func:`PyModule_AddStringConstant`. Each of these functions takes a module object, a null-terminated C string containing the name to be added, and a third argument for the value to be assigned to the name. This third argument is, respectively, a Python object, a C long, or a C string. -A wrapper API was added for Unix-style signal handlers. :func:`PyOS_getsig` gets -a signal handler and :func:`PyOS_setsig` will set a new handler. +A wrapper API was added for Unix-style signal handlers. :c:func:`PyOS_getsig` gets +a signal handler and :c:func:`PyOS_setsig` will set a new handler. .. ====================================================================== @@ -933,7 +933,7 @@ using it:: parser.parse( 'hamlet.xml' ) For more information, consult the Python documentation, or the XML HOWTO at -http://pyxml.sourceforge.net/topics/howto/xml-howto.html. +https://pyxml.sourceforge.net/topics/howto/xml-howto.html. DOM Support @@ -1030,9 +1030,9 @@ Module changes Lots of improvements and bugfixes were made to Python's extensive standard library; some of the affected modules include :mod:`readline`, -:mod:`ConfigParser`, :mod:`cgi`, :mod:`calendar`, :mod:`posix`, :mod:`readline`, -:mod:`xmllib`, :mod:`aifc`, :mod:`chunk, wave`, :mod:`random`, :mod:`shelve`, -and :mod:`nntplib`. Consult the CVS logs for the exact patch-by-patch details. +:mod:`ConfigParser`, :mod:`!cgi`, :mod:`calendar`, :mod:`posix`, :mod:`readline`, +:mod:`xmllib`, :mod:`!aifc`, :mod:`!chunk`, :mod:`wave`, :mod:`random`, :mod:`shelve`, +and :mod:`!nntplib`. Consult the CVS logs for the exact patch-by-patch details. Brian Gallew contributed OpenSSL support for the :mod:`socket` module. OpenSSL is an implementation of the Secure Socket Layer, which encrypts the data being diff --git a/Doc/whatsnew/2.1.rst b/Doc/whatsnew/2.1.rst index 0136de587740380..f0e1ded75a9d27c 100644 --- a/Doc/whatsnew/2.1.rst +++ b/Doc/whatsnew/2.1.rst @@ -613,7 +613,7 @@ New and Improved Modules framework based on running embedded examples in docstrings and comparing the results against the expected output. PyUnit, contributed by Steve Purcell, is a unit testing framework inspired by JUnit, which was in turn an adaptation of - Kent Beck's Smalltalk testing framework. See http://pyunit.sourceforge.net/ for + Kent Beck's Smalltalk testing framework. See https://pyunit.sourceforge.net/ for more information about PyUnit. * The :mod:`difflib` module contains a class, :class:`SequenceMatcher`, which @@ -692,8 +692,8 @@ applied, and 136 bugs fixed; both figures are likely to be underestimates. 
Some of the more notable changes are: * A specialized object allocator is now optionally available, that should be - faster than the system :func:`malloc` and have less memory overhead. The - allocator uses C's :func:`malloc` function to get large pools of memory, and + faster than the system :c:func:`malloc` and have less memory overhead. The + allocator uses C's :c:func:`!malloc` function to get large pools of memory, and then fulfills smaller memory requests from these pools. It can be enabled by providing the :option:`!--with-pymalloc` option to the :program:`configure` script; see :file:`Objects/obmalloc.c` for the implementation details. @@ -701,13 +701,13 @@ of the more notable changes are: Authors of C extension modules should test their code with the object allocator enabled, because some incorrect code may break, causing core dumps at runtime. There are a bunch of memory allocation functions in Python's C API that have - previously been just aliases for the C library's :func:`malloc` and - :func:`free`, meaning that if you accidentally called mismatched functions, the + previously been just aliases for the C library's :c:func:`malloc` and + :c:func:`free`, meaning that if you accidentally called mismatched functions, the error wouldn't be noticeable. When the object allocator is enabled, these - functions aren't aliases of :func:`malloc` and :func:`free` any more, and + functions aren't aliases of :c:func:`!malloc` and :c:func:`!free` any more, and calling the wrong function to free memory will get you a core dump. For - example, if memory was allocated using :func:`PyMem_New`, it has to be freed - using :func:`PyMem_Del`, not :func:`free`. A few modules included with Python + example, if memory was allocated using :c:macro:`PyMem_New`, it has to be freed + using :c:func:`PyMem_Del`, not :c:func:`!free`. A few modules included with Python fell afoul of this and had to be fixed; doubtless there are more third-party modules that will have the same problem. @@ -717,7 +717,7 @@ of the more notable changes are: complain about its lack of speed, and because it's often been used as a naïve benchmark. The :meth:`readline` method of file objects has therefore been rewritten to be much faster. The exact amount of the speedup will vary from - platform to platform depending on how slow the C library's :func:`getc` was, but + platform to platform depending on how slow the C library's :c:func:`!getc` was, but is around 66%, and potentially much faster on some particular operating systems. Tim Peters did much of the benchmarking and coding for this change, motivated by a discussion in comp.lang.python. @@ -770,7 +770,7 @@ of the more notable changes are: reorganization done by Jeremy Hylton. * C extensions which import other modules have been changed to use - :func:`PyImport_ImportModule`, which means that they will use any import hooks + :c:func:`PyImport_ImportModule`, which means that they will use any import hooks that have been installed. This is also encouraged for third-party extensions that need to import some other module from C code. diff --git a/Doc/whatsnew/2.2.rst b/Doc/whatsnew/2.2.rst index 0c3bfda19339573..6dfe79cef00987f 100644 --- a/Doc/whatsnew/2.2.rst +++ b/Doc/whatsnew/2.2.rst @@ -424,22 +424,22 @@ Another significant addition to 2.2 is an iteration interface at both the C and Python levels. Objects can define how they can be looped over by callers. 
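A hypothetical helper illustrating the pairing rule described above: the
temporary array is obtained with :c:macro:`PyMem_New`, so every exit path
releases it with :c:func:`PyMem_Del` rather than the C library's
:c:func:`!free`::

   #include <Python.h>

   /* Copy a Python sequence of integers into a fresh C array.  The array is
      allocated with PyMem_New(), so the caller must release it with
      PyMem_Del() -- not with the C library's free(). */
   static long *
   as_long_array(PyObject *seq, Py_ssize_t *size)
   {
       Py_ssize_t n = PySequence_Size(seq);
       if (n < 0) {
           return NULL;
       }

       long *values = PyMem_New(long, n);
       if (values == NULL) {
           PyErr_NoMemory();
           return NULL;
       }

       for (Py_ssize_t i = 0; i < n; i++) {
           PyObject *item = PySequence_GetItem(seq, i);
           if (item == NULL) {
               PyMem_Del(values);   /* same family as the allocation */
               return NULL;
           }
           values[i] = PyLong_AsLong(item);
           Py_DECREF(item);
           if (values[i] == -1 && PyErr_Occurred()) {
               PyMem_Del(values);
               return NULL;
           }
       }

       *size = n;
       return values;
   }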
In Python versions up to 2.1, the usual way to make ``for item in obj`` work is -to define a :meth:`__getitem__` method that looks something like this:: +to define a :meth:`~object.__getitem__` method that looks something like this:: def __getitem__(self, index): return -:meth:`__getitem__` is more properly used to define an indexing operation on an +:meth:`~object.__getitem__` is more properly used to define an indexing operation on an object so that you can write ``obj[5]`` to retrieve the sixth element. It's a bit misleading when you're using this only to support :keyword:`for` loops. Consider some file-like object that wants to be looped over; the *index* parameter is essentially meaningless, as the class probably assumes that a -series of :meth:`__getitem__` calls will be made with *index* incrementing by -one each time. In other words, the presence of the :meth:`__getitem__` method +series of :meth:`~object.__getitem__` calls will be made with *index* incrementing by +one each time. In other words, the presence of the :meth:`~object.__getitem__` method doesn't mean that using ``file[5]`` to randomly access the sixth element will work, though it really should. -In Python 2.2, iteration can be implemented separately, and :meth:`__getitem__` +In Python 2.2, iteration can be implemented separately, and :meth:`~object.__getitem__` methods can be limited to classes that really do support random access. The basic idea of iterators is simple. A new built-in function, ``iter(obj)`` or ``iter(C, sentinel)``, is used to get an iterator. ``iter(obj)`` returns @@ -632,10 +632,10 @@ queen threatens another) and the Knight's Tour (a route that takes a knight to every square of an $NxN$ chessboard without visiting any square twice). The idea of generators comes from other programming languages, especially Icon -(https://www.cs.arizona.edu/icon/), where the idea of generators is central. In +(https://www2.cs.arizona.edu/icon/), where the idea of generators is central. In Icon, every expression and function call behaves like a generator. One example from "An Overview of the Icon Programming Language" at -https://www.cs.arizona.edu/icon/docs/ipd266.htm gives an idea of what this looks +https://www2.cs.arizona.edu/icon/docs/ipd266.htm gives an idea of what this looks like:: sentence := "Store it in the neighboring harbor" @@ -1078,17 +1078,17 @@ code, none of the changes described here will affect you very much. To upgrade an extension module to the new API, perform the following steps: -* Rename :c:func:`Py_TPFLAGS_GC` to :c:func:`PyTPFLAGS_HAVE_GC`. +* Rename :c:macro:`!Py_TPFLAGS_GC` to :c:macro:`Py_TPFLAGS_HAVE_GC`. * Use :c:func:`PyObject_GC_New` or :c:func:`PyObject_GC_NewVar` to allocate objects, and :c:func:`PyObject_GC_Del` to deallocate them. -* Rename :c:func:`PyObject_GC_Init` to :c:func:`PyObject_GC_Track` and - :c:func:`PyObject_GC_Fini` to :c:func:`PyObject_GC_UnTrack`. +* Rename :c:func:`!PyObject_GC_Init` to :c:func:`PyObject_GC_Track` and + :c:func:`!PyObject_GC_Fini` to :c:func:`PyObject_GC_UnTrack`. -* Remove :c:func:`PyGC_HEAD_SIZE` from object size calculations. +* Remove :c:macro:`!PyGC_HEAD_SIZE` from object size calculations. -* Remove calls to :c:func:`PyObject_AS_GC` and :c:func:`PyObject_FROM_GC`. +* Remove calls to :c:func:`!PyObject_AS_GC` and :c:func:`!PyObject_FROM_GC`. 
* A new ``et`` format sequence was added to :c:func:`PyArg_ParseTuple`; ``et`` takes both a parameter and an encoding name, and converts the parameter to the @@ -1105,11 +1105,11 @@ code, none of the changes described here will affect you very much. expected, and a set of pointers to :c:expr:`PyObject*` variables that will be filled in with argument values. -* Two new flags :const:`METH_NOARGS` and :const:`METH_O` are available in method +* Two new flags :c:macro:`METH_NOARGS` and :c:macro:`METH_O` are available in method definition tables to simplify implementation of methods with no arguments or a single untyped argument. Calling such methods is more efficient than calling a - corresponding method that uses :const:`METH_VARARGS`. Also, the old - :const:`METH_OLDARGS` style of writing C methods is now officially deprecated. + corresponding method that uses :c:macro:`METH_VARARGS`. Also, the old + :c:macro:`!METH_OLDARGS` style of writing C methods is now officially deprecated. * Two new wrapper functions, :c:func:`PyOS_snprintf` and :c:func:`PyOS_vsnprintf` were added to provide cross-platform implementations for the relatively new @@ -1219,7 +1219,7 @@ Some of the more notable changes are: operator, but these features were rarely used and therefore buggy. The :meth:`tolist` method and the :attr:`start`, :attr:`stop`, and :attr:`step` attributes are also being deprecated. At the C level, the fourth argument to - the :c:func:`PyRange_New` function, ``repeat``, has also been deprecated. + the :c:func:`!PyRange_New` function, ``repeat``, has also been deprecated. * There were a bunch of patches to the dictionary implementation, mostly to fix potential core dumps if a dictionary contains objects that sneakily changed diff --git a/Doc/whatsnew/2.3.rst b/Doc/whatsnew/2.3.rst index c6e2003e92f1b35..af332b28a28231e 100644 --- a/Doc/whatsnew/2.3.rst +++ b/Doc/whatsnew/2.3.rst @@ -218,10 +218,10 @@ queen threatens another) and the Knight's Tour (a route that takes a knight to every square of an $NxN$ chessboard without visiting any square twice). The idea of generators comes from other programming languages, especially Icon -(https://www.cs.arizona.edu/icon/), where the idea of generators is central. In +(https://www2.cs.arizona.edu/icon/), where the idea of generators is central. In Icon, every expression and function call behaves like a generator. One example from "An Overview of the Icon Programming Language" at -https://www.cs.arizona.edu/icon/docs/ipd266.htm gives an idea of what this looks +https://www2.cs.arizona.edu/icon/docs/ipd266.htm gives an idea of what this looks like:: sentence := "Store it in the neighboring harbor" @@ -728,7 +728,7 @@ module: Importer objects must have a single method, ``find_module(fullname, path=None)``. *fullname* will be a module or package name, e.g. ``string`` or -``distutils.core``. :meth:`find_module` must return a loader object that has a +``distutils.core``. :meth:`!find_module` must return a loader object that has a single method, ``load_module(fullname)``, that creates and returns the corresponding module object. @@ -925,7 +925,7 @@ Deletion is more straightforward:: >>> a [1, 3] -One can also now pass slice objects to the :meth:`__getitem__` methods of the +One can also now pass slice objects to the :meth:`~object.__getitem__` methods of the built-in sequences:: >>> range(10).__getitem__(slice(0, 5, 2)) @@ -1332,7 +1332,7 @@ complete list of changes, or look through the CVS logs for all the details. (Contributed by Kevin O'Connor.) 
* The IDLE integrated development environment has been updated using the code - from the IDLEfork project (http://idlefork.sourceforge.net). The most notable feature is + from the IDLEfork project (https://idlefork.sourceforge.net). The most notable feature is that the code being developed is now executed in a subprocess, meaning that there's no longer any need for manual ``reload()`` operations. IDLE's core code has been incorporated into the standard library as the :mod:`idlelib` package. @@ -1392,7 +1392,7 @@ complete list of changes, or look through the CVS logs for all the details. details. * The old and never-documented :mod:`linuxaudiodev` module has been deprecated, - and a new version named :mod:`ossaudiodev` has been added. The module was + and a new version named :mod:`!ossaudiodev` has been added. The module was renamed because the OSS sound drivers can be used on platforms other than Linux, and the interface has also been tidied and brought up to date in various ways. (Contributed by Greg Ward and Nicholas FitzRoy-Dale.) @@ -1474,7 +1474,7 @@ complete list of changes, or look through the CVS logs for all the details. * On Windows, the :mod:`socket` module now ships with Secure Sockets Layer (SSL) support. -* The value of the C :const:`PYTHON_API_VERSION` macro is now exposed at the +* The value of the C :c:macro:`PYTHON_API_VERSION` macro is now exposed at the Python level as ``sys.api_version``. The current exception can be cleared by calling the new :func:`sys.exc_clear` function. @@ -1555,7 +1555,7 @@ complete list of changes, or look through the CVS logs for all the details. # [0.36831796169281006, 0.37441694736480713, 0.35304892063140869] # [0.17574405670166016, 0.18193507194519043, 0.17565798759460449] -* The :mod:`Tix` module has received various bug fixes and updates for the +* The :mod:`!Tix` module has received various bug fixes and updates for the current version of the Tix package. * The :mod:`Tkinter` module now works with a thread-enabled version of Tcl. @@ -1596,7 +1596,7 @@ complete list of changes, or look through the CVS logs for all the details. module. Adding the mix-in as a superclass provides the full dictionary interface - whenever the class defines :meth:`__getitem__`, :meth:`__setitem__`, + whenever the class defines :meth:`~object.__getitem__`, :meth:`__setitem__`, :meth:`__delitem__`, and :meth:`keys`. For example:: >>> import UserDict @@ -1847,7 +1847,7 @@ specifically for allocating Python objects. :c:func:`PyObject_Malloc`, :c:func:`PyObject_Realloc`, and :c:func:`PyObject_Free`. * To allocate and free Python objects, use the "object" family - :c:func:`PyObject_New`, :c:func:`PyObject_NewVar`, and :c:func:`PyObject_Del`. + :c:macro:`PyObject_New`, :c:macro:`PyObject_NewVar`, and :c:func:`PyObject_Del`. Thanks to lots of work by Tim Peters, pymalloc in 2.3 also provides debugging features to catch memory overwrites and doubled frees in both extension modules @@ -1886,10 +1886,10 @@ Changes to Python's build process and to the C API include: (:file:`libpython2.3.so`) by supplying :option:`!--enable-shared` when running Python's :program:`configure` script. (Contributed by Ondrej Palkovsky.) -* The :c:macro:`DL_EXPORT` and :c:macro:`DL_IMPORT` macros are now deprecated. +* The :c:macro:`!DL_EXPORT` and :c:macro:`!DL_IMPORT` macros are now deprecated. 
Initialization functions for Python extension modules should now be declared using the new macro :c:macro:`PyMODINIT_FUNC`, while the Python core will - generally use the :c:macro:`PyAPI_FUNC` and :c:macro:`PyAPI_DATA` macros. + generally use the :c:macro:`!PyAPI_FUNC` and :c:macro:`!PyAPI_DATA` macros. * The interpreter can be compiled without any docstrings for the built-in functions and modules by supplying :option:`!--without-doc-strings` to the @@ -1897,12 +1897,12 @@ Changes to Python's build process and to the C API include: but will also mean that you can't get help for Python's built-ins. (Contributed by Gustavo Niemeyer.) -* The :c:func:`PyArg_NoArgs` macro is now deprecated, and code that uses it +* The :c:func:`!PyArg_NoArgs` macro is now deprecated, and code that uses it should be changed. For Python 2.2 and later, the method definition table can - specify the :const:`METH_NOARGS` flag, signalling that there are no arguments, + specify the :c:macro:`METH_NOARGS` flag, signalling that there are no arguments, and the argument checking can then be removed. If compatibility with pre-2.2 versions of Python is important, the code could use ``PyArg_ParseTuple(args, - "")`` instead, but this will be slower than using :const:`METH_NOARGS`. + "")`` instead, but this will be slower than using :c:macro:`METH_NOARGS`. * :c:func:`PyArg_ParseTuple` accepts new format characters for various sizes of unsigned integers: ``B`` for :c:expr:`unsigned char`, ``H`` for :c:expr:`unsigned @@ -1918,7 +1918,7 @@ Changes to Python's build process and to the C API include: seconds, according to one measurement). * It's now possible to define class and static methods for a C extension type by - setting either the :const:`METH_CLASS` or :const:`METH_STATIC` flags in a + setting either the :c:macro:`METH_CLASS` or :c:macro:`METH_STATIC` flags in a method's :c:type:`PyMethodDef` structure. * Python now includes a copy of the Expat XML parser's source code, removing any diff --git a/Doc/whatsnew/2.4.rst b/Doc/whatsnew/2.4.rst index 63e819876ce3104..cab321c3e54d18e 100644 --- a/Doc/whatsnew/2.4.rst +++ b/Doc/whatsnew/2.4.rst @@ -756,7 +756,7 @@ API that perform ASCII-only conversions, ignoring the locale setting: :c:expr:`double` to an ASCII string. The code for these functions came from the GLib library -(https://developer.gnome.org/glib/stable/), whose developers kindly +(https://developer-old.gnome.org/glib/2.26/), whose developers kindly relicensed the relevant functions and donated them to the Python Software Foundation. The :mod:`locale` module can now change the numeric locale, letting extensions such as GTK+ produce the correct results. @@ -1191,7 +1191,7 @@ complete list of changes, or look through the CVS logs for all the details. effect is to make :file:`.pyc` files significantly smaller. (Contributed by Martin von Löwis.) -* The :mod:`nntplib` module's :class:`NNTP` class gained :meth:`description` and +* The :mod:`!nntplib` module's :class:`NNTP` class gained :meth:`description` and :meth:`descriptions` methods to retrieve newsgroup descriptions for a single group or for a range of groups. (Contributed by Jürgen A. Erhard.) @@ -1468,7 +1468,7 @@ Some of the changes to Python's build process and to the C API are: *X* is a NaN. (Contributed by Tim Peters.) * C code can avoid unnecessary locking by using the new - :c:func:`PyEval_ThreadsInitialized` function to tell if any thread operations + :c:func:`!PyEval_ThreadsInitialized` function to tell if any thread operations have been performed. 
If this function returns false, no lock operations are needed. (Contributed by Nick Coghlan.) @@ -1476,7 +1476,7 @@ Some of the changes to Python's build process and to the C API are: :c:func:`PyArg_ParseTupleAndKeywords` but takes a :c:type:`va_list` instead of a number of arguments. (Contributed by Greg Chapman.) -* A new method flag, :const:`METH_COEXISTS`, allows a function defined in slots +* A new method flag, :c:macro:`METH_COEXIST`, allows a function defined in slots to co-exist with a :c:type:`PyCFunction` having the same name. This can halve the access time for a method such as :meth:`set.__contains__`. (Contributed by Raymond Hettinger.) @@ -1491,7 +1491,7 @@ Some of the changes to Python's build process and to the C API are: though that processor architecture doesn't call that register "the TSC register". (Contributed by Jeremy Hylton.) -* The :c:type:`tracebackobject` type has been renamed to +* The :c:type:`!tracebackobject` type has been renamed to :c:type:`PyTracebackObject`. .. ====================================================================== diff --git a/Doc/whatsnew/2.5.rst b/Doc/whatsnew/2.5.rst index dcfaef6ed294947..64b951da3fd5b87 100644 --- a/Doc/whatsnew/2.5.rst +++ b/Doc/whatsnew/2.5.rst @@ -575,15 +575,15 @@ structure is:: with-block The expression is evaluated, and it should result in an object that supports the -context management protocol (that is, has :meth:`__enter__` and :meth:`__exit__` +context management protocol (that is, has :meth:`~object.__enter__` and :meth:`~object.__exit__` methods. -The object's :meth:`__enter__` is called before *with-block* is executed and +The object's :meth:`~object.__enter__` is called before *with-block* is executed and therefore can run set-up code. It also may return a value that is bound to the name *variable*, if given. (Note carefully that *variable* is *not* assigned the result of *expression*.) -After execution of the *with-block* is finished, the object's :meth:`__exit__` +After execution of the *with-block* is finished, the object's :meth:`~object.__exit__` method is called, even if the block raised an exception, and can therefore run clean-up code. @@ -609,7 +609,7 @@ part-way through the block. .. note:: In this case, *f* is the same object created by :func:`open`, because - :meth:`file.__enter__` returns *self*. + :meth:`~object.__enter__` returns *self*. The :mod:`threading` module's locks and condition variables also support the ':keyword:`with`' statement:: @@ -652,10 +652,10 @@ underlying implementation and should keep reading. A high-level explanation of the context management protocol is: * The expression is evaluated and should result in an object called a "context - manager". The context manager must have :meth:`__enter__` and :meth:`__exit__` + manager". The context manager must have :meth:`~object.__enter__` and :meth:`~object.__exit__` methods. -* The context manager's :meth:`__enter__` method is called. The value returned +* The context manager's :meth:`~object.__enter__` method is called. The value returned is assigned to *VAR*. If no ``'as VAR'`` clause is present, the value is simply discarded. @@ -669,7 +669,7 @@ A high-level explanation of the context management protocol is: if you do the author of the code containing the ':keyword:`with`' statement will never realize anything went wrong. 
-* If *BLOCK* didn't raise an exception, the :meth:`__exit__` method is still +* If *BLOCK* didn't raise an exception, the :meth:`~object.__exit__` method is still called, but *type*, *value*, and *traceback* are all ``None``. Let's think through an example. I won't present detailed code but will only @@ -703,7 +703,7 @@ rolled back if there's an exception. Here's the basic interface for def rollback (self): "Rolls back current transaction" -The :meth:`__enter__` method is pretty easy, having only to start a new +The :meth:`~object.__enter__` method is pretty easy, having only to start a new transaction. For this application the resulting cursor object would be a useful result, so the method will return it. The user can then add ``as cursor`` to their ':keyword:`with`' statement to bind the cursor to a variable name. :: @@ -715,7 +715,7 @@ their ':keyword:`with`' statement to bind the cursor to a variable name. :: cursor = self.cursor() return cursor -The :meth:`__exit__` method is the most complicated because it's where most of +The :meth:`~object.__exit__` method is the most complicated because it's where most of the work has to be done. The method has to check if an exception occurred. If there was no exception, the transaction is committed. The transaction is rolled back if there was an exception. @@ -748,10 +748,10 @@ are useful for writing objects for use with the ':keyword:`with`' statement. The decorator is called :func:`contextmanager`, and lets you write a single generator function instead of defining a new class. The generator should yield exactly one value. The code up to the :keyword:`yield` will be executed as the -:meth:`__enter__` method, and the value yielded will be the method's return +:meth:`~object.__enter__` method, and the value yielded will be the method's return value that will get bound to the variable in the ':keyword:`with`' statement's :keyword:`!as` clause, if any. The code after the :keyword:`yield` will be -executed in the :meth:`__exit__` method. Any exception raised in the block will +executed in the :meth:`~object.__exit__` method. Any exception raised in the block will be raised by the :keyword:`!yield` statement. Our database example from the previous section could be written using this @@ -954,7 +954,7 @@ The return value must be either a Python integer or long integer. The interpreter will check that the type returned is correct, and raises a :exc:`TypeError` if this requirement isn't met. -A corresponding :attr:`nb_index` slot was added to the C-level +A corresponding :c:member:`~PyNumberMethods.nb_index` slot was added to the C-level :c:type:`PyNumberMethods` structure to let C extensions implement this protocol. ``PyNumber_Index(obj)`` can be used in extension code to call the :meth:`__index__` function and retrieve its result. @@ -1222,7 +1222,7 @@ Here's a partial list of the most notable changes, sorted alphabetically by module name. Consult the :file:`Misc/NEWS` file in the source tree for a more complete list of changes, or look through the SVN logs for all the details. -* The :mod:`audioop` module now supports the a-LAW encoding, and the code for +* The :mod:`!audioop` module now supports the a-LAW encoding, and the code for u-LAW encoding has been improved. (Contributed by Lars Immisch.) * The :mod:`codecs` module gained support for incremental codecs. The @@ -1347,7 +1347,7 @@ complete list of changes, or look through the SVN logs for all the details. 
:func:`input` function to allow opening files in binary or :term:`universal newlines` mode. Another new parameter, *openhook*, lets you use a function other than :func:`open` to open the input files. Once you're iterating over - the set of files, the :class:`FileInput` object's new :meth:`fileno` returns + the set of files, the :class:`FileInput` object's new :meth:`~fileinput.fileno` returns the file descriptor for the currently opened file. (Contributed by Georg Brandl.) @@ -1422,13 +1422,13 @@ complete list of changes, or look through the SVN logs for all the details. (Contributed by Gregory K. Johnson. Funding was provided by Google's 2005 Summer of Code.) -* New module: the :mod:`msilib` module allows creating Microsoft Installer +* New module: the :mod:`!msilib` module allows creating Microsoft Installer :file:`.msi` files and CAB files. Some support for reading the :file:`.msi` database is also included. (Contributed by Martin von Löwis.) -* The :mod:`nis` module now supports accessing domains other than the system - default domain by supplying a *domain* argument to the :func:`nis.match` and - :func:`nis.maps` functions. (Contributed by Ben Bell.) +* The :mod:`!nis` module now supports accessing domains other than the system + default domain by supplying a *domain* argument to the :func:`!nis.match` and + :func:`!nis.maps` functions. (Contributed by Ben Bell.) * The :mod:`operator` module's :func:`itemgetter` and :func:`attrgetter` functions now support multiple fields. A call such as @@ -1448,10 +1448,10 @@ complete list of changes, or look through the SVN logs for all the details. return times that are precise to fractions of a second; not all systems support such precision.) - Constants named :attr:`os.SEEK_SET`, :attr:`os.SEEK_CUR`, and - :attr:`os.SEEK_END` have been added; these are the parameters to the + Constants named :const:`os.SEEK_SET`, :const:`os.SEEK_CUR`, and + :const:`os.SEEK_END` have been added; these are the parameters to the :func:`os.lseek` function. Two new constants for locking are - :attr:`os.O_SHLOCK` and :attr:`os.O_EXLOCK`. + :const:`os.O_SHLOCK` and :const:`os.O_EXLOCK`. Two new functions, :func:`wait3` and :func:`wait4`, were added. They're similar the :func:`waitpid` function which waits for a child process to exit and returns @@ -1542,7 +1542,7 @@ complete list of changes, or look through the SVN logs for all the details. :meth:`getproto` accessor methods to retrieve the family, type, and protocol values for the socket. -* New module: the :mod:`spwd` module provides functions for accessing the shadow +* New module: the :mod:`!spwd` module provides functions for accessing the shadow password database on systems that support shadow passwords. * The :mod:`struct` is now faster because it compiles format strings into @@ -1602,7 +1602,7 @@ complete list of changes, or look through the SVN logs for all the details. * The :mod:`unicodedata` module has been updated to use version 4.1.0 of the Unicode character database. Version 3.2.0 is required by some specifications, - so it's still available as :attr:`unicodedata.ucd_3_2_0`. + so it's still available as :data:`unicodedata.ucd_3_2_0`. * New module: the :mod:`uuid` module generates universally unique identifiers (UUIDs) according to :rfc:`4122`. The RFC defines several different UUID @@ -2119,9 +2119,9 @@ Changes to Python's build process and to the C API include: the various AST nodes in :file:`Parser/Python.asdl`. 
A Python script reads this file and generates a set of C structure definitions in :file:`Include/Python-ast.h`. The :c:func:`PyParser_ASTFromString` and - :c:func:`PyParser_ASTFromFile`, defined in :file:`Include/pythonrun.h`, take + :c:func:`!PyParser_ASTFromFile`, defined in :file:`Include/pythonrun.h`, take Python source as input and return the root of an AST representing the contents. - This AST can then be turned into a code object by :c:func:`PyAST_Compile`. For + This AST can then be turned into a code object by :c:func:`!PyAST_Compile`. For more information, read the source code, and then ask questions on python-dev. The AST code was developed under Jeremy Hylton's management, and implemented by @@ -2151,8 +2151,8 @@ Changes to Python's build process and to the C API include: Previously these different families all reduced to the platform's :c:func:`malloc` and :c:func:`free` functions. This meant it didn't matter if - you got things wrong and allocated memory with the :c:func:`PyMem` function but - freed it with the :c:func:`PyObject` function. With 2.5's changes to obmalloc, + you got things wrong and allocated memory with the ``PyMem`` function but + freed it with the ``PyObject`` function. With 2.5's changes to obmalloc, these families now do different things and mismatches will probably result in a segfault. You should carefully test your C extension modules with Python 2.5. @@ -2172,7 +2172,7 @@ Changes to Python's build process and to the C API include: ``Py_LOCAL(type)`` declares the function as returning a value of the specified *type* and uses a fast-calling qualifier. ``Py_LOCAL_INLINE(type)`` does the same thing and also requests the - function be inlined. If :c:func:`PY_LOCAL_AGGRESSIVE` is defined before + function be inlined. If macro :c:macro:`!PY_LOCAL_AGGRESSIVE` is defined before :file:`python.h` is included, a set of more aggressive optimizations are enabled for the module; you should benchmark the results to find out if these optimizations actually make the code faster. (Contributed by Fredrik Lundh at @@ -2181,7 +2181,7 @@ Changes to Python's build process and to the C API include: * ``PyErr_NewException(name, base, dict)`` can now accept a tuple of base classes as its *base* argument. (Contributed by Georg Brandl.) -* The :c:func:`PyErr_Warn` function for issuing warnings is now deprecated in +* The :c:func:`!PyErr_Warn` function for issuing warnings is now deprecated in favour of ``PyErr_WarnEx(category, message, stacklevel)`` which lets you specify the number of stack frames separating this function and the caller. A *stacklevel* of 1 is the function calling :c:func:`PyErr_WarnEx`, 2 is the @@ -2191,7 +2191,7 @@ Changes to Python's build process and to the C API include: compiled with a C++ compiler without errors. (Implemented by Anthony Baxter, Martin von Löwis, Skip Montanaro.) -* The :c:func:`PyRange_New` function was removed. It was never documented, never +* The :c:func:`!PyRange_New` function was removed. It was never documented, never used in the core code, and had dangerously lax error checking. In the unlikely case that your extensions were using it, you can replace it by something like the following:: diff --git a/Doc/whatsnew/2.6.rst b/Doc/whatsnew/2.6.rst index 34f2656f765c7df..016de153f3dd6ac 100644 --- a/Doc/whatsnew/2.6.rst +++ b/Doc/whatsnew/2.6.rst @@ -121,11 +121,11 @@ about features that will be removed in Python 3.0. You can run code with this switch to see how much work will be necessary to port code to 3.0. 
The value of this switch is available to Python code as the boolean variable :data:`sys.py3kwarning`, -and to C extension code as :c:data:`Py_Py3kWarningFlag`. +and to C extension code as :c:data:`!Py_Py3kWarningFlag`. .. seealso:: - The 3xxx series of PEPs, which contains proposals for Python 3.0. + The 3\ *xxx* series of PEPs, which contains proposals for Python 3.0. :pep:`3000` describes the development process for Python 3.0. Start with :pep:`3100` that describes the general goals for Python 3.0, and then explore the higher-numbered PEPS that propose @@ -172,7 +172,7 @@ this edition of "What's New in Python" links to the bug/patch item for each change. Hosting of the Python bug tracker is kindly provided by -`Upfront Systems `__ +`Upfront Systems `__ of Stellenbosch, South Africa. Martin von Löwis put a lot of effort into importing existing bugs and patches from SourceForge; his scripts for this import operation are at @@ -269,15 +269,15 @@ structure is:: with-block The expression is evaluated, and it should result in an object that supports the -context management protocol (that is, has :meth:`__enter__` and :meth:`__exit__` +context management protocol (that is, has :meth:`~object.__enter__` and :meth:`~object.__exit__` methods). -The object's :meth:`__enter__` is called before *with-block* is executed and +The object's :meth:`~object.__enter__` is called before *with-block* is executed and therefore can run set-up code. It also may return a value that is bound to the name *variable*, if given. (Note carefully that *variable* is *not* assigned the result of *expression*.) -After execution of the *with-block* is finished, the object's :meth:`__exit__` +After execution of the *with-block* is finished, the object's :meth:`~object.__exit__` method is called, even if the block raised an exception, and can therefore run clean-up code. @@ -296,7 +296,7 @@ part-way through the block. .. note:: In this case, *f* is the same object created by :func:`open`, because - :meth:`file.__enter__` returns *self*. + :meth:`~object.__enter__` returns *self*. The :mod:`threading` module's locks and condition variables also support the ':keyword:`with`' statement:: @@ -339,16 +339,16 @@ underlying implementation and should keep reading. A high-level explanation of the context management protocol is: * The expression is evaluated and should result in an object called a "context - manager". The context manager must have :meth:`__enter__` and :meth:`__exit__` + manager". The context manager must have :meth:`~object.__enter__` and :meth:`~object.__exit__` methods. -* The context manager's :meth:`__enter__` method is called. The value returned +* The context manager's :meth:`~object.__enter__` method is called. The value returned is assigned to *VAR*. If no ``as VAR`` clause is present, the value is simply discarded. * The code in *BLOCK* is executed. -* If *BLOCK* raises an exception, the context manager's :meth:`__exit__` method +* If *BLOCK* raises an exception, the context manager's :meth:`~object.__exit__` method is called with three arguments, the exception details (``type, value, traceback``, the same values returned by :func:`sys.exc_info`, which can also be ``None`` if no exception occurred). The method's return value controls whether an exception @@ -357,7 +357,7 @@ A high-level explanation of the context management protocol is: if you do the author of the code containing the ':keyword:`with`' statement will never realize anything went wrong. 
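[Editorial aside, not part of the patch: to make the :meth:`~object.__enter__`/:meth:`~object.__exit__` protocol described above concrete, here is a minimal, hypothetical context manager; the ``Timer`` class is invented for illustration.]

.. code-block:: python

   import time

   class Timer(object):
       """Hypothetical context manager that times the with-block."""

       def __enter__(self):
           self.start = time.time()
           return self                      # bound to the 'as' variable, if any

       def __exit__(self, exc_type, exc_value, traceback):
           self.elapsed = time.time() - self.start
           return False                     # do not swallow exceptions

   with Timer() as t:
       sum(range(100000))
   print(t.elapsed)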
-* If *BLOCK* didn't raise an exception, the :meth:`__exit__` method is still +* If *BLOCK* didn't raise an exception, the :meth:`~object.__exit__` method is still called, but *type*, *value*, and *traceback* are all ``None``. Let's think through an example. I won't present detailed code but will only @@ -391,7 +391,7 @@ rolled back if there's an exception. Here's the basic interface for def rollback(self): "Rolls back current transaction" -The :meth:`__enter__` method is pretty easy, having only to start a new +The :meth:`~object.__enter__` method is pretty easy, having only to start a new transaction. For this application the resulting cursor object would be a useful result, so the method will return it. The user can then add ``as cursor`` to their ':keyword:`with`' statement to bind the cursor to a variable name. :: @@ -403,7 +403,7 @@ their ':keyword:`with`' statement to bind the cursor to a variable name. :: cursor = self.cursor() return cursor -The :meth:`__exit__` method is the most complicated because it's where most of +The :meth:`~object.__exit__` method is the most complicated because it's where most of the work has to be done. The method has to check if an exception occurred. If there was no exception, the transaction is committed. The transaction is rolled back if there was an exception. @@ -436,10 +436,10 @@ are useful when writing objects for use with the ':keyword:`with`' statement. The decorator is called :func:`contextmanager`, and lets you write a single generator function instead of defining a new class. The generator should yield exactly one value. The code up to the :keyword:`yield` will be executed as the -:meth:`__enter__` method, and the value yielded will be the method's return +:meth:`~object.__enter__` method, and the value yielded will be the method's return value that will get bound to the variable in the ':keyword:`with`' statement's :keyword:`!as` clause, if any. The code after the :keyword:`!yield` will be -executed in the :meth:`__exit__` method. Any exception raised in the block will +executed in the :meth:`~object.__exit__` method. Any exception raised in the block will be raised by the :keyword:`!yield` statement. Using this decorator, our database example from the previous section @@ -875,11 +875,11 @@ The signature of the new function is:: The parameters are: - * *args*: positional arguments whose values will be printed out. - * *sep*: the separator, which will be printed between arguments. - * *end*: the ending text, which will be printed after all of the - arguments have been output. - * *file*: the file object to which the output will be sent. +* *args*: positional arguments whose values will be printed out. +* *sep*: the separator, which will be printed between arguments. +* *end*: the ending text, which will be printed after all of the + arguments have been output. +* *file*: the file object to which the output will be sent. .. seealso:: @@ -977,7 +977,7 @@ can be used to include Unicode characters:: print len(s) # 12 Unicode characters At the C level, Python 3.0 will rename the existing 8-bit -string type, called :c:type:`PyStringObject` in Python 2.x, +string type, called :c:type:`!PyStringObject` in Python 2.x, to :c:type:`PyBytesObject`. Python 2.6 uses ``#define`` to support using the names :c:func:`PyBytesObject`, :c:func:`PyBytes_Check`, :c:func:`PyBytes_FromStringAndSize`, @@ -1138,13 +1138,13 @@ indicate that the external caller is done. The *flags* argument to :c:func:`PyObject_GetBuffer` specifies constraints upon the memory returned. 
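[Editorial aside, not part of the patch: a hedged sketch of the :func:`contextlib.contextmanager` decorator discussed above. The transaction-style names are hypothetical stand-ins for the database example in the text; any DB-API style connection object would do.]

.. code-block:: python

   from contextlib import contextmanager

   @contextmanager
   def transaction(connection):
       cursor = connection.cursor()
       try:
           yield cursor            # code before yield acts as __enter__
           connection.commit()     # code after yield acts as __exit__ (success)
       except Exception:
           connection.rollback()   # __exit__ path when the block raised
           raise

   # Usage (hypothetical):
   #     with transaction(conn) as cursor:
   #         cursor.execute("insert into t values (1)")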
Some examples are: - * :const:`PyBUF_WRITABLE` indicates that the memory must be writable. +* :c:macro:`PyBUF_WRITABLE` indicates that the memory must be writable. - * :const:`PyBUF_LOCK` requests a read-only or exclusive lock on the memory. +* :c:macro:`PyBUF_LOCK` requests a read-only or exclusive lock on the memory. - * :const:`PyBUF_C_CONTIGUOUS` and :const:`PyBUF_F_CONTIGUOUS` - requests a C-contiguous (last dimension varies the fastest) or - Fortran-contiguous (first dimension varies the fastest) array layout. +* :c:macro:`PyBUF_C_CONTIGUOUS` and :c:macro:`PyBUF_F_CONTIGUOUS` + requests a C-contiguous (last dimension varies the fastest) or + Fortran-contiguous (first dimension varies the fastest) array layout. Two new argument codes for :c:func:`PyArg_ParseTuple`, ``s*`` and ``z*``, return locked buffer objects for a parameter. @@ -1433,7 +1433,7 @@ one, :func:`math.trunc`, that's been backported to Python 2.6. `Scheme's numerical tower `__, from the Guile manual. - `Scheme's number datatypes `__ from the R5RS Scheme specification. + `Scheme's number datatypes `__ from the R5RS Scheme specification. The :mod:`fractions` Module @@ -1737,7 +1737,7 @@ Optimizations (Contributed by Antoine Pitrou.) Memory usage is reduced by using pymalloc for the Unicode string's data. -* The ``with`` statement now stores the :meth:`__exit__` method on the stack, +* The ``with`` statement now stores the :meth:`~object.__exit__` method on the stack, producing a small speedup. (Implemented by Jeffrey Yasskin.) * To reduce memory usage, the garbage collector will now clear internal @@ -1805,15 +1805,15 @@ changes, or look through the Subversion logs for all the details. available, instead of restricting itself to protocol 1. (Contributed by W. Barnes.) -* The :mod:`cgi` module will now read variables from the query string +* The :mod:`!cgi` module will now read variables from the query string of an HTTP POST request. This makes it possible to use form actions with URLs that include query strings such as "/cgi-bin/add.py?category=1". (Contributed by Alexandre Fiori and Nubis; :issue:`1817`.) The :func:`parse_qs` and :func:`parse_qsl` functions have been - relocated from the :mod:`cgi` module to the :mod:`urlparse` module. - The versions still available in the :mod:`cgi` module will + relocated from the :mod:`!cgi` module to the :mod:`urlparse` module. + The versions still available in the :mod:`!cgi` module will trigger :exc:`PendingDeprecationWarning` messages in 2.6 (:issue:`600362`). @@ -1850,8 +1850,8 @@ changes, or look through the Subversion logs for all the details. special values and floating-point exceptions in a manner consistent with Annex 'G' of the C99 standard. -* A new data type in the :mod:`collections` module: :class:`namedtuple(typename, - fieldnames)` is a factory function that creates subclasses of the standard tuple +* A new data type in the :mod:`collections` module: ``namedtuple(typename, fieldnames)`` + is a factory function that creates subclasses of the standard tuple whose fields are accessible by name as well as index. For example:: >>> var_type = collections.namedtuple('variable', @@ -1873,7 +1873,7 @@ changes, or look through the Subversion logs for all the details. variable(id=1, name='amplitude', type='int', size=4) Several places in the standard library that returned tuples have - been modified to return :class:`namedtuple` instances. For example, + been modified to return :func:`namedtuple` instances. 
For example, the :meth:`Decimal.as_tuple` method now returns a named tuple with :attr:`sign`, :attr:`digits`, and :attr:`exponent` fields. @@ -2289,7 +2289,7 @@ changes, or look through the Subversion logs for all the details. (Contributed by Raymond Hettinger; :issue:`1861`.) * The :mod:`select` module now has wrapper functions - for the Linux :c:func:`epoll` and BSD :c:func:`kqueue` system calls. + for the Linux :c:func:`!epoll` and BSD :c:func:`!kqueue` system calls. :meth:`modify` method was added to the existing :class:`poll` objects; ``pollobj.modify(fd, eventmask)`` takes a file descriptor or file object and an event mask, modifying the recorded event mask @@ -2328,7 +2328,7 @@ changes, or look through the Subversion logs for all the details. one for reading and one for writing. The writable descriptor will be passed to :func:`set_wakeup_fd`, and the readable descriptor will be added to the list of descriptors monitored by the event loop via - :c:func:`select` or :c:func:`poll`. + :c:func:`!select` or :c:func:`!poll`. On receiving a signal, a byte will be written and the main event loop will be woken up, avoiding the need to poll. @@ -2363,7 +2363,7 @@ changes, or look through the Subversion logs for all the details. negotiation itself. (Patch contributed by Bill Fenner; :issue:`829951`.) -* The :mod:`socket` module now supports TIPC (http://tipc.sourceforge.net/), +* The :mod:`socket` module now supports TIPC (https://tipc.sourceforge.net/), a high-performance non-IP-based protocol designed for use in clustered environments. TIPC addresses are 4- or 5-tuples. (Contributed by Alberto Bertogli; :issue:`1646`.) @@ -2465,7 +2465,7 @@ changes, or look through the Subversion logs for all the details. (All changes contributed by Lars Gustäbel). * An optional ``timeout`` parameter was added to the - :class:`telnetlib.Telnet` class constructor, specifying a timeout + :class:`!telnetlib.Telnet` class constructor, specifying a timeout measured in seconds. (Added by Facundo Batista.) * The :class:`tempfile.NamedTemporaryFile` class usually deletes @@ -2982,7 +2982,7 @@ Changes to Python's build process and to the C API include: * Python now must be compiled with C89 compilers (after 19 years!). This means that the Python source tree has dropped its - own implementations of :c:func:`memmove` and :c:func:`strerror`, which + own implementations of :c:func:`!memmove` and :c:func:`!strerror`, which are in the C89 standard library. * Python 2.6 can be built with Microsoft Visual Studio 2008 (version @@ -3012,11 +3012,11 @@ Changes to Python's build process and to the C API include: bug occurred if one thread closed a file object while another thread was reading from or writing to the object. In 2.6 file objects have a reference count, manipulated by the - :c:func:`PyFile_IncUseCount` and :c:func:`PyFile_DecUseCount` + :c:func:`!PyFile_IncUseCount` and :c:func:`!PyFile_DecUseCount` functions. File objects can't be closed unless the reference count - is zero. :c:func:`PyFile_IncUseCount` should be called while the GIL + is zero. :c:func:`!PyFile_IncUseCount` should be called while the GIL is still held, before carrying out an I/O operation using the - ``FILE *`` pointer, and :c:func:`PyFile_DecUseCount` should be called + ``FILE *`` pointer, and :c:func:`!PyFile_DecUseCount` should be called immediately after the GIL is re-acquired. (Contributed by Antoine Pitrou and Gregory P. Smith.) 
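[Editorial aside, not part of the patch: a rough sketch of the Linux-only :class:`select.epoll` wrapper mentioned in the 2.6 notes above; a pipe stands in for a real socket or file descriptor.]

.. code-block:: python

   import os
   import select

   r, w = os.pipe()
   ep = select.epoll()
   ep.register(r, select.EPOLLIN)   # watch the read end for readability

   os.write(w, b"x")                # make the descriptor readable
   events = ep.poll(1.0)            # list of (fd, event mask) pairs
   print(events)

   ep.unregister(r)
   ep.close()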
@@ -3060,9 +3060,9 @@ Changes to Python's build process and to the C API include: * Some macros were renamed in both 3.0 and 2.6 to make it clearer that they are macros, - not functions. :c:macro:`Py_Size()` became :c:macro:`Py_SIZE()`, - :c:macro:`Py_Type()` became :c:macro:`Py_TYPE()`, and - :c:macro:`Py_Refcnt()` became :c:macro:`Py_REFCNT()`. + not functions. :c:macro:`!Py_Size()` became :c:macro:`Py_SIZE()`, + :c:macro:`!Py_Type()` became :c:macro:`Py_TYPE()`, and + :c:macro:`!Py_Refcnt()` became :c:macro:`Py_REFCNT()`. The mixed-case macros are still available in Python 2.6 for backward compatibility. (:issue:`1629`) @@ -3135,7 +3135,7 @@ Port-Specific Changes: Windows registry reflection for 32-bit processes running on 64-bit systems. (:issue:`1753245`) -* The :mod:`msilib` module's :class:`Record` object +* The :mod:`!msilib` module's :class:`Record` object gained :meth:`GetInteger` and :meth:`GetString` methods that return field values as an integer or a string. (Contributed by Floris Bruynooghe; :issue:`2125`.) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst index 276ab63b97f8a92..da66dd731831bcf 100644 --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -355,7 +355,7 @@ added as a more powerful replacement for the This means Python now supports three different modules for parsing command-line arguments: :mod:`getopt`, :mod:`optparse`, and :mod:`argparse`. The :mod:`getopt` module closely resembles the C -library's :c:func:`getopt` function, so it remains useful if you're writing a +library's :c:func:`!getopt` function, so it remains useful if you're writing a Python prototype that will eventually be rewritten in C. :mod:`optparse` becomes redundant, but there are no plans to remove it because there are many scripts still using it, and there's no @@ -930,8 +930,8 @@ Optimizations Several performance enhancements have been added: * A new opcode was added to perform the initial setup for - :keyword:`with` statements, looking up the :meth:`__enter__` and - :meth:`__exit__` methods. (Contributed by Benjamin Peterson.) + :keyword:`with` statements, looking up the :meth:`~object.__enter__` and + :meth:`~object.__exit__` methods. (Contributed by Benjamin Peterson.) * The garbage collector now performs better for one common usage pattern: when many objects are being allocated without deallocating @@ -1331,6 +1331,7 @@ changes, or look through the Subversion logs for all the details. >>> from inspect import getcallargs >>> def f(a, b=1, *pos, **named): ... pass + ... >>> getcallargs(f, 1, 2, 3) {'a': 1, 'b': 2, 'pos': (3,), 'named': {}} >>> getcallargs(f, a=2, x=4) @@ -1428,7 +1429,7 @@ changes, or look through the Subversion logs for all the details. become very large. (Contributed by Charles Cazabon; :issue:`6963`.) -* The :mod:`nntplib` module now supports IPv6 addresses. +* The :mod:`!nntplib` module now supports IPv6 addresses. (Contributed by Derek Morr; :issue:`1664`.) * New functions: the :mod:`os` module wraps the following POSIX system @@ -1555,9 +1556,9 @@ changes, or look through the Subversion logs for all the details. :issue:`8484`.) The version of OpenSSL being used is now available as the module - attributes :data:`ssl.OPENSSL_VERSION` (a string), - :data:`ssl.OPENSSL_VERSION_INFO` (a 5-tuple), and - :data:`ssl.OPENSSL_VERSION_NUMBER` (an integer). (Added by Antoine + attributes :const:`ssl.OPENSSL_VERSION` (a string), + :const:`ssl.OPENSSL_VERSION_INFO` (a 5-tuple), and + :const:`ssl.OPENSSL_VERSION_NUMBER` (an integer). 
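[Editorial aside, not part of the patch: for the three argument-parsing modules compared in the 2.7 notes above, a minimal :mod:`argparse` sketch; the option names are invented for illustration.]

.. code-block:: python

   import argparse

   parser = argparse.ArgumentParser(description='Hypothetical demo tool')
   parser.add_argument('filename', help='input file to process')
   parser.add_argument('-v', '--verbose', action='store_true',
                       help='print extra progress information')
   args = parser.parse_args(['data.txt', '--verbose'])

   print(args.filename, args.verbose)      # data.txt True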
(Added by Antoine Pitrou; :issue:`8321`.) * The :mod:`struct` module will no longer silently ignore overflow @@ -2103,7 +2104,7 @@ Changes to Python's build process and to the C API include: * The latest release of the GNU Debugger, GDB 7, can be `scripted using Python - `__. + `__. When you begin debugging an executable program P, GDB will look for a file named ``P-gdb.py`` and automatically read it. Dave Malcolm contributed a :file:`python-gdb.py` that adds a number of @@ -2151,16 +2152,16 @@ Changes to Python's build process and to the C API include: * New function: stemming from the rewrite of string-to-float conversion, a new :c:func:`PyOS_string_to_double` function was added. The old - :c:func:`PyOS_ascii_strtod` and :c:func:`PyOS_ascii_atof` functions + :c:func:`!PyOS_ascii_strtod` and :c:func:`!PyOS_ascii_atof` functions are now deprecated. -* New function: :c:func:`PySys_SetArgvEx` sets the value of +* New function: :c:func:`!PySys_SetArgvEx` sets the value of ``sys.argv`` and can optionally update ``sys.path`` to include the directory containing the script named by ``sys.argv[0]`` depending on the value of an *updatepath* parameter. This function was added to close a security hole for applications - that embed Python. The old function, :c:func:`PySys_SetArgv`, would + that embed Python. The old function, :c:func:`!PySys_SetArgv`, would always update ``sys.path``, and sometimes it would add the current directory. This meant that, if you ran an application embedding Python in a directory controlled by someone else, attackers could @@ -2168,8 +2169,8 @@ Changes to Python's build process and to the C API include: :file:`os.py`) that your application would then import and run. If you maintain a C/C++ application that embeds Python, check - whether you're calling :c:func:`PySys_SetArgv` and carefully consider - whether the application should be using :c:func:`PySys_SetArgvEx` + whether you're calling :c:func:`!PySys_SetArgv` and carefully consider + whether the application should be using :c:func:`!PySys_SetArgvEx` with *updatepath* set to false. Security issue reported as `CVE-2008-5983 @@ -2194,13 +2195,13 @@ Changes to Python's build process and to the C API include: .. XXX these macros don't seem to be described in the c-api docs. -* Removed function: :c:macro:`PyEval_CallObject` is now only available +* Removed function: :c:func:`!PyEval_CallObject` is now only available as a macro. A function version was being kept around to preserve ABI linking compatibility, but that was in 1997; it can certainly be deleted by now. (Removed by Antoine Pitrou; :issue:`8276`.) -* New format codes: the :c:func:`PyFormat_FromString`, - :c:func:`PyFormat_FromStringV`, and :c:func:`PyErr_Format` functions now +* New format codes: the :c:func:`!PyString_FromFormat`, + :c:func:`!PyString_FromFormatV`, and :c:func:`PyErr_Format` functions now accept ``%lld`` and ``%llu`` format codes for displaying C's :c:expr:`long long` types. (Contributed by Mark Dickinson; :issue:`7228`.) @@ -2230,7 +2231,7 @@ Changes to Python's build process and to the C API include: * When using the :c:type:`PyMemberDef` structure to define attributes of a type, Python will no longer let you try to delete or set a - :const:`T_STRING_INPLACE` attribute. + :c:macro:`T_STRING_INPLACE` attribute. .. rev 79644 @@ -2286,10 +2287,10 @@ object, and then get the ``void *`` pointer, which will usually point to an array of pointers to the module's various API functions. 
There is an existing data type already used for this, -:c:type:`PyCObject`, but it doesn't provide type safety. Evil code +:c:type:`!PyCObject`, but it doesn't provide type safety. Evil code written in pure Python could cause a segmentation fault by taking a -:c:type:`PyCObject` from module A and somehow substituting it for the -:c:type:`PyCObject` in module B. Capsules know their own name, +:c:type:`!PyCObject` from module A and somehow substituting it for the +:c:type:`!PyCObject` in module B. Capsules know their own name, and getting the pointer requires providing the name: .. code-block:: c @@ -2309,10 +2310,10 @@ detect the mismatched name and return false. Refer to :ref:`using-capsules` for more information on using these objects. Python 2.7 now uses capsules internally to provide various -extension-module APIs, but the :c:func:`PyCObject_AsVoidPtr` was +extension-module APIs, but the :c:func:`!PyCObject_AsVoidPtr` was modified to handle capsules, preserving compile-time compatibility -with the :c:type:`CObject` interface. Use of -:c:func:`PyCObject_AsVoidPtr` will signal a +with the :c:type:`!PyCObject` interface. Use of +:c:func:`!PyCObject_AsVoidPtr` will signal a :exc:`PendingDeprecationWarning`, which is silent by default. Implemented in Python 3.1 and backported to 2.7 by Larry Hastings; @@ -2367,7 +2368,7 @@ Port-Specific Changes: Mac OS X installation and a user-installed copy of the same version. (Changed by Ronald Oussoren; :issue:`4865`.) - .. versionchanged:: 2.7.13 + .. versionchanged:: 2.7.13 As of 2.7.13, this change was removed. ``/Library/Python/2.7/site-packages``, the site-packages directory @@ -2382,8 +2383,8 @@ Port-Specific Changes: Mac OS X Port-Specific Changes: FreeBSD ----------------------------------- -* FreeBSD 7.1's :const:`SO_SETFIB` constant, used with - :func:`~socket.getsockopt`/:func:`~socket.setsockopt` to select an +* FreeBSD 7.1's :const:`SO_SETFIB` constant, used with the :func:`~socket.socket` methods + :func:`~socket.socket.getsockopt`/:func:`~socket.socket.setsockopt` to select an alternate routing table, is now available in the :mod:`socket` module. (Added by Kyle VanderBeek; :issue:`8235`.) @@ -2448,13 +2449,13 @@ that may require changes to your code: (Changed by Eric Smith; :issue:`5920`.) * Because of an optimization for the :keyword:`with` statement, the special - methods :meth:`__enter__` and :meth:`__exit__` must belong to the object's + methods :meth:`~object.__enter__` and :meth:`~object.__exit__` must belong to the object's type, and cannot be directly attached to the object's instance. This affects new-style classes (derived from :class:`object`) and C extension types. (:issue:`6101`.) * Due to a bug in Python 2.6, the *exc_value* parameter to - :meth:`__exit__` methods was often the string representation of the + :meth:`~object.__exit__` methods was often the string representation of the exception, not an instance. This was fixed in 2.7, so *exc_value* will be an instance as expected. (Fixed by Florent Xicluna; :issue:`7853`.) @@ -2539,16 +2540,16 @@ For C extensions: instead of triggering a :exc:`DeprecationWarning` (:issue:`5080`). * Use the new :c:func:`PyOS_string_to_double` function instead of the old - :c:func:`PyOS_ascii_strtod` and :c:func:`PyOS_ascii_atof` functions, + :c:func:`!PyOS_ascii_strtod` and :c:func:`!PyOS_ascii_atof` functions, which are now deprecated. 
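[Editorial aside, not part of the patch: a small sketch of the 2.7 porting note above that :meth:`~object.__enter__` and :meth:`~object.__exit__` are looked up on the type, not the instance; class names are hypothetical.]

.. code-block:: python

   class Managed(object):
       def __enter__(self):
           return self
       def __exit__(self, exc_type, exc_value, traceback):
           return False

   class Plain(object):
       pass

   broken = Plain()
   # Attaching the special methods to the *instance* is not enough:
   broken.__enter__ = lambda: broken
   broken.__exit__ = lambda *args: False

   with Managed():                  # works: methods live on the type
       pass

   try:
       with broken:                 # fails: the lookup skips instance attributes
           pass
   except (AttributeError, TypeError):
       print('with requires __enter__/__exit__ on the type')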
For applications that embed Python: -* The :c:func:`PySys_SetArgvEx` function was added, letting +* The :c:func:`!PySys_SetArgvEx` function was added, letting applications close a security hole when the existing - :c:func:`PySys_SetArgv` function was used. Check whether you're - calling :c:func:`PySys_SetArgv` and carefully consider whether the - application should be using :c:func:`PySys_SetArgvEx` with + :c:func:`!PySys_SetArgv` function was used. Check whether you're + calling :c:func:`!PySys_SetArgv` and carefully consider whether the + application should be using :c:func:`!PySys_SetArgvEx` with *updatepath* set to false. .. ====================================================================== diff --git a/Doc/whatsnew/3.0.rst b/Doc/whatsnew/3.0.rst index 63b24748d8aab69..b0c2529e7802132 100644 --- a/Doc/whatsnew/3.0.rst +++ b/Doc/whatsnew/3.0.rst @@ -789,7 +789,7 @@ Operators And Special Methods :attr:`__doc__`, :attr:`__globals__`, :attr:`~definition.__name__`, respectively. -* :meth:`__nonzero__` is now :meth:`__bool__`. +* :meth:`!__nonzero__` is now :meth:`~object.__bool__`. Builtins -------- @@ -840,7 +840,7 @@ Builtins need it; however, 99 percent of the time an explicit :keyword:`for` loop is more readable. -* Removed :func:`reload`. Use :func:`imp.reload`. +* Removed :func:`reload`. Use :func:`!imp.reload`. * Removed. :meth:`dict.has_key` -- use the :keyword:`in` operator instead. @@ -865,8 +865,8 @@ to the C API. * No more C API support for restricted execution. -* :c:func:`PyNumber_Coerce`, :c:func:`PyNumber_CoerceEx`, - :c:func:`PyMember_Get`, and :c:func:`PyMember_Set` C APIs are removed. +* :c:func:`!PyNumber_Coerce`, :c:func:`!PyNumber_CoerceEx`, + :c:func:`!PyMember_Get`, and :c:func:`!PyMember_Set` C APIs are removed. * New C API :c:func:`PyImport_ImportModuleNoBlock`, works like :c:func:`PyImport_ImportModule` but won't block on the import lock @@ -875,7 +875,7 @@ to the C API. * Renamed the boolean conversion C-level slot and method: ``nb_nonzero`` is now ``nb_bool``. -* Removed :c:macro:`METH_OLDARGS` and :c:macro:`WITH_CYCLE_GC` from the C API. +* Removed :c:macro:`!METH_OLDARGS` and :c:macro:`!WITH_CYCLE_GC` from the C API. .. ====================================================================== @@ -911,7 +911,7 @@ best strategy is the following: tests still pass. 3. Run the ``2to3`` source-to-source translator over your source code - tree. (See :ref:`2to3-reference` for more on this tool.) Run the + tree. Run the result of the translation under Python 3.0. Manually fix up any remaining issues, fixing problems until all tests pass again. diff --git a/Doc/whatsnew/3.1.rst b/Doc/whatsnew/3.1.rst index fba8816bb243a3f..e237179f4b1829f 100644 --- a/Doc/whatsnew/3.1.rst +++ b/Doc/whatsnew/3.1.rst @@ -370,14 +370,14 @@ New, Improved, and Deprecated Modules * The :mod:`io` module has three new constants for the :meth:`seek` method :data:`SEEK_SET`, :data:`SEEK_CUR`, and :data:`SEEK_END`. -* The :attr:`sys.version_info` tuple is now a named tuple:: +* The :data:`sys.version_info` tuple is now a named tuple:: >>> sys.version_info sys.version_info(major=3, minor=1, micro=0, releaselevel='alpha', serial=2) (Contributed by Ross Light; :issue:`4285`.) -* The :mod:`nntplib` and :mod:`imaplib` modules now support IPv6. +* The :mod:`!nntplib` and :mod:`imaplib` modules now support IPv6. (Contributed by Derek Morr; :issue:`1655` and :issue:`1664`.) 
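[Editorial aside, not part of the patch: a brief sketch of the :mod:`io` seek constants noted in the 3.1 section above, shown in Python 3 syntax with a throwaway temporary file.]

.. code-block:: python

   import io
   import tempfile

   with tempfile.TemporaryFile() as f:
       f.write(b'hello world')
       f.seek(0, io.SEEK_SET)       # absolute positioning: back to the start
       print(f.read(5))             # b'hello'
       f.seek(-5, io.SEEK_END)      # relative to the end of the file
       print(f.read())              # b'world'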
@@ -486,7 +486,7 @@ Changes to Python's build process and to the C API include: Apart from the performance improvements this change should be invisible to end users, with one exception: for testing and debugging purposes there's a - new :attr:`sys.int_info` that provides information about the + new :data:`sys.int_info` that provides information about the internal format, giving the number of bits per digit and the size in bytes of the C type used to store each digit:: @@ -501,16 +501,16 @@ Changes to Python's build process and to the C API include: (Contributed by Mark Dickinson and Lisandro Dalcrin; :issue:`5175`.) -* Deprecated :c:func:`PyNumber_Int`. Use :c:func:`PyNumber_Long` instead. +* Deprecated :c:func:`!PyNumber_Int`. Use :c:func:`PyNumber_Long` instead. (Contributed by Mark Dickinson; :issue:`4910`.) * Added a new :c:func:`PyOS_string_to_double` function to replace the - deprecated functions :c:func:`PyOS_ascii_strtod` and :c:func:`PyOS_ascii_atof`. + deprecated functions :c:func:`!PyOS_ascii_strtod` and :c:func:`!PyOS_ascii_atof`. (Contributed by Mark Dickinson; :issue:`5914`.) -* Added :c:type:`PyCapsule` as a replacement for the :c:type:`PyCObject` API. +* Added :c:type:`PyCapsule` as a replacement for the :c:type:`!PyCObject` API. The principal difference is that the new type has a well defined interface for passing typing safety information and a less complicated signature for calling a destructor. The old type had a problematic API and is now diff --git a/Doc/whatsnew/3.10.rst b/Doc/whatsnew/3.10.rst index 2389b1a63b1ca2b..df821d68eb8d9fc 100644 --- a/Doc/whatsnew/3.10.rst +++ b/Doc/whatsnew/3.10.rst @@ -2,8 +2,6 @@ What's New In Python 3.10 **************************** -:Release: |release| -:Date: |today| :Editor: Pablo Galindo Salgado .. Rules for maintenance: @@ -77,8 +75,9 @@ Interpreter improvements: New typing features: * :pep:`604`, Allow writing union types as X | Y -* :pep:`613`, Explicit Type Aliases * :pep:`612`, Parameter Specification Variables +* :pep:`613`, Explicit Type Aliases +* :pep:`647`, User-Defined Type Guards Important deprecations, removals or restrictions: @@ -222,116 +221,116 @@ have been incorporated. Some of the most notable ones are as follows: * Missing ``:`` before blocks: - .. code-block:: python + .. code-block:: python - >>> if rocket.position > event_horizon - File "", line 1 - if rocket.position > event_horizon - ^ - SyntaxError: expected ':' + >>> if rocket.position > event_horizon + File "", line 1 + if rocket.position > event_horizon + ^ + SyntaxError: expected ':' - (Contributed by Pablo Galindo in :issue:`42997`.) + (Contributed by Pablo Galindo in :issue:`42997`.) * Unparenthesised tuples in comprehensions targets: - .. code-block:: python + .. code-block:: python - >>> {x,y for x,y in zip('abcd', '1234')} - File "", line 1 - {x,y for x,y in zip('abcd', '1234')} - ^ - SyntaxError: did you forget parentheses around the comprehension target? + >>> {x,y for x,y in zip('abcd', '1234')} + File "", line 1 + {x,y for x,y in zip('abcd', '1234')} + ^ + SyntaxError: did you forget parentheses around the comprehension target? - (Contributed by Pablo Galindo in :issue:`43017`.) + (Contributed by Pablo Galindo in :issue:`43017`.) * Missing commas in collection literals and between expressions: - .. code-block:: python + .. code-block:: python - >>> items = { - ... x: 1, - ... y: 2 - ... z: 3, - File "", line 3 - y: 2 - ^ - SyntaxError: invalid syntax. Perhaps you forgot a comma? + >>> items = { + ... x: 1, + ... y: 2 + ... 
z: 3, + File "", line 3 + y: 2 + ^ + SyntaxError: invalid syntax. Perhaps you forgot a comma? - (Contributed by Pablo Galindo in :issue:`43822`.) + (Contributed by Pablo Galindo in :issue:`43822`.) * Multiple Exception types without parentheses: - .. code-block:: python + .. code-block:: python - >>> try: - ... build_dyson_sphere() - ... except NotEnoughScienceError, NotEnoughResourcesError: - File "", line 3 - except NotEnoughScienceError, NotEnoughResourcesError: - ^ - SyntaxError: multiple exception types must be parenthesized + >>> try: + ... build_dyson_sphere() + ... except NotEnoughScienceError, NotEnoughResourcesError: + File "", line 3 + except NotEnoughScienceError, NotEnoughResourcesError: + ^ + SyntaxError: multiple exception types must be parenthesized - (Contributed by Pablo Galindo in :issue:`43149`.) + (Contributed by Pablo Galindo in :issue:`43149`.) * Missing ``:`` and values in dictionary literals: - .. code-block:: python + .. code-block:: python - >>> values = { - ... x: 1, - ... y: 2, - ... z: - ... } - File "", line 4 - z: - ^ - SyntaxError: expression expected after dictionary key and ':' + >>> values = { + ... x: 1, + ... y: 2, + ... z: + ... } + File "", line 4 + z: + ^ + SyntaxError: expression expected after dictionary key and ':' - >>> values = {x:1, y:2, z w:3} - File "", line 1 - values = {x:1, y:2, z w:3} - ^ - SyntaxError: ':' expected after dictionary key + >>> values = {x:1, y:2, z w:3} + File "", line 1 + values = {x:1, y:2, z w:3} + ^ + SyntaxError: ':' expected after dictionary key - (Contributed by Pablo Galindo in :issue:`43823`.) + (Contributed by Pablo Galindo in :issue:`43823`.) * ``try`` blocks without ``except`` or ``finally`` blocks: - .. code-block:: python + .. code-block:: python - >>> try: - ... x = 2 - ... something = 3 - File "", line 3 - something = 3 - ^^^^^^^^^ - SyntaxError: expected 'except' or 'finally' block + >>> try: + ... x = 2 + ... something = 3 + File "", line 3 + something = 3 + ^^^^^^^^^ + SyntaxError: expected 'except' or 'finally' block - (Contributed by Pablo Galindo in :issue:`44305`.) + (Contributed by Pablo Galindo in :issue:`44305`.) * Usage of ``=`` instead of ``==`` in comparisons: - .. code-block:: python + .. code-block:: python - >>> if rocket.position = event_horizon: - File "", line 1 - if rocket.position = event_horizon: - ^ - SyntaxError: cannot assign to attribute here. Maybe you meant '==' instead of '='? + >>> if rocket.position = event_horizon: + File "", line 1 + if rocket.position = event_horizon: + ^ + SyntaxError: cannot assign to attribute here. Maybe you meant '==' instead of '='? - (Contributed by Pablo Galindo in :issue:`43797`.) + (Contributed by Pablo Galindo in :issue:`43797`.) * Usage of ``*`` in f-strings: - .. code-block:: python + .. code-block:: python - >>> f"Black holes {*all_black_holes} and revelations" - File "", line 1 - (*all_black_holes) - ^ - SyntaxError: f-string: cannot use starred expression here + >>> f"Black holes {*all_black_holes} and revelations" + File "", line 1 + (*all_black_holes) + ^ + SyntaxError: f-string: cannot use starred expression here - (Contributed by Pablo Galindo in :issue:`41064`.) + (Contributed by Pablo Galindo in :issue:`41064`.) IndentationErrors ~~~~~~~~~~~~~~~~~ @@ -366,10 +365,10 @@ raised from: (Contributed by Pablo Galindo in :issue:`38530`.) - .. warning:: - Notice this won't work if :c:func:`PyErr_Display` is not called to display the error - which can happen if some other custom error display function is used. 
This is a common - scenario in some REPLs like IPython. +.. warning:: + Notice this won't work if :c:func:`PyErr_Display` is not called to display the error + which can happen if some other custom error display function is used. This is a common + scenario in some REPLs like IPython. NameErrors ~~~~~~~~~~ @@ -388,10 +387,10 @@ was raised from: (Contributed by Pablo Galindo in :issue:`38530`.) - .. warning:: - Notice this won't work if :c:func:`PyErr_Display` is not called to display the error, - which can happen if some other custom error display function is used. This is a common - scenario in some REPLs like IPython. +.. warning:: + Notice this won't work if :c:func:`PyErr_Display` is not called to display the error, + which can happen if some other custom error display function is used. This is a common + scenario in some REPLs like IPython. PEP 626: Precise line numbers for debugging and other tools @@ -434,16 +433,16 @@ A match statement takes an expression and compares its value to successive patterns given as one or more case blocks. Specifically, pattern matching operates by: - 1. using data with type and shape (the ``subject``) - 2. evaluating the ``subject`` in the ``match`` statement - 3. comparing the subject with each pattern in a ``case`` statement - from top to bottom until a match is confirmed. - 4. executing the action associated with the pattern of the confirmed - match - 5. If an exact match is not confirmed, the last case, a wildcard ``_``, - if provided, will be used as the matching case. If an exact match is - not confirmed and a wildcard case does not exist, the entire match - block is a no-op. +1. using data with type and shape (the ``subject``) +2. evaluating the ``subject`` in the ``match`` statement +3. comparing the subject with each pattern in a ``case`` statement + from top to bottom until a match is confirmed. +4. executing the action associated with the pattern of the confirmed + match +5. If an exact match is not confirmed, the last case, a wildcard ``_``, + if provided, will be used as the matching case. If an exact match is + not confirmed and a wildcard case does not exist, the entire match + block is a no-op. Declarative approach ~~~~~~~~~~~~~~~~~~~~ @@ -879,7 +878,7 @@ Other Language Changes (Contributed by Raymond Hettinger in :issue:`43475`.) * A :exc:`SyntaxError` (instead of a :exc:`NameError`) will be raised when deleting - the :const:`__debug__` constant. (Contributed by Dong-hee Na in :issue:`45000`.) + the :const:`__debug__` constant. (Contributed by Donghee Na in :issue:`45000`.) * :exc:`SyntaxError` exceptions now have ``end_lineno`` and ``end_offset`` attributes. They will be ``None`` if not determined. @@ -888,7 +887,7 @@ Other Language Changes New Modules =========== -* None yet. +* None. Improved Modules @@ -1254,9 +1253,9 @@ descriptors without copying between kernel address space and user address space, where one of the file descriptors must refer to a pipe. (Contributed by Pablo Galindo in :issue:`41625`.) -Add :data:`~os.O_EVTONLY`, :data:`~os.O_FSYNC`, :data:`~os.O_SYMLINK` -and :data:`~os.O_NOFOLLOW_ANY` for macOS. -(Contributed by Dong-hee Na in :issue:`43106`.) +Add :const:`~os.O_EVTONLY`, :const:`~os.O_FSYNC`, :const:`~os.O_SYMLINK` +and :const:`~os.O_NOFOLLOW_ANY` for macOS. +(Contributed by Donghee Na in :issue:`43106`.) 
os.path ------- @@ -1320,7 +1319,7 @@ objects in the tree returned by :func:`pyclbr.readline` and shelve ------ -The :mod:`shelve` module now uses :data:`pickle.DEFAULT_PROTOCOL` by default +The :mod:`shelve` module now uses :const:`pickle.DEFAULT_PROTOCOL` by default instead of :mod:`pickle` protocol ``3`` when creating shelves. (Contributed by Zackery Spytz in :issue:`34204`.) @@ -1357,7 +1356,7 @@ The ssl module requires OpenSSL 1.1.1 or newer. (Contributed by Christian Heimes in :pep:`644` and :issue:`43669`.) The ssl module has preliminary support for OpenSSL 3.0.0 and new option -:data:`~ssl.OP_IGNORE_UNEXPECTED_EOF`. +:const:`~ssl.OP_IGNORE_UNEXPECTED_EOF`. (Contributed by Christian Heimes in :issue:`38820`, :issue:`43794`, :issue:`43788`, :issue:`43791`, :issue:`43799`, :issue:`43920`, :issue:`43789`, and :issue:`43811`.) @@ -1388,7 +1387,7 @@ Add a *timeout* parameter to the :func:`ssl.get_server_certificate` function. The ssl module uses heap-types and multi-phase initialization. (Contributed by Christian Heimes in :issue:`42333`.) -A new verify flag :data:`~ssl.VERIFY_X509_PARTIAL_CHAIN` has been added. +A new verify flag :const:`~ssl.VERIFY_X509_PARTIAL_CHAIN` has been added. (Contributed by l0x in :issue:`40849`.) sqlite3 @@ -1414,7 +1413,7 @@ _thread ------- :func:`_thread.interrupt_main` now takes an optional signal number to -simulate (the default is still :data:`signal.SIGINT`). +simulate (the default is still :const:`signal.SIGINT`). (Contributed by Antoine Pitrou in :issue:`43356`.) threading @@ -1509,7 +1508,7 @@ query parameter separators in :func:`urllib.parse.parse_qs` and :func:`urllib.parse.parse_qsl`. Due to security concerns, and to conform with newer W3C recommendations, this has been changed to allow only a single separator key, with ``&`` as the default. This change also affects -:func:`cgi.parse` and :func:`cgi.parse_multipart` as they use the affected +:func:`!cgi.parse` and :func:`!cgi.parse_multipart` as they use the affected functions internally. For more details, please see their respective documentation. (Contributed by Adam Goldschmidt, Senthil Kumaran and Ken Jin in :issue:`42967`.) @@ -1583,7 +1582,7 @@ Optimizations * The following built-in functions now support the faster :pep:`590` vectorcall calling convention: :func:`map`, :func:`filter`, :func:`reversed`, :func:`bool` and :func:`float`. - (Contributed by Dong-hee Na and Jeroen Demeyer in :issue:`43575`, :issue:`43287`, :issue:`41922`, :issue:`41873` and :issue:`41870`.) + (Contributed by Donghee Na and Jeroen Demeyer in :issue:`43575`, :issue:`43287`, :issue:`41922`, :issue:`41873` and :issue:`41870`.) * :class:`BZ2File` performance is improved by removing internal ``RLock``. This makes :class:`BZ2File` thread unsafe in the face of multiple simultaneous @@ -1609,11 +1608,11 @@ Deprecated * Starting in this release, there will be a concerted effort to begin cleaning up old import semantics that were kept for Python 2.7 compatibility. 
Specifically, - :meth:`~importlib.abc.PathEntryFinder.find_loader`/:meth:`~importlib.abc.Finder.find_module` + :meth:`!find_loader`/:meth:`!find_module` (superseded by :meth:`~importlib.abc.Finder.find_spec`), :meth:`~importlib.abc.Loader.load_module` (superseded by :meth:`~importlib.abc.Loader.exec_module`), - :meth:`~importlib.abc.Loader.module_repr` (which the import system + :meth:`!module_repr` (which the import system takes care of for you), the ``__package__`` attribute (superseded by ``__spec__.parent``), the ``__loader__`` attribute (superseded by ``__spec__.loader``), and the ``__cached__`` attribute @@ -1646,8 +1645,8 @@ Deprecated :meth:`~importlib.abc.Loader.exec_module` is preferred. (Contributed by Brett Cannon in :issue:`26131`.) -* The use of :meth:`importlib.abc.MetaPathFinder.find_module` and - :meth:`importlib.abc.PathEntryFinder.find_module` by the import system now +* The use of :meth:`!importlib.abc.MetaPathFinder.find_module` and + :meth:`!importlib.abc.PathEntryFinder.find_module` by the import system now trigger an :exc:`ImportWarning` as :meth:`importlib.abc.MetaPathFinder.find_spec` and :meth:`importlib.abc.PathEntryFinder.find_spec` @@ -1655,53 +1654,53 @@ Deprecated :func:`importlib.util.spec_from_loader` to help in porting. (Contributed by Brett Cannon in :issue:`42134`.) -* The use of :meth:`importlib.abc.PathEntryFinder.find_loader` by the import +* The use of :meth:`!importlib.abc.PathEntryFinder.find_loader` by the import system now triggers an :exc:`ImportWarning` as :meth:`importlib.abc.PathEntryFinder.find_spec` is preferred. You can use :func:`importlib.util.spec_from_loader` to help in porting. (Contributed by Brett Cannon in :issue:`43672`.) * The various implementations of - :meth:`importlib.abc.MetaPathFinder.find_module` ( - :meth:`importlib.machinery.BuiltinImporter.find_module`, - :meth:`importlib.machinery.FrozenImporter.find_module`, - :meth:`importlib.machinery.WindowsRegistryFinder.find_module`, - :meth:`importlib.machinery.PathFinder.find_module`, - :meth:`importlib.abc.MetaPathFinder.find_module` ), - :meth:`importlib.abc.PathEntryFinder.find_module` ( - :meth:`importlib.machinery.FileFinder.find_module` ), and - :meth:`importlib.abc.PathEntryFinder.find_loader` ( - :meth:`importlib.machinery.FileFinder.find_loader` ) + :meth:`!importlib.abc.MetaPathFinder.find_module` ( + :meth:`!importlib.machinery.BuiltinImporter.find_module`, + :meth:`!importlib.machinery.FrozenImporter.find_module`, + :meth:`!importlib.machinery.WindowsRegistryFinder.find_module`, + :meth:`!importlib.machinery.PathFinder.find_module`, + :meth:`!importlib.abc.MetaPathFinder.find_module` ), + :meth:`!importlib.abc.PathEntryFinder.find_module` ( + :meth:`!importlib.machinery.FileFinder.find_module` ), and + :meth:`!importlib.abc.PathEntryFinder.find_loader` ( + :meth:`!importlib.machinery.FileFinder.find_loader` ) now raise :exc:`DeprecationWarning` and are slated for removal in Python 3.12 (previously they were documented as deprecated in Python 3.4). (Contributed by Brett Cannon in :issue:`42135`.) -* :class:`importlib.abc.Finder` is deprecated (including its sole method, - :meth:`~importlib.abc.Finder.find_module`). Both +* :class:`!importlib.abc.Finder` is deprecated (including its sole method, + :meth:`!find_module`). Both :class:`importlib.abc.MetaPathFinder` and :class:`importlib.abc.PathEntryFinder` no longer inherit from the class. Users should inherit from one of these two classes as appropriate instead. (Contributed by Brett Cannon in :issue:`42135`.) 
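[Editorial aside, not part of the patch: a rough sketch of the spec-based approach that the ``find_module``/``find_loader`` deprecations above point towards, using only stock :mod:`importlib` helpers.]

.. code-block:: python

   import importlib.util

   # Instead of the deprecated finder.find_module(...), ask for a spec.
   spec = importlib.util.find_spec('json')
   if spec is None:
       raise ModuleNotFoundError('json is not importable')

   module = importlib.util.module_from_spec(spec)
   spec.loader.exec_module(module)        # replaces loader.load_module()
   print(module.dumps({'ok': True}))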
-* The deprecations of :mod:`imp`, :func:`importlib.find_loader`, - :func:`importlib.util.set_package_wrapper`, - :func:`importlib.util.set_loader_wrapper`, - :func:`importlib.util.module_for_loader`, - :class:`pkgutil.ImpImporter`, and - :class:`pkgutil.ImpLoader` have all been updated to list Python 3.12 as the +* The deprecations of :mod:`!imp`, :func:`!importlib.find_loader`, + :func:`!importlib.util.set_package_wrapper`, + :func:`!importlib.util.set_loader_wrapper`, + :func:`!importlib.util.module_for_loader`, + :class:`!pkgutil.ImpImporter`, and + :class:`!pkgutil.ImpLoader` have all been updated to list Python 3.12 as the slated version of removal (they began raising :exc:`DeprecationWarning` in previous versions of Python). (Contributed by Brett Cannon in :issue:`43720`.) * The import system now uses the ``__spec__`` attribute on modules before - falling back on :meth:`~importlib.abc.Loader.module_repr` for a module's + falling back on :meth:`!module_repr` for a module's ``__repr__()`` method. Removal of the use of ``module_repr()`` is scheduled for Python 3.12. (Contributed by Brett Cannon in :issue:`42137`.) -* :meth:`importlib.abc.Loader.module_repr`, - :meth:`importlib.machinery.FrozenLoader.module_repr`, and - :meth:`importlib.machinery.BuiltinLoader.module_repr` are deprecated and +* :meth:`!importlib.abc.Loader.module_repr`, + :meth:`!importlib.machinery.FrozenLoader.module_repr`, and + :meth:`!importlib.machinery.BuiltinLoader.module_repr` are deprecated and slated for removal in Python 3.12. (Contributed by Brett Cannon in :issue:`42136`.) @@ -1710,19 +1709,6 @@ Deprecated scheduled for removal in Python 3.12. (Contributed by Erlend E. Aasland in :issue:`42264`.) -* :func:`asyncio.get_event_loop` now emits a deprecation warning if there is - no running event loop. In the future it will be an alias of - :func:`~asyncio.get_running_loop`. - :mod:`asyncio` functions which implicitly create :class:`~asyncio.Future` - or :class:`~asyncio.Task` objects now emit - a deprecation warning if there is no running event loop and no explicit - *loop* argument is passed: :func:`~asyncio.ensure_future`, - :func:`~asyncio.wrap_future`, :func:`~asyncio.gather`, - :func:`~asyncio.shield`, :func:`~asyncio.as_completed` and constructors of - :class:`~asyncio.Future`, :class:`~asyncio.Task`, - :class:`~asyncio.StreamReader`, :class:`~asyncio.StreamReaderProtocol`. - (Contributed by Serhiy Storchaka in :issue:`39529`.) - * The undocumented built-in function ``sqlite3.enable_shared_cache`` is now deprecated, scheduled for removal in Python 3.12. Its use is strongly discouraged by the SQLite3 documentation. See `the SQLite3 docs @@ -1771,8 +1757,8 @@ Deprecated * :data:`~ssl.PROTOCOL_SSLv2`, :data:`~ssl.PROTOCOL_SSLv3`, :data:`~ssl.PROTOCOL_SSLv23`, :data:`~ssl.PROTOCOL_TLSv1`, :data:`~ssl.PROTOCOL_TLSv1_1`, :data:`~ssl.PROTOCOL_TLSv1_2`, and - :data:`~ssl.PROTOCOL_TLS` are deprecated in favor of - :data:`~ssl.PROTOCOL_TLS_CLIENT` and :data:`~ssl.PROTOCOL_TLS_SERVER` + :const:`~ssl.PROTOCOL_TLS` are deprecated in favor of + :const:`~ssl.PROTOCOL_TLS_CLIENT` and :const:`~ssl.PROTOCOL_TLS_SERVER` * :func:`~ssl.wrap_socket` is replaced by :meth:`ssl.SSLContext.wrap_socket` @@ -1831,10 +1817,10 @@ Removed scheduled to be removed in Python 3.6, but such removals were delayed until after Python 2.7 EOL. Existing users should copy whatever classes they use into their code. - (Contributed by Dong-hee Na and Terry J. Reedy in :issue:`42299`.) + (Contributed by Donghee Na and Terry J. 
Reedy in :issue:`42299`.) -* Removed the :c:func:`PyModule_GetWarningsModule` function that was useless - now due to the _warnings module was converted to a builtin module in 2.6. +* Removed the :c:func:`!PyModule_GetWarningsModule` function that was useless + now due to the :mod:`!_warnings` module was converted to a builtin module in 2.6. (Contributed by Hai Shi in :issue:`42599`.) * Remove deprecated aliases to :ref:`collections-abstract-base-classes` from @@ -2138,11 +2124,11 @@ New Features These functions allow to activate, deactivate and query the state of the garbage collector from C code without having to import the :mod:`gc` module. -* Add a new :c:data:`Py_TPFLAGS_DISALLOW_INSTANTIATION` type flag to disallow +* Add a new :c:macro:`Py_TPFLAGS_DISALLOW_INSTANTIATION` type flag to disallow creating type instances. (Contributed by Victor Stinner in :issue:`43916`.) -* Add a new :c:data:`Py_TPFLAGS_IMMUTABLETYPE` type flag for creating immutable +* Add a new :c:macro:`Py_TPFLAGS_IMMUTABLETYPE` type flag for creating immutable type objects: type attributes cannot be set nor deleted. (Contributed by Victor Stinner and Erlend E. Aasland in :issue:`43908`.) @@ -2201,9 +2187,9 @@ Porting to Python 3.10 been included directly, consider including ``Python.h`` instead. (Contributed by Nicholas Sim in :issue:`35134`.) -* Use the :c:data:`Py_TPFLAGS_IMMUTABLETYPE` type flag to create immutable type - objects. Do not rely on :c:data:`Py_TPFLAGS_HEAPTYPE` to decide if a type - object is mutable or not; check if :c:data:`Py_TPFLAGS_IMMUTABLETYPE` is set +* Use the :c:macro:`Py_TPFLAGS_IMMUTABLETYPE` type flag to create immutable type + objects. Do not rely on :c:macro:`Py_TPFLAGS_HEAPTYPE` to decide if a type + object is mutable or not; check if :c:macro:`Py_TPFLAGS_IMMUTABLETYPE` is set instead. (Contributed by Victor Stinner and Erlend E. Aasland in :issue:`43908`.) @@ -2225,16 +2211,16 @@ Removed * Removed ``Py_UNICODE_str*`` functions manipulating ``Py_UNICODE*`` strings. (Contributed by Inada Naoki in :issue:`41123`.) - * ``Py_UNICODE_strlen``: use :c:func:`PyUnicode_GetLength` or - :c:macro:`PyUnicode_GET_LENGTH` - * ``Py_UNICODE_strcat``: use :c:func:`PyUnicode_CopyCharacters` or - :c:func:`PyUnicode_FromFormat` - * ``Py_UNICODE_strcpy``, ``Py_UNICODE_strncpy``: use - :c:func:`PyUnicode_CopyCharacters` or :c:func:`PyUnicode_Substring` - * ``Py_UNICODE_strcmp``: use :c:func:`PyUnicode_Compare` - * ``Py_UNICODE_strncmp``: use :c:func:`PyUnicode_Tailmatch` - * ``Py_UNICODE_strchr``, ``Py_UNICODE_strrchr``: use - :c:func:`PyUnicode_FindChar` + * ``Py_UNICODE_strlen``: use :c:func:`PyUnicode_GetLength` or + :c:macro:`PyUnicode_GET_LENGTH` + * ``Py_UNICODE_strcat``: use :c:func:`PyUnicode_CopyCharacters` or + :c:func:`PyUnicode_FromFormat` + * ``Py_UNICODE_strcpy``, ``Py_UNICODE_strncpy``: use + :c:func:`PyUnicode_CopyCharacters` or :c:func:`PyUnicode_Substring` + * ``Py_UNICODE_strcmp``: use :c:func:`PyUnicode_Compare` + * ``Py_UNICODE_strncmp``: use :c:func:`PyUnicode_Tailmatch` + * ``Py_UNICODE_strchr``, ``Py_UNICODE_strrchr``: use + :c:func:`PyUnicode_FindChar` * Removed ``PyUnicode_GetMax()``. Please migrate to new (:pep:`393`) APIs. (Contributed by Inada Naoki in :issue:`41103`.) diff --git a/Doc/whatsnew/3.11.rst b/Doc/whatsnew/3.11.rst index 6eb90df89cac0ed..48a0e621baad023 100644 --- a/Doc/whatsnew/3.11.rst +++ b/Doc/whatsnew/3.11.rst @@ -2,8 +2,7 @@ What's New In Python 3.11 **************************** -:Release: |release| -:Date: |today| +:Editor: Pablo Galindo Salgado .. 
Rules for maintenance: @@ -46,7 +45,7 @@ when researching a change. This article explains the new features in Python 3.11, compared to 3.10. - +Python 3.11 was released on October 24, 2022. For full details, see the :ref:`changelog `. @@ -219,17 +218,17 @@ Windows ``py.exe`` launcher improvements The copy of the :ref:`launcher` included with Python 3.11 has been significantly updated. It now supports company/tag syntax as defined in :pep:`514` using the -``-V:/`` argument instead of the limited ``-.``. +:samp:`-V:{}/{}` argument instead of the limited :samp:`-{}.{}`. This allows launching distributions other than ``PythonCore``, -the one hosted on `python.org `_. +the one hosted on `python.org `_. When using ``-V:`` selectors, either company or tag can be omitted, but all installs will be searched. For example, ``-V:OtherPython/`` will select the "best" tag registered for ``OtherPython``, while ``-V:3.11`` or ``-V:/3.11`` will select the "best" distribution with tag ``3.11``. -When using the legacy ``-``, ``-.``, -``--`` or ``-.-`` arguments, +When using the legacy :samp:`-{}`, :samp:`-{}.{}`, +:samp:`-{}-{}` or :samp:`-{}.{}-{}` arguments, all existing behaviour should be preserved from past versions, and only releases from ``PythonCore`` will be selected. However, the ``-64`` suffix now implies "not 32-bit" (not necessarily x86-64), @@ -460,6 +459,10 @@ Other Language Changes :class:`collections.OrderedDict`, :class:`collections.deque`, :class:`weakref.WeakSet`, and :class:`datetime.tzinfo` now copies and pickles instance attributes implemented as :term:`slots <__slots__>`. + This change has an unintended side effect: It trips up a small minority + of existing Python projects not expecting :meth:`object.__getstate__` to + exist. See the later comments on :gh:`70766` for discussions of what + workarounds such code may need. (Contributed by Serhiy Storchaka in :issue:`26579`.) .. _whatsnew311-pythonsafepath: @@ -496,7 +499,7 @@ Other CPython Implementation Changes * The special methods :meth:`~object.__complex__` for :class:`complex` and :meth:`~object.__bytes__` for :class:`bytes` are implemented to support the :class:`typing.SupportsComplex` and :class:`typing.SupportsBytes` protocols. - (Contributed by Mark Dickinson and Dong-hee Na in :issue:`24234`.) + (Contributed by Mark Dickinson and Donghee Na in :issue:`24234`.) * ``siphash13`` is added as a new internal hashing algorithm. It has similar security properties as ``siphash24``, @@ -641,7 +644,7 @@ dataclasses datetime -------- -* Add :attr:`datetime.UTC`, a convenience alias for +* Add :const:`datetime.UTC`, a convenience alias for :attr:`datetime.timezone.utc`. (Contributed by Kabir Kwatra in :gh:`91973`.) * :meth:`datetime.date.fromisoformat`, :meth:`datetime.time.fromisoformat` and @@ -667,19 +670,11 @@ enum for :meth:`~object.__str__` and :meth:`~object.__format__` (used by :func:`str`, :func:`format` and :term:`f-string`\s). -* Changed :class:`~enum.IntEnum`, :class:`~enum.IntFlag` and :class:`~enum.StrEnum` - to now inherit from :class:`~enum.ReprEnum`, - so their :func:`str` output now matches :func:`format` - (both ``str(AnIntEnum.ONE)`` and ``format(AnIntEnum.ONE)`` return ``'1'``, - whereas before ``str(AnIntEnum.ONE)`` returned ``'AnIntEnum.ONE'``. - -* Changed :meth:`Enum.__format__() ` - (the default for :func:`format`, :meth:`str.format` and :term:`f-string`\s) - of enums with mixed-in types (e.g. :class:`int`, :class:`str`) - to also include the class name in the output, not just the member's key. 
- This matches the existing behavior of :meth:`enum.Enum.__str__`, - returning e.g. ``'AnEnum.MEMBER'`` for an enum ``AnEnum(str, Enum)`` - instead of just ``'MEMBER'``. +* Changed :meth:`Enum.__format__() ` (the default for + :func:`format`, :meth:`str.format` and :term:`f-string`\s) to always produce + the same result as :meth:`Enum.__str__()`: for enums inheriting from + :class:`~enum.ReprEnum` it will be the member's value; for all other enums + it will be the enum and member name (e.g. ``Color.RED``). * Added a new *boundary* class parameter to :class:`~enum.Flag` enums and the :class:`~enum.FlagBoundary` enum with its options, @@ -699,7 +694,7 @@ enum * Added the :func:`~enum.global_enum` enum decorator, which adjusts :meth:`~object.__repr__` and :meth:`~object.__str__` to show values as members of their module rather than the enum class. - For example, ``'re.ASCII'`` for the :data:`~re.ASCII` member + For example, ``'re.ASCII'`` for the :const:`~re.ASCII` member of :class:`re.RegexFlag` rather than ``'RegexFlag.ASCII'``. * Enhanced :class:`~enum.Flag` to support @@ -902,7 +897,7 @@ os * On Windows, :func:`os.urandom` now uses ``BCryptGenRandom()``, instead of ``CryptGenRandom()`` which is deprecated. - (Contributed by Dong-hee Na in :issue:`44611`.) + (Contributed by Donghee Na in :issue:`44611`.) .. _whatsnew311-pathlib: @@ -1072,8 +1067,8 @@ threading * On Unix, if the ``sem_clockwait()`` function is available in the C library (glibc 2.30 and newer), the :meth:`threading.Lock.acquire` method now uses - the monotonic clock (:data:`time.CLOCK_MONOTONIC`) for the timeout, rather - than using the system clock (:data:`time.CLOCK_REALTIME`), to not be affected + the monotonic clock (:const:`time.CLOCK_MONOTONIC`) for the timeout, rather + than using the system clock (:const:`time.CLOCK_REALTIME`), to not be affected by system clock changes. (Contributed by Victor Stinner in :issue:`41710`.) @@ -1094,7 +1089,7 @@ time `_ which has a resolution of 100 nanoseconds (10\ :sup:`-7` seconds). Previously, it had a resolution of 1 millisecond (10\ :sup:`-3` seconds). - (Contributed by Benjamin SzÅ‘ke, Dong-hee Na, Eryk Sun and Victor Stinner in :issue:`21302` and :issue:`45429`.) + (Contributed by Benjamin SzÅ‘ke, Donghee Na, Eryk Sun and Victor Stinner in :issue:`21302` and :issue:`45429`.) .. _whatsnew311-tkinter: @@ -1310,7 +1305,7 @@ This section covers specific optimizations independent of the * :func:`unicodedata.normalize` now normalizes pure-ASCII strings in constant time. - (Contributed by Dong-hee Na in :issue:`44987`.) + (Contributed by Donghee Na in :issue:`44987`.) .. _whatsnew311-faster-cpython: @@ -1318,14 +1313,17 @@ This section covers specific optimizations independent of the Faster CPython ============== -CPython 3.11 is on average `25% faster `_ -than CPython 3.10 when measured with the +CPython 3.11 is an average of +`25% faster `_ +than CPython 3.10 as measured with the `pyperformance `_ benchmark suite, -and compiled with GCC on Ubuntu Linux. Depending on your workload, the speedup -could be up to 10-60% faster. +when compiled with GCC on Ubuntu Linux. +Depending on your workload, the overall speedup could be 10-60%. -This project focuses on two major areas in Python: faster startup and faster -runtime. Other optimizations not under this project are listed in `Optimizations`_. +This project focuses on two major areas in Python: +:ref:`whatsnew311-faster-startup` and :ref:`whatsnew311-faster-runtime`. 
+Optimizations not covered by this project are listed separately under +:ref:`whatsnew311-optimizations`. .. _whatsnew311-faster-startup: @@ -1338,8 +1336,8 @@ Faster Startup Frozen imports / Static code objects ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python caches bytecode in the :ref:`__pycache__` directory to -speed up module loading. +Python caches :term:`bytecode` in the :ref:`__pycache__ ` +directory to speed up module loading. Previously in 3.10, Python module execution looked like this: @@ -1348,8 +1346,9 @@ Previously in 3.10, Python module execution looked like this: Read __pycache__ -> Unmarshal -> Heap allocated code object -> Evaluate In Python 3.11, the core modules essential for Python startup are "frozen". -This means that their code objects (and bytecode) are statically allocated -by the interpreter. This reduces the steps in module execution process to this: +This means that their :ref:`codeobjects` (and bytecode) +are statically allocated by the interpreter. +This reduces the steps in module execution process to: .. code-block:: text @@ -1358,7 +1357,7 @@ by the interpreter. This reduces the steps in module execution process to this: Interpreter startup is now 10-15% faster in Python 3.11. This has a big impact for short-running programs using Python. -(Contributed by Eric Snow, Guido van Rossum and Kumar Aditya in numerous issues.) +(Contributed by Eric Snow, Guido van Rossum and Kumar Aditya in many issues.) .. _whatsnew311-faster-runtime: @@ -1371,17 +1370,19 @@ Faster Runtime Cheaper, lazy Python frames ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python frames are created whenever Python calls a Python function. This frame -holds execution information. The following are new frame optimizations: +Python frames, holding execution information, +are created whenever Python calls a Python function. +The following are new frame optimizations: - Streamlined the frame creation process. - Avoided memory allocation by generously re-using frame space on the C stack. - Streamlined the internal frame struct to contain only essential information. Frames previously held extra debugging and memory management information. -Old-style frame objects are now created only when requested by debuggers or -by Python introspection functions such as ``sys._getframe`` or -``inspect.currentframe``. For most user code, no frame objects are +Old-style :ref:`frame objects ` +are now created only when requested by debuggers +or by Python introspection functions such as :func:`sys._getframe` and +:func:`inspect.currentframe`. For most user code, no frame objects are created at all. As a result, nearly all Python functions calls have sped up significantly. We measured a 3-7% speedup in pyperformance. @@ -1402,10 +1403,11 @@ In 3.11, when CPython detects Python code calling another Python function, it sets up a new frame, and "jumps" to the new code inside the new frame. This avoids calling the C interpreting function altogether. -Most Python function calls now consume no C stack space. This speeds up -most of such calls. In simple recursive functions like fibonacci or -factorial, a 1.7x speedup was observed. This also means recursive functions -can recurse significantly deeper (if the user increases the recursion limit). +Most Python function calls now consume no C stack space, speeding them up. +In simple recursive functions like fibonacci or +factorial, we observed a 1.7x speedup. 
This also means recursive functions +can recurse significantly deeper +(if the user increases the recursion limit with :func:`sys.setrecursionlimit`). We measured a 1-3% improvement in pyperformance. (Contributed by Pablo Galindo and Mark Shannon in :issue:`45256`.) @@ -1416,7 +1418,7 @@ We measured a 1-3% improvement in pyperformance. PEP 659: Specializing Adaptive Interpreter ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:pep:`659` is one of the key parts of the faster CPython project. The general +:pep:`659` is one of the key parts of the Faster CPython project. The general idea is that while Python is a dynamic language, most code has regions where objects and types rarely change. This concept is known as *type stability*. @@ -1425,17 +1427,18 @@ in the executing code. Python will then replace the current operation with a more specialized one. This specialized operation uses fast paths available only to those use cases/types, which generally outperform their generic counterparts. This also brings in another concept called *inline caching*, where -Python caches the results of expensive operations directly in the bytecode. +Python caches the results of expensive operations directly in the +:term:`bytecode`. The specializer will also combine certain common instruction pairs into one -superinstruction. This reduces the overhead during execution. +superinstruction, reducing the overhead during execution. Python will only specialize when it sees code that is "hot" (executed multiple times). This prevents Python -from wasting time for run-once code. Python can also de-specialize when code is +from wasting time on run-once code. Python can also de-specialize when code is too dynamic or when the use changes. Specialization is attempted periodically, -and specialization attempts are not too expensive. This allows specialization -to adapt to new circumstances. +and specialization attempts are not too expensive, +allowing specialization to adapt to new circumstances. (PEP written by Mark Shannon, with ideas inspired by Stefan Brunthaler. See :pep:`659` for more information. Implementation by Mark Shannon and Brandt @@ -1448,32 +1451,32 @@ Bucher, with additional help from Irit Katriel and Dennis Sweeney.) | Operation | Form | Specialization | Operation speedup | Contributor(s) | | | | | (up to) | | +===============+====================+=======================================================+===================+===================+ -| Binary | ``x+x; x*x; x-x;`` | Binary add, multiply and subtract for common types | 10% | Mark Shannon, | -| operations | | such as ``int``, ``float``, and ``str`` take custom | | Dong-hee Na, | -| | | fast paths for their underlying types. | | Brandt Bucher, | +| Binary | ``x + x`` | Binary add, multiply and subtract for common types | 10% | Mark Shannon, | +| operations | | such as :class:`int`, :class:`float` and :class:`str` | | Donghee Na, | +| | ``x - x`` | take custom fast paths for their underlying types. | | Brandt Bucher, | | | | | | Dennis Sweeney | +| | ``x * x`` | | | | +---------------+--------------------+-------------------------------------------------------+-------------------+-------------------+ -| Subscript | ``a[i]`` | Subscripting container types such as ``list``, | 10-25% | Irit Katriel, | -| | | ``tuple`` and ``dict`` directly index the underlying | | Mark Shannon | -| | | data structures. 
| | | +| Subscript | ``a[i]`` | Subscripting container types such as :class:`list`, | 10-25% | Irit Katriel, | +| | | :class:`tuple` and :class:`dict` directly index | | Mark Shannon | +| | | the underlying data structures. | | | | | | | | | -| | | Subscripting custom ``__getitem__`` | | | +| | | Subscripting custom :meth:`~object.__getitem__` | | | | | | is also inlined similar to :ref:`inline-calls`. | | | +---------------+--------------------+-------------------------------------------------------+-------------------+-------------------+ | Store | ``a[i] = z`` | Similar to subscripting specialization above. | 10-25% | Dennis Sweeney | | subscript | | | | | +---------------+--------------------+-------------------------------------------------------+-------------------+-------------------+ | Calls | ``f(arg)`` | Calls to common builtin (C) functions and types such | 20% | Mark Shannon, | -| | ``C(arg)`` | as ``len`` and ``str`` directly call their underlying | | Ken Jin | -| | | C version. This avoids going through the internal | | | -| | | calling convention. | | | -| | | | | | +| | | as :func:`len` and :class:`str` directly call their | | Ken Jin | +| | ``C(arg)`` | underlying C version. This avoids going through the | | | +| | | internal calling convention. | | | +---------------+--------------------+-------------------------------------------------------+-------------------+-------------------+ -| Load | ``print`` | The object's index in the globals/builtins namespace | [1]_ | Mark Shannon | -| global | ``len`` | is cached. Loading globals and builtins require | | | -| variable | | zero namespace lookups. | | | +| Load | ``print`` | The object's index in the globals/builtins namespace | [#load-global]_ | Mark Shannon | +| global | | is cached. Loading globals and builtins require | | | +| variable | ``len`` | zero namespace lookups. | | | +---------------+--------------------+-------------------------------------------------------+-------------------+-------------------+ -| Load | ``o.attr`` | Similar to loading global variables. The attribute's | [2]_ | Mark Shannon | +| Load | ``o.attr`` | Similar to loading global variables. The attribute's | [#load-attr]_ | Mark Shannon | | attribute | | index inside the class/object's namespace is cached. | | | | | | In most cases, attribute loading will require zero | | | | | | namespace lookups. | | | @@ -1485,14 +1488,15 @@ Bucher, with additional help from Irit Katriel and Dennis Sweeney.) | Store | ``o.attr = z`` | Similar to load attribute optimization. | 2% | Mark Shannon | | attribute | | | in pyperformance | | +---------------+--------------------+-------------------------------------------------------+-------------------+-------------------+ -| Unpack | ``*seq`` | Specialized for common containers such as ``list`` | 8% | Brandt Bucher | -| Sequence | | and ``tuple``. Avoids internal calling convention. | | | +| Unpack | ``*seq`` | Specialized for common containers such as | 8% | Brandt Bucher | +| Sequence | | :class:`list` and :class:`tuple`. | | | +| | | Avoids internal calling convention. | | | +---------------+--------------------+-------------------------------------------------------+-------------------+-------------------+ -.. [1] A similar optimization already existed since Python 3.8. 3.11 - specializes for more forms and reduces some overhead. +.. [#load-global] A similar optimization already existed since Python 3.8. + 3.11 specializes for more forms and reduces some overhead. -.. 
[2] A similar optimization already existed since Python 3.10. +.. [#load-attr] A similar optimization already existed since Python 3.10. 3.11 specializes for more forms. Furthermore, all attribute loads should be sped up by :issue:`45947`. @@ -1502,49 +1506,72 @@ Bucher, with additional help from Irit Katriel and Dennis Sweeney.) Misc ---- -* Objects now require less memory due to lazily created object namespaces. Their - namespace dictionaries now also share keys more freely. +* Objects now require less memory due to lazily created object namespaces. + Their namespace dictionaries now also share keys more freely. (Contributed Mark Shannon in :issue:`45340` and :issue:`40116`.) +* "Zero-cost" exceptions are implemented, eliminating the cost + of :keyword:`try` statements when no exception is raised. + (Contributed by Mark Shannon in :issue:`40222`.) + * A more concise representation of exceptions in the interpreter reduced the time required for catching an exception by about 10%. (Contributed by Irit Katriel in :issue:`45711`.) +* :mod:`re`'s regular expression matching engine has been partially refactored, + and now uses computed gotos (or "threaded code") on supported platforms. As a + result, Python 3.11 executes the `pyperformance regular expression benchmarks + `_ up to 10% + faster than Python 3.10. + (Contributed by Brandt Bucher in :gh:`91404`.) + .. _whatsnew311-faster-cpython-faq: FAQ --- -| Q: How should I write my code to utilize these speedups? -| -| A: You don't have to change your code. Write Pythonic code that follows common - best practices. The Faster CPython project optimizes for common code - patterns we observe. -| -| -| Q: Will CPython 3.11 use more memory? -| -| A: Maybe not. We don't expect memory use to exceed 20% more than 3.10. - This is offset by memory optimizations for frame objects and object - dictionaries as mentioned above. -| -| -| Q: I don't see any speedups in my workload. Why? -| -| A: Certain code won't have noticeable benefits. If your code spends most of - its time on I/O operations, or already does most of its - computation in a C extension library like numpy, there won't be significant - speedup. This project currently benefits pure-Python workloads the most. -| -| Furthermore, the pyperformance figures are a geometric mean. Even within the - pyperformance benchmarks, certain benchmarks have slowed down slightly, while - others have sped up by nearly 2x! -| -| -| Q: Is there a JIT compiler? -| -| A: No. We're still exploring other optimizations. +.. _faster-cpython-faq-my-code: + +How should I write my code to utilize these speedups? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Write Pythonic code that follows common best practices; +you don't have to change your code. +The Faster CPython project optimizes for common code patterns we observe. + + +.. _faster-cpython-faq-memory: + +Will CPython 3.11 use more memory? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Maybe not; we don't expect memory use to exceed 20% higher than 3.10. +This is offset by memory optimizations for frame objects and object +dictionaries as mentioned above. + + +.. _faster-cpython-ymmv: + +I don't see any speedups in my workload. Why? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Certain code won't have noticeable benefits. If your code spends most of +its time on I/O operations, or already does most of its +computation in a C extension library like NumPy, there won't be significant +speedups. This project currently benefits pure-Python workloads the most. 
+ +Furthermore, the pyperformance figures are a geometric mean. Even within the +pyperformance benchmarks, certain benchmarks have slowed down slightly, while +others have sped up by nearly 2x! + + +.. _faster-cpython-jit: + +Is there a JIT compiler? +^^^^^^^^^^^^^^^^^^^^^^^^ + +No. We're still exploring other optimizations. .. _whatsnew311-faster-cpython-about: @@ -1708,13 +1735,13 @@ Modules slated for removal in Python 3.13: +---------------------+---------------------+---------------------+---------------------+---------------------+ - | :mod:`aifc` | :mod:`chunk` | :mod:`msilib` | :mod:`pipes` | :mod:`telnetlib` | + | :mod:`!aifc` | :mod:`!chunk` | :mod:`!msilib` | :mod:`!pipes` | :mod:`!telnetlib` | +---------------------+---------------------+---------------------+---------------------+---------------------+ - | :mod:`audioop` | :mod:`crypt` | :mod:`nis` | :mod:`sndhdr` | :mod:`uu` | + | :mod:`!audioop` | :mod:`!crypt` | :mod:`!nis` | :mod:`!sndhdr` | :mod:`!uu` | +---------------------+---------------------+---------------------+---------------------+---------------------+ - | :mod:`cgi` | :mod:`imghdr` | :mod:`nntplib` | :mod:`spwd` | :mod:`xdrlib` | + | :mod:`!cgi` | :mod:`!imghdr` | :mod:`!nntplib` | :mod:`!spwd` | :mod:`!xdrlib` | +---------------------+---------------------+---------------------+---------------------+---------------------+ - | :mod:`cgitb` | :mod:`mailcap` | :mod:`ossaudiodev` | :mod:`sunau` | | + | :mod:`!cgitb` | :mod:`!mailcap` | :mod:`!ossaudiodev` | :mod:`!sunau` | | +---------------------+---------------------+---------------------+---------------------+---------------------+ (Contributed by Brett Cannon in :issue:`47061` and Victor Stinner in @@ -1725,7 +1752,7 @@ Modules warnings have now been updated to note they will be removed in Python 3.12. (Contributed by Hugo van Kemenade in :issue:`47022`.) -* The :mod:`lib2to3` package and :ref:`2to3 <2to3-reference>` tool +* The :mod:`!lib2to3` package and ``2to3`` tool are now deprecated and may not be able to parse Python 3.10 or newer. See :pep:`617`, introducing the new PEG parser, for details. (Contributed by Victor Stinner in :issue:`40360`.) @@ -1771,7 +1798,7 @@ Standard Library * :func:`importlib.resources.path` * The :func:`locale.getdefaultlocale` function is deprecated and will be - removed in Python 3.13. Use :func:`locale.setlocale`, + removed in Python 3.15. Use :func:`locale.setlocale`, :func:`locale.getpreferredencoding(False) ` and :func:`locale.getlocale` functions instead. (Contributed by Victor Stinner in :gh:`90817`.) @@ -1789,12 +1816,12 @@ Standard Library (Contributed by Serhiy Storchaka in :gh:`91760`.) * In the :mod:`re` module, the :func:`!re.template` function - and the corresponding :data:`!re.TEMPLATE` and :data:`!re.T` flags + and the corresponding :const:`!re.TEMPLATE` and :const:`!re.T` flags are deprecated, as they were undocumented and lacked an obvious purpose. They will be removed in Python 3.13. (Contributed by Serhiy Storchaka and Miro HronÄok in :gh:`92728`.) -* :func:`turtle.settiltangle` has been deprecated since Python 3.1; +* :func:`!turtle.settiltangle` has been deprecated since Python 3.1; it now emits a deprecation warning and will be removed in Python 3.13. Use :func:`turtle.tiltangle` instead (it was earlier incorrectly marked as deprecated, and its docstring is now corrected). @@ -1812,7 +1839,7 @@ Standard Library * :class:`!webbrowser.MacOSX` is deprecated and will be removed in Python 3.13. 
It is untested, undocumented, and not used by :mod:`webbrowser` itself. - (Contributed by Dong-hee Na in :issue:`42255`.) + (Contributed by Donghee Na in :issue:`42255`.) * The behavior of returning a value from a :class:`~unittest.TestCase` and :class:`~unittest.IsolatedAsyncioTestCase` test methods (other than the @@ -1833,6 +1860,10 @@ Standard Library (Contributed by Erlend E. Aasland in :issue:`5846`.) +* :meth:`~!unittest.TestProgram.usageExit` is marked deprecated, to be removed + in 3.13. + (Contributed by Carlos Damázio in :gh:`67048`.) + .. _whatsnew311-pending-removal: .. _whatsnew311-python-api-pending-removal: @@ -1849,28 +1880,28 @@ C APIs pending removal are * The :mod:`asynchat` module * The :mod:`asyncore` module * The :ref:`entire distutils package ` -* The :mod:`imp` module +* The :mod:`!imp` module * The :class:`typing.io ` namespace * The :class:`typing.re ` namespace * :func:`!cgi.log` -* :func:`importlib.find_loader` -* :meth:`importlib.abc.Loader.module_repr` -* :meth:`importlib.abc.MetaPathFinder.find_module` -* :meth:`importlib.abc.PathEntryFinder.find_loader` -* :meth:`importlib.abc.PathEntryFinder.find_module` +* :func:`!importlib.find_loader` +* :meth:`!importlib.abc.Loader.module_repr` +* :meth:`!importlib.abc.MetaPathFinder.find_module` +* :meth:`!importlib.abc.PathEntryFinder.find_loader` +* :meth:`!importlib.abc.PathEntryFinder.find_module` * :meth:`!importlib.machinery.BuiltinImporter.find_module` * :meth:`!importlib.machinery.BuiltinLoader.module_repr` * :meth:`!importlib.machinery.FileFinder.find_loader` * :meth:`!importlib.machinery.FileFinder.find_module` * :meth:`!importlib.machinery.FrozenImporter.find_module` * :meth:`!importlib.machinery.FrozenLoader.module_repr` -* :meth:`importlib.machinery.PathFinder.find_module` +* :meth:`!importlib.machinery.PathFinder.find_module` * :meth:`!importlib.machinery.WindowsRegistryFinder.find_module` -* :func:`importlib.util.module_for_loader` +* :func:`!importlib.util.module_for_loader` * :func:`!importlib.util.set_loader_wrapper` * :func:`!importlib.util.set_package_wrapper` -* :class:`pkgutil.ImpImporter` -* :class:`pkgutil.ImpLoader` +* :class:`!pkgutil.ImpImporter` +* :class:`!pkgutil.ImpLoader` * :meth:`pathlib.Path.link_to` * :func:`!sqlite3.enable_shared_cache` * :func:`!sqlite3.OptimizedUnicode` @@ -1953,7 +1984,7 @@ Removed C APIs are :ref:`listed separately `. :meth:`!NullTranslations.set_output_charset` methods, and the *codeset* parameter of :func:`!translation` and :func:`!install`, since they are only used for the :func:`!l*gettext` functions. - (Contributed by Dong-hee Na and Serhiy Storchaka in :issue:`44235`.) + (Contributed by Donghee Na and Serhiy Storchaka in :issue:`44235`.) * Removed from the :mod:`inspect` module: @@ -1978,7 +2009,7 @@ Removed C APIs are :ref:`listed separately `. * Removed the :class:`!MailmanProxy` class in the :mod:`smtpd` module, as it is unusable without the external :mod:`!mailman` package. - (Contributed by Dong-hee Na in :issue:`35800`.) + (Contributed by Donghee Na in :issue:`35800`.) * Removed the deprecated :meth:`!split` method of :class:`!_tkinter.TkappType`. (Contributed by Erlend E. Aasland in :issue:`38371`.) @@ -2083,30 +2114,22 @@ Build Changes and WASI contributed by Christian Heimes in :gh:`90473`; platforms promoted in :gh:`95085`) -* Building Python now requires: +* Building CPython now requires: - * A `C11 `_ compiler. + * A `C11 `_ compiler and standard library. `Optional C11 features `_ are not required. 
- (Contributed by Victor Stinner in :issue:`46656`.) + (Contributed by Victor Stinner in :issue:`46656`, + :issue:`45440` and :issue:`46640`.) * Support for `IEEE 754 `_ floating point numbers. (Contributed by Victor Stinner in :issue:`46917`.) - * Support for `floating point Not-a-Number (NaN) - `_, - as the :c:macro:`!Py_NO_NAN` macro has been removed. - (Contributed by Victor Stinner in :issue:`46656`.) - - * A `C99 `_ - ```` header file providing the - :c:func:`!copysign`, :c:func:`!hypot`, :c:func:`!isfinite`, - :c:func:`!isinf`, :c:func:`!isnan`, and :c:func:`!round` functions - (contributed by Victor Stinner in :issue:`45440`); - and a :c:data:`!NAN` constant or the :c:func:`!__builtin_nan` function - (Contributed by Victor Stinner in :issue:`46640`). +* The :c:macro:`!Py_NO_NAN` macro has been removed. + Since CPython now requires IEEE 754 floats, NaN values are always available. + (Contributed by Victor Stinner in :issue:`46656`.) * The :mod:`tkinter` package now requires `Tcl/Tk `_ version 8.5.12 or newer. @@ -2128,10 +2151,10 @@ Build Changes * CPython can now be built with the `ThinLTO `_ option via passing ``thin`` to :option:`--with-lto`, i.e. ``--with-lto=thin``. - (Contributed by Dong-hee Na and Brett Holman in :issue:`44340`.) + (Contributed by Donghee Na and Brett Holman in :issue:`44340`.) * Freelists for object structs can now be disabled. A new :program:`configure` - option :option:`!--without-freelists` can be used to disable all freelists + option :option:`--without-freelists` can be used to disable all freelists except empty tuple singleton. (Contributed by Christian Heimes in :issue:`45522`.) @@ -2197,7 +2220,7 @@ New Features * :c:func:`PyBuffer_SizeFromFormat` * :c:func:`PyBuffer_ToContiguous` * :c:func:`PyBuffer_FromContiguous` - * :c:func:`PyBuffer_CopyData` + * :c:func:`PyObject_CopyData` * :c:func:`PyBuffer_IsContiguous` * :c:func:`PyBuffer_FillContiguousStrides` * :c:func:`PyBuffer_FillInfo` @@ -2208,7 +2231,7 @@ New Features (Contributed by Christian Heimes in :issue:`45459`.) -* Added the :c:data:`PyType_GetModuleByDef` function, used to get the module +* Added the :c:func:`PyType_GetModuleByDef` function, used to get the module in which a method was defined, in cases where this information is not available directly (via :c:type:`PyCMethod`). (Contributed by Petr Viktorin in :issue:`46613`.) @@ -2319,7 +2342,7 @@ Porting to Python 3.11 can define the following macros and use them throughout the code (credit: these were copied from the ``mypy`` codebase):: - #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 8 + #if PY_VERSION_HEX >= 0x03080000 # define CPy_TRASHCAN_BEGIN(op, dealloc) Py_TRASHCAN_BEGIN(op, dealloc) # define CPy_TRASHCAN_END(op) Py_TRASHCAN_END #else @@ -2328,11 +2351,11 @@ Porting to Python 3.11 #endif * The :c:func:`PyType_Ready` function now raises an error if a type is defined - with the :const:`Py_TPFLAGS_HAVE_GC` flag set but has no traverse function + with the :c:macro:`Py_TPFLAGS_HAVE_GC` flag set but has no traverse function (:c:member:`PyTypeObject.tp_traverse`). (Contributed by Victor Stinner in :issue:`44263`.) -* Heap types with the :const:`Py_TPFLAGS_IMMUTABLETYPE` flag can now inherit +* Heap types with the :c:macro:`Py_TPFLAGS_IMMUTABLETYPE` flag can now inherit the :pep:`590` vectorcall protocol. Previously, this was only possible for :ref:`static types `. (Contributed by Erlend E. 
Aasland in :issue:`43908`) @@ -2466,7 +2489,7 @@ Porting to Python 3.11 #endif Or use the `pythoncapi_compat project - `__ to get these two + `__ to get these two functions on older Python versions. * Changes of the :c:type:`PyThreadState` structure members: @@ -2518,8 +2541,8 @@ Porting to Python 3.11 } #endif - Or use `the pythoncapi_compat project - `__ to get these functions + Or use `the pythoncapi-compat project + `__ to get these functions on old Python functions. * Distributors are encouraged to build Python with the optimized Blake2 @@ -2543,18 +2566,18 @@ Deprecated * Deprecate the following functions to configure the Python initialization: - * :c:func:`PySys_AddWarnOptionUnicode` - * :c:func:`PySys_AddWarnOption` - * :c:func:`PySys_AddXOption` - * :c:func:`PySys_HasWarnOptions` - * :c:func:`PySys_SetArgvEx` - * :c:func:`PySys_SetArgv` - * :c:func:`PySys_SetPath` - * :c:func:`Py_SetPath` - * :c:func:`Py_SetProgramName` - * :c:func:`Py_SetPythonHome` - * :c:func:`Py_SetStandardStreamEncoding` - * :c:func:`_Py_SetProgramFullPath` + * :c:func:`!PySys_AddWarnOptionUnicode` + * :c:func:`!PySys_AddWarnOption` + * :c:func:`!PySys_AddXOption` + * :c:func:`!PySys_HasWarnOptions` + * :c:func:`!PySys_SetArgvEx` + * :c:func:`!PySys_SetArgv` + * :c:func:`!PySys_SetPath` + * :c:func:`!Py_SetPath` + * :c:func:`!Py_SetProgramName` + * :c:func:`!Py_SetPythonHome` + * :c:func:`!Py_SetStandardStreamEncoding` + * :c:func:`!_Py_SetProgramFullPath` Use the new :c:type:`PyConfig` API of the :ref:`Python Initialization Configuration ` instead (:pep:`587`). @@ -2572,22 +2595,22 @@ Pending Removal in Python 3.12 The following C APIs have been deprecated in earlier Python releases, and will be removed in Python 3.12. -* :c:func:`PyUnicode_AS_DATA` -* :c:func:`PyUnicode_AS_UNICODE` -* :c:func:`PyUnicode_AsUnicodeAndSize` -* :c:func:`PyUnicode_AsUnicode` -* :c:func:`PyUnicode_FromUnicode` -* :c:func:`PyUnicode_GET_DATA_SIZE` -* :c:func:`PyUnicode_GET_SIZE` -* :c:func:`PyUnicode_GetSize` +* :c:func:`!PyUnicode_AS_DATA` +* :c:func:`!PyUnicode_AS_UNICODE` +* :c:func:`!PyUnicode_AsUnicodeAndSize` +* :c:func:`!PyUnicode_AsUnicode` +* :c:func:`!PyUnicode_FromUnicode` +* :c:func:`!PyUnicode_GET_DATA_SIZE` +* :c:func:`!PyUnicode_GET_SIZE` +* :c:func:`!PyUnicode_GetSize` * :c:func:`PyUnicode_IS_COMPACT` * :c:func:`PyUnicode_IS_READY` * :c:func:`PyUnicode_READY` -* :c:func:`Py_UNICODE_WSTR_LENGTH` -* :c:func:`_PyUnicode_AsUnicode` -* :c:macro:`PyUnicode_WCHAR_KIND` +* :c:func:`!PyUnicode_WSTR_LENGTH` +* :c:func:`!_PyUnicode_AsUnicode` +* :c:macro:`!PyUnicode_WCHAR_KIND` * :c:type:`PyUnicodeObject` -* :c:func:`PyUnicode_InternImmortal()` +* :c:func:`!PyUnicode_InternImmortal` .. _whatsnew311-c-api-removed: @@ -2595,7 +2618,7 @@ and will be removed in Python 3.12. Removed ------- -* :c:func:`PyFrame_BlockSetup` and :c:func:`PyFrame_BlockPop` have been +* :c:func:`!PyFrame_BlockSetup` and :c:func:`!PyFrame_BlockPop` have been removed. (Contributed by Mark Shannon in :issue:`40222`.) @@ -2631,7 +2654,7 @@ Removed * :c:func:`PyMarshal_WriteObjectToString` * the ``Py_MARSHAL_VERSION`` macro - These are not part of the :ref:`limited API `. + These are not part of the :ref:`limited API `. (Contributed by Victor Stinner in :issue:`45474`.) 
diff --git a/Doc/whatsnew/3.12.rst b/Doc/whatsnew/3.12.rst index a9432561f3fc500..a4b3a6d12970b47 100644 --- a/Doc/whatsnew/3.12.rst +++ b/Doc/whatsnew/3.12.rst @@ -3,8 +3,7 @@ What's New In Python 3.12 **************************** -:Release: |release| -:Date: |today| +:Editor: Adam Turner .. Rules for maintenance: @@ -47,15 +46,12 @@ researching a change. This article explains the new features in Python 3.12, compared to 3.11. - +Python 3.12 was released on October 2, 2023. For full details, see the :ref:`changelog `. -.. note:: - - Prerelease users should be aware that this document is currently in draft - form. It will be updated substantially as Python 3.12 moves towards release, - so it's worth checking back even after reading earlier versions. +.. seealso:: + :pep:`693` -- Python 3.12 Release Schedule Summary -- Release highlights ============================= @@ -63,18 +59,403 @@ Summary -- Release highlights .. This section singles out the most important changes in Python 3.12. Brevity is key. +Python 3.12 is the latest stable release of the Python programming language, +with a mix of changes to the language and the standard library. +The library changes focus on cleaning up deprecated APIs, usability, and correctness. +Of note, the :mod:`!distutils` package has been removed from the standard library. +Filesystem support in :mod:`os` and :mod:`pathlib` has seen a number of improvements, +and several modules have better performance. + +The language changes focus on usability, +as :term:`f-strings ` have had many limitations removed +and 'Did you mean ...' suggestions continue to improve. +The new :ref:`type parameter syntax ` +and :keyword:`type` statement improve ergonomics for using :term:`generic types +` and :term:`type aliases ` with static type checkers. + +This article doesn't attempt to provide a complete specification of all new features, +but instead gives a convenient overview. +For full details, you should refer to the documentation, +such as the :ref:`Library Reference ` +and :ref:`Language Reference `. +If you want to understand the complete implementation and design rationale for a change, +refer to the PEP for a particular new feature; +but note that PEPs usually are not kept up-to-date +once a feature has been fully implemented. + +-------------- .. PEP-sized items next. +New syntax features: + +* :ref:`PEP 695 `, type parameter syntax and the :keyword:`type` statement + +New grammar features: + +* :ref:`PEP 701 `, :term:`f-strings ` in the grammar + +Interpreter improvements: + +* :ref:`PEP 684 `, a unique per-interpreter :term:`GIL + ` +* :ref:`PEP 669 `, low impact monitoring +* `Improved 'Did you mean ...' suggestions `_ + for :exc:`NameError`, :exc:`ImportError`, and :exc:`SyntaxError` exceptions + +Python data model improvements: + +* :ref:`PEP 688 `, using the :ref:`buffer protocol + ` from Python + +Significant improvements in the standard library: + +* The :class:`pathlib.Path` class now supports subclassing +* The :mod:`os` module received several improvements for Windows support +* A :ref:`command-line interface ` has been added to the + :mod:`sqlite3` module +* :func:`isinstance` checks against :func:`runtime-checkable protocols + ` enjoy a speed up of between two and 20 times +* The :mod:`asyncio` package has had a number of performance improvements, + with some benchmarks showing a 75% speed up. 
+* A :ref:`command-line interface ` has been added to the + :mod:`uuid` module +* Due to the changes in :ref:`PEP 701 `, + producing tokens via the :mod:`tokenize` module is up to 64% faster. + +Security improvements: + +* Replace the builtin :mod:`hashlib` implementations of + SHA1, SHA3, SHA2-384, SHA2-512, and MD5 with formally verified code from the + `HACL* `__ project. + These builtin implementations remain as fallbacks that are only used when + OpenSSL does not provide them. + +C API improvements: + +* :ref:`PEP 697 `, unstable C API tier +* :ref:`PEP 683 `, immortal objects + +CPython implementation improvements: + +* :ref:`PEP 709 `, comprehension inlining +* :ref:`CPython support ` for the Linux ``perf`` profiler +* Implement stack overflow protection on supported platforms + +New typing features: + +* :ref:`PEP 692 `, using :class:`~typing.TypedDict` to + annotate :term:`**kwargs ` +* :ref:`PEP 698 `, :func:`typing.override` decorator + Important deprecations, removals or restrictions: -* :pep:`623`, Remove wstr from Unicode +* :pep:`623`: Remove ``wstr`` from Unicode objects in Python's C API, + reducing the size of every :class:`str` object by at least 8 bytes. + +* :pep:`632`: Remove the :mod:`!distutils` package. + See `the migration guide `_ + for advice replacing the APIs it provided. + The third-party `Setuptools `__ + package continues to provide :mod:`!distutils`, + if you still require it in Python 3.12 and beyond. + +* :gh:`95299`: Do not pre-install ``setuptools`` in virtual environments + created with :mod:`venv`. + This means that ``distutils``, ``setuptools``, ``pkg_resources``, + and ``easy_install`` will no longer available by default; to access these + run ``pip install setuptools`` in the :ref:`activated ` + virtual environment. + +* The :mod:`!asynchat`, :mod:`!asyncore`, and :mod:`!imp` modules have been + removed, along with several :class:`unittest.TestCase` + `method aliases `_. + + +New Features +============ + +.. _whatsnew312-pep695: + +PEP 695: Type Parameter Syntax +------------------------------ + +Generic classes and functions under :pep:`484` were declared using a verbose syntax +that left the scope of type parameters unclear and required explicit declarations of +variance. + +:pep:`695` introduces a new, more compact and explicit way to create +:ref:`generic classes ` and :ref:`functions `:: + + def max[T](args: Iterable[T]) -> T: + ... + + class list[T]: + def __getitem__(self, index: int, /) -> T: + ... + + def append(self, element: T) -> None: + ... + +In addition, the PEP introduces a new way to declare :ref:`type aliases ` +using the :keyword:`type` statement, which creates an instance of +:class:`~typing.TypeAliasType`:: + + type Point = tuple[float, float] + +Type aliases can also be :ref:`generic `:: + + type Point[T] = tuple[T, T] + +The new syntax allows declaring :class:`~typing.TypeVarTuple` +and :class:`~typing.ParamSpec` parameters, as well as :class:`~typing.TypeVar` +parameters with bounds or constraints:: + + type IntFunc[**P] = Callable[P, int] # ParamSpec + type LabeledTuple[*Ts] = tuple[str, *Ts] # TypeVarTuple + type HashableSequence[T: Hashable] = Sequence[T] # TypeVar with bound + type IntOrStrSequence[T: (int, str)] = Sequence[T] # TypeVar with constraints + +The value of type aliases and the bound and constraints of type variables +created through this syntax are evaluated only on demand (see +:ref:`lazy evaluation `). This means type aliases are able to +refer to other types defined later in the file. 
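+
+As a small, illustrative sketch of this lazy evaluation (the ``Pair`` and
+``Node`` names below are invented for the example), an alias may mention a
+class that is only defined further down in the module; the aliased value is
+not computed until it is inspected, for example via
+:attr:`!typing.TypeAliasType.__value__`::
+
+    type Pair = tuple[Node, Node]   # Node is not defined yet; that's fine
+
+    class Node:
+        ...
+
+    print(Pair.__value__)           # evaluated on first access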
+ +Type parameters declared through a type parameter list are visible within the +scope of the declaration and any nested scopes, but not in the outer scope. For +example, they can be used in the type annotations for the methods of a generic +class or in the class body. However, they cannot be used in the module scope after +the class is defined. See :ref:`type-params` for a detailed description of the +runtime semantics of type parameters. + +In order to support these scoping semantics, a new kind of scope is introduced, +the :ref:`annotation scope `. Annotation scopes behave for the +most part like function scopes, but interact differently with enclosing class scopes. +In Python 3.13, :term:`annotations ` will also be evaluated in +annotation scopes. + +See :pep:`695` for more details. + +(PEP written by Eric Traut. Implementation by Jelle Zijlstra, Eric Traut, +and others in :gh:`103764`.) + +.. _whatsnew312-pep701: + +PEP 701: Syntactic formalization of f-strings +--------------------------------------------- + +:pep:`701` lifts some restrictions on the usage of :term:`f-strings `. +Expression components inside f-strings can now be any valid Python expression, +including strings reusing the same quote as the containing f-string, +multi-line expressions, comments, backslashes, and unicode escape sequences. +Let's cover these in detail: + +* Quote reuse: in Python 3.11, reusing the same quotes as the enclosing f-string + raises a :exc:`SyntaxError`, forcing the user to either use other available + quotes (like using double quotes or triple quotes if the f-string uses single + quotes). In Python 3.12, you can now do things like this: + + >>> songs = ['Take me back to Eden', 'Alkaline', 'Ascensionism'] + >>> f"This is the playlist: {", ".join(songs)}" + 'This is the playlist: Take me back to Eden, Alkaline, Ascensionism' + + Note that before this change there was no explicit limit in how f-strings can + be nested, but the fact that string quotes cannot be reused inside the + expression component of f-strings made it impossible to nest f-strings + arbitrarily. In fact, this is the most nested f-string that could be written: + + >>> f"""{f'''{f'{f"{1+1}"}'}'''}""" + '2' + + As now f-strings can contain any valid Python expression inside expression + components, it is now possible to nest f-strings arbitrarily: + + >>> f"{f"{f"{f"{f"{f"{1+1}"}"}"}"}"}" + '2' + +* Multi-line expressions and comments: In Python 3.11, f-string expressions + must be defined in a single line, even if the expression within the f-string + could normally span multiple lines + (like literal lists being defined over multiple lines), + making them harder to read. In Python 3.12 you can now define f-strings + spanning multiple lines, and add inline comments: + + >>> f"This is the playlist: {", ".join([ + ... 'Take me back to Eden', # My, my, those eyes like fire + ... 'Alkaline', # Not acid nor alkaline + ... 'Ascensionism' # Take to the broken skies at last + ... ])}" + 'This is the playlist: Take me back to Eden, Alkaline, Ascensionism' + +* Backslashes and unicode characters: before Python 3.12 f-string expressions + couldn't contain any ``\`` character. This also affected unicode :ref:`escape + sequences ` (such as ``\N{snowman}``) as these contain + the ``\N`` part that previously could not be part of expression components of + f-strings. 
Now, you can define expressions like this: + + >>> print(f"This is the playlist: {"\n".join(songs)}") + This is the playlist: Take me back to Eden + Alkaline + Ascensionism + >>> print(f"This is the playlist: {"\N{BLACK HEART SUIT}".join(songs)}") + This is the playlist: Take me back to Eden♥Alkaline♥Ascensionism + +See :pep:`701` for more details. + +As a positive side-effect of how this feature has been implemented (by parsing f-strings +with :pep:`the PEG parser <617>`), now error messages for f-strings are more precise +and include the exact location of the error. For example, in Python 3.11, the following +f-string raises a :exc:`SyntaxError`: + +.. code-block:: python + + >>> my_string = f"{x z y}" + f"{1 + 1}" + File "", line 1 + (x z y) + ^^^ + SyntaxError: f-string: invalid syntax. Perhaps you forgot a comma? + +but the error message doesn't include the exact location of the error within the line and +also has the expression artificially surrounded by parentheses. In Python 3.12, as f-strings +are parsed with the PEG parser, error messages can be more precise and show the entire line: + +.. code-block:: python + + >>> my_string = f"{x z y}" + f"{1 + 1}" + File "", line 1 + my_string = f"{x z y}" + f"{1 + 1}" + ^^^ + SyntaxError: invalid syntax. Perhaps you forgot a comma? + +(Contributed by Pablo Galindo, Batuhan Taskaya, Lysandros Nikolaou, Cristián +Maureira-Fredes and Marta Gómez in :gh:`102856`. PEP written by Pablo Galindo, +Batuhan Taskaya, Lysandros Nikolaou and Marta Gómez). + +.. _whatsnew312-pep684: + +PEP 684: A Per-Interpreter GIL +------------------------------ + +:pep:`684` introduces a per-interpreter :term:`GIL `, +so that sub-interpreters may now be created with a unique GIL per interpreter. +This allows Python programs to take full advantage of multiple CPU +cores. This is currently only available through the C-API, +though a Python API is :pep:`anticipated for 3.13 <554>`. + +Use the new :c:func:`Py_NewInterpreterFromConfig` function to +create an interpreter with its own GIL:: + + PyInterpreterConfig config = { + .check_multi_interp_extensions = 1, + .gil = PyInterpreterConfig_OWN_GIL, + }; + PyThreadState *tstate = NULL; + PyStatus status = Py_NewInterpreterFromConfig(&tstate, &config); + if (PyStatus_Exception(status)) { + return -1; + } + /* The new interpreter is now active in the current thread. */ + +For further examples how to use the C-API for sub-interpreters with a +per-interpreter GIL, see :source:`Modules/_xxsubinterpretersmodule.c`. + +(Contributed by Eric Snow in :gh:`104210`, etc.) + +.. _whatsnew312-pep669: + +PEP 669: Low impact monitoring for CPython +------------------------------------------ + +:pep:`669` defines a new :mod:`API ` for profilers, +debuggers, and other tools to monitor events in CPython. +It covers a wide range of events, including calls, +returns, lines, exceptions, jumps, and more. +This means that you only pay for what you use, providing support +for near-zero overhead debuggers and coverage tools. +See :mod:`sys.monitoring` for details. + +(Contributed by Mark Shannon in :gh:`103082`.) + +.. _whatsnew312-pep688: + +PEP 688: Making the buffer protocol accessible in Python +-------------------------------------------------------- + +:pep:`688` introduces a way to use the :ref:`buffer protocol ` +from Python code. Classes that implement the :meth:`~object.__buffer__` method +are now usable as buffer types. 
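+
+A minimal sketch of what this looks like from Python (the ``Packet`` class and
+its field are invented for this example): returning a :class:`memoryview` from
+:meth:`~object.__buffer__` is enough for the object to be accepted wherever a
+buffer is expected, such as by :class:`memoryview` itself::
+
+    import array
+
+    class Packet:
+        def __init__(self, payload: bytes) -> None:
+            self._data = array.array("B", payload)
+
+        def __buffer__(self, flags: int) -> memoryview:
+            # Called whenever a consumer requests a buffer view of this object.
+            return memoryview(self._data)
+
+    view = memoryview(Packet(b"\x01\x02\x03"))
+    print(view.tolist())   # [1, 2, 3]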
+ +The new :class:`collections.abc.Buffer` ABC provides a standard +way to represent buffer objects, for example in type annotations. +The new :class:`inspect.BufferFlags` enum represents the flags that +can be used to customize buffer creation. +(Contributed by Jelle Zijlstra in :gh:`102500`.) + +.. _whatsnew312-pep709: + +PEP 709: Comprehension inlining +------------------------------- + +Dictionary, list, and set comprehensions are now inlined, rather than creating a +new single-use function object for each execution of the comprehension. This +speeds up execution of a comprehension by up to two times. +See :pep:`709` for further details. + +Comprehension iteration variables remain isolated and don't overwrite a +variable of the same name in the outer scope, nor are they visible after the +comprehension. Inlining does result in a few visible behavior changes: + +* There is no longer a separate frame for the comprehension in tracebacks, + and tracing/profiling no longer shows the comprehension as a function call. +* The :mod:`symtable` module will no longer produce child symbol tables for each + comprehension; instead, the comprehension's locals will be included in the + parent function's symbol table. +* Calling :func:`locals` inside a comprehension now includes variables + from outside the comprehension, and no longer includes the synthetic ``.0`` + variable for the comprehension "argument". +* A comprehension iterating directly over ``locals()`` (e.g. ``[k for k in + locals()]``) may see "RuntimeError: dictionary changed size during iteration" + when run under tracing (e.g. code coverage measurement). This is the same + behavior already seen in e.g. ``for k in locals():``. To avoid the error, first + create a list of keys to iterate over: ``keys = list(locals()); [k for k in + keys]``. + +(Contributed by Carl Meyer and Vladimir Matveev in :pep:`709`.) Improved Error Messages -======================= +----------------------- + +* Modules from the standard library are now potentially suggested as part of + the error messages displayed by the interpreter when a :exc:`NameError` is + raised to the top level. (Contributed by Pablo Galindo in :gh:`98254`.) + + >>> sys.version_info + Traceback (most recent call last): + File "", line 1, in + NameError: name 'sys' is not defined. Did you forget to import 'sys'? + +* Improve the error suggestion for :exc:`NameError` exceptions for instances. + Now if a :exc:`NameError` is raised in a method and the instance has an + attribute that's exactly equal to the name in the exception, the suggestion + will include ``self.`` instead of the closest match in the method + scope. (Contributed by Pablo Galindo in :gh:`99139`.) + + >>> class A: + ... def __init__(self): + ... self.blech = 1 + ... + ... def foo(self): + ... somethin = blech + ... + >>> A().foo() + Traceback (most recent call last): + File "", line 1 + somethin = blech + ^^^^^ + NameError: name 'blech' is not defined. Did you mean: 'self.blech'? * Improve the :exc:`SyntaxError` error message when the user types ``import x - from y`` instead of ``from y import x``. Contributed by Pablo Galindo in :gh:`98931`. + from y`` instead of ``from y import x``. (Contributed by Pablo Galindo in :gh:`98931`.) >>> import a.y.z from b.y.z Traceback (most recent call last): @@ -83,48 +464,168 @@ Improved Error Messages ^^^^^^^^^^^^^^^^^^^^^^^ SyntaxError: Did you mean to use 'from ... import ...' instead? 
-New Features -============ +* :exc:`ImportError` exceptions raised from failed ``from import + `` statements now include suggestions for the value of ```` based on the + available names in ````. (Contributed by Pablo Galindo in :gh:`91058`.) -* Add :ref:`perf_profiling` through the new - environment variable :envvar:`PYTHONPERFSUPPORT`, - the new command-line option :option:`-X perf <-X>`, - as well as the new :func:`sys.activate_stack_trampoline`, - :func:`sys.deactivate_stack_trampoline`, - and :func:`sys.is_stack_trampoline_active` APIs. - (Design by Pablo Galindo. Contributed by Pablo Galindo and Christian Heimes - with contributions from Gregory P. Smith [Google] and Mark Shannon - in :gh:`96123`.) + >>> from collections import chainmap + Traceback (most recent call last): + File "", line 1, in + ImportError: cannot import name 'chainmap' from 'collections'. Did you mean: 'ChainMap'? -Other Language Changes -====================== +New Features Related to Type Hints +================================== -* :class:`types.MappingProxyType` instances are now hashable if the underlying - mapping is hashable. - (Contributed by Serhiy Storchaka in :gh:`87995`.) +This section covers major changes affecting :pep:`type hints <484>` and +the :mod:`typing` module. -* :class:`memoryview` now supports the half-float type (the "e" format code). - (Contributed by Dong-hee Na and Antoine Pitrou in :gh:`90751`.) +.. _whatsnew312-pep692: + +PEP 692: Using ``TypedDict`` for more precise ``**kwargs`` typing +----------------------------------------------------------------- + +Typing ``**kwargs`` in a function signature as introduced by :pep:`484` allowed +for valid annotations only in cases where all of the ``**kwargs`` were of the +same type. + +:pep:`692` specifies a more precise way of typing ``**kwargs`` by relying on +typed dictionaries:: + + from typing import TypedDict, Unpack + + class Movie(TypedDict): + name: str + year: int + + def foo(**kwargs: Unpack[Movie]): ... + +See :pep:`692` for more details. + +(Contributed by Franek Magiera in :gh:`103629`.) + +.. _whatsnew312-pep698: + +PEP 698: Override Decorator for Static Typing +--------------------------------------------- + +A new decorator :func:`typing.override` has been added to the :mod:`typing` +module. It indicates to type checkers that the method is intended to override +a method in a superclass. This allows type checkers to catch mistakes where +a method that is intended to override something in a base class +does not in fact do so. + +Example:: + + from typing import override + + class Base: + def get_color(self) -> str: + return "blue" + + class GoodChild(Base): + @override # ok: overrides Base.get_color + def get_color(self) -> str: + return "yellow" + + class BadChild(Base): + @override # type checker error: does not override Base.get_color + def get_colour(self) -> str: + return "red" + +See :pep:`698` for more details. + +(Contributed by Steven Troxler in :gh:`101561`.) + +Other Language Changes +====================== * The parser now raises :exc:`SyntaxError` when parsing source code containing null bytes. (Contributed by Pablo Galindo in :gh:`96670`.) -* :func:`ast.parse` now raises :exc:`SyntaxError` instead of :exc:`ValueError` - when parsing source code containing null bytes. (Contributed by Pablo Galindo - in :gh:`96670`.) +* A backslash-character pair that is not a valid escape sequence now generates + a :exc:`SyntaxWarning`, instead of :exc:`DeprecationWarning`. 
+ For example, ``re.compile("\d+\.\d+")`` now emits a :exc:`SyntaxWarning` + (``"\d"`` is an invalid escape sequence, use raw strings for regular + expression: ``re.compile(r"\d+\.\d+")``). + In a future Python version, :exc:`SyntaxError` will eventually be raised, + instead of :exc:`SyntaxWarning`. + (Contributed by Victor Stinner in :gh:`98401`.) + +* Octal escapes with value larger than ``0o377`` (ex: ``"\477"``), deprecated + in Python 3.11, now produce a :exc:`SyntaxWarning`, instead of + :exc:`DeprecationWarning`. + In a future Python version they will be eventually a :exc:`SyntaxError`. + (Contributed by Victor Stinner in :gh:`98401`.) + +* Variables used in the target part of comprehensions that are not stored to + can now be used in assignment expressions (``:=``). + For example, in ``[(b := 1) for a, b.prop in some_iter]``, the assignment to + ``b`` is now allowed. Note that assigning to variables stored to in the target + part of comprehensions (like ``a``) is still disallowed, as per :pep:`572`. + (Contributed by Nikita Sobolev in :gh:`100581`.) + +* Exceptions raised in a class or type's ``__set_name__`` method are no longer + wrapped by a :exc:`RuntimeError`. Context information is added to the + exception as a :pep:`678` note. (Contributed by Irit Katriel in :gh:`77757`.) + +* When a ``try-except*`` construct handles the entire :exc:`ExceptionGroup` + and raises one other exception, that exception is no longer wrapped in an + :exc:`ExceptionGroup`. Also changed in version 3.11.4. (Contributed by Irit + Katriel in :gh:`103590`.) * The Garbage Collector now runs only on the eval breaker mechanism of the - Python bytecode evaluation loop instead on object allocations. The GC can + Python bytecode evaluation loop instead of object allocations. The GC can also run when :c:func:`PyErr_CheckSignals` is called so C extensions that need to run for a long time without executing any Python code also have a chance to execute the GC periodically. (Contributed by Pablo Galindo in :gh:`97922`.) +* All builtin and extension callables expecting boolean parameters now accept + arguments of any type instead of just :class:`bool` and :class:`int`. + (Contributed by Serhiy Storchaka in :gh:`60203`.) + +* :class:`memoryview` now supports the half-float type (the "e" format code). + (Contributed by Donghee Na and Antoine Pitrou in :gh:`90751`.) + +* :class:`slice` objects are now hashable, allowing them to be used as dict keys and + set items. (Contributed by Will Bradshaw, Furkan Onder, and Raymond Hettinger in :gh:`101264`.) + +* :func:`sum` now uses Neumaier summation to improve accuracy and commutativity + when summing floats or mixed ints and floats. + (Contributed by Raymond Hettinger in :gh:`100425`.) + +* :func:`ast.parse` now raises :exc:`SyntaxError` instead of :exc:`ValueError` + when parsing source code containing null bytes. (Contributed by Pablo Galindo + in :gh:`96670`.) + +* The extraction methods in :mod:`tarfile`, and :func:`shutil.unpack_archive`, + have a new a *filter* argument that allows limiting tar features than may be + surprising or dangerous, such as creating files outside the destination + directory. + See :ref:`tarfile extraction filters ` for details. + In Python 3.14, the default will switch to ``'data'``. + (Contributed by Petr Viktorin in :pep:`706`.) + +* :class:`types.MappingProxyType` instances are now hashable if the underlying + mapping is hashable. + (Contributed by Serhiy Storchaka in :gh:`87995`.) 
+ +* Add :ref:`support for the perf profiler ` through the new + environment variable :envvar:`PYTHONPERFSUPPORT` + and command-line option :option:`-X perf <-X>`, + as well as the new :func:`sys.activate_stack_trampoline`, + :func:`sys.deactivate_stack_trampoline`, + and :func:`sys.is_stack_trampoline_active` functions. + (Design by Pablo Galindo. Contributed by Pablo Galindo and Christian Heimes + with contributions from Gregory P. Smith [Google] and Mark Shannon + in :gh:`96123`.) + + New Modules =========== -* None yet. +* None. Improved Modules @@ -139,37 +640,55 @@ array asyncio ------- -* On Linux, :mod:`asyncio` uses :class:`~asyncio.PidfdChildWatcher` by default +* The performance of writing to sockets in :mod:`asyncio` has been + significantly improved. ``asyncio`` now avoids unnecessary copying when + writing to sockets and uses :meth:`~socket.socket.sendmsg` if the platform + supports it. (Contributed by Kumar Aditya in :gh:`91166`.) + +* Add :func:`asyncio.eager_task_factory` and :func:`asyncio.create_eager_task_factory` + functions to allow opting an event loop in to eager task execution, + making some use-cases 2x to 5x faster. + (Contributed by Jacob Bower & Itamar Oren in :gh:`102853`, :gh:`104140`, and :gh:`104138`) + +* On Linux, :mod:`asyncio` uses :class:`asyncio.PidfdChildWatcher` by default if :func:`os.pidfd_open` is available and functional instead of - :class:`~asyncio.ThreadedChildWatcher`. + :class:`asyncio.ThreadedChildWatcher`. (Contributed by Kumar Aditya in :gh:`98024`.) -* The child watcher classes :class:`~asyncio.MultiLoopChildWatcher`, - :class:`~asyncio.FastChildWatcher` and - :class:`~asyncio.SafeChildWatcher` are deprecated and - will be removed in Python 3.14. It is recommended to not manually - configure a child watcher as the event loop now uses the best available - child watcher for each platform (:class:`~asyncio.PidfdChildWatcher` - if supported and :class:`~asyncio.ThreadedChildWatcher` otherwise). +* The event loop now uses the best available child watcher for each platform + (:class:`asyncio.PidfdChildWatcher` if supported and + :class:`asyncio.ThreadedChildWatcher` otherwise), so manually + configuring a child watcher is not recommended. (Contributed by Kumar Aditya in :gh:`94597`.) -* :func:`asyncio.set_child_watcher`, :func:`asyncio.get_child_watcher`, - :meth:`asyncio.AbstractEventLoopPolicy.set_child_watcher` and - :meth:`asyncio.AbstractEventLoopPolicy.get_child_watcher` are deprecated - and will be removed in Python 3.14. - (Contributed by Kumar Aditya in :gh:`94597`.) +* Add *loop_factory* parameter to :func:`asyncio.run` to allow specifying + a custom event loop factory. + (Contributed by Kumar Aditya in :gh:`99388`.) -pathlib -------- +* Add C implementation of :func:`asyncio.current_task` for 4x-6x speedup. + (Contributed by Itamar Oren and Pranav Thulasiram Bhat in :gh:`100344`.) -* Add :meth:`~pathlib.Path.walk` for walking the directory trees and generating - all file or directory names within them, similar to :func:`os.walk`. - (Contributed by Stanislav Zmiev in :gh:`90385`.) +* :func:`asyncio.iscoroutine` now returns ``False`` for generators as + :mod:`asyncio` does not support legacy generator-based coroutines. + (Contributed by Kumar Aditya in :gh:`102748`.) -* Add *walk_up* optional parameter to :meth:`pathlib.PurePath.relative_to` - to allow the insertion of ``..`` entries in the result; this behavior is - more consistent with :func:`os.path.relpath`. - (Contributed by Domenico Ragusa in :issue:`40358`.) 
+* :func:`asyncio.wait` and :func:`asyncio.as_completed` now accepts generators + yielding tasks. + (Contributed by Kumar Aditya in :gh:`78530`.) + +calendar +-------- + +* Add enums :data:`calendar.Month` and :data:`calendar.Day` + defining months of the year and days of the week. + (Contributed by Prince Roshan in :gh:`103636`.) + +csv +--- + +* Add :const:`csv.QUOTE_NOTNULL` and :const:`csv.QUOTE_STRINGS` flags to + provide finer grained control of ``None`` and empty strings by + :class:`csv.writer` objects. dis --- @@ -177,18 +696,137 @@ dis * Pseudo instruction opcodes (which are used by the compiler but do not appear in executable bytecode) are now exposed in the :mod:`dis` module. - :data:`~dis.HAVE_ARGUMENT` is still relevant to real opcodes, + :opcode:`HAVE_ARGUMENT` is still relevant to real opcodes, but it is not useful for pseudo instructions. Use the new - :data:`~dis.hasarg` collection instead. + :data:`dis.hasarg` collection instead. (Contributed by Irit Katriel in :gh:`94216`.) +* Add the :data:`dis.hasexc` collection to signify instructions that set + an exception handler. (Contributed by Irit Katriel in :gh:`94216`.) + +fractions +--------- + +* Objects of type :class:`fractions.Fraction` now support float-style + formatting. (Contributed by Mark Dickinson in :gh:`100161`.) + +importlib.resources +------------------- + +* :func:`importlib.resources.as_file` now supports resource directories. + (Contributed by Jason R. Coombs in :gh:`97930`.) + +* Rename first parameter of :func:`importlib.resources.files` to *anchor*. + (Contributed by Jason R. Coombs in :gh:`100598`.) + +inspect +------- + +* Add :func:`inspect.markcoroutinefunction` to mark sync functions that return + a :term:`coroutine` for use with :func:`inspect.iscoroutinefunction`. + (Contributed Carlton Gibson in :gh:`99247`.) + +* Add :func:`inspect.getasyncgenstate` and :func:`inspect.getasyncgenlocals` + for determining the current state of asynchronous generators. + (Contributed by Thomas Krennwallner in :gh:`79940`.) + +* The performance of :func:`inspect.getattr_static` has been considerably + improved. Most calls to the function should be at least 2x faster than they + were in Python 3.11, and some may be 6x faster or more. (Contributed by Alex + Waygood in :gh:`103193`.) + +itertools +--------- + +* Add :class:`itertools.batched()` for collecting into even-sized + tuples where the last batch may be shorter than the rest. + (Contributed by Raymond Hettinger in :gh:`98363`.) + +math +---- + +* Add :func:`math.sumprod` for computing a sum of products. + (Contributed by Raymond Hettinger in :gh:`100485`.) + +* Extend :func:`math.nextafter` to include a *steps* argument + for moving up or down multiple steps at a time. + (By Matthias Goergens, Mark Dickinson, and Raymond Hettinger in :gh:`94906`.) + os -- -* Add :data:`os.PIDFD_NONBLOCK` to open a file descriptor +* Add :const:`os.PIDFD_NONBLOCK` to open a file descriptor for a process with :func:`os.pidfd_open` in non-blocking mode. (Contributed by Kumar Aditya in :gh:`93312`.) +* :class:`os.DirEntry` now includes an :meth:`os.DirEntry.is_junction` + method to check if the entry is a junction. + (Contributed by Charles Machalow in :gh:`99547`.) + +* Add :func:`os.listdrives`, :func:`os.listvolumes` and :func:`os.listmounts` + functions on Windows for enumerating drives, volumes and mount points. + (Contributed by Steve Dower in :gh:`102519`.) + +* :func:`os.stat` and :func:`os.lstat` are now more accurate on Windows. 
+ The ``st_birthtime`` field will now be filled with the creation time + of the file, and ``st_ctime`` is deprecated but still contains the + creation time (but in the future will return the last metadata change, + for consistency with other platforms). ``st_dev`` may be up to 64 bits + and ``st_ino`` up to 128 bits depending on your file system, and + ``st_rdev`` is always set to zero rather than incorrect values. + Both functions may be significantly faster on newer releases of + Windows. (Contributed by Steve Dower in :gh:`99726`.) + +os.path +------- + +* Add :func:`os.path.isjunction` to check if a given path is a junction. + (Contributed by Charles Machalow in :gh:`99547`.) + +* Add :func:`os.path.splitroot` to split a path into a triad + ``(drive, root, tail)``. (Contributed by Barney Gale in :gh:`101000`.) + +pathlib +------- + +* Add support for subclassing :class:`pathlib.PurePath` and + :class:`pathlib.Path`, plus their Posix- and Windows-specific variants. + Subclasses may override the :meth:`pathlib.PurePath.with_segments` method + to pass information between path instances. + +* Add :meth:`pathlib.Path.walk` for walking the directory trees and generating + all file or directory names within them, similar to :func:`os.walk`. + (Contributed by Stanislav Zmiev in :gh:`90385`.) + +* Add *walk_up* optional parameter to :meth:`pathlib.PurePath.relative_to` + to allow the insertion of ``..`` entries in the result; this behavior is + more consistent with :func:`os.path.relpath`. + (Contributed by Domenico Ragusa in :gh:`84538`.) + +* Add :meth:`pathlib.Path.is_junction` as a proxy to :func:`os.path.isjunction`. + (Contributed by Charles Machalow in :gh:`99547`.) + +* Add *case_sensitive* optional parameter to :meth:`pathlib.Path.glob`, + :meth:`pathlib.Path.rglob` and :meth:`pathlib.PurePath.match` for matching + the path's case sensitivity, allowing for more precise control over the matching process. + +pdb +--- + +* Add convenience variables to hold values temporarily for debug session + and provide quick access to values like the current frame or the return + value. + (Contributed by Tian Gao in :gh:`103693`.) + +random +------ + +* Add :func:`random.binomialvariate`. + (Contributed by Raymond Hettinger in :gh:`81620`.) + +* Add a default of ``lambd=1.0`` to :func:`random.expovariate`. + (Contributed by Raymond Hettinger in :gh:`100234`.) + shutil ------ @@ -198,6 +836,26 @@ shutil of the process to *root_dir* to perform archiving. (Contributed by Serhiy Storchaka in :gh:`74696`.) +* :func:`shutil.rmtree` now accepts a new argument *onexc* which is an + error handler like *onerror* but which expects an exception instance + rather than a *(typ, val, tb)* triplet. *onerror* is deprecated and + will be removed in Python 3.14. + (Contributed by Irit Katriel in :gh:`102828`.) + +* :func:`shutil.which` now consults the *PATHEXT* environment variable to + find matches within *PATH* on Windows even when the given *cmd* includes + a directory component. + (Contributed by Charles Machalow in :gh:`103179`.) + + :func:`shutil.which` will call ``NeedCurrentDirectoryForExePathW`` when + querying for executables on Windows to determine if the current working + directory should be prepended to the search path. + (Contributed by Charles Machalow in :gh:`103179`.) + + :func:`shutil.which` will return a path matching the *cmd* with a component + from ``PATHEXT`` prior to a direct match elsewhere in the search path on + Windows. + (Contributed by Charles Machalow in :gh:`103179`.) 
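+
+For example, a minimal sketch of the new *onexc* error handler described
+above (the handler and the path are illustrative)::
+
+    import shutil
+
+    def log_and_continue(func, path, exc):
+        # Unlike *onerror*, the handler receives the exception instance
+        # itself rather than a (typ, val, tb) triplet.
+        print(f"{func.__name__} failed on {path}: {exc}")
+
+    # Failures are reported to the handler instead of aborting the removal.
+    shutil.rmtree("build/tmp", onexc=log_and_continue)
+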
sqlite3 ------- @@ -205,6 +863,71 @@ sqlite3 * Add a :ref:`command-line interface `. (Contributed by Erlend E. Aasland in :gh:`77617`.) +* Add the :attr:`sqlite3.Connection.autocommit` attribute + to :class:`sqlite3.Connection` + and the *autocommit* parameter to :func:`sqlite3.connect` + to control :pep:`249`-compliant + :ref:`transaction handling `. + (Contributed by Erlend E. Aasland in :gh:`83638`.) + +* Add *entrypoint* keyword-only parameter to + :meth:`sqlite3.Connection.load_extension`, + for overriding the SQLite extension entry point. + (Contributed by Erlend E. Aasland in :gh:`103015`.) + +* Add :meth:`sqlite3.Connection.getconfig` and + :meth:`sqlite3.Connection.setconfig` to :class:`sqlite3.Connection` + to make configuration changes to a database connection. + (Contributed by Erlend E. Aasland in :gh:`103489`.) + +statistics +---------- + +* Extend :func:`statistics.correlation` to include as a ``ranked`` method + for computing the Spearman correlation of ranked data. + (Contributed by Raymond Hettinger in :gh:`95861`.) + +sys +--- + +* Add the :mod:`sys.monitoring` namespace to expose the new :ref:`PEP 669 + ` monitoring API. + (Contributed by Mark Shannon in :gh:`103082`.) + +* Add :func:`sys.activate_stack_trampoline` and + :func:`sys.deactivate_stack_trampoline` for activating and deactivating + stack profiler trampolines, + and :func:`sys.is_stack_trampoline_active` for querying if stack profiler + trampolines are active. + (Contributed by Pablo Galindo and Christian Heimes + with contributions from Gregory P. Smith [Google] and Mark Shannon + in :gh:`96123`.) + +* Add :data:`sys.last_exc` which holds the last unhandled exception that + was raised (for post-mortem debugging use cases). Deprecate the + three fields that have the same information in its legacy form: + :data:`sys.last_type`, :data:`sys.last_value` and :data:`sys.last_traceback`. + (Contributed by Irit Katriel in :gh:`102778`.) + +* :func:`sys._current_exceptions` now returns a mapping from thread-id to an + exception instance, rather than to a ``(typ, exc, tb)`` tuple. + (Contributed by Irit Katriel in :gh:`103176`.) + +* :func:`sys.setrecursionlimit` and :func:`sys.getrecursionlimit`. + The recursion limit now applies only to Python code. Builtin functions do + not use the recursion limit, but are protected by a different mechanism + that prevents recursion from causing a virtual machine crash. + +tempfile +-------- + +* The :class:`tempfile.NamedTemporaryFile` function has a new optional parameter + *delete_on_close* (Contributed by Evgeny Zorin in :gh:`58451`.) +* :func:`tempfile.mkdtemp` now always returns an absolute path, even if the + argument provided to the *dir* parameter is a relative path. + +.. _whatsnew-typing-py312: + threading --------- @@ -213,181 +936,767 @@ threading profiling functions in all running threads in addition to the calling one. (Contributed by Pablo Galindo in :gh:`93503`.) +tkinter +------- + +* ``tkinter.Canvas.coords()`` now flattens its arguments. + It now accepts not only coordinates as separate arguments + (``x1, y1, x2, y2, ...``) and a sequence of coordinates + (``[x1, y1, x2, y2, ...]``), but also coordinates grouped in pairs + (``(x1, y1), (x2, y2), ...`` and ``[(x1, y1), (x2, y2), ...]``), + like ``create_*()`` methods. + (Contributed by Serhiy Storchaka in :gh:`94473`.) + +tokenize +-------- + +* The :mod:`tokenize` module includes the changes introduced in :pep:`701`. + (Contributed by Marta Gómez Macías and Pablo Galindo in :gh:`102856`.) 
+ See :ref:`whatsnew312-porting-to-python312` for more information on the + changes to the :mod:`tokenize` module. + +types +----- + +* Add :func:`types.get_original_bases` to allow for further introspection of + :ref:`user-defined-generics` when subclassed. (Contributed by + James Hilton-Balfe and Alex Waygood in :gh:`101827`.) + +typing +------ + +* :func:`isinstance` checks against + :func:`runtime-checkable protocols ` now use + :func:`inspect.getattr_static` rather than :func:`hasattr` to lookup whether + attributes exist. This means that descriptors and :meth:`~object.__getattr__` + methods are no longer unexpectedly evaluated during ``isinstance()`` checks + against runtime-checkable protocols. However, it may also mean that some + objects which used to be considered instances of a runtime-checkable protocol + may no longer be considered instances of that protocol on Python 3.12+, and + vice versa. Most users are unlikely to be affected by this change. + (Contributed by Alex Waygood in :gh:`102433`.) + +* The members of a runtime-checkable protocol are now considered "frozen" at + runtime as soon as the class has been created. Monkey-patching attributes + onto a runtime-checkable protocol will still work, but will have no impact on + :func:`isinstance` checks comparing objects to the protocol. For example:: + + >>> from typing import Protocol, runtime_checkable + >>> @runtime_checkable + ... class HasX(Protocol): + ... x = 1 + ... + >>> class Foo: ... + ... + >>> f = Foo() + >>> isinstance(f, HasX) + False + >>> f.x = 1 + >>> isinstance(f, HasX) + True + >>> HasX.y = 2 + >>> isinstance(f, HasX) # unchanged, even though HasX now also has a "y" attribute + True + + This change was made in order to speed up ``isinstance()`` checks against + runtime-checkable protocols. + +* The performance profile of :func:`isinstance` checks against + :func:`runtime-checkable protocols ` has changed + significantly. Most ``isinstance()`` checks against protocols with only a few + members should be at least 2x faster than in 3.11, and some may be 20x + faster or more. However, ``isinstance()`` checks against protocols with fourteen + or more members may be slower than in Python 3.11. (Contributed by Alex + Waygood in :gh:`74690` and :gh:`103193`.) + +* All :data:`typing.TypedDict` and :data:`typing.NamedTuple` classes now have the + ``__orig_bases__`` attribute. (Contributed by Adrian Garcia Badaracco in + :gh:`103699`.) + +* Add ``frozen_default`` parameter to :func:`typing.dataclass_transform`. + (Contributed by Erik De Bonte in :gh:`99957`.) + unicodedata ----------- * The Unicode database has been updated to version 15.0.0. (Contributed by Benjamin Peterson in :gh:`96734`). -tempfile +unittest -------- -The :class:`tempfile.NamedTemporaryFile` function has a new optional parameter -*delete_on_close* (Contributed by Evgeny Zorin in :gh:`58451`.) +Add a ``--durations`` command line option, showing the N slowest test cases:: -sys ---- + python3 -m unittest --durations=3 lib.tests.test_threading + ..... 
+ Slowest test durations + ---------------------------------------------------------------------- + 1.210s test_timeout (Lib.test.test_threading.BarrierTests) + 1.003s test_default_timeout (Lib.test.test_threading.BarrierTests) + 0.518s test_timeout (Lib.test.test_threading.EventTests) -* Add :func:`sys.activate_stack_trampoline` and - :func:`sys.deactivate_stack_trampoline` for activating and deactivating - stack profiler trampolines, - and :func:`sys.is_stack_trampoline_active` for querying if stack profiler - trampolines are active. - (Contributed by Pablo Galindo and Christian Heimes - with contributions from Gregory P. Smith [Google] and Mark Shannon - in :gh:`96123`.) + (0.000 durations hidden. Use -v to show these durations.) + ---------------------------------------------------------------------- + Ran 158 tests in 9.869s + + OK (skipped=3) + +(Contributed by Giampaolo Rodola in :gh:`48330`) + +uuid +---- + +* Add a :ref:`command-line interface `. + (Contributed by Adam Chhina in :gh:`88597`.) + + +Optimizations +============= + +* Remove ``wstr`` and ``wstr_length`` members from Unicode objects. + It reduces object size by 8 or 16 bytes on 64bit platform. (:pep:`623`) + (Contributed by Inada Naoki in :gh:`92536`.) + +* Add experimental support for using the BOLT binary optimizer in the build + process, which improves performance by 1-5%. + (Contributed by Kevin Modzelewski in :gh:`90536` and tuned by Donghee Na in :gh:`101525`) + +* Speed up the regular expression substitution (functions :func:`re.sub` and + :func:`re.subn` and corresponding :class:`!re.Pattern` methods) for + replacement strings containing group references by 2--3 times. + (Contributed by Serhiy Storchaka in :gh:`91524`.) + +* Speed up :class:`asyncio.Task` creation by deferring expensive string formatting. + (Contributed by Itamar Oren in :gh:`103793`.) + +* The :func:`tokenize.tokenize` and :func:`tokenize.generate_tokens` functions are + up to 64% faster as a side effect of the changes required to cover :pep:`701` in + the :mod:`tokenize` module. (Contributed by Marta Gómez Macías and Pablo Galindo + in :gh:`102856`.) + +* Speed up :func:`super` method calls and attribute loads via the + new :opcode:`LOAD_SUPER_ATTR` instruction. (Contributed by Carl Meyer and + Vladimir Matveev in :gh:`103497`.) + + +CPython bytecode changes +======================== + +* Remove the :opcode:`!LOAD_METHOD` instruction. It has been merged into + :opcode:`LOAD_ATTR`. :opcode:`LOAD_ATTR` will now behave like the old + :opcode:`!LOAD_METHOD` instruction if the low bit of its oparg is set. + (Contributed by Ken Jin in :gh:`93429`.) + +* Remove the :opcode:`!JUMP_IF_FALSE_OR_POP` and :opcode:`!JUMP_IF_TRUE_OR_POP` + instructions. (Contributed by Irit Katriel in :gh:`102859`.) + +* Remove the :opcode:`!PRECALL` instruction. (Contributed by Mark Shannon in + :gh:`92925`.) + +* Add the :opcode:`BINARY_SLICE` and :opcode:`STORE_SLICE` instructions. + (Contributed by Mark Shannon in :gh:`94163`.) + +* Add the :opcode:`CALL_INTRINSIC_1` instructions. + (Contributed by Mark Shannon in :gh:`99005`.) + +* Add the :opcode:`CALL_INTRINSIC_2` instruction. + (Contributed by Irit Katriel in :gh:`101799`.) + +* Add the :opcode:`CLEANUP_THROW` instruction. + (Contributed by Brandt Bucher in :gh:`90997`.) + +* Add the :opcode:`!END_SEND` instruction. + (Contributed by Mark Shannon in :gh:`103082`.) + +* Add the :opcode:`LOAD_FAST_AND_CLEAR` instruction as part of the + implementation of :pep:`709`. (Contributed by Carl Meyer in :gh:`101441`.) 
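+
+  A quick, illustrative way to see the new instruction is to disassemble a
+  function containing a list comprehension (the function is hypothetical;
+  exact output varies between builds)::
+
+      import dis
+
+      def squares(data):
+          return [x * x for x in data]
+
+      # With PEP 709 the comprehension is inlined into ``squares``;
+      # LOAD_FAST_AND_CLEAR saves and clears any existing binding of
+      # ``x`` around the inlined loop.
+      dis.dis(squares)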
+ +* Add the :opcode:`LOAD_FAST_CHECK` instruction. + (Contributed by Dennis Sweeney in :gh:`93143`.) + +* Add the :opcode:`LOAD_FROM_DICT_OR_DEREF`, :opcode:`LOAD_FROM_DICT_OR_GLOBALS`, + and :opcode:`LOAD_LOCALS` opcodes as part of the implementation of :pep:`695`. + Remove the :opcode:`!LOAD_CLASSDEREF` opcode, which can be replaced with + :opcode:`LOAD_LOCALS` plus :opcode:`LOAD_FROM_DICT_OR_DEREF`. (Contributed + by Jelle Zijlstra in :gh:`103764`.) + +* Add the :opcode:`LOAD_SUPER_ATTR` instruction. (Contributed by Carl Meyer and + Vladimir Matveev in :gh:`103497`.) + +* Add the :opcode:`RETURN_CONST` instruction. (Contributed by Wenyang Wang in :gh:`101632`.) + +Demos and Tools +=============== + +* Remove the ``Tools/demo/`` directory which contained old demo scripts. A copy + can be found in the `old-demos project + `_. + (Contributed by Victor Stinner in :gh:`97681`.) + +* Remove outdated example scripts of the ``Tools/scripts/`` directory. + A copy can be found in the `old-demos project + `_. + (Contributed by Victor Stinner in :gh:`97669`.) + + +Deprecated +========== + +* :mod:`argparse`: The *type*, *choices*, and *metavar* parameters + of :class:`!argparse.BooleanOptionalAction` are deprecated + and will be removed in 3.14. + (Contributed by Nikita Sobolev in :gh:`92248`.) + +* :mod:`ast`: The following :mod:`ast` features have been deprecated in documentation since + Python 3.8, now cause a :exc:`DeprecationWarning` to be emitted at runtime + when they are accessed or used, and will be removed in Python 3.14: + + * :class:`!ast.Num` + * :class:`!ast.Str` + * :class:`!ast.Bytes` + * :class:`!ast.NameConstant` + * :class:`!ast.Ellipsis` + + Use :class:`ast.Constant` instead. + (Contributed by Serhiy Storchaka in :gh:`90953`.) + +* :mod:`asyncio`: + + * The child watcher classes :class:`asyncio.MultiLoopChildWatcher`, + :class:`asyncio.FastChildWatcher`, :class:`asyncio.AbstractChildWatcher` + and :class:`asyncio.SafeChildWatcher` are deprecated and + will be removed in Python 3.14. + (Contributed by Kumar Aditya in :gh:`94597`.) + + * :func:`asyncio.set_child_watcher`, :func:`asyncio.get_child_watcher`, + :meth:`asyncio.AbstractEventLoopPolicy.set_child_watcher` and + :meth:`asyncio.AbstractEventLoopPolicy.get_child_watcher` are deprecated + and will be removed in Python 3.14. + (Contributed by Kumar Aditya in :gh:`94597`.) + + * The :meth:`~asyncio.get_event_loop` method of the + default event loop policy now emits a :exc:`DeprecationWarning` if there + is no current event loop set and it decides to create one. + (Contributed by Serhiy Storchaka and Guido van Rossum in :gh:`100160`.) + +* :mod:`calendar`: ``calendar.January`` and ``calendar.February`` constants are deprecated and + replaced by :data:`calendar.JANUARY` and :data:`calendar.FEBRUARY`. + (Contributed by Prince Roshan in :gh:`103636`.) + +* :mod:`collections.abc`: Deprecated :class:`collections.abc.ByteString`. + Prefer :class:`Sequence` or :class:`collections.abc.Buffer`. + For use in typing, prefer a union, like ``bytes | bytearray``, or :class:`collections.abc.Buffer`. + (Contributed by Shantanu Jain in :gh:`91896`.) + +* :mod:`datetime`: :class:`datetime.datetime`'s :meth:`~datetime.datetime.utcnow` and + :meth:`~datetime.datetime.utcfromtimestamp` are deprecated and will be + removed in a future version. 
Instead, use timezone-aware objects to represent + datetimes in UTC: respectively, call :meth:`~datetime.datetime.now` and + :meth:`~datetime.datetime.fromtimestamp` with the *tz* parameter set to + :const:`datetime.UTC`. + (Contributed by Paul Ganssle in :gh:`103857`.) + +* :mod:`email`: Deprecate the *isdst* parameter in :func:`email.utils.localtime`. + (Contributed by Alan Williams in :gh:`72346`.) + +* :mod:`importlib.abc`: Deprecated the following classes, scheduled for removal in + Python 3.14: + + * :class:`!importlib.abc.ResourceReader` + * :class:`!importlib.abc.Traversable` + * :class:`!importlib.abc.TraversableResources` + + Use :mod:`importlib.resources.abc` classes instead: + + * :class:`importlib.resources.abc.Traversable` + * :class:`importlib.resources.abc.TraversableResources` + + (Contributed by Jason R. Coombs and Hugo van Kemenade in :gh:`93963`.) + +* :mod:`itertools`: Deprecate the support for copy, deepcopy, and pickle operations, + which is undocumented, inefficient, historically buggy, and inconsistent. + This will be removed in 3.14 for a significant reduction in code + volume and maintenance burden. + (Contributed by Raymond Hettinger in :gh:`101588`.) + +* :mod:`multiprocessing`: In Python 3.14, the default :mod:`multiprocessing` + start method will change to a safer one on Linux, BSDs, + and other non-macOS POSIX platforms where ``'fork'`` is currently + the default (:gh:`84559`). Adding a runtime warning about this was deemed too + disruptive as the majority of code is not expected to care. Use the + :func:`~multiprocessing.get_context` or + :func:`~multiprocessing.set_start_method` APIs to explicitly specify when + your code *requires* ``'fork'``. See :ref:`contexts and start methods + `. + +* :mod:`pkgutil`: :func:`pkgutil.find_loader` and :func:`pkgutil.get_loader` + are deprecated and will be removed in Python 3.14; + use :func:`importlib.util.find_spec` instead. + (Contributed by Nikita Sobolev in :gh:`97850`.) + +* :mod:`pty`: The module has two undocumented ``master_open()`` and ``slave_open()`` + functions that have been deprecated since Python 2 but only gained a + proper :exc:`DeprecationWarning` in 3.12. Remove them in 3.14. + (Contributed by Soumendra Ganguly and Gregory P. Smith in :gh:`85984`.) + +* :mod:`os`: + + * The ``st_ctime`` fields return by :func:`os.stat` and :func:`os.lstat` on + Windows are deprecated. In a future release, they will contain the last + metadata change time, consistent with other platforms. For now, they still + contain the creation time, which is also available in the new ``st_birthtime`` + field. (Contributed by Steve Dower in :gh:`99726`.) + + * On POSIX platforms, :func:`os.fork` can now raise a + :exc:`DeprecationWarning` when it can detect being called from a + multithreaded process. There has always been a fundamental incompatibility + with the POSIX platform when doing so. Even if such code *appeared* to work. + We added the warning to to raise awareness as issues encounted by code doing + this are becoming more frequent. See the :func:`os.fork` documentation for + more details along with `this discussion on fork being incompatible with threads + `_ for *why* we're now surfacing this + longstanding platform compatibility problem to developers. + + When this warning appears due to usage of :mod:`multiprocessing` or + :mod:`concurrent.futures` the fix is to use a different + :mod:`multiprocessing` start method such as ``"spawn"`` or ``"forkserver"``. 
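+
+    A minimal sketch of selecting a start method explicitly (the worker
+    function and pool size are illustrative)::
+
+        import multiprocessing as mp
+
+        def work(x):
+            return x * x
+
+        if __name__ == "__main__":
+            # Request "spawn" explicitly instead of relying on the
+            # platform default, which will change away from "fork" on
+            # these platforms in a future release.
+            ctx = mp.get_context("spawn")
+            with ctx.Pool(2) as pool:
+                print(pool.map(work, range(4)))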
+ +* :mod:`shutil`: The *onerror* argument of :func:`shutil.rmtree` is deprecated and will be removed + in Python 3.14. Use *onexc* instead. (Contributed by Irit Katriel in :gh:`102828`.) + +* :mod:`sqlite3`: + + * :ref:`default adapters and converters + ` are now deprecated. + Instead, use the :ref:`sqlite3-adapter-converter-recipes` + and tailor them to your needs. + (Contributed by Erlend E. Aasland in :gh:`90016`.) + + * In :meth:`~sqlite3.Cursor.execute`, :exc:`DeprecationWarning` is now emitted + when :ref:`named placeholders ` are used together with + parameters supplied as a :term:`sequence` instead of as a :class:`dict`. + Starting from Python 3.14, using named placeholders with parameters supplied + as a sequence will raise a :exc:`~sqlite3.ProgrammingError`. + (Contributed by Erlend E. Aasland in :gh:`101698`.) + +* :mod:`sys`: The :data:`sys.last_type`, :data:`sys.last_value` and :data:`sys.last_traceback` + fields are deprecated. Use :data:`sys.last_exc` instead. + (Contributed by Irit Katriel in :gh:`102778`.) + +* :mod:`tarfile`: Extracting tar archives without specifying *filter* is deprecated until + Python 3.14, when ``'data'`` filter will become the default. + See :ref:`tarfile-extraction-filter` for details. + +* :mod:`typing`: + + * :class:`typing.Hashable` and :class:`typing.Sized`, aliases for + :class:`collections.abc.Hashable` and :class:`collections.abc.Sized` respectively, are + deprecated. (:gh:`94309`.) + + * :class:`typing.ByteString`, deprecated since Python 3.9, now causes a + :exc:`DeprecationWarning` to be emitted when it is used. + (Contributed by Alex Waygood in :gh:`91896`.) + +* :mod:`xml.etree.ElementTree`: The module now emits :exc:`DeprecationWarning` + when testing the truth value of an :class:`xml.etree.ElementTree.Element`. + Before, the Python implementation emitted :exc:`FutureWarning`, and the C + implementation emitted nothing. + (Contributed by Jacob Walls in :gh:`83122`.) + +* The 3-arg signatures (type, value, traceback) of :meth:`coroutine throw() + `, :meth:`generator throw() ` and + :meth:`async generator throw() ` are deprecated and + may be removed in a future version of Python. Use the single-arg versions + of these functions instead. (Contributed by Ofey Chan in :gh:`89874`.) + +* :exc:`DeprecationWarning` is now raised when ``__package__`` on a + module differs from ``__spec__.parent`` (previously it was + :exc:`ImportWarning`). + (Contributed by Brett Cannon in :gh:`65961`.) + +* Setting ``__package__`` or ``__cached__`` on a module is deprecated, + and will cease to be set or taken into consideration by the import system in Python 3.14. + (Contributed by Brett Cannon in :gh:`65961`.) + +* The bitwise inversion operator (``~``) on bool is deprecated. It will throw an + error in Python 3.14. Use ``not`` for logical negation of bools instead. + In the rare case that you really need the bitwise inversion of the underlying + ``int``, convert to int explicitly: ``~int(x)``. (Contributed by Tim Hoffmann + in :gh:`103487`.) + +* Accessing ``co_lnotab`` on code objects was deprecated in Python 3.10 via :pep:`626`, + but it only got a proper :exc:`DeprecationWarning` in 3.12, + therefore it will be removed in 3.14. + (Contributed by Nikita Sobolev in :gh:`101866`.) + +Pending Removal in Python 3.13 +------------------------------ + +The following modules and APIs have been deprecated in earlier Python releases, +and will be removed in Python 3.13. 
+ +Modules (see :pep:`594`): + +* :mod:`!aifc` +* :mod:`!audioop` +* :mod:`!cgi` +* :mod:`!cgitb` +* :mod:`!chunk` +* :mod:`!crypt` +* :mod:`!imghdr` +* :mod:`!mailcap` +* :mod:`!msilib` +* :mod:`!nis` +* :mod:`!nntplib` +* :mod:`!ossaudiodev` +* :mod:`!pipes` +* :mod:`!sndhdr` +* :mod:`!spwd` +* :mod:`!sunau` +* :mod:`!telnetlib` +* :mod:`!uu` +* :mod:`!xdrlib` + +Other modules: + +* :mod:`!lib2to3`, and the :program:`2to3` program (:gh:`84540`) + +APIs: + +* :class:`!configparser.LegacyInterpolation` (:gh:`90765`) +* ``locale.resetlocale()`` (:gh:`90817`) +* :meth:`!turtle.RawTurtle.settiltangle` (:gh:`50096`) +* :func:`!unittest.findTestCases` (:gh:`50096`) +* :func:`!unittest.getTestCaseNames` (:gh:`50096`) +* :func:`!unittest.makeSuite` (:gh:`50096`) +* :meth:`!unittest.TestProgram.usageExit` (:gh:`67048`) +* :class:`!webbrowser.MacOSX` (:gh:`86421`) +* :class:`classmethod` descriptor chaining (:gh:`89519`) + +Pending Removal in Python 3.14 +------------------------------ + +The following APIs have been deprecated +and will be removed in Python 3.14. + +* :mod:`argparse`: The *type*, *choices*, and *metavar* parameters + of :class:`!argparse.BooleanOptionalAction` + +* :mod:`ast`: + + * :class:`!ast.Num` + * :class:`!ast.Str` + * :class:`!ast.Bytes` + * :class:`!ast.NameConstant` + * :class:`!ast.Ellipsis` + +* :mod:`asyncio`: + + * :class:`!asyncio.MultiLoopChildWatcher` + * :class:`!asyncio.FastChildWatcher` + * :class:`!asyncio.AbstractChildWatcher` + * :class:`!asyncio.SafeChildWatcher` + * :func:`!asyncio.set_child_watcher` + * :func:`!asyncio.get_child_watcher`, + * :meth:`!asyncio.AbstractEventLoopPolicy.set_child_watcher` + * :meth:`!asyncio.AbstractEventLoopPolicy.get_child_watcher` + +* :mod:`collections.abc`: :class:`!collections.abc.ByteString`. + +* :mod:`email`: the *isdst* parameter in :func:`email.utils.localtime`. + +* :mod:`importlib.abc`: + + * :class:`!importlib.abc.ResourceReader` + * :class:`!importlib.abc.Traversable` + * :class:`!importlib.abc.TraversableResources` + +* :mod:`itertools`: Support for copy, deepcopy, and pickle operations. + +* :mod:`pkgutil`: + + * :func:`!pkgutil.find_loader` + * :func:`!pkgutil.get_loader`. + +* :mod:`pty`: + + * :func:`!pty.master_open` + * :func:`!pty.slave_open` + +* :mod:`shutil`: The *onerror* argument of :func:`shutil.rmtree` + +* :mod:`typing`: :class:`!typing.ByteString` + +* :mod:`xml.etree.ElementTree`: Testing the truth value of an :class:`xml.etree.ElementTree.Element`. + +* The ``__package__`` and ``__cached__`` attributes on module objects. + +* The ``co_lnotab`` attribute of code objects. + +Pending Removal in Python 3.15 +------------------------------ + +The following APIs have been deprecated +and will be removed in Python 3.15. + +APIs: + +* :func:`locale.getdefaultlocale` (:gh:`90817`) + + +Pending Removal in Future Versions +---------------------------------- + +The following APIs were deprecated in earlier Python versions and will be removed, +although there is currently no date scheduled for their removal. + +* :mod:`array`'s ``'u'`` format code (:gh:`57281`) + +* :class:`typing.Text` (:gh:`92332`) + +* Currently Python accepts numeric literals immediately followed by keywords, + for example ``0in x``, ``1or x``, ``0if 1else 2``. It allows confusing + and ambiguous expressions like ``[0x1for x in y]`` (which can be + interpreted as ``[0x1 for x in y]`` or ``[0x1f or x in y]``). 
+ A syntax warning is raised if the numeric literal is + immediately followed by one of keywords :keyword:`and`, :keyword:`else`, + :keyword:`for`, :keyword:`if`, :keyword:`in`, :keyword:`is` and :keyword:`or`. + In a future release it will be changed to a syntax error. (:gh:`87999`) + + +Removed +======= + +asynchat and asyncore +--------------------- + +* These two modules have been removed + according to the schedule in :pep:`594`, + having been deprecated in Python 3.6. + Use :mod:`asyncio` instead. + (Contributed by Nikita Sobolev in :gh:`96580`.) + +configparser +------------ + +* Several names deprecated in the :mod:`configparser` way back in 3.2 have + been removed per :gh:`89336`: + + * :class:`configparser.ParsingError` no longer has a ``filename`` attribute + or argument. Use the ``source`` attribute and argument instead. + * :mod:`configparser` no longer has a ``SafeConfigParser`` class. Use the + shorter :class:`~configparser.ConfigParser` name instead. + * :class:`configparser.ConfigParser` no longer has a ``readfp`` method. + Use :meth:`~configparser.ConfigParser.read_file` instead. +distutils +--------- -Optimizations -============= +* Remove the :py:mod:`!distutils` package. It was deprecated in Python 3.10 by + :pep:`632` "Deprecate distutils module". For projects still using + ``distutils`` and cannot be updated to something else, the ``setuptools`` + project can be installed: it still provides ``distutils``. + (Contributed by Victor Stinner in :gh:`92584`.) -* Removed ``wstr`` and ``wstr_length`` members from Unicode objects. - It reduces object size by 8 or 16 bytes on 64bit platform. (:pep:`623`) - (Contributed by Inada Naoki in :gh:`92536`.) +ensurepip +--------- -* Added experimental support for using the BOLT binary optimizer in the build - process, which improves performance by 1-5%. - (Contributed by Kevin Modzelewski in :gh:`90536`.) +* Remove the bundled setuptools wheel from :mod:`ensurepip`, + and stop installing setuptools in environments created by :mod:`venv`. -* Speed up the regular expression substitution (functions :func:`re.sub` and - :func:`re.subn` and corresponding :class:`re.Pattern` methods) for - replacement strings containing group references by 2--3 times. - (Contributed by Serhiy Storchaka in :gh:`91524`.) + ``pip (>= 22.1)`` does not require setuptools to be installed in the + environment. ``setuptools``-based (and ``distutils``-based) packages + can still be used with ``pip install``, since pip will provide + ``setuptools`` in the build environment it uses for building a + package. + ``easy_install``, ``pkg_resources``, ``setuptools`` and ``distutils`` + are no longer provided by default in environments created with + ``venv`` or bootstrapped with ``ensurepip``, since they are part of + the ``setuptools`` package. For projects relying on these at runtime, + the ``setuptools`` project should be declared as a dependency and + installed separately (typically, using pip). -CPython bytecode changes -======================== + (Contributed by Pradyun Gedam in :gh:`95299`.) -* Removed the :opcode:`LOAD_METHOD` instruction. It has been merged into - :opcode:`LOAD_ATTR`. :opcode:`LOAD_ATTR` will now behave like the old - :opcode:`LOAD_METHOD` instruction if the low bit of its oparg is set. - (Contributed by Ken Jin in :gh:`93429`.) +enum +---- +* Remove :mod:`enum`'s ``EnumMeta.__getattr__``, which is no longer needed for + enum attribute access. + (Contributed by Ethan Furman in :gh:`95083`.) 
-Demos and Tools -=============== +ftplib +------ -* Remove the ``Tools/demo/`` directory which contained old demo scripts. A copy - can be found in the `old-demos project - `_. - (Contributed by Victor Stinner in :gh:`97681`.) +* Remove :mod:`ftplib`'s ``FTP_TLS.ssl_version`` class attribute: use the + *context* parameter instead. + (Contributed by Victor Stinner in :gh:`94172`.) -* Remove outdated example scripts of the ``Tools/scripts/`` directory. - A copy can be found in the `old-demos project - `_. - (Contributed by Victor Stinner in :gh:`97669`.) +gzip +---- +* Remove the ``filename`` attribute of :mod:`gzip`'s :class:`gzip.GzipFile`, + deprecated since Python 2.6, use the :attr:`~gzip.GzipFile.name` attribute + instead. In write mode, the ``filename`` attribute added ``'.gz'`` file + extension if it was not present. + (Contributed by Victor Stinner in :gh:`94196`.) -Deprecated -========== +hashlib +------- -* :class:`typing.Hashable` and :class:`typing.Sized` aliases for :class:`collections.abc.Hashable` - and :class:`collections.abc.Sized`. (:gh:`94309`.) +* Remove the pure Python implementation of :mod:`hashlib`'s + :func:`hashlib.pbkdf2_hmac()`, deprecated in Python 3.10. Python 3.10 and + newer requires OpenSSL 1.1.1 (:pep:`644`): this OpenSSL version provides + a C implementation of :func:`~hashlib.pbkdf2_hmac()` which is faster. + (Contributed by Victor Stinner in :gh:`94199`.) -* The :mod:`sqlite3` :ref:`default adapters and converters - ` are now deprecated. - Instead, use the :ref:`sqlite3-adapter-converter-recipes` - and tailor them to your needs. - (Contributed by Erlend E. Aasland in :gh:`90016`.) +importlib +--------- -* The 3-arg signatures (type, value, traceback) of :meth:`~coroutine.throw`, - :meth:`~generator.throw` and :meth:`~agen.athrow` are deprecated and - may be removed in a future version of Python. Use the single-arg versions - of these functions instead. (Contributed by Ofey Chan in :gh:`89874`.) +* Many previously deprecated cleanups in :mod:`importlib` have now been + completed: -* :exc:`DeprecationWarning` is now raised when ``__package__`` on a - module differs from ``__spec__.parent`` (previously it was - :exc:`ImportWarning`). - (Contributed by Brett Cannon in :gh:`65961`.) + * References to, and support for :meth:`!module_repr()` has been removed. + (Contributed by Barry Warsaw in :gh:`97850`.) + * ``importlib.util.set_package``, ``importlib.util.set_loader`` and + ``importlib.util.module_for_loader`` have all been removed. (Contributed by + Brett Cannon and Nikita Sobolev in :gh:`65961` and :gh:`97850`.) -Pending Removal in Python 3.13 ------------------------------- + * Support for ``find_loader()`` and ``find_module()`` APIs have been + removed. (Contributed by Barry Warsaw in :gh:`98040`.) -The following modules and APIs have been deprecated in earlier Python releases, -and will be removed in Python 3.13. + * ``importlib.abc.Finder``, ``pkgutil.ImpImporter``, and ``pkgutil.ImpLoader`` + have been removed. (Contributed by Barry Warsaw in :gh:`98040`.) -Modules (see :pep:`594`): +imp +--- -* :mod:`aifc` -* :mod:`audioop` -* :mod:`cgi` -* :mod:`cgitb` -* :mod:`chunk` -* :mod:`crypt` -* :mod:`imghdr` -* :mod:`mailcap` -* :mod:`msilib` -* :mod:`nis` -* :mod:`nntplib` -* :mod:`ossaudiodev` -* :mod:`pipes` -* :mod:`sndhdr` -* :mod:`spwd` -* :mod:`sunau` -* :mod:`telnetlib` -* :mod:`uu` -* :mod:`xdrlib` +* The :mod:`!imp` module has been removed. (Contributed by Barry Warsaw in + :gh:`98040`.) 
+ + To migrate, consult the following correspondence table: + + ================================= ======================================= + imp importlib + ================================= ======================================= + ``imp.NullImporter`` Insert ``None`` into ``sys.path_importer_cache`` + ``imp.cache_from_source()`` :func:`importlib.util.cache_from_source` + ``imp.find_module()`` :func:`importlib.util.find_spec` + ``imp.get_magic()`` :attr:`importlib.util.MAGIC_NUMBER` + ``imp.get_suffixes()`` :attr:`importlib.machinery.SOURCE_SUFFIXES`, :attr:`importlib.machinery.EXTENSION_SUFFIXES`, and :attr:`importlib.machinery.BYTECODE_SUFFIXES` + ``imp.get_tag()`` :attr:`sys.implementation.cache_tag ` + ``imp.load_module()`` :func:`importlib.import_module` + ``imp.new_module(name)`` ``types.ModuleType(name)`` + ``imp.reload()`` :func:`importlib.reload` + ``imp.source_from_cache()`` :func:`importlib.util.source_from_cache` + ``imp.load_source()`` *See below* + ================================= ======================================= + + Replace ``imp.load_source()`` with:: + + import importlib.util + import importlib.machinery + + def load_source(modname, filename): + loader = importlib.machinery.SourceFileLoader(modname, filename) + spec = importlib.util.spec_from_file_location(modname, filename, loader=loader) + module = importlib.util.module_from_spec(spec) + # The module is always executed and not cached in sys.modules. + # Uncomment the following line to cache the module. + # sys.modules[module.__name__] = module + loader.exec_module(module) + return module + +* Remove :mod:`!imp` functions and attributes with no replacements: + + * Undocumented functions: + + * ``imp.init_builtin()`` + * ``imp.load_compiled()`` + * ``imp.load_dynamic()`` + * ``imp.load_package()`` + + * ``imp.lock_held()``, ``imp.acquire_lock()``, ``imp.release_lock()``: + the locking scheme has changed in Python 3.3 to per-module locks. + * ``imp.find_module()`` constants: ``SEARCH_ERROR``, ``PY_SOURCE``, + ``PY_COMPILED``, ``C_EXTENSION``, ``PY_RESOURCE``, ``PKG_DIRECTORY``, + ``C_BUILTIN``, ``PY_FROZEN``, ``PY_CODERESOURCE``, ``IMP_HOOK``. + +io +-- -APIs: +* Remove :mod:`io`'s ``io.OpenWrapper`` and ``_pyio.OpenWrapper``, deprecated in Python + 3.10: just use :func:`open` instead. The :func:`open` (:func:`io.open`) + function is a built-in function. Since Python 3.10, :func:`!_pyio.open` is + also a static method. + (Contributed by Victor Stinner in :gh:`94169`.) -* :class:`configparser.LegacyInterpolation` (:gh:`90765`) -* :func:`locale.getdefaultlocale` (:gh:`90817`) -* :meth:`turtle.RawTurtle.settiltangle` (:gh:`50096`) -* :func:`unittest.findTestCases` (:gh:`50096`) -* :func:`unittest.makeSuite` (:gh:`50096`) -* :func:`unittest.getTestCaseNames` (:gh:`50096`) -* :class:`webbrowser.MacOSX` (:gh:`86421`) +locale +------ -Pending Removal in Python 3.14 -============================== +* Remove :mod:`locale`'s :func:`!locale.format` function, deprecated in Python 3.7: + use :func:`locale.format_string` instead. + (Contributed by Victor Stinner in :gh:`94226`.) -* Deprecated the following :mod:`importlib.abc` classes, scheduled for removal in - Python 3.14: +* ``smtpd``: The module has been removed according to the schedule in :pep:`594`, + having been deprecated in Python 3.4.7 and 3.5.4. + Use aiosmtpd_ PyPI module or any other + :mod:`asyncio`-based server instead. + (Contributed by Oleg Iarygin in :gh:`93243`.) 
- * :class:`importlib.abc.ResourceReader` - * :class:`importlib.abc.Traversable` - * :class:`importlib.abc.TraversableResources` +.. _aiosmtpd: https://pypi.org/project/aiosmtpd/ - Use :mod:`importlib.resources.abc` classes instead: +sqlite3 +------- - * :class:`importlib.resources.abc.TraversableResources` - * :class:`importlib.resources.abc.Traversable` - * :class:`importlib.resources.abc.TraversableResources` +* The following undocumented :mod:`sqlite3` features, deprecated in Python + 3.10, are now removed: - (Contributed by Jason R. Coombs and Hugo van Kemenade in :gh:`93963`.) + * ``sqlite3.enable_shared_cache()`` + * ``sqlite3.OptimizedUnicode`` -* Creating :c:data:`immutable types ` with mutable - bases using the C API. + If a shared cache must be used, open the database in URI mode using the + ``cache=shared`` query parameter. -* ``__package__`` and ``__cached__`` will cease to be set or taken - into consideration by the import system (:gh:`97879`). + The ``sqlite3.OptimizedUnicode`` text factory has been an alias for + :class:`str` since Python 3.3. Code that previously set the text factory to + ``OptimizedUnicode`` can either use ``str`` explicitly, or rely on the + default value which is also ``str``. + (Contributed by Erlend E. Aasland in :gh:`92548`.) -Pending Removal in Future Versions ----------------------------------- +ssl +--- -The following APIs were deprecated in earlier Python versions and will be removed, -although there is currently no date scheduled for their removal. +* Remove :mod:`ssl`'s :func:`!ssl.RAND_pseudo_bytes` function, deprecated in Python 3.6: + use :func:`os.urandom` or :func:`ssl.RAND_bytes` instead. + (Contributed by Victor Stinner in :gh:`94199`.) -* :class:`typing.Text` (:gh:`92332`) +* Remove the :func:`!ssl.match_hostname` function. + It was deprecated in Python 3.7. OpenSSL performs + hostname matching since Python 3.7, Python no longer uses the + :func:`!ssl.match_hostname` function. + (Contributed by Victor Stinner in :gh:`94199`.) -* Currently Python accepts numeric literals immediately followed by keywords, - for example ``0in x``, ``1or x``, ``0if 1else 2``. It allows confusing - and ambiguous expressions like ``[0x1for x in y]`` (which can be - interpreted as ``[0x1 for x in y]`` or ``[0x1f or x in y]``). - A syntax warning is raised if the numeric literal is - immediately followed by one of keywords :keyword:`and`, :keyword:`else`, - :keyword:`for`, :keyword:`if`, :keyword:`in`, :keyword:`is` and :keyword:`or`. - In a future release it will be changed to a syntax error. (:gh:`87999`) +* Remove the :func:`!ssl.wrap_socket` function, deprecated in Python 3.7: + instead, create a :class:`ssl.SSLContext` object and call its + :class:`ssl.SSLContext.wrap_socket` method. Any package that still uses + :func:`!ssl.wrap_socket` is broken and insecure. The function neither sends a + SNI TLS extension nor validates server hostname. Code is subject to `CWE-295 + `_: Improper Certificate + Validation. + (Contributed by Victor Stinner in :gh:`94199`.) +unittest +-------- -Removed -======= +* Remove many long-deprecated :mod:`unittest` features: -* Removed many old deprecated :mod:`unittest` features: + .. 
_unittest-TestCase-removed-aliases: - - A number of :class:`~unittest.TestCase` method aliases: + * A number of :class:`~unittest.TestCase` method aliases: ============================ =============================== =============== Deprecated alias Method Name Deprecated in @@ -412,118 +1721,67 @@ Removed You can use https://github.com/isidentical/teyit to automatically modernise your unit tests. - - Undocumented and broken :class:`~unittest.TestCase` method + * Undocumented and broken :class:`~unittest.TestCase` method ``assertDictContainsSubset`` (deprecated in Python 3.2). - - Undocumented :meth:`TestLoader.loadTestsFromModule + * Undocumented :meth:`TestLoader.loadTestsFromModule ` parameter *use_load_tests* (deprecated and ignored since Python 3.2). - - An alias of the :class:`~unittest.TextTestResult` class: + * An alias of the :class:`~unittest.TextTestResult` class: ``_TextTestResult`` (deprecated in Python 3.2). - (Contributed by Serhiy Storchaka in :issue:`45162`.) - -* Several names deprecated in the :mod:`configparser` way back in 3.2 have - been removed per :gh:`89336`: - - * :class:`configparser.ParsingError` no longer has a ``filename`` attribute - or argument. Use the ``source`` attribute and argument instead. - * :mod:`configparser` no longer has a ``SafeConfigParser`` class. Use the - shorter :class:`~configparser.ConfigParser` name instead. - * :class:`configparser.ConfigParser` no longer has a ``readfp`` method. - Use :meth:`~configparser.ConfigParser.read_file` instead. - -* The following undocumented :mod:`sqlite3` features, deprecated in Python - 3.10, are now removed: - - * ``sqlite3.enable_shared_cache()`` - * ``sqlite3.OptimizedUnicode`` - - If a shared cache must be used, open the database in URI mode using the - ``cache=shared`` query parameter. - - The ``sqlite3.OptimizedUnicode`` text factory has been an alias for - :class:`str` since Python 3.3. Code that previously set the text factory to - ``OptimizedUnicode`` can either use ``str`` explicitly, or rely on the - default value which is also ``str``. - - (Contributed by Erlend E. Aasland in :gh:`92548`.) - -* ``smtpd`` has been removed according to the schedule in :pep:`594`, - having been deprecated in Python 3.4.7 and 3.5.4. - Use aiosmtpd_ PyPI module or any other - :mod:`asyncio`-based server instead. - (Contributed by Oleg Iarygin in :gh:`93243`.) - -.. _aiosmtpd: https://pypi.org/project/aiosmtpd/ - -* Remove ``io.OpenWrapper`` and ``_pyio.OpenWrapper``, deprecated in Python - 3.10: just use :func:`open` instead. The :func:`open` (:func:`io.open`) - function is a built-in function. Since Python 3.10, :func:`_pyio.open` is - also a static method. - (Contributed by Victor Stinner in :gh:`94169`.) - -* Remove the :func:`ssl.RAND_pseudo_bytes` function, deprecated in Python 3.6: - use :func:`os.urandom` or :func:`ssl.RAND_bytes` instead. - (Contributed by Victor Stinner in :gh:`94199`.) - -* :mod:`gzip`: Remove the ``filename`` attribute of :class:`gzip.GzipFile`, - deprecated since Python 2.6, use the :attr:`~gzip.GzipFile.name` attribute - instead. In write mode, the ``filename`` attribute added ``'.gz'`` file - extension if it was not present. - (Contributed by Victor Stinner in :gh:`94196`.) + (Contributed by Serhiy Storchaka in :gh:`89325`.) -* Remove the :func:`ssl.match_hostname` function. The - :func:`ssl.match_hostname` was deprecated in Python 3.7. OpenSSL performs - hostname matching since Python 3.7, Python no longer uses the - :func:`ssl.match_hostname` function. 
- (Contributed by Victor Stinner in :gh:`94199`.) +webbrowser +---------- -* Remove the :func:`locale.format` function, deprecated in Python 3.7: - use :func:`locale.format_string` instead. - (Contributed by Victor Stinner in :gh:`94226`.) +* Remove support for obsolete browsers from :mod:`webbrowser`. + The removed browsers include: Grail, Mosaic, Netscape, Galeon, Skipstone, + Iceape, Firebird, and Firefox versions 35 and below (:gh:`102871`). -* :mod:`hashlib`: Remove the pure Python implementation of - :func:`hashlib.pbkdf2_hmac()`, deprecated in Python 3.10. Python 3.10 and - newer requires OpenSSL 1.1.1 (:pep:`644`): this OpenSSL version provides - a C implementation of :func:`~hashlib.pbkdf2_hmac()` which is faster. - (Contributed by Victor Stinner in :gh:`94199`.) +xml.etree.ElementTree +--------------------- -* :mod:`xml.etree`: Remove the ``ElementTree.Element.copy()`` method of the +* Remove the ``ElementTree.Element.copy()`` method of the pure Python implementation, deprecated in Python 3.10, use the - :func:`copy.copy` function instead. The C implementation of :mod:`xml.etree` + :func:`copy.copy` function instead. The C implementation of :mod:`xml.etree.ElementTree` has no ``copy()`` method, only a ``__copy__()`` method. (Contributed by Victor Stinner in :gh:`94383`.) -* :mod:`zipimport`: Remove ``find_loader()`` and ``find_module()`` methods, +zipimport +--------- + +* Remove :mod:`zipimport`'s ``find_loader()`` and ``find_module()`` methods, deprecated in Python 3.10: use the ``find_spec()`` method instead. See :pep:`451` for the rationale. (Contributed by Victor Stinner in :gh:`94379`.) -* Remove the :func:`ssl.wrap_socket` function, deprecated in Python 3.7: - instead, create a :class:`ssl.SSLContext` object and call its - :class:`ssl.SSLContext.wrap_socket` method. Any package that still uses - :func:`ssl.wrap_socket` is broken and insecure. The function neither sends a - SNI TLS extension nor validates server hostname. Code is subject to `CWE-295 - `_: Improper Certificate - Validation. - (Contributed by Victor Stinner in :gh:`94199`.) +Others +------ -* Many previously deprecated cleanups in :mod:`importlib` have now been - completed: +* Remove the ``suspicious`` rule from the documentation :file:`Makefile` and + :file:`Doc/tools/rstlint.py`, both in favor of `sphinx-lint + `_. + (Contributed by Julien Palard in :gh:`98179`.) - * References to, and support for ``module_repr()`` has been eradicated. +* Remove the *keyfile* and *certfile* parameters from the + :mod:`ftplib`, :mod:`imaplib`, :mod:`poplib` and :mod:`smtplib` modules, + and the *key_file*, *cert_file* and *check_hostname* parameters from the + :mod:`http.client` module, + all deprecated since Python 3.6. Use the *context* parameter + (*ssl_context* in :mod:`imaplib`) instead. + (Contributed by Victor Stinner in :gh:`94172`.) +* Remove ``Jython`` compatibility hacks from several stdlib modules and tests. + (Contributed by Nikita Sobolev in :gh:`99482`.) -* ``importlib.util.set_package`` has been removed. - (Contributed by Brett Cannon in :gh:`65961`.) +* Remove ``_use_broken_old_ctypes_structure_semantics_`` flag + from :mod:`ctypes` module. + (Contributed by Nikita Sobolev in :gh:`99285`.) -* Removed the ``suspicious`` rule from the documentation Makefile, and - removed ``Doc/tools/rstlint.py``, both in favor of `sphinx-lint - `_. - (Contributed by Julien Palard in :gh:`98179`.) +.. 
_whatsnew312-porting-to-python312: Porting to Python 3.12 ====================== @@ -541,13 +1799,13 @@ Changes in the Python API contain ASCII letters and digits and underscore. (Contributed by Serhiy Storchaka in :gh:`91760`.) -* Removed randrange() functionality deprecated since Python 3.10. Formerly, - randrange(10.0) losslessly converted to randrange(10). Now, it raises a - TypeError. Also, the exception raised for non-integral values such as - randrange(10.5) or randrange('10') has been changed from ValueError to - TypeError. This also prevents bugs where ``randrange(1e25)`` would silently +* Remove ``randrange()`` functionality deprecated since Python 3.10. Formerly, + ``randrange(10.0)`` losslessly converted to ``randrange(10)``. Now, it raises a + :exc:`TypeError`. Also, the exception raised for non-integer values such as + ``randrange(10.5)`` or ``randrange('10')`` has been changed from :exc:`ValueError` to + :exc:`TypeError`. This also prevents bugs where ``randrange(1e25)`` would silently select from a larger range than ``randrange(10**25)``. - (Originally suggested by Serhiy Storchaka gh-86388.) + (Originally suggested by Serhiy Storchaka :gh:`86388`.) * :class:`argparse.ArgumentParser` changed encoding and error handler for reading arguments from file (e.g. ``fromfile_prefix_chars`` option) @@ -555,7 +1813,7 @@ Changes in the Python API to :term:`filesystem encoding and error handler`. Argument files should be encoded in UTF-8 instead of ANSI Codepage on Windows. -* Removed the ``asyncore``-based ``smtpd`` module deprecated in Python 3.4.7 +* Remove the ``asyncore``-based ``smtpd`` module deprecated in Python 3.4.7 and 3.5.4. A recommended replacement is the :mod:`asyncio`-based aiosmtpd_ PyPI module. @@ -569,13 +1827,77 @@ Changes in the Python API :class:`bytes` type is accepted for bytes strings. (Contributed by Victor Stinner in :gh:`98393`.) +* :func:`syslog.openlog` and :func:`syslog.closelog` now fail if used in subinterpreters. + :func:`syslog.syslog` may still be used in subinterpreters, + but now only if :func:`syslog.openlog` has already been called in the main interpreter. + These new restrictions do not apply to the main interpreter, + so only a very small set of users might be affected. + This change helps with interpreter isolation. Furthermore, :mod:`syslog` is a wrapper + around process-global resources, which are best managed from the main interpreter. + (Contributed by Donghee Na in :gh:`99127`.) + +* The undocumented locking behavior of :func:`~functools.cached_property` + is removed, because it locked across all instances of the class, leading to high + lock contention. This means that a cached property getter function could now run + more than once for a single instance, if two threads race. For most simple + cached properties (e.g. those that are idempotent and simply calculate a value + based on other attributes of the instance) this will be fine. If + synchronization is needed, implement locking within the cached property getter + function or around multi-threaded access points. + +* :func:`sys._current_exceptions` now returns a mapping from thread-id to an + exception instance, rather than to a ``(typ, exc, tb)`` tuple. + (Contributed by Irit Katriel in :gh:`103176`.) + +* When extracting tar files using :mod:`tarfile` or + :func:`shutil.unpack_archive`, pass the *filter* argument to limit features + that may be surprising or dangerous. + See :ref:`tarfile-extraction-filter` for details. 
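+
+  A minimal sketch (the archive and destination names are illustrative)::
+
+      import tarfile
+
+      with tarfile.open("example.tar.gz") as tar:
+          # The "data" filter rejects or sanitizes members that are unsafe
+          # for a plain data archive, such as absolute paths or links that
+          # escape the destination directory.
+          tar.extractall(path="dest", filter="data")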
+ +* The output of the :func:`tokenize.tokenize` and :func:`tokenize.generate_tokens` + functions is now changed due to the changes introduced in :pep:`701`. This + means that ``STRING`` tokens are not emitted any more for f-strings and the + tokens described in :pep:`701` are now produced instead: ``FSTRING_START``, + ``FSTRING_MIDDLE`` and ``FSTRING_END`` are now emitted for f-string "string" + parts in addition to the appropriate tokens for the tokenization in the + expression components. For example for the f-string ``f"start {1+1} end"`` + the old version of the tokenizer emitted:: + + 1,0-1,18: STRING 'f"start {1+1} end"' + + while the new version emits:: + + 1,0-1,2: FSTRING_START 'f"' + 1,2-1,8: FSTRING_MIDDLE 'start ' + 1,8-1,9: OP '{' + 1,9-1,10: NUMBER '1' + 1,10-1,11: OP '+' + 1,11-1,12: NUMBER '1' + 1,12-1,13: OP '}' + 1,13-1,17: FSTRING_MIDDLE ' end' + 1,17-1,18: FSTRING_END '"' + + Additionally, there may be some minor behavioral changes as a consequence of the + changes required to support :pep:`701`. Some of these changes include: + + * The ``type`` attribute of the tokens emitted when tokenizing some invalid Python + characters such as ``!`` has changed from ``ERRORTOKEN`` to ``OP``. + + * Incomplete single-line strings now also raise :exc:`tokenize.TokenError` as incomplete + multiline strings do. + + * Some incomplete or invalid Python code now raises :exc:`tokenize.TokenError` instead of + returning arbitrary ``ERRORTOKEN`` tokens when tokenizing it. + + * Mixing tabs and spaces as indentation in the same file is not supported anymore and will + raise a :exc:`TabError`. Build Changes ============= -* Python no longer uses ``setup.py`` to build shared C extension modules. +* Python no longer uses :file:`setup.py` to build shared C extension modules. Build parameters like headers and libraries are detected in ``configure`` - script. Extensions are built by ``Makefile``. Most extensions use + script. Extensions are built by :file:`Makefile`. Most extensions use ``pkg-config`` and fall back to manual detection. (Contributed by Christian Heimes in :gh:`93939`.) @@ -586,7 +1908,27 @@ Build Changes * CPython now uses the ThinLTO option as the default link time optimization policy if the Clang compiler accepts the flag. - (Contributed by Dong-hee Na in :gh:`89536`.) + (Contributed by Donghee Na in :gh:`89536`.) + +* Add ``COMPILEALL_OPTS`` variable in :file:`Makefile` to override :mod:`compileall` + options (default: ``-j0``) in ``make install``. Also merged the 3 + ``compileall`` commands into a single command to build .pyc files for all + optimization levels (0, 1, 2) at once. + (Contributed by Victor Stinner in :gh:`99289`.) + +* Add platform triplets for 64-bit LoongArch: + + * loongarch64-linux-gnusf + * loongarch64-linux-gnuf32 + * loongarch64-linux-gnu + + (Contributed by Zhang Na in :gh:`90656`.) + +* ``PYTHON_FOR_REGEN`` now require Python 3.10 or newer. + +* Autoconf 2.71 and aclocal 1.16.4 is now required to regenerate + :file:`!configure`. + (Contributed by Christian Heimes in :gh:`89886`.) C API Changes @@ -595,7 +1937,46 @@ C API Changes New Features ------------ -* Added the new limited C API function :c:func:`PyType_FromMetaclass`, +.. _whatsnew312-pep697: + +* :pep:`697`: Introduce the :ref:`Unstable C API tier `, + intended for low-level tools like debuggers and JIT compilers. + This API may change in each minor release of CPython without deprecation + warnings. + Its contents are marked by the ``PyUnstable_`` prefix in names. 
+ + Code object constructors: + + - ``PyUnstable_Code_New()`` (renamed from ``PyCode_New``) + - ``PyUnstable_Code_NewWithPosOnlyArgs()`` (renamed from ``PyCode_NewWithPosOnlyArgs``) + + Extra storage for code objects (:pep:`523`): + + - ``PyUnstable_Eval_RequestCodeExtraIndex()`` (renamed from ``_PyEval_RequestCodeExtraIndex``) + - ``PyUnstable_Code_GetExtra()`` (renamed from ``_PyCode_GetExtra``) + - ``PyUnstable_Code_SetExtra()`` (renamed from ``_PyCode_SetExtra``) + + The original names will continue to be available until the respective + API changes. + + (Contributed by Petr Viktorin in :gh:`101101`.) + +* :pep:`697`: Add an API for extending types whose instance memory layout is + opaque: + + - :c:member:`PyType_Spec.basicsize` can be zero or negative to specify + inheriting or extending the base class size. + - :c:func:`PyObject_GetTypeData` and :c:func:`PyType_GetTypeDataSize` + added to allow access to subclass-specific instance data. + - :c:macro:`Py_TPFLAGS_ITEMS_AT_END` and :c:func:`PyObject_GetItemData` + added to allow safely extending certain variable-sized types, including + :c:var:`PyType_Type`. + - :c:macro:`Py_RELATIVE_OFFSET` added to allow defining + :c:type:`members ` in terms of a subclass-specific struct. + + (Contributed by Petr Viktorin in :gh:`103509`.) + +* Add the new :ref:`limited C API ` function :c:func:`PyType_FromMetaclass`, which generalizes the existing :c:func:`PyType_FromModuleAndSpec` using an additional metaclass argument. (Contributed by Wenzel Jakob in :gh:`93012`.) @@ -604,44 +1985,116 @@ New Features :ref:`the vectorcall protocol ` was added to the :ref:`Limited API `: - * :const:`Py_TPFLAGS_HAVE_VECTORCALL` + * :c:macro:`Py_TPFLAGS_HAVE_VECTORCALL` * :c:func:`PyVectorcall_NARGS` * :c:func:`PyVectorcall_Call` * :c:type:`vectorcallfunc` - The :const:`Py_TPFLAGS_HAVE_VECTORCALL` flag is now removed from a class + The :c:macro:`Py_TPFLAGS_HAVE_VECTORCALL` flag is now removed from a class when the class's :py:meth:`~object.__call__` method is reassigned. This makes vectorcall safe to use with mutable types (i.e. heap types - without the :const:`immutable ` flag). + without the immutable flag, :c:macro:`Py_TPFLAGS_IMMUTABLETYPE`). Mutable types that do not override :c:member:`~PyTypeObject.tp_call` now inherit the ``Py_TPFLAGS_HAVE_VECTORCALL`` flag. (Contributed by Petr Viktorin in :gh:`93274`.) - The :const:`Py_TPFLAGS_MANAGED_DICT` and :const:`Py_TPFLAGS_MANAGED_WEAKREF` + The :c:macro:`Py_TPFLAGS_MANAGED_DICT` and :c:macro:`Py_TPFLAGS_MANAGED_WEAKREF` flags have been added. This allows extensions classes to support object ``__dict__`` and weakrefs with less bookkeeping, using less memory and with faster access. -* Added two new public functions, +* API for performing calls using + :ref:`the vectorcall protocol ` was added to the + :ref:`Limited API `: + + * :c:func:`PyObject_Vectorcall` + * :c:func:`PyObject_VectorcallMethod` + * :c:macro:`PY_VECTORCALL_ARGUMENTS_OFFSET` + + This means that both the incoming and outgoing ends of the vector call + protocol are now available in the :ref:`Limited API `. (Contributed + by Wenzel Jakob in :gh:`98586`.) + +* Add two new public functions, :c:func:`PyEval_SetProfileAllThreads` and :c:func:`PyEval_SetTraceAllThreads`, that allow to set tracing and profiling functions in all running threads in addition to the calling one. (Contributed by Pablo Galindo in :gh:`93503`.) 
-* Added new function :c:func:`PyFunction_SetVectorcall` to the C API +* Add new function :c:func:`PyFunction_SetVectorcall` to the C API which sets the vectorcall field of a given :c:type:`PyFunctionObject`. (Contributed by Andrew Frost in :gh:`92257`.) * The C API now permits registering callbacks via :c:func:`PyDict_AddWatcher`, - :c:func:`PyDict_AddWatch` and related APIs to be called whenever a dictionary + :c:func:`PyDict_Watch` and related APIs to be called whenever a dictionary is modified. This is intended for use by optimizing interpreters, JIT compilers, or debuggers. (Contributed by Carl Meyer in :gh:`91052`.) -* Added :c:func:`PyType_AddWatcher` and :c:func:`PyType_Watch` API to register +* Add :c:func:`PyType_AddWatcher` and :c:func:`PyType_Watch` API to register callbacks to receive notification on changes to a type. (Contributed by Carl Meyer in :gh:`91051`.) +* Add :c:func:`PyCode_AddWatcher` and :c:func:`PyCode_ClearWatcher` + APIs to register callbacks to receive notification on creation and + destruction of code objects. + (Contributed by Itamar Oren in :gh:`91054`.) + +* Add :c:func:`PyFrame_GetVar` and :c:func:`PyFrame_GetVarString` functions to + get a frame variable by its name. + (Contributed by Victor Stinner in :gh:`91248`.) + +* Add :c:func:`PyErr_GetRaisedException` and :c:func:`PyErr_SetRaisedException` + for saving and restoring the current exception. + These functions return and accept a single exception object, + rather than the triple arguments of the now-deprecated + :c:func:`PyErr_Fetch` and :c:func:`PyErr_Restore`. + This is less error prone and a bit more efficient. + (Contributed by Mark Shannon in :gh:`101578`.) + +* Add ``_PyErr_ChainExceptions1``, which takes an exception instance, + to replace the legacy-API ``_PyErr_ChainExceptions``, which is now + deprecated. (Contributed by Mark Shannon in :gh:`101578`.) + +* Add :c:func:`PyException_GetArgs` and :c:func:`PyException_SetArgs` + as convenience functions for retrieving and modifying + the :attr:`~BaseException.args` passed to the exception's constructor. + (Contributed by Mark Shannon in :gh:`101578`.) + +* Add :c:func:`PyErr_DisplayException`, which takes an exception instance, + to replace the legacy-api :c:func:`!PyErr_Display`. (Contributed by + Irit Katriel in :gh:`102755`). + +.. _whatsnew312-pep683: + +* :pep:`683`: Introduce *Immortal Objects*, which allows objects + to bypass reference counts, and related changes to the C-API: + + - ``_Py_IMMORTAL_REFCNT``: The reference count that defines an object + as immortal. + - ``_Py_IsImmortal`` Checks if an object has the immortal reference count. + - ``PyObject_HEAD_INIT`` This will now initialize reference count to + ``_Py_IMMORTAL_REFCNT`` when used with ``Py_BUILD_CORE``. + - ``SSTATE_INTERNED_IMMORTAL`` An identifier for interned unicode objects + that are immortal. + - ``SSTATE_INTERNED_IMMORTAL_STATIC`` An identifier for interned unicode + objects that are immortal and static + - ``sys.getunicodeinternedsize`` This returns the total number of unicode + objects that have been interned. This is now needed for :file:`refleak.py` to + correctly track reference counts and allocated blocks + + (Contributed by Eddie Elizondo in :gh:`84436`.) + +* :pep:`684`: Add the new :c:func:`Py_NewInterpreterFromConfig` + function and :c:type:`PyInterpreterConfig`, which may be used + to create sub-interpreters with their own GILs. + (See :ref:`whatsnew312-pep684` for more info.) + (Contributed by Eric Snow in :gh:`104110`.) 
+ +* In the limited C API version 3.12, :c:func:`Py_INCREF` and + :c:func:`Py_DECREF` functions are now implemented as opaque function calls to + hide implementation details. + (Contributed by Victor Stinner in :gh:`105387`.) Porting to Python 3.12 ---------------------- @@ -669,25 +2122,31 @@ Porting to Python 3.12 :py:meth:`~class.__subclasses__` (using :c:func:`PyObject_CallMethod`, for example). +* Add support of more formatting options (left aligning, octals, uppercase + hexadecimals, :c:type:`intmax_t`, :c:type:`ptrdiff_t`, :c:type:`wchar_t` C + strings, variable width and precision) in :c:func:`PyUnicode_FromFormat` and + :c:func:`PyUnicode_FromFormatV`. + (Contributed by Serhiy Storchaka in :gh:`98836`.) + * An unrecognized format character in :c:func:`PyUnicode_FromFormat` and :c:func:`PyUnicode_FromFormatV` now sets a :exc:`SystemError`. In previous versions it caused all the rest of the format string to be copied as-is to the result string, and any extra arguments discarded. (Contributed by Serhiy Storchaka in :gh:`95781`.) -* Fixed wrong sign placement in :c:func:`PyUnicode_FromFormat` and +* Fix wrong sign placement in :c:func:`PyUnicode_FromFormat` and :c:func:`PyUnicode_FromFormatV`. (Contributed by Philip Georgi in :gh:`95504`.) * Extension classes wanting to add a ``__dict__`` or weak reference slot - should use :const:`Py_TPFLAGS_MANAGED_DICT` and - :const:`Py_TPFLAGS_MANAGED_WEAKREF` instead of ``tp_dictoffset`` and + should use :c:macro:`Py_TPFLAGS_MANAGED_DICT` and + :c:macro:`Py_TPFLAGS_MANAGED_WEAKREF` instead of ``tp_dictoffset`` and ``tp_weaklistoffset``, respectively. The use of ``tp_dictoffset`` and ``tp_weaklistoffset`` is still supported, but does not fully support multiple inheritance (:gh:`95589`), and performance may be worse. - Classes declaring :const:`Py_TPFLAGS_MANAGED_DICT` should call - :c:func:`_PyObject_VisitManagedDict` and :c:func:`_PyObject_ClearManagedDict` + Classes declaring :c:macro:`Py_TPFLAGS_MANAGED_DICT` must call + :c:func:`!_PyObject_VisitManagedDict` and :c:func:`!_PyObject_ClearManagedDict` to traverse and clear their instance's dictionaries. To clear weakrefs, call :c:func:`PyObject_ClearWeakRefs`, as before. @@ -696,9 +2155,95 @@ Porting to Python 3.12 :class:`bytes` type is accepted for bytes strings. (Contributed by Victor Stinner in :gh:`98393`.) +* The :c:macro:`Py_CLEAR`, :c:macro:`Py_SETREF` and :c:macro:`Py_XSETREF` + macros now only evaluate their arguments once. If an argument has side + effects, these side effects are no longer duplicated. + (Contributed by Victor Stinner in :gh:`98724`.) + +* The interpreter's error indicator is now always normalized. This means + that :c:func:`PyErr_SetObject`, :c:func:`PyErr_SetString` and the other + functions that set the error indicator now normalize the exception + before storing it. (Contributed by Mark Shannon in :gh:`101578`.) + +* ``_Py_RefTotal`` is no longer authoritative and only kept around + for ABI compatibility. Note that it is an internal global and only + available on debug builds. If you happen to be using it then you'll + need to start using ``_Py_GetGlobalRefTotal()``. + +* The following functions now select an appropriate metaclass for the newly + created type: + + * :c:func:`PyType_FromSpec` + * :c:func:`PyType_FromSpecWithBases` + * :c:func:`PyType_FromModuleAndSpec` + + Creating classes whose metaclass overrides :c:member:`~PyTypeObject.tp_new` + is deprecated, and in Python 3.14+ it will be disallowed. 
+ Note that these functions ignore ``tp_new`` of the metaclass, possibly + allowing incomplete initialization. + + Note that :c:func:`PyType_FromMetaclass` (added in Python 3.12) + already disallows creating classes whose metaclass overrides ``tp_new`` + (:meth:`~object.__new__` in Python). + + Since ``tp_new`` overrides almost everything ``PyType_From*`` functions do, + the two are incompatible with each other. + The existing behavior -- ignoring the metaclass for several steps + of type creation -- is unsafe in general, since (meta)classes assume that + ``tp_new`` was called. + There is no simple general workaround. One of the following may work for you: + + - If you control the metaclass, avoid using ``tp_new`` in it: + + - If initialization can be skipped, it can be done in + :c:member:`~PyTypeObject.tp_init` instead. + - If the metaclass doesn't need to be instantiated from Python, + set its ``tp_new`` to ``NULL`` using + the :c:macro:`Py_TPFLAGS_DISALLOW_INSTANTIATION` flag. + This makes it acceptable for ``PyType_From*`` functions. + + - Avoid ``PyType_From*`` functions: if you don't need C-specific features + (slots or setting the instance size), create types by :ref:`calling ` + the metaclass. + + - If you *know* the ``tp_new`` can be skipped safely, filter the deprecation + warning out using :func:`warnings.catch_warnings` from Python. + +* :c:var:`PyOS_InputHook` and :c:var:`PyOS_ReadlineFunctionPointer` are no + longer called in :ref:`subinterpreters `. This is + because clients generally rely on process-wide global state (since these + callbacks have no way of recovering extension module state). + + This also avoids situations where extensions may find themselves running in a + subinterpreter that they don't support (or haven't yet been loaded in). See + :gh:`104668` for more info. + +* :c:struct:`PyLongObject` has had its internals changed for better performance. + Although the internals of :c:struct:`PyLongObject` are private, they are used + by some extension modules. + The internal fields should no longer be accessed directly, instead the API + functions beginning ``PyLong_...`` should be used instead. + Two new *unstable* API functions are provided for efficient access to the + value of :c:struct:`PyLongObject`\s which fit into a single machine word: + + * :c:func:`PyUnstable_Long_IsCompact` + * :c:func:`PyUnstable_Long_CompactValue` + +* Custom allocators, set via :c:func:`PyMem_SetAllocator`, are now + required to be thread-safe, regardless of memory domain. Allocators + that don't have their own state, including "hooks", are not affected. + If your custom allocator is not already thread-safe and you need + guidance then please create a new GitHub issue + and CC ``@ericsnowcurrently``. + Deprecated ---------- +* In accordance with :pep:`699`, the ``ma_version_tag`` field in :c:type:`PyDictObject` + is deprecated for extension modules. Accessing this field will generate a compiler + warning at compile time. This field will be removed in Python 3.14. + (Contributed by Ramvikrams and Kumar Aditya in :gh:`101193`. PEP by Ken Jin.) 
+ * Deprecate global configuration variable: * :c:var:`Py_DebugFlag`: use :c:member:`PyConfig.parser_debug` @@ -717,40 +2262,173 @@ Deprecated * :c:var:`Py_HashRandomizationFlag`: use :c:member:`PyConfig.use_hash_seed` and :c:member:`PyConfig.hash_seed` * :c:var:`Py_IsolatedFlag`: use :c:member:`PyConfig.isolated` - * :c:var:`Py_LegacyWindowsFSEncodingFlag`: use :c:member:`PyConfig.legacy_windows_fs_encoding` + * :c:var:`Py_LegacyWindowsFSEncodingFlag`: use :c:member:`PyPreConfig.legacy_windows_fs_encoding` * :c:var:`Py_LegacyWindowsStdioFlag`: use :c:member:`PyConfig.legacy_windows_stdio` - * :c:var:`Py_FileSystemDefaultEncoding`: use :c:member:`PyConfig.filesystem_encoding` - * :c:var:`Py_FileSystemDefaultEncodeErrors`: use :c:member:`PyConfig.filesystem_errors` - * :c:var:`Py_UTF8Mode`: use :c:member:`PyPreConfig.utf8_mode` (see :c:func:`Py_PreInitialize`) + * :c:var:`!Py_FileSystemDefaultEncoding`: use :c:member:`PyConfig.filesystem_encoding` + * :c:var:`!Py_HasFileSystemDefaultEncoding`: use :c:member:`PyConfig.filesystem_encoding` + * :c:var:`!Py_FileSystemDefaultEncodeErrors`: use :c:member:`PyConfig.filesystem_errors` + * :c:var:`!Py_UTF8Mode`: use :c:member:`PyPreConfig.utf8_mode` (see :c:func:`Py_PreInitialize`) The :c:func:`Py_InitializeFromConfig` API should be used with :c:type:`PyConfig` instead. (Contributed by Victor Stinner in :gh:`77782`.) * Creating :c:data:`immutable types ` with mutable - bases is deprecated and will be disabled in Python 3.14. + bases is deprecated and will be disabled in Python 3.14. (:gh:`95388`) + +* The :file:`structmember.h` header is deprecated, though it continues to be + available and there are no plans to remove it. + + Its contents are now available just by including :file:`Python.h`, + with a ``Py`` prefix added if it was missing: + + - :c:struct:`PyMemberDef`, :c:func:`PyMember_GetOne` and + :c:func:`PyMember_SetOne` + - Type macros like :c:macro:`Py_T_INT`, :c:macro:`Py_T_DOUBLE`, etc. + (previously ``T_INT``, ``T_DOUBLE``, etc.) + - The flags :c:macro:`Py_READONLY` (previously ``READONLY``) and + :c:macro:`Py_AUDIT_READ` (previously all uppercase) + + Several items are not exposed from :file:`Python.h`: + + - :c:macro:`T_OBJECT` (use :c:macro:`Py_T_OBJECT_EX`) + - :c:macro:`T_NONE` (previously undocumented, and pretty quirky) + - The macro ``WRITE_RESTRICTED`` which does nothing. + - The macros ``RESTRICTED`` and ``READ_RESTRICTED``, equivalents of + :c:macro:`Py_AUDIT_READ`. + - In some configurations, ```` is not included from :file:`Python.h`. + It should be included manually when using ``offsetof()``. + + The deprecated header continues to provide its original + contents under the original names. + Your old code can stay unchanged, unless the extra include and non-namespaced + macros bother you greatly. + + (Contributed in :gh:`47146` by Petr Viktorin, based on + earlier work by Alexander Belopolsky and Matthias Braun.) + +* :c:func:`PyErr_Fetch` and :c:func:`PyErr_Restore` are deprecated. + Use :c:func:`PyErr_GetRaisedException` and + :c:func:`PyErr_SetRaisedException` instead. + (Contributed by Mark Shannon in :gh:`101578`.) + +* :c:func:`!PyErr_Display` is deprecated. Use :c:func:`PyErr_DisplayException` + instead. (Contributed by Irit Katriel in :gh:`102755`). + +* ``_PyErr_ChainExceptions`` is deprecated. Use ``_PyErr_ChainExceptions1`` + instead. (Contributed by Irit Katriel in :gh:`102192`.) 
+ +* Using :c:func:`PyType_FromSpec`, :c:func:`PyType_FromSpecWithBases` + or :c:func:`PyType_FromModuleAndSpec` to create a class whose metaclass + overrides :c:member:`~PyTypeObject.tp_new` is deprecated. + Call the metaclass instead. + +Pending Removal in Python 3.14 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* The ``ma_version_tag`` field in :c:type:`PyDictObject` for extension modules + (:pep:`699`; :gh:`101193`). + +* Global configuration variables: + + * :c:var:`Py_DebugFlag`: use :c:member:`PyConfig.parser_debug` + * :c:var:`Py_VerboseFlag`: use :c:member:`PyConfig.verbose` + * :c:var:`Py_QuietFlag`: use :c:member:`PyConfig.quiet` + * :c:var:`Py_InteractiveFlag`: use :c:member:`PyConfig.interactive` + * :c:var:`Py_InspectFlag`: use :c:member:`PyConfig.inspect` + * :c:var:`Py_OptimizeFlag`: use :c:member:`PyConfig.optimization_level` + * :c:var:`Py_NoSiteFlag`: use :c:member:`PyConfig.site_import` + * :c:var:`Py_BytesWarningFlag`: use :c:member:`PyConfig.bytes_warning` + * :c:var:`Py_FrozenFlag`: use :c:member:`PyConfig.pathconfig_warnings` + * :c:var:`Py_IgnoreEnvironmentFlag`: use :c:member:`PyConfig.use_environment` + * :c:var:`Py_DontWriteBytecodeFlag`: use :c:member:`PyConfig.write_bytecode` + * :c:var:`Py_NoUserSiteDirectory`: use :c:member:`PyConfig.user_site_directory` + * :c:var:`Py_UnbufferedStdioFlag`: use :c:member:`PyConfig.buffered_stdio` + * :c:var:`Py_HashRandomizationFlag`: use :c:member:`PyConfig.use_hash_seed` + and :c:member:`PyConfig.hash_seed` + * :c:var:`Py_IsolatedFlag`: use :c:member:`PyConfig.isolated` + * :c:var:`Py_LegacyWindowsFSEncodingFlag`: use :c:member:`PyPreConfig.legacy_windows_fs_encoding` + * :c:var:`Py_LegacyWindowsStdioFlag`: use :c:member:`PyConfig.legacy_windows_stdio` + * :c:var:`!Py_FileSystemDefaultEncoding`: use :c:member:`PyConfig.filesystem_encoding` + * :c:var:`!Py_HasFileSystemDefaultEncoding`: use :c:member:`PyConfig.filesystem_encoding` + * :c:var:`!Py_FileSystemDefaultEncodeErrors`: use :c:member:`PyConfig.filesystem_errors` + * :c:var:`!Py_UTF8Mode`: use :c:member:`PyPreConfig.utf8_mode` (see :c:func:`Py_PreInitialize`) + + The :c:func:`Py_InitializeFromConfig` API should be used with + :c:type:`PyConfig` instead. + +* Creating :c:data:`immutable types ` with mutable + bases (:gh:`95388`). + +Pending Removal in Python 3.15 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* :c:func:`PyImport_ImportModuleNoBlock`: use :c:func:`PyImport_ImportModule` +* :c:type:`!Py_UNICODE_WIDE` type: use :c:type:`wchar_t` +* :c:type:`Py_UNICODE` type: use :c:type:`wchar_t` +* Python initialization functions: + + * :c:func:`PySys_ResetWarnOptions`: clear :data:`sys.warnoptions` and + :data:`!warnings.filters` + * :c:func:`Py_GetExecPrefix`: get :data:`sys.exec_prefix` + * :c:func:`Py_GetPath`: get :data:`sys.path` + * :c:func:`Py_GetPrefix`: get :data:`sys.prefix` + * :c:func:`Py_GetProgramFullPath`: get :data:`sys.executable` + * :c:func:`Py_GetProgramName`: get :data:`sys.executable` + * :c:func:`Py_GetPythonHome`: get :c:member:`PyConfig.home` or + the :envvar:`PYTHONHOME` environment variable + +Pending Removal in Future Versions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following APIs are deprecated and will be removed, +although there is currently no date scheduled for their removal. 
+* :c:macro:`Py_TPFLAGS_HAVE_FINALIZE`: unneeded since Python 3.8 +* :c:func:`PyErr_Fetch`: use :c:func:`PyErr_GetRaisedException` +* :c:func:`PyErr_NormalizeException`: use :c:func:`PyErr_GetRaisedException` +* :c:func:`PyErr_Restore`: use :c:func:`PyErr_SetRaisedException` +* :c:func:`PyModule_GetFilename`: use :c:func:`PyModule_GetFilenameObject` +* :c:func:`PyOS_AfterFork`: use :c:func:`PyOS_AfterFork_Child` +* :c:func:`PySlice_GetIndicesEx`: use :c:func:`PySlice_Unpack` and :c:func:`PySlice_AdjustIndices` +* :c:func:`!PyUnicode_AsDecodedObject`: use :c:func:`PyCodec_Decode` +* :c:func:`!PyUnicode_AsDecodedUnicode`: use :c:func:`PyCodec_Decode` +* :c:func:`!PyUnicode_AsEncodedObject`: use :c:func:`PyCodec_Encode` +* :c:func:`!PyUnicode_AsEncodedUnicode`: use :c:func:`PyCodec_Encode` +* :c:func:`PyUnicode_READY`: unneeded since Python 3.12 +* :c:func:`!PyErr_Display`: use :c:func:`PyErr_DisplayException` +* :c:func:`!_PyErr_ChainExceptions`: use ``_PyErr_ChainExceptions1`` +* :c:member:`!PyBytesObject.ob_shash` member: + call :c:func:`PyObject_Hash` instead +* :c:member:`!PyDictObject.ma_version_tag` member +* Thread Local Storage (TLS) API: + + * :c:func:`PyThread_create_key`: use :c:func:`PyThread_tss_alloc` + * :c:func:`PyThread_delete_key`: use :c:func:`PyThread_tss_free` + * :c:func:`PyThread_set_key_value`: use :c:func:`PyThread_tss_set` + * :c:func:`PyThread_get_key_value`: use :c:func:`PyThread_tss_get` + * :c:func:`PyThread_delete_key_value`: use :c:func:`PyThread_tss_delete` + * :c:func:`PyThread_ReInitTLS`: unneeded since Python 3.7 Removed ------- -* Remove the ``token.h`` header file. There was never any public tokenizer C - API. The ``token.h`` header file was only designed to be used by Python +* Remove the :file:`token.h` header file. There was never any public tokenizer C + API. The :file:`token.h` header file was only designed to be used by Python internals. (Contributed by Victor Stinner in :gh:`92651`.) -* Leagcy Unicode APIs has been removed. See :pep:`623` for detail. +* Legacy Unicode APIs have been removed. See :pep:`623` for detail. - * :c:macro:`PyUnicode_WCHAR_KIND` - * :c:func:`PyUnicode_AS_UNICODE` - * :c:func:`PyUnicode_AsUnicode` - * :c:func:`PyUnicode_AsUnicodeAndSize` - * :c:func:`PyUnicode_AS_DATA` - * :c:func:`PyUnicode_FromUnicode` - * :c:func:`PyUnicode_GET_SIZE` - * :c:func:`PyUnicode_GetSize` - * :c:func:`PyUnicode_GET_DATA_SIZE` + * :c:macro:`!PyUnicode_WCHAR_KIND` + * :c:func:`!PyUnicode_AS_UNICODE` + * :c:func:`!PyUnicode_AsUnicode` + * :c:func:`!PyUnicode_AsUnicodeAndSize` + * :c:func:`!PyUnicode_AS_DATA` + * :c:func:`!PyUnicode_FromUnicode` + * :c:func:`!PyUnicode_GET_SIZE` + * :c:func:`!PyUnicode_GetSize` + * :c:func:`!PyUnicode_GET_DATA_SIZE` -* Remove the ``PyUnicode_InternImmortal()`` function and the - ``SSTATE_INTERNED_IMMORTAL`` macro. +* Remove the ``PyUnicode_InternImmortal()`` function macro. (Contributed by Victor Stinner in :gh:`85858`.) diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst new file mode 100644 index 000000000000000..198ea3a4b57bde1 --- /dev/null +++ b/Doc/whatsnew/3.13.rst @@ -0,0 +1,1557 @@ + +**************************** + What's New In Python 3.13 +**************************** + +:Editor: TBD + +.. Rules for maintenance: + + * Anyone can add text to this document. Do not spend very much time + on the wording of your changes, because your text will probably + get rewritten to some degree. 
+ + * The maintainer will go through Misc/NEWS periodically and add + changes; it's therefore more important to add your changes to + Misc/NEWS than to this file. + + * This is not a complete list of every single change; completeness + is the purpose of Misc/NEWS. Some changes I consider too small + or esoteric to include. If such a change is added to the text, + I'll just remove it. (This is another reason you shouldn't spend + too much time on writing your addition.) + + * If you want to draw your new text to the attention of the + maintainer, add 'XXX' to the beginning of the paragraph or + section. + + * It's OK to just add a fragmentary note about a change. For + example: "XXX Describe the transmogrify() function added to the + socket module." The maintainer will research the change and + write the necessary text. + + * You can comment out your additions if you like, but it's not + necessary (especially when a final release is some months away). + + * Credit the author of a patch or bugfix. Just the name is + sufficient; the e-mail address isn't necessary. + + * It's helpful to add the issue number as a comment: + + XXX Describe the transmogrify() function added to the socket + module. + (Contributed by P.Y. Developer in :gh:`12345`.) + + This saves the maintainer the effort of going through the VCS log + when researching a change. + +This article explains the new features in Python 3.13, compared to 3.12. + +For full details, see the :ref:`changelog `. + +.. note:: + + Prerelease users should be aware that this document is currently in draft + form. It will be updated substantially as Python 3.13 moves towards release, + so it's worth checking back even after reading earlier versions. + + +Summary -- Release highlights +============================= + +.. This section singles out the most important changes in Python 3.13. + Brevity is key. + + +.. PEP-sized items next. + +Important deprecations, removals or restrictions: + +* :ref:`PEP 594 `: The remaining 19 "dead batteries" + have been removed from the standard library: + :mod:`!aifc`, :mod:`!audioop`, :mod:`!cgi`, :mod:`!cgitb`, :mod:`!chunk`, + :mod:`!crypt`, :mod:`!imghdr`, :mod:`!mailcap`, :mod:`!msilib`, :mod:`!nis`, + :mod:`!nntplib`, :mod:`!ossaudiodev`, :mod:`!pipes`, :mod:`!sndhdr`, :mod:`!spwd`, + :mod:`!sunau`, :mod:`!telnetlib`, :mod:`!uu` and :mod:`!xdrlib`. + +* :pep:`602` ("Annual Release Cycle for Python") has been updated: + + * Python 3.9 - 3.12 have one and a half years of full support, + followed by three and a half years of security fixes. + * Python 3.13 and later have two years of full support, + followed by three years of security fixes. + + +New Features +============ + + + +Other Language Changes +====================== + +* Allow the *count* argument of :meth:`str.replace` to be a keyword. + (Contributed by Hugo van Kemenade in :gh:`106487`.) + +* Compiler now strip indents from docstrings. + This will reduce the size of :term:`bytecode cache ` (e.g. ``.pyc`` file). + For example, cache file size for ``sqlalchemy.orm.session`` in SQLAlchemy 2.0 + is reduced by about 5%. + This change will affect tools using docstrings, like :mod:`doctest`. + (Contributed by Inada Naoki in :gh:`81283`.) + +* The :func:`compile` built-in can now accept a new flag, + ``ast.PyCF_OPTIMIZED_AST``, which is similar to ``ast.PyCF_ONLY_AST`` + except that the returned ``AST`` is optimized according to the value + of the ``optimize`` argument. + (Contributed by Irit Katriel in :gh:`108113`). 
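+
+  A short sketch of the new flag (the constant folding noted in the comment is
+  illustrative; exactly which optimizations are applied depends on the
+  *optimize* argument)::
+
+      import ast
+
+      tree = compile("answer = 20 + 22", "<example>", "exec",
+                     flags=ast.PyCF_OPTIMIZED_AST)
+      # The result is an AST, as with ast.PyCF_ONLY_AST, but optimization
+      # passes such as constant folding have already run, so the assigned
+      # value may already appear as a single Constant node.
+      print(ast.dump(tree))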
+ +* :mod:`multiprocessing`, :mod:`concurrent.futures`, :mod:`compileall`: + Replace :func:`os.cpu_count` with :func:`os.process_cpu_count` to select the + default number of worker threads and processes. Get the CPU affinity + if supported. + (Contributed by Victor Stinner in :gh:`109649`.) + +* :func:`os.path.realpath` now resolves MS-DOS style file names even if + the file is not accessible. + (Contributed by Moonsik Park in :gh:`82367`.) + +* Fixed a bug where a :keyword:`global` declaration in an :keyword:`except` block + is rejected when the global is used in the :keyword:`else` block. + (Contributed by Irit Katriel in :gh:`111123`.) + +* Added a new environment variable :envvar:`PYTHON_FROZEN_MODULES`. It + determines whether or not frozen modules are ignored by the import machinery, + equivalent of the :option:`-X frozen_modules <-X>` command-line option. + (Contributed by Yilei Yang in :gh:`111374`.) + +New Modules +=========== + +* None yet. + + +Improved Modules +================ + +ast +--- + +* :func:`ast.parse` now accepts an optional argument ``optimize`` + which is passed on to the :func:`compile` built-in. This makes it + possible to obtain an optimized ``AST``. + (Contributed by Irit Katriel in :gh:`108113`). + +array +----- + +* Add ``'w'`` type code (``Py_UCS4``) that can be used for Unicode strings. + It can be used instead of ``'u'`` type code, which is deprecated. + (Contributed by Inada Naoki in :gh:`80480`.) + +asyncio +------- + +* :meth:`asyncio.loop.create_unix_server` will now automatically remove + the Unix socket when the server is closed. + (Contributed by Pierre Ossman in :gh:`111246`.) + +copy +---- + +* Add :func:`copy.replace` function which allows to create a modified copy of + an object, which is especially useful for immutable objects. + It supports named tuples created with the factory function + :func:`collections.namedtuple`, :class:`~dataclasses.dataclass` instances, + various :mod:`datetime` objects, :class:`~inspect.Signature` objects, + :class:`~inspect.Parameter` objects, :ref:`code object `, and + any user classes which define the :meth:`!__replace__` method. + (Contributed by Serhiy Storchaka in :gh:`108751`.) + +dis +--- + +* Change the output of :mod:`dis` module functions to show logical + labels for jump targets and exception handlers, rather than offsets. + The offsets can be added with the new ``-O`` command line option or + the ``show_offsets`` parameter. + (Contributed by Irit Katriel in :gh:`112137`.) + +dbm +--- + +* Add :meth:`dbm.gnu.gdbm.clear` and :meth:`dbm.ndbm.ndbm.clear` methods that remove all items + from the database. + (Contributed by Donghee Na in :gh:`107122`.) + +doctest +------- + +* The :meth:`doctest.DocTestRunner.run` method now counts the number of skipped + tests. Add :attr:`doctest.DocTestRunner.skips` and + :attr:`doctest.TestResults.skipped` attributes. + (Contributed by Victor Stinner in :gh:`108794`.) + +glob +---- + +* Add :func:`glob.translate` function that converts a path specification with + shell-style wildcards to a regular expression. + (Contributed by Barney Gale in :gh:`72904`.) + +io +-- + +The :class:`io.IOBase` finalizer now logs the ``close()`` method errors with +:data:`sys.unraisablehook`. Previously, errors were ignored silently by default, +and only logged in :ref:`Python Development Mode ` or on :ref:`Python +built on debug mode `. +(Contributed by Victor Stinner in :gh:`62948`.) 
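+
+A rough sketch of how such a failure can now be observed, assuming a custom
+unraisable hook is installed (the failing ``close()`` below is purely
+illustrative)::
+
+    import io
+    import sys
+
+    def hook(unraisable):
+        print("finalizer error:", unraisable.exc_value)
+
+    sys.unraisablehook = hook
+
+    class Broken(io.RawIOBase):
+        def close(self):
+            raise OSError("could not flush buffers")
+
+    f = Broken()
+    # Dropping the last reference runs the IOBase finalizer, which calls
+    # close(); the resulting error is now reported through the hook.
+    del f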
+ +ipaddress +--------- + +* Add the :attr:`ipaddress.IPv4Address.ipv6_mapped` property, which returns the IPv4-mapped IPv6 address. + (Contributed by Charles Machalow in :gh:`109466`.) + +mmap +---- + +* The :class:`mmap.mmap` class now has an :meth:`~mmap.mmap.seekable` method + that can be used where it requires a file-like object with seekable and + the :meth:`~mmap.mmap.seek` method return the new absolute position. + (Contributed by Donghee Na and Sylvie Liberman in :gh:`111835`.) + +opcode +------ + +* Move ``opcode.ENABLE_SPECIALIZATION`` to ``_opcode.ENABLE_SPECIALIZATION``. + This field was added in 3.12, it was never documented and is not intended for + external usage. (Contributed by Irit Katriel in :gh:`105481`.) + +* Removed ``opcode.is_pseudo``, ``opcode.MIN_PSEUDO_OPCODE`` and + ``opcode.MAX_PSEUDO_OPCODE``, which were added in 3.12, were never + documented or exposed through ``dis``, and were not intended to be + used externally. + +os +-- + +* Add :func:`os.process_cpu_count` function to get the number of logical CPUs + usable by the calling thread of the current process. + (Contributed by Victor Stinner in :gh:`109649`.) + +* Add a low level interface for Linux's timer notification file descriptors + via :func:`os.timerfd_create`, + :func:`os.timerfd_settime`, :func:`os.timerfd_settime_ns`, + :func:`os.timerfd_gettime`, and :func:`os.timerfd_gettime_ns`, + :const:`os.TFD_NONBLOCK`, :const:`os.TFD_CLOEXEC`, + :const:`os.TFD_TIMER_ABSTIME`, and :const:`os.TFD_TIMER_CANCEL_ON_SET` + (Contributed by Masaru Tsuchiyama in :gh:`108277`.) + +* :func:`os.cpu_count` and :func:`os.process_cpu_count` can be overridden through + the new environment variable :envvar:`PYTHON_CPU_COUNT` or the new command-line option + :option:`-X cpu_count <-X>`. This option is useful for users who need to limit + CPU resources of a container system without having to modify the container (application code). + (Contributed by Donghee Na in :gh:`109595`) + +pathlib +------- + +* Add :exc:`pathlib.UnsupportedOperation`, which is raised instead of + :exc:`NotImplementedError` when a path operation isn't supported. + (Contributed by Barney Gale in :gh:`89812`.) + +* Add :meth:`pathlib.Path.from_uri`, a new constructor to create a :class:`pathlib.Path` + object from a 'file' URI (``file:/``). + (Contributed by Barney Gale in :gh:`107465`.) + +* Add support for recursive wildcards in :meth:`pathlib.PurePath.match`. + (Contributed by Barney Gale in :gh:`73435`.) + +* Add *follow_symlinks* keyword-only argument to :meth:`pathlib.Path.glob`, + :meth:`~pathlib.Path.rglob`, :meth:`~pathlib.Path.is_file`, and + :meth:`~pathlib.Path.is_dir`. + (Contributed by Barney Gale in :gh:`77609` and :gh:`105793`.) + +pdb +--- + +* Add ability to move between chained exceptions during post mortem debugging in :func:`~pdb.pm` using + the new ``exceptions [exc_number]`` command for Pdb. (Contributed by Matthias + Bussonnier in :gh:`106676`.) + +* Expressions/Statements whose prefix is a pdb command are now correctly + identified and executed. + (Contributed by Tian Gao in :gh:`108464`.) + +* ``sys.path[0]`` will no longer be replaced by the directory of the script + being debugged when ``sys.flags.safe_path`` is set (via the :option:`-P` + command line option or :envvar:`PYTHONSAFEPATH` environment variable). + (Contributed by Tian Gao and Christian Walther in :gh:`111762`.) + +sqlite3 +------- + +* A :exc:`ResourceWarning` is now emitted if a :class:`sqlite3.Connection` + object is not :meth:`closed ` explicitly. 
+ (Contributed by Erlend E. Aasland in :gh:`105539`.) + +tkinter +------- + +* Add :mod:`tkinter` widget methods: + :meth:`!tk_busy_hold`, :meth:`!tk_busy_configure`, + :meth:`!tk_busy_cget`, :meth:`!tk_busy_forget`, + :meth:`!tk_busy_current`, and :meth:`!tk_busy_status`. + (Contributed by Miguel, klappnase and Serhiy Storchaka in :gh:`72684`.) + +* Add support of the "vsapi" element type in + the :meth:`~tkinter.ttk.Style.element_create` method of + :class:`tkinter.ttk.Style`. + (Contributed by Serhiy Storchaka in :gh:`68166`.) + +traceback +--------- + +* Add *show_group* parameter to :func:`traceback.TracebackException.format_exception_only` + to format the nested exceptions of a :exc:`BaseExceptionGroup` instance, recursively. + (Contributed by Irit Katriel in :gh:`105292`.) + +* Add the field *exc_type_str* to :class:`~traceback.TracebackException`, which + holds a string display of the *exc_type*. Deprecate the field *exc_type* + which holds the type object itself. Add parameter *save_exc_type* (default + ``True``) to indicate whether ``exc_type`` should be saved. + (Contributed by Irit Katriel in :gh:`112332`.) + +typing +------ + +* Add :func:`typing.get_protocol_members` to return the set of members + defining a :class:`typing.Protocol`. Add :func:`typing.is_protocol` to + check whether a class is a :class:`typing.Protocol`. (Contributed by Jelle Zijlstra in + :gh:`104873`.) + +unicodedata +----------- + +* The Unicode database has been updated to version 15.1.0. (Contributed by + James Gerity in :gh:`109559`.) + +venv +---- + +* Add support for adding source control management (SCM) ignore files to a + virtual environment's directory. By default, Git is supported. This is + implemented as opt-in via the API which can be extended to support other SCMs + (:class:`venv.EnvBuilder` and :func:`venv.create`), and opt-out via the CLI + (using ``--without-scm-ignore-files``). (Contributed by Brett Cannon in + :gh:`108125`.) + +Optimizations +============= + +* :func:`textwrap.indent` is now ~30% faster than before for large input. + (Contributed by Inada Naoki in :gh:`107369`.) + + +Deprecated +========== + +* :mod:`array`: :mod:`array`'s ``'u'`` format code, deprecated in docs since Python 3.3, + emits :exc:`DeprecationWarning` since 3.13 + and will be removed in Python 3.16. + Use the ``'w'`` format code instead. + (contributed by Hugo van Kemenade in :gh:`80480`) + +* :mod:`ctypes`: Deprecate undocumented :func:`!ctypes.SetPointerType` + and :func:`!ctypes.ARRAY` functions. + Replace ``ctypes.ARRAY(item_type, size)`` with ``item_type * size``. + (Contributed by Victor Stinner in :gh:`105733`.) + +* :mod:`getopt` and :mod:`optparse` modules: They are now + :term:`soft deprecated`: the :mod:`argparse` should be used for new projects. + Previously, the :mod:`optparse` module was already deprecated, its removal + was not scheduled, and no warnings was emitted: so there is no change in + practice. + (Contributed by Victor Stinner in :gh:`106535`.) + +* :mod:`http.server`: :class:`http.server.CGIHTTPRequestHandler` now emits a + :exc:`DeprecationWarning` as it will be removed in 3.15. Process based CGI + http servers have been out of favor for a very long time. This code was + outdated, unmaintained, and rarely used. It has a high potential for both + security and functionality bugs. This includes removal of the ``--cgi`` + flag to the ``python -m http.server`` command line in 3.15. + +* :mod:`traceback`: + + * The field *exc_type* of :class:`traceback.TracebackException` is + deprecated. 
Use *exc_type_str* instead. + +* :mod:`typing`: + + * Creating a :class:`typing.NamedTuple` class using keyword arguments to denote + the fields (``NT = NamedTuple("NT", x=int, y=int)``) is deprecated, and will + be disallowed in Python 3.15. Use the class-based syntax or the functional + syntax instead. (Contributed by Alex Waygood in :gh:`105566`.) + + * When using the functional syntax to create a :class:`typing.NamedTuple` + class or a :class:`typing.TypedDict` class, failing to pass a value to the + 'fields' parameter (``NT = NamedTuple("NT")`` or ``TD = TypedDict("TD")``) is + deprecated. Passing ``None`` to the 'fields' parameter + (``NT = NamedTuple("NT", None)`` or ``TD = TypedDict("TD", None)``) is also + deprecated. Both will be disallowed in Python 3.15. To create a NamedTuple + class with 0 fields, use ``class NT(NamedTuple): pass`` or + ``NT = NamedTuple("NT", [])``. To create a TypedDict class with 0 fields, use + ``class TD(TypedDict): pass`` or ``TD = TypedDict("TD", {})``. + (Contributed by Alex Waygood in :gh:`105566` and :gh:`105570`.) + + * :func:`typing.no_type_check_decorator` is deprecated, and scheduled for + removal in Python 3.15. After eight years in the :mod:`typing` module, it + has yet to be supported by any major type checkers. + (Contributed by Alex Waygood in :gh:`106309`.) + + * :data:`typing.AnyStr` is deprecated. In Python 3.16, it will be removed from + ``typing.__all__``, and a :exc:`DeprecationWarning` will be emitted when it + is imported or accessed. It will be removed entirely in Python 3.18. Use + the new :ref:`type parameter syntax ` instead. + (Contributed by Michael The in :gh:`107116`.) + +* :mod:`wave`: Deprecate the ``getmark()``, ``setmark()`` and ``getmarkers()`` + methods of the :class:`wave.Wave_read` and :class:`wave.Wave_write` classes. + They will be removed in Python 3.15. + (Contributed by Victor Stinner in :gh:`105096`.) + +* Passing more than one positional argument to :func:`sqlite3.connect` and the + :class:`sqlite3.Connection` constructor is deprecated. The remaining + parameters will become keyword-only in Python 3.15. + + Deprecate passing name, number of arguments, and the callable as keyword + arguments, for the following :class:`sqlite3.Connection` APIs: + + * :meth:`~sqlite3.Connection.create_function` + * :meth:`~sqlite3.Connection.create_aggregate` + + Deprecate passing the callback callable by keyword for the following + :class:`sqlite3.Connection` APIs: + + * :meth:`~sqlite3.Connection.set_authorizer` + * :meth:`~sqlite3.Connection.set_progress_handler` + * :meth:`~sqlite3.Connection.set_trace_callback` + + The affected parameters will become positional-only in Python 3.15. + + (Contributed by Erlend E. Aasland in :gh:`107948` and :gh:`108278`.) + +* The ``dis.HAVE_ARGUMENT`` separator is deprecated. Check membership + in :data:`~dis.hasarg` instead. + (Contributed by Irit Katriel in :gh:`109319`.) + +* Deprecate non-standard format specifier "N" for :class:`decimal.Decimal`. + It was not documented and only supported in the C implementation. + (Contributed by Serhiy Storchaka in :gh:`89902`.) + +* Emit deprecation warning for non-integer numbers in :mod:`gettext` functions + and methods that consider plural forms even if the translation was not found. + (Contributed by Serhiy Storchaka in :gh:`88434`.) + +* Calling :meth:`frame.clear` on a suspended frame raises :exc:`RuntimeError` + (as has always been the case for an executing frame). + (Contributed by Irit Katriel in :gh:`79932`.) 
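+
+  A small sketch of the new behaviour for a suspended generator frame::
+
+      def gen():
+          yield 1
+          yield 2
+
+      g = gen()
+      next(g)                  # g is now suspended at the first yield
+      try:
+          g.gi_frame.clear()
+      except RuntimeError as exc:
+          print(exc)           # clearing a suspended frame is rejected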
+ +* Assignment to a function's ``__code__`` attribute where the new code + object's type does not match the function's type, is deprecated. The + different types are: plain function, generator, async generator and + coroutine. + (Contributed by Irit Katriel in :gh:`81137`.) + + +Pending Removal in Python 3.14 +------------------------------ + +* :mod:`argparse`: The *type*, *choices*, and *metavar* parameters + of :class:`!argparse.BooleanOptionalAction` are deprecated + and will be removed in 3.14. + (Contributed by Nikita Sobolev in :gh:`92248`.) + +* :mod:`ast`: The following features have been deprecated in documentation + since Python 3.8, now cause a :exc:`DeprecationWarning` to be emitted at + runtime when they are accessed or used, and will be removed in Python 3.14: + + * :class:`!ast.Num` + * :class:`!ast.Str` + * :class:`!ast.Bytes` + * :class:`!ast.NameConstant` + * :class:`!ast.Ellipsis` + + Use :class:`ast.Constant` instead. + (Contributed by Serhiy Storchaka in :gh:`90953`.) + +* :mod:`collections.abc`: Deprecated :class:`~collections.abc.ByteString`. + Prefer :class:`!Sequence` or :class:`~collections.abc.Buffer`. + For use in typing, prefer a union, like ``bytes | bytearray``, + or :class:`collections.abc.Buffer`. + (Contributed by Shantanu Jain in :gh:`91896`.) + +* :mod:`email`: Deprecated the *isdst* parameter in :func:`email.utils.localtime`. + (Contributed by Alan Williams in :gh:`72346`.) + +* :mod:`importlib`: ``__package__`` and ``__cached__`` will cease to be set or + taken into consideration by the import system (:gh:`97879`). + +* :mod:`importlib.abc` deprecated classes: + + * :class:`!importlib.abc.ResourceReader` + * :class:`!importlib.abc.Traversable` + * :class:`!importlib.abc.TraversableResources` + + Use :mod:`importlib.resources.abc` classes instead: + + * :class:`importlib.resources.abc.Traversable` + * :class:`importlib.resources.abc.TraversableResources` + + (Contributed by Jason R. Coombs and Hugo van Kemenade in :gh:`93963`.) + +* :mod:`itertools` had undocumented, inefficient, historically buggy, + and inconsistent support for copy, deepcopy, and pickle operations. + This will be removed in 3.14 for a significant reduction in code + volume and maintenance burden. + (Contributed by Raymond Hettinger in :gh:`101588`.) + +* :mod:`multiprocessing`: The default start method will change to a safer one on + Linux, BSDs, and other non-macOS POSIX platforms where ``'fork'`` is currently + the default (:gh:`84559`). Adding a runtime warning about this was deemed too + disruptive as the majority of code is not expected to care. Use the + :func:`~multiprocessing.get_context` or + :func:`~multiprocessing.set_start_method` APIs to explicitly specify when + your code *requires* ``'fork'``. See :ref:`multiprocessing-start-methods`. + +* :mod:`pathlib`: :meth:`~pathlib.PurePath.is_relative_to`, + :meth:`~pathlib.PurePath.relative_to`: passing additional arguments is + deprecated. + +* :func:`pkgutil.find_loader` and :func:`pkgutil.get_loader` + now raise :exc:`DeprecationWarning`; + use :func:`importlib.util.find_spec` instead. + (Contributed by Nikita Sobolev in :gh:`97850`.) + +* :mod:`pty`: + + * ``master_open()``: use :func:`pty.openpty`. + * ``slave_open()``: use :func:`pty.openpty`. + +* :func:`shutil.rmtree` *onerror* parameter is deprecated in 3.12, + and will be removed in 3.14: use the *onexc* parameter instead. + +* :mod:`sqlite3`: + + * :data:`~sqlite3.version` and :data:`~sqlite3.version_info`. 
+ + * :meth:`~sqlite3.Cursor.execute` and :meth:`~sqlite3.Cursor.executemany` + if :ref:`named placeholders ` are used and + *parameters* is a sequence instead of a :class:`dict`. + + * date and datetime adapter, date and timestamp converter: + see the :mod:`sqlite3` documentation for suggested replacement recipes. + +* :class:`types.CodeType`: Accessing ``co_lnotab`` was deprecated in :pep:`626` + since 3.10 and was planned to be removed in 3.12, + but it only got a proper :exc:`DeprecationWarning` in 3.12. + May be removed in 3.14. + (Contributed by Nikita Sobolev in :gh:`101866`.) + +* :mod:`typing`: :class:`~typing.ByteString`, deprecated since Python 3.9, + now causes a :exc:`DeprecationWarning` to be emitted when it is used. + +* :class:`!urllib.parse.Quoter` is deprecated: it was not intended to be a + public API. + (Contributed by Gregory P. Smith in :gh:`88168`.) + +* :mod:`xml.etree.ElementTree`: Testing the truth value of an + :class:`~xml.etree.ElementTree.Element` is deprecated and will raise an + exception in Python 3.14. + +Pending Removal in Python 3.15 +------------------------------ + +* :class:`http.server.CGIHTTPRequestHandler` will be removed along with its + related ``--cgi`` flag to ``python -m http.server``. It was obsolete and + rarely used. No direct replacement exists. *Anything* is better than CGI + to interface a web server with a request handler. + +* :class:`locale`: :func:`locale.getdefaultlocale` was deprecated in Python 3.11 + and originally planned for removal in Python 3.13 (:gh:`90817`), + but removal has been postponed to Python 3.15. + Use :func:`locale.setlocale()`, :func:`locale.getencoding()` and + :func:`locale.getlocale()` instead. + (Contributed by Hugo van Kemenade in :gh:`111187`.) + +* :class:`typing.NamedTuple`: + + * The undocumented keyword argument syntax for creating NamedTuple classes + (``NT = NamedTuple("NT", x=int)``) is deprecated, and will be disallowed in + 3.15. Use the class-based syntax or the functional syntax instead. + + * When using the functional syntax to create a NamedTuple class, failing to + pass a value to the 'fields' parameter (``NT = NamedTuple("NT")``) is + deprecated. Passing ``None`` to the 'fields' parameter + (``NT = NamedTuple("NT", None)``) is also deprecated. Both will be + disallowed in Python 3.15. To create a NamedTuple class with 0 fields, use + ``class NT(NamedTuple): pass`` or ``NT = NamedTuple("NT", [])``. + +* :class:`typing.TypedDict`: When using the functional syntax to create a + TypedDict class, failing to pass a value to the 'fields' parameter (``TD = + TypedDict("TD")``) is deprecated. Passing ``None`` to the 'fields' parameter + (``TD = TypedDict("TD", None)``) is also deprecated. Both will be disallowed + in Python 3.15. To create a TypedDict class with 0 fields, use ``class + TD(TypedDict): pass`` or ``TD = TypedDict("TD", {})``. + +* :mod:`wave`: Deprecate the ``getmark()``, ``setmark()`` and ``getmarkers()`` + methods of the :class:`wave.Wave_read` and :class:`wave.Wave_write` classes. + They will be removed in Python 3.15. + (Contributed by Victor Stinner in :gh:`105096`.) + +* Passing any arguments to :func:`threading.RLock` is now deprecated. + C version allows any numbers of args and kwargs, + but they are just ignored. Python version does not allow any arguments. + All arguments will be removed from :func:`threading.RLock` in Python 3.15. + (Contributed by Nikita Sobolev in :gh:`102029`.) 
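+
+For the :class:`typing.NamedTuple` and :class:`typing.TypedDict` entries above,
+the spellings that remain supported look like this (the class and field names
+are purely illustrative)::
+
+    from typing import NamedTuple, TypedDict
+
+    class Point(NamedTuple):        # class-based syntax
+        x: int
+        y: int
+
+    Point2 = NamedTuple("Point2", [("x", int), ("y", int)])  # functional syntax
+
+    class Empty(TypedDict):         # a TypedDict with no fields
+        pass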
+ +Pending Removal in Python 3.16 +------------------------------ + +* :class:`array.array` ``'u'`` type (:c:type:`wchar_t`): + use the ``'w'`` type instead (``Py_UCS4``). + +Pending Removal in Future Versions +---------------------------------- + +The following APIs were deprecated in earlier Python versions and will be removed, +although there is currently no date scheduled for their removal. + +* :mod:`argparse`: Nesting argument groups and nesting mutually exclusive + groups are deprecated. + +* :mod:`builtins`: + + * ``~bool``, bitwise inversion on bool. + * ``bool(NotImplemented)``. + * Generators: ``throw(type, exc, tb)`` and ``athrow(type, exc, tb)`` + signature is deprecated: use ``throw(exc)`` and ``athrow(exc)`` instead, + the single argument signature. + * Currently Python accepts numeric literals immediately followed by keywords, + for example ``0in x``, ``1or x``, ``0if 1else 2``. It allows confusing and + ambiguous expressions like ``[0x1for x in y]`` (which can be interpreted as + ``[0x1 for x in y]`` or ``[0x1f or x in y]``). A syntax warning is raised + if the numeric literal is immediately followed by one of keywords + :keyword:`and`, :keyword:`else`, :keyword:`for`, :keyword:`if`, + :keyword:`in`, :keyword:`is` and :keyword:`or`. In a future release it + will be changed to a syntax error. (:gh:`87999`) + * Support for ``__index__()`` and ``__int__()`` method returning non-int type: + these methods will be required to return an instance of a strict subclass of + :class:`int`. + * Support for ``__float__()`` method returning a strict subclass of + :class:`float`: these methods will be required to return an instance of + :class:`float`. + * Support for ``__complex__()`` method returning a strict subclass of + :class:`complex`: these methods will be required to return an instance of + :class:`complex`. + * Delegation of ``int()`` to ``__trunc__()`` method. + +* :mod:`calendar`: ``calendar.January`` and ``calendar.February`` constants are + deprecated and replaced by :data:`calendar.JANUARY` and + :data:`calendar.FEBRUARY`. + (Contributed by Prince Roshan in :gh:`103636`.) + +* :mod:`datetime`: + + * :meth:`~datetime.datetime.utcnow`: + use ``datetime.datetime.now(tz=datetime.UTC)``. + * :meth:`~datetime.datetime.utcfromtimestamp`: + use ``datetime.datetime.fromtimestamp(timestamp, tz=datetime.UTC)``. + +* :mod:`gettext`: Plural value must be an integer. + +* :mod:`importlib`: + + * ``load_module()`` method: use ``exec_module()`` instead. + * :func:`~importlib.util.cache_from_source` *debug_override* parameter is + deprecated: use the *optimization* parameter instead. + +* :mod:`importlib.metadata`: + + * ``EntryPoints`` tuple interface. + * Implicit ``None`` on return values. + +* :mod:`mailbox`: Use of StringIO input and text mode is deprecated, use + BytesIO and binary mode instead. + +* :mod:`os`: Calling :func:`os.register_at_fork` in multi-threaded process. + +* :class:`!pydoc.ErrorDuringImport`: A tuple value for *exc_info* parameter is + deprecated, use an exception instance. + +* :mod:`re`: More strict rules are now applied for numerical group references + and group names in regular expressions. Only sequence of ASCII digits is now + accepted as a numerical reference. The group name in bytes patterns and + replacement strings can now only contain ASCII letters and digits and + underscore. + (Contributed by Serhiy Storchaka in :gh:`91760`.) + +* :mod:`ssl` options and protocols: + + * :class:`ssl.SSLContext` without protocol argument is deprecated. 
+ * :class:`ssl.SSLContext`: :meth:`~ssl.SSLContext.set_npn_protocols` and + :meth:`!~ssl.SSLContext.selected_npn_protocol` are deprecated: use ALPN + instead. + * ``ssl.OP_NO_SSL*`` options + * ``ssl.OP_NO_TLS*`` options + * ``ssl.PROTOCOL_SSLv3`` + * ``ssl.PROTOCOL_TLS`` + * ``ssl.PROTOCOL_TLSv1`` + * ``ssl.PROTOCOL_TLSv1_1`` + * ``ssl.PROTOCOL_TLSv1_2`` + * ``ssl.TLSVersion.SSLv3`` + * ``ssl.TLSVersion.TLSv1`` + * ``ssl.TLSVersion.TLSv1_1`` + +* :mod:`!sre_compile`, :mod:`!sre_constants` and :mod:`!sre_parse` modules. + +* ``types.CodeType.co_lnotab``: use the ``co_lines`` attribute instead. + +* :class:`typing.Text` (:gh:`92332`). + +* :func:`sysconfig.is_python_build` *check_home* parameter is deprecated and + ignored. + +* :mod:`threading` methods: + + * :meth:`!threading.Condition.notifyAll`: use :meth:`~threading.Condition.notify_all`. + * :meth:`!threading.Event.isSet`: use :meth:`~threading.Event.is_set`. + * :meth:`!threading.Thread.isDaemon`, :meth:`threading.Thread.setDaemon`: + use :attr:`threading.Thread.daemon` attribute. + * :meth:`!threading.Thread.getName`, :meth:`threading.Thread.setName`: + use :attr:`threading.Thread.name` attribute. + * :meth:`!threading.currentThread`: use :meth:`threading.current_thread`. + * :meth:`!threading.activeCount`: use :meth:`threading.active_count`. + +* :class:`unittest.IsolatedAsyncioTestCase`: it is deprecated to return a value + that is not None from a test case. + +* :mod:`urllib.request`: :class:`~urllib.request.URLopener` and + :class:`~urllib.request.FancyURLopener` style of invoking requests is + deprecated. Use newer :func:`~urllib.request.urlopen` functions and methods. + +* :func:`!urllib.parse.to_bytes`. + +* :mod:`urllib.parse` deprecated functions: :func:`~urllib.parse.urlparse` instead + + * ``splitattr()`` + * ``splithost()`` + * ``splitnport()`` + * ``splitpasswd()`` + * ``splitport()`` + * ``splitquery()`` + * ``splittag()`` + * ``splittype()`` + * ``splituser()`` + * ``splitvalue()`` + +* :mod:`wsgiref`: ``SimpleHandler.stdout.write()`` should not do partial + writes. + +* :meth:`zipimport.zipimporter.load_module` is deprecated: + use :meth:`~zipimport.zipimporter.exec_module` instead. + + +Removed +======= + +.. _whatsnew313-pep594: + +PEP 594: dead batteries +----------------------- + +* :pep:`594` removed 19 modules from the standard library, + deprecated in Python 3.11: + + * :mod:`!aifc`. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!audioop`. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!chunk`. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!cgi` and :mod:`!cgitb`. + + * ``cgi.FieldStorage`` can typically be replaced with + :func:`urllib.parse.parse_qsl` for ``GET`` and ``HEAD`` requests, + and the :mod:`email.message` module or `multipart + `__ PyPI project for ``POST`` and + ``PUT``. + + * ``cgi.parse()`` can be replaced by calling :func:`urllib.parse.parse_qs` + directly on the desired query string, except for ``multipart/form-data`` + input, which can be handled as described for ``cgi.parse_multipart()``. + + * ``cgi.parse_header()`` can be replaced with the functionality in the + :mod:`email` package, which implements the same MIME RFCs. 
For example, + with :class:`email.message.EmailMessage`:: + + from email.message import EmailMessage + msg = EmailMessage() + msg['content-type'] = 'application/json; charset="utf8"' + main, params = msg.get_content_type(), msg['content-type'].params + + * ``cgi.parse_multipart()`` can be replaced with the functionality in the + :mod:`email` package (e.g. :class:`email.message.EmailMessage` and + :class:`email.message.Message`) which implements the same MIME RFCs, or + with the `multipart `__ PyPI project. + + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!crypt` module and its private :mod:`!_crypt` extension. + The :mod:`hashlib` module is a potential replacement for certain use cases. + Otherwise, the following PyPI projects can be used: + + * `bcrypt `_: + Modern password hashing for your software and your servers. + * `passlib `_: + Comprehensive password hashing framework supporting over 30 schemes. + * `argon2-cffi `_: + The secure Argon2 password hashing algorithm. + * `legacycrypt `_: + Wrapper to the POSIX crypt library call and associated functionality. + + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!imghdr`: use the projects + `filetype `_, + `puremagic `_, + or `python-magic `_ instead. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!mailcap`. + The :mod:`mimetypes` module provides an alternative. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!msilib`. + (Contributed by Zachary Ware in :gh:`104773`.) + + * :mod:`!nis`. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!nntplib`: + the `PyPI nntplib project `_ + can be used instead. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!ossaudiodev`: use the + `pygame project `_ for audio playback. + (Contributed by Victor Stinner in :gh:`104780`.) + + * :mod:`!pipes`: use the :mod:`subprocess` module instead. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!sndhdr`: use the projects + `filetype `_, + `puremagic `_, or + `python-magic `_ instead. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!spwd`: + the `python-pam project `_ + can be used instead. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!sunau`. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!telnetlib`, use the projects + `telnetlib3 `_ or + `Exscript `_ instead. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!uu`: the :mod:`base64` module is a modern alternative. + (Contributed by Victor Stinner in :gh:`104773`.) + + * :mod:`!xdrlib`. + (Contributed by Victor Stinner in :gh:`104773`.) + +2to3 +---- + +* Remove the ``2to3`` program and the :mod:`!lib2to3` module, + deprecated in Python 3.11. + (Contributed by Victor Stinner in :gh:`104780`.) + +configparser +------------ + +* Remove the undocumented :class:`!configparser.LegacyInterpolation` class, + deprecated in the docstring since Python 3.2, + and with a deprecation warning since Python 3.11. + (Contributed by Hugo van Kemenade in :gh:`104886`.) + +importlib +--------- + +* Remove :mod:`importlib.resources` deprecated methods: + + * ``contents()`` + * ``is_resource()`` + * ``open_binary()`` + * ``open_text()`` + * ``path()`` + * ``read_binary()`` + * ``read_text()`` + + Use :func:`importlib.resources.files()` instead. Refer to `importlib-resources: Migrating from Legacy + `_ + for migration advice. + (Contributed by Jason R. Coombs in :gh:`106532`.) 
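+
+  A minimal sketch of the replacement API (the package and resource names are
+  hypothetical)::
+
+      from importlib import resources
+
+      # Replacement for the removed read_text("mypkg", "data.txt"):
+      data = resources.files("mypkg").joinpath("data.txt")
+      text = data.read_text(encoding="utf-8")
+
+      # Replacement for the removed open_binary("mypkg", "data.bin"):
+      blob = resources.files("mypkg").joinpath("data.bin")
+      with blob.open("rb") as f:
+          payload = f.read()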
+ +locale +------ + +* Remove ``locale.resetlocale()`` function deprecated in Python 3.11: + use ``locale.setlocale(locale.LC_ALL, "")`` instead. + (Contributed by Victor Stinner in :gh:`104783`.) + +logging +------- + +* :mod:`logging`: Remove undocumented and untested ``Logger.warn()`` and + ``LoggerAdapter.warn()`` methods and ``logging.warn()`` function. Deprecated + since Python 3.3, they were aliases to the :meth:`logging.Logger.warning` + method, :meth:`!logging.LoggerAdapter.warning` method and + :func:`logging.warning` function. + (Contributed by Victor Stinner in :gh:`105376`.) + +pathlib +------- + +* Remove support for using :class:`pathlib.Path` objects as context managers. + This functionality was deprecated and made a no-op in Python 3.9. + +re +-- + +* Remove undocumented, never working, and deprecated ``re.template`` function + and ``re.TEMPLATE`` flag (and ``re.T`` alias). + (Contributed by Serhiy Storchaka and Nikita Sobolev in :gh:`105687`.) + +tkinter +------- + +* Remove the :mod:`!tkinter.tix` module, deprecated in Python 3.6. The + third-party Tix library which the module wrapped is unmaintained. + (Contributed by Zachary Ware in :gh:`75552`.) + +turtle +------ + +* Remove the :meth:`!turtle.RawTurtle.settiltangle` method, + deprecated in docs since Python 3.1 + and with a deprecation warning since Python 3.11. + (Contributed by Hugo van Kemenade in :gh:`104876`.) + +typing +------ + +* Namespaces ``typing.io`` and ``typing.re``, deprecated in Python 3.8, + are now removed. The items in those namespaces can be imported directly + from :mod:`typing`. (Contributed by Sebastian Rittau in :gh:`92871`.) + +* Remove support for the keyword-argument method of creating + :class:`typing.TypedDict` types, deprecated in Python 3.11. + (Contributed by Tomas Roun in :gh:`104786`.) + +unittest +-------- + +* Removed the following :mod:`unittest` functions, deprecated in Python 3.11: + + * :func:`!unittest.findTestCases` + * :func:`!unittest.makeSuite` + * :func:`!unittest.getTestCaseNames` + + Use :class:`~unittest.TestLoader` methods instead: + + * :meth:`unittest.TestLoader.loadTestsFromModule` + * :meth:`unittest.TestLoader.loadTestsFromTestCase` + * :meth:`unittest.TestLoader.getTestCaseNames` + + (Contributed by Hugo van Kemenade in :gh:`104835`.) + +* Remove the untested and undocumented :meth:`!unittest.TestProgram.usageExit` + method, deprecated in Python 3.11. + (Contributed by Hugo van Kemenade in :gh:`104992`.) + +urllib +------ + +* Remove *cafile*, *capath* and *cadefault* parameters of the + :func:`urllib.request.urlopen` function, deprecated in Python 3.6: use the + *context* parameter instead. Please use + :meth:`ssl.SSLContext.load_cert_chain` instead, or let + :func:`ssl.create_default_context` select the system's trusted CA + certificates for you. + (Contributed by Victor Stinner in :gh:`105382`.) + +webbrowser +---------- + +* Remove the untested and undocumented :mod:`webbrowser` :class:`!MacOSX` class, + deprecated in Python 3.11. + Use the :class:`!MacOSXOSAScript` class (introduced in Python 3.2) instead. + (Contributed by Hugo van Kemenade in :gh:`104804`.) + +* Remove deprecated ``webbrowser.MacOSXOSAScript._name`` attribute. + Use :attr:`webbrowser.MacOSXOSAScript.name ` + attribute instead. + (Contributed by Nikita Sobolev in :gh:`105546`.) + +Others +------ + +* None yet + +CPython bytecode changes +======================== + +* The oparg of ``YIELD_VALUE`` is now ``1`` if the yield is part of a + yield-from or await, and ``0`` otherwise. 
The oparg of ``RESUME`` was
+  changed to add a bit indicating whether the except-depth is 1, which
+  is needed to optimize closing of generators.
+  (Contributed by Irit Katriel in :gh:`111354`.)
+
+Porting to Python 3.13
+======================
+
+This section lists previously described changes and other bugfixes
+that may require changes to your code.
+
+Changes in the Python API
+-------------------------
+
+* :meth:`!tkinter.Text.count` now always returns an integer if at most one
+  counting option is specified.
+  Previously it could return a single count as a 1-tuple, an integer (only if
+  option ``"update"`` was specified) or ``None`` if no items were found.
+  The result is now the same if ``wantobjects`` is set to ``0``.
+  (Contributed by Serhiy Storchaka in :gh:`97928`.)
+
+* Functions :c:func:`PyDict_GetItem`, :c:func:`PyDict_GetItemString`,
+  :c:func:`PyMapping_HasKey`, :c:func:`PyMapping_HasKeyString`,
+  :c:func:`PyObject_HasAttr`, :c:func:`PyObject_HasAttrString`, and
+  :c:func:`PySys_GetObject`, which clear all errors that occurred while
+  calling them, now report these errors using :func:`sys.unraisablehook`.
+  Consider replacing these functions with other functions as
+  recommended in the documentation.
+  (Contributed by Serhiy Storchaka in :gh:`106672`.)
+
+* An :exc:`OSError` is now raised by :func:`getpass.getuser` for any failure to
+  retrieve a username, instead of :exc:`ImportError` on non-Unix platforms or
+  :exc:`KeyError` on Unix platforms where the password database is empty.
+
+
+Build Changes
+=============
+
+* Autoconf 2.71 and aclocal 1.16.4 are now required to regenerate
+  the :file:`configure` script.
+  (Contributed by Christian Heimes in :gh:`89886`.)
+
+* SQLite 3.15.2 or newer is required to build the :mod:`sqlite3` extension module.
+  (Contributed by Erlend Aasland in :gh:`105875`.)
+
+* Python built with :file:`configure` :option:`--with-trace-refs` (tracing
+  references) is now ABI compatible with the Python release build and
+  :ref:`debug build `.
+  (Contributed by Victor Stinner in :gh:`108634`.)
+
+* Building CPython now requires a compiler with support for the C11 atomic
+  library, GCC built-in atomic functions, or MSVC interlocked intrinsics.
+
+* The ``errno``, ``md5``, ``resource``, ``winsound``, ``_ctypes_test``,
+  ``_multiprocessing.posixshmem``, ``_scproxy``, ``_stat``,
+  ``_testimportmultiple`` and ``_uuid`` C extensions are now built with the
+  :ref:`limited C API `.
+  (Contributed by Victor Stinner in :gh:`85283`.)
+
+
+C API Changes
+=============
+
+New Features
+------------
+
+* You no longer have to define the ``PY_SSIZE_T_CLEAN`` macro before including
+  :file:`Python.h` when using ``#`` formats in
+  :ref:`format codes `.
+  APIs accepting the format codes always use ``Py_ssize_t`` for ``#`` formats.
+  (Contributed by Inada Naoki in :gh:`104922`.)
+
+* Add :c:func:`PyImport_AddModuleRef`: similar to
+  :c:func:`PyImport_AddModule`, but returns a :term:`strong reference` instead
+  of a :term:`borrowed reference`.
+  (Contributed by Victor Stinner in :gh:`105922`.)
+
+* Add :c:func:`PyWeakref_GetRef` function: similar to
+  :c:func:`PyWeakref_GetObject` but returns a :term:`strong reference`, or
+  ``NULL`` if the referent is no longer live.
+  (Contributed by Victor Stinner in :gh:`105927`.)
+
+* Add :c:func:`PyObject_GetOptionalAttr` and
+  :c:func:`PyObject_GetOptionalAttrString`, variants of
+  :c:func:`PyObject_GetAttr` and :c:func:`PyObject_GetAttrString` which
+  don't raise :exc:`AttributeError` if the attribute is not found.
+ These variants are more convenient and faster if the missing attribute + should not be treated as a failure. + (Contributed by Serhiy Storchaka in :gh:`106521`.) + +* Add :c:func:`PyMapping_GetOptionalItem` and + :c:func:`PyMapping_GetOptionalItemString`: variants of + :c:func:`PyObject_GetItem` and :c:func:`PyMapping_GetItemString` which don't + raise :exc:`KeyError` if the key is not found. + These variants are more convenient and faster if the missing key should not + be treated as a failure. + (Contributed by Serhiy Storchaka in :gh:`106307`.) + +* Add fixed variants of functions which silently ignore errors: + + - :c:func:`PyObject_HasAttrWithError` replaces :c:func:`PyObject_HasAttr`. + - :c:func:`PyObject_HasAttrStringWithError` replaces :c:func:`PyObject_HasAttrString`. + - :c:func:`PyMapping_HasKeyWithError` replaces :c:func:`PyMapping_HasKey`. + - :c:func:`PyMapping_HasKeyStringWithError` replaces :c:func:`PyMapping_HasKeyString`. + + New functions return not only ``1`` for true and ``0`` for false, but also + ``-1`` for error. + + (Contributed by Serhiy Storchaka in :gh:`108511`.) + +* If Python is built in :ref:`debug mode ` or :option:`with + assertions <--with-assertions>`, :c:func:`PyTuple_SET_ITEM` and + :c:func:`PyList_SET_ITEM` now check the index argument with an assertion. + (Contributed by Victor Stinner in :gh:`106168`.) + +* Add :c:func:`PyModule_Add` function: similar to + :c:func:`PyModule_AddObjectRef` and :c:func:`PyModule_AddObject` but + always steals a reference to the value. + (Contributed by Serhiy Storchaka in :gh:`86493`.) + +* Added :c:func:`PyDict_GetItemRef` and :c:func:`PyDict_GetItemStringRef` + functions: similar to :c:func:`PyDict_GetItemWithError` but returning a + :term:`strong reference` instead of a :term:`borrowed reference`. Moreover, + these functions return -1 on error and so checking ``PyErr_Occurred()`` is + not needed. + (Contributed by Victor Stinner in :gh:`106004`.) + +* Added :c:func:`PyDict_ContainsString` function: same as + :c:func:`PyDict_Contains`, but *key* is specified as a :c:expr:`const char*` + UTF-8 encoded bytes string, rather than a :c:expr:`PyObject*`. + (Contributed by Victor Stinner in :gh:`108314`.) + +* Add :c:func:`Py_IsFinalizing` function: check if the main Python interpreter is + :term:`shutting down `. + (Contributed by Victor Stinner in :gh:`108014`.) + +* Add :c:func:`PyLong_AsInt` function: similar to :c:func:`PyLong_AsLong`, but + store the result in a C :c:expr:`int` instead of a C :c:expr:`long`. + Previously, it was known as the private function :c:func:`!_PyLong_AsInt` + (with an underscore prefix). + (Contributed by Victor Stinner in :gh:`108014`.) + +* Python built with :file:`configure` :option:`--with-trace-refs` (tracing + references) now supports the :ref:`Limited API `. + (Contributed by Victor Stinner in :gh:`108634`.) + +* Add :c:func:`PyObject_VisitManagedDict` and + :c:func:`PyObject_ClearManagedDict` functions which must be called by the + traverse and clear functions of a type using + :c:macro:`Py_TPFLAGS_MANAGED_DICT` flag. The `pythoncapi-compat project + `__ can be used to get these + functions on Python 3.11 and 3.12. + (Contributed by Victor Stinner in :gh:`107073`.) + +* Add :c:func:`PyUnicode_EqualToUTF8AndSize` and :c:func:`PyUnicode_EqualToUTF8` + functions: compare Unicode object with a :c:expr:`const char*` UTF-8 encoded + string and return true (``1``) if they are equal, or false (``0``) otherwise. + These functions do not raise exceptions. 
+  (Contributed by Serhiy Storchaka in :gh:`110289`.)
+
+* Add :c:func:`PyThreadState_GetUnchecked()` function: similar to
+  :c:func:`PyThreadState_Get()`, but doesn't kill the process with a fatal
+  error if it is NULL. The caller is responsible for checking whether the
+  result is NULL. Previously, the function was private and known as
+  ``_PyThreadState_UncheckedGet()``.
+  (Contributed by Victor Stinner in :gh:`108867`.)
+
+* Add :c:func:`PySys_AuditTuple` function: similar to :c:func:`PySys_Audit`,
+  but pass event arguments as a Python :class:`tuple` object.
+  (Contributed by Victor Stinner in :gh:`85283`.)
+
+* :c:func:`PyArg_ParseTupleAndKeywords` now supports non-ASCII keyword
+  parameter names.
+  (Contributed by Serhiy Storchaka in :gh:`110815`.)
+
+* Add :c:func:`PyMem_RawMalloc`, :c:func:`PyMem_RawCalloc`,
+  :c:func:`PyMem_RawRealloc` and :c:func:`PyMem_RawFree` to the limited C API
+  (version 3.13).
+  (Contributed by Victor Stinner in :gh:`85283`.)
+
+* Add :c:func:`PySys_Audit` and :c:func:`PySys_AuditTuple` functions to the
+  limited C API.
+  (Contributed by Victor Stinner in :gh:`85283`.)
+
+* Add :c:func:`PyErr_FormatUnraisable` function: similar to
+  :c:func:`PyErr_WriteUnraisable`, but allows customizing the warning message.
+  (Contributed by Serhiy Storchaka in :gh:`108082`.)
+
+* Add :c:func:`PyList_Extend` and :c:func:`PyList_Clear` functions: similar to
+  the Python ``list.extend()`` and ``list.clear()`` methods.
+  (Contributed by Victor Stinner in :gh:`111138`.)
+
+* Add :c:func:`PyDict_Pop` and :c:func:`PyDict_PopString` functions: remove a
+  key from a dictionary and optionally return the removed value. This is
+  similar to :meth:`dict.pop`, but without the default value and without
+  raising :exc:`KeyError` if the key is missing.
+  (Contributed by Stefan Behnel and Victor Stinner in :gh:`111262`.)
+
+
+Porting to Python 3.13
+----------------------
+
+* ``Python.h`` no longer includes the ``<ieeefp.h>`` standard header. It was
+  included for the ``finite()`` function which is now provided by the
+  ``<math.h>`` header. It should now be included explicitly if needed.
+  The ``HAVE_IEEEFP_H`` macro has also been removed.
+  (Contributed by Victor Stinner in :gh:`108765`.)
+
+* ``Python.h`` no longer includes these standard header files: ``<time.h>``,
+  ``<sys/select.h>`` and ``<sys/time.h>``. If needed, they should now be
+  included explicitly. For example, ``<time.h>`` provides the ``clock()`` and
+  ``gmtime()`` functions, ``<sys/select.h>`` provides the ``select()``
+  function, and ``<sys/time.h>`` provides the ``futimes()``, ``gettimeofday()``
+  and ``setitimer()`` functions.
+  (Contributed by Victor Stinner in :gh:`108765`.)
+
+* If the :c:macro:`Py_LIMITED_API` macro is defined, the :c:macro:`!Py_BUILD_CORE`,
+  :c:macro:`!Py_BUILD_CORE_BUILTIN` and :c:macro:`!Py_BUILD_CORE_MODULE` macros
+  are now undefined by ``<Python.h>``.
+  (Contributed by Victor Stinner in :gh:`85283`.)
+
+* The old trashcan macros ``Py_TRASHCAN_SAFE_BEGIN`` and ``Py_TRASHCAN_SAFE_END``
+  were removed. They should be replaced by the new macros ``Py_TRASHCAN_BEGIN``
+  and ``Py_TRASHCAN_END``.
+
+  A ``tp_dealloc`` function that uses the old macros, such as::
+
+      static void
+      mytype_dealloc(mytype *p)
+      {
+          PyObject_GC_UnTrack(p);
+          Py_TRASHCAN_SAFE_BEGIN(p);
+          ...
+          Py_TRASHCAN_SAFE_END
+      }
+
+  should migrate to the new macros as follows::
+
+      static void
+      mytype_dealloc(mytype *p)
+      {
+          PyObject_GC_UnTrack(p);
+          Py_TRASHCAN_BEGIN(p, mytype_dealloc)
+          ...
+          Py_TRASHCAN_END
+      }
+
+  Note that ``Py_TRASHCAN_BEGIN`` has a second argument which
+  should be the deallocation function it is in.
+
+* On Windows, ``Python.h`` no longer includes the ``<stddef.h>`` standard
+  header file. If needed, it should now be included explicitly. For example, it
+  provides the ``offsetof()`` macro, and the ``size_t`` and ``ptrdiff_t`` types.
+  Including ``<stddef.h>`` explicitly was already needed by all other
+  platforms; the ``HAVE_STDDEF_H`` macro is only defined on Windows.
+  (Contributed by Victor Stinner in :gh:`108765`.)
+
+
+Deprecated
+----------
+
+* Passing optional arguments *maxsplit*, *count* and *flags* in module-level
+  functions :func:`re.split`, :func:`re.sub` and :func:`re.subn` as positional
+  arguments is now deprecated.
+  In future Python versions these parameters will be
+  :ref:`keyword-only `.
+  (Contributed by Serhiy Storchaka in :gh:`56166`.)
+
+* Deprecate the old ``Py_UNICODE`` and ``PY_UNICODE_TYPE`` types: use the
+  :c:type:`wchar_t` type directly instead. Since Python 3.3, ``Py_UNICODE`` and
+  ``PY_UNICODE_TYPE`` are just aliases to :c:type:`wchar_t`.
+  (Contributed by Victor Stinner in :gh:`105156`.)
+
+* Deprecate old Python initialization functions:
+
+  * :c:func:`PySys_ResetWarnOptions`:
+    clear :data:`sys.warnoptions` and :data:`!warnings.filters` instead.
+  * :c:func:`Py_GetExecPrefix`: get :data:`sys.exec_prefix` instead.
+  * :c:func:`Py_GetPath`: get :data:`sys.path` instead.
+  * :c:func:`Py_GetPrefix`: get :data:`sys.prefix` instead.
+  * :c:func:`Py_GetProgramFullPath`: get :data:`sys.executable` instead.
+  * :c:func:`Py_GetProgramName`: get :data:`sys.executable` instead.
+  * :c:func:`Py_GetPythonHome`: get :c:member:`PyConfig.home` or
+    the :envvar:`PYTHONHOME` environment variable instead.
+
+  These functions are scheduled for removal in Python 3.15.
+  (Contributed by Victor Stinner in :gh:`105145`.)
+
+* Deprecate the :c:func:`PyImport_ImportModuleNoBlock` function, which has
+  simply been an alias to :c:func:`PyImport_ImportModule` since Python 3.3.
+  Scheduled for removal in Python 3.15.
+  (Contributed by Victor Stinner in :gh:`105396`.)
+
+* Deprecate the :c:func:`PyWeakref_GetObject` and
+  :c:func:`PyWeakref_GET_OBJECT` functions, which return a :term:`borrowed
+  reference`: use the new :c:func:`PyWeakref_GetRef` function instead; it
+  returns a :term:`strong reference`. The `pythoncapi-compat project
+  `__ can be used to get
+  :c:func:`PyWeakref_GetRef` on Python 3.12 and older.
+  (Contributed by Victor Stinner in :gh:`105927`.)
+
+Removed
+-------
+
+* Removed chained :class:`classmethod` descriptors (introduced in
+  :issue:`19072`). This can no longer be used to wrap other descriptors
+  such as :class:`property`. The core design of this feature was flawed
+  and caused a number of downstream problems. To "pass-through" a
+  :class:`classmethod`, consider using the :attr:`!__wrapped__`
+  attribute that was added in Python 3.10. (Contributed by Raymond
+  Hettinger in :gh:`89519`.)
+
+* Remove many APIs (functions, macros, variables) with names prefixed by
+  ``_Py`` or ``_PY`` (considered private API). If your project is affected
+  by one of these removals and you consider that the removed API should remain
+  available, please open a new issue to request a public C API and
+  add ``cc @vstinner`` to the issue to notify Victor Stinner.
+  (Contributed by Victor Stinner in :gh:`106320`.)
+
+* Remove functions deprecated in Python 3.9.
+
+  * ``PyEval_CallObject()``, ``PyEval_CallObjectWithKeywords()``: use
+    :c:func:`PyObject_CallNoArgs` or :c:func:`PyObject_Call` instead.
+
+    Warning: :c:func:`PyObject_Call` positional arguments must be a
+    :class:`tuple` and must not be *NULL*, and keyword arguments must be a
+    :class:`dict` or *NULL*, whereas the removed functions checked the
+    argument types and accepted *NULL* positional and keyword arguments.
+    To replace ``PyEval_CallObjectWithKeywords(func, NULL, kwargs)`` with
+    :c:func:`PyObject_Call`, pass an empty tuple as positional arguments using
+    :c:func:`PyTuple_New(0) `.
+
+  * ``PyEval_CallFunction()``: use :c:func:`PyObject_CallFunction` instead.
+  * ``PyEval_CallMethod()``: use :c:func:`PyObject_CallMethod` instead.
+  * ``PyCFunction_Call()``: use :c:func:`PyObject_Call` instead.
+
+  (Contributed by Victor Stinner in :gh:`105107`.)
+
+* Remove old buffer protocols deprecated in Python 3.0. Use :ref:`bufferobjects` instead.
+
+  * :c:func:`!PyObject_CheckReadBuffer`: Use :c:func:`PyObject_CheckBuffer` to
+    test if the object supports the buffer protocol.
+    Note that :c:func:`PyObject_CheckBuffer` doesn't guarantee that
+    :c:func:`PyObject_GetBuffer` will succeed.
+    To test if the object is actually readable, see the next example
+    of :c:func:`PyObject_GetBuffer`.
+
+  * :c:func:`!PyObject_AsCharBuffer`, :c:func:`!PyObject_AsReadBuffer`: use
+    :c:func:`PyObject_GetBuffer` and :c:func:`PyBuffer_Release` instead:
+
+    .. code-block:: c
+
+       Py_buffer view;
+       if (PyObject_GetBuffer(obj, &view, PyBUF_SIMPLE) < 0) {
+           return NULL;
+       }
+       // Use `view.buf` and `view.len` to read from the buffer.
+       // You may need to cast buf as `(const char*)view.buf`.
+       PyBuffer_Release(&view);
+
+  * :c:func:`!PyObject_AsWriteBuffer`: Use
+    :c:func:`PyObject_GetBuffer` and :c:func:`PyBuffer_Release` instead:
+
+    .. code-block:: c
+
+       Py_buffer view;
+       if (PyObject_GetBuffer(obj, &view, PyBUF_WRITABLE) < 0) {
+           return NULL;
+       }
+       // Use `view.buf` and `view.len` to write to the buffer.
+       PyBuffer_Release(&view);
+
+  (Contributed by Inada Naoki in :gh:`85275`.)
+
+* Remove the following old functions to configure the Python initialization,
+  deprecated in Python 3.11:
+
+  * ``PySys_AddWarnOptionUnicode()``: use :c:member:`PyConfig.warnoptions` instead.
+  * ``PySys_AddWarnOption()``: use :c:member:`PyConfig.warnoptions` instead.
+  * ``PySys_AddXOption()``: use :c:member:`PyConfig.xoptions` instead.
+  * ``PySys_HasWarnOptions()``: use :c:member:`PyConfig.warnoptions` instead.
+  * ``PySys_SetArgvEx()``: set :c:member:`PyConfig.argv` instead.
+  * ``PySys_SetArgv()``: set :c:member:`PyConfig.argv` instead.
+  * ``PySys_SetPath()``: set :c:member:`PyConfig.module_search_paths` instead.
+  * ``Py_SetPath()``: set :c:member:`PyConfig.module_search_paths` instead.
+  * ``Py_SetProgramName()``: set :c:member:`PyConfig.program_name` instead.
+  * ``Py_SetPythonHome()``: set :c:member:`PyConfig.home` instead.
+  * ``Py_SetStandardStreamEncoding()``: set :c:member:`PyConfig.stdio_encoding`
+    instead, and on Windows maybe also set
+    :c:member:`PyConfig.legacy_windows_stdio`.
+  * ``_Py_SetProgramFullPath()``: set :c:member:`PyConfig.executable` instead.
+
+  Use the new :c:type:`PyConfig` API of the :ref:`Python Initialization
+  Configuration ` instead (:pep:`587`), added in Python 3.8.
+  (Contributed by Victor Stinner in :gh:`105145`.)
+
+* Remove the old trashcan macros ``Py_TRASHCAN_SAFE_BEGIN`` and
+  ``Py_TRASHCAN_SAFE_END``. They should be replaced by the new macros
+  ``Py_TRASHCAN_BEGIN`` and ``Py_TRASHCAN_END``. The new macros were
+  added in Python 3.8 and the old macros were deprecated in Python 3.11.
+  (Contributed by Irit Katriel in :gh:`105111`.)
+
+* Remove ``PyEval_InitThreads()`` and ``PyEval_ThreadsInitialized()``
+  functions, deprecated in Python 3.9. Since Python 3.7, ``Py_Initialize()``
+  always creates the GIL: calling ``PyEval_InitThreads()`` did nothing and
+  ``PyEval_ThreadsInitialized()`` always returned non-zero.
+  (Contributed by Victor Stinner in :gh:`105182`.)
+
+* Remove ``PyEval_AcquireLock()`` and ``PyEval_ReleaseLock()`` functions,
+  deprecated in Python 3.2. They didn't update the current thread state.
+  They can be replaced with:
+
+  * :c:func:`PyEval_SaveThread` and :c:func:`PyEval_RestoreThread`;
+  * low-level :c:func:`PyEval_AcquireThread` and :c:func:`PyEval_ReleaseThread`;
+  * or :c:func:`PyGILState_Ensure` and :c:func:`PyGILState_Release`.
+
+  (Contributed by Victor Stinner in :gh:`105182`.)
+
+* Remove the private ``_PyObject_FastCall()`` function:
+  use ``PyObject_Vectorcall()`` which is available since Python 3.8
+  (:pep:`590`).
+  (Contributed by Victor Stinner in :gh:`106023`.)
+
+* Remove the ``cpython/pytime.h`` header file: it only contained private functions.
+  (Contributed by Victor Stinner in :gh:`106316`.)
+
+* Remove the ``_PyInterpreterState_Get()`` alias to
+  :c:func:`PyInterpreterState_Get()` which was kept for backward compatibility
+  with Python 3.8. The `pythoncapi-compat project
+  `__ can be used to get
+  :c:func:`PyInterpreterState_Get()` on Python 3.8 and older.
+  (Contributed by Victor Stinner in :gh:`106320`.)
+
+* The :c:func:`PyModule_AddObject` function is now :term:`soft deprecated`:
+  the :c:func:`PyModule_Add` or :c:func:`PyModule_AddObjectRef` functions
+  should be used instead.
+  (Contributed by Serhiy Storchaka in :gh:`86493`.)
+
+Pending Removal in Python 3.14
+------------------------------
+
+* Creating immutable types (:c:macro:`Py_TPFLAGS_IMMUTABLETYPE`) with mutable
+  bases using the C API.
+* Global configuration variables: + + * :c:var:`Py_DebugFlag`: use :c:member:`PyConfig.parser_debug` + * :c:var:`Py_VerboseFlag`: use :c:member:`PyConfig.verbose` + * :c:var:`Py_QuietFlag`: use :c:member:`PyConfig.quiet` + * :c:var:`Py_InteractiveFlag`: use :c:member:`PyConfig.interactive` + * :c:var:`Py_InspectFlag`: use :c:member:`PyConfig.inspect` + * :c:var:`Py_OptimizeFlag`: use :c:member:`PyConfig.optimization_level` + * :c:var:`Py_NoSiteFlag`: use :c:member:`PyConfig.site_import` + * :c:var:`Py_BytesWarningFlag`: use :c:member:`PyConfig.bytes_warning` + * :c:var:`Py_FrozenFlag`: use :c:member:`PyConfig.pathconfig_warnings` + * :c:var:`Py_IgnoreEnvironmentFlag`: use :c:member:`PyConfig.use_environment` + * :c:var:`Py_DontWriteBytecodeFlag`: use :c:member:`PyConfig.write_bytecode` + * :c:var:`Py_NoUserSiteDirectory`: use :c:member:`PyConfig.user_site_directory` + * :c:var:`Py_UnbufferedStdioFlag`: use :c:member:`PyConfig.buffered_stdio` + * :c:var:`Py_HashRandomizationFlag`: use :c:member:`PyConfig.use_hash_seed` + and :c:member:`PyConfig.hash_seed` + * :c:var:`Py_IsolatedFlag`: use :c:member:`PyConfig.isolated` + * :c:var:`Py_LegacyWindowsFSEncodingFlag`: use :c:member:`PyPreConfig.legacy_windows_fs_encoding` + * :c:var:`Py_LegacyWindowsStdioFlag`: use :c:member:`PyConfig.legacy_windows_stdio` + * :c:var:`!Py_FileSystemDefaultEncoding`: use :c:member:`PyConfig.filesystem_encoding` + * :c:var:`!Py_HasFileSystemDefaultEncoding`: use :c:member:`PyConfig.filesystem_encoding` + * :c:var:`!Py_FileSystemDefaultEncodeErrors`: use :c:member:`PyConfig.filesystem_errors` + * :c:var:`!Py_UTF8Mode`: use :c:member:`PyPreConfig.utf8_mode` (see :c:func:`Py_PreInitialize`) + + The :c:func:`Py_InitializeFromConfig` API should be used with + :c:type:`PyConfig` instead. + +Pending Removal in Python 3.15 +------------------------------ + +* :c:func:`PyImport_ImportModuleNoBlock`: use :c:func:`PyImport_ImportModule`. +* :c:func:`PyWeakref_GET_OBJECT`: use :c:func:`PyWeakref_GetRef` instead. +* :c:func:`PyWeakref_GetObject`: use :c:func:`PyWeakref_GetRef` instead. +* :c:type:`!Py_UNICODE_WIDE` type: use :c:type:`wchar_t` instead. +* :c:type:`Py_UNICODE` type: use :c:type:`wchar_t` instead. +* Python initialization functions: + + * :c:func:`PySys_ResetWarnOptions`: clear :data:`sys.warnoptions` and + :data:`!warnings.filters` instead. + * :c:func:`Py_GetExecPrefix`: get :data:`sys.exec_prefix` instead. + * :c:func:`Py_GetPath`: get :data:`sys.path` instead. + * :c:func:`Py_GetPrefix`: get :data:`sys.prefix` instead. + * :c:func:`Py_GetProgramFullPath`: get :data:`sys.executable` instead. + * :c:func:`Py_GetProgramName`: get :data:`sys.executable` instead. + * :c:func:`Py_GetPythonHome`: get :c:member:`PyConfig.home` or + :envvar:`PYTHONHOME` environment variable instead. + +Pending Removal in Future Versions +---------------------------------- + +The following APIs were deprecated in earlier Python versions and will be +removed, although there is currently no date scheduled for their removal. + +* :c:macro:`Py_TPFLAGS_HAVE_FINALIZE`: no needed since Python 3.8. +* :c:func:`PyErr_Fetch`: use :c:func:`PyErr_GetRaisedException`. +* :c:func:`PyErr_NormalizeException`: use :c:func:`PyErr_GetRaisedException`. +* :c:func:`PyErr_Restore`: use :c:func:`PyErr_SetRaisedException`. +* :c:func:`PyModule_GetFilename`: use :c:func:`PyModule_GetFilenameObject`. +* :c:func:`PyOS_AfterFork`: use :c:func:`PyOS_AfterFork_Child()`. +* :c:func:`PySlice_GetIndicesEx`. +* :c:func:`!PyUnicode_AsDecodedObject`. 
+* :c:func:`!PyUnicode_AsDecodedUnicode`. +* :c:func:`!PyUnicode_AsEncodedObject`. +* :c:func:`!PyUnicode_AsEncodedUnicode`. +* :c:func:`PyUnicode_READY`: not needed since Python 3.12. +* :c:func:`!_PyErr_ChainExceptions`. +* :c:member:`!PyBytesObject.ob_shash` member: + call :c:func:`PyObject_Hash` instead. +* :c:member:`!PyDictObject.ma_version_tag` member. +* TLS API: + + * :c:func:`PyThread_create_key`: use :c:func:`PyThread_tss_alloc`. + * :c:func:`PyThread_delete_key`: use :c:func:`PyThread_tss_free`. + * :c:func:`PyThread_set_key_value`: use :c:func:`PyThread_tss_set`. + * :c:func:`PyThread_get_key_value`: use :c:func:`PyThread_tss_get`. + * :c:func:`PyThread_delete_key_value`: use :c:func:`PyThread_tss_delete`. + * :c:func:`PyThread_ReInitTLS`: no longer needed. + +* Remove undocumented ``PY_TIMEOUT_MAX`` constant from the limited C API. + (Contributed by Victor Stinner in :gh:`110014`.) + + +Regression Test Changes +======================= + +* Python built with :file:`configure` :option:`--with-pydebug` now + supports a :option:`-X presite=package.module <-X>` command-line + option. If used, it specifies a module that should be imported early + in the lifecycle of the interpreter, before ``site.py`` is executed. + (Contributed by Åukasz Langa in :gh:`110769`.) diff --git a/Doc/whatsnew/3.2.rst b/Doc/whatsnew/3.2.rst index 6037db9f954d264..df32b76b6d7b03f 100644 --- a/Doc/whatsnew/3.2.rst +++ b/Doc/whatsnew/3.2.rst @@ -319,7 +319,7 @@ aspects that are visible to the programmer: >>> collections.__cached__ # doctest: +SKIP 'c:/py32/lib/__pycache__/collections.cpython-32.pyc' -* The tag that is unique to each interpreter is accessible from the :mod:`imp` +* The tag that is unique to each interpreter is accessible from the :mod:`!imp` module: >>> import imp # doctest: +SKIP @@ -328,7 +328,7 @@ aspects that are visible to the programmer: * Scripts that try to deduce source filename from the imported file now need to be smarter. It is no longer sufficient to simply strip the "c" from a ".pyc" - filename. Instead, use the new functions in the :mod:`imp` module: + filename. Instead, use the new functions in the :mod:`!imp` module: >>> imp.source_from_cache('c:/py32/lib/__pycache__/collections.cpython-32.pyc') # doctest: +SKIP 'c:/py32/lib/collections.py' @@ -424,7 +424,7 @@ protocols, the users must to be able access the environment using native strings even though the underlying platform may have a different convention. To bridge this gap, the :mod:`wsgiref` module has a new function, :func:`wsgiref.handlers.read_environ` for transcoding CGI variables from -:attr:`os.environ` into native strings and returning a new dictionary. +:data:`os.environ` into native strings and returning a new dictionary. .. seealso:: @@ -468,6 +468,7 @@ Some smaller changes made to the core Python language are: >>> class LowerCasedDict(dict): ... def __getitem__(self, key): ... return dict.__getitem__(self, key.lower()) + ... >>> lcd = LowerCasedDict(part='widgets', quantity=10) >>> 'There are {QUANTITY} {Part} in stock'.format_map(lcd) 'There are 10 widgets in stock' @@ -475,6 +476,7 @@ Some smaller changes made to the core Python language are: >>> class PlaceholderDict(dict): ... def __missing__(self, key): ... return '<{}>'.format(key) + ... 
>>> 'Hello {name}, welcome to {location}'.format_map(PlaceholderDict()) 'Hello , welcome to ' @@ -483,7 +485,7 @@ Some smaller changes made to the core Python language are: * The interpreter can now be started with a quiet option, ``-q``, to prevent the copyright and version information from being displayed in the interactive - mode. The option can be introspected using the :attr:`sys.flags` attribute: + mode. The option can be introspected using the :data:`sys.flags` attribute: .. code-block:: shell-session @@ -564,9 +566,9 @@ Some smaller changes made to the core Python language are: (See :issue:`4617`.) -* The internal :c:type:`structsequence` tool now creates subclasses of tuple. +* :ref:`Struct sequence types ` are now subclasses of tuple. This means that C structures like those returned by :func:`os.stat`, - :func:`time.gmtime`, and :attr:`sys.version_info` now work like a + :func:`time.gmtime`, and :data:`sys.version_info` now work like a :term:`named tuple` and now work with functions and methods that expect a tuple as an argument. This is a big step forward in making the C structures as flexible as their pure Python counterparts: @@ -596,7 +598,7 @@ Some smaller changes made to the core Python language are: module, or on the command line. A :exc:`ResourceWarning` is issued at interpreter shutdown if the - :data:`gc.garbage` list isn't empty, and if :attr:`gc.DEBUG_UNCOLLECTABLE` is + :data:`gc.garbage` list isn't empty, and if :const:`gc.DEBUG_UNCOLLECTABLE` is set, all uncollectable objects are printed. This is meant to make the programmer aware that their code contains object finalization issues. @@ -621,7 +623,7 @@ Some smaller changes made to the core Python language are: :class:`collections.Sequence` :term:`abstract base class`. As a result, the language will have a more uniform API. In addition, :class:`range` objects now support slicing and negative indices, even with values larger than - :attr:`sys.maxsize`. This makes *range* more interoperable with lists:: + :data:`sys.maxsize`. This makes *range* more interoperable with lists:: >>> range(0, 100, 2).count(10) 1 @@ -660,7 +662,7 @@ Python's standard library has undergone significant maintenance efforts and quality improvements. The biggest news for Python 3.2 is that the :mod:`email` package, :mod:`mailbox` -module, and :mod:`nntplib` modules now work correctly with the bytes/text model +module, and :mod:`!nntplib` modules now work correctly with the bytes/text model in Python 3. For the first time, there is correct handling of messages with mixed encodings. @@ -783,8 +785,8 @@ functools (Contributed by Raymond Hettinger and incorporating design ideas from Jim Baker, Miki Tebeka, and Nick Coghlan; see `recipe 498245 - `_\, `recipe 577479 - `_\, :issue:`10586`, and + `_\, `recipe 577479 + `_\, :issue:`10586`, and :issue:`10593`.) * The :func:`functools.wraps` decorator now adds a :attr:`__wrapped__` attribute @@ -1005,13 +1007,13 @@ datetime and time after 1900. The new supported year range is from 1000 to 9999 inclusive. * Whenever a two-digit year is used in a time tuple, the interpretation has been - governed by :attr:`time.accept2dyear`. The default is ``True`` which means that + governed by :data:`time.accept2dyear`. The default is ``True`` which means that for a two-digit year, the century is guessed according to the POSIX rules governing the ``%y`` strptime format. Starting with Py3.2, use of the century guessing heuristic will emit a :exc:`DeprecationWarning`. 
Instead, it is recommended that - :attr:`time.accept2dyear` be set to ``False`` so that large date ranges + :data:`time.accept2dyear` be set to ``False`` so that large date ranges can be used without guesswork:: >>> import time, warnings @@ -1029,7 +1031,7 @@ datetime and time 'Fri Jan 1 12:34:56 11' Several functions now have significantly expanded date ranges. When - :attr:`time.accept2dyear` is false, the :func:`time.asctime` function will + :data:`time.accept2dyear` is false, the :func:`time.asctime` function will accept any year that fits in a C int, while the :func:`time.mktime` and :func:`time.strftime` functions will accept the full range supported by the corresponding operating system functions. @@ -1192,11 +1194,11 @@ can be set to "$" for the shell-style formatting provided by If no configuration is set-up before a logging event occurs, there is now a default configuration using a :class:`~logging.StreamHandler` directed to -:attr:`sys.stderr` for events of ``WARNING`` level or higher. Formerly, an +:data:`sys.stderr` for events of ``WARNING`` level or higher. Formerly, an event occurring before a configuration was set-up would either raise an exception or silently drop the event depending on the value of -:attr:`logging.raiseExceptions`. The new default handler is stored in -:attr:`logging.lastResort`. +:data:`logging.raiseExceptions`. The new default handler is stored in +:data:`logging.lastResort`. The use of filters has been simplified. Instead of creating a :class:`~logging.Filter` object, the predicate can be any Python callable that @@ -1298,7 +1300,7 @@ values are equal (:issue:`8188`):: hash(Decimal("1.5")) == hash(complex(1.5, 0)) Some of the hashing details are exposed through a new attribute, -:attr:`sys.hash_info`, which describes the bit width of the hash value, the +:data:`sys.hash_info`, which describes the bit width of the hash value, the prime modulus, the hash values for *infinity* and *nan*, and the multiplier used for the imaginary part of a number: @@ -1386,7 +1388,7 @@ select ------ The :mod:`select` module now exposes a new, constant attribute, -:attr:`~select.PIPE_BUF`, which gives the minimum number of bytes which are +:const:`~select.PIPE_BUF`, which gives the minimum number of bytes which are guaranteed not to block when :func:`select.select` says a pipe is ready for writing. @@ -1527,7 +1529,7 @@ filenames: b'Sehensw\xc3\xbcrdigkeiten' Some operating systems allow direct access to encoded bytes in the -environment. If so, the :attr:`os.supports_bytes_environ` constant will be +environment. If so, the :const:`os.supports_bytes_environ` constant will be true. For direct access to encoded environment variables (if available), @@ -1664,9 +1666,9 @@ for secure (encrypted, authenticated) internet connections: algorithm" error. * The version of OpenSSL being used is now accessible using the module - attributes :data:`ssl.OPENSSL_VERSION` (a string), - :data:`ssl.OPENSSL_VERSION_INFO` (a 5-tuple), and - :data:`ssl.OPENSSL_VERSION_NUMBER` (an integer). + attributes :const:`ssl.OPENSSL_VERSION` (a string), + :const:`ssl.OPENSSL_VERSION_INFO` (a 5-tuple), and + :const:`ssl.OPENSSL_VERSION_NUMBER` (an integer). (Contributed by Antoine Pitrou in :issue:`8850`, :issue:`1589`, :issue:`8322`, :issue:`5639`, :issue:`4870`, :issue:`8484`, and :issue:`8321`.) 
@@ -1674,13 +1676,13 @@ for secure (encrypted, authenticated) internet connections: nntp ---- -The :mod:`nntplib` module has a revamped implementation with better bytes and +The :mod:`!nntplib` module has a revamped implementation with better bytes and text semantics as well as more practical APIs. These improvements break compatibility with the nntplib version in Python 3.1, which was partly dysfunctional in itself. Support for secure connections through both implicit (using -:class:`nntplib.NNTP_SSL`) and explicit (using :meth:`nntplib.NNTP.starttls`) +:class:`!nntplib.NNTP_SSL`) and explicit (using :meth:`!nntplib.NNTP.starttls`) TLS has also been added. (Contributed by Antoine Pitrou in :issue:`9360` and Andrew Vant in :issue:`1926`.) @@ -1886,6 +1888,7 @@ inspect >>> from inspect import getgeneratorstate >>> def gen(): ... yield 'demo' + ... >>> g = gen() >>> getgeneratorstate(g) 'GEN_CREATED' @@ -2299,7 +2302,7 @@ turtledemo The demonstration code for the :mod:`turtle` module was moved from the *Demo* directory to main library. It includes over a dozen sample scripts with -lively displays. Being on :attr:`sys.path`, it can now be run directly +lively displays. Being on :data:`sys.path`, it can now be run directly from the command-line: .. code-block:: shell-session @@ -2563,10 +2566,10 @@ Changes to Python's build process and to the C API include: (:issue:`2443`). * A new C API function :c:func:`PySys_SetArgvEx` allows an embedded interpreter - to set :attr:`sys.argv` without also modifying :attr:`sys.path` + to set :data:`sys.argv` without also modifying :data:`sys.path` (:issue:`5753`). -* :c:macro:`PyEval_CallObject` is now only available in macro form. The +* :c:func:`!PyEval_CallObject` is now only available in macro form. The function declaration, which was kept for backwards compatibility reasons, is now removed -- the macro was introduced in 1997 (:issue:`8276`). @@ -2600,7 +2603,7 @@ Also, there were a number of updates to the Mac OS X build, see for details. For users running a 32/64-bit build, there is a known problem with the default Tcl/Tk on Mac OS X 10.6. Accordingly, we recommend installing an updated alternative such as -`ActiveState Tcl/Tk 8.5.9 `_\. +`ActiveState Tcl/Tk 8.5.9 `_\. See https://www.python.org/download/mac/tcltk/ for additional details. Porting to Python 3.2 @@ -2640,7 +2643,7 @@ require changes to your code: * ``""`` is now a valid value and is no longer automatically converted to an empty string. For empty strings, use ``"option ="`` in a line. -* The :mod:`nntplib` module was reworked extensively, meaning that its APIs +* The :mod:`!nntplib` module was reworked extensively, meaning that its APIs are often incompatible with the 3.1 APIs. * :class:`bytearray` objects can no longer be used as filenames; instead, @@ -2655,7 +2658,7 @@ require changes to your code: * "t#" format has been removed: use "s#" or "s*" instead * "w" and "w#" formats has been removed: use "w*" instead -* The :c:type:`PyCObject` type, deprecated in 3.1, has been removed. To wrap +* The :c:type:`!PyCObject` type, deprecated in 3.1, has been removed. To wrap opaque C pointers in Python objects, the :c:type:`PyCapsule` API should be used instead; the new type has a well-defined interface for passing typing safety information and a less complicated signature for calling a destructor. @@ -2728,15 +2731,15 @@ require changes to your code: (Contributed by Antoine Pitrou, :issue:`10272`.) 
-* The misleading functions :c:func:`PyEval_AcquireLock()` and - :c:func:`PyEval_ReleaseLock()` have been officially deprecated. The - thread-state aware APIs (such as :c:func:`PyEval_SaveThread()` - and :c:func:`PyEval_RestoreThread()`) should be used instead. +* The misleading functions :c:func:`!PyEval_AcquireLock` and + :c:func:`!PyEval_ReleaseLock` have been officially deprecated. The + thread-state aware APIs (such as :c:func:`PyEval_SaveThread` + and :c:func:`PyEval_RestoreThread`) should be used instead. * Due to security risks, :func:`asyncore.handle_accept` has been deprecated, and a new function, :func:`asyncore.handle_accepted`, was added to replace it. (Contributed by Giampaolo Rodola in :issue:`6706`.) -* Due to the new :term:`GIL` implementation, :c:func:`PyEval_InitThreads()` - cannot be called before :c:func:`Py_Initialize()` anymore. +* Due to the new :term:`GIL` implementation, :c:func:`!PyEval_InitThreads` + cannot be called before :c:func:`Py_Initialize` anymore. diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst index 96a632577b2c566..5674bc7f359b720 100644 --- a/Doc/whatsnew/3.3.rst +++ b/Doc/whatsnew/3.3.rst @@ -249,7 +249,7 @@ Changes introduced by :pep:`393` are the following: non-BMP code points. * The value of :data:`sys.maxunicode` is now always ``1114111`` (``0x10FFFF`` - in hexadecimal). The :c:func:`PyUnicode_GetMax` function still returns + in hexadecimal). The :c:func:`!PyUnicode_GetMax` function still returns either ``0xFFFF`` or ``0x10FFFF`` for backward compatibility, and it should not be used with the new Unicode API (see :issue:`13054`). @@ -560,6 +560,7 @@ Example with (non-bound) methods:: >>> class C: ... def meth(self): ... pass + ... >>> C.meth.__name__ 'meth' >>> C.meth.__qualname__ @@ -647,7 +648,7 @@ PEP 421: Adding sys.implementation A new attribute on the :mod:`sys` module exposes details specific to the implementation of the currently running interpreter. The initial set of -attributes on :attr:`sys.implementation` are ``name``, ``version``, +attributes on :data:`sys.implementation` are ``name``, ``version``, ``hexversion``, and ``cache_tag``. The intention of ``sys.implementation`` is to consolidate into one namespace @@ -684,7 +685,7 @@ through normal attribute access. Using importlib as the Implementation of Import =============================================== :issue:`2377` - Replace __import__ w/ importlib.__import__ -:issue:`13959` - Re-implement parts of :mod:`imp` in pure Python +:issue:`13959` - Re-implement parts of :mod:`!imp` in pure Python :issue:`14605` - Make import machinery explicit :issue:`14646` - Require loaders set __loader__ and __package__ @@ -713,12 +714,12 @@ to properly delineate between :term:`meta path finders ` and :term:`path entry finders ` by introducing :class:`importlib.abc.MetaPathFinder` and :class:`importlib.abc.PathEntryFinder`, respectively. The old ABC of -:class:`importlib.abc.Finder` is now only provided for backwards-compatibility +:class:`!importlib.abc.Finder` is now only provided for backwards-compatibility and does not enforce any method requirements. In terms of finders, :class:`importlib.machinery.FileFinder` exposes the mechanism used to search for source and bytecode files of a module. Previously -this class was an implicit member of :attr:`sys.path_hooks`. +this class was an implicit member of :data:`sys.path_hooks`. 
For loaders, the new abstract base class :class:`importlib.abc.FileLoader` helps write a loader that uses the file system as the storage mechanism for a module's @@ -734,7 +735,7 @@ provide the full name of the module now instead of just the tail end of the module's name. The :func:`importlib.invalidate_caches` function will now call the method with -the same name on all finders cached in :attr:`sys.path_importer_cache` to help +the same name on all finders cached in :data:`sys.path_importer_cache` to help clean up any stored state as necessary. Visible Changes @@ -744,8 +745,8 @@ For potential required changes to code, see the `Porting Python code`_ section. Beyond the expanse of what :mod:`importlib` now exposes, there are other -visible changes to import. The biggest is that :attr:`sys.meta_path` and -:attr:`sys.path_hooks` now store all of the meta path finders and path entry +visible changes to import. The biggest is that :data:`sys.meta_path` and +:data:`sys.path_hooks` now store all of the meta path finders and path entry hooks used by import. Previously the finders were implicit and hidden within the C code of import instead of being directly exposed. This means that one can now easily remove or change the order of the various finders to fit one's needs. @@ -760,9 +761,9 @@ Loaders are also now expected to set the ``__package__`` attribute from :pep:`366`. Once again, import itself is already setting this on all loaders from :mod:`importlib` and import itself is setting the attribute post-load. -``None`` is now inserted into :attr:`sys.path_importer_cache` when no finder -can be found on :attr:`sys.path_hooks`. Since :class:`imp.NullImporter` is not -directly exposed on :attr:`sys.path_hooks` it could no longer be relied upon to +``None`` is now inserted into :data:`sys.path_importer_cache` when no finder +can be found on :data:`sys.path_hooks`. Since :class:`!imp.NullImporter` is not +directly exposed on :data:`sys.path_hooks` it could no longer be relied upon to always be available to use as a value representing no finder found. All other changes relate to semantic changes which should be taken into @@ -841,7 +842,7 @@ Builtin functions and types * :func:`open` gets a new *opener* parameter: the underlying file descriptor for the file object is then obtained by calling *opener* with (*file*, - *flags*). It can be used to use custom flags like :data:`os.O_CLOEXEC` for + *flags*). It can be used to use custom flags like :const:`os.O_CLOEXEC` for example. The ``'x'`` mode was added: open for exclusive creation, failing if the file already exists. * :func:`print`: added the *flush* keyword argument. If the *flush* keyword @@ -916,12 +917,12 @@ abstract methods. The recommended approach to declaring abstract descriptors is now to provide :attr:`__isabstractmethod__` as a dynamically updated property. The built-in descriptors have been updated accordingly. - * :class:`abc.abstractproperty` has been deprecated, use :class:`property` - with :func:`abc.abstractmethod` instead. - * :class:`abc.abstractclassmethod` has been deprecated, use - :class:`classmethod` with :func:`abc.abstractmethod` instead. - * :class:`abc.abstractstaticmethod` has been deprecated, use - :class:`staticmethod` with :func:`abc.abstractmethod` instead. +* :class:`abc.abstractproperty` has been deprecated, use :class:`property` + with :func:`abc.abstractmethod` instead. +* :class:`abc.abstractclassmethod` has been deprecated, use + :class:`classmethod` with :func:`abc.abstractmethod` instead. 
+* :class:`abc.abstractstaticmethod` has been deprecated, use + :class:`staticmethod` with :func:`abc.abstractmethod` instead. (Contributed by Darren Dale in :issue:`11610`.) @@ -1051,40 +1052,40 @@ their ``__init__`` method (for example, file objects) or in their crypt ----- -Addition of salt and modular crypt format (hashing method) and the :func:`~crypt.mksalt` -function to the :mod:`crypt` module. +Addition of salt and modular crypt format (hashing method) and the :func:`~!crypt.mksalt` +function to the :mod:`!crypt` module. (:issue:`10924`) curses ------ - * If the :mod:`curses` module is linked to the ncursesw library, use Unicode - functions when Unicode strings or characters are passed (e.g. - :c:func:`waddwstr`), and bytes functions otherwise (e.g. :c:func:`waddstr`). - * Use the locale encoding instead of ``utf-8`` to encode Unicode strings. - * :class:`curses.window` has a new :attr:`curses.window.encoding` attribute. - * The :class:`curses.window` class has a new :meth:`~curses.window.get_wch` - method to get a wide character - * The :mod:`curses` module has a new :meth:`~curses.unget_wch` function to - push a wide character so the next :meth:`~curses.window.get_wch` will return - it +* If the :mod:`curses` module is linked to the ncursesw library, use Unicode + functions when Unicode strings or characters are passed (e.g. + :c:func:`waddwstr`), and bytes functions otherwise (e.g. :c:func:`waddstr`). +* Use the locale encoding instead of ``utf-8`` to encode Unicode strings. +* :class:`curses.window` has a new :attr:`curses.window.encoding` attribute. +* The :class:`curses.window` class has a new :meth:`~curses.window.get_wch` + method to get a wide character +* The :mod:`curses` module has a new :meth:`~curses.unget_wch` function to + push a wide character so the next :meth:`~curses.window.get_wch` will return + it (Contributed by Iñigo Serna in :issue:`6755`.) datetime -------- - * Equality comparisons between naive and aware :class:`~datetime.datetime` - instances now return :const:`False` instead of raising :exc:`TypeError` - (:issue:`15006`). - * New :meth:`datetime.datetime.timestamp` method: Return POSIX timestamp - corresponding to the :class:`~datetime.datetime` instance. - * The :meth:`datetime.datetime.strftime` method supports formatting years - older than 1000. - * The :meth:`datetime.datetime.astimezone` method can now be - called without arguments to convert datetime instance to the system - timezone. +* Equality comparisons between naive and aware :class:`~datetime.datetime` + instances now return :const:`False` instead of raising :exc:`TypeError` + (:issue:`15006`). +* New :meth:`datetime.datetime.timestamp` method: Return POSIX timestamp + corresponding to the :class:`~datetime.datetime` instance. +* The :meth:`datetime.datetime.strftime` method supports formatting years + older than 1000. +* The :meth:`datetime.datetime.astimezone` method can now be + called without arguments to convert datetime instance to the system + timezone. .. _new-decimal: @@ -1126,7 +1127,7 @@ Features * If Python is compiled without threads, the C version automatically disables the expensive thread local context machinery. In this case, - the variable :data:`~decimal.HAVE_THREADS` is set to ``False``. + the variable :const:`~decimal.HAVE_THREADS` is set to ``False``. 
API changes ~~~~~~~~~~~ @@ -1134,20 +1135,20 @@ API changes * The C module has the following context limits, depending on the machine architecture: - +-------------------+---------------------+------------------------------+ - | | 32-bit | 64-bit | - +===================+=====================+==============================+ - | :const:`MAX_PREC` | :const:`425000000` | :const:`999999999999999999` | - +-------------------+---------------------+------------------------------+ - | :const:`MAX_EMAX` | :const:`425000000` | :const:`999999999999999999` | - +-------------------+---------------------+------------------------------+ - | :const:`MIN_EMIN` | :const:`-425000000` | :const:`-999999999999999999` | - +-------------------+---------------------+------------------------------+ + +-------------------+----------------+-------------------------+ + | | 32-bit | 64-bit | + +===================+================+=========================+ + | :const:`MAX_PREC` | ``425000000`` | ``999999999999999999`` | + +-------------------+----------------+-------------------------+ + | :const:`MAX_EMAX` | ``425000000`` | ``999999999999999999`` | + +-------------------+----------------+-------------------------+ + | :const:`MIN_EMIN` | ``-425000000`` | ``-999999999999999999`` | + +-------------------+----------------+-------------------------+ * In the context templates (:class:`~decimal.DefaultContext`, :class:`~decimal.BasicContext` and :class:`~decimal.ExtendedContext`) the magnitude of :attr:`~decimal.Context.Emax` and - :attr:`~decimal.Context.Emin` has changed to :const:`999999`. + :attr:`~decimal.Context.Emin` has changed to ``999999``. * The :class:`~decimal.Decimal` constructor in decimal.py does not observe the context limits and converts values with arbitrary exponents or precision @@ -1209,25 +1210,25 @@ the ``Message`` object it is serializing. The default policy is The minimum set of controls implemented by all ``policy`` objects are: - .. tabularcolumns:: |l|L| +.. tabularcolumns:: |l|L| - =============== ======================================================= - max_line_length The maximum length, excluding the linesep character(s), - individual lines may have when a ``Message`` is - serialized. Defaults to 78. +=============== ======================================================= +max_line_length The maximum length, excluding the linesep character(s), + individual lines may have when a ``Message`` is + serialized. Defaults to 78. - linesep The character used to separate individual lines when a - ``Message`` is serialized. Defaults to ``\n``. +linesep The character used to separate individual lines when a + ``Message`` is serialized. Defaults to ``\n``. - cte_type ``7bit`` or ``8bit``. ``8bit`` applies only to a - ``Bytes`` ``generator``, and means that non-ASCII may - be used where allowed by the protocol (or where it - exists in the original input). +cte_type ``7bit`` or ``8bit``. ``8bit`` applies only to a + ``Bytes`` ``generator``, and means that non-ASCII may + be used where allowed by the protocol (or where it + exists in the original input). - raise_on_defect Causes a ``parser`` to raise error when defects are - encountered instead of adding them to the ``Message`` - object's ``defects`` list. - =============== ======================================================= +raise_on_defect Causes a ``parser`` to raise error when defects are + encountered instead of adding them to the ``Message`` + object's ``defects`` list. 
+=============== ======================================================= A new policy instance, with new settings, is created using the :meth:`~email.policy.Policy.clone` method of policy objects. ``clone`` takes @@ -1262,21 +1263,21 @@ removal of the code) may occur if deemed necessary by the core developers. The new policies are instances of :class:`~email.policy.EmailPolicy`, and add the following additional controls: - .. tabularcolumns:: |l|L| +.. tabularcolumns:: |l|L| - =============== ======================================================= - refold_source Controls whether or not headers parsed by a - :mod:`~email.parser` are refolded by the - :mod:`~email.generator`. It can be ``none``, ``long``, - or ``all``. The default is ``long``, which means that - source headers with a line longer than - ``max_line_length`` get refolded. ``none`` means no - line get refolded, and ``all`` means that all lines - get refolded. +=============== ======================================================= +refold_source Controls whether or not headers parsed by a + :mod:`~email.parser` are refolded by the + :mod:`~email.generator`. It can be ``none``, ``long``, + or ``all``. The default is ``long``, which means that + source headers with a line longer than + ``max_line_length`` get refolded. ``none`` means no + line get refolded, and ``all`` means that all lines + get refolded. - header_factory A callable that take a ``name`` and ``value`` and - produces a custom header object. - =============== ======================================================= +header_factory A callable that take a ``name`` and ``value`` and + produces a custom header object. +=============== ======================================================= The ``header_factory`` is the key to the new features provided by the new policies. When one of the new policies is used, any header retrieved from @@ -1351,18 +1352,18 @@ API. New utility functions: - * :func:`~email.utils.format_datetime`: given a :class:`~datetime.datetime`, - produce a string formatted for use in an email header. +* :func:`~email.utils.format_datetime`: given a :class:`~datetime.datetime`, + produce a string formatted for use in an email header. - * :func:`~email.utils.parsedate_to_datetime`: given a date string from - an email header, convert it into an aware :class:`~datetime.datetime`, - or a naive :class:`~datetime.datetime` if the offset is ``-0000``. +* :func:`~email.utils.parsedate_to_datetime`: given a date string from + an email header, convert it into an aware :class:`~datetime.datetime`, + or a naive :class:`~datetime.datetime` if the offset is ``-0000``. - * :func:`~email.utils.localtime`: With no argument, returns the - current local time as an aware :class:`~datetime.datetime` using the local - :class:`~datetime.timezone`. Given an aware :class:`~datetime.datetime`, - converts it into an aware :class:`~datetime.datetime` using the - local :class:`~datetime.timezone`. +* :func:`~email.utils.localtime`: With no argument, returns the + current local time as an aware :class:`~datetime.datetime` using the local + :class:`~datetime.timezone`. Given an aware :class:`~datetime.datetime`, + converts it into an aware :class:`~datetime.datetime` using the + local :class:`~datetime.timezone`. ftplib @@ -1557,7 +1558,7 @@ Schlawack in :issue:`12708`.) 
nntplib ------- -The :class:`nntplib.NNTP` class now supports the context management protocol to +The :class:`!nntplib.NNTP` class now supports the context management protocol to unconditionally consume :exc:`socket.error` exceptions and to close the NNTP connection when done:: @@ -1575,8 +1576,8 @@ os -- * The :mod:`os` module has a new :func:`~os.pipe2` function that makes it - possible to create a pipe with :data:`~os.O_CLOEXEC` or - :data:`~os.O_NONBLOCK` flags set atomically. This is especially useful to + possible to create a pipe with :const:`~os.O_CLOEXEC` or + :const:`~os.O_NONBLOCK` flags set atomically. This is especially useful to avoid race conditions in multi-threaded programs. * The :mod:`os` module has a new :func:`~os.sendfile` function which provides @@ -1690,9 +1691,9 @@ os * Some platforms now support additional constants for the :func:`~os.lseek` function, such as ``os.SEEK_HOLE`` and ``os.SEEK_DATA``. -* New constants :data:`~os.RTLD_LAZY`, :data:`~os.RTLD_NOW`, - :data:`~os.RTLD_GLOBAL`, :data:`~os.RTLD_LOCAL`, :data:`~os.RTLD_NODELETE`, - :data:`~os.RTLD_NOLOAD`, and :data:`~os.RTLD_DEEPBIND` are available on +* New constants :const:`~os.RTLD_LAZY`, :const:`~os.RTLD_NOW`, + :const:`~os.RTLD_GLOBAL`, :const:`~os.RTLD_LOCAL`, :const:`~os.RTLD_NODELETE`, + :const:`~os.RTLD_NOLOAD`, and :const:`~os.RTLD_DEEPBIND` are available on platforms that support them. These are for use with the :func:`sys.setdlopenflags` function, and supersede the similar constants defined in :mod:`ctypes` and :mod:`DLFCN`. (Contributed by Victor Stinner @@ -1777,7 +1778,7 @@ shlex ----- The previously undocumented helper function ``quote`` from the -:mod:`pipes` modules has been moved to the :mod:`shlex` module and +:mod:`!pipes` modules has been moved to the :mod:`shlex` module and documented. :func:`~shlex.quote` properly escapes all characters in a string that might be otherwise given special meaning by the shell. @@ -1892,7 +1893,7 @@ socket * The :class:`~socket.socket` class now supports the PF_RDS protocol family (https://en.wikipedia.org/wiki/Reliable_Datagram_Sockets and - https://oss.oracle.com/projects/rds/). + `https://oss.oracle.com/projects/rds `__). * The :class:`~socket.socket` class now supports the ``PF_SYSTEM`` protocol family on OS X. (Contributed by Michael Goderbauer in :issue:`13777`.) @@ -1951,7 +1952,7 @@ ssl * You can query the SSL compression algorithm used by an SSL socket, thanks to its new :meth:`~ssl.SSLSocket.compression` method. The new attribute - :attr:`~ssl.OP_NO_COMPRESSION` can be used to disable compression. + :const:`~ssl.OP_NO_COMPRESSION` can be used to disable compression. (Contributed by Antoine Pitrou in :issue:`13634`.) * Support has been added for the Next Protocol Negotiation extension using @@ -1965,7 +1966,7 @@ ssl * The :func:`~ssl.get_server_certificate` function now supports IPv6. (Contributed by Charles-François Natali in :issue:`11811`.) -* New attribute :attr:`~ssl.OP_CIPHER_SERVER_PREFERENCE` allows setting +* New attribute :const:`~ssl.OP_CIPHER_SERVER_PREFERENCE` allows setting SSLv3 server sockets to use the server's cipher ordering preference rather than the client's (:issue:`13635`). @@ -1983,7 +1984,7 @@ the form '-rwxrwxrwx'. struct ------ -The :mod:`struct` module now supports ``ssize_t`` and ``size_t`` via the +The :mod:`struct` module now supports :c:type:`ssize_t` and :c:type:`size_t` via the new codes ``n`` and ``N``, respectively. (Contributed by Antoine Pitrou in :issue:`3163`.) 
@@ -1994,7 +1995,7 @@ subprocess Command strings can now be bytes objects on posix platforms. (Contributed by Victor Stinner in :issue:`8513`.) -A new constant :data:`~subprocess.DEVNULL` allows suppressing output in a +A new constant :const:`~subprocess.DEVNULL` allows suppressing output in a platform-independent fashion. (Contributed by Ross Lagerwall in :issue:`5870`.) @@ -2066,7 +2067,7 @@ The :pep:`418` added new functions to the :mod:`time` module: Other new functions: * :func:`~time.clock_getres`, :func:`~time.clock_gettime` and - :func:`~time.clock_settime` functions with ``CLOCK_xxx`` constants. + :func:`~time.clock_settime` functions with :samp:`CLOCK_{xxx}` constants. (Contributed by Victor Stinner in :issue:`10278`.) To improve cross platform consistency, :func:`~time.sleep` now raises a @@ -2140,7 +2141,7 @@ New attribute :attr:`zlib.Decompress.eof` makes it possible to distinguish between a properly formed compressed stream and an incomplete or truncated one. (Contributed by Nadeem Vawda in :issue:`12646`.) -New attribute :attr:`zlib.ZLIB_RUNTIME_VERSION` reports the version string of +New attribute :const:`zlib.ZLIB_RUNTIME_VERSION` reports the version string of the underlying ``zlib`` library that is loaded at runtime. (Contributed by Torsten Landschoff in :issue:`12306`.) @@ -2194,8 +2195,8 @@ Changes to Python's build process and to the C API include: * :c:func:`PyUnicode_AsUCS4`, :c:func:`PyUnicode_AsUCS4Copy` * :c:macro:`PyUnicode_DATA`, :c:macro:`PyUnicode_1BYTE_DATA`, :c:macro:`PyUnicode_2BYTE_DATA`, :c:macro:`PyUnicode_4BYTE_DATA` - * :c:macro:`PyUnicode_KIND` with :c:type:`PyUnicode_Kind` enum: - :c:data:`PyUnicode_WCHAR_KIND`, :c:data:`PyUnicode_1BYTE_KIND`, + * :c:macro:`PyUnicode_KIND` with :c:enum:`PyUnicode_Kind` enum: + :c:data:`!PyUnicode_WCHAR_KIND`, :c:data:`PyUnicode_1BYTE_KIND`, :c:data:`PyUnicode_2BYTE_KIND`, :c:data:`PyUnicode_4BYTE_KIND` * :c:macro:`PyUnicode_READ`, :c:macro:`PyUnicode_READ_CHAR`, :c:macro:`PyUnicode_WRITE` * :c:macro:`PyUnicode_MAX_CHAR_VALUE` @@ -2269,58 +2270,58 @@ removed in Python 4. 
All functions using this type are deprecated: Unicode functions and methods using :c:type:`Py_UNICODE` and :c:expr:`Py_UNICODE*` types: -* :c:macro:`PyUnicode_FromUnicode`: use :c:func:`PyUnicode_FromWideChar` or +* :c:macro:`!PyUnicode_FromUnicode`: use :c:func:`PyUnicode_FromWideChar` or :c:func:`PyUnicode_FromKindAndData` -* :c:macro:`PyUnicode_AS_UNICODE`, :c:func:`PyUnicode_AsUnicode`, - :c:func:`PyUnicode_AsUnicodeAndSize`: use :c:func:`PyUnicode_AsWideCharString` -* :c:macro:`PyUnicode_AS_DATA`: use :c:macro:`PyUnicode_DATA` with +* :c:macro:`!PyUnicode_AS_UNICODE`, :c:func:`!PyUnicode_AsUnicode`, + :c:func:`!PyUnicode_AsUnicodeAndSize`: use :c:func:`PyUnicode_AsWideCharString` +* :c:macro:`!PyUnicode_AS_DATA`: use :c:macro:`PyUnicode_DATA` with :c:macro:`PyUnicode_READ` and :c:macro:`PyUnicode_WRITE` -* :c:macro:`PyUnicode_GET_SIZE`, :c:func:`PyUnicode_GetSize`: use +* :c:macro:`!PyUnicode_GET_SIZE`, :c:func:`!PyUnicode_GetSize`: use :c:macro:`PyUnicode_GET_LENGTH` or :c:func:`PyUnicode_GetLength` -* :c:macro:`PyUnicode_GET_DATA_SIZE`: use +* :c:macro:`!PyUnicode_GET_DATA_SIZE`: use ``PyUnicode_GET_LENGTH(str) * PyUnicode_KIND(str)`` (only work on ready strings) -* :c:func:`PyUnicode_AsUnicodeCopy`: use :c:func:`PyUnicode_AsUCS4Copy` or +* :c:func:`!PyUnicode_AsUnicodeCopy`: use :c:func:`PyUnicode_AsUCS4Copy` or :c:func:`PyUnicode_AsWideCharString` -* :c:func:`PyUnicode_GetMax` +* :c:func:`!PyUnicode_GetMax` Functions and macros manipulating Py_UNICODE* strings: -* :c:macro:`Py_UNICODE_strlen`: use :c:func:`PyUnicode_GetLength` or +* :c:macro:`!Py_UNICODE_strlen()`: use :c:func:`PyUnicode_GetLength` or :c:macro:`PyUnicode_GET_LENGTH` -* :c:macro:`Py_UNICODE_strcat`: use :c:func:`PyUnicode_CopyCharacters` or +* :c:macro:`!Py_UNICODE_strcat()`: use :c:func:`PyUnicode_CopyCharacters` or :c:func:`PyUnicode_FromFormat` -* :c:macro:`Py_UNICODE_strcpy`, :c:macro:`Py_UNICODE_strncpy`, - :c:macro:`Py_UNICODE_COPY`: use :c:func:`PyUnicode_CopyCharacters` or +* :c:macro:`!Py_UNICODE_strcpy()`, :c:macro:`!Py_UNICODE_strncpy()`, + :c:macro:`!Py_UNICODE_COPY()`: use :c:func:`PyUnicode_CopyCharacters` or :c:func:`PyUnicode_Substring` -* :c:macro:`Py_UNICODE_strcmp`: use :c:func:`PyUnicode_Compare` -* :c:macro:`Py_UNICODE_strncmp`: use :c:func:`PyUnicode_Tailmatch` -* :c:macro:`Py_UNICODE_strchr`, :c:macro:`Py_UNICODE_strrchr`: use +* :c:macro:`!Py_UNICODE_strcmp()`: use :c:func:`PyUnicode_Compare` +* :c:macro:`!Py_UNICODE_strncmp()`: use :c:func:`PyUnicode_Tailmatch` +* :c:macro:`!Py_UNICODE_strchr()`, :c:macro:`!Py_UNICODE_strrchr()`: use :c:func:`PyUnicode_FindChar` -* :c:macro:`Py_UNICODE_FILL`: use :c:func:`PyUnicode_Fill` -* :c:macro:`Py_UNICODE_MATCH` +* :c:macro:`!Py_UNICODE_FILL()`: use :c:func:`PyUnicode_Fill` +* :c:macro:`!Py_UNICODE_MATCH` Encoders: -* :c:func:`PyUnicode_Encode`: use :c:func:`PyUnicode_AsEncodedObject` -* :c:func:`PyUnicode_EncodeUTF7` -* :c:func:`PyUnicode_EncodeUTF8`: use :c:func:`PyUnicode_AsUTF8` or +* :c:func:`!PyUnicode_Encode`: use :c:func:`!PyUnicode_AsEncodedObject` +* :c:func:`!PyUnicode_EncodeUTF7` +* :c:func:`!PyUnicode_EncodeUTF8`: use :c:func:`PyUnicode_AsUTF8` or :c:func:`PyUnicode_AsUTF8String` -* :c:func:`PyUnicode_EncodeUTF32` -* :c:func:`PyUnicode_EncodeUTF16` -* :c:func:`PyUnicode_EncodeUnicodeEscape` use +* :c:func:`!PyUnicode_EncodeUTF32` +* :c:func:`!PyUnicode_EncodeUTF16` +* :c:func:`!PyUnicode_EncodeUnicodeEscape` use :c:func:`PyUnicode_AsUnicodeEscapeString` -* :c:func:`PyUnicode_EncodeRawUnicodeEscape` use +* 
:c:func:`!PyUnicode_EncodeRawUnicodeEscape` use :c:func:`PyUnicode_AsRawUnicodeEscapeString` -* :c:func:`PyUnicode_EncodeLatin1`: use :c:func:`PyUnicode_AsLatin1String` -* :c:func:`PyUnicode_EncodeASCII`: use :c:func:`PyUnicode_AsASCIIString` -* :c:func:`PyUnicode_EncodeCharmap` -* :c:func:`PyUnicode_TranslateCharmap` -* :c:func:`PyUnicode_EncodeMBCS`: use :c:func:`PyUnicode_AsMBCSString` or +* :c:func:`!PyUnicode_EncodeLatin1`: use :c:func:`PyUnicode_AsLatin1String` +* :c:func:`!PyUnicode_EncodeASCII`: use :c:func:`PyUnicode_AsASCIIString` +* :c:func:`!PyUnicode_EncodeCharmap` +* :c:func:`!PyUnicode_TranslateCharmap` +* :c:func:`!PyUnicode_EncodeMBCS`: use :c:func:`PyUnicode_AsMBCSString` or :c:func:`PyUnicode_EncodeCodePage` (with ``CP_ACP`` code_page) -* :c:func:`PyUnicode_EncodeDecimal`, - :c:func:`PyUnicode_TransformDecimalToASCII` +* :c:func:`!PyUnicode_EncodeDecimal`, + :c:func:`!PyUnicode_TransformDecimalToASCII` Deprecated features @@ -2377,19 +2378,19 @@ Porting Python code * :func:`__import__` no longer allows one to use an index value other than 0 for top-level modules. E.g. ``__import__('sys', level=1)`` is now an error. -* Because :attr:`sys.meta_path` and :attr:`sys.path_hooks` now have finders on +* Because :data:`sys.meta_path` and :data:`sys.path_hooks` now have finders on them by default, you will most likely want to use :meth:`list.insert` instead of :meth:`list.append` to add to those lists. -* Because ``None`` is now inserted into :attr:`sys.path_importer_cache`, if you +* Because ``None`` is now inserted into :data:`sys.path_importer_cache`, if you are clearing out entries in the dictionary of paths that do not have a finder, you will need to remove keys paired with values of ``None`` **and** - :class:`imp.NullImporter` to be backwards-compatible. This will lead to extra + :class:`!imp.NullImporter` to be backwards-compatible. This will lead to extra overhead on older versions of Python that re-insert ``None`` into - :attr:`sys.path_importer_cache` where it represents the use of implicit + :data:`sys.path_importer_cache` where it represents the use of implicit finders, but semantically it should not change anything. -* :class:`importlib.abc.Finder` no longer specifies a ``find_module()`` abstract +* :class:`!importlib.abc.Finder` no longer specifies a ``find_module()`` abstract method that must be implemented. If you were relying on subclasses to implement that method, make sure to check for the method's existence first. You will probably want to check for ``find_loader()`` first, though, in the @@ -2444,7 +2445,7 @@ Porting Python code error instead of sleeping forever. It has always raised an error on posix. * The ``ast.__version__`` constant has been removed. If you need to - make decisions affected by the AST version, use :attr:`sys.version_info` + make decisions affected by the AST version, use :data:`sys.version_info` to make the decision. * Code that used to work around the fact that the :mod:`threading` module used @@ -2461,7 +2462,7 @@ Porting C code -------------- * In the course of changes to the buffer API the undocumented - :c:member:`~Py_buffer.smalltable` member of the + :c:member:`!smalltable` member of the :c:type:`Py_buffer` structure has been removed and the layout of the :c:type:`PyMemoryViewObject` has changed. 
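To make the ``sys.meta_path`` advice in the "Porting Python code" hunk above concrete, here is a minimal sketch; ``NoisyFinder`` is a hypothetical finder (shown in the modern ``find_spec`` style, not code from the patch)::

    import importlib.abc
    import sys

    class NoisyFinder(importlib.abc.MetaPathFinder):
        def find_spec(self, name, path=None, target=None):
            print('import requested:', name)
            return None     # always defer to the finders that follow

    # The default finders are already on sys.meta_path, so insert at the
    # front rather than appending if the custom finder should run first.
    sys.meta_path.insert(0, NoisyFinder())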
diff --git a/Doc/whatsnew/3.4.rst b/Doc/whatsnew/3.4.rst index b7bb505a8184828..2ddab76814369e6 100644 --- a/Doc/whatsnew/3.4.rst +++ b/Doc/whatsnew/3.4.rst @@ -605,15 +605,15 @@ Using ``ABC`` as a base class has essentially the same effect as specifying aifc ---- -The :meth:`~aifc.aifc.getparams` method now returns a namedtuple rather than a +The :meth:`~!aifc.aifc.getparams` method now returns a namedtuple rather than a plain tuple. (Contributed by Claudiu Popa in :issue:`17818`.) -:func:`aifc.open` now supports the context management protocol: when used in a -:keyword:`with` block, the :meth:`~aifc.aifc.close` method of the returned +:func:`!aifc.open` now supports the context management protocol: when used in a +:keyword:`with` block, the :meth:`~!aifc.aifc.close` method of the returned object will be called automatically at the end of the block. (Contributed by Serhiy Storchacha in :issue:`16486`.) -The :meth:`~aifc.aifc.writeframesraw` and :meth:`~aifc.aifc.writeframes` +The :meth:`~!aifc.aifc.writeframesraw` and :meth:`~!aifc.aifc.writeframes` methods now accept any :term:`bytes-like object`. (Contributed by Serhiy Storchaka in :issue:`8311`.) @@ -629,14 +629,14 @@ by Lucas Maystre in :issue:`11175`.) audioop ------- -:mod:`audioop` now supports 24-bit samples. (Contributed by Serhiy Storchaka +:mod:`!audioop` now supports 24-bit samples. (Contributed by Serhiy Storchaka in :issue:`12866`.) -New :func:`~audioop.byteswap` function converts big-endian samples to +New :func:`~!audioop.byteswap` function converts big-endian samples to little-endian and vice versa. (Contributed by Serhiy Storchaka in :issue:`19641`.) -All :mod:`audioop` functions now accept any :term:`bytes-like object`. Strings +All :mod:`!audioop` functions now accept any :term:`bytes-like object`. Strings are not accepted: they didn't work before, now they raise an error right away. (Contributed by Serhiy Storchaka in :issue:`16685`.) @@ -775,7 +775,7 @@ of a given opcode and argument, information that is not otherwise available. doctest ------- -A new :ref:`option flag `, :data:`~doctest.FAIL_FAST`, halts +A new :ref:`option flag `, :const:`~doctest.FAIL_FAST`, halts test running as soon as the first failure is detected. (Contributed by R. David Murray and Daniel Urban in :issue:`16522`.) @@ -841,7 +841,7 @@ for example, if the file might have been changed and re-checked in less time than the resolution of a particular filesystem's file modification time field. (Contributed by Mark Levitt in :issue:`18149`.) -New module attribute :data:`~filecmp.DEFAULT_IGNORES` provides the list of +New module attribute :const:`~filecmp.DEFAULT_IGNORES` provides the list of directories that are used as the default value for the *ignore* parameter of the :func:`~filecmp.dircmp` function. (Contributed by Eli Bendersky in :issue:`15442`.) @@ -991,18 +991,18 @@ for the :meth:`~importlib.abc.InspectLoader.get_code` method. However, it will normally be desirable to override the default implementation for performance reasons. (Contributed by Brett Cannon in :issue:`18072`.) -The :func:`~importlib.reload` function has been moved from :mod:`imp` to -:mod:`importlib` as part of the :mod:`imp` module deprecation. (Contributed by +The :func:`~importlib.reload` function has been moved from :mod:`!imp` to +:mod:`importlib` as part of the :mod:`!imp` module deprecation. (Contributed by Berker Peksag in :issue:`18193`.) :mod:`importlib.util` now has a :data:`~importlib.util.MAGIC_NUMBER` attribute providing access to the bytecode version number. 
This replaces the -:func:`~imp.get_magic` function in the deprecated :mod:`imp` module. +:func:`!get_magic` function in the deprecated :mod:`!imp` module. (Contributed by Brett Cannon in :issue:`18192`.) New :mod:`importlib.util` functions :func:`~importlib.util.cache_from_source` and :func:`~importlib.util.source_from_cache` replace the same-named functions -in the deprecated :mod:`imp` module. (Contributed by Brett Cannon in +in the deprecated :mod:`!imp` module. (Contributed by Brett Cannon in :issue:`18194`.) The :mod:`importlib` bootstrap :class:`.NamespaceLoader` now conforms to @@ -1189,7 +1189,7 @@ Windows). (Contributed by Brian Curtin in :issue:`11939`.) root on Windows. (Contributed by Tim Golden in :issue:`9035`.) :func:`os.open` supports two new flags on platforms that provide them, -:data:`~os.O_PATH` (un-opened file descriptor), and :data:`~os.O_TMPFILE` +:const:`~os.O_PATH` (un-opened file descriptor), and :const:`~os.O_TMPFILE` (unnamed temporary file; as of 3.4.0 release available only on Linux systems with a kernel version of 3.11 or newer that have uapi headers). (Contributed by Christian Heimes in :issue:`18673` and Benjamin Peterson, respectively.) @@ -1238,8 +1238,8 @@ plistlib stdlib serialization protocols, with new :func:`~plistlib.load`, :func:`~plistlib.dump`, :func:`~plistlib.loads`, and :func:`~plistlib.dumps` functions. (The older API is now deprecated.) In addition to the already -supported XML plist format (:data:`~plistlib.FMT_XML`), it also now supports -the binary plist format (:data:`~plistlib.FMT_BINARY`). (Contributed by Ronald +supported XML plist format (:const:`~plistlib.FMT_XML`), it also now supports +the binary plist format (:const:`~plistlib.FMT_BINARY`). (Contributed by Ronald Oussoren and others in :issue:`14455`.) @@ -1323,14 +1323,14 @@ ability to query or set the resource limits for processes other than the one making the call. (Contributed by Christian Heimes in :issue:`16595`.) On Linux kernel version 2.6.36 or later, there are also some new -Linux specific constants: :attr:`~resource.RLIMIT_MSGQUEUE`, -:attr:`~resource.RLIMIT_NICE`, :attr:`~resource.RLIMIT_RTPRIO`, -:attr:`~resource.RLIMIT_RTTIME`, and :attr:`~resource.RLIMIT_SIGPENDING`. +Linux specific constants: :const:`~resource.RLIMIT_MSGQUEUE`, +:const:`~resource.RLIMIT_NICE`, :const:`~resource.RLIMIT_RTPRIO`, +:const:`~resource.RLIMIT_RTTIME`, and :const:`~resource.RLIMIT_SIGPENDING`. (Contributed by Christian Heimes in :issue:`19324`.) On FreeBSD version 9 and later, there some new FreeBSD specific constants: -:attr:`~resource.RLIMIT_SBSIZE`, :attr:`~resource.RLIMIT_SWAP`, and -:attr:`~resource.RLIMIT_NPTS`. (Contributed by Claudiu Popa in +:const:`~resource.RLIMIT_SBSIZE`, :const:`~resource.RLIMIT_SWAP`, and +:const:`~resource.RLIMIT_NPTS`. (Contributed by Claudiu Popa in :issue:`19343`.) @@ -1388,7 +1388,7 @@ try/except statement by code that only cares whether or not an error occurred. socket ------ -The socket module now supports the :data:`~socket.CAN_BCM` protocol on +The socket module now supports the :const:`~socket.CAN_BCM` protocol on platforms that support it. (Contributed by Brian Thorne in :issue:`15359`.) Socket objects have new methods to get or set their :ref:`inheritable flag @@ -1399,7 +1399,7 @@ The ``socket.AF_*`` and ``socket.SOCK_*`` constants are now enumeration values using the new :mod:`enum` module. This allows meaningful names to be printed during debugging, instead of integer "magic numbers". 
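A small sketch of the enum-valued socket constants described just above; the exact integer values are platform-dependent and shown only as an assumption of a typical build::

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print(repr(s.family))   # <AddressFamily.AF_INET: 2>, not a bare integer
    print(repr(s.type))     # <SocketKind.SOCK_STREAM: 1>
    s.close()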
-The :data:`~socket.AF_LINK` constant is now available on BSD and OSX. +The :const:`~socket.AF_LINK` constant is now available on BSD and OSX. :func:`~socket.inet_pton` and :func:`~socket.inet_ntop` are now supported on Windows. (Contributed by Atsuo Ishimoto in :issue:`7171`.) @@ -1460,8 +1460,8 @@ Heimes in :issue:`18147`.) If OpenSSL 0.9.8 or later is available, :class:`~ssl.SSLContext` has a new attribute :attr:`~ssl.SSLContext.verify_flags` that can be used to control the certificate verification process by setting it to some combination of the new -constants :data:`~ssl.VERIFY_DEFAULT`, :data:`~ssl.VERIFY_CRL_CHECK_LEAF`, -:data:`~ssl.VERIFY_CRL_CHECK_CHAIN`, or :data:`~ssl.VERIFY_X509_STRICT`. +constants :const:`~ssl.VERIFY_DEFAULT`, :const:`~ssl.VERIFY_CRL_CHECK_LEAF`, +:const:`~ssl.VERIFY_CRL_CHECK_CHAIN`, or :const:`~ssl.VERIFY_X509_STRICT`. OpenSSL does not do any CRL verification by default. (Contributed by Christien Heimes in :issue:`8813`.) @@ -1500,7 +1500,7 @@ implementation is required as most of the values aren't standardized and are platform-dependent. (Contributed by Christian Heimes in :issue:`11016`.) The module supports new :mod:`~stat.ST_MODE` flags, :mod:`~stat.S_IFDOOR`, -:attr:`~stat.S_IFPORT`, and :attr:`~stat.S_IFWHT`. (Contributed by +:const:`~stat.S_IFPORT`, and :const:`~stat.S_IFWHT`. (Contributed by Christian Hiemes in :issue:`11016`.) @@ -1528,10 +1528,10 @@ work on Windows. This change was actually inadvertently made in 3.3.4. sunau ----- -The :meth:`~sunau.getparams` method now returns a namedtuple rather than a +The :meth:`~!sunau.getparams` method now returns a namedtuple rather than a plain tuple. (Contributed by Claudiu Popa in :issue:`18901`.) -:meth:`sunau.open` now supports the context management protocol: when used in a +:meth:`!sunau.open` now supports the context management protocol: when used in a :keyword:`with` block, the ``close`` method of the returned object will be called automatically at the end of the block. (Contributed by Serhiy Storchaka in :issue:`18878`.) @@ -1540,8 +1540,8 @@ in :issue:`18878`.) support for writing 24 sample using the module. (Contributed by Serhiy Storchaka in :issue:`19261`.) -The :meth:`~sunau.AU_write.writeframesraw` and -:meth:`~sunau.AU_write.writeframes` methods now accept any :term:`bytes-like +The :meth:`~!sunau.AU_write.writeframesraw` and +:meth:`~!sunau.AU_write.writeframes` methods now accept any :term:`bytes-like object`. (Contributed by Serhiy Storchaka in :issue:`8311`.) @@ -1849,7 +1849,7 @@ Python's default implementation to a SipHash implementation on platforms that have a 64 bit data type. Any performance differences in comparison with the older FNV algorithm are trivial. -The PEP adds additional fields to the :attr:`sys.hash_info` named tuple to +The PEP adds additional fields to the :data:`sys.hash_info` named tuple to describe the hash algorithm in use by the currently executing binary. Otherwise, the PEP does not alter any existing CPython APIs. @@ -1891,7 +1891,7 @@ Other Build and C API Changes allowing retrieval of function pointers from named type slots when using the limited API. (Contributed by Martin von Löwis in :issue:`17162`.) -* The new :c:func:`Py_SetStandardStreamEncoding` pre-initialization API +* The new :c:func:`!Py_SetStandardStreamEncoding` pre-initialization API allows applications embedding the CPython interpreter to reliably force a particular encoding and error handler for the standard streams. (Contributed by Bastien Montagne and Nick Coghlan in :issue:`16129`.) 
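For the ``verify_flags`` hunk above, a minimal sketch of how those constants are typically combined (it assumes a CRL has already been made available via ``load_verify_locations``; this is not code from the patch)::

    import ssl

    ctx = ssl.create_default_context()
    # Also check the peer's leaf certificate against a CRL; by default
    # OpenSSL performs no CRL checking at all.
    ctx.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
    print(ssl.VERIFY_CRL_CHECK_LEAF in ctx.verify_flags)   # True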
@@ -2077,31 +2077,31 @@ Deprecations in the Python API ------------------------------ * As mentioned in :ref:`whatsnew-pep-451`, a number of :mod:`importlib` - methods and functions are deprecated: :meth:`importlib.find_loader` is + methods and functions are deprecated: :meth:`!importlib.find_loader` is replaced by :func:`importlib.util.find_spec`; - :meth:`importlib.machinery.PathFinder.find_module` is replaced by + :meth:`!importlib.machinery.PathFinder.find_module` is replaced by :meth:`importlib.machinery.PathFinder.find_spec`; - :meth:`importlib.abc.MetaPathFinder.find_module` is replaced by + :meth:`!importlib.abc.MetaPathFinder.find_module` is replaced by :meth:`importlib.abc.MetaPathFinder.find_spec`; - :meth:`importlib.abc.PathEntryFinder.find_loader` and - :meth:`~importlib.abc.PathEntryFinder.find_module` are replaced by - :meth:`importlib.abc.PathEntryFinder.find_spec`; all of the ``xxxLoader`` ABC - ``load_module`` methods (:meth:`importlib.abc.Loader.load_module`, - :meth:`importlib.abc.InspectLoader.load_module`, - :meth:`importlib.abc.FileLoader.load_module`, - :meth:`importlib.abc.SourceLoader.load_module`) should no longer be + :meth:`!importlib.abc.PathEntryFinder.find_loader` and + :meth:`!find_module` are replaced by + :meth:`importlib.abc.PathEntryFinder.find_spec`; all of the :samp:`{xxx}Loader` ABC + ``load_module`` methods (:meth:`!importlib.abc.Loader.load_module`, + :meth:`!importlib.abc.InspectLoader.load_module`, + :meth:`!importlib.abc.FileLoader.load_module`, + :meth:`!importlib.abc.SourceLoader.load_module`) should no longer be implemented, instead loaders should implement an ``exec_module`` method (:meth:`importlib.abc.Loader.exec_module`, :meth:`importlib.abc.InspectLoader.exec_module` :meth:`importlib.abc.SourceLoader.exec_module`) and let the import system take care of the rest; and - :meth:`importlib.abc.Loader.module_repr`, - :meth:`importlib.util.module_for_loader`, :meth:`importlib.util.set_loader`, - and :meth:`importlib.util.set_package` are no longer needed because their + :meth:`!importlib.abc.Loader.module_repr`, + :meth:`!importlib.util.module_for_loader`, :meth:`!importlib.util.set_loader`, + and :meth:`!importlib.util.set_package` are no longer needed because their functions are now handled automatically by the import system. -* The :mod:`imp` module is pending deprecation. To keep compatibility with +* The :mod:`!imp` module is pending deprecation. To keep compatibility with Python 2/3 code bases, the module's removal is currently not scheduled. * The :mod:`formatter` module is pending deprecation and is slated for removal @@ -2277,7 +2277,7 @@ Changes in the Python API in a backwards-compatible fashion, use e.g. ``getattr(module, '__loader__', None) is not None``. (:issue:`17115`.) -* :meth:`importlib.util.module_for_loader` now sets ``__loader__`` and +* :meth:`!importlib.util.module_for_loader` now sets ``__loader__`` and ``__package__`` unconditionally to properly support reloading. If this is not desired then you will need to set these attributes manually. You can use :func:`importlib.util.module_to_load` for module management. @@ -2300,7 +2300,7 @@ Changes in the Python API then you can see if the module's ``__spec__.location`` is set to ``'frozen'``, check if the loader is a subclass of :class:`importlib.machinery.FrozenImporter`, - or if Python 2 compatibility is necessary you can use :func:`imp.is_frozen`. + or if Python 2 compatibility is necessary you can use :func:`!imp.is_frozen`. 
* :func:`py_compile.compile` now raises :exc:`FileExistsError` if the file path it would write to is a symlink or a non-regular file. This is to act as a @@ -2371,11 +2371,11 @@ Changes in the Python API 3.3.3. * The :attr:`~cgi.FieldStorage.file` attribute is now automatically closed when - the creating :class:`cgi.FieldStorage` instance is garbage collected. If you - were pulling the file object out separately from the :class:`cgi.FieldStorage` + the creating :class:`!cgi.FieldStorage` instance is garbage collected. If you + were pulling the file object out separately from the :class:`!cgi.FieldStorage` instance and not keeping the instance alive, then you should either store the - entire :class:`cgi.FieldStorage` instance or read the contents of the file - before the :class:`cgi.FieldStorage` instance is garbage collected. + entire :class:`!cgi.FieldStorage` instance or read the contents of the file + before the :class:`!cgi.FieldStorage` instance is garbage collected. * Calling ``read`` or ``write`` on a closed SSL socket now raises an informative :exc:`ValueError` rather than the previous more mysterious @@ -2434,7 +2434,7 @@ Changes in the Python API matched the input type, so if your code was depending on the return value being, say, a ``bytearray``, you will need to change your code. -* :mod:`audioop` functions now raise an error immediately if passed string +* :mod:`!audioop` functions now raise an error immediately if passed string input, instead of failing randomly later on (:issue:`16685`). * The new *convert_charrefs* argument to :class:`~html.parser.HTMLParser` diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst index f872579ef546f59..ae6affcab664c60 100644 --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -425,7 +425,7 @@ are declared in the annotations:: While these annotations are available at runtime through the usual :attr:`__annotations__` attribute, *no automatic type checking happens at runtime*. Instead, it is assumed that a separate off-line type checker -(e.g. `mypy `_) will be used for on-demand +(e.g. `mypy `_) will be used for on-demand source code analysis. The type system supports unions, generic types, and a special type @@ -478,7 +478,7 @@ not make an additional system call:: PEP 475: Retry system calls failing with EINTR ---------------------------------------------- -An :py:data:`errno.EINTR` error code is returned whenever a system call, that +An :py:const:`errno.EINTR` error code is returned whenever a system call, that is waiting for I/O, is interrupted by a signal. Previously, Python would raise :exc:`InterruptedError` in such cases. This meant that, when writing a Python application, the developer had two choices: @@ -527,7 +527,7 @@ by a signal: :func:`~os.writev`; * special cases: :func:`os.close` and :func:`os.dup2` now ignore - :py:data:`~errno.EINTR` errors; the syscall is not retried (see the PEP + :py:const:`~errno.EINTR` errors; the syscall is not retried (see the PEP for the rationale); * :mod:`select` functions: :func:`devpoll.poll() `, @@ -921,7 +921,7 @@ and improves their substitutability for lists. Docstrings produced by :func:`~collections.namedtuple` can now be updated:: Point = namedtuple('Point', ['x', 'y']) - Point.__doc__ += ': Cartesian coodinate' + Point.__doc__ += ': Cartesian coordinate' Point.x.__doc__ = 'abscissa' Point.y.__doc__ = 'ordinate' @@ -1045,8 +1045,8 @@ not just sequences. (Contributed by Serhiy Storchaka in :issue:`23171`.) 
curses ------ -The new :func:`~curses.update_lines_cols` function updates the :envvar:`LINES` -and :envvar:`COLS` environment variables. This is useful for detecting +The new :func:`~curses.update_lines_cols` function updates the :data:`LINES` +and :data:`COLS` module variables. This is useful for detecting manual screen resizing. (Contributed by Arnon Yaari in :issue:`4254`.) @@ -1252,7 +1252,7 @@ Oberkirch in :issue:`21800`.) imghdr ------ -The :func:`~imghdr.what` function now recognizes the +The :func:`~!imghdr.what` function now recognizes the `OpenEXR `_ format (contributed by Martin Vignali and Claudiu Popa in :issue:`20295`), and the `WebP `_ format @@ -1498,7 +1498,7 @@ use ``/dev/urandom`` and avoiding failures due to potential file descriptor exhaustion. (Contributed by Victor Stinner in :issue:`22181`.) New :func:`~os.get_blocking` and :func:`~os.set_blocking` functions allow -getting and setting a file descriptor's blocking mode (:data:`~os.O_NONBLOCK`.) +getting and setting a file descriptor's blocking mode (:const:`~os.O_NONBLOCK`.) (Contributed by Victor Stinner in :issue:`22054`.) The :func:`~os.truncate` and :func:`~os.ftruncate` functions are now supported @@ -1783,7 +1783,7 @@ the TLS handshake. The new :meth:`SSLSocket.selected_alpn_protocol() ` returns the protocol that was selected during the TLS handshake. -The :data:`~ssl.HAS_ALPN` flag indicates whether ALPN support is present. +The :const:`~ssl.HAS_ALPN` flag indicates whether ALPN support is present. Other Changes @@ -2095,7 +2095,7 @@ accepts ``"x"`` to request exclusive creation. Other module-level changes ========================== -Many functions in the :mod:`mmap`, :mod:`ossaudiodev`, :mod:`socket`, +Many functions in the :mod:`mmap`, :mod:`!ossaudiodev`, :mod:`socket`, :mod:`ssl`, and :mod:`codecs` modules now accept writable :term:`bytes-like objects `. (Contributed by Serhiy Storchaka in :issue:`23001`.) @@ -2192,7 +2192,7 @@ encode error with ``\N{...}`` escapes. (Contributed by Serhiy Storchaka in :issue:`19676`.) A new :c:func:`PyErr_FormatV` function similar to :c:func:`PyErr_Format`, -but accepts a ``va_list`` argument. +but accepts a :c:type:`va_list` argument. (Contributed by Antoine Pitrou in :issue:`18711`.) A new :c:data:`PyExc_RecursionError` exception. @@ -2212,7 +2212,7 @@ for details.) The :c:member:`PyTypeObject.tp_finalize` slot is now part of the stable ABI. Windows builds now require Microsoft Visual C++ 14.0, which -is available as part of `Visual Studio 2015 `_. +is available as part of `Visual Studio 2015 `_. Extension modules now include a platform information tag in their filename on some platforms (the tag is optional, and CPython will import extensions without @@ -2476,7 +2476,7 @@ Changes in the Python API in Python 3.5, all old ``.pyo`` files from previous versions of Python are invalid regardless of this PEP. -* The :mod:`socket` module now exports the :data:`~socket.CAN_RAW_FD_FRAMES` +* The :mod:`socket` module now exports the :const:`~socket.CAN_RAW_FD_FRAMES` constant on linux 3.6 and greater. * The :func:`ssl.cert_time_to_seconds` function now interprets the input time @@ -2512,7 +2512,7 @@ Changes in the Python API Changes in the C API -------------------- -* The undocumented :c:member:`~PyMemoryViewObject.format` member of the +* The undocumented :c:member:`!format` member of the (non-public) :c:type:`PyMemoryViewObject` structure has been removed. All extensions relying on the relevant parts in ``memoryobject.h`` must be rebuilt. 
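A minimal sketch of the ``os.get_blocking``/``os.set_blocking`` pair mentioned in the hunks above (assumes a POSIX platform; the pipe is used only as a convenient file descriptor)::

    import os

    r, w = os.pipe()
    print(os.get_blocking(r))    # True: new descriptors start out blocking
    os.set_blocking(r, False)    # sets O_NONBLOCK on the descriptor
    print(os.get_blocking(r))    # False
    os.close(r)
    os.close(w)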
@@ -2520,7 +2520,7 @@ Changes in the C API * The :c:type:`PyMemAllocator` structure was renamed to :c:type:`PyMemAllocatorEx` and a new ``calloc`` field was added. -* Removed non-documented macro :c:macro:`PyObject_REPR` which leaked references. +* Removed non-documented macro :c:macro:`!PyObject_REPR()` which leaked references. Use format character ``%R`` in :c:func:`PyUnicode_FromFormat`-like functions to format the :func:`repr` of the object. (Contributed by Serhiy Storchaka in :issue:`22453`.) @@ -2533,7 +2533,7 @@ Changes in the C API * As part of the :pep:`492` implementation, the ``tp_reserved`` slot of :c:type:`PyTypeObject` was replaced with a - :c:member:`tp_as_async` slot. Refer to :ref:`coro-objects` for + :c:member:`~PyTypeObject.tp_as_async` slot. Refer to :ref:`coro-objects` for new types, structures and functions. diff --git a/Doc/whatsnew/3.6.rst b/Doc/whatsnew/3.6.rst index e4294c88b585721..c15d8be651fd176 100644 --- a/Doc/whatsnew/3.6.rst +++ b/Doc/whatsnew/3.6.rst @@ -238,7 +238,7 @@ and the ``__annotations__`` attribute. and Guido van Rossum. Implemented by Ivan Levkivskyi. Tools that use or will use the new syntax: - `mypy `_, + `mypy `_, `pytype `_, PyCharm, etc. @@ -650,8 +650,8 @@ compiled in release mode using ``PYTHONMALLOC=debug``. Effects of debug hooks: * Detect writes before the start of a buffer (buffer underflows) * Detect writes after the end of a buffer (buffer overflows) * Check that the :term:`GIL ` is held when allocator - functions of :c:data:`PYMEM_DOMAIN_OBJ` (ex: :c:func:`PyObject_Malloc`) and - :c:data:`PYMEM_DOMAIN_MEM` (ex: :c:func:`PyMem_Malloc`) domains are called. + functions of :c:macro:`PYMEM_DOMAIN_OBJ` (ex: :c:func:`PyObject_Malloc`) and + :c:macro:`PYMEM_DOMAIN_MEM` (ex: :c:func:`PyMem_Malloc`) domains are called. Checking if the GIL is held is also a new feature of Python 3.6. @@ -1388,7 +1388,7 @@ are treated as punctuation. site ---- -When specifying paths to add to :attr:`sys.path` in a ``.pth`` file, +When specifying paths to add to :data:`sys.path` in a ``.pth`` file, you may now specify file paths on top of directories (e.g. zip files). (Contributed by Wolfgang Langner in :issue:`26587`). @@ -1404,7 +1404,7 @@ socket ------ The :func:`~socket.socket.ioctl` function now supports the -:data:`~socket.SIO_LOOPBACK_FAST_PATH` control code. +:const:`~socket.SIO_LOOPBACK_FAST_PATH` control code. (Contributed by Daniel Stokes in :issue:`26536`.) The :meth:`~socket.socket.getsockopt` constants ``SO_DOMAIN``, @@ -1416,7 +1416,7 @@ The :meth:`~socket.socket.setsockopt` now supports the (Contributed by Christian Heimes in :issue:`27744`.) The socket module now supports the address family -:data:`~socket.AF_ALG` to interface with Linux Kernel crypto API. ``ALG_*``, +:const:`~socket.AF_ALG` to interface with Linux Kernel crypto API. ``ALG_*``, ``SOL_ALG`` and :meth:`~socket.socket.sendmsg_afalg` were added. (Contributed by Christian Heimes in :issue:`27744` with support from Victor Stinner.) @@ -1520,7 +1520,7 @@ rather than the version that is being emulated for the process telnetlib --------- -:class:`~telnetlib.Telnet` is now a context manager (contributed by +:class:`!telnetlib.Telnet` is now a context manager (contributed by Stéphane Wirtel in :issue:`25485`). @@ -1822,7 +1822,7 @@ Optimizations up to 80% faster. (Contributed by Josh Snider in :issue:`26574`). 
* Allocator functions of the :c:func:`PyMem_Malloc` domain - (:c:data:`PYMEM_DOMAIN_MEM`) now use the :ref:`pymalloc memory allocator + (:c:macro:`PYMEM_DOMAIN_MEM`) now use the :ref:`pymalloc memory allocator ` instead of :c:func:`malloc` function of the C library. The pymalloc allocator is optimized for objects smaller or equal to 512 bytes with a short lifetime, and use :c:func:`malloc` for larger memory blocks. @@ -1874,8 +1874,8 @@ Build and C API Changes (Original patch by Alecsandru Patrascu of Intel in :issue:`26359`.) * The :term:`GIL ` must now be held when allocator - functions of :c:data:`PYMEM_DOMAIN_OBJ` (ex: :c:func:`PyObject_Malloc`) and - :c:data:`PYMEM_DOMAIN_MEM` (ex: :c:func:`PyMem_Malloc`) domains are called. + functions of :c:macro:`PYMEM_DOMAIN_OBJ` (ex: :c:func:`PyObject_Malloc`) and + :c:macro:`PYMEM_DOMAIN_MEM` (ex: :c:func:`PyMem_Malloc`) domains are called. * New :c:func:`Py_FinalizeEx` API which indicates if flushing buffered data failed. @@ -2010,7 +2010,7 @@ been deprecated in previous versions of Python in favour of :meth:`importlib.abc.Loader.exec_module`. The :class:`importlib.machinery.WindowsRegistryFinder` class is now -deprecated. As of 3.6.0, it is still added to :attr:`sys.meta_path` by +deprecated. As of 3.6.0, it is still added to :data:`sys.meta_path` by default (on Windows), but this may change in future releases. os @@ -2049,7 +2049,7 @@ of OpenSSL. Other features are deprecated in favor of a different API. tkinter ~~~~~~~ -The :mod:`tkinter.tix` module is now deprecated. :mod:`tkinter` users +The :mod:`!tkinter.tix` module is now deprecated. :mod:`tkinter` users should use :mod:`tkinter.ttk` instead. .. _whatsnew36-venv: @@ -2066,9 +2066,9 @@ environment. (Contributed by Brett Cannon in :issue:`25154`.) Deprecated functions and types of the C API ------------------------------------------- -Undocumented functions :c:func:`PyUnicode_AsEncodedObject`, -:c:func:`PyUnicode_AsDecodedObject`, :c:func:`PyUnicode_AsEncodedUnicode` -and :c:func:`PyUnicode_AsDecodedUnicode` are deprecated now. +Undocumented functions :c:func:`!PyUnicode_AsEncodedObject`, +:c:func:`!PyUnicode_AsDecodedObject`, :c:func:`!PyUnicode_AsEncodedUnicode` +and :c:func:`!PyUnicode_AsDecodedUnicode` are deprecated now. Use the :ref:`generic codec based API ` instead. @@ -2180,12 +2180,12 @@ Changes in the Python API now raises :exc:`ValueError` for out-of-range values, rather than returning :const:`None`. See :issue:`20059`. -* The :mod:`imp` module now raises a :exc:`DeprecationWarning` instead of +* The :mod:`!imp` module now raises a :exc:`DeprecationWarning` instead of :exc:`PendingDeprecationWarning`. * The following modules have had missing APIs added to their :attr:`__all__` attributes to match the documented APIs: - :mod:`calendar`, :mod:`cgi`, :mod:`csv`, + :mod:`calendar`, :mod:`!cgi`, :mod:`csv`, :mod:`~xml.etree.ElementTree`, :mod:`enum`, :mod:`fileinput`, :mod:`ftplib`, :mod:`logging`, :mod:`mailbox`, :mod:`mimetypes`, :mod:`optparse`, :mod:`plistlib`, :mod:`smtpd`, @@ -2211,7 +2211,7 @@ Changes in the Python API the exception will stop a single-threaded server. (Contributed by Martin Panter in :issue:`23430`.) -* :func:`spwd.getspnam` now raises a :exc:`PermissionError` instead of +* :func:`!spwd.getspnam` now raises a :exc:`PermissionError` instead of :exc:`KeyError` if the user doesn't have privileges. * The :meth:`socket.socket.close` method now raises an exception if @@ -2274,7 +2274,7 @@ Changes in the Python API :class:`~collections.OrderedDict`. 
(Contributed by Steve Holden in :issue:`27842`.) -* The :const:`crypt.METHOD_CRYPT` will no longer be added to ``crypt.methods`` +* The :const:`!crypt.METHOD_CRYPT` will no longer be added to ``crypt.methods`` if unsupported by the platform. (Contributed by Victor Stinner in :issue:`25287`.) @@ -2455,7 +2455,7 @@ query parameter separators in :func:`urllib.parse.parse_qs` and :func:`urllib.parse.parse_qsl`. Due to security concerns, and to conform with newer W3C recommendations, this has been changed to allow only a single separator key, with ``&`` as the default. This change also affects -:func:`cgi.parse` and :func:`cgi.parse_multipart` as they use the affected +:func:`!cgi.parse` and :func:`!cgi.parse_multipart` as they use the affected functions internally. For more details, please see their respective documentation. (Contributed by Adam Goldschmidt, Senthil Kumaran and Ken Jin in :issue:`42967`.) diff --git a/Doc/whatsnew/3.7.rst b/Doc/whatsnew/3.7.rst index df3b636cb9ec466..a7d5c3db6ddcb2b 100644 --- a/Doc/whatsnew/3.7.rst +++ b/Doc/whatsnew/3.7.rst @@ -848,10 +848,10 @@ alternative to script path. (Contributed by Sanyam Khurana in :issue:`21862`.) crypt ----- -The :mod:`crypt` module now supports the Blowfish hashing method. +The :mod:`!crypt` module now supports the Blowfish hashing method. (Contributed by Serhiy Storchaka in :issue:`31664`.) -The :func:`~crypt.mksalt` function now allows specifying the number of rounds +The :func:`~!crypt.mksalt` function now allows specifying the number of rounds for hashing. (Contributed by Serhiy Storchaka in :issue:`31702`.) @@ -1133,7 +1133,7 @@ The MIME type of .bmp has been changed from ``'image/x-ms-bmp'`` to msilib ------ -The new :meth:`Database.Close() ` method can be used +The new :meth:`!Database.Close()` method can be used to close the :abbr:`MSI` database. (Contributed by Berker Peksag in :issue:`20486`.) @@ -1280,13 +1280,13 @@ This function should be used instead of :func:`os.close` for better compatibility across platforms. (Contributed by Christian Heimes in :issue:`32454`.) -The :mod:`socket` module now exposes the :data:`socket.TCP_CONGESTION` -(Linux 2.6.13), :data:`socket.TCP_USER_TIMEOUT` (Linux 2.6.37), and -:data:`socket.TCP_NOTSENT_LOWAT` (Linux 3.12) constants. +The :mod:`socket` module now exposes the :const:`socket.TCP_CONGESTION` +(Linux 2.6.13), :const:`socket.TCP_USER_TIMEOUT` (Linux 2.6.37), and +:const:`socket.TCP_NOTSENT_LOWAT` (Linux 3.12) constants. (Contributed by Omar Sandoval in :issue:`26273` and Nathaniel J. Smith in :issue:`29728`.) -Support for :data:`socket.AF_VSOCK` sockets has been added to allow +Support for :const:`socket.AF_VSOCK` sockets has been added to allow communication between virtual machines and their hosts. (Contributed by Cathy Avery in :issue:`27584`.) @@ -1394,7 +1394,7 @@ subprocess The :func:`subprocess.run` function accepts the new *capture_output* keyword argument. When true, stdout and stderr will be captured. -This is equivalent to passing :data:`subprocess.PIPE` as *stdout* and +This is equivalent to passing :const:`subprocess.PIPE` as *stdout* and *stderr* arguments. (Contributed by Bo Bayles in :issue:`32102`.) @@ -1453,12 +1453,12 @@ time New clock identifiers have been added: -* :data:`time.CLOCK_BOOTTIME` (Linux): Identical to - :data:`time.CLOCK_MONOTONIC`, except it also includes any time that the +* :const:`time.CLOCK_BOOTTIME` (Linux): Identical to + :const:`time.CLOCK_MONOTONIC`, except it also includes any time that the system is suspended. 
-* :data:`time.CLOCK_PROF` (FreeBSD, NetBSD and OpenBSD): High-resolution +* :const:`time.CLOCK_PROF` (FreeBSD, NetBSD and OpenBSD): High-resolution per-process CPU timer. -* :data:`time.CLOCK_UPTIME` (FreeBSD, OpenBSD): Time whose absolute value is +* :const:`time.CLOCK_UPTIME` (FreeBSD, OpenBSD): Time whose absolute value is the time the system has been running and not suspended, providing accurate uptime measurement. @@ -1547,7 +1547,7 @@ adding ``~`` to the set of characters that are never quoted by default. uu -- -The :func:`uu.encode` function now accepts an optional *backtick* +The :func:`!uu.encode` function now accepts an optional *backtick* keyword argument. When it's true, zeros are represented by ``'`'`` instead of spaces. (Contributed by Xiang Zhang in :issue:`30103`.) @@ -1580,13 +1580,13 @@ The initialization of the default warnings filters has changed as follows: * warnings filters enabled via the command line or the environment now have the following order of precedence: - * the ``BytesWarning`` filter for :option:`-b` (or ``-bb``) - * any filters specified with the :option:`-W` option - * any filters specified with the :envvar:`PYTHONWARNINGS` environment - variable - * any other CPython specific filters (e.g. the ``default`` filter added - for the new ``-X dev`` mode) - * any implicit filters defined directly by the warnings machinery + * the ``BytesWarning`` filter for :option:`-b` (or ``-bb``) + * any filters specified with the :option:`-W` option + * any filters specified with the :envvar:`PYTHONWARNINGS` environment + variable + * any other CPython specific filters (e.g. the ``default`` filter added + for the new ``-X dev`` mode) + * any implicit filters defined directly by the warnings machinery * in :ref:`CPython debug builds `, all warnings are now displayed by default (the implicit filter list is empty) @@ -1674,10 +1674,10 @@ The new :c:func:`import__find__load__start` and module imports. (Contributed by Christian Heimes in :issue:`31574`.) -The fields :c:member:`name` and :c:member:`doc` of structures +The fields :c:member:`!name` and :c:member:`!doc` of structures :c:type:`PyMemberDef`, :c:type:`PyGetSetDef`, :c:type:`PyStructSequence_Field`, :c:type:`PyStructSequence_Desc`, -and :c:type:`wrapperbase` are now of type ``const char *`` rather of +and :c:struct:`wrapperbase` are now of type ``const char *`` rather of ``char *``. (Contributed by Serhiy Storchaka in :issue:`28761`.) The result of :c:func:`PyUnicode_AsUTF8AndSize` and :c:func:`PyUnicode_AsUTF8` @@ -1940,8 +1940,8 @@ Deprecated Python modules, functions and methods aifc ---- -:func:`aifc.openfp` has been deprecated and will be removed in Python 3.9. -Use :func:`aifc.open` instead. +:func:`!aifc.openfp` has been deprecated and will be removed in Python 3.9. +Use :func:`!aifc.open` instead. (Contributed by Brian Curtin in :issue:`31985`.) @@ -2004,11 +2004,11 @@ importlib --------- Methods -:meth:`MetaPathFinder.find_module() ` +:meth:`MetaPathFinder.find_module() ` (replaced by :meth:`MetaPathFinder.find_spec() `) and -:meth:`PathEntryFinder.find_loader() ` +:meth:`PathEntryFinder.find_loader() ` (replaced by :meth:`PathEntryFinder.find_spec() `) both deprecated in Python 3.4 now emit :exc:`DeprecationWarning`. @@ -2061,8 +2061,8 @@ ssl sunau ----- -:func:`sunau.openfp` has been deprecated and will be removed in Python 3.9. -Use :func:`sunau.open` instead. +:func:`!sunau.openfp` has been deprecated and will be removed in Python 3.9. +Use :func:`!sunau.open` instead. 
(Contributed by Brian Curtin in :issue:`31985`.) @@ -2495,12 +2495,12 @@ either in embedding applications, or in CPython itself. :issue:`22257`, and further updated by Nick, Eric, and Victor Stinner in a number of other issues). Some known details affected: -* :c:func:`PySys_AddWarnOptionUnicode` is not currently usable by embedding +* :c:func:`!PySys_AddWarnOptionUnicode` is not currently usable by embedding applications due to the requirement to create a Unicode object prior to - calling ``Py_Initialize``. Use :c:func:`PySys_AddWarnOption` instead. + calling ``Py_Initialize``. Use :c:func:`!PySys_AddWarnOption` instead. * warnings filters added by an embedding application with - :c:func:`PySys_AddWarnOption` should now more consistently take precedence + :c:func:`!PySys_AddWarnOption` should now more consistently take precedence over the default filters set by the interpreter Due to changes in the way the default warnings filters are configured, @@ -2567,7 +2567,7 @@ query parameter separators in :func:`urllib.parse.parse_qs` and :func:`urllib.parse.parse_qsl`. Due to security concerns, and to conform with newer W3C recommendations, this has been changed to allow only a single separator key, with ``&`` as the default. This change also affects -:func:`cgi.parse` and :func:`cgi.parse_multipart` as they use the affected +:func:`!cgi.parse` and :func:`!cgi.parse_multipart` as they use the affected functions internally. For more details, please see their respective documentation. (Contributed by Adam Goldschmidt, Senthil Kumaran and Ken Jin in :issue:`42967`.) diff --git a/Doc/whatsnew/3.8.rst b/Doc/whatsnew/3.8.rst index 37a6cf24e545629..4574702b1a600f7 100644 --- a/Doc/whatsnew/3.8.rst +++ b/Doc/whatsnew/3.8.rst @@ -123,7 +123,7 @@ There is a new function parameter syntax ``/`` to indicate that some function parameters must be specified positionally and cannot be used as keyword arguments. This is the same notation shown by ``help()`` for C functions annotated with Larry Hastings' -:ref:`Argument Clinic ` tool. +`Argument Clinic `__ tool. In the following example, parameters *a* and *b* are positional-only, while *c* or *d* can be positional or keyword, and *e* or *f* are @@ -404,7 +404,7 @@ Other Language Changes or :meth:`~object.__complex__` is not available. (Contributed by Serhiy Storchaka in :issue:`20092`.) -* Added support of ``\N{name}`` escapes in :mod:`regular expressions `:: +* Added support of :samp:`\\N\\{{name}\\}` escapes in :mod:`regular expressions `:: >>> notice = 'Copyright © 2019' >>> copyright_year_pattern = re.compile(r'\N{copyright sign}\s*(\d{4})') @@ -947,7 +947,7 @@ This made it difficult to update, experiment with, or teach the various logging configuration options using the interactive prompt or a Jupyter notebook. -(Suggested by Raymond Hettinger, implemented by Dong-hee Na, and +(Suggested by Raymond Hettinger, implemented by Donghee Na, and reviewed by Vinay Sajip in :issue:`33897`.) @@ -1305,7 +1305,7 @@ Zackery Spytz in :issue:`25451`.) time ---- -Added new clock :data:`~time.CLOCK_UPTIME_RAW` for macOS 10.12. +Added new clock :const:`~time.CLOCK_UPTIME_RAW` for macOS 10.12. (Contributed by Joannah Nanjekye in :issue:`35702`.) 
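To illustrate the positional-only ``/`` marker covered in the hunk above, here is a hypothetical function (a sketch, not taken from the patch)::

    def divide(a, b, /, *, rounding=None):
        # a and b are positional-only; rounding is keyword-only.
        if rounding == 'floor':
            return a // b
        return a / b

    print(divide(7, 2))                     # 3.5
    print(divide(7, 2, rounding='floor'))   # 3
    # divide(a=7, b=2) would raise TypeError because a and b are positional-only.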
@@ -1574,12 +1574,12 @@ Build and C API Changes * :c:func:`Py_INCREF`, :c:func:`Py_DECREF` * :c:func:`Py_XINCREF`, :c:func:`Py_XDECREF` * :c:func:`PyObject_INIT`, :c:func:`PyObject_INIT_VAR` - * Private functions: :c:func:`_PyObject_GC_TRACK`, - :c:func:`_PyObject_GC_UNTRACK`, :c:func:`_Py_Dealloc` + * Private functions: :c:func:`!_PyObject_GC_TRACK`, + :c:func:`!_PyObject_GC_UNTRACK`, :c:func:`!_Py_Dealloc` (Contributed by Victor Stinner in :issue:`35059`.) -* The :c:func:`PyByteArray_Init` and :c:func:`PyByteArray_Fini` functions have +* The :c:func:`!PyByteArray_Init` and :c:func:`!PyByteArray_Fini` functions have been removed. They did nothing since Python 2.7.4 and Python 3.2.0, were excluded from the limited API (stable ABI), and were not documented. (Contributed by Victor Stinner in :issue:`35713`.) @@ -1628,7 +1628,7 @@ Build and C API Changes parameter for indicating the number of positional-only arguments. (Contributed by Pablo Galindo in :issue:`37221`.) -* :c:func:`Py_SetPath` now sets :data:`sys.executable` to the program full +* :c:func:`!Py_SetPath` now sets :data:`sys.executable` to the program full path (:c:func:`Py_GetProgramFullPath`) rather than to the program name (:c:func:`Py_GetProgramName`). (Contributed by Victor Stinner in :issue:`38234`.) @@ -1653,7 +1653,7 @@ Deprecated deprecated and will be prohibited in Python 3.9. (Contributed by Elvis Pranskevichus in :issue:`34075`.) -* The :meth:`__getitem__` methods of :class:`xml.dom.pulldom.DOMEventStream`, +* The :meth:`~object.__getitem__` methods of :class:`xml.dom.pulldom.DOMEventStream`, :class:`wsgiref.util.FileWrapper` and :class:`fileinput.FileInput` have been deprecated. @@ -1714,7 +1714,7 @@ Deprecated * The :meth:`~threading.Thread.isAlive()` method of :class:`threading.Thread` has been deprecated. - (Contributed by Dong-hee Na in :issue:`35283`.) + (Contributed by Donghee Na in :issue:`35283`.) * Many builtin and extension functions that take integer arguments will now emit a deprecation warning for :class:`~decimal.Decimal`\ s, @@ -1774,7 +1774,7 @@ The following features and APIs have been removed from Python 3.8: to help eliminate confusion as to what Python interpreter the ``pyvenv`` script is tied to. (Contributed by Brett Cannon in :issue:`25427`.) -* ``parse_qs``, ``parse_qsl``, and ``escape`` are removed from the :mod:`cgi` +* ``parse_qs``, ``parse_qsl``, and ``escape`` are removed from the :mod:`!cgi` module. They are deprecated in Python 3.2 or older. They should be imported from the ``urllib.parse`` and ``html`` modules instead. @@ -1839,18 +1839,18 @@ Changes in Python behavior classes will affect their string representation. (Contributed by Serhiy Storchaka in :issue:`36793`.) -* On AIX, :attr:`sys.platform` doesn't contain the major version anymore. +* On AIX, :data:`sys.platform` doesn't contain the major version anymore. It is always ``'aix'``, instead of ``'aix3'`` .. ``'aix7'``. Since older Python versions include the version number, so it is recommended to always use ``sys.platform.startswith('aix')``. (Contributed by M. Felt in :issue:`36588`.) -* :c:func:`PyEval_AcquireLock` and :c:func:`PyEval_AcquireThread` now +* :c:func:`!PyEval_AcquireLock` and :c:func:`!PyEval_AcquireThread` now terminate the current thread if called while the interpreter is finalizing, making them consistent with :c:func:`PyEval_RestoreThread`, :c:func:`Py_END_ALLOW_THREADS`, and :c:func:`PyGILState_Ensure`. 
If this - behavior is not desired, guard the call by checking :c:func:`_Py_IsFinalizing` - or :c:func:`sys.is_finalizing`. + behavior is not desired, guard the call by checking :c:func:`!_Py_IsFinalizing` + or :func:`sys.is_finalizing`. (Contributed by Joannah Nanjekye in :issue:`36475`.) @@ -2021,7 +2021,7 @@ Changes in the C API *cf_flags*. (Contributed by Guido van Rossum in :issue:`35766`.) -* The :c:func:`PyEval_ReInitThreads` function has been removed from the C API. +* The :c:func:`!PyEval_ReInitThreads` function has been removed from the C API. It should not be called explicitly: use :c:func:`PyOS_AfterFork_Child` instead. (Contributed by Victor Stinner in :issue:`36728`.) @@ -2061,8 +2061,8 @@ Changes in the C API * Remove :c:macro:`Py_INCREF` on the type object after allocating an instance - if any. - This may happen after calling :c:func:`PyObject_New`, - :c:func:`PyObject_NewVar`, :c:func:`PyObject_GC_New`, + This may happen after calling :c:macro:`PyObject_New`, + :c:macro:`PyObject_NewVar`, :c:func:`PyObject_GC_New`, :c:func:`PyObject_GC_NewVar`, or any other custom allocator that uses :c:func:`PyObject_Init` or :c:func:`PyObject_INIT`. @@ -2116,12 +2116,12 @@ Changes in the C API extension types across feature releases, anymore. A :c:type:`PyTypeObject` exported by a third-party extension module is supposed to have all the slots expected in the current Python version, including - :c:member:`~PyTypeObject.tp_finalize` (:const:`Py_TPFLAGS_HAVE_FINALIZE` + :c:member:`~PyTypeObject.tp_finalize` (:c:macro:`Py_TPFLAGS_HAVE_FINALIZE` is not checked anymore before reading :c:member:`~PyTypeObject.tp_finalize`). (Contributed by Antoine Pitrou in :issue:`32388`.) -* The functions :c:func:`PyNode_AddChild` and :c:func:`PyParser_AddToken` now accept +* The functions :c:func:`!PyNode_AddChild` and :c:func:`!PyParser_AddToken` now accept two additional ``int`` arguments *end_lineno* and *end_col_offset*. * The :file:`libpython38.a` file to allow MinGW tools to link directly against @@ -2229,7 +2229,7 @@ The benchmarks were measured on an `Intel® Coreâ„¢ i7-4960HQ processor `_ running the macOS 64-bit builds found at -`python.org `_. +`python.org `_. The benchmark script displays timings in nanoseconds. @@ -2251,7 +2251,7 @@ query parameter separators in :func:`urllib.parse.parse_qs` and :func:`urllib.parse.parse_qsl`. Due to security concerns, and to conform with newer W3C recommendations, this has been changed to allow only a single separator key, with ``&`` as the default. This change also affects -:func:`cgi.parse` and :func:`cgi.parse_multipart` as they use the affected +:func:`!cgi.parse` and :func:`!cgi.parse_multipart` as they use the affected functions internally. For more details, please see their respective documentation. (Contributed by Adam Goldschmidt, Senthil Kumaran and Ken Jin in :issue:`42967`.) diff --git a/Doc/whatsnew/3.9.rst b/Doc/whatsnew/3.9.rst index 624e71f9254c457..cb2482ee48d7fa2 100644 --- a/Doc/whatsnew/3.9.rst +++ b/Doc/whatsnew/3.9.rst @@ -2,8 +2,6 @@ What's New In Python 3.9 **************************** -:Release: |release| -:Date: |today| :Editor: Åukasz Langa .. Rules for maintenance: @@ -46,7 +44,6 @@ This article explains the new features in Python 3.9, compared to 3.8. Python 3.9 was released on October 5, 2020. - For full details, see the :ref:`changelog `. .. 
seealso:: @@ -84,12 +81,12 @@ Interpreter improvements: * a number of Python builtins (range, tuple, set, frozenset, list, dict) are now sped up using :pep:`590` vectorcall; * garbage collection does not block on resurrected objects; -* a number of Python modules (:mod:`_abc`, :mod:`audioop`, :mod:`_bz2`, - :mod:`_codecs`, :mod:`_contextvars`, :mod:`_crypt`, :mod:`_functools`, +* a number of Python modules (:mod:`_abc`, :mod:`!audioop`, :mod:`_bz2`, + :mod:`_codecs`, :mod:`_contextvars`, :mod:`!_crypt`, :mod:`_functools`, :mod:`_json`, :mod:`_locale`, :mod:`math`, :mod:`operator`, :mod:`resource`, :mod:`time`, :mod:`_weakref`) now use multiphase initialization as defined by PEP 489; -* a number of standard library modules (:mod:`audioop`, :mod:`ast`, :mod:`grp`, +* a number of standard library modules (:mod:`!audioop`, :mod:`ast`, :mod:`grp`, :mod:`_hashlib`, :mod:`pwd`, :mod:`_posixsubprocess`, :mod:`random`, :mod:`select`, :mod:`struct`, :mod:`termios`, :mod:`zlib`) are now using the stable ABI defined by PEP 384. @@ -417,7 +414,7 @@ datetime The :meth:`~datetime.date.isocalendar()` of :class:`datetime.date` and :meth:`~datetime.datetime.isocalendar()` of :class:`datetime.datetime` methods now returns a :func:`~collections.namedtuple` instead of a :class:`tuple`. -(Contributed by Dong-hee Na in :issue:`24416`.) +(Contributed by Donghee Na in :issue:`24416`.) distutils --------- @@ -429,16 +426,16 @@ digests. It skips MD5 on platforms that block MD5 digest. fcntl ----- -Added constants :data:`~fcntl.F_OFD_GETLK`, :data:`~fcntl.F_OFD_SETLK` -and :data:`~fcntl.F_OFD_SETLKW`. -(Contributed by Dong-hee Na in :issue:`38602`.) +Added constants :const:`~fcntl.F_OFD_GETLK`, :const:`~fcntl.F_OFD_SETLK` +and :const:`~fcntl.F_OFD_SETLKW`. +(Contributed by Donghee Na in :issue:`38602`.) ftplib ------- :class:`~ftplib.FTP` and :class:`~ftplib.FTP_TLS` now raise a :class:`ValueError` if the given timeout for their constructor is zero to prevent the creation of -a non-blocking socket. (Contributed by Dong-hee Na in :issue:`39259`.) +a non-blocking socket. (Contributed by Donghee Na in :issue:`39259`.) gc -- @@ -470,7 +467,7 @@ http ---- HTTP status codes ``103 EARLY_HINTS``, ``418 IM_A_TEAPOT`` and ``425 TOO_EARLY`` are added to -:class:`http.HTTPStatus`. (Contributed by Dong-hee Na in :issue:`39509` and Ross Rhodes in :issue:`39507`.) +:class:`http.HTTPStatus`. (Contributed by Donghee Na in :issue:`39509` and Ross Rhodes in :issue:`39507`.) IDLE and idlelib ---------------- @@ -511,14 +508,14 @@ an optional *timeout* parameter for their constructors. Also, the :meth:`~imaplib.IMAP4.open` method now has an optional *timeout* parameter with this change. The overridden methods of :class:`~imaplib.IMAP4_SSL` and :class:`~imaplib.IMAP4_stream` were applied to this change. -(Contributed by Dong-hee Na in :issue:`38615`.) +(Contributed by Donghee Na in :issue:`38615`.) :meth:`imaplib.IMAP4.unselect` is added. :meth:`imaplib.IMAP4.unselect` frees server's resources associated with the selected mailbox and returns the server to the authenticated state. This command performs the same actions as :meth:`imaplib.IMAP4.close`, except that no messages are permanently removed from the currently -selected mailbox. (Contributed by Dong-hee Na in :issue:`40375`.) +selected mailbox. (Contributed by Donghee Na in :issue:`40375`.) importlib --------- @@ -588,18 +585,18 @@ queue. 
nntplib ------- -:class:`~nntplib.NNTP` and :class:`~nntplib.NNTP_SSL` now raise a :class:`ValueError` +:class:`~!nntplib.NNTP` and :class:`~!nntplib.NNTP_SSL` now raise a :class:`ValueError` if the given timeout for their constructor is zero to prevent the creation of -a non-blocking socket. (Contributed by Dong-hee Na in :issue:`39259`.) +a non-blocking socket. (Contributed by Donghee Na in :issue:`39259`.) os -- -Added :data:`~os.CLD_KILLED` and :data:`~os.CLD_STOPPED` for :attr:`si_code`. -(Contributed by Dong-hee Na in :issue:`38493`.) +Added :const:`~os.CLD_KILLED` and :const:`~os.CLD_STOPPED` for :attr:`si_code`. +(Contributed by Donghee Na in :issue:`38493`.) Exposed the Linux-specific :func:`os.pidfd_open` (:issue:`38692`) and -:data:`os.P_PIDFD` (:issue:`38713`) for process management with file +:const:`os.P_PIDFD` (:issue:`38713`) for process management with file descriptors. The :func:`os.unsetenv` function is now also available on Windows. @@ -631,7 +628,7 @@ poplib :class:`~poplib.POP3` and :class:`~poplib.POP3_SSL` now raise a :class:`ValueError` if the given timeout for their constructor is zero to prevent the creation of -a non-blocking socket. (Contributed by Dong-hee Na in :issue:`39259`.) +a non-blocking socket. (Contributed by Donghee Na in :issue:`39259`.) pprint ------ @@ -663,19 +660,19 @@ smtplib :class:`~smtplib.SMTP` and :class:`~smtplib.SMTP_SSL` now raise a :class:`ValueError` if the given timeout for their constructor is zero to prevent the creation of -a non-blocking socket. (Contributed by Dong-hee Na in :issue:`39259`.) +a non-blocking socket. (Contributed by Donghee Na in :issue:`39259`.) :class:`~smtplib.LMTP` constructor now has an optional *timeout* parameter. -(Contributed by Dong-hee Na in :issue:`39329`.) +(Contributed by Donghee Na in :issue:`39329`.) socket ------ -The :mod:`socket` module now exports the :data:`~socket.CAN_RAW_JOIN_FILTERS` +The :mod:`socket` module now exports the :const:`~socket.CAN_RAW_JOIN_FILTERS` constant on Linux 4.1 and greater. (Contributed by Stefan Tatschner and Zackery Spytz in :issue:`25780`.) -The socket module now supports the :data:`~socket.CAN_J1939` protocol on +The socket module now supports the :const:`~socket.CAN_J1939` protocol on platforms that support it. (Contributed by Karl Ding in :issue:`40291`.) The socket module now has the :func:`socket.send_fds` and @@ -694,13 +691,13 @@ which has nanosecond resolution, rather than sys --- -Added a new :attr:`sys.platlibdir` attribute: name of the platform-specific +Added a new :data:`sys.platlibdir` attribute: name of the platform-specific library directory. It is used to build the path of standard library and the paths of installed extension modules. It is equal to ``"lib"`` on most platforms. On Fedora and SuSE, it is equal to ``"lib64"`` on 64-bit platforms. (Contributed by Jan Matějek, Matěj Cepl, Charalampos Stratakis and Victor Stinner in :issue:`1294959`.) -Previously, :attr:`sys.stderr` was block-buffered when non-interactive. Now +Previously, :data:`sys.stderr` was block-buffered when non-interactive. Now ``stderr`` defaults to always being line-buffered. (Contributed by Jendrik Seipp in :issue:`13601`.) @@ -779,7 +776,7 @@ Optimizations * A number of Python builtins (:class:`range`, :class:`tuple`, :class:`set`, :class:`frozenset`, :class:`list`, :class:`dict`) are now sped up by using :pep:`590` vectorcall protocol. - (Contributed by Dong-hee Na, Mark Shannon, Jeroen Demeyer and Petr Viktorin in :issue:`37207`.)
+ (Contributed by Donghee Na, Mark Shannon, Jeroen Demeyer and Petr Viktorin in :issue:`37207`.) * Optimized :func:`~set.difference_update` for the case when the other set is much larger than the base set. @@ -793,7 +790,7 @@ Optimizations * :term:`floor division` of float operation now has a better performance. Also the message of :exc:`ZeroDivisionError` for this operation is updated. - (Contributed by Dong-hee Na in :issue:`39434`.) + (Contributed by Donghee Na in :issue:`39434`.) * Decoding short ASCII strings with UTF-8 and ascii codecs is now about 15% faster. (Contributed by Inada Naoki in :issue:`37348`.) @@ -851,7 +848,7 @@ in nanoseconds. The benchmarks were measured on an `Intel® Core™ i7-4960HQ processor `_ running the macOS 64-bit builds found at -`python.org `_. +`python.org `_. Deprecated @@ -872,9 +869,9 @@ Deprecated users can leverage the Abstract Syntax Tree (AST) generation and compilation stage, using the :mod:`ast` module. -* The Public C API functions :c:func:`PyParser_SimpleParseStringFlags`, - :c:func:`PyParser_SimpleParseStringFlagsFilename`, - :c:func:`PyParser_SimpleParseFileFlags` and :c:func:`PyNode_Compile` +* The Public C API functions :c:func:`!PyParser_SimpleParseStringFlags`, - :c:func:`!PyParser_SimpleParseStringFlagsFilename`, + :c:func:`!PyParser_SimpleParseStringFlagsFilename`, + :c:func:`!PyParser_SimpleParseFileFlags` and :c:func:`!PyNode_Compile` are deprecated and will be removed in Python 3.10 together with the old parser. * Using :data:`NotImplemented` in a boolean context has been deprecated, @@ -925,10 +922,10 @@ Deprecated (Contributed by Batuhan Taskaya in :issue:`39639` and :issue:`39969` and Serhiy Storchaka in :issue:`39988`.) -* The :c:func:`PyEval_InitThreads` and :c:func:`PyEval_ThreadsInitialized` +* The :c:func:`!PyEval_InitThreads` and :c:func:`!PyEval_ThreadsInitialized` functions are now deprecated and will be removed in Python 3.11. Calling - :c:func:`PyEval_InitThreads` now does nothing. The :term:`GIL` is initialized - by :c:func:`Py_Initialize()` since Python 3.7. + :c:func:`!PyEval_InitThreads` now does nothing. The :term:`GIL` is initialized + by :c:func:`Py_Initialize` since Python 3.7. (Contributed by Victor Stinner in :issue:`39877`.) * Passing ``None`` as the first argument to the :func:`shlex.split` function @@ -937,10 +934,10 @@ Deprecated * :func:`smtpd.MailmanProxy` is now deprecated as it is unusable without an external module, ``mailman``. (Contributed by Samuel Colvin in :issue:`35800`.) -* The :mod:`lib2to3` module now emits a :exc:`PendingDeprecationWarning`. +* The :mod:`!lib2to3` module now emits a :exc:`PendingDeprecationWarning`. Python 3.9 switched to a PEG parser (see :pep:`617`), and Python 3.10 may include new language syntax that is not parsable by lib2to3's LL(1) parser. - The ``lib2to3`` module may be removed from the standard library in a future + The :mod:`!lib2to3` module may be removed from the standard library in a future Python version. Consider third-party alternatives such as `LibCST`_ or `parso`_. (Contributed by Carl Meyer in :issue:`40360`.) @@ -958,12 +955,12 @@ Removed * The erroneous version at :data:`unittest.mock.__version__` has been removed. -* :class:`nntplib.NNTP`: ``xpath()`` and ``xgtitle()`` methods have been removed. +* :class:`!nntplib.NNTP`: ``xpath()`` and ``xgtitle()`` methods have been removed. These methods are deprecated since Python 3.3. Generally, these extensions are not supported or not enabled by NNTP server administrators.
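As the deprecation note above says, :c:func:`Py_Initialize` has created the GIL itself since Python 3.7, so embedders simply drop the ``PyEval_InitThreads()`` call. A minimal embedding sketch (error handling omitted) of the intended replacement::

   #include <Python.h>

   int
   main(void)
   {
       /* Py_Initialize() has created the GIL itself since Python 3.7, so the
        * PyEval_InitThreads() call that older embedders added is omitted. */
       Py_Initialize();

       /* Release the GIL so other native threads can use
        * PyGILState_Ensure()/PyGILState_Release() while this thread works. */
       PyThreadState *tstate = PyEval_SaveThread();
       /* ... spawn threads, do native work ... */
       PyEval_RestoreThread(tstate);

       return Py_FinalizeEx() < 0 ? 1 : 0;
   }
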
- For ``xgtitle()``, please use :meth:`nntplib.NNTP.descriptions` or - :meth:`nntplib.NNTP.description` instead. - (Contributed by Dong-hee Na in :issue:`39366`.) + For ``xgtitle()``, please use :meth:`!nntplib.NNTP.descriptions` or + :meth:`!nntplib.NNTP.description` instead. + (Contributed by Donghee Na in :issue:`39366`.) * :class:`array.array`: ``tostring()`` and ``fromstring()`` methods have been removed. They were aliases to ``tobytes()`` and ``frombytes()``, deprecated @@ -996,7 +993,7 @@ Removed * The :meth:`~threading.Thread.isAlive()` method of :class:`threading.Thread` has been removed. It was deprecated since Python 3.8. Use :meth:`~threading.Thread.is_alive()` instead. - (Contributed by Dong-hee Na in :issue:`37804`.) + (Contributed by Donghee Na in :issue:`37804`.) * Methods ``getchildren()`` and ``getiterator()`` of classes :class:`~xml.etree.ElementTree.ElementTree` and @@ -1086,7 +1083,7 @@ Changes in the Python API ``__VENV_PROMPT__`` is set to ``""``. * The :meth:`select.epoll.unregister` method no longer ignores the - :data:`~errno.EBADF` error. + :const:`~errno.EBADF` error. (Contributed by Victor Stinner in :issue:`39239`.) * The *compresslevel* parameter of :class:`bz2.BZ2File` became keyword-only, @@ -1117,9 +1114,9 @@ Changes in the Python API ``PyCF_ALLOW_TOP_LEVEL_AWAIT`` was clashing with ``CO_FUTURE_DIVISION``. (Contributed by Batuhan Taskaya in :issue:`39562`) -* ``array('u')`` now uses ``wchar_t`` as C type instead of ``Py_UNICODE``. +* ``array('u')`` now uses :c:type:`wchar_t` as C type instead of ``Py_UNICODE``. This change doesn't affect to its behavior because ``Py_UNICODE`` is alias - of ``wchar_t`` since Python 3.3. + of :c:type:`wchar_t` since Python 3.3. (Contributed by Inada Naoki in :issue:`34538`.) * The :func:`logging.getLogger` API now returns the root logger when passed @@ -1228,8 +1225,8 @@ Build Changes ============= * Added ``--with-platlibdir`` option to the ``configure`` script: name of the - platform-specific library directory, stored in the new :attr:`sys.platlibdir` - attribute. See :attr:`sys.platlibdir` attribute for more information. + platform-specific library directory, stored in the new :data:`sys.platlibdir` + attribute. See :data:`sys.platlibdir` attribute for more information. (Contributed by Jan Matějek, Matěj Cepl, Charalampos Stratakis and Victor Stinner in :issue:`1294959`.) @@ -1278,7 +1275,7 @@ New Features * :pep:`573`: Added :c:func:`PyType_FromModuleAndSpec` to associate a module with a class; :c:func:`PyType_GetModule` and :c:func:`PyType_GetModuleState` to retrieve the module and its state; and - :c:data:`PyCMethod` and :c:data:`METH_METHOD` to allow a method to + :c:type:`PyCMethod` and :c:macro:`METH_METHOD` to allow a method to access the class it was defined in. (Contributed by Marcel Plch and Petr Viktorin in :issue:`38787`.) @@ -1317,7 +1314,7 @@ New Features * The :c:func:`PyModule_AddType` function is added to help adding a type to a module. - (Contributed by Dong-hee Na in :issue:`40024`.) + (Contributed by Donghee Na in :issue:`40024`.) * Added the functions :c:func:`PyObject_GC_IsTracked` and :c:func:`PyObject_GC_IsFinalized` to the public API to allow to query if @@ -1372,8 +1369,8 @@ Porting to Python 3.9 (Contributed by Victor Stinner in :issue:`40241`.)
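The :c:func:`PyModule_AddType` helper mentioned under New Features replaces the manual ``PyType_Ready()`` / ``Py_INCREF()`` / ``PyModule_AddObject()`` sequence. A short sketch with a hypothetical static ``DemoType`` and module exec function::

   #include <Python.h>

   typedef struct {
       PyObject_HEAD
   } DemoObject;

   static PyTypeObject DemoType = {
       PyVarObject_HEAD_INIT(NULL, 0)
       .tp_name = "demo.Demo",
       .tp_basicsize = sizeof(DemoObject),
       .tp_flags = Py_TPFLAGS_DEFAULT,
       .tp_new = PyType_GenericNew,
   };

   static int
   demo_exec(PyObject *module)
   {
       /* PyModule_AddType() readies the type, takes its own reference, and
        * registers it under the last component of tp_name ("Demo" here). */
       if (PyModule_AddType(module, &DemoType) < 0) {
           return -1;
       }
       return 0;
   }
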
* The ``Py_UNICODE_COPY``, ``Py_UNICODE_FILL``, ``PyUnicode_WSTR_LENGTH``, - :c:func:`PyUnicode_FromUnicode`, :c:func:`PyUnicode_AsUnicode`, - ``_PyUnicode_AsUnicode``, and :c:func:`PyUnicode_AsUnicodeAndSize` are + :c:func:`!PyUnicode_FromUnicode`, :c:func:`!PyUnicode_AsUnicode`, + ``_PyUnicode_AsUnicode``, and :c:func:`!PyUnicode_AsUnicodeAndSize` are marked as deprecated in C. They have been deprecated by :pep:`393` since Python 3.3. (Contributed by Inada Naoki in :issue:`36346`.) @@ -1391,8 +1388,8 @@ Porting to Python 3.9 * :c:func:`PyObject_IS_GC` macro was converted to a function. * The :c:func:`PyObject_NEW` macro becomes an alias to the - :c:func:`PyObject_New` macro, and the :c:func:`PyObject_NEW_VAR` macro - becomes an alias to the :c:func:`PyObject_NewVar` macro. They no longer + :c:macro:`PyObject_New` macro, and the :c:func:`PyObject_NEW_VAR` macro + becomes an alias to the :c:macro:`PyObject_NewVar` macro. They no longer access directly the :c:member:`PyTypeObject.tp_basicsize` member. * :c:func:`PyObject_GET_WEAKREFS_LISTPTR` macro was converted to a function: @@ -1561,7 +1558,7 @@ query parameter separators in :func:`urllib.parse.parse_qs` and :func:`urllib.parse.parse_qsl`. Due to security concerns, and to conform with newer W3C recommendations, this has been changed to allow only a single separator key, with ``&`` as the default. This change also affects -:func:`cgi.parse` and :func:`cgi.parse_multipart` as they use the affected +:func:`!cgi.parse` and :func:`!cgi.parse_multipart` as they use the affected functions internally. For more details, please see their respective documentation. (Contributed by Adam Goldschmidt, Senthil Kumaran and Ken Jin in :issue:`42967`.) diff --git a/Doc/whatsnew/index.rst b/Doc/whatsnew/index.rst index bfee225791eee94..b9c19602653219c 100644 --- a/Doc/whatsnew/index.rst +++ b/Doc/whatsnew/index.rst @@ -11,6 +11,7 @@ anyone wishing to stay up-to-date after a new release. .. toctree:: :maxdepth: 2 + 3.13.rst 3.12.rst 3.11.rst 3.10.rst diff --git a/Grammar/Tokens b/Grammar/Tokens index 1f3e3b09913653f..20bb803b7d58a64 100644 --- a/Grammar/Tokens +++ b/Grammar/Tokens @@ -53,16 +53,18 @@ ATEQUAL '@=' RARROW '->' ELLIPSIS '...' COLONEQUAL ':=' +EXCLAMATION '!' OP -AWAIT -ASYNC TYPE_IGNORE TYPE_COMMENT SOFT_KEYWORD +FSTRING_START +FSTRING_MIDDLE +FSTRING_END +COMMENT +NL ERRORTOKEN # These aren't used by the C tokenizer but are needed for tokenize.py -COMMENT -NL ENCODING diff --git a/Grammar/python.gram b/Grammar/python.gram index 7dfc3df22fdd3cf..c9269b058bc7f61 100644 --- a/Grammar/python.gram +++ b/Grammar/python.gram @@ -19,8 +19,6 @@ _PyPegen_parse(Parser *p) result = eval_rule(p); } else if (p->start_rule == Py_func_type_input) { result = func_type_rule(p); - } else if (p->start_rule == Py_fstring_input) { - result = fstring_rule(p); } return result; @@ -89,7 +87,6 @@ file[mod_ty]: a=[statements] ENDMARKER { _PyPegen_make_module(p, a) } interactive[mod_ty]: a=statement_newline { _PyAST_Interactive(a, p->arena) } eval[mod_ty]: a=expressions NEWLINE* ENDMARKER { _PyAST_Expression(a, p->arena) } func_type[mod_ty]: '(' a=[type_expressions] ')' '->' b=expression NEWLINE* ENDMARKER { _PyAST_FunctionType(a, b, p->arena) } -fstring[expr_ty]: star_expressions # GENERAL STATEMENTS # ================== @@ -112,6 +109,7 @@ simple_stmts[asdl_stmt_seq*]: # will throw a SyntaxError. 
simple_stmt[stmt_ty] (memo): | assignment + | &"type" type_alias | e=star_expressions { _PyAST_Expr(e, EXTRA) } | &'return' return_stmt | &('import' | 'from') import_stmt @@ -126,11 +124,12 @@ simple_stmt[stmt_ty] (memo): | &'nonlocal' nonlocal_stmt compound_stmt[stmt_ty]: - | &('def' | '@' | ASYNC) function_def + | invalid_compound_stmt + | &('def' | '@' | 'async') function_def | &'if' if_stmt | &('class' | '@') class_def - | &('with' | ASYNC) with_stmt - | &('for' | ASYNC) for_stmt + | &('with' | 'async') with_stmt + | &('for' | 'async') for_stmt | &'try' try_stmt | &'while' while_stmt | match_stmt @@ -194,7 +193,7 @@ yield_stmt[stmt_ty]: y=yield_expr { _PyAST_Expr(y, EXTRA) } assert_stmt[stmt_ty]: 'assert' a=expression b=[',' z=expression { z }] { _PyAST_Assert(a, b, EXTRA) } -import_stmt[stmt_ty]: +import_stmt[stmt_ty]: | invalid_import | import_name | import_from @@ -252,11 +251,11 @@ class_def[stmt_ty]: class_def_raw[stmt_ty]: | invalid_class_def_raw - | 'class' a=NAME b=['(' z=[arguments] ')' { z }] ':' c=block { + | 'class' a=NAME t=[type_params] b=['(' z=[arguments] ')' { z }] ':' c=block { _PyAST_ClassDef(a->v.Name.id, (b) ? ((expr_ty) b)->v.Call.args : NULL, (b) ? ((expr_ty) b)->v.Call.keywords : NULL, - c, NULL, EXTRA) } + c, NULL, t, EXTRA) } # Function definitions # -------------------- @@ -267,18 +266,18 @@ function_def[stmt_ty]: function_def_raw[stmt_ty]: | invalid_def_raw - | 'def' n=NAME &&'(' params=[params] ')' a=['->' z=expression { z }] &&':' tc=[func_type_comment] b=block { + | 'def' n=NAME t=[type_params] &&'(' params=[params] ')' a=['->' z=expression { z }] &&':' tc=[func_type_comment] b=block { _PyAST_FunctionDef(n->v.Name.id, (params) ? params : CHECK(arguments_ty, _PyPegen_empty_arguments(p)), - b, NULL, a, NEW_TYPE_COMMENT(p, tc), EXTRA) } - | ASYNC 'def' n=NAME &&'(' params=[params] ')' a=['->' z=expression { z }] &&':' tc=[func_type_comment] b=block { + b, NULL, a, NEW_TYPE_COMMENT(p, tc), t, EXTRA) } + | 'async' 'def' n=NAME t=[type_params] &&'(' params=[params] ')' a=['->' z=expression { z }] &&':' tc=[func_type_comment] b=block { CHECK_VERSION( stmt_ty, 5, "Async functions are", _PyAST_AsyncFunctionDef(n->v.Name.id, (params) ? params : CHECK(arguments_ty, _PyPegen_empty_arguments(p)), - b, NULL, a, NEW_TYPE_COMMENT(p, tc), EXTRA) + b, NULL, a, NEW_TYPE_COMMENT(p, tc), t, EXTRA) ) } # Function parameters @@ -384,7 +383,7 @@ for_stmt[stmt_ty]: | invalid_for_stmt | 'for' t=star_targets 'in' ~ ex=star_expressions ':' tc=[TYPE_COMMENT] b=block el=[else_block] { _PyAST_For(t, ex, b, el, NEW_TYPE_COMMENT(p, tc), EXTRA) } - | ASYNC 'for' t=star_targets 'in' ~ ex=star_expressions ':' tc=[TYPE_COMMENT] b=block el=[else_block] { + | 'async' 'for' t=star_targets 'in' ~ ex=star_expressions ':' tc=[TYPE_COMMENT] b=block el=[else_block] { CHECK_VERSION(stmt_ty, 5, "Async for loops are", _PyAST_AsyncFor(t, ex, b, el, NEW_TYPE_COMMENT(p, tc), EXTRA)) } | invalid_for_target @@ -393,13 +392,13 @@ for_stmt[stmt_ty]: with_stmt[stmt_ty]: | invalid_with_stmt_indent - | 'with' '(' a[asdl_withitem_seq*]=','.with_item+ ','? ')' ':' b=block { - CHECK_VERSION(stmt_ty, 9, "Parenthesized context managers are", _PyAST_With(a, b, NULL, EXTRA)) } + | 'with' '(' a[asdl_withitem_seq*]=','.with_item+ ','? 
')' ':' tc=[TYPE_COMMENT] b=block { + CHECK_VERSION(stmt_ty, 9, "Parenthesized context managers are", _PyAST_With(a, b, NEW_TYPE_COMMENT(p, tc), EXTRA)) } | 'with' a[asdl_withitem_seq*]=','.with_item+ ':' tc=[TYPE_COMMENT] b=block { _PyAST_With(a, b, NEW_TYPE_COMMENT(p, tc), EXTRA) } - | ASYNC 'with' '(' a[asdl_withitem_seq*]=','.with_item+ ','? ')' ':' b=block { + | 'async' 'with' '(' a[asdl_withitem_seq*]=','.with_item+ ','? ')' ':' b=block { CHECK_VERSION(stmt_ty, 5, "Async with statements are", _PyAST_AsyncWith(a, b, NULL, EXTRA)) } - | ASYNC 'with' a[asdl_withitem_seq*]=','.with_item+ ':' tc=[TYPE_COMMENT] b=block { + | 'async' 'with' a[asdl_withitem_seq*]=','.with_item+ ':' tc=[TYPE_COMMENT] b=block { CHECK_VERSION(stmt_ty, 5, "Async with statements are", _PyAST_AsyncWith(a, b, NEW_TYPE_COMMENT(p, tc), EXTRA)) } | invalid_with_stmt @@ -415,8 +414,8 @@ try_stmt[stmt_ty]: | invalid_try_stmt | 'try' &&':' b=block f=finally_block { _PyAST_Try(b, NULL, NULL, f, EXTRA) } | 'try' &&':' b=block ex[asdl_excepthandler_seq*]=except_block+ el=[else_block] f=[finally_block] { _PyAST_Try(b, ex, el, f, EXTRA) } - | 'try' &&':' b=block ex[asdl_excepthandler_seq*]=except_star_block+ el=[else_block] f=[finally_block] { - CHECK_VERSION(stmt_ty, 11, "Exception groups are", + | 'try' &&':' b=block ex[asdl_excepthandler_seq*]=except_star_block+ el=[else_block] f=[finally_block] { + CHECK_VERSION(stmt_ty, 11, "Exception groups are", _PyAST_TryStar(b, ex, el, f, EXTRA)) } @@ -628,6 +627,39 @@ keyword_patterns[asdl_seq*]: keyword_pattern[KeyPatternPair*]: | arg=NAME '=' value=pattern { _PyPegen_key_pattern_pair(p, arg, value) } +# Type statement +# --------------- + +type_alias[stmt_ty]: + | "type" n=NAME t=[type_params] '=' b=expression { + CHECK_VERSION(stmt_ty, 12, "Type statement is", + _PyAST_TypeAlias(CHECK(expr_ty, _PyPegen_set_expr_context(p, n, Store)), t, b, EXTRA)) } + +# Type parameter declaration +# -------------------------- + +type_params[asdl_type_param_seq*]: '[' t=type_param_seq ']' { + CHECK_VERSION(asdl_type_param_seq *, 12, "Type parameter lists are", t) } + +type_param_seq[asdl_type_param_seq*]: a[asdl_type_param_seq*]=','.type_param+ [','] { a } + +type_param[type_param_ty] (memo): + | a=NAME b=[type_param_bound] { _PyAST_TypeVar(a->v.Name.id, b, EXTRA) } + | '*' a=NAME colon=':' e=expression { + RAISE_SYNTAX_ERROR_STARTING_FROM(colon, e->kind == Tuple_kind + ? "cannot use constraints with TypeVarTuple" + : "cannot use bound with TypeVarTuple") + } + | '*' a=NAME { _PyAST_TypeVarTuple(a->v.Name.id, EXTRA) } + | '**' a=NAME colon=':' e=expression { + RAISE_SYNTAX_ERROR_STARTING_FROM(colon, e->kind == Tuple_kind + ? "cannot use constraints with ParamSpec" + : "cannot use bound with ParamSpec") + } + | '**' a=NAME { _PyAST_ParamSpec(a->v.Name.id, EXTRA) } + +type_param_bound[expr_ty]: ':' e=expression { e } + # EXPRESSIONS # ----------- @@ -780,7 +812,7 @@ power[expr_ty]: # Primary elements are things like "obj.something.something", "obj[something]", "obj(something)", "obj" ... 
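The ``type_alias`` and ``type_params`` rules above add the PEP 695 syntax to the grammar; from the C API the new statements compile like any other source, with the ``CHECK_VERSION(..., 12, ...)`` actions gating them to feature version 12 and later. A small sketch (the source string is purely illustrative)::

   #include <Python.h>

   int
   main(void)
   {
       Py_Initialize();

       /* Uses the new type_alias and type_params rules; only parses on
        * interpreters whose grammar contains them. */
       const char *src =
           "type Pair[T] = tuple[T, T]\n"
           "def first[T](pair: Pair[T]) -> T:\n"
           "    return pair[0]\n";

       PyObject *code = Py_CompileString(src, "<pep695-demo>", Py_file_input);
       if (code == NULL) {
           PyErr_Print();   /* SyntaxError on an older grammar */
       }
       Py_XDECREF(code);

       return Py_FinalizeEx() < 0 ? 1 : 0;
   }
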
await_primary[expr_ty] (memo): - | AWAIT a=primary { CHECK_VERSION(expr_ty, 5, "Await expressions are", _PyAST_Await(a, EXTRA)) } + | 'await' a=primary { CHECK_VERSION(expr_ty, 5, "Await expressions are", _PyAST_Await(a, EXTRA)) } | primary primary[expr_ty]: @@ -807,7 +839,7 @@ atom[expr_ty]: | 'True' { _PyAST_Constant(Py_True, NULL, EXTRA) } | 'False' { _PyAST_Constant(Py_False, NULL, EXTRA) } | 'None' { _PyAST_Constant(Py_None, NULL, EXTRA) } - | &STRING strings + | &(STRING|FSTRING_START) strings | NUMBER | &'(' (tuple | group | genexp) | &'[' (list | listcomp) @@ -877,7 +909,25 @@ lambda_param[arg_ty]: a=NAME { _PyAST_arg(a->v.Name.id, NULL, NULL, EXTRA) } # LITERALS # ======== -strings[expr_ty] (memo): a=STRING+ { _PyPegen_concatenate_strings(p, a) } +fstring_middle[expr_ty]: + | fstring_replacement_field + | t=FSTRING_MIDDLE { _PyPegen_constant_from_token(p, t) } +fstring_replacement_field[expr_ty]: + | '{' a=(yield_expr | star_expressions) debug_expr='='? conversion=[fstring_conversion] format=[fstring_full_format_spec] rbrace='}' { + _PyPegen_formatted_value(p, a, debug_expr, conversion, format, rbrace, EXTRA) } + | invalid_replacement_field +fstring_conversion[ResultTokenWithMetadata*]: + | conv_token="!" conv=NAME { _PyPegen_check_fstring_conversion(p, conv_token, conv) } +fstring_full_format_spec[ResultTokenWithMetadata*]: + | colon=':' spec=fstring_format_spec* { _PyPegen_setup_full_format_spec(p, colon, (asdl_expr_seq *) spec, EXTRA) } +fstring_format_spec[expr_ty]: + | t=FSTRING_MIDDLE { _PyPegen_decoded_constant_from_token(p, t) } + | fstring_replacement_field +fstring[expr_ty]: + | a=FSTRING_START b=fstring_middle* c=FSTRING_END { _PyPegen_joined_str(p, a, (asdl_expr_seq*)b, c) } + +string[expr_ty]: s[Token*]=STRING { _PyPegen_constant_from_string(p, s) } +strings[expr_ty] (memo): a[asdl_expr_seq*]=(fstring|string)+ { _PyPegen_concatenate_strings(p, a, EXTRA) } list[expr_ty]: | '[' a=[star_named_expressions] ']' { _PyAST_List(a, Load, EXTRA) } @@ -914,7 +964,7 @@ for_if_clauses[asdl_comprehension_seq*]: | a[asdl_comprehension_seq*]=for_if_clause+ { a } for_if_clause[comprehension_ty]: - | ASYNC 'for' a=star_targets 'in' ~ b=disjunction c[asdl_expr_seq*]=('if' z=disjunction { z })* { + | 'async' 'for' a=star_targets 'in' ~ b=disjunction c[asdl_expr_seq*]=('if' z=disjunction { z })* { CHECK_VERSION(comprehension_ty, 6, "Async comprehensions are", _PyAST_comprehension(a, b, c, 1, p->arena)) } | 'for' a=star_targets 'in' ~ b=disjunction c[asdl_expr_seq*]=('if' z=disjunction { z })* { _PyAST_comprehension(a, b, c, 0, p->arena) } @@ -957,6 +1007,7 @@ kwargs[asdl_seq*]: | ','.kwarg_or_double_starred+ starred_expression[expr_ty]: + | invalid_starred_expression | '*' a=expression { _PyAST_Starred(a, Load, EXTRA) } kwarg_or_starred[KeywordOrStarred*]: @@ -1078,11 +1129,14 @@ func_type_comment[Token*]: # From here on, there are rules for invalid syntax with specialised error messages invalid_arguments: - | a=args ',' '*' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "iterable argument unpacking follows keyword argument unpacking") } + | ((','.(starred_expression | ( assignment_expression | expression !':=') !'=')+ ',' kwargs) | kwargs) ',' b='*' { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION(b, "iterable argument unpacking follows keyword argument unpacking") } | a=expression b=for_if_clauses ',' [args | expression for_if_clauses] { RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, _PyPegen_get_last_comprehension_item(PyPegen_last_item(b, comprehension_ty)), "Generator expression must be parenthesized") } | a=NAME 
b='=' expression for_if_clauses { RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "invalid syntax. Maybe you meant '==' or ':=' instead of '='?")} + | (args ',')? a=NAME b='=' &(',' | ')') { + RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "expected argument value expression")} | a=args b=for_if_clauses { _PyPegen_nonparen_genexp_in_call(p, a, b) } | args ',' a=expression b=for_if_clauses { RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, _PyPegen_get_last_comprehension_item(PyPegen_last_item(b, comprehension_ty)), "Generator expression must be parenthesized") } @@ -1095,6 +1149,8 @@ invalid_kwarg: | !(NAME '=') a=expression b='=' { RAISE_SYNTAX_ERROR_KNOWN_RANGE( a, b, "expression cannot contain assignment, perhaps you meant \"==\"?") } + | a='**' expression '=' b=expression { + RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "cannot assign to keyword argument unpacking") } # IMPORTANT: Note that the "_without_invalid" suffix causes the rule to not call invalid rules under it expression_without_invalid[expr_ty]: @@ -1113,6 +1169,8 @@ invalid_expression: _PyPegen_check_legacy_stmt(p, a) ? NULL : p->tokens[p->mark-1]->level == 0 ? NULL : RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "invalid syntax. Perhaps you forgot a comma?") } | a=disjunction 'if' b=disjunction !('else'|':') { RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "expected 'else' after 'if' expression") } + | a='lambda' [lambda_params] b=':' &FSTRING_MIDDLE { + RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "f-string: lambda expressions are not allowed without parentheses") } invalid_named_expression(memo): | a=expression ':=' expression { @@ -1225,7 +1283,7 @@ invalid_with_item: RAISE_SYNTAX_ERROR_INVALID_TARGET(STAR_TARGETS, a) } invalid_for_target: - | ASYNC? 'for' a=star_expressions { + | 'async'? 'for' a=star_expressions { RAISE_SYNTAX_ERROR_INVALID_TARGET(FOR_TARGETS, a) } invalid_group: @@ -1234,28 +1292,34 @@ invalid_group: | '(' a='**' expression ')' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "cannot use double starred expression here") } invalid_import: - | a='import' dotted_name 'from' dotted_name { + | a='import' ','.dotted_name+ 'from' dotted_name { RAISE_SYNTAX_ERROR_STARTING_FROM(a, "Did you mean to use 'from ... import ...' instead?") } - + invalid_import_from_targets: | import_from_as_names ',' NEWLINE { RAISE_SYNTAX_ERROR("trailing comma not allowed without surrounding parentheses") } +invalid_compound_stmt: + | a='elif' named_expression ':' { RAISE_SYNTAX_ERROR_STARTING_FROM(a, "'elif' must match an if-statement here") } + | a='else' ':' { RAISE_SYNTAX_ERROR_STARTING_FROM(a, "'else' must match a valid statement here") } + invalid_with_stmt: - | [ASYNC] 'with' ','.(expression ['as' star_target])+ NEWLINE { RAISE_SYNTAX_ERROR("expected ':'") } - | [ASYNC] 'with' '(' ','.(expressions ['as' star_target])+ ','? ')' NEWLINE { RAISE_SYNTAX_ERROR("expected ':'") } + | ['async'] 'with' ','.(expression ['as' star_target])+ NEWLINE { RAISE_SYNTAX_ERROR("expected ':'") } + | ['async'] 'with' '(' ','.(expressions ['as' star_target])+ ','? ')' NEWLINE { RAISE_SYNTAX_ERROR("expected ':'") } invalid_with_stmt_indent: - | [ASYNC] a='with' ','.(expression ['as' star_target])+ ':' NEWLINE !INDENT { + | ['async'] a='with' ','.(expression ['as' star_target])+ ':' NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block after 'with' statement on line %d", a->lineno) } - | [ASYNC] a='with' '(' ','.(expressions ['as' star_target])+ ','? ')' ':' NEWLINE !INDENT { + | ['async'] a='with' '(' ','.(expressions ['as' star_target])+ ','? 
')' ':' NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block after 'with' statement on line %d", a->lineno) } invalid_try_stmt: | a='try' ':' NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block after 'try' statement on line %d", a->lineno) } | 'try' ':' block !('except' | 'finally') { RAISE_SYNTAX_ERROR("expected 'except' or 'finally' block") } - | 'try' ':' block* ((except_block+ except_star_block) | (except_star_block+ except_block)) block* { - RAISE_SYNTAX_ERROR("cannot have both 'except' and 'except*' on the same 'try'") } + | 'try' ':' block* except_block+ a='except' b='*' expression ['as' NAME] ':' { + RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "cannot have both 'except' and 'except*' on the same 'try'") } + | 'try' ':' block* except_star_block+ a='except' [expression ['as' NAME]] ':' { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "cannot have both 'except' and 'except*' on the same 'try'") } invalid_except_stmt: | 'except' '*'? a=expression ',' expressions ['as' NAME ] ':' { RAISE_SYNTAX_ERROR_STARTING_FROM(a, "multiple exception types must be parenthesized") } @@ -1306,15 +1370,15 @@ invalid_while_stmt: | a='while' named_expression ':' NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block after 'while' statement on line %d", a->lineno) } invalid_for_stmt: - | [ASYNC] 'for' star_targets 'in' star_expressions NEWLINE { RAISE_SYNTAX_ERROR("expected ':'") } - | [ASYNC] a='for' star_targets 'in' star_expressions ':' NEWLINE !INDENT { + | ['async'] 'for' star_targets 'in' star_expressions NEWLINE { RAISE_SYNTAX_ERROR("expected ':'") } + | ['async'] a='for' star_targets 'in' star_expressions ':' NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block after 'for' statement on line %d", a->lineno) } invalid_def_raw: - | [ASYNC] a='def' NAME '(' [params] ')' ['->' expression] ':' NEWLINE !INDENT { + | ['async'] a='def' NAME [type_params] '(' [params] ')' ['->' expression] ':' NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block after function definition on line %d", a->lineno) } invalid_class_def_raw: - | 'class' NAME ['(' [arguments] ')'] NEWLINE { RAISE_SYNTAX_ERROR("expected ':'") } - | a='class' NAME ['(' [arguments] ')'] ':' NEWLINE !INDENT { + | 'class' NAME [type_params] ['(' [arguments] ')'] NEWLINE { RAISE_SYNTAX_ERROR("expected ':'") } + | a='class' NAME [type_params] ['(' [arguments] ')'] ':' NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block after class definition on line %d", a->lineno) } invalid_double_starred_kvpairs: @@ -1326,3 +1390,26 @@ invalid_kvpair: RAISE_ERROR_KNOWN_LOCATION(p, PyExc_SyntaxError, a->lineno, a->end_col_offset - 1, a->end_lineno, -1, "':' expected after dictionary key") } | expression ':' a='*' bitwise_or { RAISE_SYNTAX_ERROR_STARTING_FROM(a, "cannot use a starred expression in a dictionary value") } | expression a=':' &('}'|',') {RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "expression expected after dictionary key and ':'") } +invalid_starred_expression: + | a='*' expression '=' b=expression { RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "cannot assign to iterable argument unpacking") } +invalid_replacement_field: + | '{' a='=' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "f-string: valid expression required before '='") } + | '{' a='!' 
{ RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "f-string: valid expression required before '!'") } + | '{' a=':' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "f-string: valid expression required before ':'") } + | '{' a='}' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "f-string: valid expression required before '}'") } + | '{' !(yield_expr | star_expressions) { RAISE_SYNTAX_ERROR_ON_NEXT_TOKEN("f-string: expecting a valid expression after '{'")} + | '{' (yield_expr | star_expressions) !('=' | '!' | ':' | '}') { + PyErr_Occurred() ? NULL : RAISE_SYNTAX_ERROR_ON_NEXT_TOKEN("f-string: expecting '=', or '!', or ':', or '}'") } + | '{' (yield_expr | star_expressions) '=' !('!' | ':' | '}') { + PyErr_Occurred() ? NULL : RAISE_SYNTAX_ERROR_ON_NEXT_TOKEN("f-string: expecting '!', or ':', or '}'") } + | '{' (yield_expr | star_expressions) '='? invalid_conversion_character + | '{' (yield_expr | star_expressions) '='? ['!' NAME] !(':' | '}') { + PyErr_Occurred() ? NULL : RAISE_SYNTAX_ERROR_ON_NEXT_TOKEN("f-string: expecting ':' or '}'") } + | '{' (yield_expr | star_expressions) '='? ['!' NAME] ':' fstring_format_spec* !'}' { + PyErr_Occurred() ? NULL : RAISE_SYNTAX_ERROR_ON_NEXT_TOKEN("f-string: expecting '}', or format specs") } + | '{' (yield_expr | star_expressions) '='? ['!' NAME] !'}' { + PyErr_Occurred() ? NULL : RAISE_SYNTAX_ERROR_ON_NEXT_TOKEN("f-string: expecting '}'") } + +invalid_conversion_character: + | '!' &(':' | '}') { RAISE_SYNTAX_ERROR_ON_NEXT_TOKEN("f-string: missing conversion character") } + | '!' !NAME { RAISE_SYNTAX_ERROR_ON_NEXT_TOKEN("f-string: invalid conversion character") } diff --git a/Include/Python.h b/Include/Python.h index 52a7aac6ba6cb6e..196751c3201e620 100644 --- a/Include/Python.h +++ b/Include/Python.h @@ -5,42 +5,56 @@ #ifndef Py_PYTHON_H #define Py_PYTHON_H -// Since this is a "meta-include" file, no #ifdef __cplusplus / extern "C" { +// Since this is a "meta-include" file, "#ifdef __cplusplus / extern "C" {" +// is not needed. + // Include Python header files #include "patchlevel.h" #include "pyconfig.h" #include "pymacconfig.h" -#if defined(__sgi) && !defined(_SGI_MP_SOURCE) -# define _SGI_MP_SOURCE + +// Include standard header files +#include // assert() +#include // uintptr_t +#include // INT_MAX +#include // HUGE_VAL +#include // va_list +#include // wchar_t +#ifdef HAVE_SYS_TYPES_H +# include // ssize_t #endif -// stdlib.h, stdio.h, errno.h and string.h headers are not used by Python -// headers, but kept for backward compatibility. They are excluded from the -// limited C API of Python 3.11. +// , , and headers are no longer used +// by Python, but kept for the backward compatibility of existing third party C +// extensions. They are not included by limited C API version 3.11 and newer. +// +// The and headers are not included by limited C API +// version 3.13 and newer. 
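The hunk that follows trims which standard headers ``Python.h`` pulls in; judging from the ``tolower()`` and ``close()`` comments, the headers dropped from limited C API 3.13 and newer appear to be ``<ctype.h>`` and ``<unistd.h>`` (an inference, since the bracketed names do not survive in the text). A sketch of an extension source file that compensates by including what it actually uses, with an illustrative helper::

   /* Extension targeting the limited API at 3.13 or newer, where Python.h
    * no longer includes these standard headers transitively. */
   #define Py_LIMITED_API 0x030d0000
   #include <Python.h>

   #include <ctype.h>    /* tolower(): assumed to be one of the trimmed headers */
   #include <unistd.h>   /* close(): likewise, on POSIX platforms */

   static int
   demo_normalize(int c)
   {
       return tolower(c);
   }
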
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030b0000 -# include -# include // FILE* # include // errno +# include // FILE* +# include // getenv() # include // memcpy() #endif -#ifndef MS_WINDOWS -# include -#endif -#ifdef HAVE_STDDEF_H -# include // size_t +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030d0000 +# include // tolower() +# ifndef MS_WINDOWS +# include // close() +# endif #endif -#include // assert() -#include // wchar_t +// Include Python header files #include "pyport.h" #include "pymacro.h" #include "pymath.h" #include "pymem.h" #include "pytypedefs.h" #include "pybuffer.h" +#include "pystats.h" +#include "pyatomic.h" #include "object.h" #include "objimpl.h" #include "typeslots.h" @@ -83,7 +97,6 @@ #include "weakrefobject.h" #include "structseq.h" #include "cpython/picklebufobject.h" -#include "cpython/pytime.h" #include "codecs.h" #include "pyerrors.h" #include "pythread.h" @@ -104,6 +117,7 @@ #include "pystrcmp.h" #include "fileutils.h" #include "cpython/pyfpe.h" -#include "tracemalloc.h" +#include "cpython/tracemalloc.h" +#include "cpython/optimizer.h" #endif /* !Py_PYTHON_H */ diff --git a/Include/README.rst b/Include/README.rst index f52e690eac9a91c..531f09692f783f5 100644 --- a/Include/README.rst +++ b/Include/README.rst @@ -1,11 +1,13 @@ The Python C API ================ -The C API is divided into three sections: +The C API is divided into these sections: 1. ``Include/``: Limited API 2. ``Include/cpython/``: CPython implementation details -3. ``Include/internal/``: The internal API +3. ``Include/cpython/``, names with the ``PyUnstable_`` prefix: API that can + change between minor releases +4. ``Include/internal/``, and any name with ``_`` prefix: The internal API Information on changing the C API is available `in the developer guide`_ diff --git a/Include/abstract.h b/Include/abstract.h index 064b0300b51ea29..bd12a54963c13fb 100644 --- a/Include/abstract.h +++ b/Include/abstract.h @@ -50,6 +50,25 @@ extern "C" { This function always succeeds. */ + +/* Implemented elsewhere: + + int PyObject_HasAttrStringWithError(PyObject *o, const char *attr_name); + + Returns 1 if object 'o' has the attribute attr_name, and 0 otherwise. + This is equivalent to the Python expression: hasattr(o,attr_name). + Returns -1 on failure. */ + + +/* Implemented elsewhere: + + int PyObject_HasAttrWithError(PyObject *o, PyObject *attr_name); + + Returns 1 if o has the attribute attr_name, and 0 otherwise. + This is equivalent to the Python expression: hasattr(o,attr_name). + Returns -1 on failure. */ + + /* Implemented elsewhere: PyObject* PyObject_GetAttr(PyObject *o, PyObject *attr_name); @@ -60,6 +79,38 @@ extern "C" { This is the equivalent of the Python expression: o.attr_name. */ +/* Implemented elsewhere: + + int PyObject_GetOptionalAttr(PyObject *obj, PyObject *attr_name, PyObject **result); + + Variant of PyObject_GetAttr() which doesn't raise AttributeError + if the attribute is not found. + + If the attribute is found, return 1 and set *result to a new strong + reference to the attribute. + If the attribute is not found, return 0 and set *result to NULL; + the AttributeError is silenced. + If an error other than AttributeError is raised, return -1 and + set *result to NULL. +*/ + + +/* Implemented elsewhere: + + int PyObject_GetOptionalAttrString(PyObject *obj, const char *attr_name, PyObject **result); + + Variant of PyObject_GetAttrString() which doesn't raise AttributeError + if the attribute is not found. 
+ + If the attribute is found, return 1 and set *result to a new strong + reference to the attribute. + If the attribute is not found, return 0 and set *result to NULL; + the AttributeError is silenced. + If an error other than AttributeError is raised, return -1 and + set *result to NULL. +*/ + + /* Implemented elsewhere: int PyObject_SetAttrString(PyObject *o, const char *attr_name, PyObject *v); @@ -80,7 +131,7 @@ extern "C" { This is the equivalent of the Python statement o.attr_name=v. */ -/* Implemented as a macro: +/* Implemented elsewhere: int PyObject_DelAttrString(PyObject *o, const char *attr_name); @@ -88,17 +139,15 @@ extern "C" { -1 on failure. This is the equivalent of the Python statement: del o.attr_name. */ -#define PyObject_DelAttrString(O, A) PyObject_SetAttrString((O), (A), NULL) -/* Implemented as a macro: +/* Implemented elsewhere: int PyObject_DelAttr(PyObject *o, PyObject *attr_name); Delete attribute named attr_name, for object o. Returns -1 on failure. This is the equivalent of the Python statement: del o.attr_name. */ -#define PyObject_DelAttr(O, A) PyObject_SetAttr((O), (A), NULL) /* Implemented elsewhere: @@ -135,12 +184,6 @@ extern "C" { This function always succeeds. */ -#ifdef PY_SSIZE_T_CLEAN -# define PyObject_CallFunction _PyObject_CallFunction_SizeT -# define PyObject_CallMethod _PyObject_CallMethod_SizeT -#endif - - #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000 /* Call a callable Python object without any arguments */ PyAPI_FUNC(PyObject *) PyObject_CallNoArgs(PyObject *func); @@ -195,15 +238,6 @@ PyAPI_FUNC(PyObject *) PyObject_CallMethod(PyObject *obj, const char *name, const char *format, ...); -PyAPI_FUNC(PyObject *) _PyObject_CallFunction_SizeT(PyObject *callable, - const char *format, - ...); - -PyAPI_FUNC(PyObject *) _PyObject_CallMethod_SizeT(PyObject *obj, - const char *name, - const char *format, - ...); - /* Call a callable Python object 'callable' with a variable number of C arguments. The C arguments are provided as PyObject* values, terminated by a NULL. @@ -335,55 +369,6 @@ PyAPI_FUNC(int) PyObject_DelItemString(PyObject *o, const char *key); PyAPI_FUNC(int) PyObject_DelItem(PyObject *o, PyObject *key); -/* === Old Buffer API ============================================ */ - -/* FIXME: usage of these should all be replaced in Python itself - but for backwards compatibility we will implement them. - Their usage without a corresponding "unlock" mechanism - may create issues (but they would already be there). */ - -/* Takes an arbitrary object which must support the (character, single segment) - buffer interface and returns a pointer to a read-only memory location - usable as character based input for subsequent processing. - - Return 0 on success. buffer and buffer_len are only set in case no error - occurs. Otherwise, -1 is returned and an exception set. */ -Py_DEPRECATED(3.0) -PyAPI_FUNC(int) PyObject_AsCharBuffer(PyObject *obj, - const char **buffer, - Py_ssize_t *buffer_len); - -/* Checks whether an arbitrary object supports the (character, single segment) - buffer interface. - - Returns 1 on success, 0 on failure. */ -Py_DEPRECATED(3.0) PyAPI_FUNC(int) PyObject_CheckReadBuffer(PyObject *obj); - -/* Same as PyObject_AsCharBuffer() except that this API expects (readable, - single segment) buffer interface and returns a pointer to a read-only memory - location which can contain arbitrary data. - - 0 is returned on success. buffer and buffer_len are only set in case no - error occurs. 
Otherwise, -1 is returned and an exception set. */ -Py_DEPRECATED(3.0) -PyAPI_FUNC(int) PyObject_AsReadBuffer(PyObject *obj, - const void **buffer, - Py_ssize_t *buffer_len); - -/* Takes an arbitrary object which must support the (writable, single segment) - buffer interface and returns a pointer to a writable memory location in - buffer of size 'buffer_len'. - - Return 0 on success. buffer and buffer_len are only set in case no error - occurs. Otherwise, -1 is returned and an exception set. */ -Py_DEPRECATED(3.0) -PyAPI_FUNC(int) PyObject_AsWriteBuffer(PyObject *obj, - void **buffer, - Py_ssize_t *buffer_len); - - -/* === New Buffer API ============================================ */ - /* Takes an arbitrary object and returns the result of calling obj.__format__(format_spec). */ PyAPI_FUNC(PyObject *) PyObject_Format(PyObject *obj, @@ -855,6 +840,18 @@ PyAPI_FUNC(int) PyMapping_HasKeyString(PyObject *o, const char *key); This function always succeeds. */ PyAPI_FUNC(int) PyMapping_HasKey(PyObject *o, PyObject *key); +/* Return 1 if the mapping object has the key 'key', and 0 otherwise. + This is equivalent to the Python expression: key in o. + On failure, return -1. */ + +PyAPI_FUNC(int) PyMapping_HasKeyWithError(PyObject *o, PyObject *key); + +/* Return 1 if the mapping object has the key 'key', and 0 otherwise. + This is equivalent to the Python expression: key in o. + On failure, return -1. */ + +PyAPI_FUNC(int) PyMapping_HasKeyStringWithError(PyObject *o, const char *key); + /* On success, return a list or tuple of the keys in mapping object 'o'. On failure, return NULL. */ PyAPI_FUNC(PyObject *) PyMapping_Keys(PyObject *o); @@ -874,6 +871,21 @@ PyAPI_FUNC(PyObject *) PyMapping_Items(PyObject *o); PyAPI_FUNC(PyObject *) PyMapping_GetItemString(PyObject *o, const char *key); +/* Variants of PyObject_GetItem() and PyMapping_GetItemString() which don't + raise KeyError if the key is not found. + + If the key is found, return 1 and set *result to a new strong + reference to the corresponding value. + If the key is not found, return 0 and set *result to NULL; + the KeyError is silenced. + If an error other than KeyError is raised, return -1 and + set *result to NULL. +*/ +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000 +PyAPI_FUNC(int) PyMapping_GetOptionalItem(PyObject *, PyObject *, PyObject **); +PyAPI_FUNC(int) PyMapping_GetOptionalItemString(PyObject *, const char *, PyObject **); +#endif + /* Map the string 'key' to the value 'v' in the mapping 'o'. Returns -1 on failure. diff --git a/Include/boolobject.h b/Include/boolobject.h index ca21fbfad8e8277..976fa35201d035d 100644 --- a/Include/boolobject.h +++ b/Include/boolobject.h @@ -11,8 +11,7 @@ PyAPI_DATA(PyTypeObject) PyBool_Type; #define PyBool_Check(x) Py_IS_TYPE((x), &PyBool_Type) -/* Py_False and Py_True are the only two bools in existence. -Don't forget to apply Py_INCREF() when returning either!!! */ +/* Py_False and Py_True are the only two bools in existence. 
*/ /* Don't use these directly */ PyAPI_DATA(PyLongObject) _Py_FalseStruct; @@ -31,8 +30,8 @@ PyAPI_FUNC(int) Py_IsFalse(PyObject *x); #define Py_IsFalse(x) Py_Is((x), Py_False) /* Macros for returning Py_True or Py_False, respectively */ -#define Py_RETURN_TRUE return Py_NewRef(Py_True) -#define Py_RETURN_FALSE return Py_NewRef(Py_False) +#define Py_RETURN_TRUE return Py_True +#define Py_RETURN_FALSE return Py_False /* Function to return a bool from a C long */ PyAPI_FUNC(PyObject *) PyBool_FromLong(long); diff --git a/Include/bytesobject.h b/Include/bytesobject.h index ee448cd02bdab38..c5a24195be6bc37 100644 --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -1,5 +1,4 @@ - -/* Bytes object interface */ +// Bytes object interface #ifndef Py_BYTESOBJECT_H #define Py_BYTESOBJECT_H @@ -7,8 +6,6 @@ extern "C" { #endif -#include // va_list - /* Type PyBytesObject represents a byte string. An extra zero byte is reserved at the end to ensure it is zero-terminated, but a size is diff --git a/Include/ceval.h b/Include/ceval.h index ad4d909d6f2b14a..9885bdb7febc21a 100644 --- a/Include/ceval.h +++ b/Include/ceval.h @@ -17,27 +17,6 @@ PyAPI_FUNC(PyObject *) PyEval_EvalCodeEx(PyObject *co, PyObject *const *defs, int defc, PyObject *kwdefs, PyObject *closure); -/* PyEval_CallObjectWithKeywords(), PyEval_CallObject(), PyEval_CallFunction - * and PyEval_CallMethod are deprecated. Since they are officially part of the - * stable ABI (PEP 384), they must be kept for backward compatibility. - * PyObject_Call(), PyObject_CallFunction() and PyObject_CallMethod() are - * recommended to call a callable object. - */ - -Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyEval_CallObjectWithKeywords( - PyObject *callable, - PyObject *args, - PyObject *kwargs); - -/* Deprecated since PyEval_CallObjectWithKeywords is deprecated */ -#define PyEval_CallObject(callable, arg) \ - PyEval_CallObjectWithKeywords((callable), (arg), _PyObject_CAST(_Py_NULL)) - -Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyEval_CallFunction( - PyObject *callable, const char *format, ...); -Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyEval_CallMethod( - PyObject *obj, const char *name, const char *format, ...); - PyAPI_FUNC(PyObject *) PyEval_GetBuiltins(void); PyAPI_FUNC(PyObject *) PyEval_GetGlobals(void); PyAPI_FUNC(PyObject *) PyEval_GetLocals(void); @@ -128,14 +107,6 @@ PyAPI_FUNC(PyObject *) PyEval_EvalFrameEx(PyFrameObject *f, int exc); PyAPI_FUNC(PyThreadState *) PyEval_SaveThread(void); PyAPI_FUNC(void) PyEval_RestoreThread(PyThreadState *); -Py_DEPRECATED(3.9) PyAPI_FUNC(int) PyEval_ThreadsInitialized(void); -Py_DEPRECATED(3.9) PyAPI_FUNC(void) PyEval_InitThreads(void); -/* PyEval_AcquireLock() and PyEval_ReleaseLock() are part of stable ABI. - * They will be removed from this header file in the future version. - * But they will be remained in ABI until Python 4.0. - */ -Py_DEPRECATED(3.2) PyAPI_FUNC(void) PyEval_AcquireLock(void); -Py_DEPRECATED(3.2) PyAPI_FUNC(void) PyEval_ReleaseLock(void); PyAPI_FUNC(void) PyEval_AcquireThread(PyThreadState *tstate); PyAPI_FUNC(void) PyEval_ReleaseThread(PyThreadState *tstate); diff --git a/Include/codecs.h b/Include/codecs.h index 37ecfb4ab757b41..512a3c723eca183 100644 --- a/Include/codecs.h +++ b/Include/codecs.h @@ -35,34 +35,6 @@ PyAPI_FUNC(int) PyCodec_Unregister( PyObject *search_function ); -/* Codec registry lookup API. - - Looks up the given encoding and returns a CodecInfo object with - function attributes which implement the different aspects of - processing the encoding. 
- - The encoding string is looked up converted to all lower-case - characters. This makes encodings looked up through this mechanism - effectively case-insensitive. - - If no codec is found, a KeyError is set and NULL returned. - - As side effect, this tries to load the encodings package, if not - yet done. This is part of the lazy load strategy for the encodings - package. - - */ - -#ifndef Py_LIMITED_API -PyAPI_FUNC(PyObject *) _PyCodec_Lookup( - const char *encoding - ); - -PyAPI_FUNC(int) _PyCodec_Forget( - const char *encoding - ); -#endif - /* Codec registry encoding check API. Returns 1/0 depending on whether there is a registered codec for @@ -106,102 +78,58 @@ PyAPI_FUNC(PyObject *) PyCodec_Decode( const char *errors ); -#ifndef Py_LIMITED_API -/* Text codec specific encoding and decoding API. - - Checks the encoding against a list of codecs which do not - implement a str<->bytes encoding before attempting the - operation. +// --- Codec Lookup APIs -------------------------------------------------- - Please note that these APIs are internal and should not - be used in Python C extensions. - - XXX (ncoghlan): should we make these, or something like them, public - in Python 3.5+? +/* Codec registry lookup API. - */ -PyAPI_FUNC(PyObject *) _PyCodec_LookupTextEncoding( - const char *encoding, - const char *alternate_command - ); + Looks up the given encoding and returns a CodecInfo object with + function attributes which implement the different aspects of + processing the encoding. -PyAPI_FUNC(PyObject *) _PyCodec_EncodeText( - PyObject *object, - const char *encoding, - const char *errors - ); + The encoding string is looked up converted to all lower-case + characters. This makes encodings looked up through this mechanism + effectively case-insensitive. -PyAPI_FUNC(PyObject *) _PyCodec_DecodeText( - PyObject *object, - const char *encoding, - const char *errors - ); + If no codec is found, a KeyError is set and NULL returned. -/* These two aren't actually text encoding specific, but _io.TextIOWrapper - * is the only current API consumer. + As side effect, this tries to load the encodings package, if not + yet done. This is part of the lazy load strategy for the encodings + package. */ -PyAPI_FUNC(PyObject *) _PyCodecInfo_GetIncrementalDecoder( - PyObject *codec_info, - const char *errors - ); - -PyAPI_FUNC(PyObject *) _PyCodecInfo_GetIncrementalEncoder( - PyObject *codec_info, - const char *errors - ); -#endif - - - -/* --- Codec Lookup APIs -------------------------------------------------- - - All APIs return a codec object with incremented refcount and are - based on _PyCodec_Lookup(). The same comments w/r to the encoding - name also apply to these APIs. - -*/ /* Get an encoder function for the given encoding. */ -PyAPI_FUNC(PyObject *) PyCodec_Encoder( - const char *encoding - ); +PyAPI_FUNC(PyObject *) PyCodec_Encoder(const char *encoding); /* Get a decoder function for the given encoding. */ -PyAPI_FUNC(PyObject *) PyCodec_Decoder( - const char *encoding - ); +PyAPI_FUNC(PyObject *) PyCodec_Decoder(const char *encoding); /* Get an IncrementalEncoder object for the given encoding. */ PyAPI_FUNC(PyObject *) PyCodec_IncrementalEncoder( - const char *encoding, - const char *errors - ); + const char *encoding, + const char *errors); /* Get an IncrementalDecoder object function for the given encoding. 
*/ PyAPI_FUNC(PyObject *) PyCodec_IncrementalDecoder( - const char *encoding, - const char *errors - ); + const char *encoding, + const char *errors); /* Get a StreamReader factory function for the given encoding. */ PyAPI_FUNC(PyObject *) PyCodec_StreamReader( - const char *encoding, - PyObject *stream, - const char *errors - ); + const char *encoding, + PyObject *stream, + const char *errors); /* Get a StreamWriter factory function for the given encoding. */ PyAPI_FUNC(PyObject *) PyCodec_StreamWriter( - const char *encoding, - PyObject *stream, - const char *errors - ); + const char *encoding, + PyObject *stream, + const char *errors); /* Unicode encoding error handling callback registry API */ diff --git a/Include/compile.h b/Include/compile.h index 3c5acd7209f763e..52d0bc76c9fca44 100644 --- a/Include/compile.h +++ b/Include/compile.h @@ -10,9 +10,6 @@ extern "C" { #define Py_eval_input 258 #define Py_func_type_input 345 -/* This doesn't need to match anything */ -#define Py_fstring_input 800 - #ifndef Py_LIMITED_API # define Py_CPYTHON_COMPILE_H # include "cpython/compile.h" diff --git a/Include/cpython/abstract.h b/Include/cpython/abstract.h index 3b27aab2fc4798d..4e7b7a46703a6d0 100644 --- a/Include/cpython/abstract.h +++ b/Include/cpython/abstract.h @@ -4,9 +4,12 @@ /* === Object Protocol ================================================== */ -#ifdef PY_SSIZE_T_CLEAN -# define _PyObject_CallMethodId _PyObject_CallMethodId_SizeT -#endif +/* Like PyObject_CallMethod(), but expect a _Py_Identifier* + as the method name. */ +PyAPI_FUNC(PyObject*) _PyObject_CallMethodId( + PyObject *obj, + _Py_Identifier *name, + const char *format, ...); /* Convert keyword arguments from the FASTCALL (stack: C array, kwnames: tuple) format to a Python dictionary ("kwargs" dict). @@ -18,51 +21,25 @@ Duplicate keys are merged using the last value. If duplicate keys must raise an exception, the caller is responsible to implement an explicit keys on kwnames. */ -PyAPI_FUNC(PyObject *) _PyStack_AsDict( - PyObject *const *values, - PyObject *kwnames); - -/* Suggested size (number of positional arguments) for arrays of PyObject* - allocated on a C stack to avoid allocating memory on the heap memory. Such - array is used to pass positional arguments to call functions of the - PyObject_Vectorcall() family. - - The size is chosen to not abuse the C stack and so limit the risk of stack - overflow. The size is also chosen to allow using the small stack for most - function calls of the Python standard library. On 64-bit CPU, it allocates - 40 bytes on the stack. */ -#define _PY_FASTCALL_SMALL_STACK 5 - -PyAPI_FUNC(PyObject *) _Py_CheckFunctionResult( - PyThreadState *tstate, - PyObject *callable, - PyObject *result, - const char *where); +PyAPI_FUNC(PyObject*) _PyStack_AsDict(PyObject *const *values, PyObject *kwnames); -/* === Vectorcall protocol (PEP 590) ============================= */ -/* Call callable using tp_call. Arguments are like PyObject_Vectorcall() - or PyObject_FastCallDict() (both forms are supported), - except that nargs is plainly the number of arguments without flags. */ -PyAPI_FUNC(PyObject *) _PyObject_MakeTpCall( - PyThreadState *tstate, - PyObject *callable, - PyObject *const *args, Py_ssize_t nargs, - PyObject *keywords); +/* === Vectorcall protocol (PEP 590) ============================= */ // PyVectorcall_NARGS() is exported as a function for the stable ABI. // Here (when we are not using the stable ABI), the name is overridden to // call a static inline function for best performance. 
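The vectorcall (PEP 590) declarations above describe the calling convention: positional arguments in a C array, the count (plus an optional flag bit) in ``nargsf``, and keyword names in a tuple or ``NULL``. A minimal caller sketch; the ``call_two`` helper is hypothetical::

   #include <Python.h>

   /* Hypothetical helper: call `callable` with two positional arguments. */
   static PyObject *
   call_two(PyObject *callable, PyObject *a, PyObject *b)
   {
       PyObject *args[2] = {a, b};
       /* nargsf is plain 2 here; PyVectorcall_NARGS(2) reports 2 back because
        * the PY_VECTORCALL_ARGUMENTS_OFFSET flag bit is not set. */
       return PyObject_Vectorcall(callable, args, 2, NULL);
   }
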
-#define PyVectorcall_NARGS(n) _PyVectorcall_NARGS(n) static inline Py_ssize_t _PyVectorcall_NARGS(size_t n) { return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET; } +#define PyVectorcall_NARGS(n) _PyVectorcall_NARGS(n) PyAPI_FUNC(vectorcallfunc) PyVectorcall_Function(PyObject *callable); -// Backwards compatibility aliases for API that was provisional in Python 3.8 +// Backwards compatibility aliases (PEP 590) for API that was provisional +// in Python 3.8 #define _PyObject_Vectorcall PyObject_Vectorcall #define _PyObject_VectorcallMethod PyObject_VectorcallMethod #define _PyObject_FastCallDict PyObject_VectorcallDict @@ -79,12 +56,6 @@ PyAPI_FUNC(PyObject *) PyObject_VectorcallDict( size_t nargsf, PyObject *kwargs); -// Same as PyObject_Vectorcall(), except without keyword arguments -PyAPI_FUNC(PyObject *) _PyObject_FastCall( - PyObject *func, - PyObject *const *args, - Py_ssize_t nargs); - PyAPI_FUNC(PyObject *) PyObject_CallOneArg(PyObject *func, PyObject *arg); static inline PyObject * @@ -103,56 +74,6 @@ PyObject_CallMethodOneArg(PyObject *self, PyObject *name, PyObject *arg) return PyObject_VectorcallMethod(name, args, nargsf, _Py_NULL); } -PyAPI_FUNC(PyObject *) _PyObject_CallMethod(PyObject *obj, - PyObject *name, - const char *format, ...); - -/* Like PyObject_CallMethod(), but expect a _Py_Identifier* - as the method name. */ -PyAPI_FUNC(PyObject *) _PyObject_CallMethodId(PyObject *obj, - _Py_Identifier *name, - const char *format, ...); - -PyAPI_FUNC(PyObject *) _PyObject_CallMethodId_SizeT(PyObject *obj, - _Py_Identifier *name, - const char *format, - ...); - -PyAPI_FUNC(PyObject *) _PyObject_CallMethodIdObjArgs( - PyObject *obj, - _Py_Identifier *name, - ...); - -static inline PyObject * -_PyObject_VectorcallMethodId( - _Py_Identifier *name, PyObject *const *args, - size_t nargsf, PyObject *kwnames) -{ - PyObject *oname = _PyUnicode_FromId(name); /* borrowed */ - if (!oname) { - return _Py_NULL; - } - return PyObject_VectorcallMethod(oname, args, nargsf, kwnames); -} - -static inline PyObject * -_PyObject_CallMethodIdNoArgs(PyObject *self, _Py_Identifier *name) -{ - size_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; - return _PyObject_VectorcallMethodId(name, &self, nargsf, _Py_NULL); -} - -static inline PyObject * -_PyObject_CallMethodIdOneArg(PyObject *self, _Py_Identifier *name, PyObject *arg) -{ - PyObject *args[2] = {self, arg}; - size_t nargsf = 2 | PY_VECTORCALL_ARGUMENTS_OFFSET; - assert(arg != NULL); - return _PyObject_VectorcallMethodId(name, args, nargsf, _Py_NULL); -} - -PyAPI_FUNC(int) _PyObject_HasLen(PyObject *o); - /* Guess the size of object 'o' using len(o) or o.__length_hint__(). If neither of those return a non-negative value, then return the default value. If one of the calls fails, this function returns -1. */ @@ -164,43 +85,3 @@ PyAPI_FUNC(Py_ssize_t) PyObject_LengthHint(PyObject *o, Py_ssize_t); need to be corrected for a negative index. */ #define PySequence_ITEM(o, i)\ ( Py_TYPE(o)->tp_as_sequence->sq_item((o), (i)) ) - -#define PY_ITERSEARCH_COUNT 1 -#define PY_ITERSEARCH_INDEX 2 -#define PY_ITERSEARCH_CONTAINS 3 - -/* Iterate over seq. - - Result depends on the operation: - - PY_ITERSEARCH_COUNT: return # of times obj appears in seq; -1 if - error. - PY_ITERSEARCH_INDEX: return 0-based index of first occurrence of - obj in seq; set ValueError and return -1 if none found; - also return -1 on error. - PY_ITERSEARCH_CONTAINS: return 1 if obj in seq, else 0; -1 on - error. 
*/ -PyAPI_FUNC(Py_ssize_t) _PySequence_IterSearch(PyObject *seq, - PyObject *obj, int operation); - -/* === Mapping protocol ================================================= */ - -PyAPI_FUNC(int) _PyObject_RealIsInstance(PyObject *inst, PyObject *cls); - -PyAPI_FUNC(int) _PyObject_RealIsSubclass(PyObject *derived, PyObject *cls); - -PyAPI_FUNC(char *const *) _PySequence_BytesToCharpArray(PyObject* self); - -PyAPI_FUNC(void) _Py_FreeCharPArray(char *const array[]); - -/* For internal use by buffer API functions */ -PyAPI_FUNC(void) _Py_add_one_to_index_F(int nd, Py_ssize_t *index, - const Py_ssize_t *shape); -PyAPI_FUNC(void) _Py_add_one_to_index_C(int nd, Py_ssize_t *index, - const Py_ssize_t *shape); - -/* Convert Python int to Py_ssize_t. Do nothing if the argument is None. */ -PyAPI_FUNC(int) _Py_convert_optional_to_ssize_t(PyObject *, void *); - -/* Same as PyNumber_Index but can return an instance of a subclass of int. */ -PyAPI_FUNC(PyObject *) _PyNumber_Index(PyObject *o); diff --git a/Include/cpython/bytesobject.h b/Include/cpython/bytesobject.h index e982031c107de28..816823716e9a6f5 100644 --- a/Include/cpython/bytesobject.h +++ b/Include/cpython/bytesobject.h @@ -15,18 +15,6 @@ typedef struct { } PyBytesObject; PyAPI_FUNC(int) _PyBytes_Resize(PyObject **, Py_ssize_t); -PyAPI_FUNC(PyObject*) _PyBytes_FormatEx( - const char *format, - Py_ssize_t format_len, - PyObject *args, - int use_bytearray); -PyAPI_FUNC(PyObject*) _PyBytes_FromHex( - PyObject *string, - int use_bytearray); - -/* Helper for PyBytes_DecodeEscape that detects invalid escape chars. */ -PyAPI_FUNC(PyObject *) _PyBytes_DecodeEscape(const char *, Py_ssize_t, - const char *, const char **); /* Macros and static inline functions, trading safety for speed */ #define _PyBytes_CAST(op) \ @@ -43,87 +31,3 @@ static inline Py_ssize_t PyBytes_GET_SIZE(PyObject *op) { return Py_SIZE(self); } #define PyBytes_GET_SIZE(self) PyBytes_GET_SIZE(_PyObject_CAST(self)) - -/* _PyBytes_Join(sep, x) is like sep.join(x). sep must be PyBytesObject*, - x must be an iterable object. */ -PyAPI_FUNC(PyObject *) _PyBytes_Join(PyObject *sep, PyObject *x); - - -/* The _PyBytesWriter structure is big: it contains an embedded "stack buffer". - A _PyBytesWriter variable must be declared at the end of variables in a - function to optimize the memory allocation on the stack. */ -typedef struct { - /* bytes, bytearray or NULL (when the small buffer is used) */ - PyObject *buffer; - - /* Number of allocated size. */ - Py_ssize_t allocated; - - /* Minimum number of allocated bytes, - incremented by _PyBytesWriter_Prepare() */ - Py_ssize_t min_size; - - /* If non-zero, use a bytearray instead of a bytes object for buffer. */ - int use_bytearray; - - /* If non-zero, overallocate the buffer (default: 0). - This flag must be zero if use_bytearray is non-zero. */ - int overallocate; - - /* Stack buffer */ - int use_small_buffer; - char small_buffer[512]; -} _PyBytesWriter; - -/* Initialize a bytes writer - - By default, the overallocation is disabled. Set the overallocate attribute - to control the allocation of the buffer. */ -PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer); - -/* Get the buffer content and reset the writer. - Return a bytes object, or a bytearray object if use_bytearray is non-zero. - Raise an exception and return NULL on error. */ -PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer, - void *str); - -/* Deallocate memory of a writer (clear its internal buffer). 
*/ -PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer); - -/* Allocate the buffer to write size bytes. - Return the pointer to the beginning of buffer data. - Raise an exception and return NULL on error. */ -PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer, - Py_ssize_t size); - -/* Ensure that the buffer is large enough to write *size* bytes. - Add size to the writer minimum size (min_size attribute). - - str is the current pointer inside the buffer. - Return the updated current pointer inside the buffer. - Raise an exception and return NULL on error. */ -PyAPI_FUNC(void*) _PyBytesWriter_Prepare(_PyBytesWriter *writer, - void *str, - Py_ssize_t size); - -/* Resize the buffer to make it larger. - The new buffer may be larger than size bytes because of overallocation. - Return the updated current pointer inside the buffer. - Raise an exception and return NULL on error. - - Note: size must be greater than the number of allocated bytes in the writer. - - This function doesn't use the writer minimum size (min_size attribute). - - See also _PyBytesWriter_Prepare(). - */ -PyAPI_FUNC(void*) _PyBytesWriter_Resize(_PyBytesWriter *writer, - void *str, - Py_ssize_t size); - -/* Write bytes. - Raise an exception and return NULL on error. */ -PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer, - void *str, - const void *bytes, - Py_ssize_t size); diff --git a/Include/cpython/ceval.h b/Include/cpython/ceval.h index 74665c9fa10580b..78f7405661662f8 100644 --- a/Include/cpython/ceval.h +++ b/Include/cpython/ceval.h @@ -4,14 +4,9 @@ PyAPI_FUNC(void) PyEval_SetProfile(Py_tracefunc, PyObject *); PyAPI_FUNC(void) PyEval_SetProfileAllThreads(Py_tracefunc, PyObject *); -PyAPI_DATA(int) _PyEval_SetProfile(PyThreadState *tstate, Py_tracefunc func, PyObject *arg); PyAPI_FUNC(void) PyEval_SetTrace(Py_tracefunc, PyObject *); PyAPI_FUNC(void) PyEval_SetTraceAllThreads(Py_tracefunc, PyObject *); -PyAPI_FUNC(int) _PyEval_SetTrace(PyThreadState *tstate, Py_tracefunc func, PyObject *arg); -/* Helper to look up a builtin object */ -PyAPI_FUNC(PyObject *) _PyEval_GetBuiltin(PyObject *); -PyAPI_FUNC(PyObject *) _PyEval_GetBuiltinId(_Py_Identifier *); /* Look at the current frame's (if any) code's co_flags, and turn on the corresponding compiler flags in cf->cf_flags. Return 1 if any flag was set, else return 0. 
*/ @@ -19,10 +14,12 @@ PyAPI_FUNC(int) PyEval_MergeCompilerFlags(PyCompilerFlags *cf); PyAPI_FUNC(PyObject *) _PyEval_EvalFrameDefault(PyThreadState *tstate, struct _PyInterpreterFrame *f, int exc); -PyAPI_FUNC(void) _PyEval_SetSwitchInterval(unsigned long microseconds); -PyAPI_FUNC(unsigned long) _PyEval_GetSwitchInterval(void); - -PyAPI_FUNC(Py_ssize_t) _PyEval_RequestCodeExtraIndex(freefunc); +PyAPI_FUNC(Py_ssize_t) PyUnstable_Eval_RequestCodeExtraIndex(freefunc); +// Old name -- remove when this API changes: +_Py_DEPRECATED_EXTERNALLY(3.12) static inline Py_ssize_t +_PyEval_RequestCodeExtraIndex(freefunc f) { + return PyUnstable_Eval_RequestCodeExtraIndex(f); +} PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *); PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *); diff --git a/Include/cpython/classobject.h b/Include/cpython/classobject.h index 051041965002a3b..d7c9ddd1336c46d 100644 --- a/Include/cpython/classobject.h +++ b/Include/cpython/classobject.h @@ -26,12 +26,20 @@ PyAPI_FUNC(PyObject *) PyMethod_New(PyObject *, PyObject *); PyAPI_FUNC(PyObject *) PyMethod_Function(PyObject *); PyAPI_FUNC(PyObject *) PyMethod_Self(PyObject *); -/* Macros for direct access to these values. Type checks are *not* - done, so use with care. */ -#define PyMethod_GET_FUNCTION(meth) \ - (((PyMethodObject *)(meth)) -> im_func) -#define PyMethod_GET_SELF(meth) \ - (((PyMethodObject *)(meth)) -> im_self) +#define _PyMethod_CAST(meth) \ + (assert(PyMethod_Check(meth)), _Py_CAST(PyMethodObject*, meth)) + +/* Static inline functions for direct access to these values. + Type checks are *not* done, so use with care. */ +static inline PyObject* PyMethod_GET_FUNCTION(PyObject *meth) { + return _PyMethod_CAST(meth)->im_func; +} +#define PyMethod_GET_FUNCTION(meth) PyMethod_GET_FUNCTION(_PyObject_CAST(meth)) + +static inline PyObject* PyMethod_GET_SELF(PyObject *meth) { + return _PyMethod_CAST(meth)->im_self; +} +#define PyMethod_GET_SELF(meth) PyMethod_GET_SELF(_PyObject_CAST(meth)) typedef struct { PyObject_HEAD @@ -45,10 +53,16 @@ PyAPI_DATA(PyTypeObject) PyInstanceMethod_Type; PyAPI_FUNC(PyObject *) PyInstanceMethod_New(PyObject *); PyAPI_FUNC(PyObject *) PyInstanceMethod_Function(PyObject *); -/* Macros for direct access to these values. Type checks are *not* - done, so use with care. */ -#define PyInstanceMethod_GET_FUNCTION(meth) \ - (((PyInstanceMethodObject *)(meth)) -> func) +#define _PyInstanceMethod_CAST(meth) \ + (assert(PyInstanceMethod_Check(meth)), \ + _Py_CAST(PyInstanceMethodObject*, meth)) + +/* Static inline function for direct access to these values. + Type checks are *not* done, so use with care. */ +static inline PyObject* PyInstanceMethod_GET_FUNCTION(PyObject *meth) { + return _PyInstanceMethod_CAST(meth)->func; +} +#define PyInstanceMethod_GET_FUNCTION(meth) PyInstanceMethod_GET_FUNCTION(_PyObject_CAST(meth)) #ifdef __cplusplus } diff --git a/Include/cpython/code.h b/Include/cpython/code.h index ebac0b12a461bc8..cf715c55a2b3b86 100644 --- a/Include/cpython/code.h +++ b/Include/cpython/code.h @@ -3,10 +3,27 @@ #ifndef Py_LIMITED_API #ifndef Py_CODE_H #define Py_CODE_H + #ifdef __cplusplus extern "C" { #endif +/* Count of all local monitoring events */ +#define _PY_MONITORING_LOCAL_EVENTS 10 +/* Count of all "real" monitoring events (not derived from other events) */ +#define _PY_MONITORING_UNGROUPED_EVENTS 15 +/* Count of all monitoring events */ +#define _PY_MONITORING_EVENTS 17 + +/* Tables of which tools are active for each monitored event. 
*/ +typedef struct _Py_LocalMonitors { + uint8_t tools[_PY_MONITORING_LOCAL_EVENTS]; +} _Py_LocalMonitors; + +typedef struct _Py_GlobalMonitors { + uint8_t tools[_PY_MONITORING_UNGROUPED_EVENTS]; +} _Py_GlobalMonitors; + /* Each instruction in a code object is a fixed-width value, * currently 2 bytes: 1-byte opcode + 1-byte oparg. The EXTENDED_ARG * opcode allows for larger values but the current limit is 3 uses * 2**32 - 1, rather than INT_MAX. */ -typedef uint16_t _Py_CODEUNIT; +typedef union { + uint16_t cache; + struct { + uint8_t code; + uint8_t arg; + } op; +} _Py_CODEUNIT; + + +/* These macros only remain defined for compatibility. */ +#define _Py_OPCODE(word) ((word).op.code) +#define _Py_OPARG(word) ((word).op.arg) + +static inline _Py_CODEUNIT +_py_make_codeunit(uint8_t opcode, uint8_t oparg) +{ + // No designated initialisers because of C++ compat + _Py_CODEUNIT word; + word.op.code = opcode; + word.op.arg = oparg; + return word; +} + +static inline void +_py_set_opcode(_Py_CODEUNIT *word, uint8_t opcode) +{ + word->op.code = opcode; +} -#ifdef WORDS_BIGENDIAN -# define _Py_OPCODE(word) ((word) >> 8) -# define _Py_OPARG(word) ((word) & 255) -# define _Py_MAKECODEUNIT(opcode, oparg) (((opcode)<<8)|(oparg)) -#else -# define _Py_OPCODE(word) ((word) & 255) -# define _Py_OPARG(word) ((word) >> 8) -# define _Py_MAKECODEUNIT(opcode, oparg) ((opcode)|((oparg)<<8)) -#endif +#define _Py_MAKE_CODEUNIT(opcode, oparg) _py_make_codeunit((opcode), (oparg)) +#define _Py_SET_OPCODE(word, opcode) _py_set_opcode(&(word), (opcode)) -// Use "unsigned char" instead of "uint8_t" here to avoid illegal aliasing: -#define _Py_SET_OPCODE(word, opcode) \ - do { ((unsigned char *)&(word))[0] = (opcode); } while (0) typedef struct { PyObject *_co_code; @@ -39,6 +73,42 @@ typedef struct { PyObject *_co_freevars; } _PyCoCached; +/* Ancillary data structure used for instrumentation. + Line instrumentation creates an array of + these. One entry per code unit. */ +typedef struct { + uint8_t original_opcode; + int8_t line_delta; +} _PyCoLineInstrumentationData; + + +typedef struct { + int size; + int capacity; + struct _PyExecutorObject *executors[1]; +} _PyExecutorArray; + +/* Main data structure used for instrumentation. + * This is allocated when needed for instrumentation + */ +typedef struct { + /* Monitoring specific to this code object */ + _Py_LocalMonitors local_monitors; + /* Monitoring that is active on this code object */ + _Py_LocalMonitors active_monitors; + /* The tools that are to be notified for events for the matching code unit */ + uint8_t *tools; + /* Information to support line events */ + _PyCoLineInstrumentationData *lines; + /* The tools that are to be notified for line events for the matching code unit */ + uint8_t *line_tools; + /* Information to support instruction events */ + /* The underlying instructions, which can themselves be instrumented */ + uint8_t *per_instruction_opcodes; + /* The tools that are to be notified for instruction events for the matching code unit */ + uint8_t *per_instruction_tools; +} _PyCoMonitoringData; + // To avoid repeating ourselves in deepfreeze.py, all PyCodeObject members are // defined in this macro: #define _PyCode_DEF(SIZE) { \ @@ -70,7 +140,6 @@ typedef struct { PyObject *co_exceptiontable; /* Byte string encoding exception handling \ table */ \ int co_flags; /* CO_..., see below */ \ - short _co_linearray_entry_size; /* Size of each entry in _co_linearray */ \ \ /* The rest are not so impactful on performance.
*/ \ int co_argcount; /* #arguments, except *args */ \ @@ -84,9 +153,9 @@ typedef struct { int co_nlocalsplus; /* number of local + cell + free variables */ \ int co_framesize; /* Size of frame in words */ \ int co_nlocals; /* number of local variables */ \ - int co_nplaincellvars; /* number of non-arg cell variables */ \ int co_ncellvars; /* total number of cell variables */ \ int co_nfreevars; /* number of free variables */ \ + uint32_t co_version; /* version number */ \ \ PyObject *co_localsplusnames; /* tuple mapping offsets to names */ \ PyObject *co_localspluskinds; /* Bytes mapping to local kinds (one byte \ @@ -96,9 +165,11 @@ typedef struct { PyObject *co_qualname; /* unicode (qualname, for reference) */ \ PyObject *co_linetable; /* bytes object that holds location info */ \ PyObject *co_weakreflist; /* to support weakrefs to code objects */ \ + _PyExecutorArray *co_executors; /* executors from optimizer */ \ _PyCoCached *_co_cached; /* cached co_* attributes */ \ + uintptr_t _co_instrumentation_version; /* current instrumentation version */ \ + _PyCoMonitoringData *_co_monitoring; /* Monitoring data */ \ int _co_firsttraceable; /* index of first traceable instruction */ \ - char *_co_linearray; /* array of line offsets */ \ /* Scratch space for extra data relating to the code object. \ Type is a void* to keep the format private in codeobject.c to force \ people to go through the proper APIs. */ \ @@ -147,23 +218,54 @@ struct PyCodeObject _PyCode_DEF(1); PyAPI_DATA(PyTypeObject) PyCode_Type; #define PyCode_Check(op) Py_IS_TYPE((op), &PyCode_Type) -#define PyCode_GetNumFree(op) ((op)->co_nfreevars) -#define _PyCode_CODE(CO) ((_Py_CODEUNIT *)(CO)->co_code_adaptive) + +static inline Py_ssize_t PyCode_GetNumFree(PyCodeObject *op) { + assert(PyCode_Check(op)); + return op->co_nfreevars; +} + +static inline int PyCode_GetFirstFree(PyCodeObject *op) { + assert(PyCode_Check(op)); + return op->co_nlocalsplus - op->co_nfreevars; +} + +#define _PyCode_CODE(CO) _Py_RVALUE((_Py_CODEUNIT *)(CO)->co_code_adaptive) #define _PyCode_NBYTES(CO) (Py_SIZE(CO) * (Py_ssize_t)sizeof(_Py_CODEUNIT)) -/* Public interface */ -PyAPI_FUNC(PyCodeObject *) PyCode_New( +/* Unstable public interface */ +PyAPI_FUNC(PyCodeObject *) PyUnstable_Code_New( int, int, int, int, int, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, int, PyObject *, PyObject *); -PyAPI_FUNC(PyCodeObject *) PyCode_NewWithPosOnlyArgs( +PyAPI_FUNC(PyCodeObject *) PyUnstable_Code_NewWithPosOnlyArgs( int, int, int, int, int, int, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, int, PyObject *, PyObject *); /* same as struct above */ +// Old names -- remove when this API changes: +_Py_DEPRECATED_EXTERNALLY(3.12) static inline PyCodeObject * +PyCode_New( + int a, int b, int c, int d, int e, PyObject *f, PyObject *g, + PyObject *h, PyObject *i, PyObject *j, PyObject *k, + PyObject *l, PyObject *m, PyObject *n, int o, PyObject *p, + PyObject *q) +{ + return PyUnstable_Code_New( + a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q); +} +_Py_DEPRECATED_EXTERNALLY(3.12) static inline PyCodeObject * +PyCode_NewWithPosOnlyArgs( + int a, int poac, int b, int c, int d, int e, PyObject *f, PyObject *g, + PyObject *h, PyObject *i, PyObject *j, PyObject *k, + PyObject *l, PyObject *m, PyObject *n, int o, PyObject *p, + PyObject *q) +{ + return PyUnstable_Code_NewWithPosOnlyArgs( + a, poac, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q); +} /* 
Creates a new empty code object with the specified source location. */ PyAPI_FUNC(PyCodeObject *) @@ -176,6 +278,46 @@ PyAPI_FUNC(int) PyCode_Addr2Line(PyCodeObject *, int); PyAPI_FUNC(int) PyCode_Addr2Location(PyCodeObject *, int, int *, int *, int *, int *); +#define PY_FOREACH_CODE_EVENT(V) \ + V(CREATE) \ + V(DESTROY) + +typedef enum { + #define PY_DEF_EVENT(op) PY_CODE_EVENT_##op, + PY_FOREACH_CODE_EVENT(PY_DEF_EVENT) + #undef PY_DEF_EVENT +} PyCodeEvent; + + +/* + * A callback that is invoked for different events in a code object's lifecycle. + * + * The callback is invoked with a borrowed reference to co, after it is + * created and before it is destroyed. + * + * If the callback sets an exception, it must return -1. Otherwise + * it should return 0. + */ +typedef int (*PyCode_WatchCallback)( + PyCodeEvent event, + PyCodeObject* co); + +/* + * Register a per-interpreter callback that will be invoked for code object + * lifecycle events. + * + * Returns a handle that may be passed to PyCode_ClearWatcher on success, + * or -1 and sets an error if no more handles are available. + */ +PyAPI_FUNC(int) PyCode_AddWatcher(PyCode_WatchCallback callback); + +/* + * Clear the watcher associated with the watcher_id handle. + * + * Returns 0 on success or -1 if no watcher exists for the provided id. + */ +PyAPI_FUNC(int) PyCode_ClearWatcher(int watcher_id); + /* for internal use only */ struct _opaque { int computed_line; @@ -207,11 +349,21 @@ PyAPI_FUNC(PyObject*) _PyCode_ConstantKey(PyObject *obj); PyAPI_FUNC(PyObject*) PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names, PyObject *lnotab); - -PyAPI_FUNC(int) _PyCode_GetExtra(PyObject *code, Py_ssize_t index, - void **extra); -PyAPI_FUNC(int) _PyCode_SetExtra(PyObject *code, Py_ssize_t index, - void *extra); +PyAPI_FUNC(int) PyUnstable_Code_GetExtra( + PyObject *code, Py_ssize_t index, void **extra); +PyAPI_FUNC(int) PyUnstable_Code_SetExtra( + PyObject *code, Py_ssize_t index, void *extra); +// Old names -- remove when this API changes: +_Py_DEPRECATED_EXTERNALLY(3.12) static inline int +_PyCode_GetExtra(PyObject *code, Py_ssize_t index, void **extra) +{ + return PyUnstable_Code_GetExtra(code, index, extra); +} +_Py_DEPRECATED_EXTERNALLY(3.12) static inline int +_PyCode_SetExtra(PyObject *code, Py_ssize_t index, void *extra) +{ + return PyUnstable_Code_SetExtra(code, index, extra); +} /* Equivalent to getattr(code, 'co_code') in Python. Returns a strong reference to a bytes object. */ diff --git a/Include/cpython/compile.h b/Include/cpython/compile.h index f5a62a8ec6dd0cd..0d587505ef7f85f 100644 --- a/Include/cpython/compile.h +++ b/Include/cpython/compile.h @@ -19,9 +19,10 @@ #define PyCF_TYPE_COMMENTS 0x1000 #define PyCF_ALLOW_TOP_LEVEL_AWAIT 0x2000 #define PyCF_ALLOW_INCOMPLETE_INPUT 0x4000 +#define PyCF_OPTIMIZED_AST (0x8000 | PyCF_ONLY_AST) #define PyCF_COMPILE_MASK (PyCF_ONLY_AST | PyCF_ALLOW_TOP_LEVEL_AWAIT | \ PyCF_TYPE_COMMENTS | PyCF_DONT_IMPLY_DEDENT | \ - PyCF_ALLOW_INCOMPLETE_INPUT) + PyCF_ALLOW_INCOMPLETE_INPUT | PyCF_OPTIMIZED_AST) typedef struct { int cf_flags; /* bitmask of CO_xxx flags relevant to future */ diff --git a/Include/cpython/complexobject.h b/Include/cpython/complexobject.h index b7d7283ae889650..fbdc6a91fe895c0 100644 --- a/Include/cpython/complexobject.h +++ b/Include/cpython/complexobject.h @@ -7,8 +7,7 @@ typedef struct { double imag; } Py_complex; -/* Operations on complex numbers from complexmodule.c */ - +// Operations on complex numbers. 
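/* Editor's note: illustrative sketch, not part of the patch. The Py_complex
   helpers declared below are CPython-internal functions that operate on plain
   C values; the result is typically boxed with the public PyComplex API. The
   function name and the sample values are hypothetical. */
static PyObject *
demo_complex_sum(void)
{
    Py_complex a, b, r;
    a.real = 1.0; a.imag = 2.0;   /* 1+2j */
    b.real = 3.0; b.imag = -1.0;  /* 3-1j */
    r = _Py_c_sum(a, b);              /* component-wise addition */
    return PyComplex_FromCComplex(r); /* box as a Python complex object */
}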
PyAPI_FUNC(Py_complex) _Py_c_sum(Py_complex, Py_complex); PyAPI_FUNC(Py_complex) _Py_c_diff(Py_complex, Py_complex); PyAPI_FUNC(Py_complex) _Py_c_neg(Py_complex); @@ -17,6 +16,7 @@ PyAPI_FUNC(Py_complex) _Py_c_quot(Py_complex, Py_complex); PyAPI_FUNC(Py_complex) _Py_c_pow(Py_complex, Py_complex); PyAPI_FUNC(double) _Py_c_abs(Py_complex); + /* Complex object interface */ /* @@ -31,14 +31,3 @@ typedef struct { PyAPI_FUNC(PyObject *) PyComplex_FromCComplex(Py_complex); PyAPI_FUNC(Py_complex) PyComplex_AsCComplex(PyObject *op); - -#ifdef Py_BUILD_CORE -/* Format the object based on the format_spec, as defined in PEP 3101 - (Advanced String Formatting). */ -extern int _PyComplex_FormatAdvancedWriter( - _PyUnicodeWriter *writer, - PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, - Py_ssize_t end); -#endif // Py_BUILD_CORE diff --git a/Include/cpython/context.h b/Include/cpython/context.h index 9879fc7192ebb8f..a3249fc29b082e4 100644 --- a/Include/cpython/context.h +++ b/Include/cpython/context.h @@ -67,10 +67,6 @@ PyAPI_FUNC(PyObject *) PyContextVar_Set(PyObject *var, PyObject *value); PyAPI_FUNC(int) PyContextVar_Reset(PyObject *var, PyObject *token); -/* This method is exposed only for CPython tests. Don not use it. */ -PyAPI_FUNC(PyObject *) _PyContext_NewHamtForTests(void); - - #ifdef __cplusplus } #endif diff --git a/Include/cpython/descrobject.h b/Include/cpython/descrobject.h index e2ea1b9a2d30580..bbad8b59c225ab5 100644 --- a/Include/cpython/descrobject.h +++ b/Include/cpython/descrobject.h @@ -57,8 +57,6 @@ typedef struct { void *d_wrapped; /* This can be any function pointer */ } PyWrapperDescrObject; -PyAPI_DATA(PyTypeObject) _PyMethodWrapper_Type; - PyAPI_FUNC(PyObject *) PyDescr_NewWrapper(PyTypeObject *, struct wrapperbase *, void *); PyAPI_FUNC(int) PyDescr_IsData(PyObject *); diff --git a/Include/cpython/dictobject.h b/Include/cpython/dictobject.h index 2dff59ef0b8a6b8..944965fb9e5351a 100644 --- a/Include/cpython/dictobject.h +++ b/Include/cpython/dictobject.h @@ -16,7 +16,11 @@ typedef struct { /* Dictionary version: globally unique, value change each time the dictionary is modified */ +#ifdef Py_BUILD_CORE uint64_t ma_version_tag; +#else + Py_DEPRECATED(3.12) uint64_t ma_version_tag; +#endif PyDictKeysObject *ma_keys; @@ -29,21 +33,10 @@ typedef struct { } PyDictObject; PyAPI_FUNC(PyObject *) _PyDict_GetItem_KnownHash(PyObject *mp, PyObject *key, - Py_hash_t hash); -PyAPI_FUNC(PyObject *) _PyDict_GetItemWithError(PyObject *dp, PyObject *key); -PyAPI_FUNC(PyObject *) _PyDict_GetItemIdWithError(PyObject *dp, - _Py_Identifier *key); + Py_hash_t hash); PyAPI_FUNC(PyObject *) _PyDict_GetItemStringWithError(PyObject *, const char *); PyAPI_FUNC(PyObject *) PyDict_SetDefault( PyObject *mp, PyObject *key, PyObject *defaultobj); -PyAPI_FUNC(int) _PyDict_SetItem_KnownHash(PyObject *mp, PyObject *key, - PyObject *item, Py_hash_t hash); -PyAPI_FUNC(int) _PyDict_DelItem_KnownHash(PyObject *mp, PyObject *key, - Py_hash_t hash); -PyAPI_FUNC(int) _PyDict_DelItemIf(PyObject *mp, PyObject *key, - int (*predicate)(PyObject *value)); -PyAPI_FUNC(int) _PyDict_Next( - PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value, Py_hash_t *hash); /* Get the number of items of a dictionary. 
*/ static inline Py_ssize_t PyDict_GET_SIZE(PyObject *op) { @@ -54,45 +47,28 @@ static inline Py_ssize_t PyDict_GET_SIZE(PyObject *op) { } #define PyDict_GET_SIZE(op) PyDict_GET_SIZE(_PyObject_CAST(op)) -PyAPI_FUNC(int) _PyDict_Contains_KnownHash(PyObject *, PyObject *, Py_hash_t); -PyAPI_FUNC(int) _PyDict_ContainsId(PyObject *, _Py_Identifier *); -PyAPI_FUNC(PyObject *) _PyDict_NewPresized(Py_ssize_t minused); -PyAPI_FUNC(void) _PyDict_MaybeUntrack(PyObject *mp); -PyAPI_FUNC(int) _PyDict_HasOnlyStringKeys(PyObject *mp); -PyAPI_FUNC(Py_ssize_t) _PyDict_SizeOf(PyDictObject *); -PyAPI_FUNC(PyObject *) _PyDict_Pop(PyObject *, PyObject *, PyObject *); -#define _PyDict_HasSplitTable(d) ((d)->ma_values != NULL) - -/* Like PyDict_Merge, but override can be 0, 1 or 2. If override is 0, - the first occurrence of a key wins, if override is 1, the last occurrence - of a key wins, if override is 2, a KeyError with conflicting key as - argument is raised. -*/ -PyAPI_FUNC(int) _PyDict_MergeEx(PyObject *mp, PyObject *other, int override); -PyAPI_FUNC(int) _PyDict_SetItemId(PyObject *dp, _Py_Identifier *key, PyObject *item); - -PyAPI_FUNC(int) _PyDict_DelItemId(PyObject *mp, _Py_Identifier *key); -PyAPI_FUNC(void) _PyDict_DebugMallocStats(FILE *out); - -/* _PyDictView */ +PyAPI_FUNC(int) PyDict_ContainsString(PyObject *mp, const char *key); -typedef struct { - PyObject_HEAD - PyDictObject *dv_dict; -} _PyDictViewObject; +PyAPI_FUNC(PyObject *) _PyDict_NewPresized(Py_ssize_t minused); -PyAPI_FUNC(PyObject *) _PyDictView_New(PyObject *, PyTypeObject *); -PyAPI_FUNC(PyObject *) _PyDictView_Intersect(PyObject* self, PyObject *other); +PyAPI_FUNC(int) PyDict_Pop(PyObject *dict, PyObject *key, PyObject **result); +PyAPI_FUNC(int) PyDict_PopString(PyObject *dict, const char *key, PyObject **result); +PyAPI_FUNC(PyObject *) _PyDict_Pop(PyObject *dict, PyObject *key, PyObject *default_value); /* Dictionary watchers */ +#define PY_FOREACH_DICT_EVENT(V) \ + V(ADDED) \ + V(MODIFIED) \ + V(DELETED) \ + V(CLONED) \ + V(CLEARED) \ + V(DEALLOCATED) + typedef enum { - PyDict_EVENT_ADDED, - PyDict_EVENT_MODIFIED, - PyDict_EVENT_DELETED, - PyDict_EVENT_CLONED, - PyDict_EVENT_CLEARED, - PyDict_EVENT_DEALLOCATED, + #define PY_DEF_EVENT(EVENT) PyDict_EVENT_##EVENT, + PY_FOREACH_DICT_EVENT(PY_DEF_EVENT) + #undef PY_DEF_EVENT } PyDict_WatchEvent; // Callback to be invoked when a watched dict is cleared, dealloced, or modified. diff --git a/Include/cpython/fileobject.h b/Include/cpython/fileobject.h index b70ec318986d821..e2d89c522bdd132 100644 --- a/Include/cpython/fileobject.h +++ b/Include/cpython/fileobject.h @@ -3,7 +3,6 @@ #endif PyAPI_FUNC(char *) Py_UniversalNewlineFgets(char *, int, FILE*, PyObject *); -PyAPI_FUNC(char *) _Py_UniversalNewlineFgetsWithSize(char *, int, FILE*, PyObject *, size_t*); /* The std printer acts as a preliminary sys.stderr until the new io infrastructure is in place. 
*/ @@ -15,5 +14,3 @@ typedef PyObject * (*Py_OpenCodeHookFunction)(PyObject *, void *); PyAPI_FUNC(PyObject *) PyFile_OpenCode(const char *utf8path); PyAPI_FUNC(PyObject *) PyFile_OpenCodeObject(PyObject *path); PyAPI_FUNC(int) PyFile_SetOpenCodeHook(Py_OpenCodeHookFunction hook, void *userData); - -PyAPI_FUNC(int) _PyLong_FileDescriptor_Converter(PyObject *, void *); diff --git a/Include/cpython/floatobject.h b/Include/cpython/floatobject.h index 7795d9f83f05cb8..127093098bfe642 100644 --- a/Include/cpython/floatobject.h +++ b/Include/cpython/floatobject.h @@ -7,9 +7,15 @@ typedef struct { double ob_fval; } PyFloatObject; -// Macro version of PyFloat_AsDouble() trading safety for speed. +#define _PyFloat_CAST(op) \ + (assert(PyFloat_Check(op)), _Py_CAST(PyFloatObject*, op)) + +// Static inline version of PyFloat_AsDouble() trading safety for speed. // It doesn't check if op is a double object. -#define PyFloat_AS_DOUBLE(op) (((PyFloatObject *)(op))->ob_fval) +static inline double PyFloat_AS_DOUBLE(PyObject *op) { + return _PyFloat_CAST(op)->ob_fval; +} +#define PyFloat_AS_DOUBLE(op) PyFloat_AS_DOUBLE(_PyObject_CAST(op)) PyAPI_FUNC(int) PyFloat_Pack2(double x, char *p, int le); diff --git a/Include/cpython/funcobject.h b/Include/cpython/funcobject.h index dd8f20b2c20b39b..de2013323d2c720 100644 --- a/Include/cpython/funcobject.h +++ b/Include/cpython/funcobject.h @@ -41,6 +41,7 @@ typedef struct { PyObject *func_weakreflist; /* List of weak references */ PyObject *func_module; /* The __module__ attribute, can be anything */ PyObject *func_annotations; /* Annotations, a dict or NULL */ + PyObject *func_typeparams; /* Tuple of active type variables or NULL */ vectorcallfunc vectorcall; /* Version number for use by specializer. * Can set to non-zero when we want to specialize. @@ -78,12 +79,6 @@ PyAPI_FUNC(int) PyFunction_SetClosure(PyObject *, PyObject *); PyAPI_FUNC(PyObject *) PyFunction_GetAnnotations(PyObject *); PyAPI_FUNC(int) PyFunction_SetAnnotations(PyObject *, PyObject *); -PyAPI_FUNC(PyObject *) _PyFunction_Vectorcall( - PyObject *func, - PyObject *const *stack, - size_t nargsf, - PyObject *kwnames); - #define _PyFunction_CAST(func) \ (assert(PyFunction_Check(func)), _Py_CAST(PyFunctionObject*, func)) @@ -131,6 +126,55 @@ PyAPI_DATA(PyTypeObject) PyStaticMethod_Type; PyAPI_FUNC(PyObject *) PyClassMethod_New(PyObject *); PyAPI_FUNC(PyObject *) PyStaticMethod_New(PyObject *); +#define PY_FOREACH_FUNC_EVENT(V) \ + V(CREATE) \ + V(DESTROY) \ + V(MODIFY_CODE) \ + V(MODIFY_DEFAULTS) \ + V(MODIFY_KWDEFAULTS) + +typedef enum { + #define PY_DEF_EVENT(EVENT) PyFunction_EVENT_##EVENT, + PY_FOREACH_FUNC_EVENT(PY_DEF_EVENT) + #undef PY_DEF_EVENT +} PyFunction_WatchEvent; + +/* + * A callback that is invoked for different events in a function's lifecycle. + * + * The callback is invoked with a borrowed reference to func, after it is + * created and before it is modified or destroyed. The callback should not + * modify func. + * + * When a function's code object, defaults, or kwdefaults are modified the + * callback will be invoked with the respective event and new_value will + * contain a borrowed reference to the new value that is about to be stored in + * the function. Otherwise the third argument is NULL. + * + * If the callback returns with an exception set, it must return -1. Otherwise + * it should return 0. 
+ */ +typedef int (*PyFunction_WatchCallback)( + PyFunction_WatchEvent event, + PyFunctionObject *func, + PyObject *new_value); + +/* + * Register a per-interpreter callback that will be invoked for function lifecycle + * events. + * + * Returns a handle that may be passed to PyFunction_ClearWatcher on success, + * or -1 and sets an error if no more handles are available. + */ +PyAPI_FUNC(int) PyFunction_AddWatcher(PyFunction_WatchCallback callback); + +/* + * Clear the watcher associated with the watcher_id handle. + * + * Returns 0 on success or -1 if no watcher exists for the supplied id. + */ +PyAPI_FUNC(int) PyFunction_ClearWatcher(int watcher_id); + #ifdef __cplusplus } #endif diff --git a/Include/cpython/genobject.h b/Include/cpython/genobject.h index 6127ba7babb80f2..49e46c277d75ae8 100644 --- a/Include/cpython/genobject.h +++ b/Include/cpython/genobject.h @@ -13,8 +13,6 @@ extern "C" { and coroutine objects. */ #define _PyGenObject_HEAD(prefix) \ PyObject_HEAD \ - /* The code object backing the generator */ \ - PyCodeObject *prefix##_code; \ /* List of weak reference. */ \ PyObject *prefix##_weakreflist; \ /* Name of the generator. */ \ @@ -28,7 +26,7 @@ extern "C" { char prefix##_running_async; \ /* The frame */ \ int8_t prefix##_frame_state; \ - PyObject *prefix##_iframe[1]; + PyObject *prefix##_iframe[1]; \ typedef struct { /* The gi_ prefix is intended to remind of generator-iterator. */ @@ -43,9 +41,7 @@ PyAPI_DATA(PyTypeObject) PyGen_Type; PyAPI_FUNC(PyObject *) PyGen_New(PyFrameObject *); PyAPI_FUNC(PyObject *) PyGen_NewWithQualName(PyFrameObject *, PyObject *name, PyObject *qualname); -PyAPI_FUNC(int) _PyGen_SetStopIterationValue(PyObject *); -PyAPI_FUNC(int) _PyGen_FetchStopIterationValue(PyObject **); -PyAPI_FUNC(void) _PyGen_Finalize(PyObject *self); +PyAPI_FUNC(PyCodeObject *) PyGen_GetCode(PyGenObject *gen); /* --- PyCoroObject ------------------------------------------------------- */ @@ -55,7 +51,6 @@ typedef struct { } PyCoroObject; PyAPI_DATA(PyTypeObject) PyCoro_Type; -PyAPI_DATA(PyTypeObject) _PyCoroWrapper_Type; #define PyCoro_CheckExact(op) Py_IS_TYPE((op), &PyCoro_Type) PyAPI_FUNC(PyObject *) PyCoro_New(PyFrameObject *, @@ -70,14 +65,14 @@ typedef struct { PyAPI_DATA(PyTypeObject) PyAsyncGen_Type; PyAPI_DATA(PyTypeObject) _PyAsyncGenASend_Type; -PyAPI_DATA(PyTypeObject) _PyAsyncGenWrappedValue_Type; -PyAPI_DATA(PyTypeObject) _PyAsyncGenAThrow_Type; PyAPI_FUNC(PyObject *) PyAsyncGen_New(PyFrameObject *, PyObject *name, PyObject *qualname); #define PyAsyncGen_CheckExact(op) Py_IS_TYPE((op), &PyAsyncGen_Type) +#define PyAsyncGenASend_CheckExact(op) Py_IS_TYPE((op), &_PyAsyncGenASend_Type) + #undef _PyGenObject_HEAD diff --git a/Include/cpython/import.h b/Include/cpython/import.h index a69b4f34def342a..7daf0b84fcf71bf 100644 --- a/Include/cpython/import.h +++ b/Include/cpython/import.h @@ -4,27 +4,11 @@ PyMODINIT_FUNC PyInit__imp(void); -PyAPI_FUNC(int) _PyImport_IsInitialized(PyInterpreterState *); - -PyAPI_FUNC(PyObject *) _PyImport_GetModuleId(_Py_Identifier *name); -PyAPI_FUNC(int) _PyImport_SetModule(PyObject *name, PyObject *module); -PyAPI_FUNC(int) _PyImport_SetModuleString(const char *name, PyObject* module); - -PyAPI_FUNC(void) _PyImport_AcquireLock(void); -PyAPI_FUNC(int) _PyImport_ReleaseLock(void); - -PyAPI_FUNC(int) _PyImport_FixupBuiltin( - PyObject *mod, - const char *name, /* UTF-8 encoded string */ - PyObject *modules - ); -PyAPI_FUNC(int) _PyImport_FixupExtensionObject(PyObject*, PyObject *, - PyObject *, PyObject *); - struct _inittab { const 
char *name; /* ASCII encoded string */ PyObject* (*initfunc)(void); }; +// This is not used after Py_Initialize() is called. PyAPI_DATA(struct _inittab *) PyImport_Inittab; PyAPI_FUNC(int) PyImport_ExtendInittab(struct _inittab *newtab); @@ -33,13 +17,9 @@ struct _frozen { const unsigned char *code; int size; int is_package; - PyObject *(*get_code)(void); }; /* Embedding apps may change this pointer to point to their favorite collection of frozen modules: */ PyAPI_DATA(const struct _frozen *) PyImport_FrozenModules; - -PyAPI_DATA(PyObject *) _PyImport_GetModuleAttr(PyObject *, PyObject *); -PyAPI_DATA(PyObject *) _PyImport_GetModuleAttrString(const char *, const char *); diff --git a/Include/cpython/initconfig.h b/Include/cpython/initconfig.h index 6ce42b4c09502f6..87c059c521cbc92 100644 --- a/Include/cpython/initconfig.h +++ b/Include/cpython/initconfig.h @@ -180,6 +180,8 @@ typedef struct PyConfig { int safe_path; int int_max_str_digits; + int cpu_count; + /* --- Path configuration inputs ------------ */ int pathconfig_warnings; wchar_t *program_name; @@ -204,6 +206,9 @@ typedef struct PyConfig { wchar_t *run_module; wchar_t *run_filename; + /* --- Set by Py_Main() -------------------------- */ + wchar_t *sys_path_0; + /* --- Private fields ---------------------------- */ // Install importlib? If equals to 0, importlib is not initialized at all. @@ -215,6 +220,17 @@ typedef struct PyConfig { // If non-zero, we believe we're running from a source tree. int _is_python_build; + +#ifdef Py_STATS + // If non-zero, turns on statistics gathering. + int _pystats; +#endif + +#ifdef Py_DEBUG + // If not empty, import a non-__main__ module before site.py is executed. + // PYTHON_PRESITE=package.module or -X presite=package.module + wchar_t *run_presite; +#endif } PyConfig; PyAPI_FUNC(void) PyConfig_InitPythonConfig(PyConfig *config); @@ -241,31 +257,6 @@ PyAPI_FUNC(PyStatus) PyConfig_SetWideStringList(PyConfig *config, Py_ssize_t length, wchar_t **items); -/* --- PyInterpreterConfig ------------------------------------ */ - -typedef struct { - int allow_fork; - int allow_exec; - int allow_threads; - int allow_daemon_threads; -} _PyInterpreterConfig; - -#define _PyInterpreterConfig_INIT \ - { \ - .allow_fork = 0, \ - .allow_exec = 0, \ - .allow_threads = 1, \ - .allow_daemon_threads = 0, \ - } - -#define _PyInterpreterConfig_LEGACY_INIT \ - { \ - .allow_fork = 1, \ - .allow_exec = 1, \ - .allow_threads = 1, \ - .allow_daemon_threads = 1, \ - } - /* --- Helper functions --------------------------------------- */ /* Get the original command line arguments, before Python modified them. 
diff --git a/Include/cpython/interpreteridobject.h b/Include/cpython/interpreteridobject.h new file mode 100644 index 000000000000000..4ab9ad5d315f806 --- /dev/null +++ b/Include/cpython/interpreteridobject.h @@ -0,0 +1,11 @@ +#ifndef Py_CPYTHON_INTERPRETERIDOBJECT_H +# error "this header file must not be included directly" +#endif + +/* Interpreter ID Object */ + +PyAPI_DATA(PyTypeObject) PyInterpreterID_Type; + +PyAPI_FUNC(PyObject *) PyInterpreterID_New(int64_t); +PyAPI_FUNC(PyObject *) PyInterpreterState_GetIDObject(PyInterpreterState *); +PyAPI_FUNC(PyInterpreterState *) PyInterpreterID_LookUp(PyObject *); diff --git a/Include/cpython/listobject.h b/Include/cpython/listobject.h index 8fa82122d8d248b..8ade1b164681f9e 100644 --- a/Include/cpython/listobject.h +++ b/Include/cpython/listobject.h @@ -21,9 +21,6 @@ typedef struct { Py_ssize_t allocated; } PyListObject; -PyAPI_FUNC(PyObject *) _PyList_Extend(PyListObject *, PyObject *); -PyAPI_FUNC(void) _PyList_DebugMallocStats(FILE *out); - /* Cast argument to PyListObject* type. */ #define _PyList_CAST(op) \ (assert(PyList_Check(op)), _Py_CAST(PyListObject*, (op))) @@ -41,7 +38,12 @@ static inline Py_ssize_t PyList_GET_SIZE(PyObject *op) { static inline void PyList_SET_ITEM(PyObject *op, Py_ssize_t index, PyObject *value) { PyListObject *list = _PyList_CAST(op); + assert(0 <= index); + assert(index < list->allocated); list->ob_item[index] = value; } #define PyList_SET_ITEM(op, index, value) \ PyList_SET_ITEM(_PyObject_CAST(op), (index), _PyObject_CAST(value)) + +PyAPI_FUNC(int) PyList_Extend(PyObject *self, PyObject *iterable); +PyAPI_FUNC(int) PyList_Clear(PyObject *self); diff --git a/Include/cpython/longintrepr.h b/Include/cpython/longintrepr.h index 68dbf9c4382dc5d..f5ccbb704e8bb8e 100644 --- a/Include/cpython/longintrepr.h +++ b/Include/cpython/longintrepr.h @@ -71,20 +71,61 @@ typedef long stwodigits; /* signed variant of twodigits */ 0 <= ob_digit[i] <= MASK. The allocation function takes care of allocating extra memory so that ob_digit[0] ... ob_digit[abs(ob_size)-1] are actually available. + We always allocate memory for at least one digit, so accessing ob_digit[0] + is always safe. However, in the case ob_size == 0, the contents of + ob_digit[0] may be undefined. CAUTION: Generic code manipulating subtypes of PyVarObject has to aware that ints abuse ob_size's sign bit. */ -struct _longobject { - PyObject_VAR_HEAD +typedef struct _PyLongValue { + uintptr_t lv_tag; /* Number of digits, sign and flags */ digit ob_digit[1]; +} _PyLongValue; + +struct _longobject { + PyObject_HEAD + _PyLongValue long_value; }; -PyAPI_FUNC(PyLongObject *) _PyLong_New(Py_ssize_t); +PyAPI_FUNC(PyLongObject*) _PyLong_New(Py_ssize_t); + +// Return a copy of src. +PyAPI_FUNC(PyObject*) _PyLong_Copy(PyLongObject *src); + +PyAPI_FUNC(PyLongObject*) _PyLong_FromDigits( + int negative, + Py_ssize_t digit_count, + digit *digits); + + +/* Inline some internals for speed. These should be in pycore_long.h + * if user code didn't need them inlined. 
*/ + +#define _PyLong_SIGN_MASK 3 +#define _PyLong_NON_SIZE_BITS 3 + + +static inline int +_PyLong_IsCompact(const PyLongObject* op) { + assert(PyType_HasFeature((op)->ob_base.ob_type, Py_TPFLAGS_LONG_SUBCLASS)); + return op->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS); +} + +#define PyUnstable_Long_IsCompact _PyLong_IsCompact + +static inline Py_ssize_t +_PyLong_CompactValue(const PyLongObject *op) +{ + assert(PyType_HasFeature((op)->ob_base.ob_type, Py_TPFLAGS_LONG_SUBCLASS)); + assert(PyUnstable_Long_IsCompact(op)); + Py_ssize_t sign = 1 - (op->long_value.lv_tag & _PyLong_SIGN_MASK); + return sign * (Py_ssize_t)op->long_value.ob_digit[0]; +} + +#define PyUnstable_Long_CompactValue _PyLong_CompactValue -/* Return a copy of src. */ -PyAPI_FUNC(PyObject *) _PyLong_Copy(PyLongObject *src); #ifdef __cplusplus } diff --git a/Include/cpython/longobject.h b/Include/cpython/longobject.h index 1a73799d658fe02..fd1be29ed397d18 100644 --- a/Include/cpython/longobject.h +++ b/Include/cpython/longobject.h @@ -2,48 +2,16 @@ # error "this header file must not be included directly" #endif -PyAPI_FUNC(int) _PyLong_AsInt(PyObject *); +PyAPI_FUNC(PyObject*) PyLong_FromUnicodeObject(PyObject *u, int base); -PyAPI_FUNC(int) _PyLong_UnsignedShort_Converter(PyObject *, void *); -PyAPI_FUNC(int) _PyLong_UnsignedInt_Converter(PyObject *, void *); -PyAPI_FUNC(int) _PyLong_UnsignedLong_Converter(PyObject *, void *); -PyAPI_FUNC(int) _PyLong_UnsignedLongLong_Converter(PyObject *, void *); -PyAPI_FUNC(int) _PyLong_Size_t_Converter(PyObject *, void *); +PyAPI_FUNC(int) PyUnstable_Long_IsCompact(const PyLongObject* op); +PyAPI_FUNC(Py_ssize_t) PyUnstable_Long_CompactValue(const PyLongObject* op); -/* _PyLong_Frexp returns a double x and an exponent e such that the - true value is approximately equal to x * 2**e. e is >= 0. x is - 0.0 if and only if the input is 0 (in which case, e and x are both - zeroes); otherwise, 0.5 <= abs(x) < 1.0. On overflow, which is - possible if the number of bits doesn't fit into a Py_ssize_t, sets - OverflowError and returns -1.0 for x, 0 for e. */ -PyAPI_FUNC(double) _PyLong_Frexp(PyLongObject *a, Py_ssize_t *e); - -PyAPI_FUNC(PyObject *) PyLong_FromUnicodeObject(PyObject *u, int base); -PyAPI_FUNC(PyObject *) _PyLong_FromBytes(const char *, Py_ssize_t, int); - -/* _PyLong_Sign. Return 0 if v is 0, -1 if v < 0, +1 if v > 0. - v must not be NULL, and must be a normalized long. - There are no error cases. -*/ +// _PyLong_Sign. Return 0 if v is 0, -1 if v < 0, +1 if v > 0. +// v must not be NULL, and must be a normalized long. +// There are no error cases. PyAPI_FUNC(int) _PyLong_Sign(PyObject *v); -/* _PyLong_NumBits. Return the number of bits needed to represent the - absolute value of a long. For example, this returns 1 for 1 and -1, 2 - for 2 and -2, and 2 for 3 and -3. It returns 0 for 0. - v must not be NULL, and must be a normalized long. - (size_t)-1 is returned and OverflowError set if the true result doesn't - fit in a size_t. -*/ -PyAPI_FUNC(size_t) _PyLong_NumBits(PyObject *v); - -/* _PyLong_DivmodNear. Given integers a and b, compute the nearest - integer q to the exact quotient a / b, rounding to the nearest even integer - in the case of a tie. Return (q, r), where r = a - q*b. The remainder r - will satisfy abs(r) <= abs(b)/2, with equality possible only if q is - even. 
-*/ -PyAPI_FUNC(PyObject *) _PyLong_DivmodNear(PyObject *, PyObject *); - /* _PyLong_FromByteArray: View the n unsigned bytes as a binary integer in base 256, and return a Python int with the same numeric value. If n is 0, the integer is 0. Else: @@ -84,12 +52,5 @@ PyAPI_FUNC(int) _PyLong_AsByteArray(PyLongObject* v, unsigned char* bytes, size_t n, int little_endian, int is_signed); -/* _PyLong_Format: Convert the long to a string object with given base, - appending a base prefix of 0[box] if base is 2, 8 or 16. */ -PyAPI_FUNC(PyObject *) _PyLong_Format(PyObject *obj, int base); - /* For use by the gcd function in mathmodule.c */ PyAPI_FUNC(PyObject *) _PyLong_GCD(PyObject *, PyObject *); - -PyAPI_FUNC(PyObject *) _PyLong_Rshift(PyObject *, size_t); -PyAPI_FUNC(PyObject *) _PyLong_Lshift(PyObject *, size_t); diff --git a/Include/cpython/memoryobject.h b/Include/cpython/memoryobject.h new file mode 100644 index 000000000000000..961161b70f20580 --- /dev/null +++ b/Include/cpython/memoryobject.h @@ -0,0 +1,50 @@ +#ifndef Py_CPYTHON_MEMORYOBJECT_H +# error "this header file must not be included directly" +#endif + +/* The structs are declared here so that macros can work, but they shouldn't + be considered public. Don't access their fields directly, use the macros + and functions instead! */ +#define _Py_MANAGED_BUFFER_RELEASED 0x001 /* access to exporter blocked */ +#define _Py_MANAGED_BUFFER_FREE_FORMAT 0x002 /* free format */ + +typedef struct { + PyObject_HEAD + int flags; /* state flags */ + Py_ssize_t exports; /* number of direct memoryview exports */ + Py_buffer master; /* snapshot buffer obtained from the original exporter */ +} _PyManagedBufferObject; + + +/* memoryview state flags */ +#define _Py_MEMORYVIEW_RELEASED 0x001 /* access to master buffer blocked */ +#define _Py_MEMORYVIEW_C 0x002 /* C-contiguous layout */ +#define _Py_MEMORYVIEW_FORTRAN 0x004 /* Fortran contiguous layout */ +#define _Py_MEMORYVIEW_SCALAR 0x008 /* scalar: ndim = 0 */ +#define _Py_MEMORYVIEW_PIL 0x010 /* PIL-style layout */ +#define _Py_MEMORYVIEW_RESTRICTED 0x020 /* Disallow new references to the memoryview's buffer */ + +typedef struct { + PyObject_VAR_HEAD + _PyManagedBufferObject *mbuf; /* managed buffer */ + Py_hash_t hash; /* hash value for read-only views */ + int flags; /* state flags */ + Py_ssize_t exports; /* number of buffer re-exports */ + Py_buffer view; /* private copy of the exporter's view */ + PyObject *weakreflist; + Py_ssize_t ob_array[1]; /* shape, strides, suboffsets */ +} PyMemoryViewObject; + +#define _PyMemoryView_CAST(op) _Py_CAST(PyMemoryViewObject*, op) + +/* Get a pointer to the memoryview's private copy of the exporter's buffer. */ +static inline Py_buffer* PyMemoryView_GET_BUFFER(PyObject *op) { + return (&_PyMemoryView_CAST(op)->view); +} +#define PyMemoryView_GET_BUFFER(op) PyMemoryView_GET_BUFFER(_PyObject_CAST(op)) + +/* Get a pointer to the exporting object (this may be NULL!). 
*/ +static inline PyObject* PyMemoryView_GET_BASE(PyObject *op) { + return _PyMemoryView_CAST(op)->view.obj; +} +#define PyMemoryView_GET_BASE(op) PyMemoryView_GET_BASE(_PyObject_CAST(op)) diff --git a/Include/cpython/modsupport.h b/Include/cpython/modsupport.h deleted file mode 100644 index d8458923b3fab8f..000000000000000 --- a/Include/cpython/modsupport.h +++ /dev/null @@ -1,110 +0,0 @@ -#ifndef Py_CPYTHON_MODSUPPORT_H -# error "this header file must not be included directly" -#endif - -/* If PY_SSIZE_T_CLEAN is defined, each functions treats #-specifier - to mean Py_ssize_t */ -#ifdef PY_SSIZE_T_CLEAN -#define _Py_VaBuildStack _Py_VaBuildStack_SizeT -#else -PyAPI_FUNC(PyObject *) _Py_VaBuildValue_SizeT(const char *, va_list); -PyAPI_FUNC(PyObject **) _Py_VaBuildStack_SizeT( - PyObject **small_stack, - Py_ssize_t small_stack_len, - const char *format, - va_list va, - Py_ssize_t *p_nargs); -#endif - -PyAPI_FUNC(int) _PyArg_UnpackStack( - PyObject *const *args, - Py_ssize_t nargs, - const char *name, - Py_ssize_t min, - Py_ssize_t max, - ...); - -PyAPI_FUNC(int) _PyArg_NoKeywords(const char *funcname, PyObject *kwargs); -PyAPI_FUNC(int) _PyArg_NoKwnames(const char *funcname, PyObject *kwnames); -PyAPI_FUNC(int) _PyArg_NoPositional(const char *funcname, PyObject *args); -#define _PyArg_NoKeywords(funcname, kwargs) \ - ((kwargs) == NULL || _PyArg_NoKeywords((funcname), (kwargs))) -#define _PyArg_NoKwnames(funcname, kwnames) \ - ((kwnames) == NULL || _PyArg_NoKwnames((funcname), (kwnames))) -#define _PyArg_NoPositional(funcname, args) \ - ((args) == NULL || _PyArg_NoPositional((funcname), (args))) - -#define _Py_ANY_VARARGS(n) ((n) == PY_SSIZE_T_MAX) - -PyAPI_FUNC(void) _PyArg_BadArgument(const char *, const char *, const char *, PyObject *); -PyAPI_FUNC(int) _PyArg_CheckPositional(const char *, Py_ssize_t, - Py_ssize_t, Py_ssize_t); -#define _PyArg_CheckPositional(funcname, nargs, min, max) \ - ((!_Py_ANY_VARARGS(max) && (min) <= (nargs) && (nargs) <= (max)) \ - || _PyArg_CheckPositional((funcname), (nargs), (min), (max))) - -PyAPI_FUNC(PyObject **) _Py_VaBuildStack( - PyObject **small_stack, - Py_ssize_t small_stack_len, - const char *format, - va_list va, - Py_ssize_t *p_nargs); - -typedef struct _PyArg_Parser { - int initialized; - const char *format; - const char * const *keywords; - const char *fname; - const char *custom_msg; - int pos; /* number of positional-only arguments */ - int min; /* minimal number of arguments */ - int max; /* maximal number of positional arguments */ - PyObject *kwtuple; /* tuple of keyword parameter names */ - struct _PyArg_Parser *next; -} _PyArg_Parser; - -#ifdef PY_SSIZE_T_CLEAN -#define _PyArg_ParseTupleAndKeywordsFast _PyArg_ParseTupleAndKeywordsFast_SizeT -#define _PyArg_ParseStack _PyArg_ParseStack_SizeT -#define _PyArg_ParseStackAndKeywords _PyArg_ParseStackAndKeywords_SizeT -#define _PyArg_VaParseTupleAndKeywordsFast _PyArg_VaParseTupleAndKeywordsFast_SizeT -#endif - -PyAPI_FUNC(int) _PyArg_ParseTupleAndKeywordsFast(PyObject *, PyObject *, - struct _PyArg_Parser *, ...); -PyAPI_FUNC(int) _PyArg_ParseStack( - PyObject *const *args, - Py_ssize_t nargs, - const char *format, - ...); -PyAPI_FUNC(int) _PyArg_ParseStackAndKeywords( - PyObject *const *args, - Py_ssize_t nargs, - PyObject *kwnames, - struct _PyArg_Parser *, - ...); -PyAPI_FUNC(int) _PyArg_VaParseTupleAndKeywordsFast(PyObject *, PyObject *, - struct _PyArg_Parser *, va_list); -PyAPI_FUNC(PyObject * const *) _PyArg_UnpackKeywords( - PyObject *const *args, Py_ssize_t nargs, - PyObject 
*kwargs, PyObject *kwnames, - struct _PyArg_Parser *parser, - int minpos, int maxpos, int minkw, - PyObject **buf); - -PyAPI_FUNC(PyObject * const *) _PyArg_UnpackKeywordsWithVararg( - PyObject *const *args, Py_ssize_t nargs, - PyObject *kwargs, PyObject *kwnames, - struct _PyArg_Parser *parser, - int minpos, int maxpos, int minkw, - int vararg, PyObject **buf); - -#define _PyArg_UnpackKeywords(args, nargs, kwargs, kwnames, parser, minpos, maxpos, minkw, buf) \ - (((minkw) == 0 && (kwargs) == NULL && (kwnames) == NULL && \ - (minpos) <= (nargs) && (nargs) <= (maxpos) && (args) != NULL) ? (args) : \ - _PyArg_UnpackKeywords((args), (nargs), (kwargs), (kwnames), (parser), \ - (minpos), (maxpos), (minkw), (buf))) - -PyAPI_FUNC(PyObject *) _PyModule_CreateInitialized(PyModuleDef*, int apiver); - -PyAPI_DATA(const char *) _Py_PackageContext; diff --git a/Include/cpython/object.h b/Include/cpython/object.h index fa0cfb24484952c..762e8a3b86ee1e5 100644 --- a/Include/cpython/object.h +++ b/Include/cpython/object.h @@ -3,14 +3,14 @@ #endif PyAPI_FUNC(void) _Py_NewReference(PyObject *op); - -#ifdef Py_TRACE_REFS -/* Py_TRACE_REFS is such major surgery that we call external routines. */ -PyAPI_FUNC(void) _Py_ForgetReference(PyObject *); -#endif +PyAPI_FUNC(void) _Py_NewReferenceNoTotal(PyObject *op); #ifdef Py_REF_DEBUG -PyAPI_FUNC(Py_ssize_t) _Py_GetRefTotal(void); +/* These are useful as debugging aids when chasing down refleaks. */ +PyAPI_FUNC(Py_ssize_t) _Py_GetGlobalRefTotal(void); +# define _Py_GetRefTotal() _Py_GetGlobalRefTotal() +PyAPI_FUNC(Py_ssize_t) _Py_GetLegacyRefTotal(void); +PyAPI_FUNC(Py_ssize_t) _PyInterpreterState_GetRefTotal(PyInterpreterState *); #endif @@ -41,7 +41,7 @@ typedef struct _Py_Identifier { Py_ssize_t index; } _Py_Identifier; -#if defined(NEEDS_PY_IDENTIFIER) || !defined(Py_BUILD_CORE) +#ifndef Py_BUILD_CORE // For now we are keeping _Py_IDENTIFIER for continued use // in non-builtin extensions (and naughty PyPI modules). @@ -49,7 +49,8 @@ typedef struct _Py_Identifier { #define _Py_static_string(varname, value) static _Py_Identifier varname = _Py_static_string_init(value) #define _Py_IDENTIFIER(varname) _Py_static_string(PyId_##varname, #varname) -#endif /* NEEDS_PY_IDENTIFIER */ +#endif /* !Py_BUILD_CORE */ + typedef struct { /* Number implementations must check *both* @@ -222,14 +223,26 @@ struct _typeobject { vectorcallfunc tp_vectorcall; /* bitset of which type-watchers care about this type */ - char tp_watched; + unsigned char tp_watched; }; /* This struct is used by the specializer - * It should should be treated as an opaque blob + * It should be treated as an opaque blob * by code other than the specializer and interpreter. */ struct _specialization_cache { + // In order to avoid bloating the bytecode with lots of inline caches, the + // members of this structure have a somewhat unique contract. They are set + // by the specialization machinery, and are invalidated by PyType_Modified. + // The rules for using them are as follows: + // - If getitem is non-NULL, then it is the same Python function that + // PyType_Lookup(cls, "__getitem__") would return. + // - If getitem is NULL, then getitem_version is meaningless. 
+ - If getitem->func_version == getitem_version, then getitem can be called + with two positional arguments and no keyword arguments, and has neither + *args nor **kwargs (as required by BINARY_SUBSCR_GETITEM): PyObject *getitem; + uint32_t getitem_version; + PyObject *init; }; /* The *real* layout of a type object when allocated on the heap */ @@ -256,42 +269,16 @@ typedef struct _heaptypeobject { PyAPI_FUNC(const char *) _PyType_Name(PyTypeObject *); PyAPI_FUNC(PyObject *) _PyType_Lookup(PyTypeObject *, PyObject *); -PyAPI_FUNC(PyObject *) _PyType_LookupId(PyTypeObject *, _Py_Identifier *); -PyAPI_FUNC(PyObject *) _PyObject_LookupSpecialId(PyObject *, _Py_Identifier *); -#ifndef Py_BUILD_CORE -// Backward compatibility for 3rd-party extensions -// that may be using the old name. -#define _PyObject_LookupSpecial _PyObject_LookupSpecialId -#endif -PyAPI_FUNC(PyTypeObject *) _PyType_CalculateMetaclass(PyTypeObject *, PyObject *); -PyAPI_FUNC(PyObject *) _PyType_GetDocFromInternalDoc(const char *, const char *); -PyAPI_FUNC(PyObject *) _PyType_GetTextSignatureFromInternalDoc(const char *, const char *); PyAPI_FUNC(PyObject *) PyType_GetModuleByDef(PyTypeObject *, PyModuleDef *); +PyAPI_FUNC(PyObject *) PyType_GetDict(PyTypeObject *); PyAPI_FUNC(int) PyObject_Print(PyObject *, FILE *, int); PyAPI_FUNC(void) _Py_BreakPoint(void); PyAPI_FUNC(void) _PyObject_Dump(PyObject *); -PyAPI_FUNC(int) _PyObject_IsFreed(PyObject *); - -PyAPI_FUNC(int) _PyObject_IsAbstract(PyObject *); -PyAPI_FUNC(PyObject *) _PyObject_GetAttrId(PyObject *, _Py_Identifier *); -PyAPI_FUNC(int) _PyObject_SetAttrId(PyObject *, _Py_Identifier *, PyObject *); -/* Replacements of PyObject_GetAttr() and _PyObject_GetAttrId() which - don't raise AttributeError. - - Return 1 and set *result != NULL if an attribute is found. - Return 0 and set *result == NULL if an attribute is not found; - an AttributeError is silenced. - Return -1 and set *result == NULL if an error other than AttributeError - is raised. -*/ -PyAPI_FUNC(int) _PyObject_LookupAttr(PyObject *, PyObject *, PyObject **); -PyAPI_FUNC(int) _PyObject_LookupAttrId(PyObject *, _Py_Identifier *, PyObject **); -PyAPI_FUNC(int) _PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); +PyAPI_FUNC(PyObject*) _PyObject_GetAttrId(PyObject *, _Py_Identifier *); PyAPI_FUNC(PyObject **) _PyObject_GetDictPtr(PyObject *); -PyAPI_FUNC(PyObject *) _PyObject_NextNotImplemented(PyObject *); PyAPI_FUNC(void) PyObject_CallFinalizer(PyObject *); PyAPI_FUNC(int) PyObject_CallFinalizerFromDealloc(PyObject *); @@ -305,53 +292,70 @@ _PyObject_GenericSetAttrWithDict(PyObject *, PyObject *, PyAPI_FUNC(PyObject *) _PyObject_FunctionStr(PyObject *); -/* Safely decref `op` and set `op` to `op2`. +/* Safely decref `dst` and set `dst` to `src`. * * As in case of Py_CLEAR "the obvious" code can be deadly: * - * Py_DECREF(op); - * op = op2; + * Py_DECREF(dst); + * dst = src; * * The safe way is: * - * Py_SETREF(op, op2); + * Py_SETREF(dst, src); + * + * That arranges to set `dst` to `src` _before_ decref'ing, so that any code + * triggered as a side-effect of `dst` getting torn down no longer believes + * `dst` points to a valid object. * - * That arranges to set `op` to `op2` _before_ decref'ing, so that any code - * triggered as a side-effect of `op` getting torn down no longer believes - * `op` points to a valid object. + * Temporary variables are used to only evaluate macro arguments once and so + avoid the duplication of side effects.
_Py_TYPEOF() or memcpy() is used to + * avoid a miscompilation caused by type punning. See Py_CLEAR() comment for + * implementation details about type punning. * - * Py_XSETREF is a variant of Py_SETREF that uses Py_XDECREF instead of - * Py_DECREF. + * The memcpy() implementation does not emit a compiler warning if 'src' has + * not the same type than 'src': any pointer type is accepted for 'src'. */ - -#define Py_SETREF(op, op2) \ - do { \ - PyObject *_py_tmp = _PyObject_CAST(op); \ - (op) = (op2); \ - Py_DECREF(_py_tmp); \ +#ifdef _Py_TYPEOF +#define Py_SETREF(dst, src) \ + do { \ + _Py_TYPEOF(dst)* _tmp_dst_ptr = &(dst); \ + _Py_TYPEOF(dst) _tmp_old_dst = (*_tmp_dst_ptr); \ + *_tmp_dst_ptr = (src); \ + Py_DECREF(_tmp_old_dst); \ } while (0) - -#define Py_XSETREF(op, op2) \ - do { \ - PyObject *_py_tmp = _PyObject_CAST(op); \ - (op) = (op2); \ - Py_XDECREF(_py_tmp); \ +#else +#define Py_SETREF(dst, src) \ + do { \ + PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \ + PyObject *_tmp_old_dst = (*_tmp_dst_ptr); \ + PyObject *_tmp_src = _PyObject_CAST(src); \ + memcpy(_tmp_dst_ptr, &_tmp_src, sizeof(PyObject*)); \ + Py_DECREF(_tmp_old_dst); \ } while (0) +#endif - -PyAPI_DATA(PyTypeObject) _PyNone_Type; -PyAPI_DATA(PyTypeObject) _PyNotImplemented_Type; - -/* Maps Py_LT to Py_GT, ..., Py_GE to Py_LE. - * Defined in object.c. +/* Py_XSETREF() is a variant of Py_SETREF() that uses Py_XDECREF() instead of + * Py_DECREF(). */ -PyAPI_DATA(int) _Py_SwappedOp[]; +#ifdef _Py_TYPEOF +#define Py_XSETREF(dst, src) \ + do { \ + _Py_TYPEOF(dst)* _tmp_dst_ptr = &(dst); \ + _Py_TYPEOF(dst) _tmp_old_dst = (*_tmp_dst_ptr); \ + *_tmp_dst_ptr = (src); \ + Py_XDECREF(_tmp_old_dst); \ + } while (0) +#else +#define Py_XSETREF(dst, src) \ + do { \ + PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \ + PyObject *_tmp_old_dst = (*_tmp_dst_ptr); \ + PyObject *_tmp_src = _PyObject_CAST(src); \ + memcpy(_tmp_dst_ptr, &_tmp_src, sizeof(PyObject*)); \ + Py_XDECREF(_tmp_old_dst); \ + } while (0) +#endif -PyAPI_FUNC(void) -_PyDebugAllocatorStats(FILE *out, const char *block_name, int num_blocks, - size_t sizeof_block); -PyAPI_FUNC(void) -_PyObject_DebugTypeStats(FILE *out); /* Define a pair of assertion macros: _PyObject_ASSERT_FROM(), _PyObject_ASSERT_WITH_MSG() and _PyObject_ASSERT(). @@ -401,21 +405,6 @@ PyAPI_FUNC(void) _Py_NO_RETURN _PyObject_AssertFailed( int line, const char *function); -/* Check if an object is consistent. For example, ensure that the reference - counter is greater than or equal to 1, and ensure that ob_type is not NULL. - - Call _PyObject_AssertFailed() if the object is inconsistent. - - If check_content is zero, only check header fields: reduce the overhead. - - The function always return 1. The return value is just here to be able to - write: - - assert(_PyObject_CheckConsistency(obj, 1)); */ -PyAPI_FUNC(int) _PyObject_CheckConsistency( - PyObject *op, - int check_content); - /* Trashcan mechanism, thanks to Christian Tismer. 
@@ -476,7 +465,7 @@ PyAPI_FUNC(int) _PyTrash_cond(PyObject *op, destructor dealloc); /* If "cond" is false, then _tstate remains NULL and the deallocator \ * is run normally without involving the trashcan */ \ if (cond) { \ - _tstate = PyThreadState_Get(); \ + _tstate = PyThreadState_GetUnchecked(); \ if (_PyTrash_begin(_tstate, _PyObject_CAST(op))) { \ break; \ } \ @@ -492,23 +481,11 @@ PyAPI_FUNC(int) _PyTrash_cond(PyObject *op, destructor dealloc); Py_TRASHCAN_BEGIN_CONDITION((op), \ _PyTrash_cond(_PyObject_CAST(op), (destructor)(dealloc))) -/* The following two macros, Py_TRASHCAN_SAFE_BEGIN and - * Py_TRASHCAN_SAFE_END, are deprecated since version 3.11 and - * will be removed in the future. - * Use Py_TRASHCAN_BEGIN and Py_TRASHCAN_END instead. - */ -Py_DEPRECATED(3.11) typedef int UsingDeprecatedTrashcanMacro; -#define Py_TRASHCAN_SAFE_BEGIN(op) \ - do { \ - UsingDeprecatedTrashcanMacro cond=1; \ - Py_TRASHCAN_BEGIN_CONDITION((op), cond); -#define Py_TRASHCAN_SAFE_END(op) \ - Py_TRASHCAN_END; \ - } while(0); +PyAPI_FUNC(void *) PyObject_GetItemData(PyObject *obj); -PyAPI_FUNC(int) _PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg); -PyAPI_FUNC(void) _PyObject_ClearManagedDict(PyObject *obj); +PyAPI_FUNC(int) PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg); +PyAPI_FUNC(void) PyObject_ClearManagedDict(PyObject *obj); #define TYPE_MAX_WATCHERS 8 @@ -517,3 +494,10 @@ PyAPI_FUNC(int) PyType_AddWatcher(PyType_WatchCallback callback); PyAPI_FUNC(int) PyType_ClearWatcher(int watcher_id); PyAPI_FUNC(int) PyType_Watch(int watcher_id, PyObject *type); PyAPI_FUNC(int) PyType_Unwatch(int watcher_id, PyObject *type); + +/* Attempt to assign a version tag to the given type. + * + * Returns 1 if the type already had a valid version tag or a new one was + * assigned, or 0 if a new tag could not be assigned. + */ +PyAPI_FUNC(int) PyUnstable_Type_AssignVersionTag(PyTypeObject *type); diff --git a/Include/cpython/objimpl.h b/Include/cpython/objimpl.h index d7c76eab5c7312e..58a30aeea6ac642 100644 --- a/Include/cpython/objimpl.h +++ b/Include/cpython/objimpl.h @@ -2,7 +2,9 @@ # error "this header file must not be included directly" #endif -#define _PyObject_SIZE(typeobj) ( (typeobj)->tp_basicsize ) +static inline size_t _PyObject_SIZE(PyTypeObject *type) { + return _Py_STATIC_CAST(size_t, type->tp_basicsize); +} /* _PyObject_VAR_SIZE returns the number of bytes (as size_t) allocated for a vrbl-size object with nitems items, exclusive of gc overhead (if any). The @@ -18,10 +20,11 @@ # error "_PyObject_VAR_SIZE requires SIZEOF_VOID_P be a power of 2" #endif -#define _PyObject_VAR_SIZE(typeobj, nitems) \ - _Py_SIZE_ROUND_UP((typeobj)->tp_basicsize + \ - (nitems)*(typeobj)->tp_itemsize, \ - SIZEOF_VOID_P) +static inline size_t _PyObject_VAR_SIZE(PyTypeObject *type, Py_ssize_t nitems) { + size_t size = _Py_STATIC_CAST(size_t, type->tp_basicsize); + size += _Py_STATIC_CAST(size_t, nitems) * _Py_STATIC_CAST(size_t, type->tp_itemsize); + return _Py_SIZE_ROUND_UP(size, SIZEOF_VOID_P); +} /* This example code implements an object constructor with a custom @@ -75,15 +78,10 @@ PyAPI_FUNC(void) PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator); PyAPI_FUNC(int) PyObject_IS_GC(PyObject *obj); -/* Code built with Py_BUILD_CORE must include pycore_gc.h instead which - defines a different _PyGC_FINALIZED() macro. 
*/ -#ifndef Py_BUILD_CORE - // Kept for backward compatibility with Python 3.8 -# define _PyGC_FINALIZED(o) PyObject_GC_IsFinalized(o) -#endif - - // Test if a type supports weak references PyAPI_FUNC(int) PyType_SUPPORTS_WEAKREFS(PyTypeObject *type); PyAPI_FUNC(PyObject **) PyObject_GET_WEAKREFS_LISTPTR(PyObject *op); + +PyAPI_FUNC(PyObject *) PyUnstable_Object_GC_NewWithExtraData(PyTypeObject *, + size_t); diff --git a/Include/cpython/optimizer.h b/Include/cpython/optimizer.h new file mode 100644 index 000000000000000..adc2c1fc442280a --- /dev/null +++ b/Include/cpython/optimizer.h @@ -0,0 +1,88 @@ + +#ifndef Py_LIMITED_API +#ifndef Py_OPTIMIZER_H +#define Py_OPTIMIZER_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _PyExecutorLinkListNode { + struct _PyExecutorObject *next; + struct _PyExecutorObject *previous; +} _PyExecutorLinkListNode; + + +/* Bloom filter with m = 256 + * https://en.wikipedia.org/wiki/Bloom_filter */ +#define BLOOM_FILTER_WORDS 8 + +typedef struct _bloom_filter { + uint32_t bits[BLOOM_FILTER_WORDS]; +} _PyBloomFilter; + +typedef struct { + uint8_t opcode; + uint8_t oparg; + uint8_t valid; + uint8_t linked; + _PyBloomFilter bloom; + _PyExecutorLinkListNode links; +} _PyVMData; + +typedef struct _PyExecutorObject { + PyObject_VAR_HEAD + /* WARNING: execute consumes a reference to self. This is necessary to allow executors to tail call into each other. */ + struct _PyInterpreterFrame *(*execute)(struct _PyExecutorObject *self, struct _PyInterpreterFrame *frame, PyObject **stack_pointer); + _PyVMData vm_data; /* Used by the VM, but opaque to the optimizer */ + /* Data needed by the executor goes here, but is opaque to the VM */ +} _PyExecutorObject; + +typedef struct _PyOptimizerObject _PyOptimizerObject; + +/* Should return > 0 if a new executor is created. O if no executor is produced and < 0 if an error occurred. 
*/ +typedef int (*optimize_func)(_PyOptimizerObject* self, PyCodeObject *code, _Py_CODEUNIT *instr, _PyExecutorObject **, int curr_stackentries); + +typedef struct _PyOptimizerObject { + PyObject_HEAD + optimize_func optimize; + /* These thresholds are treated as signed so do not exceed INT16_MAX + * Use INT16_MAX to indicate that the optimizer should never be called */ + uint16_t resume_threshold; + uint16_t backedge_threshold; + /* Data needed by the optimizer goes here, but is opaque to the VM */ +} _PyOptimizerObject; + +PyAPI_FUNC(int) PyUnstable_Replace_Executor(PyCodeObject *code, _Py_CODEUNIT *instr, _PyExecutorObject *executor); + +PyAPI_FUNC(void) PyUnstable_SetOptimizer(_PyOptimizerObject* optimizer); + +PyAPI_FUNC(_PyOptimizerObject *) PyUnstable_GetOptimizer(void); + +PyAPI_FUNC(_PyExecutorObject *) PyUnstable_GetExecutor(PyCodeObject *code, int offset); + +int +_PyOptimizer_BackEdge(struct _PyInterpreterFrame *frame, _Py_CODEUNIT *src, _Py_CODEUNIT *dest, PyObject **stack_pointer); + +extern _PyOptimizerObject _PyOptimizer_Default; + +void _Py_ExecutorInit(_PyExecutorObject *, _PyBloomFilter *); +void _Py_ExecutorClear(_PyExecutorObject *); +void _Py_BloomFilter_Init(_PyBloomFilter *); +void _Py_BloomFilter_Add(_PyBloomFilter *bloom, void *obj); +PyAPI_FUNC(void) _Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj); +PyAPI_FUNC(void) _Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj); +extern void _Py_Executors_InvalidateAll(PyInterpreterState *interp); + +/* For testing */ +PyAPI_FUNC(PyObject *)PyUnstable_Optimizer_NewCounter(void); +PyAPI_FUNC(PyObject *)PyUnstable_Optimizer_NewUOpOptimizer(void); + +#define OPTIMIZER_BITS_IN_COUNTER 4 +/* Minimum of 16 additional executions before retry */ +#define MINIMUM_TIER2_BACKOFF 4 + +#ifdef __cplusplus +} +#endif +#endif /* !Py_OPTIMIZER_H */ +#endif /* Py_LIMITED_API */ diff --git a/Include/cpython/pthread_stubs.h b/Include/cpython/pthread_stubs.h index d95ee03d8308ce4..e542eaa5bff0cf0 100644 --- a/Include/cpython/pthread_stubs.h +++ b/Include/cpython/pthread_stubs.h @@ -21,13 +21,29 @@ #ifdef __wasi__ // WASI's bits/alltypes.h provides type definitions when __NEED_ is set. // The header file can be included multiple times. -# define __NEED_pthread_cond_t 1 -# define __NEED_pthread_condattr_t 1 -# define __NEED_pthread_mutex_t 1 -# define __NEED_pthread_mutexattr_t 1 -# define __NEED_pthread_key_t 1 -# define __NEED_pthread_t 1 -# define __NEED_pthread_attr_t 1 +// +// may also define these macros. 
+# ifndef __NEED_pthread_cond_t +# define __NEED_pthread_cond_t 1 +# endif +# ifndef __NEED_pthread_condattr_t +# define __NEED_pthread_condattr_t 1 +# endif +# ifndef __NEED_pthread_mutex_t +# define __NEED_pthread_mutex_t 1 +# endif +# ifndef __NEED_pthread_mutexattr_t +# define __NEED_pthread_mutexattr_t 1 +# endif +# ifndef __NEED_pthread_key_t +# define __NEED_pthread_key_t 1 +# endif +# ifndef __NEED_pthread_t +# define __NEED_pthread_t 1 +# endif +# ifndef __NEED_pthread_attr_t +# define __NEED_pthread_attr_t 1 +# endif # include #else typedef struct { void *__x; } pthread_cond_t; @@ -67,6 +83,7 @@ PyAPI_FUNC(int) pthread_create(pthread_t *restrict thread, void *(*start_routine)(void *), void *restrict arg); PyAPI_FUNC(int) pthread_detach(pthread_t thread); +PyAPI_FUNC(int) pthread_join(pthread_t thread, void** value_ptr); PyAPI_FUNC(pthread_t) pthread_self(void); PyAPI_FUNC(int) pthread_exit(void *retval) __attribute__ ((__noreturn__)); PyAPI_FUNC(int) pthread_attr_init(pthread_attr_t *attr); diff --git a/Include/cpython/pyatomic.h b/Include/cpython/pyatomic.h new file mode 100644 index 000000000000000..5314a70436bfc39 --- /dev/null +++ b/Include/cpython/pyatomic.h @@ -0,0 +1,534 @@ +// This header provides cross-platform low-level atomic operations +// similar to C11 atomics. +// +// Operations are sequentially consistent unless they have a suffix indicating +// otherwise. If in doubt, prefer the sequentially consistent operations. +// +// The "_relaxed" suffix for load and store operations indicates the "relaxed" +// memory order. They don't provide synchronization, but (roughly speaking) +// guarantee somewhat sane behavior for races instead of undefined behavior. +// In practice, they correspond to "normal" hardware load and store +// instructions, so they are almost as inexpensive as plain loads and stores +// in C. +// +// Note that atomic read-modify-write operations like _Py_atomic_add_* return +// the previous value of the atomic variable, not the new value. +// +// See https://en.cppreference.com/w/c/atomic for more information on C11 +// atomics. +// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf +// "A Relaxed Guide to memory_order_relaxed" for discussion of and common usage +// or relaxed atomics. 
+// +// Functions with pseudo Python code: +// +// def _Py_atomic_load(obj): +// return obj # sequential consistency +// +// def _Py_atomic_load_relaxed(obj): +// return obj # relaxed consistency +// +// def _Py_atomic_store(obj, value): +// obj = value # sequential consistency +// +// def _Py_atomic_store_relaxed(obj, value): +// obj = value # relaxed consistency +// +// def _Py_atomic_exchange(obj, value): +// # sequential consistency +// old_obj = obj +// obj = value +// return old_obj +// +// def _Py_atomic_compare_exchange(obj, expected, desired): +// # sequential consistency +// if obj == expected: +// obj = desired +// return True +// else: +// expected = obj +// return False +// +// def _Py_atomic_add(obj, value): +// # sequential consistency +// old_obj = obj +// obj += value +// return old_obj +// +// def _Py_atomic_and(obj, value): +// # sequential consistency +// old_obj = obj +// obj &= value +// return old_obj +// +// def _Py_atomic_or(obj, value): +// # sequential consistency +// old_obj = obj +// obj |= value +// return old_obj +// +// Other functions: +// +// def _Py_atomic_load_ptr_acquire(obj): +// return obj # acquire +// +// def _Py_atomic_store_ptr_release(obj): +// return obj # release +// +// def _Py_atomic_fence_seq_cst(): +// # sequential consistency +// ... +// +// def _Py_atomic_fence_release(): +// # release +// ... + +#ifndef Py_CPYTHON_ATOMIC_H +# error "this header file must not be included directly" +#endif + +// --- _Py_atomic_add -------------------------------------------------------- +// Atomically adds `value` to `obj` and returns the previous value + +static inline int +_Py_atomic_add_int(int *obj, int value); + +static inline int8_t +_Py_atomic_add_int8(int8_t *obj, int8_t value); + +static inline int16_t +_Py_atomic_add_int16(int16_t *obj, int16_t value); + +static inline int32_t +_Py_atomic_add_int32(int32_t *obj, int32_t value); + +static inline int64_t +_Py_atomic_add_int64(int64_t *obj, int64_t value); + +static inline intptr_t +_Py_atomic_add_intptr(intptr_t *obj, intptr_t value); + +static inline unsigned int +_Py_atomic_add_uint(unsigned int *obj, unsigned int value); + +static inline uint8_t +_Py_atomic_add_uint8(uint8_t *obj, uint8_t value); + +static inline uint16_t +_Py_atomic_add_uint16(uint16_t *obj, uint16_t value); + +static inline uint32_t +_Py_atomic_add_uint32(uint32_t *obj, uint32_t value); + +static inline uint64_t +_Py_atomic_add_uint64(uint64_t *obj, uint64_t value); + +static inline uintptr_t +_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value); + +static inline Py_ssize_t +_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value); + + +// --- _Py_atomic_compare_exchange ------------------------------------------- +// Performs an atomic compare-and-exchange. +// +// - If `*obj` and `*expected` are equal, store `desired` into `*obj` +// and return 1 (success). +// - Otherwise, store the `*obj` current value into `*expected` +// and return 0 (failure). +// +// These correspond to the C11 atomic_compare_exchange_strong() function. 
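Editorial sketch of the failure semantics just described (the helper name try_increment_below is illustrative, not part of the patch): because a failed compare-exchange stores the current value back into *expected, a retry loop needs only one explicit load up front.

static int
try_increment_below(int *counter, int limit)
{
    int observed = _Py_atomic_load_int(counter);
    while (observed < limit) {
        if (_Py_atomic_compare_exchange_int(counter, &observed, observed + 1)) {
            return 1;    /* this thread performed the increment */
        }
        /* the failed exchange refreshed `observed` with the value another
           thread installed; retry against that value */
    }
    return 0;            /* counter already at or above the limit */
}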
+ +static inline int +_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired); + +static inline int +_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired); + +static inline int +_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired); + +static inline int +_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired); + +static inline int +_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired); + +static inline int +_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired); + +static inline int +_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired); + +static inline int +_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired); + +static inline int +_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired); + +static inline int +_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired); + +static inline int +_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired); + +static inline int +_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired); + +static inline int +_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired); + +// NOTE: `obj` and `expected` are logically `void**` types, but we use `void*` +// so that we can pass types like `PyObject**` without a cast. +static inline int +_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *value); + + +// --- _Py_atomic_exchange --------------------------------------------------- +// Atomically replaces `*obj` with `value` and returns the previous value of `*obj`. + +static inline int +_Py_atomic_exchange_int(int *obj, int value); + +static inline int8_t +_Py_atomic_exchange_int8(int8_t *obj, int8_t value); + +static inline int16_t +_Py_atomic_exchange_int16(int16_t *obj, int16_t value); + +static inline int32_t +_Py_atomic_exchange_int32(int32_t *obj, int32_t value); + +static inline int64_t +_Py_atomic_exchange_int64(int64_t *obj, int64_t value); + +static inline intptr_t +_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value); + +static inline unsigned int +_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value); + +static inline uint8_t +_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value); + +static inline uint16_t +_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value); + +static inline uint32_t +_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value); + +static inline uint64_t +_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value); + +static inline uintptr_t +_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value); + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value); + +static inline void * +_Py_atomic_exchange_ptr(void *obj, void *value); + + +// --- _Py_atomic_and -------------------------------------------------------- +// Performs `*obj &= value` atomically and returns the previous value of `*obj`. 
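Editorial sketch for the fetch-and documented here and the fetch-or declared further down (FLAG_CLOSED, mark_closed and clear_closed are hypothetical): returning the previous value lets callers detect which thread actually flipped a flag bit.

#define FLAG_CLOSED 0x01u

static int
mark_closed(uint8_t *state)
{
    uint8_t prev = _Py_atomic_or_uint8(state, FLAG_CLOSED);
    return (prev & FLAG_CLOSED) == 0;   /* 1 only for the thread that set the bit */
}

static void
clear_closed(uint8_t *state)
{
    (void)_Py_atomic_and_uint8(state, (uint8_t)~FLAG_CLOSED);
}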
+ +static inline uint8_t +_Py_atomic_and_uint8(uint8_t *obj, uint8_t value); + +static inline uint16_t +_Py_atomic_and_uint16(uint16_t *obj, uint16_t value); + +static inline uint32_t +_Py_atomic_and_uint32(uint32_t *obj, uint32_t value); + +static inline uint64_t +_Py_atomic_and_uint64(uint64_t *obj, uint64_t value); + +static inline uintptr_t +_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value); + + +// --- _Py_atomic_or --------------------------------------------------------- +// Performs `*obj |= value` atomically and returns the previous value of `*obj`. + +static inline uint8_t +_Py_atomic_or_uint8(uint8_t *obj, uint8_t value); + +static inline uint16_t +_Py_atomic_or_uint16(uint16_t *obj, uint16_t value); + +static inline uint32_t +_Py_atomic_or_uint32(uint32_t *obj, uint32_t value); + +static inline uint64_t +_Py_atomic_or_uint64(uint64_t *obj, uint64_t value); + +static inline uintptr_t +_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value); + + +// --- _Py_atomic_load ------------------------------------------------------- +// Atomically loads `*obj` (sequential consistency) + +static inline int +_Py_atomic_load_int(const int *obj); + +static inline int8_t +_Py_atomic_load_int8(const int8_t *obj); + +static inline int16_t +_Py_atomic_load_int16(const int16_t *obj); + +static inline int32_t +_Py_atomic_load_int32(const int32_t *obj); + +static inline int64_t +_Py_atomic_load_int64(const int64_t *obj); + +static inline intptr_t +_Py_atomic_load_intptr(const intptr_t *obj); + +static inline uint8_t +_Py_atomic_load_uint8(const uint8_t *obj); + +static inline uint16_t +_Py_atomic_load_uint16(const uint16_t *obj); + +static inline uint32_t +_Py_atomic_load_uint32(const uint32_t *obj); + +static inline uint64_t +_Py_atomic_load_uint64(const uint64_t *obj); + +static inline uintptr_t +_Py_atomic_load_uintptr(const uintptr_t *obj); + +static inline unsigned int +_Py_atomic_load_uint(const unsigned int *obj); + +static inline Py_ssize_t +_Py_atomic_load_ssize(const Py_ssize_t *obj); + +static inline void * +_Py_atomic_load_ptr(const void *obj); + + +// --- _Py_atomic_load_relaxed ----------------------------------------------- +// Loads `*obj` (relaxed consistency, i.e., no ordering) + +static inline int +_Py_atomic_load_int_relaxed(const int *obj); + +static inline int8_t +_Py_atomic_load_int8_relaxed(const int8_t *obj); + +static inline int16_t +_Py_atomic_load_int16_relaxed(const int16_t *obj); + +static inline int32_t +_Py_atomic_load_int32_relaxed(const int32_t *obj); + +static inline int64_t +_Py_atomic_load_int64_relaxed(const int64_t *obj); + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const intptr_t *obj); + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const uint8_t *obj); + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const uint16_t *obj); + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const uint32_t *obj); + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const uint64_t *obj); + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj); + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const unsigned int *obj); + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj); + +static inline void * +_Py_atomic_load_ptr_relaxed(const void *obj); + + +// --- _Py_atomic_store ------------------------------------------------------ +// Atomically performs `*obj = value` (sequential consistency) + +static inline void +_Py_atomic_store_int(int *obj, int value); + +static inline 
void +_Py_atomic_store_int8(int8_t *obj, int8_t value); + +static inline void +_Py_atomic_store_int16(int16_t *obj, int16_t value); + +static inline void +_Py_atomic_store_int32(int32_t *obj, int32_t value); + +static inline void +_Py_atomic_store_int64(int64_t *obj, int64_t value); + +static inline void +_Py_atomic_store_intptr(intptr_t *obj, intptr_t value); + +static inline void +_Py_atomic_store_uint8(uint8_t *obj, uint8_t value); + +static inline void +_Py_atomic_store_uint16(uint16_t *obj, uint16_t value); + +static inline void +_Py_atomic_store_uint32(uint32_t *obj, uint32_t value); + +static inline void +_Py_atomic_store_uint64(uint64_t *obj, uint64_t value); + +static inline void +_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value); + +static inline void +_Py_atomic_store_uint(unsigned int *obj, unsigned int value); + +static inline void +_Py_atomic_store_ptr(void *obj, void *value); + +static inline void +_Py_atomic_store_ssize(Py_ssize_t* obj, Py_ssize_t value); + + +// --- _Py_atomic_store_relaxed ---------------------------------------------- +// Stores `*obj = value` (relaxed consistency, i.e., no ordering) + +static inline void +_Py_atomic_store_int_relaxed(int *obj, int value); + +static inline void +_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value); + +static inline void +_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value); + +static inline void +_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value); + +static inline void +_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value); + +static inline void +_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value); + +static inline void +_Py_atomic_store_uint8_relaxed(uint8_t* obj, uint8_t value); + +static inline void +_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value); + +static inline void +_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value); + +static inline void +_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value); + +static inline void +_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value); + +static inline void +_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value); + +static inline void +_Py_atomic_store_ptr_relaxed(void *obj, void *value); + +static inline void +_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value); + + +// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------ + +// Loads `*obj` (acquire operation) +static inline void * +_Py_atomic_load_ptr_acquire(const void *obj); + +// Stores `*obj = value` (release operation) +static inline void +_Py_atomic_store_ptr_release(void *obj, void *value); + +static inline void +_Py_atomic_store_int_release(int *obj, int value); + +static inline int +_Py_atomic_load_int_acquire(const int *obj); + + +// --- _Py_atomic_fence ------------------------------------------------------ + +// Sequential consistency fence. C11 fences have complex semantics. When +// possible, use the atomic operations on variables defined above, which +// generally do not require explicit use of a fence. 
+// See https://en.cppreference.com/w/cpp/atomic/atomic_thread_fence +static inline void _Py_atomic_fence_seq_cst(void); + +// Release fence +static inline void _Py_atomic_fence_release(void); + + +#ifndef _Py_USE_GCC_BUILTIN_ATOMICS +# if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) +# define _Py_USE_GCC_BUILTIN_ATOMICS 1 +# elif defined(__clang__) +# if __has_builtin(__atomic_load) +# define _Py_USE_GCC_BUILTIN_ATOMICS 1 +# endif +# endif +#endif + +#if _Py_USE_GCC_BUILTIN_ATOMICS +# define Py_ATOMIC_GCC_H +# include "cpython/pyatomic_gcc.h" +# undef Py_ATOMIC_GCC_H +#elif __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) +# define Py_ATOMIC_STD_H +# include "cpython/pyatomic_std.h" +# undef Py_ATOMIC_STD_H +#elif defined(_MSC_VER) +# define Py_ATOMIC_MSC_H +# include "cpython/pyatomic_msc.h" +# undef Py_ATOMIC_MSC_H +#else +# error "no available pyatomic implementation for this platform/compiler" +#endif + + +// --- aliases --------------------------------------------------------------- + +#if SIZEOF_LONG == 8 +# define _Py_atomic_load_ulong(p) \ + _Py_atomic_load_uint64((uint64_t *)p) +# define _Py_atomic_load_ulong_relaxed(p) \ + _Py_atomic_load_uint64_relaxed((uint64_t *)p) +# define _Py_atomic_store_ulong(p, v) \ + _Py_atomic_store_uint64((uint64_t *)p, v) +# define _Py_atomic_store_ulong_relaxed(p, v) \ + _Py_atomic_store_uint64_relaxed((uint64_t *)p, v) +#elif SIZEOF_LONG == 4 +# define _Py_atomic_load_ulong(p) \ + _Py_atomic_load_uint32((uint32_t *)p) +# define _Py_atomic_load_ulong_relaxed(p) \ + _Py_atomic_load_uint32_relaxed((uint32_t *)p) +# define _Py_atomic_store_ulong(p, v) \ + _Py_atomic_store_uint32((uint32_t *)p, v) +# define _Py_atomic_store_ulong_relaxed(p, v) \ + _Py_atomic_store_uint32_relaxed((uint32_t *)p, v) +#else +# error "long must be 4 or 8 bytes in size" +#endif // SIZEOF_LONG diff --git a/Include/cpython/pyatomic_gcc.h b/Include/cpython/pyatomic_gcc.h new file mode 100644 index 000000000000000..70f2b7e1b5706a9 --- /dev/null +++ b/Include/cpython/pyatomic_gcc.h @@ -0,0 +1,507 @@ +// This is the implementation of Python atomic operations using GCC's built-in +// functions that match the C+11 memory model. This implementation is preferred +// for GCC compatible compilers, such as Clang. These functions are available +// in GCC 4.8+ without needing to compile with --std=c11 or --std=gnu11. 
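Editorial sketch, referring back to the acquire/release pair declared in pyatomic.h above (config_t, shared_config and the two helpers are hypothetical): a release store publishes a fully initialised object, and readers that observe the pointer through an acquire load also observe the writes made before it was published.

typedef struct { int ready_value; } config_t;
static config_t *shared_config;          /* NULL until first publication */

static void
publish_config(config_t *cfg)
{
    cfg->ready_value = 42;                              /* ordinary writes ... */
    _Py_atomic_store_ptr_release(&shared_config, cfg);  /* ... then publish    */
}

static config_t *
consume_config(void)
{
    /* Returns NULL until publish_config() has run; afterwards the payload
       written before the release store is guaranteed to be visible. */
    return (config_t *)_Py_atomic_load_ptr_acquire(&shared_config);
}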
+ +#ifndef Py_ATOMIC_GCC_H +# error "this header file must not be included directly" +#endif + + +// --- _Py_atomic_add -------------------------------------------------------- + +static inline int +_Py_atomic_add_int(int *obj, int value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline int8_t +_Py_atomic_add_int8(int8_t *obj, int8_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline int16_t +_Py_atomic_add_int16(int16_t *obj, int16_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline int32_t +_Py_atomic_add_int32(int32_t *obj, int32_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline int64_t +_Py_atomic_add_int64(int64_t *obj, int64_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline intptr_t +_Py_atomic_add_intptr(intptr_t *obj, intptr_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline unsigned int +_Py_atomic_add_uint(unsigned int *obj, unsigned int value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint8_t +_Py_atomic_add_uint8(uint8_t *obj, uint8_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint16_t +_Py_atomic_add_uint16(uint16_t *obj, uint16_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint32_t +_Py_atomic_add_uint32(uint32_t *obj, uint32_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint64_t +_Py_atomic_add_uint64(uint64_t *obj, uint64_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline uintptr_t +_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + +static inline Py_ssize_t +_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value) +{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); } + + +// --- _Py_atomic_compare_exchange ------------------------------------------- + +static inline int +_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, 
__ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired) +{ return __atomic_compare_exchange_n(obj, expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + +static inline int +_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *desired) +{ return __atomic_compare_exchange_n((void **)obj, (void **)expected, desired, 0, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } + + +// --- _Py_atomic_exchange --------------------------------------------------- + +static inline int +_Py_atomic_exchange_int(int *obj, int value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline int8_t +_Py_atomic_exchange_int8(int8_t *obj, int8_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline int16_t +_Py_atomic_exchange_int16(int16_t *obj, int16_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline int32_t +_Py_atomic_exchange_int32(int32_t *obj, int32_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline int64_t +_Py_atomic_exchange_int64(int64_t *obj, int64_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline intptr_t +_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline unsigned int +_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint8_t +_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint16_t +_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint32_t +_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint64_t +_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline uintptr_t +_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value) +{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void * 
+_Py_atomic_exchange_ptr(void *obj, void *value) +{ return __atomic_exchange_n((void **)obj, value, __ATOMIC_SEQ_CST); } + + +// --- _Py_atomic_and -------------------------------------------------------- + +static inline uint8_t +_Py_atomic_and_uint8(uint8_t *obj, uint8_t value) +{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint16_t +_Py_atomic_and_uint16(uint16_t *obj, uint16_t value) +{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint32_t +_Py_atomic_and_uint32(uint32_t *obj, uint32_t value) +{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint64_t +_Py_atomic_and_uint64(uint64_t *obj, uint64_t value) +{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); } + +static inline uintptr_t +_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value) +{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); } + + +// --- _Py_atomic_or --------------------------------------------------------- + +static inline uint8_t +_Py_atomic_or_uint8(uint8_t *obj, uint8_t value) +{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint16_t +_Py_atomic_or_uint16(uint16_t *obj, uint16_t value) +{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint32_t +_Py_atomic_or_uint32(uint32_t *obj, uint32_t value) +{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); } + +static inline uint64_t +_Py_atomic_or_uint64(uint64_t *obj, uint64_t value) +{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); } + +static inline uintptr_t +_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value) +{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); } + + +// --- _Py_atomic_load ------------------------------------------------------- + +static inline int +_Py_atomic_load_int(const int *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline int8_t +_Py_atomic_load_int8(const int8_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline int16_t +_Py_atomic_load_int16(const int16_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline int32_t +_Py_atomic_load_int32(const int32_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline int64_t +_Py_atomic_load_int64(const int64_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline intptr_t +_Py_atomic_load_intptr(const intptr_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline uint8_t +_Py_atomic_load_uint8(const uint8_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline uint16_t +_Py_atomic_load_uint16(const uint16_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline uint32_t +_Py_atomic_load_uint32(const uint32_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline uint64_t +_Py_atomic_load_uint64(const uint64_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline uintptr_t +_Py_atomic_load_uintptr(const uintptr_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline unsigned int +_Py_atomic_load_uint(const unsigned int *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline Py_ssize_t +_Py_atomic_load_ssize(const Py_ssize_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); } + +static inline void * +_Py_atomic_load_ptr(const void *obj) +{ return (void *)__atomic_load_n((void **)obj, __ATOMIC_SEQ_CST); } + + +// --- _Py_atomic_load_relaxed 
----------------------------------------------- + +static inline int +_Py_atomic_load_int_relaxed(const int *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline int8_t +_Py_atomic_load_int8_relaxed(const int8_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline int16_t +_Py_atomic_load_int16_relaxed(const int16_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline int32_t +_Py_atomic_load_int32_relaxed(const int32_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline int64_t +_Py_atomic_load_int64_relaxed(const int64_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const intptr_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const uint8_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const uint16_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const uint32_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const uint64_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const unsigned int *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj) +{ return __atomic_load_n(obj, __ATOMIC_RELAXED); } + +static inline void * +_Py_atomic_load_ptr_relaxed(const void *obj) +{ return (void *)__atomic_load_n((const void **)obj, __ATOMIC_RELAXED); } + + +// --- _Py_atomic_store ------------------------------------------------------ + +static inline void +_Py_atomic_store_int(int *obj, int value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_int8(int8_t *obj, int8_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_int16(int16_t *obj, int16_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_int32(int32_t *obj, int32_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_int64(int64_t *obj, int64_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_intptr(intptr_t *obj, intptr_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_uint8(uint8_t *obj, uint8_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_uint16(uint16_t *obj, uint16_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_uint32(uint32_t *obj, uint32_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_uint64(uint64_t *obj, uint64_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_uint(unsigned int *obj, unsigned int value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + +static inline void 
+_Py_atomic_store_ptr(void *obj, void *value) +{ __atomic_store_n((void **)obj, value, __ATOMIC_SEQ_CST); } + +static inline void +_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value) +{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); } + + +// --- _Py_atomic_store_relaxed ---------------------------------------------- + +static inline void +_Py_atomic_store_int_relaxed(int *obj, int value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_ptr_relaxed(void *obj, void *value) +{ __atomic_store_n((void **)obj, value, __ATOMIC_RELAXED); } + +static inline void +_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value) +{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); } + + +// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------ + +static inline void * +_Py_atomic_load_ptr_acquire(const void *obj) +{ return (void *)__atomic_load_n((void **)obj, __ATOMIC_ACQUIRE); } + +static inline void +_Py_atomic_store_ptr_release(void *obj, void *value) +{ __atomic_store_n((void **)obj, value, __ATOMIC_RELEASE); } + +static inline void +_Py_atomic_store_int_release(int *obj, int value) +{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); } + +static inline int +_Py_atomic_load_int_acquire(const int *obj) +{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); } + + +// --- _Py_atomic_fence ------------------------------------------------------ + +static inline void +_Py_atomic_fence_seq_cst(void) +{ __atomic_thread_fence(__ATOMIC_SEQ_CST); } + + static inline void +_Py_atomic_fence_release(void) +{ __atomic_thread_fence(__ATOMIC_RELEASE); } diff --git a/Include/cpython/pyatomic_msc.h b/Include/cpython/pyatomic_msc.h new file mode 100644 index 000000000000000..601a0cf65afc1c5 --- /dev/null +++ b/Include/cpython/pyatomic_msc.h @@ -0,0 +1,970 @@ +// This is the implementation of Python atomic operations for MSVC if the +// compiler does not support C11 or C++11 atomics. 
+// +// MSVC intrinsics are defined on char, short, long, __int64, and pointer +// types. Note that long and int are both 32-bits even on 64-bit Windows, +// so operations on int are cast to long. +// +// The volatile keyword has additional memory ordering semantics on MSVC. On +// x86 and x86-64, volatile accesses have acquire-release semantics. On ARM64, +// volatile accesses behave like C11's memory_order_relaxed. + +#ifndef Py_ATOMIC_MSC_H +# error "this header file must not be included directly" +#endif + +#include + +#define _Py_atomic_ASSERT_ARG_TYPE(TYPE) \ + Py_BUILD_ASSERT(sizeof(*obj) == sizeof(TYPE)) + + +// --- _Py_atomic_add -------------------------------------------------------- + +static inline int8_t +_Py_atomic_add_int8(int8_t *obj, int8_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(char); + return (int8_t)_InterlockedExchangeAdd8((volatile char *)obj, (char)value); +} + +static inline int16_t +_Py_atomic_add_int16(int16_t *obj, int16_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(short); + return (int16_t)_InterlockedExchangeAdd16((volatile short *)obj, (short)value); +} + +static inline int32_t +_Py_atomic_add_int32(int32_t *obj, int32_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(long); + return (int32_t)_InterlockedExchangeAdd((volatile long *)obj, (long)value); +} + +static inline int64_t +_Py_atomic_add_int64(int64_t *obj, int64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + _Py_atomic_ASSERT_ARG_TYPE(__int64); + return (int64_t)_InterlockedExchangeAdd64((volatile __int64 *)obj, (__int64)value); +#else + int64_t old_value = _Py_atomic_load_int64_relaxed(obj); + for (;;) { + int64_t new_value = old_value + value; + if (_Py_atomic_compare_exchange_int64(obj, &old_value, new_value)) { + return old_value; + } + } +#endif +} + + +static inline uint8_t +_Py_atomic_add_uint8(uint8_t *obj, uint8_t value) +{ + return (uint8_t)_Py_atomic_add_int8((int8_t *)obj, (int8_t)value); +} + +static inline uint16_t +_Py_atomic_add_uint16(uint16_t *obj, uint16_t value) +{ + return (uint16_t)_Py_atomic_add_int16((int16_t *)obj, (int16_t)value); +} + +static inline uint32_t +_Py_atomic_add_uint32(uint32_t *obj, uint32_t value) +{ + return (uint32_t)_Py_atomic_add_int32((int32_t *)obj, (int32_t)value); +} + +static inline int +_Py_atomic_add_int(int *obj, int value) +{ + _Py_atomic_ASSERT_ARG_TYPE(int32_t); + return (int)_Py_atomic_add_int32((int32_t *)obj, (int32_t)value); +} + +static inline unsigned int +_Py_atomic_add_uint(unsigned int *obj, unsigned int value) +{ + _Py_atomic_ASSERT_ARG_TYPE(int32_t); + return (unsigned int)_Py_atomic_add_int32((int32_t *)obj, (int32_t)value); +} + +static inline uint64_t +_Py_atomic_add_uint64(uint64_t *obj, uint64_t value) +{ + return (uint64_t)_Py_atomic_add_int64((int64_t *)obj, (int64_t)value); +} + +static inline intptr_t +_Py_atomic_add_intptr(intptr_t *obj, intptr_t value) +{ +#if SIZEOF_VOID_P == 8 + _Py_atomic_ASSERT_ARG_TYPE(int64_t); + return (intptr_t)_Py_atomic_add_int64((int64_t *)obj, (int64_t)value); +#else + _Py_atomic_ASSERT_ARG_TYPE(int32_t); + return (intptr_t)_Py_atomic_add_int32((int32_t *)obj, (int32_t)value); +#endif +} + +static inline uintptr_t +_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(intptr_t); + return (uintptr_t)_Py_atomic_add_intptr((intptr_t *)obj, (intptr_t)value); +} + +static inline Py_ssize_t +_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(intptr_t); + return (Py_ssize_t)_Py_atomic_add_intptr((intptr_t *)obj, (intptr_t)value); +} + + 
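Editorial sketch, not part of the patch: the compare-exchange retry loop used above for the 64-bit add fallback (and again below for the 64-bit and/or fallbacks) generalises to any read-modify-write that has no direct intrinsic on 32-bit MSVC targets; example_fetch_xor_uint64 is a hypothetical helper shown only to make the shape of the emulation explicit.

static inline uint64_t
example_fetch_xor_uint64(uint64_t *obj, uint64_t value)
{
    uint64_t old_value = _Py_atomic_load_uint64_relaxed(obj);
    for (;;) {
        uint64_t new_value = old_value ^ value;
        if (_Py_atomic_compare_exchange_uint64(obj, &old_value, new_value)) {
            return old_value;   /* previous value, matching the other RMW ops */
        }
        /* old_value was refreshed by the failed compare-exchange; retry */
    }
}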
+// --- _Py_atomic_compare_exchange ------------------------------------------- + +static inline int +_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(char); + int8_t initial = (int8_t)_InterlockedCompareExchange8( + (volatile char *)obj, + (char)value, + (char)*expected); + if (initial == *expected) { + return 1; + } + *expected = initial; + return 0; +} + +static inline int +_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(short); + int16_t initial = (int16_t)_InterlockedCompareExchange16( + (volatile short *)obj, + (short)value, + (short)*expected); + if (initial == *expected) { + return 1; + } + *expected = initial; + return 0; +} + +static inline int +_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(long); + int32_t initial = (int32_t)_InterlockedCompareExchange( + (volatile long *)obj, + (long)value, + (long)*expected); + if (initial == *expected) { + return 1; + } + *expected = initial; + return 0; +} + +static inline int +_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(__int64); + int64_t initial = (int64_t)_InterlockedCompareExchange64( + (volatile __int64 *)obj, + (__int64)value, + (__int64)*expected); + if (initial == *expected) { + return 1; + } + *expected = initial; + return 0; +} + +static inline int +_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *value) +{ + void *initial = _InterlockedCompareExchangePointer( + (void**)obj, + value, + *(void**)expected); + if (initial == *(void**)expected) { + return 1; + } + *(void**)expected = initial; + return 0; +} + + +static inline int +_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t value) +{ + return _Py_atomic_compare_exchange_int8((int8_t *)obj, + (int8_t *)expected, + (int8_t)value); +} + +static inline int +_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t value) +{ + return _Py_atomic_compare_exchange_int16((int16_t *)obj, + (int16_t *)expected, + (int16_t)value); +} + +static inline int +_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t value) +{ + return _Py_atomic_compare_exchange_int32((int32_t *)obj, + (int32_t *)expected, + (int32_t)value); +} + +static inline int +_Py_atomic_compare_exchange_int(int *obj, int *expected, int value) +{ + _Py_atomic_ASSERT_ARG_TYPE(int32_t); + return _Py_atomic_compare_exchange_int32((int32_t *)obj, + (int32_t *)expected, + (int32_t)value); +} + +static inline int +_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int value) +{ + _Py_atomic_ASSERT_ARG_TYPE(int32_t); + return _Py_atomic_compare_exchange_int32((int32_t *)obj, + (int32_t *)expected, + (int32_t)value); +} + +static inline int +_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t value) +{ + return _Py_atomic_compare_exchange_int64((int64_t *)obj, + (int64_t *)expected, + (int64_t)value); +} + +static inline int +_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(void*); + return _Py_atomic_compare_exchange_ptr((void**)obj, + (void**)expected, + (void*)value); +} + +static inline int +_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(void*); + return 
_Py_atomic_compare_exchange_ptr((void**)obj, + (void**)expected, + (void*)value); +} + +static inline int +_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(void*); + return _Py_atomic_compare_exchange_ptr((void**)obj, + (void**)expected, + (void*)value); +} + + +// --- _Py_atomic_exchange --------------------------------------------------- + +static inline int8_t +_Py_atomic_exchange_int8(int8_t *obj, int8_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(char); + return (int8_t)_InterlockedExchange8((volatile char *)obj, (char)value); +} + +static inline int16_t +_Py_atomic_exchange_int16(int16_t *obj, int16_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(short); + return (int16_t)_InterlockedExchange16((volatile short *)obj, (short)value); +} + +static inline int32_t +_Py_atomic_exchange_int32(int32_t *obj, int32_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(long); + return (int32_t)_InterlockedExchange((volatile long *)obj, (long)value); +} + +static inline int64_t +_Py_atomic_exchange_int64(int64_t *obj, int64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + _Py_atomic_ASSERT_ARG_TYPE(__int64); + return (int64_t)_InterlockedExchange64((volatile __int64 *)obj, (__int64)value); +#else + int64_t old_value = _Py_atomic_load_int64_relaxed(obj); + for (;;) { + if (_Py_atomic_compare_exchange_int64(obj, &old_value, value)) { + return old_value; + } + } +#endif +} + +static inline void* +_Py_atomic_exchange_ptr(void *obj, void *value) +{ + return (void*)_InterlockedExchangePointer((void * volatile *)obj, (void *)value); +} + + +static inline uint8_t +_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value) +{ + return (uint8_t)_Py_atomic_exchange_int8((int8_t *)obj, + (int8_t)value); +} + +static inline uint16_t +_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value) +{ + return (uint16_t)_Py_atomic_exchange_int16((int16_t *)obj, + (int16_t)value); +} + +static inline uint32_t +_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value) +{ + return (uint32_t)_Py_atomic_exchange_int32((int32_t *)obj, + (int32_t)value); +} + +static inline int +_Py_atomic_exchange_int(int *obj, int value) +{ + _Py_atomic_ASSERT_ARG_TYPE(int32_t); + return (int)_Py_atomic_exchange_int32((int32_t *)obj, + (int32_t)value); +} + +static inline unsigned int +_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value) +{ + _Py_atomic_ASSERT_ARG_TYPE(int32_t); + return (unsigned int)_Py_atomic_exchange_int32((int32_t *)obj, + (int32_t)value); +} + +static inline uint64_t +_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value) +{ + return (uint64_t)_Py_atomic_exchange_int64((int64_t *)obj, + (int64_t)value); +} + +static inline intptr_t +_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(void*); + return (intptr_t)_Py_atomic_exchange_ptr((void**)obj, + (void*)value); +} + +static inline uintptr_t +_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(void*); + return (uintptr_t)_Py_atomic_exchange_ptr((void**)obj, + (void*)value); +} + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(void*); + return (Py_ssize_t)_Py_atomic_exchange_ptr((void**)obj, + (void*)value); +} + + +// --- _Py_atomic_and -------------------------------------------------------- + +static inline uint8_t +_Py_atomic_and_uint8(uint8_t *obj, uint8_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(char); + return (uint8_t)_InterlockedAnd8((volatile 
char *)obj, (char)value); +} + +static inline uint16_t +_Py_atomic_and_uint16(uint16_t *obj, uint16_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(short); + return (uint16_t)_InterlockedAnd16((volatile short *)obj, (short)value); +} + +static inline uint32_t +_Py_atomic_and_uint32(uint32_t *obj, uint32_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(long); + return (uint32_t)_InterlockedAnd((volatile long *)obj, (long)value); +} + +static inline uint64_t +_Py_atomic_and_uint64(uint64_t *obj, uint64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + _Py_atomic_ASSERT_ARG_TYPE(__int64); + return (uint64_t)_InterlockedAnd64((volatile __int64 *)obj, (__int64)value); +#else + uint64_t old_value = _Py_atomic_load_uint64_relaxed(obj); + for (;;) { + uint64_t new_value = old_value & value; + if (_Py_atomic_compare_exchange_uint64(obj, &old_value, new_value)) { + return old_value; + } + } +#endif +} + +static inline uintptr_t +_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value) +{ +#if SIZEOF_VOID_P == 8 + _Py_atomic_ASSERT_ARG_TYPE(uint64_t); + return (uintptr_t)_Py_atomic_and_uint64((uint64_t *)obj, + (uint64_t)value); +#else + _Py_atomic_ASSERT_ARG_TYPE(uint32_t); + return (uintptr_t)_Py_atomic_and_uint32((uint32_t *)obj, + (uint32_t)value); +#endif +} + + +// --- _Py_atomic_or --------------------------------------------------------- + +static inline uint8_t +_Py_atomic_or_uint8(uint8_t *obj, uint8_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(char); + return (uint8_t)_InterlockedOr8((volatile char *)obj, (char)value); +} + +static inline uint16_t +_Py_atomic_or_uint16(uint16_t *obj, uint16_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(short); + return (uint16_t)_InterlockedOr16((volatile short *)obj, (short)value); +} + +static inline uint32_t +_Py_atomic_or_uint32(uint32_t *obj, uint32_t value) +{ + _Py_atomic_ASSERT_ARG_TYPE(long); + return (uint32_t)_InterlockedOr((volatile long *)obj, (long)value); +} + +static inline uint64_t +_Py_atomic_or_uint64(uint64_t *obj, uint64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + _Py_atomic_ASSERT_ARG_TYPE(__int64); + return (uint64_t)_InterlockedOr64((volatile __int64 *)obj, (__int64)value); +#else + uint64_t old_value = _Py_atomic_load_uint64_relaxed(obj); + for (;;) { + uint64_t new_value = old_value | value; + if (_Py_atomic_compare_exchange_uint64(obj, &old_value, new_value)) { + return old_value; + } + } +#endif +} + + +static inline uintptr_t +_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value) +{ +#if SIZEOF_VOID_P == 8 + _Py_atomic_ASSERT_ARG_TYPE(uint64_t); + return (uintptr_t)_Py_atomic_or_uint64((uint64_t *)obj, + (uint64_t)value); +#else + _Py_atomic_ASSERT_ARG_TYPE(uint32_t); + return (uintptr_t)_Py_atomic_or_uint32((uint32_t *)obj, + (uint32_t)value); +#endif +} + + +// --- _Py_atomic_load ------------------------------------------------------- + +static inline uint8_t +_Py_atomic_load_uint8(const uint8_t *obj) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uint8_t *)obj; +#elif defined(_M_ARM64) + return (uint8_t)__ldar8((unsigned __int8 volatile *)obj); +#else +# error "no implementation of _Py_atomic_load_uint8" +#endif +} + +static inline uint16_t +_Py_atomic_load_uint16(const uint16_t *obj) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uint16_t *)obj; +#elif defined(_M_ARM64) + return (uint16_t)__ldar16((unsigned __int16 volatile *)obj); +#else +# error "no implementation of _Py_atomic_load_uint16" +#endif +} + +static inline uint32_t +_Py_atomic_load_uint32(const uint32_t *obj) +{ +#if 
defined(_M_X64) || defined(_M_IX86) + return *(volatile uint32_t *)obj; +#elif defined(_M_ARM64) + return (uint32_t)__ldar32((unsigned __int32 volatile *)obj); +#else +# error "no implementation of _Py_atomic_load_uint32" +#endif +} + +static inline uint64_t +_Py_atomic_load_uint64(const uint64_t *obj) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uint64_t *)obj; +#elif defined(_M_ARM64) + return (uint64_t)__ldar64((unsigned __int64 volatile *)obj); +#else +# error "no implementation of _Py_atomic_load_uint64" +#endif +} + +static inline int8_t +_Py_atomic_load_int8(const int8_t *obj) +{ + return (int8_t)_Py_atomic_load_uint8((const uint8_t *)obj); +} + +static inline int16_t +_Py_atomic_load_int16(const int16_t *obj) +{ + return (int16_t)_Py_atomic_load_uint16((const uint16_t *)obj); +} + +static inline int32_t +_Py_atomic_load_int32(const int32_t *obj) +{ + return (int32_t)_Py_atomic_load_uint32((const uint32_t *)obj); +} + +static inline int +_Py_atomic_load_int(const int *obj) +{ + _Py_atomic_ASSERT_ARG_TYPE(uint32_t); + return (int)_Py_atomic_load_uint32((uint32_t *)obj); +} + +static inline unsigned int +_Py_atomic_load_uint(const unsigned int *obj) +{ + _Py_atomic_ASSERT_ARG_TYPE(uint32_t); + return (unsigned int)_Py_atomic_load_uint32((uint32_t *)obj); +} + +static inline int64_t +_Py_atomic_load_int64(const int64_t *obj) +{ + return (int64_t)_Py_atomic_load_uint64((const uint64_t *)obj); +} + +static inline void* +_Py_atomic_load_ptr(const void *obj) +{ +#if SIZEOF_VOID_P == 8 + return (void*)_Py_atomic_load_uint64((const uint64_t *)obj); +#else + return (void*)_Py_atomic_load_uint32((const uint32_t *)obj); +#endif +} + +static inline intptr_t +_Py_atomic_load_intptr(const intptr_t *obj) +{ + _Py_atomic_ASSERT_ARG_TYPE(void*); + return (intptr_t)_Py_atomic_load_ptr((void*)obj); +} + +static inline uintptr_t +_Py_atomic_load_uintptr(const uintptr_t *obj) +{ + _Py_atomic_ASSERT_ARG_TYPE(void*); + return (uintptr_t)_Py_atomic_load_ptr((void*)obj); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize(const Py_ssize_t *obj) +{ + _Py_atomic_ASSERT_ARG_TYPE(void*); + return (Py_ssize_t)_Py_atomic_load_ptr((void*)obj); +} + + +// --- _Py_atomic_load_relaxed ----------------------------------------------- + +static inline int +_Py_atomic_load_int_relaxed(const int *obj) +{ + return *(volatile int *)obj; +} + +static inline int8_t +_Py_atomic_load_int8_relaxed(const int8_t *obj) +{ + return *(volatile int8_t *)obj; +} + +static inline int16_t +_Py_atomic_load_int16_relaxed(const int16_t *obj) +{ + return *(volatile int16_t *)obj; +} + +static inline int32_t +_Py_atomic_load_int32_relaxed(const int32_t *obj) +{ + return *(volatile int32_t *)obj; +} + +static inline int64_t +_Py_atomic_load_int64_relaxed(const int64_t *obj) +{ + return *(volatile int64_t *)obj; +} + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const intptr_t *obj) +{ + return *(volatile intptr_t *)obj; +} + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const uint8_t *obj) +{ + return *(volatile uint8_t *)obj; +} + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const uint16_t *obj) +{ + return *(volatile uint16_t *)obj; +} + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const uint32_t *obj) +{ + return *(volatile uint32_t *)obj; +} + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const uint64_t *obj) +{ + return *(volatile uint64_t *)obj; +} + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj) +{ + return *(volatile uintptr_t *)obj; 
+} + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const unsigned int *obj) +{ + return *(volatile unsigned int *)obj; +} + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj) +{ + return *(volatile Py_ssize_t *)obj; +} + +static inline void* +_Py_atomic_load_ptr_relaxed(const void *obj) +{ + return *(void * volatile *)obj; +} + + +// --- _Py_atomic_store ------------------------------------------------------ + +static inline void +_Py_atomic_store_int(int *obj, int value) +{ + (void)_Py_atomic_exchange_int(obj, value); +} + +static inline void +_Py_atomic_store_int8(int8_t *obj, int8_t value) +{ + (void)_Py_atomic_exchange_int8(obj, value); +} + +static inline void +_Py_atomic_store_int16(int16_t *obj, int16_t value) +{ + (void)_Py_atomic_exchange_int16(obj, value); +} + +static inline void +_Py_atomic_store_int32(int32_t *obj, int32_t value) +{ + (void)_Py_atomic_exchange_int32(obj, value); +} + +static inline void +_Py_atomic_store_int64(int64_t *obj, int64_t value) +{ + (void)_Py_atomic_exchange_int64(obj, value); +} + +static inline void +_Py_atomic_store_intptr(intptr_t *obj, intptr_t value) +{ + (void)_Py_atomic_exchange_intptr(obj, value); +} + +static inline void +_Py_atomic_store_uint8(uint8_t *obj, uint8_t value) +{ + (void)_Py_atomic_exchange_uint8(obj, value); +} + +static inline void +_Py_atomic_store_uint16(uint16_t *obj, uint16_t value) +{ + (void)_Py_atomic_exchange_uint16(obj, value); +} + +static inline void +_Py_atomic_store_uint32(uint32_t *obj, uint32_t value) +{ + (void)_Py_atomic_exchange_uint32(obj, value); +} + +static inline void +_Py_atomic_store_uint64(uint64_t *obj, uint64_t value) +{ + (void)_Py_atomic_exchange_uint64(obj, value); +} + +static inline void +_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value) +{ + (void)_Py_atomic_exchange_uintptr(obj, value); +} + +static inline void +_Py_atomic_store_uint(unsigned int *obj, unsigned int value) +{ + (void)_Py_atomic_exchange_uint(obj, value); +} + +static inline void +_Py_atomic_store_ptr(void *obj, void *value) +{ + (void)_Py_atomic_exchange_ptr(obj, value); +} + +static inline void +_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value) +{ + (void)_Py_atomic_exchange_ssize(obj, value); +} + + +// --- _Py_atomic_store_relaxed ---------------------------------------------- + +static inline void +_Py_atomic_store_int_relaxed(int *obj, int value) +{ + *(volatile int *)obj = value; +} + +static inline void +_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value) +{ + *(volatile int8_t *)obj = value; +} + +static inline void +_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value) +{ + *(volatile int16_t *)obj = value; +} + +static inline void +_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value) +{ + *(volatile int32_t *)obj = value; +} + +static inline void +_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value) +{ + *(volatile int64_t *)obj = value; +} + +static inline void +_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value) +{ + *(volatile intptr_t *)obj = value; +} + +static inline void +_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value) +{ + *(volatile uint8_t *)obj = value; +} + +static inline void +_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value) +{ + *(volatile uint16_t *)obj = value; +} + +static inline void +_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value) +{ + *(volatile uint32_t *)obj = value; +} + +static inline void +_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value) +{ + 
*(volatile uint64_t *)obj = value; +} + +static inline void +_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value) +{ + *(volatile uintptr_t *)obj = value; +} + +static inline void +_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value) +{ + *(volatile unsigned int *)obj = value; +} + +static inline void +_Py_atomic_store_ptr_relaxed(void *obj, void* value) +{ + *(void * volatile *)obj = value; +} + +static inline void +_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value) +{ + *(volatile Py_ssize_t *)obj = value; +} + +// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------ + +static inline void * +_Py_atomic_load_ptr_acquire(const void *obj) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(void * volatile *)obj; +#elif defined(_M_ARM64) + return (void *)__ldar64((unsigned __int64 volatile *)obj); +#else +# error "no implementation of _Py_atomic_load_ptr_acquire" +#endif +} + +static inline void +_Py_atomic_store_ptr_release(void *obj, void *value) +{ +#if defined(_M_X64) || defined(_M_IX86) + *(void * volatile *)obj = value; +#elif defined(_M_ARM64) + __stlr64((unsigned __int64 volatile *)obj, (uintptr_t)value); +#else +# error "no implementation of _Py_atomic_store_ptr_release" +#endif +} + +static inline void +_Py_atomic_store_int_release(int *obj, int value) +{ +#if defined(_M_X64) || defined(_M_IX86) + *(int volatile *)obj = value; +#elif defined(_M_ARM64) + _Py_atomic_ASSERT_ARG_TYPE(unsigned __int32); + __stlr32((unsigned __int32 volatile *)obj, (unsigned __int32)value); +#else +# error "no implementation of _Py_atomic_store_int_release" +#endif +} + +static inline int +_Py_atomic_load_int_acquire(const int *obj) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(int volatile *)obj; +#elif defined(_M_ARM64) + _Py_atomic_ASSERT_ARG_TYPE(unsigned __int32); + return (int)__ldar32((unsigned __int32 volatile *)obj); +#else +# error "no implementation of _Py_atomic_load_int_acquire" +#endif +} + + +// --- _Py_atomic_fence ------------------------------------------------------ + + static inline void +_Py_atomic_fence_seq_cst(void) +{ +#if defined(_M_ARM64) + __dmb(_ARM64_BARRIER_ISH); +#elif defined(_M_X64) + __faststorefence(); +#elif defined(_M_IX86) + _mm_mfence(); +#else +# error "no implementation of _Py_atomic_fence_seq_cst" +#endif +} + + static inline void +_Py_atomic_fence_release(void) +{ +#if defined(_M_ARM64) + __dmb(_ARM64_BARRIER_ISH); +#elif defined(_M_X64) || defined(_M_IX86) + _ReadWriteBarrier(); +#else +# error "no implementation of _Py_atomic_fence_release" +#endif +} + +#undef _Py_atomic_ASSERT_ARG_TYPE diff --git a/Include/cpython/pyatomic_std.h b/Include/cpython/pyatomic_std.h new file mode 100644 index 000000000000000..a05bfaec47e89d5 --- /dev/null +++ b/Include/cpython/pyatomic_std.h @@ -0,0 +1,889 @@ +// This is the implementation of Python atomic operations using C++11 or C11 +// atomics. Note that the pyatomic_gcc.h implementation is preferred for GCC +// compatible compilers, even if they support C++11 atomics. 
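(Aside, not part of the patch: the header comment above introduces the portable _Py_atomic_* API that both this C11/C++11 file and the MSVC file earlier in the diff implement with identical signatures. The short sketch below shows how a caller might combine the fetch-and-add, compare-exchange, and acquire/release helpers declared in this patch; the variable and function names — counter, payload, ready, example_writer, example_reader — are invented for illustration only.)

static int counter;   /* shared between threads */
static int payload;   /* data published via `ready` */
static int ready;     /* publication flag */

static void
example_writer(void)
{
    /* Fetch-and-add: returns the value *before* the increment. */
    int old = _Py_atomic_add_int(&counter, 1);
    (void)old;

    /* Compare-exchange loop: on failure the current value is written back
       into `expected`, so each retry works with fresh data. */
    int expected = _Py_atomic_load_int(&counter);
    while (!_Py_atomic_compare_exchange_int(&counter, &expected, expected * 2)) {
        /* retry with the updated `expected` */
    }

    /* Release/acquire pairing: the plain store to `payload` becomes visible
       to any thread that later observes ready == 1 via the acquire load. */
    payload = 42;
    _Py_atomic_store_int_release(&ready, 1);
}

static int
example_reader(void)
{
    if (_Py_atomic_load_int_acquire(&ready)) {
        return payload;   /* guaranteed to observe 42 */
    }
    return -1;            /* not published yet */
}

(In the C11/C++11 implementation that follows, the unsuffixed operations map to sequentially consistent atomics, while the _relaxed, _acquire, and _release variants pass the corresponding memory_order explicitly.)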
+ +#ifndef Py_ATOMIC_STD_H +# error "this header file must not be included directly" +#endif + +#ifdef __cplusplus +extern "C++" { +# include <atomic> +} +# define _Py_USING_STD using namespace std +# define _Atomic(tp) atomic<tp> +#else +# define _Py_USING_STD +# include <stdatomic.h> +#endif + + +// --- _Py_atomic_add -------------------------------------------------------- + +static inline int +_Py_atomic_add_int(int *obj, int value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(int)*)obj, value); +} + +static inline int8_t +_Py_atomic_add_int8(int8_t *obj, int8_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(int8_t)*)obj, value); +} + +static inline int16_t +_Py_atomic_add_int16(int16_t *obj, int16_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(int16_t)*)obj, value); +} + +static inline int32_t +_Py_atomic_add_int32(int32_t *obj, int32_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(int32_t)*)obj, value); +} + +static inline int64_t +_Py_atomic_add_int64(int64_t *obj, int64_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(int64_t)*)obj, value); +} + +static inline intptr_t +_Py_atomic_add_intptr(intptr_t *obj, intptr_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(intptr_t)*)obj, value); +} + +static inline unsigned int +_Py_atomic_add_uint(unsigned int *obj, unsigned int value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(unsigned int)*)obj, value); +} + +static inline uint8_t +_Py_atomic_add_uint8(uint8_t *obj, uint8_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(uint8_t)*)obj, value); +} + +static inline uint16_t +_Py_atomic_add_uint16(uint16_t *obj, uint16_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(uint16_t)*)obj, value); +} + +static inline uint32_t +_Py_atomic_add_uint32(uint32_t *obj, uint32_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(uint32_t)*)obj, value); +} + +static inline uint64_t +_Py_atomic_add_uint64(uint64_t *obj, uint64_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(uint64_t)*)obj, value); +} + +static inline uintptr_t +_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(uintptr_t)*)obj, value); +} + +static inline Py_ssize_t +_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value) +{ + _Py_USING_STD; + return atomic_fetch_add((_Atomic(Py_ssize_t)*)obj, value); +} + + +// --- _Py_atomic_compare_exchange ------------------------------------------- + +static inline int +_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(int)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(int8_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(int16_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(int32_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(int64_t)*)obj, + expected,
desired); +} + +static inline int +_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(intptr_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(unsigned int)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(uint8_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(uint16_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(uint32_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(uint64_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(uintptr_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(Py_ssize_t)*)obj, + expected, desired); +} + +static inline int +_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *desired) +{ + _Py_USING_STD; + return atomic_compare_exchange_strong((_Atomic(void *)*)obj, + (void **)expected, desired); +} + + +// --- _Py_atomic_exchange --------------------------------------------------- + +static inline int +_Py_atomic_exchange_int(int *obj, int value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(int)*)obj, value); +} + +static inline int8_t +_Py_atomic_exchange_int8(int8_t *obj, int8_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(int8_t)*)obj, value); +} + +static inline int16_t +_Py_atomic_exchange_int16(int16_t *obj, int16_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(int16_t)*)obj, value); +} + +static inline int32_t +_Py_atomic_exchange_int32(int32_t *obj, int32_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(int32_t)*)obj, value); +} + +static inline int64_t +_Py_atomic_exchange_int64(int64_t *obj, int64_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(int64_t)*)obj, value); +} + +static inline intptr_t +_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(intptr_t)*)obj, value); +} + +static inline unsigned int +_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(unsigned int)*)obj, value); +} + +static inline uint8_t +_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(uint8_t)*)obj, value); +} + +static inline uint16_t +_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(uint16_t)*)obj, value); +} + 
+static inline uint32_t +_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(uint32_t)*)obj, value); +} + +static inline uint64_t +_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(uint64_t)*)obj, value); +} + +static inline uintptr_t +_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(uintptr_t)*)obj, value); +} + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(Py_ssize_t)*)obj, value); +} + +static inline void* +_Py_atomic_exchange_ptr(void *obj, void *value) +{ + _Py_USING_STD; + return atomic_exchange((_Atomic(void *)*)obj, value); +} + + +// --- _Py_atomic_and -------------------------------------------------------- + +static inline uint8_t +_Py_atomic_and_uint8(uint8_t *obj, uint8_t value) +{ + _Py_USING_STD; + return atomic_fetch_and((_Atomic(uint8_t)*)obj, value); +} + +static inline uint16_t +_Py_atomic_and_uint16(uint16_t *obj, uint16_t value) +{ + _Py_USING_STD; + return atomic_fetch_and((_Atomic(uint16_t)*)obj, value); +} + +static inline uint32_t +_Py_atomic_and_uint32(uint32_t *obj, uint32_t value) +{ + _Py_USING_STD; + return atomic_fetch_and((_Atomic(uint32_t)*)obj, value); +} + +static inline uint64_t +_Py_atomic_and_uint64(uint64_t *obj, uint64_t value) +{ + _Py_USING_STD; + return atomic_fetch_and((_Atomic(uint64_t)*)obj, value); +} + +static inline uintptr_t +_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value) +{ + _Py_USING_STD; + return atomic_fetch_and((_Atomic(uintptr_t)*)obj, value); +} + + +// --- _Py_atomic_or --------------------------------------------------------- + +static inline uint8_t +_Py_atomic_or_uint8(uint8_t *obj, uint8_t value) +{ + _Py_USING_STD; + return atomic_fetch_or((_Atomic(uint8_t)*)obj, value); +} + +static inline uint16_t +_Py_atomic_or_uint16(uint16_t *obj, uint16_t value) +{ + _Py_USING_STD; + return atomic_fetch_or((_Atomic(uint16_t)*)obj, value); +} + +static inline uint32_t +_Py_atomic_or_uint32(uint32_t *obj, uint32_t value) +{ + _Py_USING_STD; + return atomic_fetch_or((_Atomic(uint32_t)*)obj, value); +} + +static inline uint64_t +_Py_atomic_or_uint64(uint64_t *obj, uint64_t value) +{ + _Py_USING_STD; + return atomic_fetch_or((_Atomic(uint64_t)*)obj, value); +} + +static inline uintptr_t +_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value) +{ + _Py_USING_STD; + return atomic_fetch_or((_Atomic(uintptr_t)*)obj, value); +} + + +// --- _Py_atomic_load ------------------------------------------------------- + +static inline int +_Py_atomic_load_int(const int *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(int)*)obj); +} + +static inline int8_t +_Py_atomic_load_int8(const int8_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(int8_t)*)obj); +} + +static inline int16_t +_Py_atomic_load_int16(const int16_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(int16_t)*)obj); +} + +static inline int32_t +_Py_atomic_load_int32(const int32_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(int32_t)*)obj); +} + +static inline int64_t +_Py_atomic_load_int64(const int64_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(int64_t)*)obj); +} + +static inline intptr_t +_Py_atomic_load_intptr(const intptr_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(intptr_t)*)obj); +} + +static inline 
uint8_t +_Py_atomic_load_uint8(const uint8_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(uint8_t)*)obj); +} + +static inline uint16_t +_Py_atomic_load_uint16(const uint16_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(uint16_t)*)obj); +} + +static inline uint32_t +_Py_atomic_load_uint32(const uint32_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(uint32_t)*)obj); +} + +static inline uint64_t +_Py_atomic_load_uint64(const uint64_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(uint64_t)*)obj); +} + +static inline uintptr_t +_Py_atomic_load_uintptr(const uintptr_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(uintptr_t)*)obj); +} + +static inline unsigned int +_Py_atomic_load_uint(const unsigned int *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(unsigned int)*)obj); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize(const Py_ssize_t *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(Py_ssize_t)*)obj); +} + +static inline void* +_Py_atomic_load_ptr(const void *obj) +{ + _Py_USING_STD; + return atomic_load((const _Atomic(void*)*)obj); +} + + +// --- _Py_atomic_load_relaxed ----------------------------------------------- + +static inline int +_Py_atomic_load_int_relaxed(const int *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(int)*)obj, + memory_order_relaxed); +} + +static inline int8_t +_Py_atomic_load_int8_relaxed(const int8_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(int8_t)*)obj, + memory_order_relaxed); +} + +static inline int16_t +_Py_atomic_load_int16_relaxed(const int16_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(int16_t)*)obj, + memory_order_relaxed); +} + +static inline int32_t +_Py_atomic_load_int32_relaxed(const int32_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(int32_t)*)obj, + memory_order_relaxed); +} + +static inline int64_t +_Py_atomic_load_int64_relaxed(const int64_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(int64_t)*)obj, + memory_order_relaxed); +} + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const intptr_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(intptr_t)*)obj, + memory_order_relaxed); +} + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const uint8_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(uint8_t)*)obj, + memory_order_relaxed); +} + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const uint16_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(uint16_t)*)obj, + memory_order_relaxed); +} + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const uint32_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(uint32_t)*)obj, + memory_order_relaxed); +} + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const uint64_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(uint64_t)*)obj, + memory_order_relaxed); +} + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(uintptr_t)*)obj, + memory_order_relaxed); +} + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const unsigned int *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(unsigned int)*)obj, + memory_order_relaxed); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj)
+{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(Py_ssize_t)*)obj, + memory_order_relaxed); +} + +static inline void* +_Py_atomic_load_ptr_relaxed(const void *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(void*)*)obj, + memory_order_relaxed); +} + + +// --- _Py_atomic_store ------------------------------------------------------ + +static inline void +_Py_atomic_store_int(int *obj, int value) +{ + _Py_USING_STD; + atomic_store((_Atomic(int)*)obj, value); +} + +static inline void +_Py_atomic_store_int8(int8_t *obj, int8_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(int8_t)*)obj, value); +} + +static inline void +_Py_atomic_store_int16(int16_t *obj, int16_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(int16_t)*)obj, value); +} + +static inline void +_Py_atomic_store_int32(int32_t *obj, int32_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(int32_t)*)obj, value); +} + +static inline void +_Py_atomic_store_int64(int64_t *obj, int64_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(int64_t)*)obj, value); +} + +static inline void +_Py_atomic_store_intptr(intptr_t *obj, intptr_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(intptr_t)*)obj, value); +} + +static inline void +_Py_atomic_store_uint8(uint8_t *obj, uint8_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(uint8_t)*)obj, value); +} + +static inline void +_Py_atomic_store_uint16(uint16_t *obj, uint16_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(uint16_t)*)obj, value); +} + +static inline void +_Py_atomic_store_uint32(uint32_t *obj, uint32_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(uint32_t)*)obj, value); +} + +static inline void +_Py_atomic_store_uint64(uint64_t *obj, uint64_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(uint64_t)*)obj, value); +} + +static inline void +_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(uintptr_t)*)obj, value); +} + +static inline void +_Py_atomic_store_uint(unsigned int *obj, unsigned int value) +{ + _Py_USING_STD; + atomic_store((_Atomic(unsigned int)*)obj, value); +} + +static inline void +_Py_atomic_store_ptr(void *obj, void *value) +{ + _Py_USING_STD; + atomic_store((_Atomic(void*)*)obj, value); +} + +static inline void +_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value) +{ + _Py_USING_STD; + atomic_store((_Atomic(Py_ssize_t)*)obj, value); +} + + +// --- _Py_atomic_store_relaxed ---------------------------------------------- + +static inline void +_Py_atomic_store_int_relaxed(int *obj, int value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(int)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(int8_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(int16_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(int32_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(int64_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value) +{ + _Py_USING_STD; + 
atomic_store_explicit((_Atomic(intptr_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(uint8_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(uint16_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(uint32_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(uint64_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(uintptr_t)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(unsigned int)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_ptr_relaxed(void *obj, void *value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(void*)*)obj, value, + memory_order_relaxed); +} + +static inline void +_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(Py_ssize_t)*)obj, value, + memory_order_relaxed); +} + + +// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------ + +static inline void * +_Py_atomic_load_ptr_acquire(const void *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(void*)*)obj, + memory_order_acquire); +} + +static inline void +_Py_atomic_store_ptr_release(void *obj, void *value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(void*)*)obj, value, + memory_order_release); +} + +static inline void +_Py_atomic_store_int_release(int *obj, int value) +{ + _Py_USING_STD; + atomic_store_explicit((_Atomic(int)*)obj, value, + memory_order_release); +} + +static inline int +_Py_atomic_load_int_acquire(const int *obj) +{ + _Py_USING_STD; + return atomic_load_explicit((const _Atomic(int)*)obj, + memory_order_acquire); +} + + + +// --- _Py_atomic_fence ------------------------------------------------------ + + static inline void +_Py_atomic_fence_seq_cst(void) +{ + _Py_USING_STD; + atomic_thread_fence(memory_order_seq_cst); +} + + static inline void +_Py_atomic_fence_release(void) +{ + _Py_USING_STD; + atomic_thread_fence(memory_order_release); +} diff --git a/Include/cpython/pyerrors.h b/Include/cpython/pyerrors.h index 141341667795e8f..479b908fb7058ac 100644 --- a/Include/cpython/pyerrors.h +++ b/Include/cpython/pyerrors.h @@ -88,51 +88,15 @@ typedef PyOSErrorObject PyEnvironmentErrorObject; typedef PyOSErrorObject PyWindowsErrorObject; #endif -/* Error handling definitions */ - -PyAPI_FUNC(void) _PyErr_SetKeyError(PyObject *); -PyAPI_FUNC(_PyErr_StackItem*) _PyErr_GetTopmostException(PyThreadState *tstate); -PyAPI_FUNC(PyObject*) _PyErr_GetHandledException(PyThreadState *); -PyAPI_FUNC(void) _PyErr_SetHandledException(PyThreadState *, PyObject *); -PyAPI_FUNC(void) _PyErr_GetExcInfo(PyThreadState *, PyObject **, PyObject **, PyObject **); - -/* Context manipulation (PEP 3134) */ - -PyAPI_FUNC(void) _PyErr_ChainExceptions(PyObject *, PyObject *, PyObject *); - -/* Like 
PyErr_Format(), but saves current exception as __context__ and - __cause__. - */ -PyAPI_FUNC(PyObject *) _PyErr_FormatFromCause( - PyObject *exception, - const char *format, /* ASCII-encoded string */ - ... - ); - /* In exceptions.c */ -/* Helper that attempts to replace the current exception with one of the - * same type but with a prefix added to the exception text. The resulting - * exception description looks like: - * - * prefix (exc_type: original_exc_str) - * - * Only some exceptions can be safely replaced. If the function determines - * it isn't safe to perform the replacement, it will leave the original - * unmodified exception in place. - * - * Returns a borrowed reference to the new exception (if any), NULL if the - * existing exception was left in place. - */ -PyAPI_FUNC(PyObject *) _PyErr_TrySetFromCause( - const char *prefix_format, /* ASCII-encoded string */ - ... - ); +PyAPI_FUNC(PyObject*) PyUnstable_Exc_PrepReraiseStar( + PyObject *orig, + PyObject *excs); /* In signalmodule.c */ int PySignal_SetWakeupFd(int fd); -PyAPI_FUNC(int) _PyErr_CheckSignals(void); /* Support for adding program text to SyntaxErrors */ @@ -152,36 +116,10 @@ PyAPI_FUNC(PyObject *) PyErr_ProgramTextObject( PyObject *filename, int lineno); -PyAPI_FUNC(PyObject *) _PyErr_ProgramDecodedTextObject( - PyObject *filename, - int lineno, - const char* encoding); - -PyAPI_FUNC(PyObject *) _PyUnicodeTranslateError_Create( - PyObject *object, - Py_ssize_t start, - Py_ssize_t end, - const char *reason /* UTF-8 encoded string */ - ); - -PyAPI_FUNC(void) _PyErr_WriteUnraisableMsg( - const char *err_msg, - PyObject *obj); - PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalErrorFunc( const char *func, const char *message); -PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalErrorFormat( - const char *func, - const char *format, - ...); - -extern PyObject *_PyErr_SetImportErrorWithNameFrom( - PyObject *, - PyObject *, - PyObject *, - PyObject *); - +PyAPI_FUNC(void) PyErr_FormatUnraisable(const char *, ...); #define Py_FatalError(message) _Py_FatalErrorFunc(__func__, (message)) diff --git a/Include/cpython/pyframe.h b/Include/cpython/pyframe.h index 1dc634ccee9a27a..c5adbbe4868f698 100644 --- a/Include/cpython/pyframe.h +++ b/Include/cpython/pyframe.h @@ -14,4 +14,30 @@ PyAPI_FUNC(PyObject *) PyFrame_GetBuiltins(PyFrameObject *frame); PyAPI_FUNC(PyObject *) PyFrame_GetGenerator(PyFrameObject *frame); PyAPI_FUNC(int) PyFrame_GetLasti(PyFrameObject *frame); +PyAPI_FUNC(PyObject*) PyFrame_GetVar(PyFrameObject *frame, PyObject *name); +PyAPI_FUNC(PyObject*) PyFrame_GetVarString(PyFrameObject *frame, const char *name); +/* The following functions are for use by debuggers and other tools + * implementing custom frame evaluators with PEP 523. */ + +struct _PyInterpreterFrame; + +/* Returns the code object of the frame (strong reference). + * Does not raise an exception. */ +PyAPI_FUNC(PyObject *) PyUnstable_InterpreterFrame_GetCode(struct _PyInterpreterFrame *frame); + +/* Returns a byte offset into the last executed instruction. + * Does not raise an exception. */ +PyAPI_FUNC(int) PyUnstable_InterpreterFrame_GetLasti(struct _PyInterpreterFrame *frame); + +/* Returns the currently executing line number, or -1 if there is no line number. + * Does not raise an exception.
*/ +PyAPI_FUNC(int) PyUnstable_InterpreterFrame_GetLine(struct _PyInterpreterFrame *frame); + +#define PyUnstable_EXECUTABLE_KIND_SKIP 0 +#define PyUnstable_EXECUTABLE_KIND_PY_FUNCTION 1 +#define PyUnstable_EXECUTABLE_KIND_BUILTIN_FUNCTION 3 +#define PyUnstable_EXECUTABLE_KIND_METHOD_DESCRIPTOR 4 +#define PyUnstable_EXECUTABLE_KINDS 5 + +PyAPI_DATA(const PyTypeObject *) const PyUnstable_ExecutableKinds[PyUnstable_EXECUTABLE_KINDS+1]; diff --git a/Include/cpython/pyhash.h b/Include/cpython/pyhash.h new file mode 100644 index 000000000000000..6f7113daa5fe4de --- /dev/null +++ b/Include/cpython/pyhash.h @@ -0,0 +1,35 @@ +#ifndef Py_CPYTHON_HASH_H +# error "this header file must not be included directly" +#endif + +/* Prime multiplier used in string and various other hashes. */ +#define _PyHASH_MULTIPLIER 1000003UL /* 0xf4243 */ + +/* Parameters used for the numeric hash implementation. See notes for + _Py_HashDouble in Python/pyhash.c. Numeric hashes are based on + reduction modulo the prime 2**_PyHASH_BITS - 1. */ + +#if SIZEOF_VOID_P >= 8 +# define _PyHASH_BITS 61 +#else +# define _PyHASH_BITS 31 +#endif + +#define _PyHASH_MODULUS (((size_t)1 << _PyHASH_BITS) - 1) +#define _PyHASH_INF 314159 +#define _PyHASH_IMAG _PyHASH_MULTIPLIER + +/* Helpers for hash functions */ +PyAPI_FUNC(Py_hash_t) _Py_HashDouble(PyObject *, double); +PyAPI_FUNC(Py_hash_t) _Py_HashPointer(const void*); + + +/* hash function definition */ +typedef struct { + Py_hash_t (*const hash)(const void *, Py_ssize_t); + const char *name; + const int hash_bits; + const int seed_bits; +} PyHash_FuncDef; + +PyAPI_FUNC(PyHash_FuncDef*) PyHash_GetFuncDef(void); diff --git a/Include/cpython/pylifecycle.h b/Include/cpython/pylifecycle.h index e1f83acbffc3602..d425a233f710000 100644 --- a/Include/cpython/pylifecycle.h +++ b/Include/cpython/pylifecycle.h @@ -6,13 +6,6 @@ in all builds of Python */ PyAPI_FUNC(int) Py_FrozenMain(int argc, char **argv); -/* Only used by applications that embed the interpreter and need to - * override the standard encoding determination mechanism - */ -Py_DEPRECATED(3.11) PyAPI_FUNC(int) Py_SetStandardStreamEncoding( - const char *encoding, - const char *errors); - /* PEP 432 Multi-phase initialization API (Private while provisional!) */ PyAPI_FUNC(PyStatus) Py_PreInitialize( @@ -26,13 +19,13 @@ PyAPI_FUNC(PyStatus) Py_PreInitializeFromArgs( Py_ssize_t argc, wchar_t **argv); -PyAPI_FUNC(int) _Py_IsCoreInitialized(void); - /* Initialization and finalization */ PyAPI_FUNC(PyStatus) Py_InitializeFromConfig( const PyConfig *config); + +// Python 3.8 provisional API (PEP 587) PyAPI_FUNC(PyStatus) _Py_InitializeMain(void); PyAPI_FUNC(int) Py_RunMain(void); @@ -40,27 +33,51 @@ PyAPI_FUNC(int) Py_RunMain(void); PyAPI_FUNC(void) _Py_NO_RETURN Py_ExitStatusException(PyStatus err); -/* Restore signals that the interpreter has called SIG_IGN on to SIG_DFL. 
*/ -PyAPI_FUNC(void) _Py_RestoreSignals(void); - PyAPI_FUNC(int) Py_FdIsInteractive(FILE *, const char *); -PyAPI_FUNC(int) _Py_FdIsInteractive(FILE *fp, PyObject *filename); - -Py_DEPRECATED(3.11) PyAPI_FUNC(void) _Py_SetProgramFullPath(const wchar_t *); - -PyAPI_FUNC(const char *) _Py_gitidentifier(void); -PyAPI_FUNC(const char *) _Py_gitversion(void); - -PyAPI_FUNC(int) _Py_IsFinalizing(void); - -/* Random */ -PyAPI_FUNC(int) _PyOS_URandom(void *buffer, Py_ssize_t size); -PyAPI_FUNC(int) _PyOS_URandomNonblock(void *buffer, Py_ssize_t size); - -/* Legacy locale support */ -PyAPI_FUNC(int) _Py_CoerceLegacyLocale(int warn); -PyAPI_FUNC(int) _Py_LegacyLocaleDetected(int warn); -PyAPI_FUNC(char *) _Py_SetLocaleFromEnv(int category); -PyAPI_FUNC(PyThreadState *) _Py_NewInterpreterFromConfig( - const _PyInterpreterConfig *); +/* --- PyInterpreterConfig ------------------------------------ */ + +#define PyInterpreterConfig_DEFAULT_GIL (0) +#define PyInterpreterConfig_SHARED_GIL (1) +#define PyInterpreterConfig_OWN_GIL (2) + +typedef struct { + // XXX "allow_object_sharing"? "own_objects"? + int use_main_obmalloc; + int allow_fork; + int allow_exec; + int allow_threads; + int allow_daemon_threads; + int check_multi_interp_extensions; + int gil; +} PyInterpreterConfig; + +#define _PyInterpreterConfig_INIT \ + { \ + .use_main_obmalloc = 0, \ + .allow_fork = 0, \ + .allow_exec = 0, \ + .allow_threads = 1, \ + .allow_daemon_threads = 0, \ + .check_multi_interp_extensions = 1, \ + .gil = PyInterpreterConfig_OWN_GIL, \ + } + +#define _PyInterpreterConfig_LEGACY_INIT \ + { \ + .use_main_obmalloc = 1, \ + .allow_fork = 1, \ + .allow_exec = 1, \ + .allow_threads = 1, \ + .allow_daemon_threads = 1, \ + .check_multi_interp_extensions = 0, \ + .gil = PyInterpreterConfig_SHARED_GIL, \ + } + +PyAPI_FUNC(PyStatus) Py_NewInterpreterFromConfig( + PyThreadState **tstate_p, + const PyInterpreterConfig *config); + +typedef void (*atexit_datacallbackfunc)(void *); +PyAPI_FUNC(int) PyUnstable_AtExit( + PyInterpreterState *, atexit_datacallbackfunc, void *); diff --git a/Include/cpython/pymem.h b/Include/cpython/pymem.h index d1054d76520b9aa..76b3221f7b9f39f 100644 --- a/Include/cpython/pymem.h +++ b/Include/cpython/pymem.h @@ -2,24 +2,6 @@ # error "this header file must not be included directly" #endif -PyAPI_FUNC(void *) PyMem_RawMalloc(size_t size); -PyAPI_FUNC(void *) PyMem_RawCalloc(size_t nelem, size_t elsize); -PyAPI_FUNC(void *) PyMem_RawRealloc(void *ptr, size_t new_size); -PyAPI_FUNC(void) PyMem_RawFree(void *ptr); - -/* Try to get the allocators name set by _PyMem_SetupAllocators(). 
*/ -PyAPI_FUNC(const char*) _PyMem_GetCurrentAllocatorName(void); - -/* strdup() using PyMem_RawMalloc() */ -PyAPI_FUNC(char *) _PyMem_RawStrdup(const char *str); - -/* strdup() using PyMem_Malloc() */ -PyAPI_FUNC(char *) _PyMem_Strdup(const char *str); - -/* wcsdup() using PyMem_RawMalloc() */ -PyAPI_FUNC(wchar_t*) _PyMem_RawWcsdup(const wchar_t *str); - - typedef enum { /* PyMem_RawMalloc(), PyMem_RawRealloc() and PyMem_RawFree() */ PYMEM_DOMAIN_RAW, @@ -41,6 +23,10 @@ typedef enum { PYMEM_ALLOCATOR_PYMALLOC = 5, PYMEM_ALLOCATOR_PYMALLOC_DEBUG = 6, #endif +#ifdef WITH_MIMALLOC + PYMEM_ALLOCATOR_MIMALLOC = 7, + PYMEM_ALLOCATOR_MIMALLOC_DEBUG = 8, +#endif } PyMemAllocatorName; diff --git a/Include/cpython/pystate.h b/Include/cpython/pystate.h index 70c23427807e52d..56172d231c44f4e 100644 --- a/Include/cpython/pystate.h +++ b/Include/cpython/pystate.h @@ -3,38 +3,12 @@ #endif -/* -Runtime Feature Flags - -Each flag indicate whether or not a specific runtime feature -is available in a given context. For example, forking the process -might not be allowed in the current interpreter (i.e. os.fork() would fail). -*/ - -/* Set if threads are allowed. */ -#define Py_RTFLAGS_THREADS (1UL << 10) - -/* Set if daemon threads are allowed. */ -#define Py_RTFLAGS_DAEMON_THREADS (1UL << 11) - -/* Set if os.fork() is allowed. */ -#define Py_RTFLAGS_FORK (1UL << 15) - -/* Set if os.exec*() is allowed. */ -#define Py_RTFLAGS_EXEC (1UL << 16) - - -PyAPI_FUNC(int) _PyInterpreterState_HasFeature(PyInterpreterState *interp, - unsigned long feature); - - /* private interpreter helpers */ PyAPI_FUNC(int) _PyInterpreterState_RequiresIDRef(PyInterpreterState *); PyAPI_FUNC(void) _PyInterpreterState_RequireIDRef(PyInterpreterState *, int); -PyAPI_FUNC(PyObject *) _PyInterpreterState_GetMainModule(PyInterpreterState *); - +PyAPI_FUNC(PyObject *) PyUnstable_InterpreterState_GetMainModule(PyInterpreterState *); /* State unique per thread */ @@ -55,31 +29,6 @@ typedef int (*Py_tracefunc)(PyObject *, PyFrameObject *, int, PyObject *); #define PyTrace_C_RETURN 6 #define PyTrace_OPCODE 7 - -typedef struct { - PyCodeObject *code; // The code object for the bounds. May be NULL. - PyCodeAddressRange bounds; // Only valid if code != NULL. -} PyTraceInfo; - -// Internal structure: you should not use it directly, but use public functions -// like PyThreadState_EnterTracing() and PyThreadState_LeaveTracing(). -typedef struct _PyCFrame { - /* This struct will be threaded through the C stack - * allowing fast access to per-thread state that needs - * to be accessed quickly by the interpreter, but can - * be modified outside of the interpreter. - * - * WARNING: This makes data on the C stack accessible from - * heap objects. Care must be taken to maintain stack - * discipline and make sure that instances of this struct cannot - * accessed outside of their lifetime. - */ - uint8_t use_tracing; // 0 or 255 (or'ed into opcode, hence 8-bit type) - /* Pointer to the currently executing frame (it can be NULL) */ - struct _PyInterpreterFrame *current_frame; - struct _PyCFrame *previous; -} _PyCFrame; - typedef struct _err_stackitem { /* This struct represents a single execution context where we might * be currently handling an exception. 
It is a per-coroutine state @@ -107,6 +56,11 @@ typedef struct _stack_chunk { PyObject * data[1]; /* Variable sized */ } _PyStackChunk; +struct _py_trashcan { + int delete_nesting; + PyObject *delete_later; +}; + struct _ts { /* See Python/ceval.c for comments explaining most fields */ @@ -114,14 +68,43 @@ struct _ts { PyThreadState *next; PyInterpreterState *interp; - /* Has been initialized to a safe state. - - In order to be effective, this must be set to 0 during or right - after allocation. */ - int _initialized; + struct { + /* Has been initialized to a safe state. + + In order to be effective, this must be set to 0 during or right + after allocation. */ + unsigned int initialized:1; + + /* Has been bound to an OS thread. */ + unsigned int bound:1; + /* Has been unbound from its OS thread. */ + unsigned int unbound:1; + /* Has been bound as current for the GILState API. */ + unsigned int bound_gilstate:1; + /* Currently in use (maybe holds the GIL). */ + unsigned int active:1; + + /* various stages of finalization */ + unsigned int finalizing:1; + unsigned int cleared:1; + unsigned int finalized:1; + + /* padding to align to 4 bytes */ + unsigned int :24; + } _status; +#ifdef Py_BUILD_CORE +# define _PyThreadState_WHENCE_NOTSET -1 +# define _PyThreadState_WHENCE_UNKNOWN 0 +# define _PyThreadState_WHENCE_INTERP 1 +# define _PyThreadState_WHENCE_THREADING 2 +# define _PyThreadState_WHENCE_GILSTATE 3 +# define _PyThreadState_WHENCE_EXEC 4 +#endif + int _whence; - /* Was this thread state statically allocated? */ - int _static; + /* Thread state (_Py_THREAD_ATTACHED, _Py_THREAD_DETACHED, _Py_THREAD_GC). + See Include/internal/pycore_pystate.h for more details. */ + int state; int py_recursion_remaining; int py_recursion_limit; @@ -133,11 +116,10 @@ struct _ts { This is to prevent the actual trace/profile code from being recorded in the trace/profile. */ int tracing; - int tracing_what; /* The event currently being traced, if any. */ + int what_event; /* The event currently being monitored, if any. */ - /* Pointer to current _PyCFrame in the C stack frame of the currently, - * or most recently, executing _PyEval_EvalFrameDefault. */ - _PyCFrame *cframe; + /* Pointer to currently executing frame. */ + struct _PyInterpreterFrame *current_frame; Py_tracefunc c_profilefunc; Py_tracefunc c_tracefunc; @@ -145,9 +127,7 @@ struct _ts { PyObject *c_traceobj; /* The exception currently being raised */ - PyObject *curexc_type; - PyObject *curexc_value; - PyObject *curexc_traceback; + PyObject *current_exception; /* Pointer to the top of the exception stack for the exceptions * we may be currently handling. (See _PyErr_StackItem above.) @@ -167,8 +147,14 @@ struct _ts { */ unsigned long native_thread_id; - int trash_delete_nesting; - PyObject *trash_delete_later; + struct _py_trashcan trash; + + /* Tagged pointer to top-most critical section, or zero if there is no + * active critical section. Critical sections are only used in + * `--disable-gil` builds (i.e., when Py_GIL_DISABLED is defined to 1). In the + * default build, this field is always zero. + */ + uintptr_t critical_section; /* Called when a thread state is deleted normally, but not when it * is destroyed after fork(). @@ -207,8 +193,6 @@ struct _ts { /* Unique thread state id. */ uint64_t id; - PyTraceInfo trace_info; - _PyStackChunk *datastack_chunk; PyObject **datastack_top; PyObject **datastack_limit; @@ -228,33 +212,32 @@ struct _ts { /* The thread's exception stack entry. (Always the last entry.)
*/ _PyErr_StackItem exc_state; - /* The bottom-most frame on the stack. */ - _PyCFrame root_cframe; }; -/* WASI has limited call stack. Python's recursion limit depends on code - layout, optimization, and WASI runtime. Wasmtime can handle about 700 - recursions, sometimes less. 500 is a more conservative limit. */ -#ifndef C_RECURSION_LIMIT -# ifdef __wasi__ -# define C_RECURSION_LIMIT 500 -# else -# define C_RECURSION_LIMIT 800 -# endif +#ifdef Py_DEBUG + // A debug build is likely built with low optimization level which implies + // higher stack memory usage than a release build: use a lower limit. +# define Py_C_RECURSION_LIMIT 500 +#elif defined(__wasi__) + // WASI has limited call stack. Python's recursion limit depends on code + // layout, optimization, and WASI runtime. Wasmtime can handle about 700 + // recursions, sometimes less. 500 is a more conservative limit. +# define Py_C_RECURSION_LIMIT 500 +#else + // This value is duplicated in Lib/test/support/__init__.py +# define Py_C_RECURSION_LIMIT 1500 #endif -/* other API */ - -// Alias for backward compatibility with Python 3.8 -#define _PyInterpreterState_Get PyInterpreterState_Get -PyAPI_FUNC(PyThreadState *) _PyThreadState_Prealloc(PyInterpreterState *); +/* other API */ /* Similar to PyThreadState_Get(), but don't issue a fatal error * if it is NULL. */ -PyAPI_FUNC(PyThreadState *) _PyThreadState_UncheckedGet(void); +PyAPI_FUNC(PyThreadState *) PyThreadState_GetUnchecked(void); + +// Alias kept for backward compatibility +#define _PyThreadState_UncheckedGet PyThreadState_GetUnchecked -PyAPI_FUNC(PyObject *) _PyThreadState_GetDict(PyThreadState *tstate); // Disable tracing and profiling. PyAPI_FUNC(void) PyThreadState_EnterTracing(PyThreadState *tstate); @@ -271,24 +254,10 @@ PyAPI_FUNC(void) PyThreadState_LeaveTracing(PyThreadState *tstate); The function returns 1 if _PyGILState_check_enabled is non-zero. */ PyAPI_FUNC(int) PyGILState_Check(void); -/* Get the single PyInterpreterState used by this process' GILState - implementation. - - This function doesn't check for error. Return NULL before _PyGILState_Init() - is called and after _PyGILState_Fini() is called. - - See also _PyInterpreterState_Get() and _PyInterpreterState_GET(). */ -PyAPI_FUNC(PyInterpreterState *) _PyGILState_GetInterpreterStateUnsafe(void); - /* The implementation of sys._current_frames() Returns a dict mapping thread id to that thread's current frame. */ -PyAPI_FUNC(PyObject *) _PyThread_CurrentFrames(void); - -/* The implementation of sys._current_exceptions() Returns a dict mapping - thread id to that thread's current exception. -*/ -PyAPI_FUNC(PyObject *) _PyThread_CurrentExceptions(void); +PyAPI_FUNC(PyObject*) _PyThread_CurrentFrames(void); /* Routines for advanced debuggers, requested by David Beazley. Don't use unless you know what you are doing! */ @@ -308,102 +277,3 @@ PyAPI_FUNC(_PyFrameEvalFunction) _PyInterpreterState_GetEvalFrameFunc( PyAPI_FUNC(void) _PyInterpreterState_SetEvalFrameFunc( PyInterpreterState *interp, _PyFrameEvalFunction eval_frame); - -PyAPI_FUNC(const PyConfig*) _PyInterpreterState_GetConfig(PyInterpreterState *interp); - -/* Get a copy of the current interpreter configuration. - - Return 0 on success. Raise an exception and return -1 on error. - - The caller must initialize 'config', using PyConfig_InitPythonConfig() - for example. - - Python must be preinitialized to call this method. - The caller must hold the GIL. - - Once done with the configuration, PyConfig_Clear() must be called to clear - it. 
*/ -PyAPI_FUNC(int) _PyInterpreterState_GetConfigCopy( - struct PyConfig *config); - -/* Set the configuration of the current interpreter. - - This function should be called during or just after the Python - initialization. - - Update the sys module with the new configuration. If the sys module was - modified directly after the Python initialization, these changes are lost. - - Some configuration like faulthandler or warnoptions can be updated in the - configuration, but don't reconfigure Python (don't enable/disable - faulthandler and don't reconfigure warnings filters). - - Return 0 on success. Raise an exception and return -1 on error. - - The configuration should come from _PyInterpreterState_GetConfigCopy(). */ -PyAPI_FUNC(int) _PyInterpreterState_SetConfig( - const struct PyConfig *config); - -// Get the configuration of the current interpreter. -// The caller must hold the GIL. -PyAPI_FUNC(const PyConfig*) _Py_GetConfig(void); - - -/* cross-interpreter data */ - -// _PyCrossInterpreterData is similar to Py_buffer as an effectively -// opaque struct that holds data outside the object machinery. This -// is necessary to pass safely between interpreters in the same process. -typedef struct _xid _PyCrossInterpreterData; - -struct _xid { - // data is the cross-interpreter-safe derivation of a Python object - // (see _PyObject_GetCrossInterpreterData). It will be NULL if the - // new_object func (below) encodes the data. - void *data; - // obj is the Python object from which the data was derived. This - // is non-NULL only if the data remains bound to the object in some - // way, such that the object must be "released" (via a decref) when - // the data is released. In that case the code that sets the field, - // likely a registered "crossinterpdatafunc", is responsible for - // ensuring it owns the reference (i.e. incref). - PyObject *obj; - // interp is the ID of the owning interpreter of the original - // object. It corresponds to the active interpreter when - // _PyObject_GetCrossInterpreterData() was called. This should only - // be set by the cross-interpreter machinery. - // - // We use the ID rather than the PyInterpreterState to avoid issues - // with deleted interpreters. Note that IDs are never re-used, so - // each one will always correspond to a specific interpreter - // (whether still alive or not). - int64_t interp; - // new_object is a function that returns a new object in the current - // interpreter given the data. The resulting object (a new - // reference) will be equivalent to the original object. This field - // is required. - PyObject *(*new_object)(_PyCrossInterpreterData *); - // free is called when the data is released. If it is NULL then - // nothing will be done to free the data. For some types this is - // okay (e.g. bytes) and for those types this field should be set - // to NULL. However, for most the data was allocated just for - // cross-interpreter use, so it must be freed when - // _PyCrossInterpreterData_Release is called or the memory will - // leak. In that case, at the very least this field should be set - // to PyMem_RawFree (the default if not explicitly set to NULL). - // The call will happen with the original interpreter activated. 
- void (*free)(void *); -}; - -PyAPI_FUNC(int) _PyObject_GetCrossInterpreterData(PyObject *, _PyCrossInterpreterData *); -PyAPI_FUNC(PyObject *) _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *); -PyAPI_FUNC(void) _PyCrossInterpreterData_Release(_PyCrossInterpreterData *); - -PyAPI_FUNC(int) _PyObject_CheckCrossInterpreterData(PyObject *); - -/* cross-interpreter data registry */ - -typedef int (*crossinterpdatafunc)(PyObject *, _PyCrossInterpreterData *); - -PyAPI_FUNC(int) _PyCrossInterpreterData_RegisterClass(PyTypeObject *, crossinterpdatafunc); -PyAPI_FUNC(crossinterpdatafunc) _PyCrossInterpreterData_Lookup(PyObject *); diff --git a/Include/cpython/pystats.h b/Include/cpython/pystats.h new file mode 100644 index 000000000000000..294bf1505f01153 --- /dev/null +++ b/Include/cpython/pystats.h @@ -0,0 +1,142 @@ +// Statistics on Python performance. +// +// API: +// +// - _Py_INCREF_STAT_INC() and _Py_DECREF_STAT_INC() used by Py_INCREF() +// and Py_DECREF(). +// - _Py_stats variable +// +// Functions of the sys module: +// +// - sys._stats_on() +// - sys._stats_off() +// - sys._stats_clear() +// - sys._stats_dump() +// +// Python must be built with ./configure --enable-pystats to define the +// Py_STATS macro. +// +// Define _PY_INTERPRETER macro to increment interpreter_increfs and +// interpreter_decrefs. Otherwise, increment increfs and decrefs. + +#ifndef Py_CPYTHON_PYSTATS_H +# error "this header file must not be included directly" +#endif + +#define SPECIALIZATION_FAILURE_KINDS 36 + +/* Stats for determining who is calling PyEval_EvalFrame */ +#define EVAL_CALL_TOTAL 0 +#define EVAL_CALL_VECTOR 1 +#define EVAL_CALL_GENERATOR 2 +#define EVAL_CALL_LEGACY 3 +#define EVAL_CALL_FUNCTION_VECTORCALL 4 +#define EVAL_CALL_BUILD_CLASS 5 +#define EVAL_CALL_SLOT 6 +#define EVAL_CALL_FUNCTION_EX 7 +#define EVAL_CALL_API 8 +#define EVAL_CALL_METHOD 9 + +#define EVAL_CALL_KINDS 10 + +typedef struct _specialization_stats { + uint64_t success; + uint64_t failure; + uint64_t hit; + uint64_t deferred; + uint64_t miss; + uint64_t deopt; + uint64_t failure_kinds[SPECIALIZATION_FAILURE_KINDS]; +} SpecializationStats; + +typedef struct _opcode_stats { + SpecializationStats specialization; + uint64_t execution_count; + uint64_t pair_count[256]; +} OpcodeStats; + +typedef struct _call_stats { + uint64_t inlined_py_calls; + uint64_t pyeval_calls; + uint64_t frames_pushed; + uint64_t frame_objects_created; + uint64_t eval_calls[EVAL_CALL_KINDS]; +} CallStats; + +typedef struct _object_stats { + uint64_t increfs; + uint64_t decrefs; + uint64_t interpreter_increfs; + uint64_t interpreter_decrefs; + uint64_t allocations; + uint64_t allocations512; + uint64_t allocations4k; + uint64_t allocations_big; + uint64_t frees; + uint64_t to_freelist; + uint64_t from_freelist; + uint64_t new_values; + uint64_t dict_materialized_on_request; + uint64_t dict_materialized_new_key; + uint64_t dict_materialized_too_big; + uint64_t dict_materialized_str_subclass; + uint64_t dict_dematerialized; + uint64_t type_cache_hits; + uint64_t type_cache_misses; + uint64_t type_cache_dunder_hits; + uint64_t type_cache_dunder_misses; + uint64_t type_cache_collisions; + /* Temporary value used during GC */ + uint64_t object_visits; +} ObjectStats; + +typedef struct _gc_stats { + uint64_t collections; + uint64_t object_visits; + uint64_t objects_collected; +} GCStats; + +typedef struct _uop_stats { + uint64_t execution_count; + uint64_t miss; +} UOpStats; + +#define _Py_UOP_HIST_SIZE 32 + +typedef struct _optimization_stats { + 
uint64_t attempts; + uint64_t traces_created; + uint64_t traces_executed; + uint64_t uops_executed; + uint64_t trace_stack_overflow; + uint64_t trace_stack_underflow; + uint64_t trace_too_long; + uint64_t trace_too_short; + uint64_t inner_loop; + uint64_t recursive_call; + UOpStats opcode[512]; + uint64_t unsupported_opcode[256]; + uint64_t trace_length_hist[_Py_UOP_HIST_SIZE]; + uint64_t trace_run_length_hist[_Py_UOP_HIST_SIZE]; + uint64_t optimized_trace_length_hist[_Py_UOP_HIST_SIZE]; +} OptimizationStats; + +typedef struct _stats { + OpcodeStats opcode_stats[256]; + CallStats call_stats; + ObjectStats object_stats; + OptimizationStats optimization_stats; + GCStats *gc_stats; +} PyStats; + + +// Export for shared extensions like 'math' +PyAPI_DATA(PyStats*) _Py_stats; + +#ifdef _PY_INTERPRETER +# define _Py_INCREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.interpreter_increfs++; } while (0) +# define _Py_DECREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.interpreter_decrefs++; } while (0) +#else +# define _Py_INCREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.increfs++; } while (0) +# define _Py_DECREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.decrefs++; } while (0) +#endif diff --git a/Include/cpython/pythonrun.h b/Include/cpython/pythonrun.h index fb6176553740268..edc40952254029f 100644 --- a/Include/cpython/pythonrun.h +++ b/Include/cpython/pythonrun.h @@ -3,21 +3,11 @@ #endif PyAPI_FUNC(int) PyRun_SimpleStringFlags(const char *, PyCompilerFlags *); -PyAPI_FUNC(int) _PyRun_SimpleFileObject( - FILE *fp, - PyObject *filename, - int closeit, - PyCompilerFlags *flags); PyAPI_FUNC(int) PyRun_AnyFileExFlags( FILE *fp, const char *filename, /* decoded from the filesystem encoding */ int closeit, PyCompilerFlags *flags); -PyAPI_FUNC(int) _PyRun_AnyFileObject( - FILE *fp, - PyObject *filename, - int closeit, - PyCompilerFlags *flags); PyAPI_FUNC(int) PyRun_SimpleFileExFlags( FILE *fp, const char *filename, /* decoded from the filesystem encoding */ @@ -35,10 +25,6 @@ PyAPI_FUNC(int) PyRun_InteractiveLoopFlags( FILE *fp, const char *filename, /* decoded from the filesystem encoding */ PyCompilerFlags *flags); -PyAPI_FUNC(int) _PyRun_InteractiveLoopObject( - FILE *fp, - PyObject *filename, - PyCompilerFlags *flags); PyAPI_FUNC(PyObject *) PyRun_StringFlags(const char *, int, PyObject *, @@ -69,15 +55,6 @@ PyAPI_FUNC(PyObject *) Py_CompileStringObject( #define Py_CompileString(str, p, s) Py_CompileStringExFlags((str), (p), (s), NULL, -1) #define Py_CompileStringFlags(str, p, s, f) Py_CompileStringExFlags((str), (p), (s), (f), -1) - -PyAPI_FUNC(const char *) _Py_SourceAsString( - PyObject *cmd, - const char *funcname, - const char *what, - PyCompilerFlags *cf, - PyObject **cmd_copy); - - /* A function flavor is also exported by libpython. It is required when libpython is accessed directly rather than using header files which defines macros below. 
On Windows, for example, PyAPI_FUNC() uses dllexport to @@ -114,8 +91,6 @@ PyAPI_FUNC(PyObject *) PyRun_FileFlags(FILE *fp, const char *p, int s, PyObject #define PyRun_FileFlags(fp, p, s, g, l, flags) \ PyRun_FileExFlags((fp), (p), (s), (g), (l), 0, (flags)) - /* Stuff with no proper home (yet) */ PyAPI_FUNC(char *) PyOS_Readline(FILE *, FILE *, const char *); -PyAPI_DATA(PyThreadState*) _PyOS_ReadlineTState; PyAPI_DATA(char) *(*PyOS_ReadlineFunctionPointer)(FILE *, FILE *, const char *); diff --git a/Include/cpython/pythread.h b/Include/cpython/pythread.h index ce4ec8f65b15ea0..03f710a9f7ef2e7 100644 --- a/Include/cpython/pythread.h +++ b/Include/cpython/pythread.h @@ -2,14 +2,15 @@ # error "this header file must not be included directly" #endif -#define PYTHREAD_INVALID_THREAD_ID ((unsigned long)-1) +// PY_TIMEOUT_MAX is the highest usable value (in microseconds) of PY_TIMEOUT_T +// type, and depends on the system threading API. +// +// NOTE: this isn't the same value as `_thread.TIMEOUT_MAX`. The _thread module +// exposes a higher-level API, with timeouts expressed in seconds and +// floating-point numbers allowed. +PyAPI_DATA(const long long) PY_TIMEOUT_MAX; -#ifdef HAVE_FORK -/* Private function to reinitialize a lock at fork in the child process. - Reset the lock to the unlocked state. - Return 0 on success, return -1 on error. */ -PyAPI_FUNC(int) _PyThread_at_fork_reinit(PyThread_type_lock *lock); -#endif /* HAVE_FORK */ +#define PYTHREAD_INVALID_THREAD_ID ((unsigned long)-1) #ifdef HAVE_PTHREAD_H /* Darwin needs pthread.h to know type name the pthread_key_t. */ diff --git a/Include/cpython/pytime.h b/Include/cpython/pytime.h deleted file mode 100644 index e64f3b13e75ca17..000000000000000 --- a/Include/cpython/pytime.h +++ /dev/null @@ -1,327 +0,0 @@ -// The _PyTime_t API is written to use timestamp and timeout values stored in -// various formats and to read clocks. -// -// The _PyTime_t type is an integer to support directly common arithmetic -// operations like t1 + t2. -// -// The _PyTime_t API supports a resolution of 1 nanosecond. The _PyTime_t type -// is signed to support negative timestamps. The supported range is around -// [-292.3 years; +292.3 years]. Using the Unix epoch (January 1st, 1970), the -// supported date range is around [1677-09-21; 2262-04-11]. -// -// Formats: -// -// * seconds -// * seconds as a floating pointer number (C double) -// * milliseconds (10^-3 seconds) -// * microseconds (10^-6 seconds) -// * 100 nanoseconds (10^-7 seconds) -// * nanoseconds (10^-9 seconds) -// * timeval structure, 1 microsecond resolution (10^-6 seconds) -// * timespec structure, 1 nanosecond resolution (10^-9 seconds) -// -// Integer overflows are detected and raise OverflowError. Conversion to a -// resolution worse than 1 nanosecond is rounded correctly with the requested -// rounding mode. There are 4 rounding modes: floor (towards -inf), ceiling -// (towards +inf), half even and up (away from zero). -// -// Some functions clamp the result in the range [_PyTime_MIN; _PyTime_MAX], so -// the caller doesn't have to handle errors and doesn't need to hold the GIL. -// For example, _PyTime_Add(t1, t2) computes t1+t2 and clamp the result on -// overflow. -// -// Clocks: -// -// * System clock -// * Monotonic clock -// * Performance counter -// -// Operations like (t * k / q) with integers are implemented in a way to reduce -// the risk of integer overflow. 
Such operation is used to convert a clock -// value expressed in ticks with a frequency to _PyTime_t, like -// QueryPerformanceCounter() with QueryPerformanceFrequency(). - -#ifndef Py_LIMITED_API -#ifndef Py_PYTIME_H -#define Py_PYTIME_H - -/************************************************************************** -Symbols and macros to supply platform-independent interfaces to time related -functions and constants -**************************************************************************/ -#ifdef __cplusplus -extern "C" { -#endif - -/* _PyTime_t: Python timestamp with subsecond precision. It can be used to - store a duration, and so indirectly a date (related to another date, like - UNIX epoch). */ -typedef int64_t _PyTime_t; -// _PyTime_MIN nanoseconds is around -292.3 years -#define _PyTime_MIN INT64_MIN -// _PyTime_MAX nanoseconds is around +292.3 years -#define _PyTime_MAX INT64_MAX -#define _SIZEOF_PYTIME_T 8 - -typedef enum { - /* Round towards minus infinity (-inf). - For example, used to read a clock. */ - _PyTime_ROUND_FLOOR=0, - /* Round towards infinity (+inf). - For example, used for timeout to wait "at least" N seconds. */ - _PyTime_ROUND_CEILING=1, - /* Round to nearest with ties going to nearest even integer. - For example, used to round from a Python float. */ - _PyTime_ROUND_HALF_EVEN=2, - /* Round away from zero - For example, used for timeout. _PyTime_ROUND_CEILING rounds - -1e-9 to 0 milliseconds which causes bpo-31786 issue. - _PyTime_ROUND_UP rounds -1e-9 to -1 millisecond which keeps - the timeout sign as expected. select.poll(timeout) must block - for negative values." */ - _PyTime_ROUND_UP=3, - /* _PyTime_ROUND_TIMEOUT (an alias for _PyTime_ROUND_UP) should be - used for timeouts. */ - _PyTime_ROUND_TIMEOUT = _PyTime_ROUND_UP -} _PyTime_round_t; - - -/* Convert a time_t to a PyLong. */ -PyAPI_FUNC(PyObject *) _PyLong_FromTime_t( - time_t sec); - -/* Convert a PyLong to a time_t. */ -PyAPI_FUNC(time_t) _PyLong_AsTime_t( - PyObject *obj); - -/* Convert a number of seconds, int or float, to time_t. */ -PyAPI_FUNC(int) _PyTime_ObjectToTime_t( - PyObject *obj, - time_t *sec, - _PyTime_round_t); - -/* Convert a number of seconds, int or float, to a timeval structure. - usec is in the range [0; 999999] and rounded towards zero. - For example, -1.2 is converted to (-2, 800000). */ -PyAPI_FUNC(int) _PyTime_ObjectToTimeval( - PyObject *obj, - time_t *sec, - long *usec, - _PyTime_round_t); - -/* Convert a number of seconds, int or float, to a timespec structure. - nsec is in the range [0; 999999999] and rounded towards zero. - For example, -1.2 is converted to (-2, 800000000). */ -PyAPI_FUNC(int) _PyTime_ObjectToTimespec( - PyObject *obj, - time_t *sec, - long *nsec, - _PyTime_round_t); - - -/* Create a timestamp from a number of seconds. */ -PyAPI_FUNC(_PyTime_t) _PyTime_FromSeconds(int seconds); - -/* Macro to create a timestamp from a number of seconds, no integer overflow. - Only use the macro for small values, prefer _PyTime_FromSeconds(). */ -#define _PYTIME_FROMSECONDS(seconds) \ - ((_PyTime_t)(seconds) * (1000 * 1000 * 1000)) - -/* Create a timestamp from a number of nanoseconds. */ -PyAPI_FUNC(_PyTime_t) _PyTime_FromNanoseconds(_PyTime_t ns); - -/* Create a timestamp from a number of microseconds. - * Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. */ -PyAPI_FUNC(_PyTime_t) _PyTime_FromMicrosecondsClamp(_PyTime_t us); - -/* Create a timestamp from nanoseconds (Python int). 
*/ -PyAPI_FUNC(int) _PyTime_FromNanosecondsObject(_PyTime_t *t, - PyObject *obj); - -/* Convert a number of seconds (Python float or int) to a timestamp. - Raise an exception and return -1 on error, return 0 on success. */ -PyAPI_FUNC(int) _PyTime_FromSecondsObject(_PyTime_t *t, - PyObject *obj, - _PyTime_round_t round); - -/* Convert a number of milliseconds (Python float or int, 10^-3) to a timestamp. - Raise an exception and return -1 on error, return 0 on success. */ -PyAPI_FUNC(int) _PyTime_FromMillisecondsObject(_PyTime_t *t, - PyObject *obj, - _PyTime_round_t round); - -/* Convert a timestamp to a number of seconds as a C double. */ -PyAPI_FUNC(double) _PyTime_AsSecondsDouble(_PyTime_t t); - -/* Convert timestamp to a number of milliseconds (10^-3 seconds). */ -PyAPI_FUNC(_PyTime_t) _PyTime_AsMilliseconds(_PyTime_t t, - _PyTime_round_t round); - -/* Convert timestamp to a number of microseconds (10^-6 seconds). */ -PyAPI_FUNC(_PyTime_t) _PyTime_AsMicroseconds(_PyTime_t t, - _PyTime_round_t round); - -/* Convert timestamp to a number of nanoseconds (10^-9 seconds). */ -PyAPI_FUNC(_PyTime_t) _PyTime_AsNanoseconds(_PyTime_t t); - -#ifdef MS_WINDOWS -// Convert timestamp to a number of 100 nanoseconds (10^-7 seconds). -PyAPI_FUNC(_PyTime_t) _PyTime_As100Nanoseconds(_PyTime_t t, - _PyTime_round_t round); -#endif - -/* Convert timestamp to a number of nanoseconds (10^-9 seconds) as a Python int - object. */ -PyAPI_FUNC(PyObject *) _PyTime_AsNanosecondsObject(_PyTime_t t); - -#ifndef MS_WINDOWS -/* Create a timestamp from a timeval structure. - Raise an exception and return -1 on overflow, return 0 on success. */ -PyAPI_FUNC(int) _PyTime_FromTimeval(_PyTime_t *tp, struct timeval *tv); -#endif - -/* Convert a timestamp to a timeval structure (microsecond resolution). - tv_usec is always positive. - Raise an exception and return -1 if the conversion overflowed, - return 0 on success. */ -PyAPI_FUNC(int) _PyTime_AsTimeval(_PyTime_t t, - struct timeval *tv, - _PyTime_round_t round); - -/* Similar to _PyTime_AsTimeval() but don't raise an exception on overflow. - On overflow, clamp tv_sec to _PyTime_t min/max. */ -PyAPI_FUNC(void) _PyTime_AsTimeval_clamp(_PyTime_t t, - struct timeval *tv, - _PyTime_round_t round); - -/* Convert a timestamp to a number of seconds (secs) and microseconds (us). - us is always positive. This function is similar to _PyTime_AsTimeval() - except that secs is always a time_t type, whereas the timeval structure - uses a C long for tv_sec on Windows. - Raise an exception and return -1 if the conversion overflowed, - return 0 on success. */ -PyAPI_FUNC(int) _PyTime_AsTimevalTime_t( - _PyTime_t t, - time_t *secs, - int *us, - _PyTime_round_t round); - -#if defined(HAVE_CLOCK_GETTIME) || defined(HAVE_KQUEUE) -/* Create a timestamp from a timespec structure. - Raise an exception and return -1 on overflow, return 0 on success. */ -PyAPI_FUNC(int) _PyTime_FromTimespec(_PyTime_t *tp, struct timespec *ts); - -/* Convert a timestamp to a timespec structure (nanosecond resolution). - tv_nsec is always positive. - Raise an exception and return -1 on error, return 0 on success. */ -PyAPI_FUNC(int) _PyTime_AsTimespec(_PyTime_t t, struct timespec *ts); - -/* Similar to _PyTime_AsTimespec() but don't raise an exception on overflow. - On overflow, clamp tv_sec to _PyTime_t min/max. */ -PyAPI_FUNC(void) _PyTime_AsTimespec_clamp(_PyTime_t t, struct timespec *ts); -#endif - - -// Compute t1 + t2. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. 
-PyAPI_FUNC(_PyTime_t) _PyTime_Add(_PyTime_t t1, _PyTime_t t2); - -/* Compute ticks * mul / div. - Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. - The caller must ensure that ((div - 1) * mul) cannot overflow. */ -PyAPI_FUNC(_PyTime_t) _PyTime_MulDiv(_PyTime_t ticks, - _PyTime_t mul, - _PyTime_t div); - -/* Structure used by time.get_clock_info() */ -typedef struct { - const char *implementation; - int monotonic; - int adjustable; - double resolution; -} _Py_clock_info_t; - -/* Get the current time from the system clock. - - If the internal clock fails, silently ignore the error and return 0. - On integer overflow, silently ignore the overflow and clamp the clock to - [_PyTime_MIN; _PyTime_MAX]. - - Use _PyTime_GetSystemClockWithInfo() to check for failure. */ -PyAPI_FUNC(_PyTime_t) _PyTime_GetSystemClock(void); - -/* Get the current time from the system clock. - * On success, set *t and *info (if not NULL), and return 0. - * On error, raise an exception and return -1. - */ -PyAPI_FUNC(int) _PyTime_GetSystemClockWithInfo( - _PyTime_t *t, - _Py_clock_info_t *info); - -/* Get the time of a monotonic clock, i.e. a clock that cannot go backwards. - The clock is not affected by system clock updates. The reference point of - the returned value is undefined, so that only the difference between the - results of consecutive calls is valid. - - If the internal clock fails, silently ignore the error and return 0. - On integer overflow, silently ignore the overflow and clamp the clock to - [_PyTime_MIN; _PyTime_MAX]. - - Use _PyTime_GetMonotonicClockWithInfo() to check for failure. */ -PyAPI_FUNC(_PyTime_t) _PyTime_GetMonotonicClock(void); - -/* Get the time of a monotonic clock, i.e. a clock that cannot go backwards. - The clock is not affected by system clock updates. The reference point of - the returned value is undefined, so that only the difference between the - results of consecutive calls is valid. - - Fill info (if set) with information of the function used to get the time. - - Return 0 on success, raise an exception and return -1 on error. */ -PyAPI_FUNC(int) _PyTime_GetMonotonicClockWithInfo( - _PyTime_t *t, - _Py_clock_info_t *info); - - -/* Converts a timestamp to the Gregorian time, using the local time zone. - Return 0 on success, raise an exception and return -1 on error. */ -PyAPI_FUNC(int) _PyTime_localtime(time_t t, struct tm *tm); - -/* Converts a timestamp to the Gregorian time, assuming UTC. - Return 0 on success, raise an exception and return -1 on error. */ -PyAPI_FUNC(int) _PyTime_gmtime(time_t t, struct tm *tm); - -/* Get the performance counter: clock with the highest available resolution to - measure a short duration. - - If the internal clock fails, silently ignore the error and return 0. - On integer overflow, silently ignore the overflow and clamp the clock to - [_PyTime_MIN; _PyTime_MAX]. - - Use _PyTime_GetPerfCounterWithInfo() to check for failure. */ -PyAPI_FUNC(_PyTime_t) _PyTime_GetPerfCounter(void); - -/* Get the performance counter: clock with the highest available resolution to - measure a short duration. - - Fill info (if set) with information of the function used to get the time. - - Return 0 on success, raise an exception and return -1 on error. */ -PyAPI_FUNC(int) _PyTime_GetPerfCounterWithInfo( - _PyTime_t *t, - _Py_clock_info_t *info); - - -// Create a deadline. -// Pseudo code: _PyTime_GetMonotonicClock() + timeout. -PyAPI_FUNC(_PyTime_t) _PyDeadline_Init(_PyTime_t timeout); - -// Get remaining time from a deadline. 
-// Pseudo code: deadline - _PyTime_GetMonotonicClock(). -PyAPI_FUNC(_PyTime_t) _PyDeadline_Get(_PyTime_t deadline); - -#ifdef __cplusplus -} -#endif - -#endif /* Py_PYTIME_H */ -#endif /* Py_LIMITED_API */ diff --git a/Include/cpython/setobject.h b/Include/cpython/setobject.h index b4443a678b7e771..1778c778a05324c 100644 --- a/Include/cpython/setobject.h +++ b/Include/cpython/setobject.h @@ -58,10 +58,10 @@ typedef struct { PyObject *weakreflist; /* List of weak references */ } PySetObject; -#define PySet_GET_SIZE(so) \ - (assert(PyAnySet_Check(so)), (((PySetObject *)(so))->used)) +#define _PySet_CAST(so) \ + (assert(PyAnySet_Check(so)), _Py_CAST(PySetObject*, so)) -PyAPI_DATA(PyObject *) _PySet_Dummy; - -PyAPI_FUNC(int) _PySet_NextEntry(PyObject *set, Py_ssize_t *pos, PyObject **key, Py_hash_t *hash); -PyAPI_FUNC(int) _PySet_Update(PyObject *set, PyObject *iterable); +static inline Py_ssize_t PySet_GET_SIZE(PyObject *so) { + return _PySet_CAST(so)->used; +} +#define PySet_GET_SIZE(so) PySet_GET_SIZE(_PyObject_CAST(so)) diff --git a/Include/cpython/sysmodule.h b/Include/cpython/sysmodule.h index 19d9dddc344a4f0..9fd7cc0cb439311 100644 --- a/Include/cpython/sysmodule.h +++ b/Include/cpython/sysmodule.h @@ -2,15 +2,25 @@ # error "this header file must not be included directly" #endif -PyAPI_FUNC(PyObject *) _PySys_GetAttr(PyThreadState *tstate, - PyObject *name); - -PyAPI_FUNC(size_t) _PySys_GetSizeOf(PyObject *); - typedef int(*Py_AuditHookFunction)(const char *, PyObject *, void *); PyAPI_FUNC(int) PySys_Audit( const char *event, - const char *argFormat, + const char *format, ...); PyAPI_FUNC(int) PySys_AddAuditHook(Py_AuditHookFunction, void*); + +typedef struct { + FILE* perf_map; + PyThread_type_lock map_lock; +} PerfMapState; + +PyAPI_FUNC(int) PyUnstable_PerfMapState_Init(void); +PyAPI_FUNC(int) PyUnstable_WritePerfMapEntry( + const void *code_addr, + unsigned int code_size, + const char *entry_name); +PyAPI_FUNC(void) PyUnstable_PerfMapState_Fini(void); +PyAPI_FUNC(int) PyUnstable_CopyPerfMapFile(const char* parent_filename); +PyAPI_FUNC(int) PyUnstable_PerfTrampoline_CompileCode(PyCodeObject *); +PyAPI_FUNC(int) PyUnstable_PerfTrampoline_SetPersistAfterFork(int enable); diff --git a/Include/cpython/traceback.h b/Include/cpython/traceback.h index a4e087b2b4ecedb..81c51944f136f29 100644 --- a/Include/cpython/traceback.h +++ b/Include/cpython/traceback.h @@ -11,6 +11,3 @@ struct _traceback { int tb_lasti; int tb_lineno; }; - -PyAPI_FUNC(int) _Py_DisplaySourceLine(PyObject *, PyObject *, int, int, int *, PyObject **); -PyAPI_FUNC(void) _PyTraceback_Add(const char *, const char *, int); diff --git a/Include/cpython/tracemalloc.h b/Include/cpython/tracemalloc.h new file mode 100644 index 000000000000000..61a16ea9a9f3ebd --- /dev/null +++ b/Include/cpython/tracemalloc.h @@ -0,0 +1,26 @@ +#ifndef Py_LIMITED_API +#ifndef Py_TRACEMALLOC_H +#define Py_TRACEMALLOC_H + +/* Track an allocated memory block in the tracemalloc module. + Return 0 on success, return -1 on error (failed to allocate memory to store + the trace). + + Return -2 if tracemalloc is disabled. + + If memory block is already tracked, update the existing trace. */ +PyAPI_FUNC(int) PyTraceMalloc_Track( + unsigned int domain, + uintptr_t ptr, + size_t size); + +/* Untrack an allocated memory block in the tracemalloc module. + Do nothing if the block was not tracked. + + Return -2 if tracemalloc is disabled, otherwise return 0. 
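// Illustrative sketch: how an extension's custom allocator might report its
// blocks through PyTraceMalloc_Track()/PyTraceMalloc_Untrack() declared in
// this header (`size` is assumed; the domain value 0xABCD is arbitrary):
//
//   void *buf = malloc(size);
//   if (buf == NULL) {
//       return NULL;
//   }
//   (void)PyTraceMalloc_Track(0xABCD, (uintptr_t)buf, size);
//   /* ... use buf ... */
//   (void)PyTraceMalloc_Untrack(0xABCD, (uintptr_t)buf);
//   free(buf);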
*/ +PyAPI_FUNC(int) PyTraceMalloc_Untrack( + unsigned int domain, + uintptr_t ptr); + +#endif // !Py_TRACEMALLOC_H +#endif // !Py_LIMITED_API diff --git a/Include/cpython/tupleobject.h b/Include/cpython/tupleobject.h index f6a1f076e033305..e530c8beda44ab3 100644 --- a/Include/cpython/tupleobject.h +++ b/Include/cpython/tupleobject.h @@ -11,7 +11,6 @@ typedef struct { } PyTupleObject; PyAPI_FUNC(int) _PyTuple_Resize(PyObject **, Py_ssize_t); -PyAPI_FUNC(void) _PyTuple_MaybeUntrack(PyObject *); /* Cast argument to PyTupleObject* type. */ #define _PyTuple_CAST(op) \ @@ -31,9 +30,9 @@ static inline Py_ssize_t PyTuple_GET_SIZE(PyObject *op) { static inline void PyTuple_SET_ITEM(PyObject *op, Py_ssize_t index, PyObject *value) { PyTupleObject *tuple = _PyTuple_CAST(op); + assert(0 <= index); + assert(index < Py_SIZE(tuple)); tuple->ob_item[index] = value; } #define PyTuple_SET_ITEM(op, index, value) \ PyTuple_SET_ITEM(_PyObject_CAST(op), (index), _PyObject_CAST(value)) - -PyAPI_FUNC(void) _PyTuple_DebugMallocStats(FILE *out); diff --git a/Include/cpython/unicodeobject.h b/Include/cpython/unicodeobject.h index 8444507ade1b31e..d9b54bce83202da 100644 --- a/Include/cpython/unicodeobject.h +++ b/Include/cpython/unicodeobject.h @@ -6,8 +6,9 @@ Python and represents a single Unicode element in the Unicode type. With PEP 393, Py_UNICODE is deprecated and replaced with a typedef to wchar_t. */ -#define PY_UNICODE_TYPE wchar_t -/* Py_DEPRECATED(3.3) */ typedef wchar_t Py_UNICODE; +Py_DEPRECATED(3.13) typedef wchar_t PY_UNICODE_TYPE; +Py_DEPRECATED(3.13) typedef wchar_t Py_UNICODE; + /* --- Internal Unicode Operations ---------------------------------------- */ @@ -43,6 +44,7 @@ static inline Py_UCS4 Py_UNICODE_LOW_SURROGATE(Py_UCS4 ch) { return (0xDC00 + (ch & 0x3FF)); } + /* --- Unicode Type ------------------------------------------------------- */ /* ASCII-only strings created through PyUnicode_New use the PyASCIIObject @@ -98,9 +100,16 @@ typedef struct { Py_ssize_t length; /* Number of code points in the string */ Py_hash_t hash; /* Hash value; -1 if not set */ struct { - /* If interned is set, the two references from the - dictionary to this object are *not* counted in ob_refcnt. */ - unsigned int interned:1; + /* If interned is non-zero, the two references from the + dictionary to this object are *not* counted in ob_refcnt. + The possible values here are: + 0: Not Interned + 1: Interned + 2: Interned and Immortal + 3: Interned, Immortal, and Static + This categorization allows the runtime to determine the right + cleanup mechanism at runtime shutdown. */ + unsigned int interned:2; /* Character size: - PyUnicode_1BYTE_KIND (1): @@ -133,9 +142,11 @@ typedef struct { and the kind is PyUnicode_1BYTE_KIND. If ascii is set and compact is set, use the PyASCIIObject structure. */ unsigned int ascii:1; + /* The object is statically allocated. */ + unsigned int statically_allocated:1; /* Padding to ensure that PyUnicode_DATA() is always aligned to 4 bytes (see issue #19537 on m68k). */ - unsigned int :25; + unsigned int :24; } state; } PyASCIIObject; @@ -160,10 +171,6 @@ typedef struct { } data; /* Canonical, smallest-form Unicode buffer */ } PyUnicodeObject; -PyAPI_FUNC(int) _PyUnicode_CheckConsistency( - PyObject *op, - int check_content); - #define _PyASCIIObject_CAST(op) \ (assert(PyUnicode_Check(op)), \ @@ -183,6 +190,8 @@ PyAPI_FUNC(int) _PyUnicode_CheckConsistency( /* Interning state. 
*/ #define SSTATE_NOT_INTERNED 0 #define SSTATE_INTERNED_MORTAL 1 +#define SSTATE_INTERNED_IMMORTAL 2 +#define SSTATE_INTERNED_IMMORTAL_STATIC 3 /* Use only if you know it's a string */ static inline unsigned int PyUnicode_CHECK_INTERNED(PyObject *op) { @@ -231,9 +240,7 @@ enum PyUnicode_Kind { // new compiler warnings on "kind < PyUnicode_KIND(str)" (compare signed and // unsigned numbers) where kind type is an int or on // "unsigned int kind = PyUnicode_KIND(str)" (cast signed to unsigned). -// Only declare the function as static inline function in the limited C API -// version 3.12 which is stricter. -#define PyUnicode_KIND(op) (_PyASCIIObject_CAST(op)->state.kind) +#define PyUnicode_KIND(op) _Py_RVALUE(_PyASCIIObject_CAST(op)->state.kind) /* Return a void pointer to the raw unicode buffer. */ static inline void* _PyUnicode_COMPACT_DATA(PyObject *op) { @@ -370,9 +377,8 @@ static inline Py_UCS4 PyUnicode_MAX_CHAR_VALUE(PyObject *op) #define PyUnicode_MAX_CHAR_VALUE(op) \ PyUnicode_MAX_CHAR_VALUE(_PyObject_CAST(op)) -/* === Public API ========================================================= */ -/* --- Plain Py_UNICODE --------------------------------------------------- */ +/* === Public API ========================================================= */ /* With PEP 393, this is the recommended way to allocate a new unicode object. This function will allocate the object and its buffer in a single memory @@ -389,11 +395,6 @@ static inline int PyUnicode_READY(PyObject* Py_UNUSED(op)) } #define PyUnicode_READY(op) PyUnicode_READY(_PyObject_CAST(op)) -/* Get a copy of a Unicode string. */ -PyAPI_FUNC(PyObject*) _PyUnicode_Copy( - PyObject *unicode - ); - /* Copy character from one unicode object into another, this function performs character conversion when necessary and falls back to memcpy() if possible. @@ -420,17 +421,6 @@ PyAPI_FUNC(Py_ssize_t) PyUnicode_CopyCharacters( Py_ssize_t how_many ); -/* Unsafe version of PyUnicode_CopyCharacters(): don't check arguments and so - may crash if parameters are invalid (e.g. if the output string - is too short). */ -PyAPI_FUNC(void) _PyUnicode_FastCopyCharacters( - PyObject *to, - Py_ssize_t to_start, - PyObject *from, - Py_ssize_t from_start, - Py_ssize_t how_many - ); - /* Fill a string with a character: write fill_char into unicode[start:start+length]. @@ -446,15 +436,6 @@ PyAPI_FUNC(Py_ssize_t) PyUnicode_Fill( Py_UCS4 fill_char ); -/* Unsafe version of PyUnicode_Fill(): don't check arguments and so may crash - if parameters are invalid (e.g. if length is longer than the string). */ -PyAPI_FUNC(void) _PyUnicode_FastFill( - PyObject *unicode, - Py_ssize_t start, - Py_ssize_t length, - Py_UCS4 fill_char - ); - /* Create a new string from a buffer of Py_UCS1, Py_UCS2 or Py_UCS4 characters. Scan the string to find the maximum character. */ PyAPI_FUNC(PyObject*) PyUnicode_FromKindAndData( @@ -462,18 +443,6 @@ PyAPI_FUNC(PyObject*) PyUnicode_FromKindAndData( const void *buffer, Py_ssize_t size); -/* Create a new string from a buffer of ASCII characters. - WARNING: Don't check if the string contains any non-ASCII character. */ -PyAPI_FUNC(PyObject*) _PyUnicode_FromASCII( - const char *buffer, - Py_ssize_t size); - -/* Compute the maximum character of the substring unicode[start:end]. - Return 127 for an empty string. 
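// Illustrative sketch: allocating and filling a string with PyUnicode_New()
// and PyUnicode_Fill() as described above (maxchar 127 keeps the string
// ASCII; error handling kept minimal):
//
//   PyObject *s = PyUnicode_New(4, 127);      // room for 4 code points
//   if (s != NULL && PyUnicode_Fill(s, 0, 4, '=') < 0) {
//       Py_CLEAR(s);                          // -1 means an exception is set
//   }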
*/ -PyAPI_FUNC(Py_UCS4) _PyUnicode_FindMaxChar ( - PyObject *unicode, - Py_ssize_t start, - Py_ssize_t end); /* --- _PyUnicodeWriter API ----------------------------------------------- */ @@ -499,11 +468,11 @@ typedef struct { unsigned char readonly; } _PyUnicodeWriter ; -/* Initialize a Unicode writer. - * - * By default, the minimum buffer size is 0 character and overallocation is - * disabled. Set min_length, min_char and overallocate attributes to control - * the allocation of the buffer. */ +// Initialize a Unicode writer. +// +// By default, the minimum buffer size is 0 character and overallocation is +// disabled. Set min_length, min_char and overallocate attributes to control +// the allocation of the buffer. PyAPI_FUNC(void) _PyUnicodeWriter_Init(_PyUnicodeWriter *writer); @@ -591,15 +560,6 @@ PyAPI_FUNC(void) _PyUnicodeWriter_Dealloc(_PyUnicodeWriter *writer); -/* Format the object based on the format_spec, as defined in PEP 3101 - (Advanced String Formatting). */ -PyAPI_FUNC(int) _PyUnicode_FormatAdvancedWriter( - _PyUnicodeWriter *writer, - PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, - Py_ssize_t end); - /* --- Manage the default encoding ---------------------------------------- */ /* Returns a pointer to the default encoding (UTF-8) of the @@ -617,171 +577,9 @@ PyAPI_FUNC(int) _PyUnicode_FormatAdvancedWriter( PyAPI_FUNC(const char *) PyUnicode_AsUTF8(PyObject *unicode); +// Alias kept for backward compatibility #define _PyUnicode_AsString PyUnicode_AsUTF8 -/* --- UTF-7 Codecs ------------------------------------------------------- */ - -PyAPI_FUNC(PyObject*) _PyUnicode_EncodeUTF7( - PyObject *unicode, /* Unicode object */ - int base64SetO, /* Encode RFC2152 Set O characters in base64 */ - int base64WhiteSpace, /* Encode whitespace (sp, ht, nl, cr) in base64 */ - const char *errors /* error handling */ - ); - -/* --- UTF-8 Codecs ------------------------------------------------------- */ - -PyAPI_FUNC(PyObject*) _PyUnicode_AsUTF8String( - PyObject *unicode, - const char *errors); - -/* --- UTF-32 Codecs ------------------------------------------------------ */ - -PyAPI_FUNC(PyObject*) _PyUnicode_EncodeUTF32( - PyObject *object, /* Unicode object */ - const char *errors, /* error handling */ - int byteorder /* byteorder to use 0=BOM+native;-1=LE,1=BE */ - ); - -/* --- UTF-16 Codecs ------------------------------------------------------ */ - -/* Returns a Python string object holding the UTF-16 encoded value of - the Unicode data. - - If byteorder is not 0, output is written according to the following - byte order: - - byteorder == -1: little endian - byteorder == 0: native byte order (writes a BOM mark) - byteorder == 1: big endian - - If byteorder is 0, the output string will always start with the - Unicode BOM mark (U+FEFF). In the other two modes, no BOM mark is - prepended. -*/ -PyAPI_FUNC(PyObject*) _PyUnicode_EncodeUTF16( - PyObject* unicode, /* Unicode object */ - const char *errors, /* error handling */ - int byteorder /* byteorder to use 0=BOM+native;-1=LE,1=BE */ - ); - -/* --- Unicode-Escape Codecs ---------------------------------------------- */ - -/* Variant of PyUnicode_DecodeUnicodeEscape that supports partial decoding. 
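// Illustrative sketch of the _PyUnicodeWriter API described above, assuming
// the writer's other private helpers from this header
// (_PyUnicodeWriter_WriteASCIIString(), _PyUnicodeWriter_Finish()):
//
//   _PyUnicodeWriter writer;
//   _PyUnicodeWriter_Init(&writer);
//   writer.overallocate = 1;                  // expect several writes
//   if (_PyUnicodeWriter_WriteASCIIString(&writer, "abc", 3) < 0) {
//       _PyUnicodeWriter_Dealloc(&writer);
//       return NULL;
//   }
//   return _PyUnicodeWriter_Finish(&writer);  // hands the buffer to the result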
*/ -PyAPI_FUNC(PyObject*) _PyUnicode_DecodeUnicodeEscapeStateful( - const char *string, /* Unicode-Escape encoded string */ - Py_ssize_t length, /* size of string */ - const char *errors, /* error handling */ - Py_ssize_t *consumed /* bytes consumed */ -); -/* Helper for PyUnicode_DecodeUnicodeEscape that detects invalid escape - chars. */ -PyAPI_FUNC(PyObject*) _PyUnicode_DecodeUnicodeEscapeInternal( - const char *string, /* Unicode-Escape encoded string */ - Py_ssize_t length, /* size of string */ - const char *errors, /* error handling */ - Py_ssize_t *consumed, /* bytes consumed */ - const char **first_invalid_escape /* on return, points to first - invalid escaped char in - string. */ -); - -/* --- Raw-Unicode-Escape Codecs ---------------------------------------------- */ - -/* Variant of PyUnicode_DecodeRawUnicodeEscape that supports partial decoding. */ -PyAPI_FUNC(PyObject*) _PyUnicode_DecodeRawUnicodeEscapeStateful( - const char *string, /* Unicode-Escape encoded string */ - Py_ssize_t length, /* size of string */ - const char *errors, /* error handling */ - Py_ssize_t *consumed /* bytes consumed */ -); - -/* --- Latin-1 Codecs ----------------------------------------------------- */ - -PyAPI_FUNC(PyObject*) _PyUnicode_AsLatin1String( - PyObject* unicode, - const char* errors); - -/* --- ASCII Codecs ------------------------------------------------------- */ - -PyAPI_FUNC(PyObject*) _PyUnicode_AsASCIIString( - PyObject* unicode, - const char* errors); - -/* --- Character Map Codecs ----------------------------------------------- */ - -/* Translate an Unicode object by applying a character mapping table to - it and return the resulting Unicode object. - - The mapping table must map Unicode ordinal integers to Unicode strings, - Unicode ordinal integers or None (causing deletion of the character). - - Mapping tables may be dictionaries or sequences. Unmapped character - ordinals (ones which cause a LookupError) are left untouched and - are copied as-is. -*/ -PyAPI_FUNC(PyObject*) _PyUnicode_EncodeCharmap( - PyObject *unicode, /* Unicode object */ - PyObject *mapping, /* encoding mapping */ - const char *errors /* error handling */ - ); - -/* --- Decimal Encoder ---------------------------------------------------- */ - -/* Coverts a Unicode object holding a decimal value to an ASCII string - for using in int, float and complex parsers. - Transforms code points that have decimal digit property to the - corresponding ASCII digit code points. Transforms spaces to ASCII. - Transforms code points starting from the first non-ASCII code point that - is neither a decimal digit nor a space to the end into '?'. */ - -PyAPI_FUNC(PyObject*) _PyUnicode_TransformDecimalAndSpaceToASCII( - PyObject *unicode /* Unicode object */ - ); - -/* --- Methods & Slots ---------------------------------------------------- */ - -PyAPI_FUNC(PyObject *) _PyUnicode_JoinArray( - PyObject *separator, - PyObject *const *items, - Py_ssize_t seqlen - ); - -/* Test whether a unicode is equal to ASCII identifier. Return 1 if true, - 0 otherwise. The right argument must be ASCII identifier. - Any error occurs inside will be cleared before return. */ -PyAPI_FUNC(int) _PyUnicode_EqualToASCIIId( - PyObject *left, /* Left string */ - _Py_Identifier *right /* Right identifier */ - ); - -/* Test whether a unicode is equal to ASCII string. Return 1 if true, - 0 otherwise. The right argument must be ASCII-encoded string. - Any error occurs inside will be cleared before return. 
*/ -PyAPI_FUNC(int) _PyUnicode_EqualToASCIIString( - PyObject *left, - const char *right /* ASCII-encoded string */ - ); - -/* Externally visible for str.strip(unicode) */ -PyAPI_FUNC(PyObject *) _PyUnicode_XStrip( - PyObject *self, - int striptype, - PyObject *sepobj - ); - -/* Using explicit passed-in values, insert the thousands grouping - into the string pointed to by buffer. For the argument descriptions, - see Objects/stringlib/localeutil.h */ -PyAPI_FUNC(Py_ssize_t) _PyUnicode_InsertThousandsGrouping( - _PyUnicodeWriter *writer, - Py_ssize_t n_buffer, - PyObject *digits, - Py_ssize_t d_pos, - Py_ssize_t n_digits, - Py_ssize_t min_width, - const char *grouping, - PyObject *thousands_sep, - Py_UCS4 *maxchar); /* === Characters Type APIs =============================================== */ @@ -804,14 +602,6 @@ PyAPI_FUNC(int) _PyUnicode_IsTitlecase( Py_UCS4 ch /* Unicode character */ ); -PyAPI_FUNC(int) _PyUnicode_IsXidStart( - Py_UCS4 ch /* Unicode character */ - ); - -PyAPI_FUNC(int) _PyUnicode_IsXidContinue( - Py_UCS4 ch /* Unicode character */ - ); - PyAPI_FUNC(int) _PyUnicode_IsWhitespace( const Py_UCS4 ch /* Unicode character */ ); @@ -820,46 +610,18 @@ PyAPI_FUNC(int) _PyUnicode_IsLinebreak( const Py_UCS4 ch /* Unicode character */ ); -/* Py_DEPRECATED(3.3) */ PyAPI_FUNC(Py_UCS4) _PyUnicode_ToLowercase( +PyAPI_FUNC(Py_UCS4) _PyUnicode_ToLowercase( Py_UCS4 ch /* Unicode character */ ); -/* Py_DEPRECATED(3.3) */ PyAPI_FUNC(Py_UCS4) _PyUnicode_ToUppercase( +PyAPI_FUNC(Py_UCS4) _PyUnicode_ToUppercase( Py_UCS4 ch /* Unicode character */ ); -Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UCS4) _PyUnicode_ToTitlecase( +PyAPI_FUNC(Py_UCS4) _PyUnicode_ToTitlecase( Py_UCS4 ch /* Unicode character */ ); -PyAPI_FUNC(int) _PyUnicode_ToLowerFull( - Py_UCS4 ch, /* Unicode character */ - Py_UCS4 *res - ); - -PyAPI_FUNC(int) _PyUnicode_ToTitleFull( - Py_UCS4 ch, /* Unicode character */ - Py_UCS4 *res - ); - -PyAPI_FUNC(int) _PyUnicode_ToUpperFull( - Py_UCS4 ch, /* Unicode character */ - Py_UCS4 *res - ); - -PyAPI_FUNC(int) _PyUnicode_ToFoldedFull( - Py_UCS4 ch, /* Unicode character */ - Py_UCS4 *res - ); - -PyAPI_FUNC(int) _PyUnicode_IsCaseIgnorable( - Py_UCS4 ch /* Unicode character */ - ); - -PyAPI_FUNC(int) _PyUnicode_IsCased( - Py_UCS4 ch /* Unicode character */ - ); - PyAPI_FUNC(int) _PyUnicode_ToDecimalDigit( Py_UCS4 ch /* Unicode character */ ); @@ -936,19 +698,6 @@ static inline int Py_UNICODE_ISALNUM(Py_UCS4 ch) { /* === Misc functions ===================================================== */ -PyAPI_FUNC(PyObject*) _PyUnicode_FormatLong(PyObject *, int, int, int); - -/* Return an interned Unicode object for an Identifier; may fail if there is no memory.*/ +// Return an interned Unicode object for an Identifier; may fail if there is no +// memory. PyAPI_FUNC(PyObject*) _PyUnicode_FromId(_Py_Identifier*); - -/* Fast equality check when the inputs are known to be exact unicode types - and where the hash values are equal (i.e. a very probable match) */ -PyAPI_FUNC(int) _PyUnicode_EQ(PyObject *, PyObject *); - -/* Equality check. 
*/ -PyAPI_FUNC(int) _PyUnicode_Equal(PyObject *, PyObject *); - -PyAPI_FUNC(int) _PyUnicode_WideCharString_Converter(PyObject *, void *); -PyAPI_FUNC(int) _PyUnicode_WideCharString_Opt_Converter(PyObject *, void *); - -PyAPI_FUNC(Py_ssize_t) _PyUnicode_ScanIdentifier(PyObject *); diff --git a/Include/cpython/weakrefobject.h b/Include/cpython/weakrefobject.h index fd79fdc2dcc4688..1559e2def61260c 100644 --- a/Include/cpython/weakrefobject.h +++ b/Include/cpython/weakrefobject.h @@ -32,11 +32,8 @@ struct _PyWeakReference { vectorcallfunc vectorcall; }; -PyAPI_FUNC(Py_ssize_t) _PyWeakref_GetWeakrefCount(PyWeakReference *head); - -PyAPI_FUNC(void) _PyWeakref_ClearRef(PyWeakReference *self); - -static inline PyObject* PyWeakref_GET_OBJECT(PyObject *ref_obj) { +Py_DEPRECATED(3.13) static inline PyObject* PyWeakref_GET_OBJECT(PyObject *ref_obj) +{ PyWeakReference *ref; PyObject *obj; assert(PyWeakref_Check(ref_obj)); diff --git a/Include/descrobject.h b/Include/descrobject.h index 77f221df07714fe..fd66d17b497a31f 100644 --- a/Include/descrobject.h +++ b/Include/descrobject.h @@ -32,6 +32,62 @@ PyAPI_FUNC(PyObject *) PyDescr_NewGetSet(PyTypeObject *, PyGetSetDef *); PyAPI_FUNC(PyObject *) PyDictProxy_New(PyObject *); PyAPI_FUNC(PyObject *) PyWrapper_New(PyObject *, PyObject *); + +/* An array of PyMemberDef structures defines the name, type and offset + of selected members of a C structure. These can be read by + PyMember_GetOne() and set by PyMember_SetOne() (except if their READONLY + flag is set). The array must be terminated with an entry whose name + pointer is NULL. */ +struct PyMemberDef { + const char *name; + int type; + Py_ssize_t offset; + int flags; + const char *doc; +}; + +// These constants used to be in structmember.h, not prefixed by Py_. +// (structmember.h now has aliases to the new names.) + +/* Types */ +#define Py_T_SHORT 0 +#define Py_T_INT 1 +#define Py_T_LONG 2 +#define Py_T_FLOAT 3 +#define Py_T_DOUBLE 4 +#define Py_T_STRING 5 +#define _Py_T_OBJECT 6 // Deprecated, use Py_T_OBJECT_EX instead +/* the ordering here is weird for binary compatibility */ +#define Py_T_CHAR 7 /* 1-character string */ +#define Py_T_BYTE 8 /* 8-bit signed int */ +/* unsigned variants: */ +#define Py_T_UBYTE 9 +#define Py_T_USHORT 10 +#define Py_T_UINT 11 +#define Py_T_ULONG 12 + +/* Added by Jack: strings contained in the structure */ +#define Py_T_STRING_INPLACE 13 + +/* Added by Lillo: bools contained in the structure (assumed char) */ +#define Py_T_BOOL 14 + +#define Py_T_OBJECT_EX 16 +#define Py_T_LONGLONG 17 +#define Py_T_ULONGLONG 18 + +#define Py_T_PYSSIZET 19 /* Py_ssize_t */ +#define _Py_T_NONE 20 // Deprecated. Value is always None. + +/* Flags */ +#define Py_READONLY 1 +#define Py_AUDIT_READ 2 // Added in 3.10, harmless no-op before that +#define _Py_WRITE_RESTRICTED 4 // Deprecated, no-op. Do not reuse the value. 
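// Illustrative sketch: a hypothetical extension type exposing two C fields
// through a PyMemberDef table, using the Py_T_* / Py_READONLY names defined
// above (MyObject is a made-up struct; offsetof() comes from <stddef.h>):
//
//   typedef struct {
//       PyObject_HEAD
//       int count;
//       double ratio;
//   } MyObject;
//
//   static PyMemberDef my_members[] = {
//       {"count", Py_T_INT, offsetof(MyObject, count), 0, "item count"},
//       {"ratio", Py_T_DOUBLE, offsetof(MyObject, ratio), Py_READONLY, "ratio (read-only)"},
//       {NULL}   // the table must end with an entry whose name is NULL
//   };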
+#define Py_RELATIVE_OFFSET 8 + +PyAPI_FUNC(PyObject *) PyMember_GetOne(const char *, PyMemberDef *); +PyAPI_FUNC(int) PyMember_SetOne(char *, PyMemberDef *, PyObject *); + #ifndef Py_LIMITED_API # define Py_CPYTHON_DESCROBJECT_H # include "cpython/descrobject.h" diff --git a/Include/dictobject.h b/Include/dictobject.h index e7fcb44d0cf9a98..1bbeec1ab699e75 100644 --- a/Include/dictobject.h +++ b/Include/dictobject.h @@ -57,6 +57,17 @@ PyAPI_FUNC(int) PyDict_MergeFromSeq2(PyObject *d, PyAPI_FUNC(PyObject *) PyDict_GetItemString(PyObject *dp, const char *key); PyAPI_FUNC(int) PyDict_SetItemString(PyObject *dp, const char *key, PyObject *item); PyAPI_FUNC(int) PyDict_DelItemString(PyObject *dp, const char *key); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030D0000 +// Return the object from dictionary *op* which has a key *key*. +// - If the key is present, set *result to a new strong reference to the value +// and return 1. +// - If the key is missing, set *result to NULL and return 0 . +// - On error, raise an exception and return -1. +PyAPI_FUNC(int) PyDict_GetItemRef(PyObject *mp, PyObject *key, PyObject **result); +PyAPI_FUNC(int) PyDict_GetItemStringRef(PyObject *mp, const char *key, PyObject **result); +#endif + #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030A0000 PyAPI_FUNC(PyObject *) PyObject_GenericGetDict(PyObject *, void *); #endif diff --git a/Include/errcode.h b/Include/errcode.h index 54ae929bf258703..dac5cf068c99d6d 100644 --- a/Include/errcode.h +++ b/Include/errcode.h @@ -1,36 +1,43 @@ +// Error codes passed around between file input, tokenizer, parser and +// interpreter. This is necessary so we can turn them into Python +// exceptions at a higher level. Note that some errors have a +// slightly different meaning when passed from the tokenizer to the +// parser than when passed from the parser to the interpreter; e.g. +// the parser only returns E_EOF when it hits EOF immediately, and it +// never returns E_OK. +// +// The public PyRun_InteractiveOneObjectEx() function can return E_EOF, +// same as its variants: +// +// * PyRun_InteractiveOneObject() +// * PyRun_InteractiveOneFlags() +// * PyRun_InteractiveOne() + #ifndef Py_ERRCODE_H #define Py_ERRCODE_H #ifdef __cplusplus extern "C" { #endif - -/* Error codes passed around between file input, tokenizer, parser and - interpreter. This is necessary so we can turn them into Python - exceptions at a higher level. Note that some errors have a - slightly different meaning when passed from the tokenizer to the - parser than when passed from the parser to the interpreter; e.g. - the parser only returns E_EOF when it hits EOF immediately, and it - never returns E_OK. 
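// Illustrative sketch: using the new PyDict_GetItemRef() declared above;
// `dict` and `key` are assumed to exist, and on success *result holds a new
// strong reference that the caller must release:
//
//   PyObject *value;
//   int rc = PyDict_GetItemRef(dict, key, &value);
//   if (rc < 0) {
//       return NULL;                      // error: exception already set
//   }
//   if (rc == 0) {
//       value = Py_NewRef(Py_None);       // missing key: substitute a default
//   }
//   /* ... use value ... */
//   Py_DECREF(value);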
*/ - -#define E_OK 10 /* No error */ -#define E_EOF 11 /* End Of File */ -#define E_INTR 12 /* Interrupted */ -#define E_TOKEN 13 /* Bad token */ -#define E_SYNTAX 14 /* Syntax error */ -#define E_NOMEM 15 /* Ran out of memory */ -#define E_DONE 16 /* Parsing complete */ -#define E_ERROR 17 /* Execution error */ -#define E_TABSPACE 18 /* Inconsistent mixing of tabs and spaces */ -#define E_OVERFLOW 19 /* Node had too many children */ -#define E_TOODEEP 20 /* Too many indentation levels */ -#define E_DEDENT 21 /* No matching outer block for dedent */ -#define E_DECODE 22 /* Error in decoding into Unicode */ -#define E_EOFS 23 /* EOF in triple-quoted string */ -#define E_EOLS 24 /* EOL in single-quoted string */ -#define E_LINECONT 25 /* Unexpected characters after a line continuation */ -#define E_BADSINGLE 27 /* Ill-formed single statement input */ -#define E_INTERACT_STOP 28 /* Interactive mode stopped tokenization */ +#define E_OK 10 /* No error */ +#define E_EOF 11 /* End Of File */ +#define E_INTR 12 /* Interrupted */ +#define E_TOKEN 13 /* Bad token */ +#define E_SYNTAX 14 /* Syntax error */ +#define E_NOMEM 15 /* Ran out of memory */ +#define E_DONE 16 /* Parsing complete */ +#define E_ERROR 17 /* Execution error */ +#define E_TABSPACE 18 /* Inconsistent mixing of tabs and spaces */ +#define E_OVERFLOW 19 /* Node had too many children */ +#define E_TOODEEP 20 /* Too many indentation levels */ +#define E_DEDENT 21 /* No matching outer block for dedent */ +#define E_DECODE 22 /* Error in decoding into Unicode */ +#define E_EOFS 23 /* EOF in triple-quoted string */ +#define E_EOLS 24 /* EOL in single-quoted string */ +#define E_LINECONT 25 /* Unexpected characters after a line continuation */ +#define E_BADSINGLE 27 /* Ill-formed single statement input */ +#define E_INTERACT_STOP 28 /* Interactive mode stopped tokenization */ +#define E_COLUMNOVERFLOW 29 /* Column offset overflow */ #ifdef __cplusplus } diff --git a/Include/exports.h b/Include/exports.h index fc1a5c5ead6276e..ce601216f171563 100644 --- a/Include/exports.h +++ b/Include/exports.h @@ -1,10 +1,39 @@ #ifndef Py_EXPORTS_H #define Py_EXPORTS_H +/* Declarations for symbol visibility. + + PyAPI_FUNC(type): Declares a public Python API function and return type + PyAPI_DATA(type): Declares public Python data and its type + PyMODINIT_FUNC: A Python module init function. If these functions are + inside the Python core, they are private to the core. + If in an extension module, it may be declared with + external linkage depending on the platform. + + As a number of platforms support/require "__declspec(dllimport/dllexport)", + we support a HAVE_DECLSPEC_DLL macro to save duplication. +*/ + +/* + All windows ports, except cygwin, are handled in PC/pyconfig.h. + + Cygwin is the only other autoconf platform requiring special + linkage handling and it uses __declspec(). 
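// Illustrative sketch: the PyMODINIT_FUNC linkage macro documented above, as
// used by an extension module's init function (the module name "spam" and
// its PyModuleDef `spam_module` are assumed):
//
//   PyMODINIT_FUNC
//   PyInit_spam(void)
//   {
//       return PyModule_Create(&spam_module);
//   }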
+*/ +#if defined(__CYGWIN__) +# define HAVE_DECLSPEC_DLL +#endif + #if defined(_WIN32) || defined(__CYGWIN__) - #define Py_IMPORTED_SYMBOL __declspec(dllimport) - #define Py_EXPORTED_SYMBOL __declspec(dllexport) - #define Py_LOCAL_SYMBOL + #if defined(Py_ENABLE_SHARED) + #define Py_IMPORTED_SYMBOL __declspec(dllimport) + #define Py_EXPORTED_SYMBOL __declspec(dllexport) + #define Py_LOCAL_SYMBOL + #else + #define Py_IMPORTED_SYMBOL + #define Py_EXPORTED_SYMBOL + #define Py_LOCAL_SYMBOL + #endif #else /* * If we only ever used gcc >= 5, we could use __has_attribute(visibility) @@ -27,4 +56,53 @@ #endif #endif +/* only get special linkage if built as shared or platform is Cygwin */ +#if defined(Py_ENABLE_SHARED) || defined(__CYGWIN__) +# if defined(HAVE_DECLSPEC_DLL) +# if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) +# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE +# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE + /* module init functions inside the core need no external linkage */ + /* except for Cygwin to handle embedding */ +# if defined(__CYGWIN__) +# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject* +# else /* __CYGWIN__ */ +# define PyMODINIT_FUNC PyObject* +# endif /* __CYGWIN__ */ +# else /* Py_BUILD_CORE */ + /* Building an extension module, or an embedded situation */ + /* public Python functions and data are imported */ + /* Under Cygwin, auto-import functions to prevent compilation */ + /* failures similar to those described at the bottom of 4.1: */ + /* http://docs.python.org/extending/windows.html#a-cookbook-approach */ +# if !defined(__CYGWIN__) +# define PyAPI_FUNC(RTYPE) Py_IMPORTED_SYMBOL RTYPE +# endif /* !__CYGWIN__ */ +# define PyAPI_DATA(RTYPE) extern Py_IMPORTED_SYMBOL RTYPE + /* module init functions outside the core must be exported */ +# if defined(__cplusplus) +# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject* +# else /* __cplusplus */ +# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject* +# endif /* __cplusplus */ +# endif /* Py_BUILD_CORE */ +# endif /* HAVE_DECLSPEC_DLL */ +#endif /* Py_ENABLE_SHARED */ + +/* If no external linkage macros defined by now, create defaults */ +#ifndef PyAPI_FUNC +# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE +#endif +#ifndef PyAPI_DATA +# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE +#endif +#ifndef PyMODINIT_FUNC +# if defined(__cplusplus) +# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject* +# else /* __cplusplus */ +# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject* +# endif /* __cplusplus */ +#endif + + #endif /* Py_EXPORTS_H */ diff --git a/Include/fileobject.h b/Include/fileobject.h index 02bd7c915a23f79..6a6d11409497fab 100644 --- a/Include/fileobject.h +++ b/Include/fileobject.h @@ -23,20 +23,12 @@ Py_DEPRECATED(3.12) PyAPI_DATA(const char *) Py_FileSystemDefaultEncoding; #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000 Py_DEPRECATED(3.12) PyAPI_DATA(const char *) Py_FileSystemDefaultEncodeErrors; #endif -PyAPI_DATA(int) Py_HasFileSystemDefaultEncoding; +Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_HasFileSystemDefaultEncoding; #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000 Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_UTF8Mode; #endif -/* A routine to check if a file descriptor can be select()-ed. 
*/ -#ifdef _MSC_VER - /* On Windows, any socket fd can be select()-ed, no matter how high */ - #define _PyIsSelectable_fd(FD) (1) -#else - #define _PyIsSelectable_fd(FD) ((unsigned int)(FD) < (unsigned int)FD_SETSIZE) -#endif - #ifndef Py_LIMITED_API # define Py_CPYTHON_FILEOBJECT_H # include "cpython/fileobject.h" diff --git a/Include/fileutils.h b/Include/fileutils.h index ba5acc84fcb1856..1509198e45f0cae 100644 --- a/Include/fileutils.h +++ b/Include/fileutils.h @@ -1,5 +1,41 @@ #ifndef Py_FILEUTILS_H #define Py_FILEUTILS_H + +/******************************* + * stat() and fstat() fiddling * + *******************************/ + +#ifdef HAVE_SYS_STAT_H +# include // S_ISREG() +#elif defined(HAVE_STAT_H) +# include // S_ISREG() +#endif + +#ifndef S_IFMT + // VisualAge C/C++ Failed to Define MountType Field in sys/stat.h. +# define S_IFMT 0170000 +#endif +#ifndef S_IFLNK + // Windows doesn't define S_IFLNK, but posixmodule.c maps + // IO_REPARSE_TAG_SYMLINK to S_IFLNK. +# define S_IFLNK 0120000 +#endif +#ifndef S_ISREG +# define S_ISREG(x) (((x) & S_IFMT) == S_IFREG) +#endif +#ifndef S_ISDIR +# define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR) +#endif +#ifndef S_ISCHR +# define S_ISCHR(x) (((x) & S_IFMT) == S_IFCHR) +#endif +#ifndef S_ISLNK +# define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK) +#endif + + +// Move this down here since some C++ #include's don't like to be included +// inside an extern "C". #ifdef __cplusplus extern "C" { #endif diff --git a/Include/import.h b/Include/import.h index 5d5f3425b8e715e..24b23b9119196fc 100644 --- a/Include/import.h +++ b/Include/import.h @@ -43,10 +43,15 @@ PyAPI_FUNC(PyObject *) PyImport_AddModuleObject( PyAPI_FUNC(PyObject *) PyImport_AddModule( const char *name /* UTF-8 encoded string */ ); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000 +PyAPI_FUNC(PyObject *) PyImport_AddModuleRef( + const char *name /* UTF-8 encoded string */ + ); +#endif PyAPI_FUNC(PyObject *) PyImport_ImportModule( const char *name /* UTF-8 encoded string */ ); -PyAPI_FUNC(PyObject *) PyImport_ImportModuleNoBlock( +Py_DEPRECATED(3.13) PyAPI_FUNC(PyObject *) PyImport_ImportModuleNoBlock( const char *name /* UTF-8 encoded string */ ); PyAPI_FUNC(PyObject *) PyImport_ImportModuleLevel( diff --git a/Include/internal/mimalloc/mimalloc.h b/Include/internal/mimalloc/mimalloc.h new file mode 100644 index 000000000000000..821129e7690b1b2 --- /dev/null +++ b/Include/internal/mimalloc/mimalloc.h @@ -0,0 +1,565 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_H +#define MIMALLOC_H + +#define MI_MALLOC_VERSION 212 // major + 2 digits minor + +// ------------------------------------------------------ +// Compiler specific attributes +// ------------------------------------------------------ + +#ifdef __cplusplus + #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 + #define mi_attr_noexcept noexcept + #else + #define mi_attr_noexcept throw() + #endif +#else + #define mi_attr_noexcept +#endif + +#if defined(__cplusplus) && (__cplusplus >= 201703) + #define mi_decl_nodiscard [[nodiscard]] +#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) // includes clang, icc, and clang-cl + #define mi_decl_nodiscard __attribute__((warn_unused_result)) +#elif defined(_HAS_NODISCARD) + #define mi_decl_nodiscard _NODISCARD +#elif (_MSC_VER >= 1700) + #define mi_decl_nodiscard _Check_return_ +#else + #define mi_decl_nodiscard +#endif + +#if defined(_MSC_VER) || defined(__MINGW32__) + #if !defined(MI_SHARED_LIB) + #define mi_decl_export + #elif defined(MI_SHARED_LIB_EXPORT) + #define mi_decl_export __declspec(dllexport) + #else + #define mi_decl_export __declspec(dllimport) + #endif + #if defined(__MINGW32__) + #define mi_decl_restrict + #define mi_attr_malloc __attribute__((malloc)) + #else + #if (_MSC_VER >= 1900) && !defined(__EDG__) + #define mi_decl_restrict __declspec(allocator) __declspec(restrict) + #else + #define mi_decl_restrict __declspec(restrict) + #endif + #define mi_attr_malloc + #endif + #define mi_cdecl __cdecl + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) +#elif defined(__GNUC__) // includes clang and icc + #if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT) + #define mi_decl_export __attribute__((visibility("default"))) + #else + #define mi_decl_export + #endif + #define mi_cdecl // leads to warnings... 
__attribute__((cdecl)) + #define mi_decl_restrict + #define mi_attr_malloc __attribute__((malloc)) + #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5) + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) + #elif defined(__INTEL_COMPILER) + #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) + #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) + #else + #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) + #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) __attribute__((alloc_align(p))) + #endif +#else + #define mi_cdecl + #define mi_decl_export + #define mi_decl_restrict + #define mi_attr_malloc + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) +#endif + +// ------------------------------------------------------ +// Includes +// ------------------------------------------------------ + +#include // size_t +#include // bool +#include // INTPTR_MAX + +#ifdef __cplusplus +extern "C" { +#endif + +// ------------------------------------------------------ +// Standard malloc interface +// ------------------------------------------------------ + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + +mi_decl_export void mi_free(void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + +// ------------------------------------------------------ +// Extended functionality +// ------------------------------------------------------ +#define MI_SMALL_WSIZE_MAX (128) +#define MI_SMALL_SIZE_MAX (MI_SMALL_WSIZE_MAX*sizeof(void*)) + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); +mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + +mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; + + +// ------------------------------------------------------ +// 
Internals +// ------------------------------------------------------ + +typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); +mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept; + +typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg); +mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept; + +typedef void (mi_cdecl mi_error_fun)(int err, void* arg); +mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg); + +mi_decl_export void mi_collect(bool force) mi_attr_noexcept; +mi_decl_export int mi_version(void) mi_attr_noexcept; +mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; +mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; +mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL +mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; + +mi_decl_export void mi_process_init(void) mi_attr_noexcept; +mi_decl_export void mi_thread_init(void) mi_attr_noexcept; +mi_decl_export void mi_thread_done(void) mi_attr_noexcept; +mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; + +mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, + size_t* current_rss, size_t* peak_rss, + size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept; + +// ------------------------------------------------------------------------------------- +// Aligned allocation +// Note that `alignment` always follows `size` for consistency with unaligned +// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`. +// ------------------------------------------------------------------------------------- + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); + + +// ------------------------------------------------------------------------------------- +// Heaps: first-class, but can only allocate from the same thread that created it. 
+// ------------------------------------------------------------------------------------- + +struct mi_heap_s; +typedef struct mi_heap_s mi_heap_t; + +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void); +mi_decl_export void mi_heap_delete(mi_heap_t* heap); +mi_decl_export void mi_heap_destroy(mi_heap_t* heap); +mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap); +mi_decl_export mi_heap_t* mi_heap_get_default(void); +mi_decl_export mi_heap_t* mi_heap_get_backing(void); +mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); + +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); +mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); 
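// Illustrative sketch (not part of the upstream header): the typical pattern with a
// first-class heap is to allocate many short-lived objects from it on one thread and
// then release them all at once. Every function used below is declared in this header;
// the function name `use_scratch_heap` and the sizes are only an example.
//
//   static void use_scratch_heap(void) {
//     mi_heap_t* h = mi_heap_new();
//     void*   buf  = mi_heap_malloc(h, 128);
//     char*   copy = mi_heap_strdup(h, "temporary");
//     double* v    = (double*)mi_heap_calloc_aligned(h, 16, sizeof(double), 32);
//     /* ... use buf, copy, and v ... */
//     mi_heap_destroy(h);   // releases every block allocated from `h` in one go
//   }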
+mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); + + +// -------------------------------------------------------------------------------- +// Zero initialized re-allocation. +// Only valid on memory that was originally allocated with zero initialization too. +// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc. +// see +// -------------------------------------------------------------------------------- + +mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); + +mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3); + +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); + +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4); + + +// ------------------------------------------------------ +// Analysis +// ------------------------------------------------------ + +mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p); +mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p); +mi_decl_export bool mi_check_owned(const void* p); + +// An area of heap space contains blocks of a single size. +typedef struct mi_heap_area_s { + void* blocks; // start of the area containing heap blocks + size_t reserved; // bytes reserved for this area (virtual) + size_t committed; // current available bytes for this area + size_t used; // number of allocated blocks + size_t block_size; // size in bytes of each block + size_t full_block_size; // size in bytes of a full block including padding and metadata. 
+} mi_heap_area_t; + +typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg); + +mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg); + +// Experimental +mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept; + +mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; + +mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept; + +mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept; + +// Experimental: heaps associated with specific memory arena's +typedef int mi_arena_id_t; +mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size); +mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; + +#if MI_MALLOC_VERSION >= 182 +// Create a heap that only allocates in the specified arena +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id); +#endif + +// deprecated +mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; + + +// ------------------------------------------------------ +// Convenience +// ------------------------------------------------------ + +#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp))) +#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp))) +#define mi_calloc_tp(tp,n) ((tp*)mi_calloc(n,sizeof(tp))) +#define mi_mallocn_tp(tp,n) ((tp*)mi_mallocn(n,sizeof(tp))) +#define mi_reallocn_tp(p,tp,n) ((tp*)mi_reallocn(p,n,sizeof(tp))) +#define mi_recalloc_tp(p,tp,n) ((tp*)mi_recalloc(p,n,sizeof(tp))) + +#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp))) +#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp))) +#define mi_heap_calloc_tp(hp,tp,n) ((tp*)mi_heap_calloc(hp,n,sizeof(tp))) +#define mi_heap_mallocn_tp(hp,tp,n) ((tp*)mi_heap_mallocn(hp,n,sizeof(tp))) +#define mi_heap_reallocn_tp(hp,p,tp,n) ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp))) +#define mi_heap_recalloc_tp(hp,p,tp,n) ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp))) + + +// ------------------------------------------------------ +// Options +// ------------------------------------------------------ + +typedef enum mi_option_e { + // stable options + mi_option_show_errors, // print error messages + mi_option_show_stats, // print statistics on termination + mi_option_verbose, // print verbose messages + // the following options are experimental (see src/options.h) + mi_option_eager_commit, // eager commit segments? 
(after `eager_commit_delay` segments) (=1) + mi_option_arena_eager_commit, // eager commit arenas? Use 2 to enable just on overcommit systems (=2) + mi_option_purge_decommits, // should a memory purge decommit (or only reset) (=1) + mi_option_allow_large_os_pages, // allow large (2MiB) OS pages, implies eager commit + mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB/page) at startup + mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node + mi_option_reserve_os_memory, // reserve specified amount of OS memory in an arena at startup + mi_option_deprecated_segment_cache, + mi_option_deprecated_page_reset, + mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination + mi_option_deprecated_segment_reset, + mi_option_eager_commit_delay, + mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. + mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes. + mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas) + mi_option_os_tag, // tag used for OS logging (macOS only for now) + mi_option_max_errors, // issue at most N error messages + mi_option_max_warnings, // issue at most N warning messages + mi_option_max_segment_reclaim, + mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe. + mi_option_arena_reserve, // initial memory size in KiB for arena reservation (1GiB on 64-bit) + mi_option_arena_purge_mult, + mi_option_purge_extend_delay, + _mi_option_last, + // legacy option names + mi_option_large_os_pages = mi_option_allow_large_os_pages, + mi_option_eager_region_commit = mi_option_arena_eager_commit, + mi_option_reset_decommits = mi_option_purge_decommits, + mi_option_reset_delay = mi_option_purge_delay, + mi_option_abandoned_page_reset = mi_option_abandoned_page_purge +} mi_option_t; + + +mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option); +mi_decl_export void mi_option_enable(mi_option_t option); +mi_decl_export void mi_option_disable(mi_option_t option); +mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable); +mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable); + +mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option); +mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max); +mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option); +mi_decl_export void mi_option_set(mi_option_t option, long value); +mi_decl_export void mi_option_set_default(mi_option_t option, long value); + + +// ------------------------------------------------------------------------------------------------------- +// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions. +// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.) +// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before free-ing. 
+// ------------------------------------------------------------------------------------------------------- + +mi_decl_export void mi_cfree(void* p) mi_attr_noexcept; +mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept; + +mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); + +mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); +mi_decl_nodiscard mi_decl_export int mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept; +mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept; + +mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept; +mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept; +mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept; + +// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`. +// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception). 
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size)                   mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size)           mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size)   mi_attr_malloc mi_attr_alloc_size2(1, 2);
+mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize)                mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3);
+
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size)                mi_attr_malloc mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3);
+
+#ifdef __cplusplus
+}
+#endif
+
+// ---------------------------------------------------------------------------------------------
+// Implement the C++ std::allocator interface for use in STL containers.
+// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally)
+// ---------------------------------------------------------------------------------------------
+#ifdef __cplusplus
+
+#include <cstddef>      // std::size_t
+#include <cstdint>      // PTRDIFF_MAX
+#if (__cplusplus >= 201103L) || (_MSC_VER > 1900)  // C++11
+#include <type_traits>  // std::true_type
+#include <utility>      // std::forward
+#endif
+
+template<class T> struct _mi_stl_allocator_common {
+  typedef T                 value_type;
+  typedef std::size_t       size_type;
+  typedef std::ptrdiff_t    difference_type;
+  typedef value_type&       reference;
+  typedef value_type const& const_reference;
+  typedef value_type*       pointer;
+  typedef value_type const* const_pointer;
+
+  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
+  using propagate_on_container_copy_assignment = std::true_type;
+  using propagate_on_container_move_assignment = std::true_type;
+  using propagate_on_container_swap            = std::true_type;
+  template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
+  template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
+  #else
+  void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
+  void destroy(pointer p) { p->~value_type(); }
+  #endif
+
+  size_type     max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); }
+  pointer       address(reference x) const        { return &x; }
+  const_pointer address(const_reference x) const  { return &x; }
+};
+
+template<class T> struct mi_stl_allocator : public _mi_stl_allocator_common<T> {
+  using typename _mi_stl_allocator_common<T>::size_type;
+  using typename _mi_stl_allocator_common<T>::value_type;
+  using typename _mi_stl_allocator_common<T>::pointer;
+  template <class U> struct rebind { typedef mi_stl_allocator<U> other; };
+
+  mi_stl_allocator()                                             mi_attr_noexcept = default;
+  mi_stl_allocator(const mi_stl_allocator&)                      mi_attr_noexcept = default;
+  template<class U> mi_stl_allocator(const mi_stl_allocator<U>&) mi_attr_noexcept { }
+  mi_stl_allocator  select_on_container_copy_construction() const { return *this; }
+  void              deallocate(T* p, size_type) { mi_free(p); }
+
+  #if 
(__cplusplus >= 201703L)  // C++17
+  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_new_n(count, sizeof(T))); }
+  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
+  #else
+  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_new_n(count, sizeof(value_type))); }
+  #endif
+
+  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
+  using is_always_equal = std::true_type;
+  #endif
+};
+
+template<class T1, class T2> bool operator==(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return true; }
+template<class T1, class T2> bool operator!=(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return false; }
+
+
+#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)  // C++11
+#define MI_HAS_HEAP_STL_ALLOCATOR 1
+
+#include <memory>      // std::shared_ptr
+
+// Common base class for STL allocators in a specific heap
+template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common<T> {
+  using typename _mi_stl_allocator_common<T>::size_type;
+  using typename _mi_stl_allocator_common<T>::value_type;
+  using typename _mi_stl_allocator_common<T>::pointer;
+
+  _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp) { }    /* will not delete nor destroy the passed in heap */
+
+  #if (__cplusplus >= 201703L)  // C++17
+  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); }
+  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
+  #else
+  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); }
+  #endif
+
+  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
+  using is_always_equal = std::false_type;
+  #endif
+
+  void collect(bool force) { mi_heap_collect(this->heap.get(), force); }
+  template<class U> bool is_equal(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) const { return (this->heap == x.heap); }
+
+protected:
+  std::shared_ptr<mi_heap_t> heap;
+  template<class U, bool D> friend struct _mi_heap_stl_allocator_common;
+
+  _mi_heap_stl_allocator_common() {
+    mi_heap_t* hp = mi_heap_new();
+    this->heap.reset(hp, (_mi_destroy ? 
&heap_destroy : &heap_delete));   /* calls heap_delete/destroy when the refcount drops to zero */
+  }
+  _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { }
+  template<class U> _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) mi_attr_noexcept : heap(x.heap) { }
+
+private:
+  static void heap_delete(mi_heap_t* hp)  { if (hp != NULL) { mi_heap_delete(hp); } }
+  static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } }
+};
+
+// STL allocator allocation in a specific heap
+template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
+  using typename _mi_heap_stl_allocator_common<T, false>::size_type;
+  mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { }        // creates fresh heap that is deleted when the destructor is called
+  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { }  // no delete nor destroy on the passed in heap
+  template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }
+
+  mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
+  void deallocate(T* p, size_type) { mi_free(p); }
+  template<class U> struct rebind { typedef mi_heap_stl_allocator<U> other; };
+};
+
+template<class T1, class T2> bool operator==(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
+template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
+
+
+// STL allocator allocation in a specific heap, where `free` does nothing and
+// the heap is destroyed in one go on destruction -- use with care!
+template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
+  using typename _mi_heap_stl_allocator_common<T, true>::size_type;
+  mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { } // creates fresh heap that is destroyed when the destructor is called
+  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { }  // no delete nor destroy on the passed in heap
+  template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }
+
+  mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
+  void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. */ }
+  template<class U> struct rebind { typedef mi_heap_destroy_stl_allocator<U> other; };
+};
+
+template<class T1, class T2> bool operator==(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
+template<class T1, class T2> bool operator!=(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
+
+#endif // C++11
+
+#endif // __cplusplus
+
+#endif
diff --git a/Include/internal/mimalloc/mimalloc/atomic.h b/Include/internal/mimalloc/mimalloc/atomic.h
new file mode 100644
index 000000000000000..eb8478ceed6adf3
--- /dev/null
+++ b/Include/internal/mimalloc/mimalloc/atomic.h
@@ -0,0 +1,385 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_ATOMIC_H +#define MIMALLOC_ATOMIC_H + +// -------------------------------------------------------------------------------------------- +// Atomics +// We need to be portable between C, C++, and MSVC. +// We base the primitives on the C/C++ atomics and create a mimimal wrapper for MSVC in C compilation mode. +// This is why we try to use only `uintptr_t` and `*` as atomic types. +// To gain better insight in the range of used atomics, we use explicitly named memory order operations +// instead of passing the memory order as a parameter. +// ----------------------------------------------------------------------------------------------- + +#if defined(__cplusplus) +// Use C++ atomics +#include +#define _Atomic(tp) std::atomic +#define mi_atomic(name) std::atomic_##name +#define mi_memory_order(name) std::memory_order_##name +#if !defined(ATOMIC_VAR_INIT) || (__cplusplus >= 202002L) // c++20, see issue #571 + #define MI_ATOMIC_VAR_INIT(x) x +#else + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) +#endif +#elif defined(_MSC_VER) +// Use MSVC C wrapper for C11 atomics +#define _Atomic(tp) tp +#define MI_ATOMIC_VAR_INIT(x) x +#define mi_atomic(name) mi_atomic_##name +#define mi_memory_order(name) mi_memory_order_##name +#else +// Use C11 atomics +#include +#define mi_atomic(name) atomic_##name +#define mi_memory_order(name) memory_order_##name +#if !defined(ATOMIC_VAR_INIT) || (__STDC_VERSION__ >= 201710L) // c17, see issue #735 + #define MI_ATOMIC_VAR_INIT(x) x +#else + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) +#endif +#endif + +// Various defines for all used memory orders in mimalloc +#define mi_atomic_cas_weak(p,expected,desired,mem_success,mem_fail) \ + mi_atomic(compare_exchange_weak_explicit)(p,expected,desired,mem_success,mem_fail) + +#define mi_atomic_cas_strong(p,expected,desired,mem_success,mem_fail) \ + mi_atomic(compare_exchange_strong_explicit)(p,expected,desired,mem_success,mem_fail) + +#define mi_atomic_load_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_load_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_store_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_store_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_exchange_release(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_exchange_acq_rel(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_cas_weak_release(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed)) +#define mi_atomic_cas_weak_acq_rel(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire)) +#define mi_atomic_cas_strong_release(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed)) +#define mi_atomic_cas_strong_acq_rel(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire)) + +#define mi_atomic_add_relaxed(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_sub_relaxed(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_add_acq_rel(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_sub_acq_rel(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_and_acq_rel(p,x) 
mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_or_acq_rel(p,x) mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel)) + +#define mi_atomic_increment_relaxed(p) mi_atomic_add_relaxed(p,(uintptr_t)1) +#define mi_atomic_decrement_relaxed(p) mi_atomic_sub_relaxed(p,(uintptr_t)1) +#define mi_atomic_increment_acq_rel(p) mi_atomic_add_acq_rel(p,(uintptr_t)1) +#define mi_atomic_decrement_acq_rel(p) mi_atomic_sub_acq_rel(p,(uintptr_t)1) + +static inline void mi_atomic_yield(void); +static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add); +static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub); + + +#if defined(__cplusplus) || !defined(_MSC_VER) + +// In C++/C11 atomics we have polymorphic atomics so can use the typed `ptr` variants (where `tp` is the type of atomic value) +// We use these macros so we can provide a typed wrapper in MSVC in C compilation mode as well +#define mi_atomic_load_ptr_acquire(tp,p) mi_atomic_load_acquire(p) +#define mi_atomic_load_ptr_relaxed(tp,p) mi_atomic_load_relaxed(p) + +// In C++ we need to add casts to help resolve templates if NULL is passed +#if defined(__cplusplus) +#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,(tp*)x) +#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,(tp*)x) +#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des) +#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des) +#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des) +#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x) +#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x) +#else +#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,x) +#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,x) +#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des) +#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des) +#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,des) +#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,x) +#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x) +#endif + +// These are used by the statistics +static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) { + return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed)); +} +static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) { + int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p); + while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, ¤t, x)) { /* nothing */ }; +} + +// Used by timers +#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) + +#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d) +#define mi_atomic_addi64_acq_rel(p,i) mi_atomic_add_acq_rel(p,i) + + +#elif defined(_MSC_VER) + +// MSVC C compilation wrapper that uses Interlocked operations to model C11 atomics. 
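// Illustrative note (not part of the upstream header): under this wrapper a call such as
//
//   _Atomic(uintptr_t) count = MI_ATOMIC_VAR_INIT(0);
//   mi_atomic_increment_relaxed(&count);
//
// expands to mi_atomic_fetch_add_explicit(&count, 1, mi_memory_order_relaxed), which the
// definitions below implement with _InterlockedExchangeAdd (the 64-bit variant on _WIN64).
// The memory-order argument is accepted but ignored, since the plain Interlocked intrinsics
// already imply a full barrier.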
+#define WIN32_LEAN_AND_MEAN +#include +#include +#ifdef _WIN64 +typedef LONG64 msc_intptr_t; +#define MI_64(f) f##64 +#else +typedef LONG msc_intptr_t; +#define MI_64(f) f +#endif + +typedef enum mi_memory_order_e { + mi_memory_order_relaxed, + mi_memory_order_consume, + mi_memory_order_acquire, + mi_memory_order_release, + mi_memory_order_acq_rel, + mi_memory_order_seq_cst +} mi_memory_order; + +static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add); +} +static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub)); +} +static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x); +} +static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x); +} +static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) { + (void)(mo1); (void)(mo2); + uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected)); + if (read == *expected) { + return true; + } + else { + *expected = read; + return false; + } +} +static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) { + return mi_atomic_compare_exchange_strong_explicit(p, expected, desired, mo1, mo2); +} +static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange); +} +static inline void mi_atomic_thread_fence(mi_memory_order mo) { + (void)(mo); + _Atomic(uintptr_t) x = 0; + mi_atomic_exchange_explicit(&x, 1, mo); +} +static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) { + (void)(mo); +#if defined(_M_IX86) || defined(_M_X64) + return *p; +#else + uintptr_t x = *p; + if (mo > mi_memory_order_relaxed) { + while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ }; + } + return x; +#endif +} +static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { + (void)(mo); +#if defined(_M_IX86) || defined(_M_X64) + *p = x; +#else + mi_atomic_exchange_explicit(p, x, mo); +#endif +} +static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) { + (void)(mo); +#if defined(_M_X64) + return *p; +#else + int64_t old = *p; + int64_t x = old; + while ((old = InterlockedCompareExchange64(p, x, old)) != x) { + x = old; + } + return x; +#endif +} +static inline void mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) { + (void)(mo); +#if defined(x_M_IX86) || defined(_M_X64) + *p = x; +#else + InterlockedExchange64(p, x); +#endif +} + +// These are used by the statistics +static inline int64_t 
mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) { +#ifdef _WIN64 + return (int64_t)mi_atomic_addi((int64_t*)p, add); +#else + int64_t current; + int64_t sum; + do { + current = *p; + sum = current + add; + } while (_InterlockedCompareExchange64(p, sum, current) != current); + return current; +#endif +} +static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) { + int64_t current; + do { + current = *p; + } while (current < x && _InterlockedCompareExchange64(p, x, current) != current); +} + +static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t i) { + mi_atomic_addi64_relaxed(p, i); +} + +static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) { + int64_t read = _InterlockedCompareExchange64(p, des, *exp); + if (read == *exp) { + return true; + } + else { + *exp = read; + return false; + } +} + +// The pointer macros cast to `uintptr_t`. +#define mi_atomic_load_ptr_acquire(tp,p) (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p)) +#define mi_atomic_load_ptr_relaxed(tp,p) (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p)) +#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release((_Atomic(uintptr_t)*)(p),(uintptr_t)(x)) +#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)(x)) +#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_exchange_ptr_release(tp,p,x) (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x) +#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x) + +#define mi_atomic_loadi64_acquire(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_loadi64_relaxed(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_storei64_release(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_storei64_relaxed(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(relaxed)) + + +#endif + + +// Atomically add a signed value; returns the previous value. +static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add) { + return (intptr_t)mi_atomic_add_acq_rel((_Atomic(uintptr_t)*)p, (uintptr_t)add); +} + +// Atomically subtract a signed value; returns the previous value. 
+static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) { + return (intptr_t)mi_atomic_addi(p, -sub); +} + +typedef _Atomic(uintptr_t) mi_atomic_once_t; + +// Returns true only on the first invocation +static inline bool mi_atomic_once( mi_atomic_once_t* once ) { + if (mi_atomic_load_relaxed(once) != 0) return false; // quick test + uintptr_t expected = 0; + return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1 +} + +typedef _Atomic(uintptr_t) mi_atomic_guard_t; + +// Allows only one thread to execute at a time +#define mi_atomic_guard(guard) \ + uintptr_t _mi_guard_expected = 0; \ + for(bool _mi_guard_once = true; \ + _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \ + (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) ) + + + +// Yield +#if defined(__cplusplus) +#include +static inline void mi_atomic_yield(void) { + std::this_thread::yield(); +} +#elif defined(_WIN32) +#define WIN32_LEAN_AND_MEAN +#include +static inline void mi_atomic_yield(void) { + YieldProcessor(); +} +#elif defined(__SSE2__) +#include +static inline void mi_atomic_yield(void) { + _mm_pause(); +} +#elif (defined(__GNUC__) || defined(__clang__)) && \ + (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \ + defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) || defined(__POWERPC__) +#if defined(__x86_64__) || defined(__i386__) +static inline void mi_atomic_yield(void) { + __asm__ volatile ("pause" ::: "memory"); +} +#elif defined(__aarch64__) +static inline void mi_atomic_yield(void) { + __asm__ volatile("wfe"); +} +#elif (defined(__arm__) && __ARM_ARCH__ >= 7) +static inline void mi_atomic_yield(void) { + __asm__ volatile("yield" ::: "memory"); +} +#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__) +#ifdef __APPLE__ +static inline void mi_atomic_yield(void) { + __asm__ volatile ("or r27,r27,r27" ::: "memory"); +} +#else +static inline void mi_atomic_yield(void) { + __asm__ __volatile__ ("or 27,27,27" ::: "memory"); +} +#endif +#elif defined(__armel__) || defined(__ARMEL__) +static inline void mi_atomic_yield(void) { + __asm__ volatile ("nop" ::: "memory"); +} +#endif +#elif defined(__sun) +// Fallback for other archs +#include +static inline void mi_atomic_yield(void) { + smt_pause(); +} +#elif defined(__wasi__) +#include +static inline void mi_atomic_yield(void) { + sched_yield(); +} +#else +#include +static inline void mi_atomic_yield(void) { + sleep(0); +} +#endif + + +#endif // __MIMALLOC_ATOMIC_H diff --git a/Include/internal/mimalloc/mimalloc/internal.h b/Include/internal/mimalloc/mimalloc/internal.h new file mode 100644 index 000000000000000..f076bc6a40f9773 --- /dev/null +++ b/Include/internal/mimalloc/mimalloc/internal.h @@ -0,0 +1,979 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_INTERNAL_H +#define MIMALLOC_INTERNAL_H + + +// -------------------------------------------------------------------------- +// This file contains the interal API's of mimalloc and various utility +// functions and macros. +// -------------------------------------------------------------------------- + +#include "mimalloc/types.h" +#include "mimalloc/track.h" + +#if (MI_DEBUG>0) +#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__) +#else +#define mi_trace_message(...) +#endif + +#define MI_CACHE_LINE 64 +#if defined(_MSC_VER) +#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths) +#pragma warning(disable:26812) // unscoped enum warning +#define mi_decl_noinline __declspec(noinline) +#define mi_decl_thread __declspec(thread) +#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE)) +#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc +#define mi_decl_noinline __attribute__((noinline)) +#define mi_decl_thread __thread +#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE))) +#else +#define mi_decl_noinline +#define mi_decl_thread __thread // hope for the best :-) +#define mi_decl_cache_align +#endif + +#if defined(__EMSCRIPTEN__) && !defined(__wasi__) +#define __wasi__ +#endif + +#if defined(__cplusplus) +#define mi_decl_externc extern "C" +#else +#define mi_decl_externc +#endif + +// pthreads +#if !defined(_WIN32) && !defined(__wasi__) +#define MI_USE_PTHREADS +#include +#endif + +// "options.c" +void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message); +void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...); +void _mi_warning_message(const char* fmt, ...); +void _mi_verbose_message(const char* fmt, ...); +void _mi_trace_message(const char* fmt, ...); +void _mi_options_init(void); +void _mi_error_message(int err, const char* fmt, ...); + +// random.c +void _mi_random_init(mi_random_ctx_t* ctx); +void _mi_random_init_weak(mi_random_ctx_t* ctx); +void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx); +void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx); +uintptr_t _mi_random_next(mi_random_ctx_t* ctx); +uintptr_t _mi_heap_random_next(mi_heap_t* heap); +uintptr_t _mi_os_random_weak(uintptr_t extra_seed); +static inline uintptr_t _mi_random_shuffle(uintptr_t x); + +// init.c +extern mi_decl_cache_align mi_stats_t _mi_stats_main; +extern mi_decl_cache_align const mi_page_t _mi_page_empty; +bool _mi_is_main_thread(void); +size_t _mi_current_thread_count(void); +bool _mi_preloading(void); // true while the C runtime is not initialized yet +mi_threadid_t _mi_thread_id(void) mi_attr_noexcept; +mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap +void _mi_thread_done(mi_heap_t* heap); +void _mi_thread_data_collect(void); + +// os.c +void _mi_os_init(void); // called from process init +void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats); +void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats); +void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats); + +size_t _mi_os_page_size(void); +size_t _mi_os_good_alloc_size(size_t size); +bool _mi_os_has_overcommit(void); +bool _mi_os_has_virtual_reserve(void); + +bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats); +bool _mi_os_reset(void* addr, size_t size, 
mi_stats_t* tld_stats); +bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats); +bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); +bool _mi_os_protect(void* addr, size_t size); +bool _mi_os_unprotect(void* addr, size_t size); +bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats); +bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats); + +void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats); +void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats); + +void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size); +bool _mi_os_use_large_page(size_t size, size_t alignment); +size_t _mi_os_large_page_size(void); + +void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid); + +// arena.c +mi_arena_id_t _mi_arena_id_none(void); +void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats); +void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld); +void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld); +bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id); +bool _mi_arena_contains(const void* p); +void _mi_arena_collect(bool force_purge, mi_stats_t* stats); +void _mi_arena_unsafe_destroy_all(mi_stats_t* stats); + +// "segment-map.c" +void _mi_segment_map_allocated_at(const mi_segment_t* segment); +void _mi_segment_map_freed_at(const mi_segment_t* segment); + +// "segment.c" +mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld); +void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld); +void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld); +bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld); +void _mi_segment_thread_collect(mi_segments_tld_t* tld); + +#if MI_HUGE_PAGE_ABANDON +void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); +#else +void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); +#endif + +uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page +void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld); +void _mi_abandoned_await_readers(void); +void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld); + +// "page.c" +void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc; + +void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks +void _mi_page_unfull(mi_page_t* page); +void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page +void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread... 
+void _mi_heap_delayed_free_all(mi_heap_t* heap); +bool _mi_heap_delayed_free_partial(mi_heap_t* heap); +void _mi_heap_collect_retired(mi_heap_t* heap, bool force); + +void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); +bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); +size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append); +void _mi_deferred_free(mi_heap_t* heap, bool force); + +void _mi_page_free_collect(mi_page_t* page,bool force); +void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments + +size_t _mi_bin_size(uint8_t bin); // for stats +uint8_t _mi_bin(size_t size); // for stats + +// "heap.c" +void _mi_heap_destroy_pages(mi_heap_t* heap); +void _mi_heap_collect_abandon(mi_heap_t* heap); +void _mi_heap_set_default_direct(mi_heap_t* heap); +bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid); +void _mi_heap_unsafe_destroy_all(void); + +// "stats.c" +void _mi_stats_done(mi_stats_t* stats); +mi_msecs_t _mi_clock_now(void); +mi_msecs_t _mi_clock_end(mi_msecs_t start); +mi_msecs_t _mi_clock_start(void); + +// "alloc.c" +void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic` +void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; +void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned` +void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept; +mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p); +bool _mi_free_delayed_block(mi_block_t* block); +void _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration +void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size); + +// option.c, c primitives +char _mi_toupper(char c); +int _mi_strnicmp(const char* s, const char* t, size_t n); +void _mi_strlcpy(char* dest, const char* src, size_t dest_size); +void _mi_strlcat(char* dest, const char* src, size_t dest_size); +size_t _mi_strlen(const char* s); +size_t _mi_strnlen(const char* s, size_t max_len); + + +#if MI_DEBUG>1 +bool _mi_page_is_valid(mi_page_t* page); +#endif + + +// ------------------------------------------------------ +// Branches +// ------------------------------------------------------ + +#if defined(__GNUC__) || defined(__clang__) +#define mi_unlikely(x) (__builtin_expect(!!(x),false)) +#define mi_likely(x) (__builtin_expect(!!(x),true)) +#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#define mi_unlikely(x) (x) [[unlikely]] +#define mi_likely(x) (x) [[likely]] +#else +#define mi_unlikely(x) (x) +#define mi_likely(x) (x) +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + + +/* ----------------------------------------------------------- + Error codes passed to `_mi_fatal_error` + All are recoverable but EFAULT is a serious error and aborts by default in secure mode. 
+ For portability define undefined error codes using common Unix codes: + +----------------------------------------------------------- */ +#include +#ifndef EAGAIN // double free +#define EAGAIN (11) +#endif +#ifndef ENOMEM // out of memory +#define ENOMEM (12) +#endif +#ifndef EFAULT // corrupted free-list or meta-data +#define EFAULT (14) +#endif +#ifndef EINVAL // trying to free an invalid pointer +#define EINVAL (22) +#endif +#ifndef EOVERFLOW // count*size overflow +#define EOVERFLOW (75) +#endif + + +/* ----------------------------------------------------------- + Inlined definitions +----------------------------------------------------------- */ +#define MI_UNUSED(x) (void)(x) +#if (MI_DEBUG>0) +#define MI_UNUSED_RELEASE(x) +#else +#define MI_UNUSED_RELEASE(x) MI_UNUSED(x) +#endif + +#define MI_INIT4(x) x(),x(),x(),x() +#define MI_INIT8(x) MI_INIT4(x),MI_INIT4(x) +#define MI_INIT16(x) MI_INIT8(x),MI_INIT8(x) +#define MI_INIT32(x) MI_INIT16(x),MI_INIT16(x) +#define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x) +#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x) +#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x) + + +#include +// initialize a local variable to zero; use memset as compilers optimize constant sized memset's +#define _mi_memzero_var(x) memset(&x,0,sizeof(x)) + +// Is `x` a power of two? (0 is considered a power of two) +static inline bool _mi_is_power_of_two(uintptr_t x) { + return ((x & (x - 1)) == 0); +} + +// Is a pointer aligned? +static inline bool _mi_is_aligned(void* p, size_t alignment) { + mi_assert_internal(alignment != 0); + return (((uintptr_t)p % alignment) == 0); +} + +// Align upwards +static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) { + mi_assert_internal(alignment != 0); + uintptr_t mask = alignment - 1; + if ((alignment & mask) == 0) { // power of two? + return ((sz + mask) & ~mask); + } + else { + return (((sz + mask)/alignment)*alignment); + } +} + +// Align downwards +static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) { + mi_assert_internal(alignment != 0); + uintptr_t mask = alignment - 1; + if ((alignment & mask) == 0) { // power of two? + return (sz & ~mask); + } + else { + return ((sz / alignment) * alignment); + } +} + +// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`. +static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) { + mi_assert_internal(divider != 0); + return (divider == 0 ? size : ((size + divider - 1) / divider)); +} + +// Is memory zero initialized? +static inline bool mi_mem_is_zero(const void* p, size_t size) { + for (size_t i = 0; i < size; i++) { + if (((uint8_t*)p)[i] != 0) return false; + } + return true; +} + + +// Align a byte size to a size in _machine words_, +// i.e. byte size == `wsize*sizeof(void*)`. 
+static inline size_t _mi_wsize_from_size(size_t size) { + mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t)); + return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t); +} + +// Overflow detecting multiply +#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5)) +#include // UINT_MAX, ULONG_MAX +#if defined(_CLOCK_T) // for Illumos +#undef _CLOCK_T +#endif +static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { + #if (SIZE_MAX == ULONG_MAX) + return __builtin_umull_overflow(count, size, (unsigned long *)total); + #elif (SIZE_MAX == UINT_MAX) + return __builtin_umul_overflow(count, size, (unsigned int *)total); + #else + return __builtin_umulll_overflow(count, size, (unsigned long long *)total); + #endif +} +#else /* __builtin_umul_overflow is unavailable */ +static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { + #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX) + *total = count * size; + // note: gcc/clang optimize this to directly check the overflow flag + return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count); +} +#endif + +// Safe multiply `count*size` into `total`; return `true` on overflow. +static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) { + if (count==1) { // quick check for the case where count is one (common for C++ allocators) + *total = size; + return false; + } + else if mi_unlikely(mi_mul_overflow(count, size, total)) { + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size); + #endif + *total = SIZE_MAX; + return true; + } + else return false; +} + + +/*---------------------------------------------------------------------------------------- + Heap functions +------------------------------------------------------------------------------------------- */ + +extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap + +static inline bool mi_heap_is_backing(const mi_heap_t* heap) { + return (heap->tld->heap_backing == heap); +} + +static inline bool mi_heap_is_initialized(mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + return (heap != &_mi_heap_empty); +} + +static inline uintptr_t _mi_ptr_cookie(const void* p) { + extern mi_heap_t _mi_heap_main; + mi_assert_internal(_mi_heap_main.cookie != 0); + return ((uintptr_t)p ^ _mi_heap_main.cookie); +} + +/* ----------------------------------------------------------- + Pages +----------------------------------------------------------- */ + +static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) { + mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE)); + const size_t idx = _mi_wsize_from_size(size); + mi_assert_internal(idx < MI_PAGES_DIRECT); + return heap->pages_free_direct[idx]; +} + +// Segment that contains the pointer +// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE), +// and we need align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it; +// therefore we align one byte before `p`. 
+static inline mi_segment_t* _mi_ptr_segment(const void* p) { + mi_assert_internal(p != NULL); + return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK); +} + +static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) { + mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0); + return (mi_page_t*)(s); +} + +static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) { + mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0); + return (mi_slice_t*)(p); +} + +// Segment belonging to a page +static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) { + mi_segment_t* segment = _mi_ptr_segment(page); + mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries)); + return segment; +} + +static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) { + mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset); + mi_assert_internal(start >= _mi_ptr_segment(slice)->slices); + mi_assert_internal(start->slice_offset == 0); + mi_assert_internal(start + start->slice_count > slice); + return start; +} + +// Get the page containing the pointer (performance critical as it is called in mi_free) +static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) { + mi_assert_internal(p > (void*)segment); + ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment; + mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE); + size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT; + mi_assert_internal(idx <= segment->slice_entries); + mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx]; + mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data + mi_assert_internal(slice->slice_offset == 0); + mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries); + return mi_slice_to_page(slice); +} + +// Quick page start for initialized pages +static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) { + return _mi_segment_page_start(segment, page, page_size); +} + +// Get the page containing the pointer +static inline mi_page_t* _mi_ptr_page(void* p) { + return _mi_segment_page_of(_mi_ptr_segment(p), p); +} + +// Get the block size of a page (special case for huge objects) +static inline size_t mi_page_block_size(const mi_page_t* page) { + const size_t bsize = page->xblock_size; + mi_assert_internal(bsize > 0); + if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) { + return bsize; + } + else { + size_t psize; + _mi_segment_page_start(_mi_page_segment(page), page, &psize); + return psize; + } +} + +static inline bool mi_page_is_huge(const mi_page_t* page) { + return (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE); +} + +// Get the usable block size of a page without fixed padding. +// This may still include internal padding due to alignment and rounding up size classes. 
+static inline size_t mi_page_usable_block_size(const mi_page_t* page) { + return mi_page_block_size(page) - MI_PADDING_SIZE; +} + +// size of a segment +static inline size_t mi_segment_size(mi_segment_t* segment) { + return segment->segment_slices * MI_SEGMENT_SLICE_SIZE; +} + +static inline uint8_t* mi_segment_end(mi_segment_t* segment) { + return (uint8_t*)segment + mi_segment_size(segment); +} + +// Thread free access +static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) { + return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3); +} + +static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) { + return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3); +} + +// Heap access +static inline mi_heap_t* mi_page_heap(const mi_page_t* page) { + return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap)); +} + +static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) { + mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING); + mi_atomic_store_release(&page->xheap,(uintptr_t)heap); +} + +// Thread free flag helpers +static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) { + return (mi_block_t*)(tf & ~0x03); +} +static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) { + return (mi_delayed_t)(tf & 0x03); +} +static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) { + return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed); +} +static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) { + return mi_tf_make(mi_tf_block(tf),delayed); +} +static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) { + return mi_tf_make(block, mi_tf_delayed(tf)); +} + +// are all blocks in a page freed? +// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`. +static inline bool mi_page_all_free(const mi_page_t* page) { + mi_assert_internal(page != NULL); + return (page->used == 0); +} + +// are there any available blocks? +static inline bool mi_page_has_any_available(const mi_page_t* page) { + mi_assert_internal(page != NULL && page->reserved > 0); + return (page->used < page->reserved || (mi_page_thread_free(page) != NULL)); +} + +// are there immediately available blocks, i.e. blocks available on the free list. +static inline bool mi_page_immediate_available(const mi_page_t* page) { + mi_assert_internal(page != NULL); + return (page->free != NULL); +} + +// is more than 7/8th of a page in use? 
+static inline bool mi_page_mostly_used(const mi_page_t* page) {
+  if (page==NULL) return true;
+  uint16_t frac = page->reserved / 8U;
+  return (page->reserved - page->used <= frac);
+}
+
+static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) {
+  return &((mi_heap_t*)heap)->pages[_mi_bin(size)];
+}
+
+
+
+//-----------------------------------------------------------
+// Page flags
+//-----------------------------------------------------------
+static inline bool mi_page_is_in_full(const mi_page_t* page) {
+  return page->flags.x.in_full;
+}
+
+static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
+  page->flags.x.in_full = in_full;
+}
+
+static inline bool mi_page_has_aligned(const mi_page_t* page) {
+  return page->flags.x.has_aligned;
+}
+
+static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
+  page->flags.x.has_aligned = has_aligned;
+}
+
+
+/* -------------------------------------------------------------------
+Encoding/Decoding the free list next pointers
+
+This is to protect against buffer overflow exploits where the
+free list is mutated. Many hardened allocators xor the next pointer `p`
+with a secret key `k1`, as `p^k1`. This prevents overwriting with known
+values but might still be too weak: if the attacker can guess
+the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
+Moreover, if multiple blocks can be read as well, the attacker can
+xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
+about the pointers (and subsequently `k1`).
+
+Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<<k1)+k1`,
+where `<<<` is a bit rotation. Because the rotation and addition are not
+associative with xor, the cancellation tricks above no longer apply even
+if an attacker can read or guess several encoded values. A separate
+`null` value is encoded as well so that an encoded `NULL` does not show
+up as an easily recognizable sentinel in memory.
+------------------------------------------------------------------- */
+
+static inline bool mi_is_in_same_segment(const void* p, const void* q) {
+  return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
+}
+
+static inline bool mi_is_in_same_page(const void* p, const void* q) {
+  mi_segment_t* segment = _mi_ptr_segment(p);
+  if (_mi_ptr_segment(q) != segment) return false;
+  // assume `q` may be invalid: check against the page area instead of calling `_mi_segment_page_of(segment, q)`
+  mi_page_t* page = _mi_segment_page_of(segment, p);
+  size_t psize;
+  uint8_t* start = _mi_segment_page_start(segment, page, &psize);
+  return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
+}
+
+static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
+  shift %= MI_INTPTR_BITS;
+  return (shift==0 ? x : ((x << shift) | (x >> (MI_INTPTR_BITS - shift))));
+}
+static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
+  shift %= MI_INTPTR_BITS;
+  return (shift==0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift))));
+}
+
+static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
+  void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
+  return (p==null ? NULL : p);
+}
+
+static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
+  uintptr_t x = (uintptr_t)(p==NULL ? null : p);
+  return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
+}
+
+static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
+  mi_track_mem_defined(block,sizeof(mi_block_t));
+  mi_block_t* next;
+  #ifdef MI_ENCODE_FREELIST
+  next = (mi_block_t*)mi_ptr_decode(null, block->next, keys);
+  #else
+  MI_UNUSED(keys); MI_UNUSED(null);
+  next = (mi_block_t*)block->next;
+  #endif
+  mi_track_mem_noaccess(block,sizeof(mi_block_t));
+  return next;
+}
+
+static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
+  mi_track_mem_undefined(block,sizeof(mi_block_t));
+  #ifdef MI_ENCODE_FREELIST
+  block->next = mi_ptr_encode(null, next, keys);
+  #else
+  MI_UNUSED(keys); MI_UNUSED(null);
+  block->next = (mi_encoded_t)next;
+  #endif
+  mi_track_mem_noaccess(block,sizeof(mi_block_t));
+}
+
+static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
+  #ifdef MI_ENCODE_FREELIST
+  mi_block_t* next = mi_block_nextx(page,block,page->keys);
+  // check for free list corruption: is `next` at least in the same page?
+  // TODO: check if `next` is `page->block_size` aligned?
+ if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) { + _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); + next = NULL; + } + return next; + #else + MI_UNUSED(page); + return mi_block_nextx(page,block,NULL); + #endif +} + +static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) { + #ifdef MI_ENCODE_FREELIST + mi_block_set_nextx(page,block,next, page->keys); + #else + MI_UNUSED(page); + mi_block_set_nextx(page,block,next,NULL); + #endif +} + + +// ------------------------------------------------------------------- +// commit mask +// ------------------------------------------------------------------- + +static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + cm->mask[i] = 0; + } +} + +static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + cm->mask[i] = ~((size_t)0); + } +} + +static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if (cm->mask[i] != 0) return false; + } + return true; +} + +static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if (cm->mask[i] != ~((size_t)0)) return false; + } + return true; +} + +// defined in `segment.c`: +size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total); +size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx); + +#define mi_commit_mask_foreach(cm,idx,count) \ + idx = 0; \ + while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) { + +#define mi_commit_mask_foreach_end() \ + idx += count; \ + } + + + +/* ----------------------------------------------------------- + memory id's +----------------------------------------------------------- */ + +static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) { + mi_memid_t memid; + _mi_memzero_var(memid); + memid.memkind = memkind; + return memid; +} + +static inline mi_memid_t _mi_memid_none(void) { + return _mi_memid_create(MI_MEM_NONE); +} + +static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) { + mi_memid_t memid = _mi_memid_create(MI_MEM_OS); + memid.initially_committed = committed; + memid.initially_zero = is_zero; + memid.is_pinned = is_large; + return memid; +} + + +// ------------------------------------------------------------------- +// Fast "random" shuffle +// ------------------------------------------------------------------- + +static inline uintptr_t _mi_random_shuffle(uintptr_t x) { + if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros +#if (MI_INTPTR_SIZE==8) + // by Sebastiano Vigna, see: + x ^= x >> 30; + x *= 0xbf58476d1ce4e5b9UL; + x ^= x >> 27; + x *= 0x94d049bb133111ebUL; + x ^= x >> 31; +#elif (MI_INTPTR_SIZE==4) + // by Chris Wellons, see: + x ^= x >> 16; + x *= 0x7feb352dUL; + x ^= x >> 15; + x *= 0x846ca68bUL; + x ^= x >> 16; +#endif + return x; +} + +// ------------------------------------------------------------------- +// Optimize numa node access for the common case (= one node) +// ------------------------------------------------------------------- + +int _mi_os_numa_node_get(mi_os_tld_t* tld); +size_t _mi_os_numa_node_count_get(void); + +extern _Atomic(size_t) _mi_numa_node_count; +static inline int 
_mi_os_numa_node(mi_os_tld_t* tld) { + if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; } + else return _mi_os_numa_node_get(tld); +} +static inline size_t _mi_os_numa_node_count(void) { + const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count); + if mi_likely(count > 0) { return count; } + else return _mi_os_numa_node_count_get(); +} + + + +// ----------------------------------------------------------------------- +// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero) +// ----------------------------------------------------------------------- + +#if defined(__GNUC__) + +#include // LONG_MAX +#define MI_HAVE_FAST_BITSCAN +static inline size_t mi_clz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (INTPTR_MAX == LONG_MAX) + return __builtin_clzl(x); +#else + return __builtin_clzll(x); +#endif +} +static inline size_t mi_ctz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (INTPTR_MAX == LONG_MAX) + return __builtin_ctzl(x); +#else + return __builtin_ctzll(x); +#endif +} + +#elif defined(_MSC_VER) + +#include // LONG_MAX +#include // BitScanReverse64 +#define MI_HAVE_FAST_BITSCAN +static inline size_t mi_clz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; + unsigned long idx; +#if (INTPTR_MAX == LONG_MAX) + _BitScanReverse(&idx, x); +#else + _BitScanReverse64(&idx, x); +#endif + return ((MI_INTPTR_BITS - 1) - idx); +} +static inline size_t mi_ctz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; + unsigned long idx; +#if (INTPTR_MAX == LONG_MAX) + _BitScanForward(&idx, x); +#else + _BitScanForward64(&idx, x); +#endif + return idx; +} + +#else +static inline size_t mi_ctz32(uint32_t x) { + // de Bruijn multiplication, see + static const unsigned char debruijn[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 + }; + if (x==0) return 32; + return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27]; +} +static inline size_t mi_clz32(uint32_t x) { + // de Bruijn multiplication, see + static const uint8_t debruijn[32] = { + 31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1, + 23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0 + }; + if (x==0) return 32; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27]; +} + +static inline size_t mi_clz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (MI_INTPTR_BITS <= 32) + return mi_clz32((uint32_t)x); +#else + size_t count = mi_clz32((uint32_t)(x >> 32)); + if (count < 32) return count; + return (32 + mi_clz32((uint32_t)x)); +#endif +} +static inline size_t mi_ctz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (MI_INTPTR_BITS <= 32) + return mi_ctz32((uint32_t)x); +#else + size_t count = mi_ctz32((uint32_t)x); + if (count < 32) return count; + return (32 + mi_ctz32((uint32_t)(x>>32))); +#endif +} + +#endif + +// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero) +static inline size_t mi_bsr(uintptr_t x) { + return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x)); +} + + +// --------------------------------------------------------------------------------- +// Provide our own `_mi_memcpy` for potential performance optimizations. +// +// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if +// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support +// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). 
See also issue #201 and pr #253. +// --------------------------------------------------------------------------------- + +#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) +#include +extern bool _mi_cpu_has_fsrm; +static inline void _mi_memcpy(void* dst, const void* src, size_t n) { + if (_mi_cpu_has_fsrm) { + __movsb((unsigned char*)dst, (const unsigned char*)src, n); + } + else { + memcpy(dst, src, n); + } +} +static inline void _mi_memzero(void* dst, size_t n) { + if (_mi_cpu_has_fsrm) { + __stosb((unsigned char*)dst, 0, n); + } + else { + memset(dst, 0, n); + } +} +#else +static inline void _mi_memcpy(void* dst, const void* src, size_t n) { + memcpy(dst, src, n); +} +static inline void _mi_memzero(void* dst, size_t n) { + memset(dst, 0, n); +} +#endif + +// ------------------------------------------------------------------------------- +// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned +// This is used for example in `mi_realloc`. +// ------------------------------------------------------------------------------- + +#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) +// On GCC/CLang we provide a hint that the pointers are word aligned. +static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) { + mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0)); + void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE); + const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE); + _mi_memcpy(adst, asrc, n); +} + +static inline void _mi_memzero_aligned(void* dst, size_t n) { + mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0); + void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE); + _mi_memzero(adst, n); +} +#else +// Default fallback on `_mi_memcpy` +static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) { + mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0)); + _mi_memcpy(dst, src, n); +} + +static inline void _mi_memzero_aligned(void* dst, size_t n) { + mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0); + _mi_memzero(dst, n); +} +#endif + + +#endif diff --git a/Include/internal/mimalloc/mimalloc/prim.h b/Include/internal/mimalloc/mimalloc/prim.h new file mode 100644 index 000000000000000..4b9e4dc4194d776 --- /dev/null +++ b/Include/internal/mimalloc/mimalloc/prim.h @@ -0,0 +1,323 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_PRIM_H +#define MIMALLOC_PRIM_H + + +// -------------------------------------------------------------------------- +// This file specifies the primitive portability API. +// Each OS/host needs to implement these primitives, see `src/prim` +// for implementations on Window, macOS, WASI, and Linux/Unix. +// +// note: on all primitive functions, we always have result parameters != NUL, and: +// addr != NULL and page aligned +// size > 0 and page aligned +// return value is an error code an int where 0 is success. 
+// -------------------------------------------------------------------------- + +// OS memory configuration +typedef struct mi_os_mem_config_s { + size_t page_size; // 4KiB + size_t large_page_size; // 2MiB + size_t alloc_granularity; // smallest allocation size (on Windows 64KiB) + bool has_overcommit; // can we reserve more memory than can be actually committed? + bool must_free_whole; // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc) + bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory) +} mi_os_mem_config_t; + +// Initialize +void _mi_prim_mem_init( mi_os_mem_config_t* config ); + +// Free OS memory +int _mi_prim_free(void* addr, size_t size ); + +// Allocate OS memory. Return NULL on error. +// The `try_alignment` is just a hint and the returned pointer does not have to be aligned. +// If `commit` is false, the virtual memory range only needs to be reserved (with no access) +// which will later be committed explicitly using `_mi_prim_commit`. +// `is_zero` is set to true if the memory was zero initialized (as on most OS's) +// pre: !commit => !allow_large +// try_alignment >= _mi_os_page_size() and a power of 2 +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr); + +// Commit memory. Returns error code or 0 on success. +// For example, on Linux this would make the memory PROT_READ|PROT_WRITE. +// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows) +int _mi_prim_commit(void* addr, size_t size, bool* is_zero); + +// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true +// if the memory would need to be re-committed. For example, on Windows this is always true, +// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit. +// pre: needs_recommit != NULL +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit); + +// Reset memory. The range keeps being accessible but the content might be reset. +// Returns error code or 0 on success. +int _mi_prim_reset(void* addr, size_t size); + +// Protect memory. Returns error code or 0 on success. +int _mi_prim_protect(void* addr, size_t size, bool protect); + +// Allocate huge (1GiB) pages possibly associated with a NUMA node. +// `is_zero` is set to true if the memory was zero initialized (as on most OS's) +// pre: size > 0 and a multiple of 1GiB. +// numa_node is either negative (don't care), or a numa node number. +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr); + +// Return the current NUMA node +size_t _mi_prim_numa_node(void); + +// Return the number of logical NUMA nodes +size_t _mi_prim_numa_node_count(void); + +// Clock ticks +mi_msecs_t _mi_prim_clock_now(void); + +// Return process information (only for statistics) +typedef struct mi_process_info_s { + mi_msecs_t elapsed; + mi_msecs_t utime; + mi_msecs_t stime; + size_t current_rss; + size_t peak_rss; + size_t current_commit; + size_t peak_commit; + size_t page_faults; +} mi_process_info_t; + +void _mi_prim_process_info(mi_process_info_t* pinfo); + +// Default stderr output. (only for warnings etc. with verbose enabled) +// msg != NULL && _mi_strlen(msg) > 0 +void _mi_prim_out_stderr( const char* msg ); + +// Get an environment variable. 
+// (only for options)
+// name != NULL, result != NULL, result_size >= 64
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size);
+
+
+// Fill a buffer with strong randomness; return `false` on error or if
+// there is no strong randomization available.
+bool _mi_prim_random_buf(void* buf, size_t buf_len);
+
+// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination.
+void _mi_prim_thread_init_auto_done(void);
+
+// Called on process exit and may take action to clean up resources associated with the thread auto done.
+void _mi_prim_thread_done_auto_done(void);
+
+// Called when the default heap for a thread changes
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
+
+
+//-------------------------------------------------------------------
+// Thread id: `_mi_prim_thread_id()`
+//
+// Getting the thread id should be performant as it is called in the
+// fast path of `_mi_free` and we specialize for various platforms as
+// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
+// We only require _mi_prim_thread_id() to return a unique id
+// for each thread (unequal to zero).
+//-------------------------------------------------------------------
+
+// defined in `init.c`; do not use these directly
+extern mi_decl_thread mi_heap_t* _mi_heap_default;  // default heap to allocate from
+extern bool _mi_process_is_initialized;             // has mi_process_init been called?
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;
+
+#if defined(_WIN32)
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+  // Windows: works on Intel and ARM in both 32- and 64-bit
+  return (uintptr_t)NtCurrentTeb();
+}
+
+// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
+// both the OS and libc implementation so we use specific tests for each main platform.
+// If you test on another platform and it works please send a PR :-)
+// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
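As a usage illustration only (not part of this patch): the free path is the main consumer of `_mi_prim_thread_id()`, comparing it against the owner id stored in the segment to choose between the local and the cross-thread free route. The helper name below is hypothetical; `mi_atomic_load_relaxed` and the `thread_id` field of `mi_segment_t` come from these headers.

// Hypothetical sketch of how a unique per-thread id is consumed on the free path.
static inline bool mi_is_local_free_sketch(mi_segment_t* segment) {
  // the owning thread recorded its id in `segment->thread_id` when the segment
  // was allocated; equality means the cheap thread-local free path can be used
  return (mi_atomic_load_relaxed(&segment->thread_id) == _mi_prim_thread_id());
}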
+#elif defined(__GNUC__) && ( \ + (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ + || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \ + || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ + || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ + || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ + ) + +static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept { + void* res; + const size_t ofs = (slot*sizeof(void*)); + #if defined(__i386__) + __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS + #elif defined(__APPLE__) && defined(__x86_64__) + __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS + #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) + __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI + #elif defined(__x86_64__) + __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS + #elif defined(__arm__) + void** tcb; MI_UNUSED(ofs); + __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); + res = tcb[slot]; + #elif defined(__aarch64__) + void** tcb; MI_UNUSED(ofs); + #if defined(__APPLE__) // M1, issue #343 + __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); + #else + __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); + #endif + res = tcb[slot]; + #endif + return res; +} + +// setting a tls slot is only used on macOS for now +static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept { + const size_t ofs = (slot*sizeof(void*)); + #if defined(__i386__) + __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS + #elif defined(__APPLE__) && defined(__x86_64__) + __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS + #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) + __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI + #elif defined(__x86_64__) + __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS + #elif defined(__arm__) + void** tcb; MI_UNUSED(ofs); + __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); + tcb[slot] = value; + #elif defined(__aarch64__) + void** tcb; MI_UNUSED(ofs); + #if defined(__APPLE__) // M1, issue #343 + __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); + #else + __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); + #endif + tcb[slot] = value; + #endif +} + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + #if defined(__BIONIC__) + // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id + // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86 + return (uintptr_t)mi_prim_tls_slot(1); + #else + // in all our other targets, slot 0 is the thread id + // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h + // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36 + return (uintptr_t)mi_prim_tls_slot(0); + #endif +} + +#else + +// otherwise use portable C, taking the address of a thread local variable (this is still 
very fast on most platforms). +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + return (uintptr_t)&_mi_heap_default; +} + +#endif + + + +/* ---------------------------------------------------------------------------------------- +The thread local default heap: `_mi_prim_get_default_heap()` +This is inlined here as it is on the fast path for allocation functions. + +On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a +__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures +that the storage will always be available (allocated on the thread stacks). + +On some platforms though we cannot use that when overriding `malloc` since the underlying +TLS implementation (or the loader) will call itself `malloc` on a first access and recurse. +We try to circumvent this in an efficient way: +- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the + loader itself calls `malloc` even before the modules are initialized. +- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS). +- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323) +------------------------------------------------------------------------------------------- */ + +static inline mi_heap_t* mi_prim_get_default_heap(void); + +#if defined(MI_MALLOC_OVERRIDE) +#if defined(__APPLE__) // macOS + #define MI_TLS_SLOT 89 // seems unused? + // #define MI_TLS_RECURSE_GUARD 1 + // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89) + // see +#elif defined(__OpenBSD__) + // use end bytes of a name; goes wrong if anyone uses names > 23 characters (ptrhread specifies 16) + // see + #define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24) + // #elif defined(__DragonFly__) + // #warning "mimalloc is not working correctly on DragonFly yet." + // #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) +#elif defined(__ANDROID__) + // See issue #381 + #define MI_TLS_PTHREAD +#endif +#endif + + +#if defined(MI_TLS_SLOT) + +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT); + if mi_unlikely(heap == NULL) { + #ifdef __GNUC__ + __asm(""); // prevent conditional load of the address of _mi_heap_empty + #endif + heap = (mi_heap_t*)&_mi_heap_empty; + } + return heap; +} + +#elif defined(MI_TLS_PTHREAD_SLOT_OFS) + +static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) { + pthread_t self = pthread_self(); + #if defined(__DragonFly__) + if (self==NULL) return NULL; + #endif + return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS); +} + +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot(); + if mi_unlikely(pheap == NULL) return _mi_heap_main_get(); + mi_heap_t* heap = *pheap; + if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty; + return heap; +} + +#elif defined(MI_TLS_PTHREAD) + +extern pthread_key_t _mi_heap_default_key; +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key)); + return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap); +} + +#else // default using a thread local variable; used on most platforms. 
+ +static inline mi_heap_t* mi_prim_get_default_heap(void) { + #if defined(MI_TLS_RECURSE_GUARD) + if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get(); + #endif + return _mi_heap_default; +} + +#endif // mi_prim_get_default_heap() + + + +#endif // MIMALLOC_PRIM_H diff --git a/Include/internal/mimalloc/mimalloc/track.h b/Include/internal/mimalloc/mimalloc/track.h new file mode 100644 index 000000000000000..fa1a048d846a9cf --- /dev/null +++ b/Include/internal/mimalloc/mimalloc/track.h @@ -0,0 +1,147 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_TRACK_H +#define MIMALLOC_TRACK_H + +/* ------------------------------------------------------------------------------------------------------ +Track memory ranges with macros for tools like Valgrind address sanitizer, or other memory checkers. +These can be defined for tracking allocation: + + #define mi_track_malloc_size(p,reqsize,size,zero) + #define mi_track_free_size(p,_size) + +The macros are set up such that the size passed to `mi_track_free_size` +always matches the size of `mi_track_malloc_size`. (currently, `size == mi_usable_size(p)`). +The `reqsize` is what the user requested, and `size >= reqsize`. +The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled, +or otherwise it is the usable block size which may be larger than the original request. +Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc). +The `zero` parameter is `true` if the allocated block is zero initialized. + +Optional: + + #define mi_track_align(p,alignedp,offset,size) + #define mi_track_resize(p,oldsize,newsize) + #define mi_track_init() + +The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block. +The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`). +The `mi_track_resize` is currently unused but could be called on reallocations within a block. +`mi_track_init` is called at program start. 
+ +The following macros are for tools like asan and valgrind to track whether memory is +defined, undefined, or not accessible at all: + + #define mi_track_mem_defined(p,size) + #define mi_track_mem_undefined(p,size) + #define mi_track_mem_noaccess(p,size) + +-------------------------------------------------------------------------------------------------------*/ + +#if MI_TRACK_VALGRIND +// valgrind tool + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy +#define MI_TRACK_TOOL "valgrind" + +#include +#include + +#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero) +#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/) +#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/) +#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) +#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) +#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) + +#elif MI_TRACK_ASAN +// address sanitizer + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_TOOL "asan" + +#include + +#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size) +#define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size) + +#elif MI_TRACK_ETW +// windows event tracing + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 1 +#define MI_TRACK_TOOL "ETW" + +#define WIN32_LEAN_AND_MEAN +#include +#include "../src/prim/windows/etw.h" + +#define mi_track_init() EventRegistermicrosoft_windows_mimalloc(); +#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size) +#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size) + +#else +// no tracking + +#define MI_TRACK_ENABLED 0 +#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_TOOL "none" + +#define mi_track_malloc_size(p,reqsize,size,zero) +#define mi_track_free_size(p,_size) + +#endif + +// ------------------- +// Utility definitions + +#ifndef mi_track_resize +#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false) +#endif + +#ifndef mi_track_align +#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset) +#endif + +#ifndef mi_track_init +#define mi_track_init() +#endif + +#ifndef mi_track_mem_defined +#define mi_track_mem_defined(p,size) +#endif + +#ifndef mi_track_mem_undefined +#define mi_track_mem_undefined(p,size) +#endif + +#ifndef mi_track_mem_noaccess +#define mi_track_mem_noaccess(p,size) +#endif + + +#if MI_PADDING +#define mi_track_malloc(p,reqsize,zero) \ + if ((p)!=NULL) { \ + mi_assert_internal(mi_usable_size(p)==(reqsize)); \ + mi_track_malloc_size(p,reqsize,reqsize,zero); \ + } +#else +#define mi_track_malloc(p,reqsize,zero) \ + if ((p)!=NULL) { \ + mi_assert_internal(mi_usable_size(p)>=(reqsize)); \ + mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \ + } +#endif + +#endif diff --git a/Include/internal/mimalloc/mimalloc/types.h b/Include/internal/mimalloc/mimalloc/types.h new file mode 100644 index 000000000000000..7616f37e4b978f7 --- /dev/null +++ 
b/Include/internal/mimalloc/mimalloc/types.h @@ -0,0 +1,670 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_TYPES_H +#define MIMALLOC_TYPES_H + +// -------------------------------------------------------------------------- +// This file contains the main type definitions for mimalloc: +// mi_heap_t : all data for a thread-local heap, contains +// lists of all managed heap pages. +// mi_segment_t : a larger chunk of memory (32GiB) from where pages +// are allocated. +// mi_page_t : a mimalloc page (usually 64KiB or 512KiB) from +// where objects are allocated. +// -------------------------------------------------------------------------- + + +#include // ptrdiff_t +#include // uintptr_t, uint16_t, etc +#include "mimalloc/atomic.h" // _Atomic + +#ifdef _MSC_VER +#pragma warning(disable:4214) // bitfield is not int +#endif + +// Minimal alignment necessary. On most platforms 16 bytes are needed +// due to SSE registers for example. This must be at least `sizeof(void*)` +#ifndef MI_MAX_ALIGN_SIZE +#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t) +#endif + +// ------------------------------------------------------ +// Variants +// ------------------------------------------------------ + +// Define NDEBUG in the release version to disable assertions. +// #define NDEBUG + +// Define MI_TRACK_ to enable tracking support +// #define MI_TRACK_VALGRIND 1 +// #define MI_TRACK_ASAN 1 +// #define MI_TRACK_ETW 1 + +// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance). +// #define MI_STAT 1 + +// Define MI_SECURE to enable security mitigations +// #define MI_SECURE 1 // guard page around metadata +// #define MI_SECURE 2 // guard page around each mimalloc page +// #define MI_SECURE 3 // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free) +// #define MI_SECURE 4 // checks for double free. (may be more expensive) + +#if !defined(MI_SECURE) +#define MI_SECURE 0 +#endif + +// Define MI_DEBUG for debug mode +// #define MI_DEBUG 1 // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free. +// #define MI_DEBUG 2 // + internal assertion checks +// #define MI_DEBUG 3 // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON) +#if !defined(MI_DEBUG) +#if !defined(NDEBUG) || defined(_DEBUG) +#define MI_DEBUG 2 +#else +#define MI_DEBUG 0 +#endif +#endif + +// Reserve extra padding at the end of each block to be more resilient against heap block overflows. +// The padding can detect buffer overflow on free. +#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW)) +#define MI_PADDING 1 +#endif + +// Check padding bytes; allows byte-precise buffer overflow detection +#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1) +#define MI_PADDING_CHECK 1 +#endif + + +// Encoded free lists allow detection of corrupted free lists +// and can detect buffer overflows, modify after free, and double `free`s. 
+#if (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_ENCODE_FREELIST  1
+#endif
+
+
+// We used to abandon huge pages but to eagerly deallocate if freed from another thread,
+// but that makes it not possible to visit them during a heap walk or include them in a
+// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks if freed from
+// another thread so most memory is available until it gets properly freed by the owning thread.
+// #define MI_HUGE_PAGE_ABANDON 1
+
+
+// ------------------------------------------------------
+// Platform specific values
+// ------------------------------------------------------
+
+// ------------------------------------------------------
+// Size of a pointer.
+// We assume that `sizeof(void*)==sizeof(intptr_t)`
+// and it holds for all platforms we know of.
+//
+// However, the C standard only requires that:
+//  p == (void*)((intptr_t)p))
+// but we also need:
+//  i == (intptr_t)((void*)i)
+// or otherwise one might define an intptr_t type that is larger than a pointer...
+// ------------------------------------------------------
+
+#if INTPTR_MAX > INT64_MAX
+# define MI_INTPTR_SHIFT (4)  // assume 128-bit (as on arm CHERI for example)
+#elif INTPTR_MAX == INT64_MAX
+# define MI_INTPTR_SHIFT (3)
+#elif INTPTR_MAX == INT32_MAX
+# define MI_INTPTR_SHIFT (2)
+#else
+#error platform pointers must be 32, 64, or 128 bits
+#endif
+
+#if SIZE_MAX == UINT64_MAX
+# define MI_SIZE_SHIFT (3)
+typedef int64_t  mi_ssize_t;
+#elif SIZE_MAX == UINT32_MAX
+# define MI_SIZE_SHIFT (2)
+typedef int32_t  mi_ssize_t;
+#else
+#error platform objects must be 32 or 64 bits
+#endif
+
+#if (SIZE_MAX/2) > LONG_MAX
+# define MI_ZU(x)  x##ULL
+# define MI_ZI(x)  x##LL
+#else
+# define MI_ZU(x)  x##UL
+# define MI_ZI(x)  x##L
+#endif
+
+#define MI_INTPTR_SIZE  (1<<MI_INTPTR_SHIFT)
+#define MI_INTPTR_BITS  (MI_INTPTR_SIZE*8)
+
+#define MI_SIZE_SIZE  (1<<MI_SIZE_SHIFT)
+#define MI_SIZE_BITS  (MI_SIZE_SIZE*8)
+
+#define MI_KiB  (MI_ZU(1024))
+#define MI_MiB  (MI_KiB*MI_KiB)
+#define MI_GiB  (MI_MiB*MI_KiB)
+
+
+// ------------------------------------------------------
+// Main internal data-structures
+// ------------------------------------------------------
+
+// Main tuning parameters for segment and page sizes
+// Sizes for 64-bit (usually divide by two for 32-bit)
+#define MI_SEGMENT_SLICE_SHIFT  (13 + MI_INTPTR_SHIFT)   // 64KiB (32KiB on 32-bit)
+
+#if MI_INTPTR_SIZE > 4
+#define MI_SEGMENT_SHIFT  ( 9 + MI_SEGMENT_SLICE_SHIFT)  // 32MiB
+#else
+#define MI_SEGMENT_SHIFT  ( 7 + MI_SEGMENT_SLICE_SHIFT)  // 4MiB on 32-bit
+#endif
+
+#define MI_SMALL_PAGE_SHIFT   (MI_SEGMENT_SLICE_SHIFT)    // 64KiB
+#define MI_MEDIUM_PAGE_SHIFT  ( 3 + MI_SMALL_PAGE_SHIFT)  // 512KiB
+
+
+// Derived constants
+#define MI_SEGMENT_SIZE        (MI_ZU(1)<<MI_SEGMENT_SHIFT)
+#define MI_SEGMENT_ALIGN       MI_SEGMENT_SIZE
+#define MI_SEGMENT_MASK        ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
+#define MI_SEGMENT_SLICE_SIZE  (MI_ZU(1)<<MI_SEGMENT_SLICE_SHIFT)
+#define MI_SLICES_PER_SEGMENT  (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024
+
+#define MI_SMALL_PAGE_SIZE     (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
+#define MI_MEDIUM_PAGE_SIZE    (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)
+
+#define MI_SMALL_OBJ_SIZE_MAX    (MI_SMALL_PAGE_SIZE/4)   // 8KiB on 64-bit
+#define MI_MEDIUM_OBJ_SIZE_MAX   (MI_MEDIUM_PAGE_SIZE/4)  // 128KiB on 64-bit
+#define MI_MEDIUM_OBJ_WSIZE_MAX  (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
+#define MI_LARGE_OBJ_SIZE_MAX    (MI_SEGMENT_SIZE/2)      // 32MiB on 64-bit
+#define MI_LARGE_OBJ_WSIZE_MAX   (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
+
+// Maximum number of size classes. (spaced exponentially in 12.5% increments)
+#define MI_BIN_HUGE  (73U)
+
+#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
+#error "mimalloc internal: define more bins"
+#endif
+
+// Maximum slice offset (15)
+#define MI_MAX_SLICE_OFFSET  ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
+
+// Used as a special value to encode block sizes in 32 bits.
+#define MI_HUGE_BLOCK_SIZE  ((uint32_t)(2*MI_GiB))
+
+// blocks up to this size are always allocated aligned
+#define MI_MAX_ALIGN_GUARANTEE  (8*MI_MAX_ALIGN_SIZE)
+
+// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
+#define MI_ALIGNMENT_MAX  (MI_SEGMENT_SIZE >> 1)
+
+
+// ------------------------------------------------------
+// Mimalloc pages contain allocated blocks
+// ------------------------------------------------------
+
+// The free lists use encoded next fields
+// (Only actually encoded when MI_ENCODE_FREELIST is defined.)
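For intuition, here is a minimal round-trip of the `((p^k2)<<<k1)+k1` scheme described in `internal.h` above: a standalone sketch with made-up keys and a made-up pointer value; the real helpers are `mi_ptr_encode`/`mi_ptr_decode`, which use two per-page random keys.

#include <stdint.h>
#include <assert.h>

#define BITS (sizeof(uintptr_t)*8)
static uintptr_t rotl(uintptr_t x, uintptr_t n) { n %= BITS; return (n==0 ? x : (x<<n)|(x>>(BITS-n))); }
static uintptr_t rotr(uintptr_t x, uintptr_t n) { n %= BITS; return (n==0 ? x : (x>>n)|(x<<(BITS-n))); }

int main(void) {
  uintptr_t k1 = (uintptr_t)0x9e3779b97f4a7c15u;  // made-up keys (the allocator draws these at random)
  uintptr_t k2 = (uintptr_t)0xbf58476d1ce4e5b9u;
  uintptr_t p  = (uintptr_t)0x7f1234567890u;      // pretend block pointer
  uintptr_t enc = rotl(p ^ k2, k1) + k1;          // encode: ((p^k2)<<<k1)+k1
  uintptr_t dec = rotr(enc - k1, k1) ^ k2;        // decode reverses each step
  assert(dec == p);
  return 0;
}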
+typedef uintptr_t mi_encoded_t; + +// thread id's +typedef size_t mi_threadid_t; + +// free lists contain blocks +typedef struct mi_block_s { + mi_encoded_t next; +} mi_block_t; + + +// The delayed flags are used for efficient multi-threaded free-ing +typedef enum mi_delayed_e { + MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list + MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap + MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list + MI_NEVER_DELAYED_FREE = 3 // sticky, only resets on page reclaim +} mi_delayed_t; + + +// The `in_full` and `has_aligned` page flags are put in a union to efficiently +// test if both are false (`full_aligned == 0`) in the `mi_free` routine. +#if !MI_TSAN +typedef union mi_page_flags_s { + uint8_t full_aligned; + struct { + uint8_t in_full : 1; + uint8_t has_aligned : 1; + } x; +} mi_page_flags_t; +#else +// under thread sanitizer, use a byte for each flag to suppress warning, issue #130 +typedef union mi_page_flags_s { + uint16_t full_aligned; + struct { + uint8_t in_full; + uint8_t has_aligned; + } x; +} mi_page_flags_t; +#endif + +// Thread free list. +// We use the bottom 2 bits of the pointer for mi_delayed_t flags +typedef uintptr_t mi_thread_free_t; + +// A page contains blocks of one specific size (`block_size`). +// Each page has three list of free blocks: +// `free` for blocks that can be allocated, +// `local_free` for freed blocks that are not yet available to `mi_malloc` +// `thread_free` for freed blocks by other threads +// The `local_free` and `thread_free` lists are migrated to the `free` list +// when it is exhausted. The separate `local_free` list is necessary to +// implement a monotonic heartbeat. The `thread_free` list is needed for +// avoiding atomic operations in the common case. +// +// +// `used - |thread_free|` == actual blocks that are in use (alive) +// `used - |thread_free| + |free| + |local_free| == capacity` +// +// We don't count `freed` (as |free|) but use `used` to reduce +// the number of memory accesses in the `mi_page_all_free` function(s). +// +// Notes: +// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`) +// - Using `uint16_t` does not seem to slow things down +// - The size is 8 words on 64-bit which helps the page index calculations +// (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10 +// and 12 are still good for address calculation) +// - To limit the structure size, the `xblock_size` is 32-bits only; for +// blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size +// - `thread_free` uses the bottom bits as a delayed-free flags to optimize +// concurrent frees where only the first concurrent free adds to the owning +// heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`). +// The invariant is that no-delayed-free is only set if there is +// at least one block that will be added, or as already been added, to +// the owning heap `thread_delayed_free` list. This guarantees that pages +// will be freed correctly even if only other threads free blocks. 
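To make the accounting above concrete, a debug-style sketch (hypothetical helpers, not part of this patch) that walks the three lists and checks the stated invariant, using only fields and functions declared in these headers:

// Hypothetical debug check for the invariant
//   used - |thread_free| + |free| + |local_free| == capacity
// (the list walks are O(n), so this only makes sense for assertions/tests)
static size_t mi_list_len_sketch(const mi_page_t* page, const mi_block_t* head) {
  size_t n = 0;
  for (const mi_block_t* b = head; b != NULL; b = mi_block_next(page, b)) n++;
  return n;
}

static bool mi_page_accounting_ok_sketch(const mi_page_t* page) {
  size_t nfree   = mi_list_len_sketch(page, page->free);
  size_t nlocal  = mi_list_len_sketch(page, page->local_free);
  size_t nthread = mi_list_len_sketch(page, mi_page_thread_free(page));
  return (page->used - nthread + nfree + nlocal == page->capacity);
}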
+typedef struct mi_page_s { + // "owned" by the segment + uint32_t slice_count; // slices in this page (0 if not a page) + uint32_t slice_offset; // distance from the actual page data slice (0 if a page) + uint8_t is_committed : 1; // `true` if the page virtual memory is committed + uint8_t is_zero_init : 1; // `true` if the page was initially zero initialized + + // layout like this to optimize access in `mi_malloc` and `mi_free` + uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear` + uint16_t reserved; // number of blocks reserved in memory + mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits) + uint8_t free_is_zero : 1; // `true` if the blocks in the free list are zero initialized + uint8_t retire_expire : 7; // expiration count for retired blocks + + mi_block_t* free; // list of available free blocks (`malloc` allocates from this list) + uint32_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`) + uint32_t xblock_size; // size available in each block (always `>0`) + mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) + + #if (MI_ENCODE_FREELIST || MI_PADDING) + uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary + #endif + + _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads + _Atomic(uintptr_t) xheap; + + struct mi_page_s* next; // next page owned by this thread with the same `block_size` + struct mi_page_s* prev; // previous page owned by this thread with the same `block_size` + + // 64-bit 9 words, 32-bit 12 words, (+2 for secure) + #if MI_INTPTR_SIZE==8 + uintptr_t padding[1]; + #endif +} mi_page_t; + + + +// ------------------------------------------------------ +// Mimalloc segments contain mimalloc pages +// ------------------------------------------------------ + +typedef enum mi_page_kind_e { + MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment + MI_PAGE_MEDIUM, // medium blocks go into medium pages inside a segment + MI_PAGE_LARGE, // larger blocks go into a page of just one block + MI_PAGE_HUGE, // huge blocks (> 16 MiB) are put into a single page in a single segment. +} mi_page_kind_t; + +typedef enum mi_segment_kind_e { + MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside. + MI_SEGMENT_HUGE, // > MI_LARGE_SIZE_MAX segment with just one huge page inside. +} mi_segment_kind_t; + +// ------------------------------------------------------ +// A segment holds a commit mask where a bit is set if +// the corresponding MI_COMMIT_SIZE area is committed. +// The MI_COMMIT_SIZE must be a multiple of the slice +// size. If it is equal we have the most fine grained +// decommit (but setting it higher can be more efficient). 
+// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
+// be committed in one go which can be set higher than
+// MI_COMMIT_SIZE for efficiency (while the decommit mask
+// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
+// ------------------------------------------------------
+
+#define MI_MINIMAL_COMMIT_SIZE      (1*MI_SEGMENT_SLICE_SIZE)
+#define MI_COMMIT_SIZE              (MI_SEGMENT_SLICE_SIZE)              // 64KiB
+#define MI_COMMIT_MASK_BITS         (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
+#define MI_COMMIT_MASK_FIELD_BITS    MI_SIZE_BITS
+#define MI_COMMIT_MASK_FIELD_COUNT  (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
+
+#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
+#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
+#endif
+
+typedef struct mi_commit_mask_s {
+  size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
+} mi_commit_mask_t;
+
+typedef mi_page_t  mi_slice_t;
+typedef int64_t    mi_msecs_t;
+
+
+// Memory can reside in arenas, be directly OS allocated, or be statically allocated. The memid keeps track of this.
+typedef enum mi_memkind_e {
+  MI_MEM_NONE,      // not allocated
+  MI_MEM_EXTERNAL,  // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
+  MI_MEM_STATIC,    // allocated in a static area and should not be freed (for arena meta data for example)
+  MI_MEM_OS,        // allocated from the OS
+  MI_MEM_OS_HUGE,   // allocated as huge os pages
+  MI_MEM_OS_REMAP,  // allocated in a remappable area (i.e. using `mremap`)
+  MI_MEM_ARENA      // allocated from an arena (the usual case)
+} mi_memkind_t;
+
+static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
+  return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP);
+}
+
+typedef struct mi_memid_os_info {
+  void*    base;       // actual base address of the block (used for offset aligned allocations)
+  size_t   alignment;  // alignment at allocation
+} mi_memid_os_info_t;
+
+typedef struct mi_memid_arena_info {
+  size_t         block_index;   // index in the arena
+  mi_arena_id_t  id;            // arena id (>= 1)
+  bool           is_exclusive;  // the arena can only be used for specific arena allocations
+} mi_memid_arena_info_t;
+
+typedef struct mi_memid_s {
+  union {
+    mi_memid_os_info_t     os;     // only used for MI_MEM_OS
+    mi_memid_arena_info_t  arena;  // only used for MI_MEM_ARENA
+  } mem;
+  bool          is_pinned;            // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
+  bool          initially_committed;  // `true` if the memory was originally allocated as committed
+  bool          initially_zero;       // `true` if the memory was originally zero initialized
+  mi_memkind_t  memkind;
+} mi_memid_t;
+
+
+// Segments are large allocated memory blocks (8MiB on 64-bit) from
+// the OS. Inside segments we allocate fixed-size _pages_ that
+// contain blocks.
+typedef struct mi_segment_s {
+  // constant fields
+  mi_memid_t        memid;            // memory id for arena allocation
+  bool              allow_decommit;
+  bool              allow_purge;
+  size_t            segment_size;
+
+  // segment fields
+  mi_msecs_t        purge_expire;
+  mi_commit_mask_t  purge_mask;
+  mi_commit_mask_t  commit_mask;
+
+  _Atomic(struct mi_segment_s*) abandoned_next;
+
+  // from here is zero initialized
+  struct mi_segment_s* next;            // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
+
+  size_t            abandoned;          // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
+  size_t            abandoned_visits;   // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
+  size_t            used;               // count of pages in use
+  uintptr_t         cookie;             // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
+
+  size_t            segment_slices;      // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
+  size_t            segment_info_slices; // initial slices used for segment info and possible guard pages.
+
+  // layout like this to optimize access in `mi_free`
+  mi_segment_kind_t kind;
+  size_t            slice_entries;       // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
+  _Atomic(mi_threadid_t) thread_id;      // unique id of the thread owning this segment
+
+  mi_slice_t        slices[MI_SLICES_PER_SEGMENT+1];  // one more for huge blocks with large alignment
+} mi_segment_t;
+
+
+// ------------------------------------------------------
+// Heaps
+// Provide first-class heaps to allocate from.
+// A heap just owns a set of pages for allocation and
+// can only allocate/reallocate from the thread that created it.
+// Freeing blocks can be done from any thread though.
+// Per thread, the segments are shared among its heaps.
+// Per thread, there is always a default heap that is
+// used for allocation; it is initialized to statically
+// point to an empty heap to avoid initialization checks
+// in the fast path.
+// ------------------------------------------------------
+
+// Thread local data
+typedef struct mi_tld_s mi_tld_t;
+
+// Pages of a certain block size are held in a queue.
+typedef struct mi_page_queue_s {
+  mi_page_t* first;
+  mi_page_t* last;
+  size_t     block_size;
+} mi_page_queue_t;
+
+#define MI_BIN_FULL  (MI_BIN_HUGE+1)
+
+// Random context
+typedef struct mi_random_cxt_s {
+  uint32_t input[16];
+  uint32_t output[16];
+  int      output_available;
+  bool     weak;
+} mi_random_ctx_t;
+
+
+// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
+#if (MI_PADDING)
+typedef struct mi_padding_s {
+  uint32_t canary;  // encoded block value to check validity of the padding (in case of overflow)
+  uint32_t delta;   // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
+} mi_padding_t;
+#define MI_PADDING_SIZE   (sizeof(mi_padding_t))
+#define MI_PADDING_WSIZE  ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
+#else
+#define MI_PADDING_SIZE   0
+#define MI_PADDING_WSIZE  0
+#endif
+
+#define MI_PAGES_DIRECT   (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
+
+
+// A heap owns a set of pages.
+struct mi_heap_s {
+  mi_tld_t*             tld;
+  mi_page_t*            pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size.
+  mi_page_queue_t       pages[MI_BIN_FULL + 1];              // queue of pages for each size class (or "bin")
+  _Atomic(mi_block_t*)  thread_delayed_free;
+  mi_threadid_t         thread_id;   // thread this heap belongs to
+  mi_arena_id_t         arena_id;    // arena id if the heap belongs to a specific arena (or 0)
+  uintptr_t             cookie;      // random cookie to verify pointers (see `_mi_ptr_cookie`)
+  uintptr_t             keys[2];     // two random keys used to encode the `thread_delayed_free` list
+  mi_random_ctx_t       random;      // random number context used for secure allocation
+  size_t                page_count;  // total number of pages in the `pages` queues.
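The `pages_free_direct` array above is what keeps the small-allocation fast path cheap: the request size in machine words indexes straight into it, so no bin search is needed. A minimal sketch of that lookup follows; the helper name is hypothetical and the real lookup in mimalloc is more careful about padding and bounds.

// Hypothetical illustration of the direct page lookup for small requests
// (valid only while the rounded word count stays within MI_PAGES_DIRECT).
static inline mi_page_t* heap_small_page_lookup(mi_heap_t* heap, size_t size) {
  size_t wsize = (size + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE;  // request size in machine words, rounded up
  return heap->pages_free_direct[wsize];                        // page whose `free` list can serve this size
}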
+ size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues) + size_t page_retired_max; // largest retired index into the `pages` array. + mi_heap_t* next; // list of heaps per thread + bool no_reclaim; // `true` if this heap should not reclaim abandoned pages +}; + + + +// ------------------------------------------------------ +// Debug +// ------------------------------------------------------ + +#if !defined(MI_DEBUG_UNINIT) +#define MI_DEBUG_UNINIT (0xD0) +#endif +#if !defined(MI_DEBUG_FREED) +#define MI_DEBUG_FREED (0xDF) +#endif +#if !defined(MI_DEBUG_PADDING) +#define MI_DEBUG_PADDING (0xDE) +#endif + +#if (MI_DEBUG) +// use our own assertion to print without memory allocation +void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func ); +#define mi_assert(expr) ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__)) +#else +#define mi_assert(x) +#endif + +#if (MI_DEBUG>1) +#define mi_assert_internal mi_assert +#else +#define mi_assert_internal(x) +#endif + +#if (MI_DEBUG>2) +#define mi_assert_expensive mi_assert +#else +#define mi_assert_expensive(x) +#endif + +// ------------------------------------------------------ +// Statistics +// ------------------------------------------------------ + +#ifndef MI_STAT +#if (MI_DEBUG>0) +#define MI_STAT 2 +#else +#define MI_STAT 0 +#endif +#endif + +typedef struct mi_stat_count_s { + int64_t allocated; + int64_t freed; + int64_t peak; + int64_t current; +} mi_stat_count_t; + +typedef struct mi_stat_counter_s { + int64_t total; + int64_t count; +} mi_stat_counter_t; + +typedef struct mi_stats_s { + mi_stat_count_t segments; + mi_stat_count_t pages; + mi_stat_count_t reserved; + mi_stat_count_t committed; + mi_stat_count_t reset; + mi_stat_count_t purged; + mi_stat_count_t page_committed; + mi_stat_count_t segments_abandoned; + mi_stat_count_t pages_abandoned; + mi_stat_count_t threads; + mi_stat_count_t normal; + mi_stat_count_t huge; + mi_stat_count_t large; + mi_stat_count_t malloc; + mi_stat_count_t segments_cache; + mi_stat_counter_t pages_extended; + mi_stat_counter_t mmap_calls; + mi_stat_counter_t commit_calls; + mi_stat_counter_t reset_calls; + mi_stat_counter_t purge_calls; + mi_stat_counter_t page_no_retire; + mi_stat_counter_t searches; + mi_stat_counter_t normal_count; + mi_stat_counter_t huge_count; + mi_stat_counter_t large_count; +#if MI_STAT>1 + mi_stat_count_t normal_bins[MI_BIN_HUGE+1]; +#endif +} mi_stats_t; + + +void _mi_stat_increase(mi_stat_count_t* stat, size_t amount); +void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount); +void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount); + +#if (MI_STAT) +#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount) +#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount) +#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount) +#else +#define mi_stat_increase(stat,amount) (void)0 +#define mi_stat_decrease(stat,amount) (void)0 +#define mi_stat_counter_increase(stat,amount) (void)0 +#endif + +#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount) +#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount) +#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount) + +// ------------------------------------------------------ +// Thread Local data 
+// ------------------------------------------------------
+
+// A "span" is an available range of slices. The span queues keep
+// track of slice spans of at most the given `slice_count` (but more than the previous size class).
+typedef struct mi_span_queue_s {
+  mi_slice_t* first;
+  mi_slice_t* last;
+  size_t      slice_count;
+} mi_span_queue_t;
+
+#define MI_SEGMENT_BIN_MAX (35)     // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
+
+// OS thread local data
+typedef struct mi_os_tld_s {
+  size_t       region_idx;   // start point for next allocation
+  mi_stats_t*  stats;        // points to tld stats
+} mi_os_tld_t;
+
+
+// Segments thread local data
+typedef struct mi_segments_tld_s {
+  mi_span_queue_t  spans[MI_SEGMENT_BIN_MAX+1];  // free slice spans inside segments
+  size_t           count;         // current number of segments
+  size_t           peak_count;    // peak number of segments
+  size_t           current_size;  // current size of all segments
+  size_t           peak_size;     // peak size of all segments
+  mi_stats_t*      stats;         // points to tld stats
+  mi_os_tld_t*     os;            // points to os stats
+} mi_segments_tld_t;
+
+// Thread local data
+struct mi_tld_s {
+  unsigned long long  heartbeat;     // monotonic heartbeat count
+  bool                recurse;       // true if deferred was called; used to prevent infinite recursion.
+  mi_heap_t*          heap_backing;  // backing heap of this thread (cannot be deleted)
+  mi_heap_t*          heaps;         // list of heaps in this thread (so we can abandon all when the thread terminates)
+  mi_segments_tld_t   segments;      // segment tld
+  mi_os_tld_t         os;            // os tld
+  mi_stats_t          stats;         // statistics
+};
+
+#endif
diff --git a/Include/internal/pycore_abstract.h b/Include/internal/pycore_abstract.h
index b1afb2dc7be65e6..3cc0afac4bd5b45 100644
--- a/Include/internal/pycore_abstract.h
+++ b/Include/internal/pycore_abstract.h
@@ -19,6 +19,42 @@ _PyIndex_Check(PyObject *obj)
 PyObject *_PyNumber_PowerNoMod(PyObject *lhs, PyObject *rhs);
 PyObject *_PyNumber_InPlacePowerNoMod(PyObject *lhs, PyObject *rhs);
 
+extern int _PyObject_HasLen(PyObject *o);
+
+/* === Sequence protocol ================================================ */
+
+#define PY_ITERSEARCH_COUNT 1
+#define PY_ITERSEARCH_INDEX 2
+#define PY_ITERSEARCH_CONTAINS 3
+
+/* Iterate over seq.
+
+   Result depends on the operation:
+
+   PY_ITERSEARCH_COUNT:  return # of times obj appears in seq; -1 if
+     error.
+   PY_ITERSEARCH_INDEX:  return 0-based index of first occurrence of
+     obj in seq; set ValueError and return -1 if none found;
+     also return -1 on error.
+   PY_ITERSEARCH_CONTAINS:  return 1 if obj in seq, else 0; -1 on
+     error. */
+extern Py_ssize_t _PySequence_IterSearch(PyObject *seq,
+                                         PyObject *obj, int operation);
+
+/* === Mapping protocol ================================================= */
+
+extern int _PyObject_RealIsInstance(PyObject *inst, PyObject *cls);
+
+extern int _PyObject_RealIsSubclass(PyObject *derived, PyObject *cls);
+
+// Convert Python int to Py_ssize_t. Do nothing if the argument is None.
+// Export for '_bisect' shared extension.
+PyAPI_FUNC(int) _Py_convert_optional_to_ssize_t(PyObject *, void *);
+
+// Same as PyNumber_Index() but can return an instance of a subclass of int.
+// Export for 'math' shared extension.
+PyAPI_FUNC(PyObject*) _PyNumber_Index(PyObject *o); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_ast.h b/Include/internal/pycore_ast.h index 36277efe9c5ca53..f222d485e0b54b9 100644 --- a/Include/internal/pycore_ast.h +++ b/Include/internal/pycore_ast.h @@ -10,7 +10,7 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_asdl.h" +#include "pycore_asdl.h" // _ASDL_SEQ_HEAD typedef struct _mod *mod_ty; @@ -51,6 +51,8 @@ typedef struct _pattern *pattern_ty; typedef struct _type_ignore *type_ignore_ty; +typedef struct _type_param *type_param_ty; + typedef struct { _ASDL_SEQ_HEAD @@ -147,6 +149,14 @@ typedef struct { asdl_type_ignore_seq *_Py_asdl_type_ignore_seq_new(Py_ssize_t size, PyArena *arena); +typedef struct { + _ASDL_SEQ_HEAD + type_param_ty typed_elements[1]; +} asdl_type_param_seq; + +asdl_type_param_seq *_Py_asdl_type_param_seq_new(Py_ssize_t size, PyArena + *arena); + enum _mod_kind {Module_kind=1, Interactive_kind=2, Expression_kind=3, FunctionType_kind=4}; @@ -176,12 +186,13 @@ struct _mod { enum _stmt_kind {FunctionDef_kind=1, AsyncFunctionDef_kind=2, ClassDef_kind=3, Return_kind=4, Delete_kind=5, Assign_kind=6, - AugAssign_kind=7, AnnAssign_kind=8, For_kind=9, - AsyncFor_kind=10, While_kind=11, If_kind=12, With_kind=13, - AsyncWith_kind=14, Match_kind=15, Raise_kind=16, Try_kind=17, - TryStar_kind=18, Assert_kind=19, Import_kind=20, - ImportFrom_kind=21, Global_kind=22, Nonlocal_kind=23, - Expr_kind=24, Pass_kind=25, Break_kind=26, Continue_kind=27}; + TypeAlias_kind=7, AugAssign_kind=8, AnnAssign_kind=9, + For_kind=10, AsyncFor_kind=11, While_kind=12, If_kind=13, + With_kind=14, AsyncWith_kind=15, Match_kind=16, + Raise_kind=17, Try_kind=18, TryStar_kind=19, Assert_kind=20, + Import_kind=21, ImportFrom_kind=22, Global_kind=23, + Nonlocal_kind=24, Expr_kind=25, Pass_kind=26, Break_kind=27, + Continue_kind=28}; struct _stmt { enum _stmt_kind kind; union { @@ -192,6 +203,7 @@ struct _stmt { asdl_expr_seq *decorator_list; expr_ty returns; string type_comment; + asdl_type_param_seq *type_params; } FunctionDef; struct { @@ -201,6 +213,7 @@ struct _stmt { asdl_expr_seq *decorator_list; expr_ty returns; string type_comment; + asdl_type_param_seq *type_params; } AsyncFunctionDef; struct { @@ -209,6 +222,7 @@ struct _stmt { asdl_keyword_seq *keywords; asdl_stmt_seq *body; asdl_expr_seq *decorator_list; + asdl_type_param_seq *type_params; } ClassDef; struct { @@ -225,6 +239,12 @@ struct _stmt { string type_comment; } Assign; + struct { + expr_ty name; + asdl_type_param_seq *type_params; + expr_ty value; + } TypeAlias; + struct { expr_ty target; operator_ty op; @@ -630,6 +650,30 @@ struct _type_ignore { } v; }; +enum _type_param_kind {TypeVar_kind=1, ParamSpec_kind=2, TypeVarTuple_kind=3}; +struct _type_param { + enum _type_param_kind kind; + union { + struct { + identifier name; + expr_ty bound; + } TypeVar; + + struct { + identifier name; + } ParamSpec; + + struct { + identifier name; + } TypeVarTuple; + + } v; + int lineno; + int col_offset; + int end_lineno; + int end_col_offset; +}; + // Note: these macros affect function definitions, not only call sites. 
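As a usage sketch of the new type-parameter nodes, the snippet below builds the `type_params` sequence that a definition like `def f[T](...)` would carry on its FunctionDef node, using the sequence constructor above and the `_PyAST_TypeVar` constructor declared just below. This is only an illustration, not how the parser itself builds the tree; `arena`, `name_T`, and the position values are placeholders.

// Hypothetical illustration: a one-element type_params sequence holding an unbounded TypeVar "T".
static asdl_type_param_seq *
make_typevar_params(identifier name_T, PyArena *arena)
{
    asdl_type_param_seq *params = _Py_asdl_type_param_seq_new(1, arena);
    if (params == NULL) {
        return NULL;
    }
    type_param_ty tv = _PyAST_TypeVar(name_T, /*bound=*/NULL,
                                      /*lineno=*/1, /*col_offset=*/0,
                                      /*end_lineno=*/1, /*end_col_offset=*/1, arena);
    if (tv == NULL) {
        return NULL;
    }
    asdl_seq_SET(params, 0, tv);   // asdl_seq_SET() comes from pycore_asdl.h
    return params;
}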
mod_ty _PyAST_Module(asdl_stmt_seq * body, asdl_type_ignore_seq * type_ignores, @@ -640,19 +684,20 @@ mod_ty _PyAST_FunctionType(asdl_expr_seq * argtypes, expr_ty returns, PyArena *arena); stmt_ty _PyAST_FunctionDef(identifier name, arguments_ty args, asdl_stmt_seq * body, asdl_expr_seq * decorator_list, expr_ty - returns, string type_comment, int lineno, int - col_offset, int end_lineno, int end_col_offset, - PyArena *arena); + returns, string type_comment, asdl_type_param_seq * + type_params, int lineno, int col_offset, int + end_lineno, int end_col_offset, PyArena *arena); stmt_ty _PyAST_AsyncFunctionDef(identifier name, arguments_ty args, asdl_stmt_seq * body, asdl_expr_seq * decorator_list, expr_ty returns, string - type_comment, int lineno, int col_offset, int + type_comment, asdl_type_param_seq * + type_params, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena); stmt_ty _PyAST_ClassDef(identifier name, asdl_expr_seq * bases, asdl_keyword_seq * keywords, asdl_stmt_seq * body, - asdl_expr_seq * decorator_list, int lineno, int - col_offset, int end_lineno, int end_col_offset, PyArena - *arena); + asdl_expr_seq * decorator_list, asdl_type_param_seq * + type_params, int lineno, int col_offset, int + end_lineno, int end_col_offset, PyArena *arena); stmt_ty _PyAST_Return(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena); stmt_ty _PyAST_Delete(asdl_expr_seq * targets, int lineno, int col_offset, int @@ -660,6 +705,9 @@ stmt_ty _PyAST_Delete(asdl_expr_seq * targets, int lineno, int col_offset, int stmt_ty _PyAST_Assign(asdl_expr_seq * targets, expr_ty value, string type_comment, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena); +stmt_ty _PyAST_TypeAlias(expr_ty name, asdl_type_param_seq * type_params, + expr_ty value, int lineno, int col_offset, int + end_lineno, int end_col_offset, PyArena *arena); stmt_ty _PyAST_AugAssign(expr_ty target, operator_ty op, expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena); @@ -844,6 +892,14 @@ pattern_ty _PyAST_MatchOr(asdl_pattern_seq * patterns, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena); type_ignore_ty _PyAST_TypeIgnore(int lineno, string tag, PyArena *arena); +type_param_ty _PyAST_TypeVar(identifier name, expr_ty bound, int lineno, int + col_offset, int end_lineno, int end_col_offset, + PyArena *arena); +type_param_ty _PyAST_ParamSpec(identifier name, int lineno, int col_offset, int + end_lineno, int end_col_offset, PyArena *arena); +type_param_ty _PyAST_TypeVarTuple(identifier name, int lineno, int col_offset, + int end_lineno, int end_col_offset, PyArena + *arena); PyObject* PyAST_mod2obj(mod_ty t); diff --git a/Include/internal/pycore_ast_state.h b/Include/internal/pycore_ast_state.h index f15b4905eed14b6..6ffd30aca7b11b7 100644 --- a/Include/internal/pycore_ast_state.h +++ b/Include/internal/pycore_ast_state.h @@ -2,6 +2,9 @@ #ifndef Py_INTERNAL_AST_STATE_H #define Py_INTERNAL_AST_STATE_H + +#include "pycore_lock.h" // _PyOnceFlag + #ifdef __cplusplus extern "C" { #endif @@ -11,7 +14,8 @@ extern "C" { #endif struct ast_state { - int initialized; + _PyOnceFlag once; + int finalized; int recursion_depth; int recursion_limit; PyObject *AST_type; @@ -118,6 +122,7 @@ struct ast_state { PyObject *Not_type; PyObject *Or_singleton; PyObject *Or_type; + PyObject *ParamSpec_type; PyObject *Pass_type; PyObject *Pow_singleton; PyObject *Pow_type; @@ -137,7 +142,10 @@ struct 
ast_state { PyObject *TryStar_type; PyObject *Try_type; PyObject *Tuple_type; + PyObject *TypeAlias_type; PyObject *TypeIgnore_type; + PyObject *TypeVarTuple_type; + PyObject *TypeVar_type; PyObject *UAdd_singleton; PyObject *UAdd_type; PyObject *USub_singleton; @@ -166,6 +174,7 @@ struct ast_state { PyObject *bases; PyObject *body; PyObject *boolop_type; + PyObject *bound; PyObject *cases; PyObject *cause; PyObject *cls; @@ -243,6 +252,8 @@ struct ast_state { PyObject *type_comment; PyObject *type_ignore_type; PyObject *type_ignores; + PyObject *type_param_type; + PyObject *type_params; PyObject *unaryop_type; PyObject *upper; PyObject *value; diff --git a/Include/internal/pycore_atexit.h b/Include/internal/pycore_atexit.h new file mode 100644 index 000000000000000..3966df70e2616fb --- /dev/null +++ b/Include/internal/pycore_atexit.h @@ -0,0 +1,63 @@ +#ifndef Py_INTERNAL_ATEXIT_H +#define Py_INTERNAL_ATEXIT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +//############### +// runtime atexit + +typedef void (*atexit_callbackfunc)(void); + +struct _atexit_runtime_state { + PyThread_type_lock mutex; +#define NEXITFUNCS 32 + atexit_callbackfunc callbacks[NEXITFUNCS]; + int ncallbacks; +}; + + +//################### +// interpreter atexit + +typedef void (*atexit_datacallbackfunc)(void *); + +typedef struct atexit_callback { + atexit_datacallbackfunc func; + void *data; + struct atexit_callback *next; +} atexit_callback; + +typedef struct { + PyObject *func; + PyObject *args; + PyObject *kwargs; +} atexit_py_callback; + +struct atexit_state { + atexit_callback *ll_callbacks; + atexit_callback *last_ll_callback; + + // XXX The rest of the state could be moved to the atexit module state + // and a low-level callback added for it during module exec. + // For the moment we leave it here. + atexit_py_callback **callbacks; + int ncallbacks; + int callback_len; +}; + +// Export for '_xxinterpchannels' shared extension +PyAPI_FUNC(int) _Py_AtExit( + PyInterpreterState *interp, + atexit_datacallbackfunc func, + void *data); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_ATEXIT_H */ diff --git a/Include/internal/pycore_atomic.h b/Include/internal/pycore_atomic.h deleted file mode 100644 index 425d69f868b52b2..000000000000000 --- a/Include/internal/pycore_atomic.h +++ /dev/null @@ -1,557 +0,0 @@ -#ifndef Py_ATOMIC_H -#define Py_ATOMIC_H -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef Py_BUILD_CORE -# error "this header requires Py_BUILD_CORE define" -#endif - -#include "dynamic_annotations.h" /* _Py_ANNOTATE_MEMORY_ORDER */ -#include "pyconfig.h" - -#ifdef HAVE_STD_ATOMIC -# include -#endif - - -#if defined(_MSC_VER) -#include -#if defined(_M_IX86) || defined(_M_X64) -# include -#endif -#endif - -/* This is modeled after the atomics interface from C1x, according to - * the draft at - * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf. - * Operations and types are named the same except with a _Py_ prefix - * and have the same semantics. - * - * Beware, the implementations here are deep magic. 
- */ - -#if defined(HAVE_STD_ATOMIC) - -typedef enum _Py_memory_order { - _Py_memory_order_relaxed = memory_order_relaxed, - _Py_memory_order_acquire = memory_order_acquire, - _Py_memory_order_release = memory_order_release, - _Py_memory_order_acq_rel = memory_order_acq_rel, - _Py_memory_order_seq_cst = memory_order_seq_cst -} _Py_memory_order; - -typedef struct _Py_atomic_address { - atomic_uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - atomic_int _value; -} _Py_atomic_int; - -#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \ - atomic_signal_fence(ORDER) - -#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \ - atomic_thread_fence(ORDER) - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER) - -// Use builtin atomic operations in GCC >= 4.7 and clang -#elif defined(HAVE_BUILTIN_ATOMIC) - -typedef enum _Py_memory_order { - _Py_memory_order_relaxed = __ATOMIC_RELAXED, - _Py_memory_order_acquire = __ATOMIC_ACQUIRE, - _Py_memory_order_release = __ATOMIC_RELEASE, - _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL, - _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST -} _Py_memory_order; - -typedef struct _Py_atomic_address { - uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - int _value; -} _Py_atomic_int; - -#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \ - __atomic_signal_fence(ORDER) - -#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \ - __atomic_thread_fence(ORDER) - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - (assert((ORDER) == __ATOMIC_RELAXED \ - || (ORDER) == __ATOMIC_SEQ_CST \ - || (ORDER) == __ATOMIC_RELEASE), \ - __atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)) - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - (assert((ORDER) == __ATOMIC_RELAXED \ - || (ORDER) == __ATOMIC_SEQ_CST \ - || (ORDER) == __ATOMIC_ACQUIRE \ - || (ORDER) == __ATOMIC_CONSUME), \ - __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER)) - -/* Only support GCC (for expression statements) and x86 (for simple - * atomic semantics) and MSVC x86/x64/ARM */ -#elif defined(__GNUC__) && (defined(__i386__) || defined(__amd64)) -typedef enum _Py_memory_order { - _Py_memory_order_relaxed, - _Py_memory_order_acquire, - _Py_memory_order_release, - _Py_memory_order_acq_rel, - _Py_memory_order_seq_cst -} _Py_memory_order; - -typedef struct _Py_atomic_address { - uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - int _value; -} _Py_atomic_int; - - -static __inline__ void -_Py_atomic_signal_fence(_Py_memory_order order) -{ - if (order != _Py_memory_order_relaxed) - __asm__ volatile("":::"memory"); -} - -static __inline__ void -_Py_atomic_thread_fence(_Py_memory_order order) -{ - if (order != _Py_memory_order_relaxed) - __asm__ volatile("mfence":::"memory"); -} - -/* Tell the race checker about this operation's effects. 
*/ -static __inline__ void -_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order) -{ - (void)address; /* shut up -Wunused-parameter */ - switch(order) { - case _Py_memory_order_release: - case _Py_memory_order_acq_rel: - case _Py_memory_order_seq_cst: - _Py_ANNOTATE_HAPPENS_BEFORE(address); - break; - case _Py_memory_order_relaxed: - case _Py_memory_order_acquire: - break; - } - switch(order) { - case _Py_memory_order_acquire: - case _Py_memory_order_acq_rel: - case _Py_memory_order_seq_cst: - _Py_ANNOTATE_HAPPENS_AFTER(address); - break; - case _Py_memory_order_relaxed: - case _Py_memory_order_release: - break; - } -} - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - __extension__ ({ \ - __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \ - __typeof__(atomic_val->_value) new_val = NEW_VAL;\ - volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \ - _Py_memory_order order = ORDER; \ - _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \ - \ - /* Perform the operation. */ \ - _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \ - switch(order) { \ - case _Py_memory_order_release: \ - _Py_atomic_signal_fence(_Py_memory_order_release); \ - /* fallthrough */ \ - case _Py_memory_order_relaxed: \ - *volatile_data = new_val; \ - break; \ - \ - case _Py_memory_order_acquire: \ - case _Py_memory_order_acq_rel: \ - case _Py_memory_order_seq_cst: \ - __asm__ volatile("xchg %0, %1" \ - : "+r"(new_val) \ - : "m"(atomic_val->_value) \ - : "memory"); \ - break; \ - } \ - _Py_ANNOTATE_IGNORE_WRITES_END(); \ - }) - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - __extension__ ({ \ - __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \ - __typeof__(atomic_val->_value) result; \ - volatile __typeof__(result) *volatile_data = &atomic_val->_value; \ - _Py_memory_order order = ORDER; \ - _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \ - \ - /* Perform the operation. */ \ - _Py_ANNOTATE_IGNORE_READS_BEGIN(); \ - switch(order) { \ - case _Py_memory_order_release: \ - case _Py_memory_order_acq_rel: \ - case _Py_memory_order_seq_cst: \ - /* Loads on x86 are not releases by default, so need a */ \ - /* thread fence. */ \ - _Py_atomic_thread_fence(_Py_memory_order_release); \ - break; \ - default: \ - /* No fence */ \ - break; \ - } \ - result = *volatile_data; \ - switch(order) { \ - case _Py_memory_order_acquire: \ - case _Py_memory_order_acq_rel: \ - case _Py_memory_order_seq_cst: \ - /* Loads on x86 are automatically acquire operations so */ \ - /* can get by with just a compiler fence. */ \ - _Py_atomic_signal_fence(_Py_memory_order_acquire); \ - break; \ - default: \ - /* No fence */ \ - break; \ - } \ - _Py_ANNOTATE_IGNORE_READS_END(); \ - result; \ - }) - -#elif defined(_MSC_VER) -/* _Interlocked* functions provide a full memory barrier and are therefore - enough for acq_rel and seq_cst. If the HLE variants aren't available - in hardware they will fall back to a full memory barrier as well. - - This might affect performance but likely only in some very specific and - hard to measure scenario. 
-*/ -#if defined(_M_IX86) || defined(_M_X64) -typedef enum _Py_memory_order { - _Py_memory_order_relaxed, - _Py_memory_order_acquire, - _Py_memory_order_release, - _Py_memory_order_acq_rel, - _Py_memory_order_seq_cst -} _Py_memory_order; - -typedef struct _Py_atomic_address { - volatile uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - volatile int _value; -} _Py_atomic_int; - - -#if defined(_M_X64) -#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \ - switch (ORDER) { \ - case _Py_memory_order_acquire: \ - _InterlockedExchange64_HLEAcquire((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \ - break; \ - case _Py_memory_order_release: \ - _InterlockedExchange64_HLERelease((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \ - break; \ - default: \ - _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \ - break; \ - } -#else -#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0); -#endif - -#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \ - switch (ORDER) { \ - case _Py_memory_order_acquire: \ - _InterlockedExchange_HLEAcquire((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \ - break; \ - case _Py_memory_order_release: \ - _InterlockedExchange_HLERelease((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \ - break; \ - default: \ - _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \ - break; \ - } - -#if defined(_M_X64) -/* This has to be an intptr_t for now. - gil_created() uses -1 as a sentinel value, if this returns - a uintptr_t it will do an unsigned compare and crash -*/ -inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) { - __int64 old; - switch (order) { - case _Py_memory_order_acquire: - { - do { - old = *value; - } while(_InterlockedCompareExchange64_HLEAcquire((volatile __int64*)value, old, old) != old); - break; - } - case _Py_memory_order_release: - { - do { - old = *value; - } while(_InterlockedCompareExchange64_HLERelease((volatile __int64*)value, old, old) != old); - break; - } - case _Py_memory_order_relaxed: - old = *value; - break; - default: - { - do { - old = *value; - } while(_InterlockedCompareExchange64((volatile __int64*)value, old, old) != old); - break; - } - } - return old; -} - -#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \ - _Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER)) - -#else -#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value) -#endif - -inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) { - long old; - switch (order) { - case _Py_memory_order_acquire: - { - do { - old = *value; - } while(_InterlockedCompareExchange_HLEAcquire((volatile long*)value, old, old) != old); - break; - } - case _Py_memory_order_release: - { - do { - old = *value; - } while(_InterlockedCompareExchange_HLERelease((volatile long*)value, old, old) != old); - break; - } - case _Py_memory_order_relaxed: - old = *value; - break; - default: - { - do { - old = *value; - } while(_InterlockedCompareExchange((volatile long*)value, old, old) != old); - break; - } - } - return old; -} - -#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \ - _Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER)) - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - if (sizeof((ATOMIC_VAL)->_value) == 8) { \ - _Py_atomic_store_64bit((ATOMIC_VAL), NEW_VAL, ORDER) } else { \ - 
_Py_atomic_store_32bit((ATOMIC_VAL), NEW_VAL, ORDER) } - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - ( \ - sizeof((ATOMIC_VAL)->_value) == 8 ? \ - _Py_atomic_load_64bit((ATOMIC_VAL), ORDER) : \ - _Py_atomic_load_32bit((ATOMIC_VAL), ORDER) \ - ) -#elif defined(_M_ARM) || defined(_M_ARM64) -typedef enum _Py_memory_order { - _Py_memory_order_relaxed, - _Py_memory_order_acquire, - _Py_memory_order_release, - _Py_memory_order_acq_rel, - _Py_memory_order_seq_cst -} _Py_memory_order; - -typedef struct _Py_atomic_address { - volatile uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - volatile int _value; -} _Py_atomic_int; - - -#if defined(_M_ARM64) -#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \ - switch (ORDER) { \ - case _Py_memory_order_acquire: \ - _InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \ - break; \ - case _Py_memory_order_release: \ - _InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \ - break; \ - default: \ - _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \ - break; \ - } -#else -#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0); -#endif - -#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \ - switch (ORDER) { \ - case _Py_memory_order_acquire: \ - _InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \ - break; \ - case _Py_memory_order_release: \ - _InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \ - break; \ - default: \ - _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \ - break; \ - } - -#if defined(_M_ARM64) -/* This has to be an intptr_t for now. - gil_created() uses -1 as a sentinel value, if this returns - a uintptr_t it will do an unsigned compare and crash -*/ -inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) { - uintptr_t old; - switch (order) { - case _Py_memory_order_acquire: - { - do { - old = *value; - } while(_InterlockedCompareExchange64_acq(value, old, old) != old); - break; - } - case _Py_memory_order_release: - { - do { - old = *value; - } while(_InterlockedCompareExchange64_rel(value, old, old) != old); - break; - } - case _Py_memory_order_relaxed: - old = *value; - break; - default: - { - do { - old = *value; - } while(_InterlockedCompareExchange64(value, old, old) != old); - break; - } - } - return old; -} - -#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \ - _Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER)) - -#else -#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value) -#endif - -inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) { - int old; - switch (order) { - case _Py_memory_order_acquire: - { - do { - old = *value; - } while(_InterlockedCompareExchange_acq(value, old, old) != old); - break; - } - case _Py_memory_order_release: - { - do { - old = *value; - } while(_InterlockedCompareExchange_rel(value, old, old) != old); - break; - } - case _Py_memory_order_relaxed: - old = *value; - break; - default: - { - do { - old = *value; - } while(_InterlockedCompareExchange(value, old, old) != old); - break; - } - } - return old; -} - -#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \ - _Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER)) - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - if (sizeof((ATOMIC_VAL)->_value) == 8) { \ - 
_Py_atomic_store_64bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) } else { \ - _Py_atomic_store_32bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) } - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - ( \ - sizeof((ATOMIC_VAL)->_value) == 8 ? \ - _Py_atomic_load_64bit((ATOMIC_VAL), (ORDER)) : \ - _Py_atomic_load_32bit((ATOMIC_VAL), (ORDER)) \ - ) -#endif -#else /* !gcc x86 !_msc_ver */ -typedef enum _Py_memory_order { - _Py_memory_order_relaxed, - _Py_memory_order_acquire, - _Py_memory_order_release, - _Py_memory_order_acq_rel, - _Py_memory_order_seq_cst -} _Py_memory_order; - -typedef struct _Py_atomic_address { - uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - int _value; -} _Py_atomic_int; -/* Fall back to other compilers and processors by assuming that simple - volatile accesses are atomic. This is false, so people should port - this. */ -#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0) -#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0) -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - ((ATOMIC_VAL)->_value = NEW_VAL) -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - ((ATOMIC_VAL)->_value) -#endif - -/* Standardized shortcuts. */ -#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \ - _Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_seq_cst) -#define _Py_atomic_load(ATOMIC_VAL) \ - _Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_seq_cst) - -/* Python-local extensions */ - -#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \ - _Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_relaxed) -#define _Py_atomic_load_relaxed(ATOMIC_VAL) \ - _Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_relaxed) - -#ifdef __cplusplus -} -#endif -#endif /* Py_ATOMIC_H */ diff --git a/Include/internal/pycore_atomic_funcs.h b/Include/internal/pycore_atomic_funcs.h deleted file mode 100644 index a708789cea733bb..000000000000000 --- a/Include/internal/pycore_atomic_funcs.h +++ /dev/null @@ -1,94 +0,0 @@ -/* Atomic functions: similar to pycore_atomic.h, but don't need - to declare variables as atomic. - - Py_ssize_t type: - - * value = _Py_atomic_size_get(&var) - * _Py_atomic_size_set(&var, value) - - Use sequentially-consistent ordering (__ATOMIC_SEQ_CST memory order): - enforce total ordering with all other atomic functions. 
-*/ -#ifndef Py_ATOMIC_FUNC_H -#define Py_ATOMIC_FUNC_H -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef Py_BUILD_CORE -# error "this header requires Py_BUILD_CORE define" -#endif - -#if defined(_MSC_VER) -# include // _InterlockedExchange() -#endif - - -// Use builtin atomic operations in GCC >= 4.7 and clang -#ifdef HAVE_BUILTIN_ATOMIC - -static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var) -{ - return __atomic_load_n(var, __ATOMIC_SEQ_CST); -} - -static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value) -{ - __atomic_store_n(var, value, __ATOMIC_SEQ_CST); -} - -#elif defined(_MSC_VER) - -static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var) -{ -#if SIZEOF_VOID_P == 8 - Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var)); - volatile __int64 *volatile_var = (volatile __int64 *)var; - __int64 old; - do { - old = *volatile_var; - } while(_InterlockedCompareExchange64(volatile_var, old, old) != old); -#else - Py_BUILD_ASSERT(sizeof(long) == sizeof(*var)); - volatile long *volatile_var = (volatile long *)var; - long old; - do { - old = *volatile_var; - } while(_InterlockedCompareExchange(volatile_var, old, old) != old); -#endif - return old; -} - -static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value) -{ -#if SIZEOF_VOID_P == 8 - Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var)); - volatile __int64 *volatile_var = (volatile __int64 *)var; - _InterlockedExchange64(volatile_var, value); -#else - Py_BUILD_ASSERT(sizeof(long) == sizeof(*var)); - volatile long *volatile_var = (volatile long *)var; - _InterlockedExchange(volatile_var, value); -#endif -} - -#else -// Fallback implementation using volatile - -static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var) -{ - volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var; - return *volatile_var; -} - -static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value) -{ - volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var; - *volatile_var = value; -} -#endif - -#ifdef __cplusplus -} -#endif -#endif /* Py_ATOMIC_FUNC_H */ diff --git a/Include/internal/pycore_bitutils.h b/Include/internal/pycore_bitutils.h index e6bf61ef425bd81..50f69377523818f 100644 --- a/Include/internal/pycore_bitutils.h +++ b/Include/internal/pycore_bitutils.h @@ -26,10 +26,10 @@ extern "C" { #endif #ifdef _MSC_VER - /* Get _byteswap_ushort(), _byteswap_ulong(), _byteswap_uint64() */ -# include +# include // _byteswap_uint64() #endif + static inline uint16_t _Py_bswap16(uint16_t word) { diff --git a/Include/internal/pycore_blocks_output_buffer.h b/Include/internal/pycore_blocks_output_buffer.h index 28cf6fba4eeba2e..573e10359b7bd27 100644 --- a/Include/internal/pycore_blocks_output_buffer.h +++ b/Include/internal/pycore_blocks_output_buffer.h @@ -40,6 +40,10 @@ extern "C" { #include "Python.h" +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + typedef struct { // List of bytes objects PyObject *list; @@ -314,4 +318,4 @@ _BlocksOutputBuffer_OnError(_BlocksOutputBuffer *buffer) #ifdef __cplusplus } #endif -#endif /* Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H */ \ No newline at end of file +#endif /* Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H */ diff --git a/Include/internal/pycore_bytesobject.h b/Include/internal/pycore_bytesobject.h index 9173a4f105f8008..94d421a9eb742ae 100644 --- a/Include/internal/pycore_bytesobject.h +++ b/Include/internal/pycore_bytesobject.h @@ -8,44 +8,144 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +extern PyObject* 
_PyBytes_FormatEx( + const char *format, + Py_ssize_t format_len, + PyObject *args, + int use_bytearray); -/* runtime lifecycle */ +extern PyObject* _PyBytes_FromHex( + PyObject *string, + int use_bytearray); -extern PyStatus _PyBytes_InitTypes(PyInterpreterState *); +// Helper for PyBytes_DecodeEscape that detects invalid escape chars. +// Export for test_peg_generator. +PyAPI_FUNC(PyObject*) _PyBytes_DecodeEscape(const char *, Py_ssize_t, + const char *, const char **); +/* _PyBytes_Join(sep, x) is like sep.join(x). sep must be PyBytesObject*, + x must be an iterable object. */ +extern PyObject* _PyBytes_Join(PyObject *sep, PyObject *x); -/* Substring Search. - - Returns the index of the first occurrence of - a substring ("needle") in a larger text ("haystack"). - If the needle is not found, return -1. - If the needle is found, add offset to the index. -*/ +// Substring Search. +// +// Returns the index of the first occurrence of +// a substring ("needle") in a larger text ("haystack"). +// If the needle is not found, return -1. +// If the needle is found, add offset to the index. +// +// Export for 'mmap' shared extension. PyAPI_FUNC(Py_ssize_t) _PyBytes_Find(const char *haystack, Py_ssize_t len_haystack, const char *needle, Py_ssize_t len_needle, Py_ssize_t offset); -/* Same as above, but search right-to-left */ +// Same as above, but search right-to-left. +// Export for 'mmap' shared extension. PyAPI_FUNC(Py_ssize_t) _PyBytes_ReverseFind(const char *haystack, Py_ssize_t len_haystack, const char *needle, Py_ssize_t len_needle, Py_ssize_t offset); -/** Helper function to implement the repeat and inplace repeat methods on a buffer - * - * len_dest is assumed to be an integer multiple of len_src. - * If src equals dest, then assume the operation is inplace. - * - * This method repeately doubles the number of bytes copied to reduce - * the number of invocations of memcpy. - */ +// Helper function to implement the repeat and inplace repeat methods on a +// buffer. +// +// len_dest is assumed to be an integer multiple of len_src. +// If src equals dest, then assume the operation is inplace. +// +// This method repeately doubles the number of bytes copied to reduce +// the number of invocations of memcpy. +// +// Export for 'array' shared extension. PyAPI_FUNC(void) _PyBytes_Repeat(char* dest, Py_ssize_t len_dest, const char* src, Py_ssize_t len_src); +/* --- _PyBytesWriter ----------------------------------------------------- */ + +/* The _PyBytesWriter structure is big: it contains an embedded "stack buffer". + A _PyBytesWriter variable must be declared at the end of variables in a + function to optimize the memory allocation on the stack. */ +typedef struct { + /* bytes, bytearray or NULL (when the small buffer is used) */ + PyObject *buffer; + + /* Number of allocated size. */ + Py_ssize_t allocated; + + /* Minimum number of allocated bytes, + incremented by _PyBytesWriter_Prepare() */ + Py_ssize_t min_size; + + /* If non-zero, use a bytearray instead of a bytes object for buffer. */ + int use_bytearray; + + /* If non-zero, overallocate the buffer (default: 0). + This flag must be zero if use_bytearray is non-zero. */ + int overallocate; + + /* Stack buffer */ + int use_small_buffer; + char small_buffer[512]; +} _PyBytesWriter; + +/* Initialize a bytes writer + + By default, the overallocation is disabled. Set the overallocate attribute + to control the allocation of the buffer. + + Export _PyBytesWriter API for '_pickle' shared extension. 
*/ +PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer); + +/* Get the buffer content and reset the writer. + Return a bytes object, or a bytearray object if use_bytearray is non-zero. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer, + void *str); + +/* Deallocate memory of a writer (clear its internal buffer). */ +PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer); + +/* Allocate the buffer to write size bytes. + Return the pointer to the beginning of buffer data. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer, + Py_ssize_t size); + +/* Ensure that the buffer is large enough to write *size* bytes. + Add size to the writer minimum size (min_size attribute). + + str is the current pointer inside the buffer. + Return the updated current pointer inside the buffer. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(void*) _PyBytesWriter_Prepare(_PyBytesWriter *writer, + void *str, + Py_ssize_t size); + +/* Resize the buffer to make it larger. + The new buffer may be larger than size bytes because of overallocation. + Return the updated current pointer inside the buffer. + Raise an exception and return NULL on error. + + Note: size must be greater than the number of allocated bytes in the writer. + + This function doesn't use the writer minimum size (min_size attribute). + + See also _PyBytesWriter_Prepare(). + */ +PyAPI_FUNC(void*) _PyBytesWriter_Resize(_PyBytesWriter *writer, + void *str, + Py_ssize_t size); + +/* Write bytes. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer, + void *str, + const void *bytes, + Py_ssize_t size); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_call.h b/Include/internal/pycore_call.h index 55378e3dfebf24d..c92028a01299e2d 100644 --- a/Include/internal/pycore_call.h +++ b/Include/internal/pycore_call.h @@ -8,31 +8,107 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +#include "pycore_identifier.h" // _Py_Identifier #include "pycore_pystate.h" // _PyThreadState_GET() -PyAPI_FUNC(PyObject *) _PyObject_Call_Prepend( +/* Suggested size (number of positional arguments) for arrays of PyObject* + allocated on a C stack to avoid allocating memory on the heap memory. Such + array is used to pass positional arguments to call functions of the + PyObject_Vectorcall() family. + + The size is chosen to not abuse the C stack and so limit the risk of stack + overflow. The size is also chosen to allow using the small stack for most + function calls of the Python standard library. On 64-bit CPU, it allocates + 40 bytes on the stack. */ +#define _PY_FASTCALL_SMALL_STACK 5 + + +// Export for 'math' shared extension, used via _PyObject_VectorcallTstate() +// static inline function. 
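The 40-byte figure quoted for _PY_FASTCALL_SMALL_STACK is simply 5 slots times an 8-byte pointer. Below is a hedged sketch of the convention that constant supports: a caller-owned array on the C stack passed to the public PyObject_Vectorcall(). The helper itself is hypothetical.

// Hypothetical illustration: call `callable(a, b)` without heap-allocating the argument array.
static PyObject *
call_two_args(PyObject *callable, PyObject *a, PyObject *b)
{
    PyObject *small_stack[_PY_FASTCALL_SMALL_STACK];  // 5 slots == 40 bytes on a 64-bit CPU
    small_stack[0] = a;
    small_stack[1] = b;
    return PyObject_Vectorcall(callable, small_stack, 2, NULL);
}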
+PyAPI_FUNC(PyObject*) _Py_CheckFunctionResult( + PyThreadState *tstate, + PyObject *callable, + PyObject *result, + const char *where); + +extern PyObject* _PyObject_Call_Prepend( PyThreadState *tstate, PyObject *callable, PyObject *obj, PyObject *args, PyObject *kwargs); -PyAPI_FUNC(PyObject *) _PyObject_FastCallDictTstate( +extern PyObject* _PyObject_VectorcallDictTstate( PyThreadState *tstate, PyObject *callable, PyObject *const *args, size_t nargsf, PyObject *kwargs); -PyAPI_FUNC(PyObject *) _PyObject_Call( +extern PyObject* _PyObject_Call( PyThreadState *tstate, PyObject *callable, PyObject *args, PyObject *kwargs); extern PyObject * _PyObject_CallMethodFormat( - PyThreadState *tstate, PyObject *callable, const char *format, ...); + PyThreadState *tstate, + PyObject *callable, + const char *format, + ...); + +// Export for 'array' shared extension +PyAPI_FUNC(PyObject*) _PyObject_CallMethod( + PyObject *obj, + PyObject *name, + const char *format, ...); + +extern PyObject* _PyObject_CallMethodIdObjArgs( + PyObject *obj, + _Py_Identifier *name, + ...); + +static inline PyObject * +_PyObject_VectorcallMethodId( + _Py_Identifier *name, PyObject *const *args, + size_t nargsf, PyObject *kwnames) +{ + PyObject *oname = _PyUnicode_FromId(name); /* borrowed */ + if (!oname) { + return _Py_NULL; + } + return PyObject_VectorcallMethod(oname, args, nargsf, kwnames); +} +static inline PyObject * +_PyObject_CallMethodIdNoArgs(PyObject *self, _Py_Identifier *name) +{ + size_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; + return _PyObject_VectorcallMethodId(name, &self, nargsf, _Py_NULL); +} + +static inline PyObject * +_PyObject_CallMethodIdOneArg(PyObject *self, _Py_Identifier *name, PyObject *arg) +{ + PyObject *args[2] = {self, arg}; + size_t nargsf = 2 | PY_VECTORCALL_ARGUMENTS_OFFSET; + assert(arg != NULL); + return _PyObject_VectorcallMethodId(name, args, nargsf, _Py_NULL); +} + + +/* === Vectorcall protocol (PEP 590) ============================= */ + +// Call callable using tp_call. Arguments are like PyObject_Vectorcall(), +// except that nargs is plainly the number of arguments without flags. +// +// Export for 'math' shared extension, used via _PyObject_VectorcallTstate() +// static inline function. +PyAPI_FUNC(PyObject*) _PyObject_MakeTpCall( + PyThreadState *tstate, + PyObject *callable, + PyObject *const *args, Py_ssize_t nargs, + PyObject *keywords); // Static inline variant of public PyVectorcall_Function(). 
static inline vectorcallfunc @@ -109,13 +185,19 @@ _PyObject_CallNoArgs(PyObject *func) { } -static inline PyObject * -_PyObject_FastCallTstate(PyThreadState *tstate, PyObject *func, PyObject *const *args, Py_ssize_t nargs) -{ - EVAL_CALL_STAT_INC_IF_FUNCTION(EVAL_CALL_API, func); - return _PyObject_VectorcallTstate(tstate, func, args, (size_t)nargs, NULL); -} +extern PyObject *const * +_PyStack_UnpackDict(PyThreadState *tstate, + PyObject *const *args, Py_ssize_t nargs, + PyObject *kwargs, PyObject **p_kwnames); + +extern void _PyStack_UnpackDict_Free( + PyObject *const *stack, + Py_ssize_t nargs, + PyObject *kwnames); +extern void _PyStack_UnpackDict_FreeNoDecRef( + PyObject *const *stack, + PyObject *kwnames); #ifdef __cplusplus } diff --git a/Include/internal/pycore_capsule.h b/Include/internal/pycore_capsule.h new file mode 100644 index 000000000000000..aa2c67f3a8f002c --- /dev/null +++ b/Include/internal/pycore_capsule.h @@ -0,0 +1,17 @@ +#ifndef Py_INTERNAL_PYCAPSULE_H +#define Py_INTERNAL_PYCAPSULE_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +// Export for '_socket' shared extension +PyAPI_FUNC(int) _PyCapsule_SetTraverse(PyObject *op, traverseproc traverse_func, inquiry clear_func); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_PYCAPSULE_H */ diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h index deda070a6dea79a..c372b7224fb047b 100644 --- a/Include/internal/pycore_ceval.h +++ b/Include/internal/pycore_ceval.h @@ -8,28 +8,55 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +#include "dynamic_annotations.h" // _Py_ANNOTATE_RWLOCK_CREATE + +#include "pycore_interp.h" // PyInterpreterState.eval_frame +#include "pycore_pystate.h" // _PyThreadState_GET() + /* Forward declarations */ struct pyruntimestate; struct _ceval_runtime_state; +// Export for '_lsprof' shared extension +PyAPI_FUNC(int) _PyEval_SetProfile(PyThreadState *tstate, Py_tracefunc func, PyObject *arg); + +extern int _PyEval_SetTrace(PyThreadState *tstate, Py_tracefunc func, PyObject *arg); + +extern int _PyEval_SetOpcodeTrace(PyFrameObject *f, bool enable); + +// Helper to look up a builtin object +// Export for 'array' shared extension +PyAPI_FUNC(PyObject*) _PyEval_GetBuiltin(PyObject *); + +extern PyObject* _PyEval_GetBuiltinId(_Py_Identifier *); + +extern void _PyEval_SetSwitchInterval(unsigned long microseconds); +extern unsigned long _PyEval_GetSwitchInterval(void); + +// Export for '_queue' shared extension +PyAPI_FUNC(int) _PyEval_MakePendingCalls(PyThreadState *); + #ifndef Py_DEFAULT_RECURSION_LIMIT # define Py_DEFAULT_RECURSION_LIMIT 1000 #endif -#include "pycore_interp.h" // PyInterpreterState.eval_frame -#include "pycore_pystate.h" // _PyThreadState_GET() - - extern void _Py_FinishPendingCalls(PyThreadState *tstate); -extern void _PyEval_InitRuntimeState(struct _ceval_runtime_state *); -extern void _PyEval_InitState(struct _ceval_state *, PyThread_type_lock); +extern void _PyEval_InitState(PyInterpreterState *, PyThread_type_lock); extern void _PyEval_FiniState(struct _ceval_state *ceval); -PyAPI_FUNC(void) _PyEval_SignalReceived(PyInterpreterState *interp); +extern void _PyEval_SignalReceived(PyInterpreterState *interp); + +// bitwise flags: +#define _Py_PENDING_MAINTHREADONLY 1 +#define _Py_PENDING_RAWFREE 2 + +// Export for '_testinternalcapi' shared extension PyAPI_FUNC(int) _PyEval_AddPendingCall( PyInterpreterState *interp, - int (*func)(void *), - 
void *arg); -PyAPI_FUNC(void) _PyEval_SignalAsyncExc(PyInterpreterState *interp); + _Py_pending_call_func func, + void *arg, + int flags); + +extern void _PyEval_SignalAsyncExc(PyInterpreterState *interp); #ifdef HAVE_FORK extern PyStatus _PyEval_ReInitThreads(PyThreadState *tstate); #endif @@ -96,11 +123,12 @@ _PyEval_Vector(PyThreadState *tstate, PyObject* const* args, size_t argcount, PyObject *kwnames); -extern int _PyEval_ThreadsInitialized(struct pyruntimestate *runtime); -extern PyStatus _PyEval_InitGIL(PyThreadState *tstate); +extern int _PyEval_ThreadsInitialized(void); +extern PyStatus _PyEval_InitGIL(PyThreadState *tstate, int own_gil); extern void _PyEval_FiniGIL(PyInterpreterState *interp); -extern void _PyEval_ReleaseLock(PyThreadState *tstate); +extern void _PyEval_AcquireLock(PyThreadState *tstate); +extern void _PyEval_ReleaseLock(PyInterpreterState *, PyThreadState *); extern void _PyEval_DeactivateOpCache(void); @@ -120,6 +148,8 @@ static inline int _Py_MakeRecCheck(PyThreadState *tstate) { } #endif +// Export for '_json' shared extension, used via _Py_EnterRecursiveCall() +// static inline function. PyAPI_FUNC(int) _Py_CheckRecursiveCall( PyThreadState *tstate, const char *where); @@ -150,8 +180,57 @@ extern struct _PyInterpreterFrame* _PyEval_GetFrame(void); extern PyObject* _Py_MakeCoro(PyFunctionObject *func); +/* Handle signals, pending calls, GIL drop request + and asynchronous exception */ extern int _Py_HandlePending(PyThreadState *tstate); +extern PyObject * _PyEval_GetFrameLocals(void); + +extern const binaryfunc _PyEval_BinaryOps[]; +int _PyEval_CheckExceptStarTypeValid(PyThreadState *tstate, PyObject* right); +int _PyEval_CheckExceptTypeValid(PyThreadState *tstate, PyObject* right); +int _PyEval_ExceptionGroupMatch(PyObject* exc_value, PyObject *match_type, PyObject **match, PyObject **rest); +void _PyEval_FormatAwaitableError(PyThreadState *tstate, PyTypeObject *type, int oparg); +void _PyEval_FormatExcCheckArg(PyThreadState *tstate, PyObject *exc, const char *format_str, PyObject *obj); +void _PyEval_FormatExcUnbound(PyThreadState *tstate, PyCodeObject *co, int oparg); +void _PyEval_FormatKwargsError(PyThreadState *tstate, PyObject *func, PyObject *kwargs); +PyObject *_PyEval_MatchClass(PyThreadState *tstate, PyObject *subject, PyObject *type, Py_ssize_t nargs, PyObject *kwargs); +PyObject *_PyEval_MatchKeys(PyThreadState *tstate, PyObject *map, PyObject *keys); +int _PyEval_UnpackIterable(PyThreadState *tstate, PyObject *v, int argcnt, int argcntafter, PyObject **sp); +void _PyEval_FrameClearAndPop(PyThreadState *tstate, _PyInterpreterFrame *frame); + + +#define _PY_GIL_DROP_REQUEST_BIT 0 +#define _PY_SIGNALS_PENDING_BIT 1 +#define _PY_CALLS_TO_DO_BIT 2 +#define _PY_ASYNC_EXCEPTION_BIT 3 +#define _PY_GC_SCHEDULED_BIT 4 + +/* Reserve a few bits for future use */ +#define _PY_EVAL_EVENTS_BITS 8 +#define _PY_EVAL_EVENTS_MASK ((1 << _PY_EVAL_EVENTS_BITS)-1) + +static inline void +_Py_set_eval_breaker_bit(PyInterpreterState *interp, uint32_t bit, uint32_t set) +{ + assert(set == 0 || set == 1); + uintptr_t to_set = set << bit; + uintptr_t mask = ((uintptr_t)1) << bit; + uintptr_t old = _Py_atomic_load_uintptr(&interp->ceval.eval_breaker); + if ((old & mask) == to_set) { + return; + } + uintptr_t new; + do { + new = (old & ~mask) | to_set; + } while (!_Py_atomic_compare_exchange_uintptr(&interp->ceval.eval_breaker, &old, new)); +} + +static inline bool +_Py_eval_breaker_bit_is_set(PyInterpreterState *interp, int32_t bit) +{ + return 
_Py_atomic_load_uintptr_relaxed(&interp->ceval.eval_breaker) & (((uintptr_t)1) << bit); +} #ifdef __cplusplus diff --git a/Include/internal/pycore_ceval_state.h b/Include/internal/pycore_ceval_state.h new file mode 100644 index 000000000000000..072bbcda0c3c828 --- /dev/null +++ b/Include/internal/pycore_ceval_state.h @@ -0,0 +1,97 @@ +#ifndef Py_INTERNAL_CEVAL_STATE_H +#define Py_INTERNAL_CEVAL_STATE_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_gil.h" // struct _gil_runtime_state + + +typedef int (*_Py_pending_call_func)(void *); + +struct _pending_calls { + int busy; + PyThread_type_lock lock; + /* Request for running pending calls. */ + int32_t calls_to_do; +#define NPENDINGCALLS 32 + struct _pending_call { + _Py_pending_call_func func; + void *arg; + int flags; + } calls[NPENDINGCALLS]; + int first; + int last; +}; + +typedef enum { + PERF_STATUS_FAILED = -1, // Perf trampoline is in an invalid state + PERF_STATUS_NO_INIT = 0, // Perf trampoline is not initialized + PERF_STATUS_OK = 1, // Perf trampoline is ready to be executed +} perf_status_t; + + +#ifdef PY_HAVE_PERF_TRAMPOLINE +struct code_arena_st; + +struct trampoline_api_st { + void* (*init_state)(void); + void (*write_state)(void* state, const void *code_addr, + unsigned int code_size, PyCodeObject* code); + int (*free_state)(void* state); + void *state; +}; +#endif + +struct _ceval_runtime_state { + struct { +#ifdef PY_HAVE_PERF_TRAMPOLINE + perf_status_t status; + Py_ssize_t extra_code_index; + struct code_arena_st *code_arena; + struct trampoline_api_st trampoline_api; + FILE *map_file; + Py_ssize_t persist_after_fork; +#else + int _not_used; +#endif + } perf; + /* Pending calls to be made only on the main thread. */ + struct _pending_calls pending_mainthread; +}; + +#ifdef PY_HAVE_PERF_TRAMPOLINE +# define _PyEval_RUNTIME_PERF_INIT \ + { \ + .status = PERF_STATUS_NO_INIT, \ + .extra_code_index = -1, \ + .persist_after_fork = 0, \ + } +#else +# define _PyEval_RUNTIME_PERF_INIT {0} +#endif + + +struct _ceval_state { + /* This single variable consolidates all requests to break out of + * the fast path in the eval loop. + * It is by far the hottest field in this struct and + * should be placed at the beginning. 
*/ + uintptr_t eval_breaker; + /* Avoid false sharing */ + int64_t padding[7]; + int recursion_limit; + struct _gil_runtime_state *gil; + int own_gil; + struct _pending_calls pending; +}; + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_CEVAL_STATE_H */ diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h index cf8573aa9138b25..eaf84a9c94fc9be 100644 --- a/Include/internal/pycore_code.h +++ b/Include/internal/pycore_code.h @@ -4,6 +4,12 @@ extern "C" { #endif +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#define CODE_MAX_WATCHERS 8 + /* PEP 659 * Specialization and quickening structs and helper functions */ @@ -16,53 +22,56 @@ extern "C" { #define CACHE_ENTRIES(cache) (sizeof(cache)/sizeof(_Py_CODEUNIT)) typedef struct { - _Py_CODEUNIT counter; - _Py_CODEUNIT index; - _Py_CODEUNIT module_keys_version[2]; - _Py_CODEUNIT builtin_keys_version; + uint16_t counter; + uint16_t module_keys_version; + uint16_t builtin_keys_version; + uint16_t index; } _PyLoadGlobalCache; #define INLINE_CACHE_ENTRIES_LOAD_GLOBAL CACHE_ENTRIES(_PyLoadGlobalCache) typedef struct { - _Py_CODEUNIT counter; + uint16_t counter; } _PyBinaryOpCache; #define INLINE_CACHE_ENTRIES_BINARY_OP CACHE_ENTRIES(_PyBinaryOpCache) typedef struct { - _Py_CODEUNIT counter; + uint16_t counter; } _PyUnpackSequenceCache; #define INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE \ CACHE_ENTRIES(_PyUnpackSequenceCache) typedef struct { - _Py_CODEUNIT counter; - _Py_CODEUNIT mask; + uint16_t counter; } _PyCompareOpCache; #define INLINE_CACHE_ENTRIES_COMPARE_OP CACHE_ENTRIES(_PyCompareOpCache) typedef struct { - _Py_CODEUNIT counter; - _Py_CODEUNIT type_version[2]; - _Py_CODEUNIT func_version; + uint16_t counter; } _PyBinarySubscrCache; #define INLINE_CACHE_ENTRIES_BINARY_SUBSCR CACHE_ENTRIES(_PyBinarySubscrCache) typedef struct { - _Py_CODEUNIT counter; - _Py_CODEUNIT version[2]; - _Py_CODEUNIT index; + uint16_t counter; +} _PySuperAttrCache; + +#define INLINE_CACHE_ENTRIES_LOAD_SUPER_ATTR CACHE_ENTRIES(_PySuperAttrCache) + +typedef struct { + uint16_t counter; + uint16_t version[2]; + uint16_t index; } _PyAttrCache; typedef struct { - _Py_CODEUNIT counter; - _Py_CODEUNIT type_version[2]; - _Py_CODEUNIT keys_version[2]; - _Py_CODEUNIT descr[4]; + uint16_t counter; + uint16_t type_version[2]; + uint16_t keys_version[2]; + uint16_t descr[4]; } _PyLoadMethodCache; @@ -72,26 +81,36 @@ typedef struct { #define INLINE_CACHE_ENTRIES_STORE_ATTR CACHE_ENTRIES(_PyAttrCache) typedef struct { - _Py_CODEUNIT counter; - _Py_CODEUNIT func_version[2]; - _Py_CODEUNIT min_args; + uint16_t counter; + uint16_t func_version[2]; } _PyCallCache; #define INLINE_CACHE_ENTRIES_CALL CACHE_ENTRIES(_PyCallCache) typedef struct { - _Py_CODEUNIT counter; + uint16_t counter; } _PyStoreSubscrCache; #define INLINE_CACHE_ENTRIES_STORE_SUBSCR CACHE_ENTRIES(_PyStoreSubscrCache) typedef struct { - _Py_CODEUNIT counter; + uint16_t counter; } _PyForIterCache; #define INLINE_CACHE_ENTRIES_FOR_ITER CACHE_ENTRIES(_PyForIterCache) -extern uint8_t _PyOpcode_Adaptive[256]; +typedef struct { + uint16_t counter; +} _PySendCache; + +#define INLINE_CACHE_ENTRIES_SEND CACHE_ENTRIES(_PySendCache) + +typedef struct { + uint16_t counter; + uint16_t version[2]; +} _PyToBoolCache; + +#define INLINE_CACHE_ENTRIES_TO_BOOL CACHE_ENTRIES(_PyToBoolCache) // Borrowed references to common callables: struct callable_cache { @@ -120,6 +139,7 @@ struct callable_cache { // Note that these all fit within a byte, as do combinations. 
// Later, we will use the smaller numbers to differentiate the different // kinds of locals (e.g. pos-only arg, varkwargs, local-only). +#define CO_FAST_HIDDEN 0x10 #define CO_FAST_LOCAL 0x20 #define CO_FAST_CELL 0x40 #define CO_FAST_FREE 0x80 @@ -187,8 +207,8 @@ struct _PyCodeConstructor { // back to a regular function signature. Regardless, this approach // wouldn't be appropriate if this weren't a strictly internal API. // (See the comments in https://github.com/python/cpython/pull/26258.) -PyAPI_FUNC(int) _PyCode_Validate(struct _PyCodeConstructor *); -PyAPI_FUNC(PyCodeObject *) _PyCode_New(struct _PyCodeConstructor *); +extern int _PyCode_Validate(struct _PyCodeConstructor *); +extern PyCodeObject* _PyCode_New(struct _PyCodeConstructor *); /* Private API */ @@ -213,24 +233,36 @@ extern void _PyLineTable_InitAddressRange( extern int _PyLineTable_NextAddressRange(PyCodeAddressRange *range); extern int _PyLineTable_PreviousAddressRange(PyCodeAddressRange *range); +/** API for executors */ +extern void _PyCode_Clear_Executors(PyCodeObject *code); + +#define ENABLE_SPECIALIZATION 1 + /* Specialization functions */ -extern int _Py_Specialize_LoadAttr(PyObject *owner, _Py_CODEUNIT *instr, - PyObject *name); -extern int _Py_Specialize_StoreAttr(PyObject *owner, _Py_CODEUNIT *instr, +extern void _Py_Specialize_LoadSuperAttr(PyObject *global_super, PyObject *cls, + _Py_CODEUNIT *instr, int load_method); +extern void _Py_Specialize_LoadAttr(PyObject *owner, _Py_CODEUNIT *instr, PyObject *name); -extern int _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins, _Py_CODEUNIT *instr, PyObject *name); -extern int _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container, _Py_CODEUNIT *instr); -extern int _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub, _Py_CODEUNIT *instr); -extern int _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr, - int nargs, PyObject *kwnames); +extern void _Py_Specialize_StoreAttr(PyObject *owner, _Py_CODEUNIT *instr, + PyObject *name); +extern void _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins, + _Py_CODEUNIT *instr, PyObject *name); +extern void _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container, + _Py_CODEUNIT *instr); +extern void _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub, + _Py_CODEUNIT *instr); +extern void _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr, + int nargs); extern void _Py_Specialize_BinaryOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr, int oparg, PyObject **locals); extern void _Py_Specialize_CompareOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr, int oparg); extern void _Py_Specialize_UnpackSequence(PyObject *seq, _Py_CODEUNIT *instr, int oparg); -extern void _Py_Specialize_ForIter(PyObject *iter, _Py_CODEUNIT *instr); +extern void _Py_Specialize_ForIter(PyObject *iter, _Py_CODEUNIT *instr, int oparg); +extern void _Py_Specialize_Send(PyObject *receiver, _Py_CODEUNIT *instr); +extern void _Py_Specialize_ToBool(PyObject *value, _Py_CODEUNIT *instr); /* Finalizer function for static codeobjects used in deepfreeze.py */ extern void _PyStaticCode_Fini(PyCodeObject *co); @@ -239,19 +271,32 @@ extern int _PyStaticCode_Init(PyCodeObject *co); #ifdef Py_STATS +#include "pycore_bitutils.h" // _Py_bit_length -#define STAT_INC(opname, name) do { if (_py_stats) _py_stats->opcode_stats[opname].specialization.name++; } while (0) -#define STAT_DEC(opname, name) do { if (_py_stats) _py_stats->opcode_stats[opname].specialization.name--; } while (0) -#define 
OPCODE_EXE_INC(opname) do { if (_py_stats) _py_stats->opcode_stats[opname].execution_count++; } while (0) -#define CALL_STAT_INC(name) do { if (_py_stats) _py_stats->call_stats.name++; } while (0) -#define OBJECT_STAT_INC(name) do { if (_py_stats) _py_stats->object_stats.name++; } while (0) +#define STAT_INC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name++; } while (0) +#define STAT_DEC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name--; } while (0) +#define OPCODE_EXE_INC(opname) do { if (_Py_stats) _Py_stats->opcode_stats[opname].execution_count++; } while (0) +#define CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.name++; } while (0) +#define OBJECT_STAT_INC(name) do { if (_Py_stats) _Py_stats->object_stats.name++; } while (0) #define OBJECT_STAT_INC_COND(name, cond) \ - do { if (_py_stats && cond) _py_stats->object_stats.name++; } while (0) -#define EVAL_CALL_STAT_INC(name) do { if (_py_stats) _py_stats->call_stats.eval_calls[name]++; } while (0) + do { if (_Py_stats && cond) _Py_stats->object_stats.name++; } while (0) +#define EVAL_CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.eval_calls[name]++; } while (0) #define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) \ - do { if (_py_stats && PyFunction_Check(callable)) _py_stats->call_stats.eval_calls[name]++; } while (0) - -// Used by the _opcode extension which is built as a shared library + do { if (_Py_stats && PyFunction_Check(callable)) _Py_stats->call_stats.eval_calls[name]++; } while (0) +#define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0) +#define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0) +#define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0) +#define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0) +#define OPT_HIST(length, name) \ + do { \ + if (_Py_stats) { \ + int bucket = _Py_bit_length(length >= 1 ? length - 1 : 0); \ + bucket = (bucket >= _Py_UOP_HIST_SIZE) ? _Py_UOP_HIST_SIZE - 1 : bucket; \ + _Py_stats->optimization_stats.name[bucket]++; \ + } \ + } while (0) + +// Export for '_opcode' shared extension PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void); #else @@ -263,6 +308,11 @@ PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void); #define OBJECT_STAT_INC_COND(name, cond) ((void)0) #define EVAL_CALL_STAT_INC(name) ((void)0) #define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) ((void)0) +#define GC_STAT_ADD(gen, name, n) ((void)0) +#define OPT_STAT_INC(name) ((void)0) +#define UOP_STAT_INC(opname, name) ((void)0) +#define OPT_UNSUPPORTED_OPCODE(opname) ((void)0) +#define OPT_HIST(length, name) ((void)0) #endif // !Py_STATS // Utility functions for reading/writing 32/64-bit values in the inline caches. 
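A brief aside on the cache read/write utilities introduced above (and shown in the next hunk): inline caches are arrays of 16-bit code units, so a 32-bit value stored across two entries may sit at an address that is not 4-byte aligned. The helpers therefore copy through memcpy instead of casting the pointer. The following standalone sketch illustrates that pattern only; the toy cache array and the names cache_write_u32/cache_read_u32 are invented for illustration and are not CPython's helpers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for an inline cache: a run of 16-bit code units. */
static uint16_t cache[4];

/* Store a 32-bit value across two 16-bit entries. memcpy avoids the
   undefined behaviour of dereferencing a possibly misaligned uint32_t*. */
static void
cache_write_u32(uint16_t *p, uint32_t value)
{
    memcpy(p, &value, sizeof(value));
}

static uint32_t
cache_read_u32(uint16_t *p)
{
    uint32_t value;
    memcpy(&value, p, sizeof(value));
    return value;
}

int
main(void)
{
    cache_write_u32(&cache[1], 0xDEADBEEFu);  /* odd 16-bit offset on purpose */
    printf("0x%08X\n", (unsigned int)cache_read_u32(&cache[1]));
    return 0;
}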
@@ -292,6 +342,12 @@ write_obj(uint16_t *p, PyObject *val) memcpy(p, &val, sizeof(val)); } +static inline uint16_t +read_u16(uint16_t *p) +{ + return *p; +} + static inline uint32_t read_u32(uint16_t *p) { @@ -377,8 +433,22 @@ write_location_entry_start(uint8_t *ptr, int code, int length) /* With a 16-bit counter, we have 12 bits for the counter value, and 4 bits for the backoff */ #define ADAPTIVE_BACKOFF_BITS 4 -/* The initial counter value is 1 == 2**ADAPTIVE_BACKOFF_START - 1 */ -#define ADAPTIVE_BACKOFF_START 1 + +// A value of 1 means that we attempt to specialize the *second* time each +// instruction is executed. Executing twice is a much better indicator of +// "hotness" than executing once, but additional warmup delays only prevent +// specialization. Most types stabilize by the second execution, too: +#define ADAPTIVE_WARMUP_VALUE 1 +#define ADAPTIVE_WARMUP_BACKOFF 1 + +// A value of 52 means that we attempt to re-specialize after 53 misses (a prime +// number, useful for avoiding artifacts if every nth value is a different type +// or something). Setting the backoff to 0 means that the counter is reset to +// the same state as a warming-up instruction (value == 1, backoff == 1) after +// deoptimization. This isn't strictly necessary, but it is bit easier to reason +// about when thinking about the opcode transitions as a state machine: +#define ADAPTIVE_COOLDOWN_VALUE 52 +#define ADAPTIVE_COOLDOWN_BACKOFF 0 #define MAX_BACKOFF_VALUE (16 - ADAPTIVE_BACKOFF_BITS) @@ -386,13 +456,19 @@ write_location_entry_start(uint8_t *ptr, int code, int length) static inline uint16_t adaptive_counter_bits(int value, int backoff) { return (value << ADAPTIVE_BACKOFF_BITS) | - (backoff & ((1<= (y)) + ((x) <= (y)))) -static inline int -_PyCode_InitLineArray(PyCodeObject *co) -{ - if (co->_co_linearray) { - return 0; - } - return _PyCode_CreateLineArray(co); -} +/* + * The following bits are chosen so that the value of + * COMPARSION_BIT(left, right) + * masked by the values below will be non-zero if the + * comparison is true, and zero if it is false */ -static inline int -_PyCode_LineNumberFromArray(PyCodeObject *co, int index) -{ - assert(co->_co_linearray != NULL); - assert(index >= 0); - assert(index < Py_SIZE(co)); - if (co->_co_linearray_entry_size == 2) { - return ((int16_t *)co->_co_linearray)[index]; - } - else { - assert(co->_co_linearray_entry_size == 4); - return ((int32_t *)co->_co_linearray)[index]; - } -} +/* This is for values that are unordered, ie. NaN, not types that are unordered, e.g. sets */ +#define COMPARISON_UNORDERED 1 + +#define COMPARISON_LESS_THAN 2 +#define COMPARISON_GREATER_THAN 4 +#define COMPARISON_EQUALS 8 + +#define COMPARISON_NOT_EQUALS (COMPARISON_UNORDERED | COMPARISON_LESS_THAN | COMPARISON_GREATER_THAN) + +extern int _Py_Instrument(PyCodeObject *co, PyInterpreterState *interp); + +extern int _Py_GetBaseOpcode(PyCodeObject *code, int offset); +extern int _PyInstruction_GetLength(PyCodeObject *code, int offset); #ifdef __cplusplus } diff --git a/Include/internal/pycore_codecs.h b/Include/internal/pycore_codecs.h new file mode 100644 index 000000000000000..a2a7151d50ade7c --- /dev/null +++ b/Include/internal/pycore_codecs.h @@ -0,0 +1,55 @@ +#ifndef Py_INTERNAL_CODECS_H +#define Py_INTERNAL_CODECS_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +extern PyObject* _PyCodec_Lookup(const char *encoding); + +/* Text codec specific encoding and decoding API. 
+ + Checks the encoding against a list of codecs which do not + implement a str<->bytes encoding before attempting the + operation. + + Please note that these APIs are internal and should not + be used in Python C extensions. + + XXX (ncoghlan): should we make these, or something like them, public + in Python 3.5+? + + */ +extern PyObject* _PyCodec_LookupTextEncoding( + const char *encoding, + const char *alternate_command); + +extern PyObject* _PyCodec_EncodeText( + PyObject *object, + const char *encoding, + const char *errors); + +extern PyObject* _PyCodec_DecodeText( + PyObject *object, + const char *encoding, + const char *errors); + +/* These two aren't actually text encoding specific, but _io.TextIOWrapper + * is the only current API consumer. + */ +extern PyObject* _PyCodecInfo_GetIncrementalDecoder( + PyObject *codec_info, + const char *errors); + +extern PyObject* _PyCodecInfo_GetIncrementalEncoder( + PyObject *codec_info, + const char *errors); + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_CODECS_H */ diff --git a/Include/internal/pycore_compile.h b/Include/internal/pycore_compile.h index cb490abe77a59dd..e5870759ba74f16 100644 --- a/Include/internal/pycore_compile.h +++ b/Include/internal/pycore_compile.h @@ -11,7 +11,7 @@ extern "C" { struct _arena; // Type defined in pycore_pyarena.h struct _mod; // Type defined in pycore_ast.h -// Export the symbol for test_peg_generator (built as a library) +// Export for 'test_peg_generator' shared extension PyAPI_FUNC(PyCodeObject*) _PyAST_Compile( struct _mod *mod, PyObject *filename, @@ -19,30 +19,126 @@ PyAPI_FUNC(PyCodeObject*) _PyAST_Compile( int optimize, struct _arena *arena); -int _PyFuture_FromAST( - struct _mod * mod, +/* AST optimizations */ +extern int _PyCompile_AstOptimize( + struct _mod *mod, PyObject *filename, - PyFutureFeatures* futures); - -extern PyObject* _Py_Mangle(PyObject *p, PyObject *name); - -typedef struct { - int optimize; - int ff_features; + PyCompilerFlags *flags, + int optimize, + struct _arena *arena); - int recursion_depth; /* current recursion depth */ - int recursion_limit; /* recursion limit */ -} _PyASTOptimizeState; +static const _PyCompilerSrcLocation NO_LOCATION = {-1, -1, -1, -1}; extern int _PyAST_Optimize( struct _mod *, struct _arena *arena, - _PyASTOptimizeState *state); + int optimize, + int ff_features); + +typedef struct { + int h_label; + int h_startdepth; + int h_preserve_lasti; +} _PyCompile_ExceptHandlerInfo; + +typedef struct { + int i_opcode; + int i_oparg; + _PyCompilerSrcLocation i_loc; + _PyCompile_ExceptHandlerInfo i_except_handler_info; + + /* Used by the assembler */ + int i_target; + int i_offset; +} _PyCompile_Instruction; + +typedef struct { + _PyCompile_Instruction *s_instrs; + int s_allocated; + int s_used; + + int *s_labelmap; /* label id --> instr offset */ + int s_labelmap_size; + int s_next_free_label; /* next free label id */ +} _PyCompile_InstructionSequence; + +int _PyCompile_InstructionSequence_UseLabel(_PyCompile_InstructionSequence *seq, int lbl); +int _PyCompile_InstructionSequence_Addop(_PyCompile_InstructionSequence *seq, + int opcode, int oparg, + _PyCompilerSrcLocation loc); + +typedef struct { + PyObject *u_name; + PyObject *u_qualname; /* dot-separated qualified name (lazy) */ + + /* The following fields are dicts that map objects to + the index of them in co_XXX. The index is used as + the argument for opcodes that refer to those collections. 
+ */ + PyObject *u_consts; /* all constants */ + PyObject *u_names; /* all names */ + PyObject *u_varnames; /* local variables */ + PyObject *u_cellvars; /* cell variables */ + PyObject *u_freevars; /* free variables */ + PyObject *u_fasthidden; /* dict; keys are names that are fast-locals only + temporarily within an inlined comprehension. When + value is True, treat as fast-local. */ + + Py_ssize_t u_argcount; /* number of arguments for block */ + Py_ssize_t u_posonlyargcount; /* number of positional only arguments for block */ + Py_ssize_t u_kwonlyargcount; /* number of keyword only arguments for block */ + + int u_firstlineno; /* the first lineno of the block */ +} _PyCompile_CodeUnitMetadata; + + +/* Utility for a number of growing arrays used in the compiler */ +int _PyCompile_EnsureArrayLargeEnough( + int idx, + void **array, + int *alloc, + int default_alloc, + size_t item_size); + +int _PyCompile_ConstCacheMergeOne(PyObject *const_cache, PyObject **obj); + + +// Export for '_opcode' extention module +PyAPI_FUNC(int) _PyCompile_OpcodeIsValid(int opcode); +PyAPI_FUNC(int) _PyCompile_OpcodeHasArg(int opcode); +PyAPI_FUNC(int) _PyCompile_OpcodeHasConst(int opcode); +PyAPI_FUNC(int) _PyCompile_OpcodeHasName(int opcode); +PyAPI_FUNC(int) _PyCompile_OpcodeHasJump(int opcode); +PyAPI_FUNC(int) _PyCompile_OpcodeHasFree(int opcode); +PyAPI_FUNC(int) _PyCompile_OpcodeHasLocal(int opcode); +PyAPI_FUNC(int) _PyCompile_OpcodeHasExc(int opcode); + +PyAPI_FUNC(PyObject*) _PyCompile_GetUnaryIntrinsicName(int index); +PyAPI_FUNC(PyObject*) _PyCompile_GetBinaryIntrinsicName(int index); /* Access compiler internals for unit testing */ + +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(PyObject*) _PyCompile_CleanDoc(PyObject *doc); + +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(PyObject*) _PyCompile_CodeGen( + PyObject *ast, + PyObject *filename, + PyCompilerFlags *flags, + int optimize, + int compile_mode); + +// Export for '_testinternalcapi' shared extension PyAPI_FUNC(PyObject*) _PyCompile_OptimizeCfg( PyObject *instructions, - PyObject *consts); + PyObject *consts, + int nlocals); + +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(PyCodeObject*) +_PyCompile_Assemble(_PyCompile_CodeUnitMetadata *umd, PyObject *filename, + PyObject *instructions); #ifdef __cplusplus } diff --git a/Include/internal/pycore_complexobject.h b/Include/internal/pycore_complexobject.h new file mode 100644 index 000000000000000..54713536eedc462 --- /dev/null +++ b/Include/internal/pycore_complexobject.h @@ -0,0 +1,25 @@ +#ifndef Py_INTERNAL_COMPLEXOBJECT_H +#define Py_INTERNAL_COMPLEXOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_unicodeobject.h" // _PyUnicodeWriter + +/* Format the object based on the format_spec, as defined in PEP 3101 + (Advanced String Formatting). 
*/ +extern int _PyComplex_FormatAdvancedWriter( + _PyUnicodeWriter *writer, + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end); + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_COMPLEXOBJECT_H diff --git a/Include/internal/pycore_condvar.h b/Include/internal/pycore_condvar.h index 981c962bf7dfdfa..34c21aaad43197b 100644 --- a/Include/internal/pycore_condvar.h +++ b/Include/internal/pycore_condvar.h @@ -5,14 +5,8 @@ # error "this header requires Py_BUILD_CORE define" #endif -#ifndef _POSIX_THREADS -/* This means pthreads are not implemented in libc headers, hence the macro - not present in unistd.h. But they still can be implemented as an external - library (e.g. gnu pth in pthread emulation) */ -# ifdef HAVE_PTHREAD_H -# include /* _POSIX_THREADS */ -# endif -#endif +#include "pycore_pythread.h" // _POSIX_THREADS + #ifdef _POSIX_THREADS /* @@ -21,7 +15,7 @@ #define Py_HAVE_CONDVAR #ifdef HAVE_PTHREAD_H -# include +# include // pthread_mutex_t #endif #define PyMUTEX_T pthread_mutex_t @@ -38,7 +32,7 @@ /* include windows if it hasn't been done before */ #define WIN32_LEAN_AND_MEAN -#include +#include // CRITICAL_SECTION /* options */ /* non-emulated condition variables are provided for those that want diff --git a/Include/internal/pycore_context.h b/Include/internal/pycore_context.h index 1bf4e8f3ee532e4..ec884e9e0f55a92 100644 --- a/Include/internal/pycore_context.h +++ b/Include/internal/pycore_context.h @@ -5,7 +5,7 @@ # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_hamt.h" /* PyHamtObject */ +#include "pycore_hamt.h" // PyHamtObject extern PyTypeObject _PyContextTokenMissing_Type; @@ -18,6 +18,10 @@ void _PyContext_Fini(PyInterpreterState *); /* other API */ +typedef struct { + PyObject_HEAD +} _PyContextTokenMissing; + #ifndef WITH_FREELISTS // without freelists # define PyContext_MAXFREELIST 0 @@ -64,4 +68,9 @@ struct _pycontexttokenobject { }; +// _testinternalcapi.hamt() used by tests. +// Export for '_testcapi' shared extension +PyAPI_FUNC(PyObject*) _PyContext_NewHamtForTests(void); + + #endif /* !Py_INTERNAL_CONTEXT_H */ diff --git a/Include/internal/pycore_critical_section.h b/Include/internal/pycore_critical_section.h new file mode 100644 index 000000000000000..bf2bbfffc38bd0f --- /dev/null +++ b/Include/internal/pycore_critical_section.h @@ -0,0 +1,242 @@ +#ifndef Py_INTERNAL_CRITICAL_SECTION_H +#define Py_INTERNAL_CRITICAL_SECTION_H + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_lock.h" // PyMutex +#include "pycore_pystate.h" // _PyThreadState_GET() +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// Implementation of Python critical sections +// +// Conceptually, critical sections are a deadlock avoidance layer on top of +// per-object locks. These helpers, in combination with those locks, replace +// our usage of the global interpreter lock to provide thread-safety for +// otherwise thread-unsafe objects, such as dict. +// +// NOTE: These APIs are no-ops in non-free-threaded builds. +// +// Straightforward per-object locking could introduce deadlocks that were not +// present when running with the GIL. Threads may hold locks for multiple +// objects simultaneously because Python operations can nest. If threads were +// to acquire the same locks in different orders, they would deadlock. 
+// +// One way to avoid deadlocks is to allow threads to hold only the lock (or +// locks) for a single operation at a time (typically a single lock, but some +// operations involve two locks). When a thread begins a nested operation it +// could suspend the locks for any outer operation: before beginning the nested +// operation, the locks for the outer operation are released and when the +// nested operation completes, the locks for the outer operation are +// reacquired. +// +// To improve performance, this API uses a variation of the above scheme. +// Instead of immediately suspending locks any time a nested operation begins, +// locks are only suspended if the thread would block. This reduces the number +// of lock acquisitions and releases for nested operations, while still +// avoiding deadlocks. +// +// Additionally, the locks for any active operation are suspended around +// other potentially blocking operations, such as I/O. This is because the +// interaction between locks and blocking operations can lead to deadlocks in +// the same way as the interaction between multiple locks. +// +// Each thread's critical sections and their corresponding locks are tracked in +// a stack in `PyThreadState.critical_section`. When a thread calls +// `_PyThreadState_Detach()`, such as before a blocking I/O operation or when +// waiting to acquire a lock, the thread suspends all of its active critical +// sections, temporarily releasing the associated locks. When the thread calls +// `_PyThreadState_Attach()`, it resumes the top-most (i.e., most recent) +// critical section by reacquiring the associated lock or locks. See +// `_PyCriticalSection_Resume()`. +// +// NOTE: Only the top-most critical section is guaranteed to be active. +// Operations that need to lock two objects at once must use +// `Py_BEGIN_CRITICAL_SECTION2()`. You *CANNOT* use nested critical sections +// to lock more than one object at once, because the inner critical section +// may suspend the outer critical sections. This API does not provide a way +// to lock more than two objects at once (though it could be added later +// if actually needed). +// +// NOTE: Critical sections implicitly behave like reentrant locks because +// attempting to acquire the same lock will suspend any outer (earlier) +// critical sections. However, they are less efficient for this use case than +// purposefully designed reentrant locks. +// +// Example usage: +// Py_BEGIN_CRITICAL_SECTION(op); +// ... +// Py_END_CRITICAL_SECTION(); +// +// To lock two objects at once: +// Py_BEGIN_CRITICAL_SECTION2(op1, op2); +// ... +// Py_END_CRITICAL_SECTION2(); + + +// Tagged pointers to critical sections use the two least significant bits to +// mark if the pointed-to critical section is inactive and whether it is a +// _PyCriticalSection2 object. +#define _Py_CRITICAL_SECTION_INACTIVE 0x1 +#define _Py_CRITICAL_SECTION_TWO_MUTEXES 0x2 +#define _Py_CRITICAL_SECTION_MASK 0x3 + +#ifdef Py_GIL_DISABLED +# define Py_BEGIN_CRITICAL_SECTION(op) \ + { \ + _PyCriticalSection _cs; \ + _PyCriticalSection_Begin(&_cs, &_PyObject_CAST(op)->ob_mutex) + +# define Py_END_CRITICAL_SECTION() \ + _PyCriticalSection_End(&_cs); \ + } + +# define Py_BEGIN_CRITICAL_SECTION2(a, b) \ + { \ + _PyCriticalSection2 _cs2; \ + _PyCriticalSection2_Begin(&_cs2, &_PyObject_CAST(a)->ob_mutex, &_PyObject_CAST(b)->ob_mutex) + +# define Py_END_CRITICAL_SECTION2() \ + _PyCriticalSection2_End(&_cs2); \ + } +#else /* !Py_GIL_DISABLED */ +// The critical section APIs are no-ops with the GIL. 
+# define Py_BEGIN_CRITICAL_SECTION(op) +# define Py_END_CRITICAL_SECTION() +# define Py_BEGIN_CRITICAL_SECTION2(a, b) +# define Py_END_CRITICAL_SECTION2() +#endif /* !Py_GIL_DISABLED */ + +typedef struct { + // Tagged pointer to an outer active critical section (or 0). + // The two least-significant-bits indicate whether the pointed-to critical + // section is inactive and whether it is a _PyCriticalSection2 object. + uintptr_t prev; + + // Mutex used to protect critical section + PyMutex *mutex; +} _PyCriticalSection; + +// A critical section protected by two mutexes. Use +// _PyCriticalSection2_Begin and _PyCriticalSection2_End. +typedef struct { + _PyCriticalSection base; + + PyMutex *mutex2; +} _PyCriticalSection2; + +static inline int +_PyCriticalSection_IsActive(uintptr_t tag) +{ + return tag != 0 && (tag & _Py_CRITICAL_SECTION_INACTIVE) == 0; +} + +// Resumes the top-most critical section. +PyAPI_FUNC(void) +_PyCriticalSection_Resume(PyThreadState *tstate); + +// (private) slow path for locking the mutex +PyAPI_FUNC(void) +_PyCriticalSection_BeginSlow(_PyCriticalSection *c, PyMutex *m); + +PyAPI_FUNC(void) +_PyCriticalSection2_BeginSlow(_PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2, + int is_m1_locked); + +static inline void +_PyCriticalSection_Begin(_PyCriticalSection *c, PyMutex *m) +{ + if (PyMutex_LockFast(&m->v)) { + PyThreadState *tstate = _PyThreadState_GET(); + c->mutex = m; + c->prev = tstate->critical_section; + tstate->critical_section = (uintptr_t)c; + } + else { + _PyCriticalSection_BeginSlow(c, m); + } +} + +// Removes the top-most critical section from the thread's stack of critical +// sections. If the new top-most critical section is inactive, then it is +// resumed. +static inline void +_PyCriticalSection_Pop(_PyCriticalSection *c) +{ + PyThreadState *tstate = _PyThreadState_GET(); + uintptr_t prev = c->prev; + tstate->critical_section = prev; + + if ((prev & _Py_CRITICAL_SECTION_INACTIVE) != 0) { + _PyCriticalSection_Resume(tstate); + } +} + +static inline void +_PyCriticalSection_End(_PyCriticalSection *c) +{ + PyMutex_Unlock(c->mutex); + _PyCriticalSection_Pop(c); +} + +static inline void +_PyCriticalSection2_Begin(_PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2) +{ + if (m1 == m2) { + // If the two mutex arguments are the same, treat this as a critical + // section with a single mutex. + c->mutex2 = NULL; + _PyCriticalSection_Begin(&c->base, m1); + return; + } + + if ((uintptr_t)m2 < (uintptr_t)m1) { + // Sort the mutexes so that the lower address is locked first. + // The exact order does not matter, but we need to acquire the mutexes + // in a consistent order to avoid lock ordering deadlocks. 
+ PyMutex *tmp = m1; + m1 = m2; + m2 = tmp; + } + + if (PyMutex_LockFast(&m1->v)) { + if (PyMutex_LockFast(&m2->v)) { + PyThreadState *tstate = _PyThreadState_GET(); + c->base.mutex = m1; + c->mutex2 = m2; + c->base.prev = tstate->critical_section; + + uintptr_t p = (uintptr_t)c | _Py_CRITICAL_SECTION_TWO_MUTEXES; + tstate->critical_section = p; + } + else { + _PyCriticalSection2_BeginSlow(c, m1, m2, 1); + } + } + else { + _PyCriticalSection2_BeginSlow(c, m1, m2, 0); + } +} + +static inline void +_PyCriticalSection2_End(_PyCriticalSection2 *c) +{ + if (c->mutex2) { + PyMutex_Unlock(c->mutex2); + } + PyMutex_Unlock(c->base.mutex); + _PyCriticalSection_Pop(&c->base); +} + +PyAPI_FUNC(void) +_PyCriticalSection_SuspendAll(PyThreadState *tstate); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_CRITICAL_SECTION_H */ diff --git a/Include/internal/pycore_crossinterp.h b/Include/internal/pycore_crossinterp.h new file mode 100644 index 000000000000000..ec9dac96292f353 --- /dev/null +++ b/Include/internal/pycore_crossinterp.h @@ -0,0 +1,281 @@ +#ifndef Py_INTERNAL_CROSSINTERP_H +#define Py_INTERNAL_CROSSINTERP_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_pyerrors.h" + + +/***************************/ +/* cross-interpreter calls */ +/***************************/ + +typedef int (*_Py_simple_func)(void *); +extern int _Py_CallInInterpreter( + PyInterpreterState *interp, + _Py_simple_func func, + void *arg); +extern int _Py_CallInInterpreterAndRawFree( + PyInterpreterState *interp, + _Py_simple_func func, + void *arg); + + +/**************************/ +/* cross-interpreter data */ +/**************************/ + +typedef struct _xid _PyCrossInterpreterData; +typedef PyObject *(*xid_newobjectfunc)(_PyCrossInterpreterData *); +typedef void (*xid_freefunc)(void *); + +// _PyCrossInterpreterData is similar to Py_buffer as an effectively +// opaque struct that holds data outside the object machinery. This +// is necessary to pass safely between interpreters in the same process. +struct _xid { + // data is the cross-interpreter-safe derivation of a Python object + // (see _PyObject_GetCrossInterpreterData). It will be NULL if the + // new_object func (below) encodes the data. + void *data; + // obj is the Python object from which the data was derived. This + // is non-NULL only if the data remains bound to the object in some + // way, such that the object must be "released" (via a decref) when + // the data is released. In that case the code that sets the field, + // likely a registered "crossinterpdatafunc", is responsible for + // ensuring it owns the reference (i.e. incref). + PyObject *obj; + // interp is the ID of the owning interpreter of the original + // object. It corresponds to the active interpreter when + // _PyObject_GetCrossInterpreterData() was called. This should only + // be set by the cross-interpreter machinery. + // + // We use the ID rather than the PyInterpreterState to avoid issues + // with deleted interpreters. Note that IDs are never re-used, so + // each one will always correspond to a specific interpreter + // (whether still alive or not). + int64_t interpid; + // new_object is a function that returns a new object in the current + // interpreter given the data. The resulting object (a new + // reference) will be equivalent to the original object. This field + // is required. + xid_newobjectfunc new_object; + // free is called when the data is released. 
If it is NULL then + // nothing will be done to free the data. For some types this is + // okay (e.g. bytes) and for those types this field should be set + // to NULL. However, for most the data was allocated just for + // cross-interpreter use, so it must be freed when + // _PyCrossInterpreterData_Release is called or the memory will + // leak. In that case, at the very least this field should be set + // to PyMem_RawFree (the default if not explicitly set to NULL). + // The call will happen with the original interpreter activated. + xid_freefunc free; +}; + +PyAPI_FUNC(_PyCrossInterpreterData *) _PyCrossInterpreterData_New(void); +PyAPI_FUNC(void) _PyCrossInterpreterData_Free(_PyCrossInterpreterData *data); + + +/* defining cross-interpreter data */ + +PyAPI_FUNC(void) _PyCrossInterpreterData_Init( + _PyCrossInterpreterData *data, + PyInterpreterState *interp, void *shared, PyObject *obj, + xid_newobjectfunc new_object); +PyAPI_FUNC(int) _PyCrossInterpreterData_InitWithSize( + _PyCrossInterpreterData *, + PyInterpreterState *interp, const size_t, PyObject *, + xid_newobjectfunc); +PyAPI_FUNC(void) _PyCrossInterpreterData_Clear( + PyInterpreterState *, _PyCrossInterpreterData *); + + +/* using cross-interpreter data */ + +PyAPI_FUNC(int) _PyObject_CheckCrossInterpreterData(PyObject *); +PyAPI_FUNC(int) _PyObject_GetCrossInterpreterData(PyObject *, _PyCrossInterpreterData *); +PyAPI_FUNC(PyObject *) _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *); +PyAPI_FUNC(int) _PyCrossInterpreterData_Release(_PyCrossInterpreterData *); +PyAPI_FUNC(int) _PyCrossInterpreterData_ReleaseAndRawFree(_PyCrossInterpreterData *); + + +/* cross-interpreter data registry */ + +// For now we use a global registry of shareable classes. An +// alternative would be to add a tp_* slot for a class's +// crossinterpdatafunc. It would be simpler and more efficient. + +typedef int (*crossinterpdatafunc)(PyThreadState *tstate, PyObject *, + _PyCrossInterpreterData *); + +struct _xidregitem; + +struct _xidregitem { + struct _xidregitem *prev; + struct _xidregitem *next; + /* This can be a dangling pointer, but only if weakref is set. */ + PyTypeObject *cls; + /* This is NULL for builtin types. */ + PyObject *weakref; + size_t refcount; + crossinterpdatafunc getdata; +}; + +struct _xidregistry { + int global; /* builtin types or heap types */ + int initialized; + PyThread_type_lock mutex; + struct _xidregitem *head; +}; + +PyAPI_FUNC(int) _PyCrossInterpreterData_RegisterClass(PyTypeObject *, crossinterpdatafunc); +PyAPI_FUNC(int) _PyCrossInterpreterData_UnregisterClass(PyTypeObject *); +PyAPI_FUNC(crossinterpdatafunc) _PyCrossInterpreterData_Lookup(PyObject *); + + +/*****************************/ +/* runtime state & lifecycle */ +/*****************************/ + +struct _xi_runtime_state { + // builtin types + // XXX Remove this field once we have a tp_* slot. + struct _xidregistry registry; +}; + +struct _xi_state { + // heap types + // XXX Remove this field once we have a tp_* slot. 
+ struct _xidregistry registry; + + // heap types + PyObject *PyExc_NotShareableError; +}; + +extern PyStatus _PyXI_Init(PyInterpreterState *interp); +extern void _PyXI_Fini(PyInterpreterState *interp); + + +/***************************/ +/* short-term data sharing */ +/***************************/ + +// Ultimately we'd like to preserve enough information about the +// exception and traceback that we could re-constitute (or at least +// simulate, a la traceback.TracebackException), and even chain, a copy +// of the exception in the calling interpreter. + +typedef struct _excinfo { + struct _excinfo_type { + PyTypeObject *builtin; + const char *name; + const char *qualname; + const char *module; + } type; + const char *msg; +} _PyXI_excinfo; + + +typedef enum error_code { + _PyXI_ERR_NO_ERROR = 0, + _PyXI_ERR_UNCAUGHT_EXCEPTION = -1, + _PyXI_ERR_OTHER = -2, + _PyXI_ERR_NO_MEMORY = -3, + _PyXI_ERR_ALREADY_RUNNING = -4, + _PyXI_ERR_MAIN_NS_FAILURE = -5, + _PyXI_ERR_APPLY_NS_FAILURE = -6, + _PyXI_ERR_NOT_SHAREABLE = -7, +} _PyXI_errcode; + + +typedef struct _sharedexception { + // The originating interpreter. + PyInterpreterState *interp; + // The kind of error to propagate. + _PyXI_errcode code; + // The exception information to propagate, if applicable. + // This is populated only for some error codes, + // but always for _PyXI_ERR_UNCAUGHT_EXCEPTION. + _PyXI_excinfo uncaught; +} _PyXI_error; + +PyAPI_FUNC(PyObject *) _PyXI_ApplyError(_PyXI_error *err); + + +typedef struct xi_session _PyXI_session; +typedef struct _sharedns _PyXI_namespace; + +PyAPI_FUNC(void) _PyXI_FreeNamespace(_PyXI_namespace *ns); +PyAPI_FUNC(_PyXI_namespace *) _PyXI_NamespaceFromNames(PyObject *names); +PyAPI_FUNC(int) _PyXI_FillNamespaceFromDict( + _PyXI_namespace *ns, + PyObject *nsobj, + _PyXI_session *session); +PyAPI_FUNC(int) _PyXI_ApplyNamespace( + _PyXI_namespace *ns, + PyObject *nsobj, + PyObject *dflt); + + +// A cross-interpreter session involves entering an interpreter +// (_PyXI_Enter()), doing some work with it, and finally exiting +// that interpreter (_PyXI_Exit()). +// +// At the boundaries of the session, both entering and exiting, +// data may be exchanged between the previous interpreter and the +// target one in a thread-safe way that does not violate the +// isolation between interpreters. This includes setting objects +// in the target's __main__ module on the way in, and capturing +// uncaught exceptions on the way out. +struct xi_session { + // Once a session has been entered, this is the tstate that was + // current before the session. If it is different from cur_tstate + // then we must have switched interpreters. Either way, this will + // be the current tstate once we exit the session. + PyThreadState *prev_tstate; + // Once a session has been entered, this is the current tstate. + // It must be current when the session exits. + PyThreadState *init_tstate; + // This is true if init_tstate needs cleanup during exit. + int own_init_tstate; + + // This is true if, while entering the session, init_thread took + // "ownership" of the interpreter's __main__ module. This means + // it is the only thread that is allowed to run code there. + // (Caveat: for now, users may still run exec() against the + // __main__ module's dict, though that isn't advisable.) + int running; + // This is a cached reference to the __dict__ of the entered + // interpreter's __main__ module. It is looked up when at the + // beginning of the session as a convenience. 
+ PyObject *main_ns; + + // This is set if the interpreter is entered and raised an exception + // that needs to be handled in some special way during exit. + _PyXI_errcode *error_override; + // This is set if exit captured an exception to propagate. + _PyXI_error *error; + + // -- pre-allocated memory -- + _PyXI_error _error; + _PyXI_errcode _error_override; +}; + +PyAPI_FUNC(int) _PyXI_Enter( + _PyXI_session *session, + PyInterpreterState *interp, + PyObject *nsupdates); +PyAPI_FUNC(void) _PyXI_Exit(_PyXI_session *session); + +PyAPI_FUNC(PyObject *) _PyXI_ApplyCapturedException(_PyXI_session *session); +PyAPI_FUNC(int) _PyXI_HasCapturedException(_PyXI_session *session); + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_CROSSINTERP_H */ diff --git a/Include/internal/pycore_descrobject.h b/Include/internal/pycore_descrobject.h index 76378569df90e3e..3cec59a68a3d2b2 100644 --- a/Include/internal/pycore_descrobject.h +++ b/Include/internal/pycore_descrobject.h @@ -20,6 +20,8 @@ typedef struct { typedef propertyobject _PyPropertyObject; +extern PyTypeObject _PyMethodWrapper_Type; + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_dict.h b/Include/internal/pycore_dict.h index ae4094a095d879a..d96870e9197bbf0 100644 --- a/Include/internal/pycore_dict.h +++ b/Include/internal/pycore_dict.h @@ -9,32 +9,70 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +#include "pycore_identifier.h" // _Py_Identifier +#include "pycore_object.h" // PyDictOrValues -/* runtime lifecycle */ +// Unsafe flavor of PyDict_GetItemWithError(): no error checking +extern PyObject* _PyDict_GetItemWithError(PyObject *dp, PyObject *key); -extern void _PyDict_Fini(PyInterpreterState *interp); +extern int _PyDict_DelItemIf(PyObject *mp, PyObject *key, + int (*predicate)(PyObject *value)); +// "KnownHash" variants +// Export for '_asyncio' shared extension +PyAPI_FUNC(int) _PyDict_SetItem_KnownHash(PyObject *mp, PyObject *key, + PyObject *item, Py_hash_t hash); +// Export for '_asyncio' shared extension +PyAPI_FUNC(int) _PyDict_DelItem_KnownHash(PyObject *mp, PyObject *key, + Py_hash_t hash); +extern int _PyDict_Contains_KnownHash(PyObject *, PyObject *, Py_hash_t); -/* other API */ +// "Id" variants +extern PyObject* _PyDict_GetItemIdWithError(PyObject *dp, + _Py_Identifier *key); +extern int _PyDict_ContainsId(PyObject *, _Py_Identifier *); +extern int _PyDict_SetItemId(PyObject *dp, _Py_Identifier *key, PyObject *item); +extern int _PyDict_DelItemId(PyObject *mp, _Py_Identifier *key); -#ifndef WITH_FREELISTS -// without freelists -# define PyDict_MAXFREELIST 0 -#endif +extern int _PyDict_Next( + PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value, Py_hash_t *hash); -#ifndef PyDict_MAXFREELIST -# define PyDict_MAXFREELIST 80 -#endif +extern int _PyDict_HasOnlyStringKeys(PyObject *mp); -struct _Py_dict_state { -#if PyDict_MAXFREELIST > 0 - /* Dictionary reuse scheme to save calls to malloc and free */ - PyDictObject *free_list[PyDict_MAXFREELIST]; - int numfree; - PyDictKeysObject *keys_free_list[PyDict_MAXFREELIST]; - int keys_numfree; -#endif -}; +extern void _PyDict_MaybeUntrack(PyObject *mp); + +// Export for '_ctypes' shared extension +PyAPI_FUNC(Py_ssize_t) _PyDict_SizeOf(PyDictObject *); + +#define _PyDict_HasSplitTable(d) ((d)->ma_values != NULL) + +/* Like PyDict_Merge, but override can be 0, 1 or 2. 
If override is 0, + the first occurrence of a key wins, if override is 1, the last occurrence + of a key wins, if override is 2, a KeyError with conflicting key as + argument is raised. +*/ +extern int _PyDict_MergeEx(PyObject *mp, PyObject *other, int override); + +extern void _PyDict_DebugMallocStats(FILE *out); + + +/* _PyDictView */ + +typedef struct { + PyObject_HEAD + PyDictObject *dv_dict; +} _PyDictViewObject; + +extern PyObject* _PyDictView_New(PyObject *, PyTypeObject *); +extern PyObject* _PyDictView_Intersect(PyObject* self, PyObject *other); + + +/* runtime lifecycle */ + +extern void _PyDict_Fini(PyInterpreterState *interp); + + +/* other API */ typedef struct { /* Cached hash code of me_key. */ @@ -53,9 +91,12 @@ extern PyObject *_PyDict_FromKeys(PyObject *, PyObject *, PyObject *); /* Gets a version number unique to the current state of the keys of dict, if possible. * Returns the version number, or zero if it was not possible to get a version number. */ -extern uint32_t _PyDictKeys_GetVersionForCurrentState(PyDictKeysObject *dictkeys); +extern uint32_t _PyDictKeys_GetVersionForCurrentState( + PyInterpreterState *interp, PyDictKeysObject *dictkeys); -extern Py_ssize_t _PyDict_KeysSize(PyDictKeysObject *keys); +extern size_t _PyDict_KeysSize(PyDictKeysObject *keys); + +extern void _PyDictKeys_DecRef(PyDictKeysObject *keys); /* _Py_dict_lookup() returns index of entry which can be used like DK_ENTRIES(dk)[index]. * -1 when no entry found, -3 when compare raises error. @@ -70,7 +111,11 @@ extern PyObject *_PyDict_LoadGlobal(PyDictObject *, PyDictObject *, PyObject *); extern int _PyDict_SetItem_Take2(PyDictObject *op, PyObject *key, PyObject *value); extern int _PyObjectDict_SetItem(PyTypeObject *tp, PyObject **dictptr, PyObject *name, PyObject *value); -extern PyObject *_PyDict_Pop_KnownHash(PyObject *, PyObject *, Py_hash_t, PyObject *); +extern int _PyDict_Pop_KnownHash( + PyDictObject *dict, + PyObject *key, + Py_hash_t hash, + PyObject **result); #define DKIX_EMPTY (-1) #define DKIX_DUMMY (-2) /* Used internally */ @@ -138,27 +183,34 @@ struct _dictvalues { PyObject *values[1]; }; -#define DK_LOG_SIZE(dk) ((dk)->dk_log2_size) +#define DK_LOG_SIZE(dk) _Py_RVALUE((dk)->dk_log2_size) #if SIZEOF_VOID_P > 4 #define DK_SIZE(dk) (((int64_t)1)<dk_kind == DICT_KEYS_GENERAL), \ - (PyDictKeyEntry*)(&((int8_t*)((dk)->dk_indices))[(size_t)1 << (dk)->dk_log2_index_bytes])) -#define DK_UNICODE_ENTRIES(dk) \ - (assert((dk)->dk_kind != DICT_KEYS_GENERAL), \ - (PyDictUnicodeEntry*)(&((int8_t*)((dk)->dk_indices))[(size_t)1 << (dk)->dk_log2_index_bytes])) -#define DK_IS_UNICODE(dk) ((dk)->dk_kind != DICT_KEYS_GENERAL) -extern uint64_t _pydict_global_version; +static inline void* _DK_ENTRIES(PyDictKeysObject *dk) { + int8_t *indices = (int8_t*)(dk->dk_indices); + size_t index = (size_t)1 << dk->dk_log2_index_bytes; + return (&indices[index]); +} +static inline PyDictKeyEntry* DK_ENTRIES(PyDictKeysObject *dk) { + assert(dk->dk_kind == DICT_KEYS_GENERAL); + return (PyDictKeyEntry*)_DK_ENTRIES(dk); +} +static inline PyDictUnicodeEntry* DK_UNICODE_ENTRIES(PyDictKeysObject *dk) { + assert(dk->dk_kind != DICT_KEYS_GENERAL); + return (PyDictUnicodeEntry*)_DK_ENTRIES(dk); +} + +#define DK_IS_UNICODE(dk) ((dk)->dk_kind != DICT_KEYS_GENERAL) -#define DICT_MAX_WATCHERS 8 #define DICT_VERSION_INCREMENT (1 << DICT_MAX_WATCHERS) #define DICT_VERSION_MASK (DICT_VERSION_INCREMENT - 1) -#define DICT_NEXT_VERSION() (_pydict_global_version += DICT_VERSION_INCREMENT) +#define DICT_NEXT_VERSION(INTERP) \ + 
((INTERP)->dict_state.global_version += DICT_VERSION_INCREMENT) void _PyDict_SendEvent(int watcher_bits, @@ -168,20 +220,23 @@ _PyDict_SendEvent(int watcher_bits, PyObject *value); static inline uint64_t -_PyDict_NotifyEvent(PyDict_WatchEvent event, +_PyDict_NotifyEvent(PyInterpreterState *interp, + PyDict_WatchEvent event, PyDictObject *mp, PyObject *key, PyObject *value) { + assert(Py_REFCNT((PyObject*)mp) > 0); int watcher_bits = mp->ma_version_tag & DICT_VERSION_MASK; if (watcher_bits) { _PyDict_SendEvent(watcher_bits, event, mp, key, value); - return DICT_NEXT_VERSION() | watcher_bits; + return DICT_NEXT_VERSION(interp) | watcher_bits; } - return DICT_NEXT_VERSION(); + return DICT_NEXT_VERSION(interp); } extern PyObject *_PyObject_MakeDictFromInstanceAttributes(PyObject *obj, PyDictValues *values); +extern bool _PyObject_MakeInstanceAttributesFromDict(PyObject *obj, PyDictOrValues *dorv); extern PyObject *_PyDict_FromItems( PyObject *const *keys, Py_ssize_t keys_offset, PyObject *const *values, Py_ssize_t values_offset, diff --git a/Include/internal/pycore_dict_state.h b/Include/internal/pycore_dict_state.h new file mode 100644 index 000000000000000..ece0f10ca251707 --- /dev/null +++ b/Include/internal/pycore_dict_state.h @@ -0,0 +1,50 @@ +#ifndef Py_INTERNAL_DICT_STATE_H +#define Py_INTERNAL_DICT_STATE_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +#ifndef WITH_FREELISTS +// without freelists +# define PyDict_MAXFREELIST 0 +#endif + +#ifndef PyDict_MAXFREELIST +# define PyDict_MAXFREELIST 80 +#endif + +#define DICT_MAX_WATCHERS 8 + +struct _Py_dict_state { + /*Global counter used to set ma_version_tag field of dictionary. + * It is incremented each time that a dictionary is created and each + * time that a dictionary is modified. */ + uint64_t global_version; + uint32_t next_keys_version; + +#if PyDict_MAXFREELIST > 0 + /* Dictionary reuse scheme to save calls to malloc and free */ + PyDictObject *free_list[PyDict_MAXFREELIST]; + PyDictKeysObject *keys_free_list[PyDict_MAXFREELIST]; + int numfree; + int keys_numfree; +#endif + + PyDict_WatchCallback watchers[DICT_MAX_WATCHERS]; +}; + +#define _dict_state_INIT \ + { \ + .next_keys_version = 2, \ + } + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_DICT_STATE_H */ diff --git a/Include/internal/pycore_dtoa.h b/Include/internal/pycore_dtoa.h index c77cf6e46cc3c36..ac62a4d300720af 100644 --- a/Include/internal/pycore_dtoa.h +++ b/Include/internal/pycore_dtoa.h @@ -1,3 +1,5 @@ +#ifndef Py_INTERNAL_DTOA_H +#define Py_INTERNAL_DTOA_H #ifdef __cplusplus extern "C" { #endif @@ -11,18 +13,61 @@ extern "C" { #if _PY_SHORT_FLOAT_REPR == 1 +typedef uint32_t ULong; + +struct +Bigint { + struct Bigint *next; + int k, maxwds, sign, wds; + ULong x[1]; +}; + +#ifdef Py_USING_MEMORY_DEBUGGER + +struct _dtoa_state { + int _not_used; +}; +#define _dtoa_interp_state_INIT(INTERP) \ + {0} + +#else // !Py_USING_MEMORY_DEBUGGER + +/* The size of the Bigint freelist */ +#define Bigint_Kmax 7 + +#ifndef PRIVATE_MEM +#define PRIVATE_MEM 2304 +#endif +#define Bigint_PREALLOC_SIZE \ + ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double)) + +struct _dtoa_state { + /* p5s is a linked list of powers of 5 of the form 5**(2**i), i >= 2 */ + // XXX This should be freed during runtime fini. 
+ struct Bigint *p5s; + struct Bigint *freelist[Bigint_Kmax+1]; + double preallocated[Bigint_PREALLOC_SIZE]; + double *preallocated_next; +}; +#define _dtoa_state_INIT(INTERP) \ + { \ + .preallocated_next = (INTERP)->dtoa.preallocated, \ + } + +#endif // !Py_USING_MEMORY_DEBUGGER + + /* These functions are used by modules compiled as C extension like math: they must be exported. */ -PyAPI_FUNC(double) _Py_dg_strtod(const char *str, char **ptr); -PyAPI_FUNC(char *) _Py_dg_dtoa(double d, int mode, int ndigits, - int *decpt, int *sign, char **rve); -PyAPI_FUNC(void) _Py_dg_freedtoa(char *s); -PyAPI_FUNC(double) _Py_dg_stdnan(int sign); -PyAPI_FUNC(double) _Py_dg_infinity(int sign); +extern double _Py_dg_strtod(const char *str, char **ptr); +extern char* _Py_dg_dtoa(double d, int mode, int ndigits, + int *decpt, int *sign, char **rve); +extern void _Py_dg_freedtoa(char *s); #endif // _PY_SHORT_FLOAT_REPR == 1 #ifdef __cplusplus } #endif +#endif /* !Py_INTERNAL_DTOA_H */ diff --git a/Include/internal/pycore_emscripten_signal.h b/Include/internal/pycore_emscripten_signal.h index 8b3287d85da4b2e..754193e21dec5a5 100644 --- a/Include/internal/pycore_emscripten_signal.h +++ b/Include/internal/pycore_emscripten_signal.h @@ -3,6 +3,10 @@ #if defined(__EMSCRIPTEN__) +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + void _Py_CheckEmscriptenSignals(void); @@ -14,6 +18,7 @@ _Py_CheckEmscriptenSignalsPeriodically(void); #define _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY() _Py_CheckEmscriptenSignalsPeriodically() extern int Py_EMSCRIPTEN_SIGNAL_HANDLING; +extern int _Py_emscripten_signal_clock; #else diff --git a/Include/internal/pycore_emscripten_trampoline.h b/Include/internal/pycore_emscripten_trampoline.h new file mode 100644 index 000000000000000..e519c99ad86ccef --- /dev/null +++ b/Include/internal/pycore_emscripten_trampoline.h @@ -0,0 +1,81 @@ +#ifndef Py_EMSCRIPTEN_TRAMPOLINE_H +#define Py_EMSCRIPTEN_TRAMPOLINE_H + +#include "pycore_runtime.h" // _PyRuntimeState + +/** + * C function call trampolines to mitigate bad function pointer casts. + * + * Section 6.3.2.3, paragraph 8 reads: + * + * A pointer to a function of one type may be converted to a pointer to a + * function of another type and back again; the result shall compare equal to + * the original pointer. If a converted pointer is used to call a function + * whose type is not compatible with the pointed-to type, the behavior is + * undefined. + * + * Typical native ABIs ignore additional arguments or fill in missing values + * with 0/NULL in a function pointer cast. Compilers do not show warnings when a + * function pointer is explicitly cast to an incompatible type. + * + * Bad fpcasts are an issue in WebAssembly. WASM's indirect_call has strict + * function signature checks. Argument count, types, and return type must match. + * + * Third-party code unintentionally relies on problematic fpcasts. The call + * trampoline mitigates common occurrences of bad fpcasts on Emscripten. + */ + +#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE) + +void _Py_EmscriptenTrampoline_Init(_PyRuntimeState *runtime); + +PyObject* +_PyEM_TrampolineCall_JavaScript(PyCFunctionWithKeywords func, + PyObject* self, + PyObject* args, + PyObject* kw); + +PyObject* +_PyEM_TrampolineCall_Reflection(PyCFunctionWithKeywords func, + PyObject* self, + PyObject* args, + PyObject* kw); + +#define _PyEM_TrampolineCall(meth, self, args, kw) \ + ((_PyRuntime.wasm_type_reflection_available) ? 
\ + (_PyEM_TrampolineCall_Reflection((PyCFunctionWithKeywords)(meth), (self), (args), (kw))) : \ + (_PyEM_TrampolineCall_JavaScript((PyCFunctionWithKeywords)(meth), (self), (args), (kw)))) + +#define _PyCFunction_TrampolineCall(meth, self, args) \ + _PyEM_TrampolineCall( \ + (*(PyCFunctionWithKeywords)(void(*)(void))(meth)), (self), (args), NULL) + +#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \ + _PyEM_TrampolineCall((meth), (self), (args), (kw)) + +#define descr_set_trampoline_call(set, obj, value, closure) \ + ((int)_PyEM_TrampolineCall((PyCFunctionWithKeywords)(set), (obj), (value), (PyObject*)(closure))) + +#define descr_get_trampoline_call(get, obj, closure) \ + _PyEM_TrampolineCall((PyCFunctionWithKeywords)(get), (obj), (PyObject*)(closure), NULL) + + +#else // defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE) + +#define _Py_EmscriptenTrampoline_Init(runtime) + +#define _PyCFunction_TrampolineCall(meth, self, args) \ + (meth)((self), (args)) + +#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \ + (meth)((self), (args), (kw)) + +#define descr_set_trampoline_call(set, obj, value, closure) \ + (set)((obj), (value), (closure)) + +#define descr_get_trampoline_call(get, obj, closure) \ + (get)((obj), (closure)) + +#endif // defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE) + +#endif // ndef Py_EMSCRIPTEN_SIGNAL_H diff --git a/Include/internal/pycore_faulthandler.h b/Include/internal/pycore_faulthandler.h new file mode 100644 index 000000000000000..6dd7d8d7ca9792e --- /dev/null +++ b/Include/internal/pycore_faulthandler.h @@ -0,0 +1,99 @@ +#ifndef Py_INTERNAL_FAULTHANDLER_H +#define Py_INTERNAL_FAULTHANDLER_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#ifdef HAVE_SIGACTION +# include // sigaction +#endif + + +#ifndef MS_WINDOWS + /* register() is useless on Windows, because only SIGSEGV, SIGABRT and + SIGILL can be handled by the process, and these signals can only be used + with enable(), not using register() */ +# define FAULTHANDLER_USER +#endif + + +#ifdef HAVE_SIGACTION +/* Using an alternative stack requires sigaltstack() + and sigaction() SA_ONSTACK */ +# ifdef HAVE_SIGALTSTACK +# define FAULTHANDLER_USE_ALT_STACK +# endif +typedef struct sigaction _Py_sighandler_t; +#else +typedef PyOS_sighandler_t _Py_sighandler_t; +#endif // HAVE_SIGACTION + + +#ifdef FAULTHANDLER_USER +struct faulthandler_user_signal { + int enabled; + PyObject *file; + int fd; + int all_threads; + int chain; + _Py_sighandler_t previous; + PyInterpreterState *interp; +}; +#endif /* FAULTHANDLER_USER */ + + +struct _faulthandler_runtime_state { + struct { + int enabled; + PyObject *file; + int fd; + int all_threads; + PyInterpreterState *interp; +#ifdef MS_WINDOWS + void *exc_handler; +#endif + } fatal_error; + + struct { + PyObject *file; + int fd; + PY_TIMEOUT_T timeout_us; /* timeout in microseconds */ + int repeat; + PyInterpreterState *interp; + int exit; + char *header; + size_t header_len; + /* The main thread always holds this lock. It is only released when + faulthandler_thread() is interrupted before this thread exits, or at + Python exit. 
*/ + PyThread_type_lock cancel_event; + /* released by child thread when joined */ + PyThread_type_lock running; + } thread; + +#ifdef FAULTHANDLER_USER + struct faulthandler_user_signal *user_signals; +#endif + +#ifdef FAULTHANDLER_USE_ALT_STACK + stack_t stack; + stack_t old_stack; +#endif +}; + +#define _faulthandler_runtime_state_INIT \ + { \ + .fatal_error = { \ + .fd = -1, \ + }, \ + } + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_FAULTHANDLER_H */ diff --git a/Include/internal/pycore_fileutils.h b/Include/internal/pycore_fileutils.h index 3ce8108e4e04f15..2f89da2c6ecd916 100644 --- a/Include/internal/pycore_fileutils.h +++ b/Include/internal/pycore_fileutils.h @@ -5,10 +5,23 @@ extern "C" { #endif #ifndef Py_BUILD_CORE -# error "Py_BUILD_CORE must be defined to include this header" +# error "this header requires Py_BUILD_CORE define" #endif -#include /* struct lconv */ +#include // struct lconv + + +/* A routine to check if a file descriptor can be select()-ed. */ +#ifdef _MSC_VER + /* On Windows, any socket fd can be select()-ed, no matter how high */ + #define _PyIsSelectable_fd(FD) (1) +#else + #define _PyIsSelectable_fd(FD) ((unsigned int)(FD) < (unsigned int)FD_SETSIZE) +#endif + +struct _fileutils_state { + int force_ascii; +}; typedef enum { _Py_ERROR_UNKNOWN=0, @@ -22,8 +35,10 @@ typedef enum { _Py_ERROR_OTHER } _Py_error_handler; +// Export for '_testinternalcapi' shared extension PyAPI_FUNC(_Py_error_handler) _Py_GetErrorHandler(const char *errors); +// Export for '_testinternalcapi' shared extension PyAPI_FUNC(int) _Py_DecodeLocaleEx( const char *arg, wchar_t **wstr, @@ -32,6 +47,7 @@ PyAPI_FUNC(int) _Py_DecodeLocaleEx( int current_locale, _Py_error_handler errors); +// Export for '_testinternalcapi' shared extension PyAPI_FUNC(int) _Py_EncodeLocaleEx( const wchar_t *text, char **str, @@ -40,11 +56,11 @@ PyAPI_FUNC(int) _Py_EncodeLocaleEx( int current_locale, _Py_error_handler errors); -PyAPI_FUNC(char*) _Py_EncodeLocaleRaw( +extern char* _Py_EncodeLocaleRaw( const wchar_t *text, size_t *error_pos); -PyAPI_FUNC(PyObject *) _Py_device_encoding(int); +extern PyObject* _Py_device_encoding(int); #if defined(MS_WINDOWS) || defined(__APPLE__) /* On Windows, the count parameter of read() is an int (bpo-9015, bpo-9611). 
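The _PyIsSelectable_fd() macro added in the hunk above encodes a portability detail worth spelling out: on POSIX systems a descriptor can only be placed in an fd_set if it is below FD_SETSIZE, while on Windows any socket handle may be passed to select(). A minimal POSIX-only sketch of the same check, with IS_SELECTABLE as a local stand-in for the real macro:

#include <stddef.h>
#include <stdio.h>
#include <sys/select.h>   /* FD_SETSIZE */

/* Local stand-in mirroring the POSIX branch of _PyIsSelectable_fd(). */
#define IS_SELECTABLE(fd) ((unsigned int)(fd) < (unsigned int)FD_SETSIZE)

int
main(void)
{
    int fds[] = {0, FD_SETSIZE - 1, FD_SETSIZE, FD_SETSIZE + 100};
    for (size_t i = 0; i < sizeof(fds) / sizeof(fds[0]); i++) {
        printf("fd %4d selectable: %s\n", fds[i],
               IS_SELECTABLE(fds[i]) ? "yes" : "no");
    }
    return 0;
}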
@@ -61,7 +77,7 @@ PyAPI_FUNC(PyObject *) _Py_device_encoding(int); #ifdef MS_WINDOWS struct _Py_stat_struct { - unsigned long st_dev; + uint64_t st_dev; uint64_t st_ino; unsigned short st_mode; int st_nlink; @@ -75,54 +91,64 @@ struct _Py_stat_struct { int st_mtime_nsec; time_t st_ctime; int st_ctime_nsec; + time_t st_birthtime; + int st_birthtime_nsec; unsigned long st_file_attributes; unsigned long st_reparse_tag; + uint64_t st_ino_high; }; #else # define _Py_stat_struct stat #endif +// Export for 'mmap' shared extension PyAPI_FUNC(int) _Py_fstat( int fd, struct _Py_stat_struct *status); +// Export for 'mmap' shared extension PyAPI_FUNC(int) _Py_fstat_noraise( int fd, struct _Py_stat_struct *status); +// Export for '_tkinter' shared extension PyAPI_FUNC(int) _Py_stat( PyObject *path, struct stat *status); +// Export for 'select' shared extension (Solaris newDevPollObject()) PyAPI_FUNC(int) _Py_open( const char *pathname, int flags); +// Export for '_posixsubprocess' shared extension PyAPI_FUNC(int) _Py_open_noraise( const char *pathname, int flags); -PyAPI_FUNC(FILE *) _Py_wfopen( +extern FILE* _Py_wfopen( const wchar_t *path, const wchar_t *mode); -PyAPI_FUNC(Py_ssize_t) _Py_read( +extern Py_ssize_t _Py_read( int fd, void *buf, size_t count); +// Export for 'select' shared extension (Solaris devpoll_flush()) PyAPI_FUNC(Py_ssize_t) _Py_write( int fd, const void *buf, size_t count); +// Export for '_posixsubprocess' shared extension PyAPI_FUNC(Py_ssize_t) _Py_write_noraise( int fd, const void *buf, size_t count); #ifdef HAVE_READLINK -PyAPI_FUNC(int) _Py_wreadlink( +extern int _Py_wreadlink( const wchar_t *path, wchar_t *buf, /* Number of characters of 'buf' buffer @@ -131,7 +157,7 @@ PyAPI_FUNC(int) _Py_wreadlink( #endif #ifdef HAVE_REALPATH -PyAPI_FUNC(wchar_t*) _Py_wrealpath( +extern wchar_t* _Py_wrealpath( const wchar_t *path, wchar_t *resolved_path, /* Number of characters of 'resolved_path' buffer @@ -139,34 +165,38 @@ PyAPI_FUNC(wchar_t*) _Py_wrealpath( size_t resolved_path_len); #endif -PyAPI_FUNC(wchar_t*) _Py_wgetcwd( +extern wchar_t* _Py_wgetcwd( wchar_t *buf, /* Number of characters of 'buf' buffer including the trailing NUL character */ size_t buflen); -PyAPI_FUNC(int) _Py_get_inheritable(int fd); +extern int _Py_get_inheritable(int fd); +// Export for '_socket' shared extension PyAPI_FUNC(int) _Py_set_inheritable(int fd, int inheritable, int *atomic_flag_works); +// Export for '_posixsubprocess' shared extension PyAPI_FUNC(int) _Py_set_inheritable_async_safe(int fd, int inheritable, int *atomic_flag_works); +// Export for '_socket' shared extension PyAPI_FUNC(int) _Py_dup(int fd); -#ifndef MS_WINDOWS -PyAPI_FUNC(int) _Py_get_blocking(int fd); +extern int _Py_get_blocking(int fd); + +extern int _Py_set_blocking(int fd, int blocking); -PyAPI_FUNC(int) _Py_set_blocking(int fd, int blocking); -#else /* MS_WINDOWS */ -PyAPI_FUNC(void*) _Py_get_osfhandle_noraise(int fd); +#ifdef MS_WINDOWS +extern void* _Py_get_osfhandle_noraise(int fd); +// Export for '_testconsole' shared extension PyAPI_FUNC(void*) _Py_get_osfhandle(int fd); -PyAPI_FUNC(int) _Py_open_osfhandle_noraise(void *handle, int flags); +extern int _Py_open_osfhandle_noraise(void *handle, int flags); -PyAPI_FUNC(int) _Py_open_osfhandle(void *handle, int flags); +extern int _Py_open_osfhandle(void *handle, int flags); #endif /* MS_WINDOWS */ // This is used after getting NULL back from Py_DecodeLocale(). @@ -175,9 +205,9 @@ PyAPI_FUNC(int) _Py_open_osfhandle(void *handle, int flags); ? 
_PyStatus_ERR("cannot decode " NAME) \ : _PyStatus_NO_MEMORY() -PyAPI_DATA(int) _Py_HasFileSystemDefaultEncodeErrors; +extern int _Py_HasFileSystemDefaultEncodeErrors; -PyAPI_FUNC(int) _Py_DecodeUTF8Ex( +extern int _Py_DecodeUTF8Ex( const char *arg, Py_ssize_t arglen, wchar_t **wstr, @@ -185,7 +215,7 @@ PyAPI_FUNC(int) _Py_DecodeUTF8Ex( const char **reason, _Py_error_handler errors); -PyAPI_FUNC(int) _Py_EncodeUTF8Ex( +extern int _Py_EncodeUTF8Ex( const wchar_t *text, char **str, size_t *error_pos, @@ -193,7 +223,7 @@ PyAPI_FUNC(int) _Py_EncodeUTF8Ex( int raw_malloc, _Py_error_handler errors); -PyAPI_FUNC(wchar_t*) _Py_DecodeUTF8_surrogateescape( +extern wchar_t* _Py_DecodeUTF8_surrogateescape( const char *arg, Py_ssize_t arglen, size_t *wlen); @@ -201,25 +231,26 @@ PyAPI_FUNC(wchar_t*) _Py_DecodeUTF8_surrogateescape( extern int _Py_wstat(const wchar_t *, struct stat *); -PyAPI_FUNC(int) _Py_GetForceASCII(void); +extern int _Py_GetForceASCII(void); /* Reset "force ASCII" mode (if it was initialized). This function should be called when Python changes the LC_CTYPE locale, so the "force ASCII" mode can be detected again on the new locale encoding. */ -PyAPI_FUNC(void) _Py_ResetForceASCII(void); +extern void _Py_ResetForceASCII(void); -PyAPI_FUNC(int) _Py_GetLocaleconvNumeric( +extern int _Py_GetLocaleconvNumeric( struct lconv *lc, PyObject **decimal_point, PyObject **thousands_sep); +// Export for '_posixsubprocess' (on macOS) PyAPI_FUNC(void) _Py_closerange(int first, int last); -PyAPI_FUNC(wchar_t*) _Py_GetLocaleEncoding(void); -PyAPI_FUNC(PyObject*) _Py_GetLocaleEncodingObject(void); +extern wchar_t* _Py_GetLocaleEncoding(void); +extern PyObject* _Py_GetLocaleEncodingObject(void); #ifdef HAVE_NON_UNICODE_WCHAR_T_REPRESENTATION extern int _Py_LocaleUsesNonUnicodeWchar(void); @@ -238,14 +269,26 @@ extern int _Py_abspath(const wchar_t *path, wchar_t **abspath_p); #ifdef MS_WINDOWS extern int _PyOS_getfullpathname(const wchar_t *path, wchar_t **abspath_p); #endif -extern wchar_t * _Py_join_relfile(const wchar_t *dirname, - const wchar_t *relfile); +extern wchar_t* _Py_join_relfile(const wchar_t *dirname, + const wchar_t *relfile); extern int _Py_add_relfile(wchar_t *dirname, const wchar_t *relfile, size_t bufsize); extern size_t _Py_find_basename(const wchar_t *filename); -PyAPI_FUNC(wchar_t *) _Py_normpath(wchar_t *path, Py_ssize_t size); +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(wchar_t*) _Py_normpath(wchar_t *path, Py_ssize_t size); + +extern wchar_t *_Py_normpath_and_size(wchar_t *path, Py_ssize_t size, Py_ssize_t *length); + +// The Windows Games API family does not provide these functions +// so provide our own implementations. Remove them in case they get added +// to the Games API family +#if defined(MS_WINDOWS_GAMES) && !defined(MS_WINDOWS_DESKTOP) +#include // HRESULT + +extern HRESULT PathCchSkipRoot(const wchar_t *pszPath, const wchar_t **ppszRootEnd); +#endif /* defined(MS_WINDOWS_GAMES) && !defined(MS_WINDOWS_DESKTOP) */ // Macros to protect CRT calls against instant termination when passed an // invalid parameter (bpo-23524). IPH stands for Invalid Parameter Handler. 
@@ -269,6 +312,14 @@ PyAPI_FUNC(wchar_t *) _Py_normpath(wchar_t *path, Py_ssize_t size); # define _Py_END_SUPPRESS_IPH #endif /* _MSC_VER >= 1900 */ +// Export for 'select' shared extension (Argument Clinic code) +PyAPI_FUNC(int) _PyLong_FileDescriptor_Converter(PyObject *, void *); + +// Export for test_peg_generator +PyAPI_FUNC(char*) _Py_UniversalNewlineFgetsWithSize(char *, int, FILE*, PyObject *, size_t*); + +extern int _PyFile_Flush(PyObject *); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_fileutils_windows.h b/Include/internal/pycore_fileutils_windows.h new file mode 100644 index 000000000000000..b79aa9fb4653765 --- /dev/null +++ b/Include/internal/pycore_fileutils_windows.h @@ -0,0 +1,98 @@ +#ifndef Py_INTERNAL_FILEUTILS_WINDOWS_H +#define Py_INTERNAL_FILEUTILS_WINDOWS_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#ifdef MS_WINDOWS + +#if !defined(NTDDI_WIN10_NI) || !(NTDDI_VERSION >= NTDDI_WIN10_NI) +typedef struct _FILE_STAT_BASIC_INFORMATION { + LARGE_INTEGER FileId; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER AllocationSize; + LARGE_INTEGER EndOfFile; + ULONG FileAttributes; + ULONG ReparseTag; + ULONG NumberOfLinks; + ULONG DeviceType; + ULONG DeviceCharacteristics; + ULONG Reserved; + LARGE_INTEGER VolumeSerialNumber; + FILE_ID_128 FileId128; +} FILE_STAT_BASIC_INFORMATION; + +typedef enum _FILE_INFO_BY_NAME_CLASS { + FileStatByNameInfo, + FileStatLxByNameInfo, + FileCaseSensitiveByNameInfo, + FileStatBasicByNameInfo, + MaximumFileInfoByNameClass +} FILE_INFO_BY_NAME_CLASS; +#endif + +typedef BOOL (WINAPI *PGetFileInformationByName)( + PCWSTR FileName, + FILE_INFO_BY_NAME_CLASS FileInformationClass, + PVOID FileInfoBuffer, + ULONG FileInfoBufferSize +); + +static inline BOOL _Py_GetFileInformationByName( + PCWSTR FileName, + FILE_INFO_BY_NAME_CLASS FileInformationClass, + PVOID FileInfoBuffer, + ULONG FileInfoBufferSize +) { + static PGetFileInformationByName GetFileInformationByName = NULL; + static int GetFileInformationByName_init = -1; + + if (GetFileInformationByName_init < 0) { + HMODULE hMod = LoadLibraryW(L"api-ms-win-core-file-l2-1-4"); + GetFileInformationByName_init = 0; + if (hMod) { + GetFileInformationByName = (PGetFileInformationByName)GetProcAddress( + hMod, "GetFileInformationByName"); + if (GetFileInformationByName) { + GetFileInformationByName_init = 1; + } else { + FreeLibrary(hMod); + } + } + } + + if (GetFileInformationByName_init <= 0) { + SetLastError(ERROR_NOT_SUPPORTED); + return FALSE; + } + return GetFileInformationByName(FileName, FileInformationClass, FileInfoBuffer, FileInfoBufferSize); +} + +static inline BOOL _Py_GetFileInformationByName_ErrorIsTrustworthy(int error) +{ + switch(error) { + case ERROR_FILE_NOT_FOUND: + case ERROR_PATH_NOT_FOUND: + case ERROR_NOT_READY: + case ERROR_BAD_NET_NAME: + case ERROR_BAD_NETPATH: + case ERROR_BAD_PATHNAME: + case ERROR_INVALID_NAME: + case ERROR_FILENAME_EXCED_RANGE: + return TRUE; + case ERROR_NOT_SUPPORTED: + return FALSE; + } + return FALSE; +} + +#endif + +#endif diff --git a/Include/internal/pycore_floatobject.h b/Include/internal/pycore_floatobject.h index 8a655543329f335..4e5474841bc25da 100644 --- a/Include/internal/pycore_floatobject.h +++ b/Include/internal/pycore_floatobject.h @@ -9,6 +9,8 @@ extern "C" { #endif +#include "pycore_unicodeobject.h" // _PyUnicodeWriter + /* runtime lifecycle */ 
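
Looking back at the new _Py_GetFileInformationByName() wrapper in pycore_fileutils_windows.h above: the companion _Py_GetFileInformationByName_ErrorIsTrustworthy() check exists so a caller can decide whether a fast-path failure already describes the file, or whether it must retry with a conventional handle-based stat. The sketch below only illustrates that decision flow; stat_by_name and classic_stat are hypothetical names, and the conversion of the returned information is omitted.

/* Sketch only: intended decision flow around the helpers above.
 * classic_stat() stands in for the ordinary CreateFileW /
 * GetFileInformationByHandle fallback. */
#include <windows.h>                     // GetLastError()
#include "pycore_fileutils.h"            // struct _Py_stat_struct
#include "pycore_fileutils_windows.h"    // _Py_GetFileInformationByName()

static int classic_stat(PCWSTR path, struct _Py_stat_struct *result);

static int
stat_by_name(PCWSTR path, struct _Py_stat_struct *result)
{
    FILE_STAT_BASIC_INFORMATION info;

    if (_Py_GetFileInformationByName(path, FileStatBasicByNameInfo,
                                     &info, sizeof(info))) {
        /* Fast path succeeded: translate `info` into `*result` here. */
        return 0;
    }
    if (_Py_GetFileInformationByName_ErrorIsTrustworthy(GetLastError())) {
        /* The error genuinely describes the file (not found, bad name,
         * ...): report it without opening a handle at all. */
        return -1;
    }
    /* Untrustworthy error (e.g. ERROR_NOT_SUPPORTED when the API set is
     * unavailable): fall back to the classic handle-based stat. */
    return classic_stat(path, result);
}
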
extern void _PyFloat_InitState(PyInterpreterState *); @@ -19,6 +21,18 @@ extern void _PyFloat_FiniType(PyInterpreterState *); /* other API */ +enum _py_float_format_type { + _py_float_format_unknown, + _py_float_format_ieee_big_endian, + _py_float_format_ieee_little_endian, +}; + +struct _Py_float_runtime_state { + enum _py_float_format_type float_format; + enum _py_float_format_type double_format; +}; + + #ifndef WITH_FREELISTS // without freelists # define PyFloat_MAXFREELIST 0 @@ -41,18 +55,25 @@ struct _Py_float_state { void _PyFloat_ExactDealloc(PyObject *op); -PyAPI_FUNC(void) _PyFloat_DebugMallocStats(FILE* out); +extern void _PyFloat_DebugMallocStats(FILE* out); /* Format the object based on the format_spec, as defined in PEP 3101 (Advanced String Formatting). */ -PyAPI_FUNC(int) _PyFloat_FormatAdvancedWriter( +extern int _PyFloat_FormatAdvancedWriter( _PyUnicodeWriter *writer, PyObject *obj, PyObject *format_spec, Py_ssize_t start, Py_ssize_t end); +extern PyObject* _Py_string_to_number_with_underscores( + const char *str, Py_ssize_t len, const char *what, PyObject *obj, void *arg, + PyObject *(*innerfunc)(const char *, Py_ssize_t, void *)); + +extern double _Py_parse_inf_or_nan(const char *p, char **endptr); + + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_flowgraph.h b/Include/internal/pycore_flowgraph.h new file mode 100644 index 000000000000000..58fed46886ea454 --- /dev/null +++ b/Include/internal/pycore_flowgraph.h @@ -0,0 +1,43 @@ +#ifndef Py_INTERNAL_CFG_H +#define Py_INTERNAL_CFG_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_opcode_utils.h" +#include "pycore_compile.h" + +typedef struct { + int id; +} _PyCfgJumpTargetLabel; + +struct _PyCfgBuilder; + +int _PyCfgBuilder_UseLabel(struct _PyCfgBuilder *g, _PyCfgJumpTargetLabel lbl); +int _PyCfgBuilder_Addop(struct _PyCfgBuilder *g, int opcode, int oparg, _PyCompilerSrcLocation loc); + +struct _PyCfgBuilder* _PyCfgBuilder_New(void); +void _PyCfgBuilder_Free(struct _PyCfgBuilder *g); +int _PyCfgBuilder_CheckSize(struct _PyCfgBuilder* g); + +int _PyCfg_OptimizeCodeUnit(struct _PyCfgBuilder *g, PyObject *consts, PyObject *const_cache, + int nlocals, int nparams, int firstlineno); + +int _PyCfg_ToInstructionSequence(struct _PyCfgBuilder *g, _PyCompile_InstructionSequence *seq); +int _PyCfg_OptimizedCfgToInstructionSequence(struct _PyCfgBuilder *g, _PyCompile_CodeUnitMetadata *umd, + int code_flags, int *stackdepth, int *nlocalsplus, + _PyCompile_InstructionSequence *seq); + +PyCodeObject * +_PyAssemble_MakeCodeObject(_PyCompile_CodeUnitMetadata *u, PyObject *const_cache, + PyObject *consts, int maxdepth, _PyCompile_InstructionSequence *instrs, + int nlocalsplus, int code_flags, PyObject *filename); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_CFG_H */ diff --git a/Include/internal/pycore_format.h b/Include/internal/pycore_format.h index 1899609e77ef201..1b8d57539ca505f 100644 --- a/Include/internal/pycore_format.h +++ b/Include/internal/pycore_format.h @@ -14,14 +14,12 @@ extern "C" { * F_BLANK ' ' * F_ALT '#' * F_ZERO '0' - * F_NO_NEG_0 'z' */ #define F_LJUST (1<<0) #define F_SIGN (1<<1) #define F_BLANK (1<<2) #define F_ALT (1<<3) #define F_ZERO (1<<4) -#define F_NO_NEG_0 (1<<5) #ifdef __cplusplus } diff --git a/Include/internal/pycore_frame.h b/Include/internal/pycore_frame.h index 5bd0a7f2f517eff..0f9e7333cf1e1c0 100644 --- a/Include/internal/pycore_frame.h +++ b/Include/internal/pycore_frame.h 
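
For orientation, the pycore_flowgraph.h API added above is meant to be driven by the compiler in a fixed order: populate a control-flow graph, optimize it, flatten it into an instruction sequence, then assemble the final code object. The sketch below only shows that call order and assumes the usual 0-on-success / negative-on-error convention; it is not the actual compile.c driver, assemble_unit_sketch is a hypothetical name, and instruction emission and cleanup of the sequence's storage are elided.

/* Call-order sketch for the flowgraph API above (assumptions noted in the
 * lead-in; not the real compiler driver). */
#include "pycore_flowgraph.h"    // _PyCfg_OptimizedCfgToInstructionSequence()

static PyCodeObject *
assemble_unit_sketch(struct _PyCfgBuilder *g, _PyCompile_CodeUnitMetadata *umd,
                     PyObject *consts, PyObject *const_cache,
                     int code_flags, PyObject *filename)
{
    /* 1. The caller has already filled `g` via _PyCfgBuilder_UseLabel()
     *    and _PyCfgBuilder_Addop() while lowering the AST. */

    /* 2. Optimize the CFG and flatten it into a linear sequence. */
    int stackdepth, nlocalsplus;
    _PyCompile_InstructionSequence seq = {0};
    if (_PyCfg_OptimizedCfgToInstructionSequence(g, umd, code_flags,
                                                 &stackdepth, &nlocalsplus,
                                                 &seq) < 0) {
        return NULL;
    }

    /* 3. Assemble the sequence into a code object (freeing the sequence's
     *    internal storage is omitted in this sketch). */
    return _PyAssemble_MakeCodeObject(umd, const_cache, consts, stackdepth,
                                      &seq, nlocalsplus, code_flags, filename);
}
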
@@ -4,9 +4,13 @@ extern "C" { #endif +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + #include -#include -#include "pycore_code.h" // STATS +#include // offsetof() +#include "pycore_code.h" // STATS /* See Objects/frame_layout.md for an explanation of the frame stack * including explanation of the PyFrameObject and _PyInterpreterFrame @@ -32,56 +36,60 @@ extern PyFrameObject* _PyFrame_New_NoTrack(PyCodeObject *code); /* other API */ typedef enum _framestate { - FRAME_CREATED = -2, - FRAME_SUSPENDED = -1, + FRAME_CREATED = -3, + FRAME_SUSPENDED = -2, + FRAME_SUSPENDED_YIELD_FROM = -1, FRAME_EXECUTING = 0, FRAME_COMPLETED = 1, FRAME_CLEARED = 4 } PyFrameState; +#define FRAME_STATE_SUSPENDED(S) ((S) == FRAME_SUSPENDED || (S) == FRAME_SUSPENDED_YIELD_FROM) +#define FRAME_STATE_FINISHED(S) ((S) >= FRAME_COMPLETED) + enum _frameowner { FRAME_OWNED_BY_THREAD = 0, FRAME_OWNED_BY_GENERATOR = 1, - FRAME_OWNED_BY_FRAME_OBJECT = 2 + FRAME_OWNED_BY_FRAME_OBJECT = 2, + FRAME_OWNED_BY_CSTACK = 3, }; typedef struct _PyInterpreterFrame { - /* "Specials" section */ - PyObject *f_funcobj; /* Strong reference */ - PyObject *f_globals; /* Borrowed reference */ - PyObject *f_builtins; /* Borrowed reference */ - PyObject *f_locals; /* Strong reference, may be NULL */ - PyCodeObject *f_code; /* Strong reference */ - PyFrameObject *frame_obj; /* Strong reference, may be NULL */ - /* Linkage section */ + PyObject *f_executable; /* Strong reference */ struct _PyInterpreterFrame *previous; - // NOTE: This is not necessarily the last instruction started in the given - // frame. Rather, it is the code unit *prior to* the *next* instruction. For - // example, it may be an inline CACHE entry, an instruction we just jumped - // over, or (in the case of a newly-created frame) a totally invalid value: - _Py_CODEUNIT *prev_instr; - int stacktop; /* Offset of TOS from localsplus */ - bool is_entry; // Whether this is the "root" frame for the current _PyCFrame. + PyObject *f_funcobj; /* Strong reference. Only valid if not on C stack */ + PyObject *f_globals; /* Borrowed reference. Only valid if not on C stack */ + PyObject *f_builtins; /* Borrowed reference. Only valid if not on C stack */ + PyObject *f_locals; /* Strong reference, may be NULL. Only valid if not on C stack */ + PyFrameObject *frame_obj; /* Strong reference, may be NULL. 
Only valid if not on C stack */ + _Py_CODEUNIT *instr_ptr; /* Instruction currently executing (or about to begin) */ + int stacktop; /* Offset of TOS from localsplus */ + uint16_t return_offset; /* Only relevant during a function call */ char owner; /* Locals and stack */ PyObject *localsplus[1]; } _PyInterpreterFrame; #define _PyInterpreterFrame_LASTI(IF) \ - ((int)((IF)->prev_instr - _PyCode_CODE((IF)->f_code))) + ((int)((IF)->instr_ptr - _PyCode_CODE(_PyFrame_GetCode(IF)))) + +static inline PyCodeObject *_PyFrame_GetCode(_PyInterpreterFrame *f) { + assert(PyCode_Check(f->f_executable)); + return (PyCodeObject *)f->f_executable; +} static inline PyObject **_PyFrame_Stackbase(_PyInterpreterFrame *f) { - return f->localsplus + f->f_code->co_nlocalsplus; + return f->localsplus + _PyFrame_GetCode(f)->co_nlocalsplus; } static inline PyObject *_PyFrame_StackPeek(_PyInterpreterFrame *f) { - assert(f->stacktop > f->f_code->co_nlocalsplus); + assert(f->stacktop > _PyFrame_GetCode(f)->co_nlocalsplus); assert(f->localsplus[f->stacktop-1] != NULL); return f->localsplus[f->stacktop-1]; } static inline PyObject *_PyFrame_StackPop(_PyInterpreterFrame *f) { - assert(f->stacktop > f->f_code->co_nlocalsplus); + assert(f->stacktop > _PyFrame_GetCode(f)->co_nlocalsplus); f->stacktop--; return f->localsplus[f->stacktop]; } @@ -91,26 +99,42 @@ static inline void _PyFrame_StackPush(_PyInterpreterFrame *f, PyObject *value) { f->stacktop++; } -#define FRAME_SPECIALS_SIZE ((sizeof(_PyInterpreterFrame)-1)/sizeof(PyObject *)) +#define FRAME_SPECIALS_SIZE ((int)((sizeof(_PyInterpreterFrame)-1)/sizeof(PyObject *))) + +static inline int +_PyFrame_NumSlotsForCodeObject(PyCodeObject *code) +{ + /* This function needs to remain in sync with the calculation of + * co_framesize in Tools/build/deepfreeze.py */ + assert(code->co_framesize >= FRAME_SPECIALS_SIZE); + return code->co_framesize - FRAME_SPECIALS_SIZE; +} void _PyFrame_Copy(_PyInterpreterFrame *src, _PyInterpreterFrame *dest); -/* Consumes reference to func and locals */ +/* Consumes reference to func and locals. + Does not initialize frame->previous, which happens + when frame is linked into the frame stack. + */ static inline void -_PyFrame_InitializeSpecials( +_PyFrame_Initialize( _PyInterpreterFrame *frame, PyFunctionObject *func, - PyObject *locals, PyCodeObject *code) + PyObject *locals, PyCodeObject *code, int null_locals_from) { frame->f_funcobj = (PyObject *)func; - frame->f_code = (PyCodeObject *)Py_NewRef(code); + frame->f_executable = Py_NewRef(code); frame->f_builtins = func->func_builtins; frame->f_globals = func->func_globals; frame->f_locals = locals; frame->stacktop = code->co_nlocalsplus; frame->frame_obj = NULL; - frame->prev_instr = _PyCode_CODE(code) - 1; - frame->is_entry = false; + frame->instr_ptr = _PyCode_CODE(code); + frame->return_offset = 0; frame->owner = FRAME_OWNED_BY_THREAD; + + for (int i = null_locals_from; i < code->co_nlocalsplus; i++) { + frame->localsplus[i] = NULL; + } } /* Gets the pointer to the locals array @@ -122,10 +146,16 @@ _PyFrame_GetLocalsArray(_PyInterpreterFrame *frame) return frame->localsplus; } +/* Fetches the stack pointer, and sets stacktop to -1. + Having stacktop <= 0 ensures that invalid + values are not visible to the cycle GC. + We choose -1 rather than 0 to assist debugging. 
*/ static inline PyObject** _PyFrame_GetStackPointer(_PyInterpreterFrame *frame) { - return frame->localsplus+frame->stacktop; + PyObject **sp = frame->localsplus + frame->stacktop; + frame->stacktop = -1; + return sp; } static inline void @@ -145,8 +175,26 @@ _PyFrame_SetStackPointer(_PyInterpreterFrame *frame, PyObject **stack_pointer) static inline bool _PyFrame_IsIncomplete(_PyInterpreterFrame *frame) { + if (frame->owner == FRAME_OWNED_BY_CSTACK) { + return true; + } return frame->owner != FRAME_OWNED_BY_GENERATOR && - frame->prev_instr < _PyCode_CODE(frame->f_code) + frame->f_code->_co_firsttraceable; + frame->instr_ptr < _PyCode_CODE(_PyFrame_GetCode(frame)) + _PyFrame_GetCode(frame)->_co_firsttraceable; +} + +static inline _PyInterpreterFrame * +_PyFrame_GetFirstComplete(_PyInterpreterFrame *frame) +{ + while (frame && _PyFrame_IsIncomplete(frame)) { + frame = frame->previous; + } + return frame; +} + +static inline _PyInterpreterFrame * +_PyThreadState_GetFrame(PyThreadState *tstate) +{ + return _PyFrame_GetFirstComplete(tstate->current_frame); } /* For use by _PyFrame_GetFrameObject @@ -179,11 +227,14 @@ _PyFrame_GetFrameObject(_PyInterpreterFrame *frame) * frames like the ones in generators and coroutines. */ void -_PyFrame_Clear(_PyInterpreterFrame * frame); +_PyFrame_ClearExceptCode(_PyInterpreterFrame * frame); int _PyFrame_Traverse(_PyInterpreterFrame *frame, visitproc visit, void *arg); +PyObject * +_PyFrame_GetLocals(_PyInterpreterFrame *frame, int include_hidden); + int _PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame); @@ -211,18 +262,40 @@ void _PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame *frame); * Must be guarded by _PyThreadState_HasStackSpace() * Consumes reference to func. */ static inline _PyInterpreterFrame * -_PyFrame_PushUnchecked(PyThreadState *tstate, PyFunctionObject *func) +_PyFrame_PushUnchecked(PyThreadState *tstate, PyFunctionObject *func, int null_locals_from) { CALL_STAT_INC(frames_pushed); PyCodeObject *code = (PyCodeObject *)func->func_code; _PyInterpreterFrame *new_frame = (_PyInterpreterFrame *)tstate->datastack_top; tstate->datastack_top += code->co_framesize; assert(tstate->datastack_top < tstate->datastack_limit); - _PyFrame_InitializeSpecials(new_frame, func, NULL, code); + _PyFrame_Initialize(new_frame, func, NULL, code, null_locals_from); return new_frame; } -int _PyInterpreterFrame_GetLine(_PyInterpreterFrame *frame); +/* Pushes a trampoline frame without checking for space. 
+ * Must be guarded by _PyThreadState_HasStackSpace() */ +static inline _PyInterpreterFrame * +_PyFrame_PushTrampolineUnchecked(PyThreadState *tstate, PyCodeObject *code, int stackdepth) +{ + CALL_STAT_INC(frames_pushed); + _PyInterpreterFrame *frame = (_PyInterpreterFrame *)tstate->datastack_top; + tstate->datastack_top += code->co_framesize; + assert(tstate->datastack_top < tstate->datastack_limit); + frame->f_funcobj = Py_None; + frame->f_executable = Py_NewRef(code); +#ifdef Py_DEBUG + frame->f_builtins = NULL; + frame->f_globals = NULL; +#endif + frame->f_locals = NULL; + frame->stacktop = code->co_nlocalsplus + stackdepth; + frame->frame_obj = NULL; + frame->instr_ptr = _PyCode_CODE(code); + frame->owner = FRAME_OWNED_BY_THREAD; + frame->return_offset = 0; + return frame; +} static inline PyGenObject *_PyFrame_GetGenerator(_PyInterpreterFrame *frame) diff --git a/Include/internal/pycore_function.h b/Include/internal/pycore_function.h index 1c87aa31ddb611e..3f3da8a44b77e4c 100644 --- a/Include/internal/pycore_function.h +++ b/Include/internal/pycore_function.h @@ -8,9 +8,32 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +extern PyObject* _PyFunction_Vectorcall( + PyObject *func, + PyObject *const *stack, + size_t nargsf, + PyObject *kwnames); + +#define FUNC_MAX_WATCHERS 8 + +#define FUNC_VERSION_CACHE_SIZE (1<<12) /* Must be a power of 2 */ +struct _py_func_state { + uint32_t next_version; + // Borrowed references to function objects whose + // func_version % FUNC_VERSION_CACHE_SIZE + // once was equal to the index in the table. + // They are cleared when the function is deallocated. + PyFunctionObject *func_version_cache[FUNC_VERSION_CACHE_SIZE]; +}; + extern PyFunctionObject* _PyFunction_FromConstructor(PyFrameConstructor *constr); extern uint32_t _PyFunction_GetVersionForCurrentState(PyFunctionObject *func); +extern void _PyFunction_SetVersion(PyFunctionObject *func, uint32_t version); +PyFunctionObject *_PyFunction_LookupByVersion(uint32_t version); + +extern PyObject *_Py_set_function_type_params( + PyThreadState* unused, PyObject *func, PyObject *type_params); #ifdef __cplusplus } diff --git a/Include/internal/pycore_gc.h b/Include/internal/pycore_gc.h index b3abe2030a03dac..2d33aa76d78229f 100644 --- a/Include/internal/pycore_gc.h +++ b/Include/internal/pycore_gc.h @@ -19,10 +19,21 @@ typedef struct { uintptr_t _gc_prev; } PyGC_Head; +#define _PyGC_Head_UNUSED PyGC_Head + + +/* Get an object's GC head */ static inline PyGC_Head* _Py_AS_GC(PyObject *op) { - return (_Py_CAST(PyGC_Head*, op) - 1); + char *gc = ((char*)op) - sizeof(PyGC_Head); + return (PyGC_Head*)gc; } -#define _PyGC_Head_UNUSED PyGC_Head + +/* Get the object given the GC head */ +static inline PyObject* _Py_FROM_GC(PyGC_Head *gc) { + char *op = ((char *)gc) + sizeof(PyGC_Head); + return (PyObject *)op; +} + /* True if the object is currently tracked by the GC. */ static inline int _PyObject_GC_IS_TRACKED(PyObject *op) { @@ -57,19 +68,19 @@ static inline int _PyObject_GC_MAY_BE_TRACKED(PyObject *obj) { // But it is always 0 for normal code. static inline PyGC_Head* _PyGCHead_NEXT(PyGC_Head *gc) { uintptr_t next = gc->_gc_next; - return _Py_CAST(PyGC_Head*, next); + return (PyGC_Head*)next; } static inline void _PyGCHead_SET_NEXT(PyGC_Head *gc, PyGC_Head *next) { - gc->_gc_next = _Py_CAST(uintptr_t, next); + gc->_gc_next = (uintptr_t)next; } // Lowest two bits of _gc_prev is used for _PyGC_PREV_MASK_* flags. 
static inline PyGC_Head* _PyGCHead_PREV(PyGC_Head *gc) { uintptr_t prev = (gc->_gc_prev & _PyGC_PREV_MASK); - return _Py_CAST(PyGC_Head*, prev); + return (PyGC_Head*)prev; } static inline void _PyGCHead_SET_PREV(PyGC_Head *gc, PyGC_Head *prev) { - uintptr_t uprev = _Py_CAST(uintptr_t, prev); + uintptr_t uprev = (uintptr_t)prev; assert((uprev & ~_PyGC_PREV_MASK) == 0); gc->_gc_prev = ((gc->_gc_prev & ~_PyGC_PREV_MASK) | uprev); } diff --git a/Include/internal/pycore_genobject.h b/Include/internal/pycore_genobject.h index 42db0d87d1f40a0..cf58a2750a31f9b 100644 --- a/Include/internal/pycore_genobject.h +++ b/Include/internal/pycore_genobject.h @@ -9,8 +9,20 @@ extern "C" { #endif extern PyObject *_PyGen_yf(PyGenObject *); +extern void _PyGen_Finalize(PyObject *self); + +// Export for '_asyncio' shared extension +PyAPI_FUNC(int) _PyGen_SetStopIterationValue(PyObject *); + +// Export for '_asyncio' shared extension +PyAPI_FUNC(int) _PyGen_FetchStopIterationValue(PyObject **); + extern PyObject *_PyCoro_GetAwaitableIter(PyObject *o); -extern PyObject *_PyAsyncGenValueWrapperNew(PyObject *); +extern PyObject *_PyAsyncGenValueWrapperNew(PyThreadState *state, PyObject *); + +extern PyTypeObject _PyCoroWrapper_Type; +extern PyTypeObject _PyAsyncGenWrappedValue_Type; +extern PyTypeObject _PyAsyncGenAThrow_Type; /* runtime lifecycle */ diff --git a/Include/internal/pycore_gil.h b/Include/internal/pycore_gil.h index 8ebad37b686cd4c..19b0d23a68568a2 100644 --- a/Include/internal/pycore_gil.h +++ b/Include/internal/pycore_gil.h @@ -8,8 +8,7 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_atomic.h" /* _Py_atomic_address */ -#include "pycore_condvar.h" /* PyCOND_T */ +#include "pycore_condvar.h" // PyCOND_T #ifndef Py_HAVE_CONDVAR # error You need either a POSIX-compatible or a Windows system! @@ -25,10 +24,10 @@ struct _gil_runtime_state { unsigned long interval; /* Last PyThreadState holding / having held the GIL. This helps us know whether anyone else was scheduled after we dropped the GIL. */ - _Py_atomic_address last_holder; + PyThreadState* last_holder; /* Whether the GIL is already taken (-1 if uninitialized). This is atomic because it can be read without any lock taken in ceval.c. */ - _Py_atomic_int locked; + int locked; /* Number of GIL switches since the beginning. */ unsigned long switch_number; /* This condition variable allows one or several threads to wait diff --git a/Include/internal/pycore_global_objects.h b/Include/internal/pycore_global_objects.h index 82e89db7b1b7507..327fcc24cb29f11 100644 --- a/Include/internal/pycore_global_objects.h +++ b/Include/internal/pycore_global_objects.h @@ -8,8 +8,12 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_gc.h" // PyGC_Head +#include "pycore_context.h" // _PyContextTokenMissing +#include "pycore_gc.h" // _PyGC_Head_UNUSED #include "pycore_global_strings.h" // struct _Py_global_strings +#include "pycore_hamt.h" // PyHamtNode_Bitmap +#include "pycore_hashtable.h" // _Py_hashtable_t +#include "pycore_typeobject.h" // pytype_slotdef // These would be in pycore_long.h if it weren't for an include cycle. @@ -21,11 +25,16 @@ extern "C" { // All others must be per-interpreter. #define _Py_GLOBAL_OBJECT(NAME) \ - _PyRuntime.global_objects.NAME + _PyRuntime.static_objects.NAME #define _Py_SINGLETON(NAME) \ _Py_GLOBAL_OBJECT(singletons.NAME) -struct _Py_global_objects { +struct _Py_cached_objects { + // XXX We could statically allocate the hashtable. 
+ _Py_hashtable_t *interned_strings; +}; + +struct _Py_static_objects { struct { /* Small integers are preallocated in this array so that they * can be shared. @@ -44,9 +53,49 @@ struct _Py_global_objects { _PyGC_Head_UNUSED _tuple_empty_gc_not_used; PyTupleObject tuple_empty; + + _PyGC_Head_UNUSED _hamt_bitmap_node_empty_gc_not_used; + PyHamtNode_Bitmap hamt_bitmap_node_empty; + _PyContextTokenMissing context_token_missing; } singletons; +}; + +#define _Py_INTERP_CACHED_OBJECT(interp, NAME) \ + (interp)->cached_objects.NAME + +struct _Py_interp_cached_objects { + PyObject *interned_strings; + + /* AST */ + PyObject *str_replace_inf; - PyObject *interned; + /* object.__reduce__ */ + PyObject *objreduce; + PyObject *type_slots_pname; + pytype_slotdef *type_slots_ptrs[MAX_EQUIV]; + + /* TypeVar and related types */ + PyTypeObject *generic_type; + PyTypeObject *typevar_type; + PyTypeObject *typevartuple_type; + PyTypeObject *paramspec_type; + PyTypeObject *paramspecargs_type; + PyTypeObject *paramspeckwargs_type; +}; + +#define _Py_INTERP_STATIC_OBJECT(interp, NAME) \ + (interp)->static_objects.NAME +#define _Py_INTERP_SINGLETON(interp, NAME) \ + _Py_INTERP_STATIC_OBJECT(interp, singletons.NAME) + +struct _Py_interp_static_objects { + struct { + int _not_used; + // hamt_empty is here instead of global because of its weakreflist. + _PyGC_Head_UNUSED _hamt_empty_gc_not_used; + PyHamtObject hamt_empty; + PyBaseExceptionObject last_resort_memory_error; + } singletons; }; diff --git a/Include/internal/pycore_global_objects_fini_generated.h b/Include/internal/pycore_global_objects_fini_generated.h new file mode 100644 index 000000000000000..89ec8cbbbcd6490 --- /dev/null +++ b/Include/internal/pycore_global_objects_fini_generated.h @@ -0,0 +1,1532 @@ +#ifndef Py_INTERNAL_GLOBAL_OBJECTS_FINI_GENERATED_INIT_H +#define Py_INTERNAL_GLOBAL_OBJECTS_FINI_GENERATED_INIT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#ifdef Py_DEBUG +static inline void +_PyStaticObject_CheckRefcnt(PyObject *obj) { + if (Py_REFCNT(obj) < _Py_IMMORTAL_REFCNT) { + fprintf(stderr, "Immortal Object has less refcnt than expected.\n"); + _PyObject_Dump(obj); + } +} +#endif + +/* The following is auto-generated by Tools/build/generate_global_objects.py. 
*/ +#ifdef Py_DEBUG +static inline void +_PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { + /* generated runtime-global */ + // (see pycore_runtime_init_generated.h) + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -5]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -4]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -3]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -2]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -1]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 0]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 1]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 2]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 3]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 4]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 5]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 6]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 7]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 8]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 9]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 10]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 11]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 12]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 13]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 14]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 15]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 16]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 17]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 18]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 19]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 20]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 21]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 22]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 23]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 24]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 25]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 26]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 27]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 28]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 29]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 30]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 31]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 32]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 33]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 34]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 35]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 36]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 37]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 38]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 39]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 40]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 41]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 42]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 43]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 44]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 45]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 46]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 47]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 48]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 49]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 50]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 51]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 52]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 53]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 54]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 55]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 56]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 57]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 58]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 59]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 60]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 61]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 62]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 63]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 64]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 65]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 66]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 67]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 68]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 69]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 70]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 71]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 72]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 73]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 74]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 75]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 76]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 77]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 78]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 79]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 80]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 81]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 82]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 83]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 84]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 85]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 86]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 87]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 88]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 89]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 90]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 91]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 92]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 93]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 94]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 95]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 96]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 97]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 98]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 99]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 100]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 101]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 102]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 103]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 104]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 105]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 106]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 107]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 108]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 109]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 110]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 111]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 112]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 113]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 114]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 115]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 116]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 117]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 118]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 119]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 120]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 121]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 122]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 123]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 124]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 125]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 126]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 127]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 129]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 130]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 131]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 132]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 133]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 134]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 135]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 136]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 137]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 138]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 139]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 140]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 141]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 142]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 143]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 144]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 145]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 146]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 147]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 148]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 149]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 150]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 151]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 152]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 153]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 154]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 155]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 156]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 157]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 158]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 159]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 160]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 161]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 162]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 163]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 164]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 165]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 166]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 167]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 168]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 169]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 170]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 171]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 172]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 173]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 174]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 175]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 176]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 177]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 178]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 179]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 180]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 181]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 182]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 183]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 184]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 185]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 186]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 187]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 188]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 189]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 190]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 191]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 192]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 193]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 194]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 195]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 196]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 197]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 198]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 199]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 200]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 201]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 202]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 203]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 204]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 205]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 206]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 207]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 208]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 209]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 210]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 211]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 212]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 213]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 214]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 215]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 216]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 217]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 218]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 219]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 220]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 221]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 222]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 223]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 224]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 225]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 226]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 227]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 228]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 229]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 230]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 231]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 232]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 233]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 234]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 235]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 236]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 237]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 238]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 239]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 240]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 241]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 242]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 243]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 244]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 245]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 246]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 247]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 248]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 249]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 250]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 251]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 252]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 253]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 254]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 255]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 256]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[0]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[1]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[2]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[3]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[4]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[5]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[6]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[7]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[8]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[9]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[10]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[11]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[12]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[13]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[14]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[15]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[16]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[17]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[18]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[19]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[20]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[21]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[22]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[23]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[24]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[25]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[26]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[27]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[28]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[29]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[30]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[31]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[32]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[33]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[34]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[35]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[36]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[37]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(bytes_characters)[38]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[39]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[40]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[41]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[42]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[43]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[44]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[45]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[46]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[47]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[48]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[49]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[50]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[51]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[52]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[53]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[54]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[55]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[56]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[57]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[58]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[59]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[60]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[61]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[62]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[63]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[64]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[65]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[66]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[67]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[68]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[69]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[70]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[71]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[72]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[73]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[74]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[75]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[76]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[77]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[78]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[79]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[80]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[81]); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[82]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[83]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[84]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[85]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[86]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[87]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[88]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[89]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[90]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[91]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[92]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[93]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[94]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[95]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[96]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[97]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[98]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[99]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[100]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[101]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[102]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[103]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[104]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[105]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[106]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[107]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[108]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[109]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[110]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[111]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[112]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[113]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[114]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[115]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[116]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[117]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[118]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[119]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[120]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[121]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[122]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[123]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[124]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(bytes_characters)[125]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[126]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[127]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[129]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[130]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[131]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[132]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[133]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[134]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[135]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[136]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[137]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[138]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[139]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[140]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[141]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[142]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[143]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[144]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[145]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[146]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[147]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[148]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[149]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[150]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[151]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[152]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[153]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[154]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[155]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[156]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[157]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[158]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[159]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[160]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[161]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[162]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[163]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[164]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[165]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[166]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[167]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(bytes_characters)[168]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[169]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[170]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[171]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[172]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[173]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[174]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[175]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[176]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[177]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[178]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[179]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[180]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[181]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[182]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[183]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[184]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[185]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[186]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[187]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[188]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[189]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[190]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[191]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[192]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[193]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[194]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[195]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[196]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[197]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[198]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[199]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[200]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[201]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[202]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[203]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[204]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[205]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[206]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[207]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[208]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[209]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[210]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(bytes_characters)[211]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[212]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[213]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[214]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[215]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[216]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[217]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[218]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[219]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[220]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[221]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[222]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[223]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[224]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[225]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[226]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[227]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[228]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[229]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[230]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[231]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[232]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[233]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[234]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[235]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[236]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[237]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[238]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[239]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[240]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[241]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[242]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[243]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[244]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[245]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[246]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[247]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[248]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[249]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[250]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[251]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[252]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[253]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(bytes_characters)[254]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_characters)[255]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(anon_dictcomp)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(anon_genexpr)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(anon_lambda)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(anon_listcomp)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(anon_module)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(anon_null)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(anon_setcomp)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(anon_string)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(anon_unknown)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(close_br)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(dbl_close_br)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(dbl_open_br)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(dbl_percent)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(defaults)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(dot)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(dot_locals)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(empty)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(generic_base)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(json_decoder)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(kwdefaults)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(list_err)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(newline)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(open_br)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(percent)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(type_params)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(utf_8)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(CANCELLED)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(FINISHED)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(False)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(JSONDecodeError)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(PENDING)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(Py_Repr)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(TextIOWrapper)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(True)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(WarningMessage)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_WindowsConsoleIO)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__IOBase_closed)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__abc_tpflags__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__abs__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__abstractmethods__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__add__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__aenter__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__aexit__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__aiter__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__all__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__and__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__anext__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__annotations__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__args__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__asyncio_running_event_loop__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__await__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__bases__)); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__bool__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__buffer__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__build_class__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__builtins__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__bytes__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__call__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__cantrace__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__class__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__class_getitem__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__classcell__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__classdict__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__classdictcell__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__complex__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__contains__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__copy__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__ctypes_from_outparam__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__del__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__delattr__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__delete__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__delitem__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__dict__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__dictoffset__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__dir__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__divmod__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__doc__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__enter__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__eq__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__exit__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__file__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__float__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__floordiv__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__format__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__fspath__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__ge__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__get__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__getattr__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__getattribute__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__getinitargs__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__getitem__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__getnewargs__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__getnewargs_ex__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__getstate__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__gt__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__hash__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__iadd__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__iand__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__ifloordiv__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__ilshift__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__imatmul__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__imod__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__import__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__imul__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__index__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__init__)); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_ID(__init_subclass__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__instancecheck__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__int__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__invert__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__ior__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__ipow__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__irshift__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__isabstractmethod__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__isub__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__iter__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__itruediv__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__ixor__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__le__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__len__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__length_hint__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__lltrace__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__loader__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__lshift__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__lt__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__main__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__match_args__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__matmul__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__missing__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__mod__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__module__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__mro_entries__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__mul__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__name__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__ne__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__neg__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__new__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__newobj__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__newobj_ex__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__next__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__notes__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__or__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__orig_class__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__origin__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__package__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__parameters__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__path__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__pos__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__pow__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__prepare__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__qualname__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__radd__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rand__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rdivmod__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__reduce__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__reduce_ex__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__release_buffer__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__repr__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__reversed__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rfloordiv__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rlshift__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rmatmul__)); 
+ _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rmod__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rmul__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__ror__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__round__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rpow__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rrshift__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rshift__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rsub__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rtruediv__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__rxor__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__set__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__set_name__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__setattr__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__setitem__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__setstate__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__sizeof__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__slotnames__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__slots__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__spec__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__str__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__sub__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__subclasscheck__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__subclasshook__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__truediv__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__trunc__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__type_params__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__typing_is_unpacked_typevartuple__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__typing_prepare_subst__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__typing_subst__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__typing_unpacked_tuple_args__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__warningregistry__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__weaklistoffset__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__weakref__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(__xor__)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_abc_impl)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_abstract_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_active)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_annotation)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_anonymous_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_argtypes_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_as_parameter_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_asyncio_future_blocking)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_blksize)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_bootstrap)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_check_retval_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_dealloc_warn)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_feature_version)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_fields_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_finalizing)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_find_and_load)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_fix_up_module)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_flags_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_get_sourcefile)); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_ID(_handle_fromlist)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_initializing)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_io)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_is_text_encoding)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_length_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_limbo)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_lock_unlock_module)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_loop)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_needs_com_addref_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_pack_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_restype_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_showwarnmsg)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_shutdown)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_slotnames)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_strptime_datetime)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_swappedbytes_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_type_)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_uninitialized_submodules)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_warn_unawaited_coroutine)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(_xoptions)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(a)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(abs_tol)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(access)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(aclose)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(add)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(add_done_callback)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(after_in_child)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(after_in_parent)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(aggregate_class)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(append)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(argdefs)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(arguments)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(argv)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(as_integer_ratio)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(asend)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(ast)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(athrow)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(attribute)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(authorizer_callback)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(autocommit)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(b)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(backtick)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(base)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(before)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(big)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(binary_form)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(block)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(bound)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(buffer)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(buffer_callback)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(buffer_size)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(buffering)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(buffers)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(bufsize)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(builtins)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(byteorder)); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(bytes)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(bytes_per_sep)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(c)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(c_call)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(c_exception)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(c_return)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(cached_statements)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(cadata)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(cafile)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(call)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(call_exception_handler)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(call_soon)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(callback)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(cancel)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(capath)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(category)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(cb_type)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(certfile)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(check_same_thread)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(clear)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(close)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(closed)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(closefd)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(closure)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_argcount)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_cellvars)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_code)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_consts)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_exceptiontable)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_filename)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_firstlineno)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_flags)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_freevars)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_kwonlyargcount)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_linetable)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_name)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_names)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_nlocals)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_posonlyargcount)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_qualname)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_stacksize)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(co_varnames)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(code)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(command)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(comment_factory)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(compile_mode)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(consts)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(context)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(contravariant)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(cookie)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(copy)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(copyreg)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(coro)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(count)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(covariant)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(cwd)); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(d)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(data)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(database)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(decode)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(decoder)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(default)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(defaultaction)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(delete)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(depth)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(detect_types)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(deterministic)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(device)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(dict)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(dictcomp)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(difference_update)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(digest)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(digest_size)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(digestmod)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(dir_fd)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(discard)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(dispatch_table)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(displayhook)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(dklen)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(doc)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(dont_inherit)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(dst)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(dst_dir_fd)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(e)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(eager_start)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(effective_ids)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(element_factory)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(encode)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(encoding)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(end)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(end_lineno)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(end_offset)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(endpos)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(entrypoint)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(env)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(errors)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(event)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(eventmask)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(excepthook)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(exception)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(existing_file_name)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(exp)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(extend)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(extra_tokens)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(f)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(facility)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(factory)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(false)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(family)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fanout)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fd)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fd2)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fdel)); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_ID(fget)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(file)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(file_actions)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(filename)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fileno)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(filepath)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fillvalue)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(filters)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(final)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(find_class)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fix_imports)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(flags)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(flush)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(follow_symlinks)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(format)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(from_param)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fromlist)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fromtimestamp)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fromutc)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fset)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(func)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(future)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(g)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(generation)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(genexpr)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(get)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(get_debug)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(get_event_loop)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(get_loop)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(get_source)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(getattr)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(getstate)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(gid)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(globals)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(groupindex)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(groups)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(h)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(handle)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(hash_name)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(header)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(headers)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(hi)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(hook)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(id)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(ident)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(identity_hint)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(ignore)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(imag)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(importlib)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(in_fd)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(incoming)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(indexgroup)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inf)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(infer_variance)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inheritable)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_bytes)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_value)); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initval)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inner_size)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(input)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(insert_comments)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(insert_pis)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(instructions)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(intern)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(intersection)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(interval)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(is_running)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(isatty)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(isinstance)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(isoformat)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(isolation_level)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(istext)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(item)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(items)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(iter)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(iterable)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(iterations)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(join)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(jump)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(keepends)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(key)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(keyfile)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(keys)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(kind)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(kw)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(kw1)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(kw2)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(lambda)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(last)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(last_exc)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(last_node)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(last_traceback)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(last_type)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(last_value)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(latin1)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(leaf_size)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(len)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(length)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(level)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(limit)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(line)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(line_buffering)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(lineno)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(listcomp)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(little)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(lo)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(locale)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(locals)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(logoption)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(loop)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mapping)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(match)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(max_length)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(maxdigits)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(maxevents)); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(maxmem)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(maxsplit)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(maxvalue)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(memLevel)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(memlimit)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(message)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(metaclass)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(metadata)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(method)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mod)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mode)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(module)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(module_globals)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(modules)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mro)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(msg)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mycmp)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(n)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(n_arg)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(n_fields)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(n_sequence_fields)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(n_unnamed_fields)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(name)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(name_from)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(namespace_separator)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(namespaces)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(narg)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(ndigits)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(new_file_name)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(new_limit)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(newline)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(newlines)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(next)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(nlocals)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(node_depth)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(node_offset)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(ns)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(nstype)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(nt)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(null)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(number)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(obj)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(object)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(offset)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(offset_dst)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(offset_src)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(on_type_read)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(onceregistry)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(only_keys)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(oparg)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(opcode)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(open)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(opener)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(operation)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(optimize)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(options)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(order)); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_ID(origin)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(out_fd)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(outgoing)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(overlapped)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(owner)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(p)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(pages)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(parent)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(password)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(path)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(pattern)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(peek)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(persistent_id)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(persistent_load)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(person)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(pi_factory)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(pid)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(policy)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(pos)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(pos1)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(pos2)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(posix)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(print_file_and_line)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(priority)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(progress)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(progress_handler)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(progress_routine)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(proto)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(protocol)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(ps1)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(ps2)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(query)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(quotetabs)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(r)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(raw)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(read)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(read1)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(readable)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(readall)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(readinto)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(readinto1)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(readline)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(readonly)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(real)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(reducer_override)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(registry)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(rel_tol)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(release)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(reload)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(repl)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(replace)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(reserved)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(reset)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(resetids)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(return)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(reverse)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(reversed)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(s)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(salt)); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sched_priority)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(scheduler)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(seek)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(seekable)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(selectors)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(self)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(send)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sep)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sequence)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(server_hostname)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(server_side)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(session)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(setcomp)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(setpgroup)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(setsid)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(setsigdef)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(setsigmask)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(setstate)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(shape)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(show_cmd)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(signed)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(size)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sizehint)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(skip_file_prefixes)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sleep)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sock)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sort)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(source)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(source_traceback)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(src)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(src_dir_fd)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(stacklevel)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(start)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(statement)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(status)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(stderr)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(stdin)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(stdout)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(step)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(steps)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(store_name)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(strategy)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(strftime)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(strict)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(strict_mode)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(string)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sub_key)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(symmetric_difference_update)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(tabsize)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(tag)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(target)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(target_is_directory)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(task)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(tb_frame)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(tb_lasti)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(tb_lineno)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(tb_next)); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(tell)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(template)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(term)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(text)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(threading)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(throw)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(timeout)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(times)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(timetuple)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(top)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(trace_callback)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(trailers)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(translate)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(true)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(truncate)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(twice)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(txt)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(type)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(type_params)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(tz)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(tzname)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(uid)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(unlink)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(unraisablehook)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(uri)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(usedforsecurity)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(value)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(values)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(version)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(volume)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(warnings)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(warnoptions)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(wbits)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(week)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(weekday)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(which)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(who)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(withdata)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(writable)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(write)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(write_through)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(x)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(year)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(zdict)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[0]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[1]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[2]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[3]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[4]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[5]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[6]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[7]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[8]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[9]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[10]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(strings).ascii[11]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[12]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[13]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[14]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[15]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[16]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[17]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[18]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[19]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[20]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[21]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[22]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[23]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[24]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[25]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[26]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[27]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[28]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[29]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[30]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[31]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[32]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[33]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[34]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[35]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[36]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[37]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[38]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[39]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[40]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[41]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[42]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[43]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[44]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[45]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[46]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[47]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[48]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[49]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[50]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[51]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[52]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[53]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[54]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[55]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[56]); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[57]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[58]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[59]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[60]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[61]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[62]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[63]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[64]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[65]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[66]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[67]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[68]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[69]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[70]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[71]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[72]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[73]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[74]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[75]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[76]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[77]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[78]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[79]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[80]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[81]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[82]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[83]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[84]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[85]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[86]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[87]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[88]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[89]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[90]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[91]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[92]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[93]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[94]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[95]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[96]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[97]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[98]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[99]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[100]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[101]); + _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_SINGLETON(strings).ascii[102]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[103]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[104]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[105]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[106]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[107]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[108]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[109]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[110]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[111]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[112]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[113]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[114]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[115]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[116]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[117]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[118]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[119]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[120]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[121]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[122]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[123]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[124]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[125]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[126]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).ascii[127]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[128 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[129 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[130 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[131 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[132 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[133 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[134 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[135 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[136 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[137 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[138 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[139 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[140 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[141 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[142 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[143 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[144 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[145 - 
128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[146 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[147 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[148 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[149 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[150 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[151 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[152 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[153 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[154 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[155 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[156 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[157 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[158 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[159 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[160 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[161 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[162 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[163 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[164 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[165 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[166 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[167 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[168 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[169 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[170 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[171 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[172 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[173 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[174 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[175 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[176 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[177 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[178 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[179 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[180 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[181 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[182 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[183 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[184 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[185 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[186 - 128]); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[187 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[188 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[189 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[190 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[191 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[192 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[193 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[194 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[195 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[196 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[197 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[198 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[199 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[200 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[201 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[202 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[203 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[204 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[205 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[206 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[207 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[208 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[209 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[210 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[211 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[212 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[213 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[214 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[215 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[216 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[217 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[218 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[219 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[220 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[221 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[222 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[223 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[224 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[225 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[226 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[227 - 128]); + 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[228 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[229 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[230 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[231 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[232 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[233 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[234 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[235 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[236 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[237 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[238 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[239 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[240 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[241 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[242 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[243 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[244 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[245 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[246 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[247 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[248 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[249 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[250 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[251 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[252 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[253 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[254 - 128]); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(strings).latin1[255 - 128]); + /* non-generated */ + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(bytes_empty)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(tuple_empty)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(hamt_bitmap_node_empty)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_INTERP_SINGLETON(interp, hamt_empty)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_SINGLETON(context_token_missing)); +} +#endif // Py_DEBUG +/* End auto-generated code */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_GLOBAL_OBJECTS_FINI_GENERATED_INIT_H */ diff --git a/Include/internal/pycore_global_strings.h b/Include/internal/pycore_global_strings.h index adfe51942424708..62c3ee3ae2a0bd8 100644 --- a/Include/internal/pycore_global_strings.h +++ b/Include/internal/pycore_global_strings.h @@ -11,7 +11,7 @@ extern "C" { // The data structure & init here are inspired by Tools/build/deepfreeze.py. // All field names generated by ASCII_STR() have a common prefix, -// to help avoid collisions with keywords, etc. +// to help avoid collisions with keywords, macros, etc. 
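Editor's note: the long run of `_PyStaticObject_CheckRefcnt()` calls that closes above is auto-generated, with one call per statically allocated identifier, per one-character ASCII/Latin-1 string singleton, and per non-generated singleton such as `bytes_empty` and `tuple_empty`; the `#endif // Py_DEBUG` guard and the `..._FINI_GENERATED_INIT_H` header guard suggest it is a debug-only sweep run while the runtime's static objects are being finalized. The standalone sketch below illustrates only the general pattern such a sweep could follow — walk the static singletons and flag any whose reference count has drifted from an expected "immortal" sentinel. All names in it (`DemoObject`, `DEMO_IMMORTAL_REFCNT`, `demo_check_refcnt`) are invented for illustration; it is not the actual CPython helper.

/*
 * Minimal sketch of a debug-only refcount sweep over static singletons.
 * Invented names throughout; not CPython's implementation.
 */
#include <stdio.h>

#define DEMO_IMMORTAL_REFCNT 0x3FFFFFFFL   /* stand-in "immortal" sentinel */

typedef struct {
    long ob_refcnt;     /* plays the role of a PyObject's refcount */
    const char *name;   /* only so the demo can print something useful */
} DemoObject;

/* Statically allocated singletons, initialized with the sentinel count. */
static DemoObject demo_empty_bytes = { DEMO_IMMORTAL_REFCNT, "bytes_empty" };
static DemoObject demo_empty_tuple = { DEMO_IMMORTAL_REFCNT, "tuple_empty" };

static void
demo_check_refcnt(const DemoObject *op)
{
    /* A static object whose count drifted below the sentinel was
     * over-decref'd somewhere; report it while the name is still known. */
    if (op->ob_refcnt < DEMO_IMMORTAL_REFCNT) {
        fprintf(stderr, "static object '%s' has unexpected refcnt %ld\n",
                op->name, op->ob_refcnt);
    }
}

int main(void)
{
    demo_empty_tuple.ob_refcnt--;           /* simulate a refcounting bug */
    demo_check_refcnt(&demo_empty_bytes);   /* silent: count is intact   */
    demo_check_refcnt(&demo_empty_tuple);   /* prints a warning          */
    return 0;
}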
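Editor's note: the comment above explains why every generated field name carries a common prefix, and the hunks just below rename that prefix from `_` to `_py_` in `STRUCT_FOR_STR`, `STRUCT_FOR_ID`, `_Py_ID()`, and `_Py_STR()`. The toy program below sketches how that token-pasting scheme produces one statically initialized struct per identifier and why a distinctive prefix keeps the members clear of keywords and platform macros. It is a simplified illustration with invented names (`Header`, `MY_ID`, `demo_strings`); in the real headers the `_ascii` member is the actual string-object header rather than the tiny stand-in used here.

/*
 * Toy model of the STRUCT_FOR_ID / _Py_ID token-pasting scheme.
 * "Header" stands in for the real string-object header and MY_ID for
 * _Py_ID(); everything here is illustrative, not CPython's layout.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { long refcnt; } Header;

#define STRUCT_FOR_ASCII_STR(LITERAL) \
    struct { Header _ascii; uint8_t _data[sizeof(LITERAL)]; }

/* Fields are spelled "_py_" ## NAME: a bare "_" ## NAME could collide with
 * a keyword or with a macro defined by some platform header, whereas an
 * identifier like "_py_write" or "_py_path" is unlikely to be claimed. */
#define STRUCT_FOR_ID(NAME) STRUCT_FOR_ASCII_STR(#NAME) _py_ ## NAME;

/* Counterpart of _Py_ID(NAME): reach the per-identifier singleton. */
#define MY_ID(NAME) (demo_strings._py_ ## NAME)

static struct {
    STRUCT_FOR_ID(path)
    STRUCT_FOR_ID(write)
} demo_strings = {
    ._py_path  = { ._ascii = { .refcnt = 1 }, ._data = "path"  },
    ._py_write = { ._ascii = { .refcnt = 1 }, ._data = "write" },
};

int main(void)
{
    /* MY_ID(path) expands to demo_strings._py_path at compile time. */
    printf("id 'path'  -> \"%s\" (refcnt %ld)\n",
           (const char *)MY_ID(path)._data, MY_ID(path)._ascii.refcnt);
    printf("id 'write' -> \"%s\" (refcnt %ld)\n",
           (const char *)MY_ID(write)._data, MY_ID(write)._ascii.refcnt);
    return 0;
}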
#define STRUCT_FOR_ASCII_STR(LITERAL) \ struct { \ @@ -19,9 +19,9 @@ extern "C" { uint8_t _data[sizeof(LITERAL)]; \ } #define STRUCT_FOR_STR(NAME, LITERAL) \ - STRUCT_FOR_ASCII_STR(LITERAL) _ ## NAME; + STRUCT_FOR_ASCII_STR(LITERAL) _py_ ## NAME; #define STRUCT_FOR_ID(NAME) \ - STRUCT_FOR_ASCII_STR(#NAME) _ ## NAME; + STRUCT_FOR_ASCII_STR(#NAME) _py_ ## NAME; // XXX Order by frequency of use? @@ -33,22 +33,26 @@ struct _Py_global_strings { STRUCT_FOR_STR(anon_lambda, "") STRUCT_FOR_STR(anon_listcomp, "") STRUCT_FOR_STR(anon_module, "") + STRUCT_FOR_STR(anon_null, "") STRUCT_FOR_STR(anon_setcomp, "") STRUCT_FOR_STR(anon_string, "") STRUCT_FOR_STR(anon_unknown, "") STRUCT_FOR_STR(close_br, "}") - STRUCT_FOR_STR(comma_sep, ", ") STRUCT_FOR_STR(dbl_close_br, "}}") STRUCT_FOR_STR(dbl_open_br, "{{") STRUCT_FOR_STR(dbl_percent, "%%") + STRUCT_FOR_STR(defaults, ".defaults") STRUCT_FOR_STR(dot, ".") STRUCT_FOR_STR(dot_locals, ".") STRUCT_FOR_STR(empty, "") + STRUCT_FOR_STR(generic_base, ".generic_base") STRUCT_FOR_STR(json_decoder, "json.decoder") + STRUCT_FOR_STR(kwdefaults, ".kwdefaults") STRUCT_FOR_STR(list_err, "list index out of range") STRUCT_FOR_STR(newline, "\n") STRUCT_FOR_STR(open_br, "{") STRUCT_FOR_STR(percent, "%") + STRUCT_FOR_STR(type_params, ".type_params") STRUCT_FOR_STR(utf_8, "utf-8") } literals; @@ -63,6 +67,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(True) STRUCT_FOR_ID(WarningMessage) STRUCT_FOR_ID(_) + STRUCT_FOR_ID(_WindowsConsoleIO) STRUCT_FOR_ID(__IOBase_closed) STRUCT_FOR_ID(__abc_tpflags__) STRUCT_FOR_ID(__abs__) @@ -80,6 +85,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(__await__) STRUCT_FOR_ID(__bases__) STRUCT_FOR_ID(__bool__) + STRUCT_FOR_ID(__buffer__) STRUCT_FOR_ID(__build_class__) STRUCT_FOR_ID(__builtins__) STRUCT_FOR_ID(__bytes__) @@ -88,9 +94,12 @@ struct _Py_global_strings { STRUCT_FOR_ID(__class__) STRUCT_FOR_ID(__class_getitem__) STRUCT_FOR_ID(__classcell__) + STRUCT_FOR_ID(__classdict__) + STRUCT_FOR_ID(__classdictcell__) STRUCT_FOR_ID(__complex__) STRUCT_FOR_ID(__contains__) STRUCT_FOR_ID(__copy__) + STRUCT_FOR_ID(__ctypes_from_outparam__) STRUCT_FOR_ID(__del__) STRUCT_FOR_ID(__delattr__) STRUCT_FOR_ID(__delete__) @@ -149,6 +158,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(__lshift__) STRUCT_FOR_ID(__lt__) STRUCT_FOR_ID(__main__) + STRUCT_FOR_ID(__match_args__) STRUCT_FOR_ID(__matmul__) STRUCT_FOR_ID(__missing__) STRUCT_FOR_ID(__mod__) @@ -178,6 +188,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(__rdivmod__) STRUCT_FOR_ID(__reduce__) STRUCT_FOR_ID(__reduce_ex__) + STRUCT_FOR_ID(__release_buffer__) STRUCT_FOR_ID(__repr__) STRUCT_FOR_ID(__reversed__) STRUCT_FOR_ID(__rfloordiv__) @@ -208,6 +219,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(__subclasshook__) STRUCT_FOR_ID(__truediv__) STRUCT_FOR_ID(__trunc__) + STRUCT_FOR_ID(__type_params__) STRUCT_FOR_ID(__typing_is_unpacked_typevartuple__) STRUCT_FOR_ID(__typing_prepare_subst__) STRUCT_FOR_ID(__typing_subst__) @@ -217,30 +229,48 @@ struct _Py_global_strings { STRUCT_FOR_ID(__weakref__) STRUCT_FOR_ID(__xor__) STRUCT_FOR_ID(_abc_impl) + STRUCT_FOR_ID(_abstract_) + STRUCT_FOR_ID(_active) STRUCT_FOR_ID(_annotation) + STRUCT_FOR_ID(_anonymous_) + STRUCT_FOR_ID(_argtypes_) + STRUCT_FOR_ID(_as_parameter_) STRUCT_FOR_ID(_asyncio_future_blocking) STRUCT_FOR_ID(_blksize) STRUCT_FOR_ID(_bootstrap) + STRUCT_FOR_ID(_check_retval_) STRUCT_FOR_ID(_dealloc_warn) STRUCT_FOR_ID(_feature_version) + STRUCT_FOR_ID(_fields_) STRUCT_FOR_ID(_finalizing) STRUCT_FOR_ID(_find_and_load) STRUCT_FOR_ID(_fix_up_module) + 
STRUCT_FOR_ID(_flags_) STRUCT_FOR_ID(_get_sourcefile) STRUCT_FOR_ID(_handle_fromlist) STRUCT_FOR_ID(_initializing) + STRUCT_FOR_ID(_io) STRUCT_FOR_ID(_is_text_encoding) + STRUCT_FOR_ID(_length_) + STRUCT_FOR_ID(_limbo) STRUCT_FOR_ID(_lock_unlock_module) STRUCT_FOR_ID(_loop) + STRUCT_FOR_ID(_needs_com_addref_) + STRUCT_FOR_ID(_pack_) + STRUCT_FOR_ID(_restype_) STRUCT_FOR_ID(_showwarnmsg) STRUCT_FOR_ID(_shutdown) STRUCT_FOR_ID(_slotnames) + STRUCT_FOR_ID(_strptime_datetime) + STRUCT_FOR_ID(_swappedbytes_) + STRUCT_FOR_ID(_type_) STRUCT_FOR_ID(_uninitialized_submodules) STRUCT_FOR_ID(_warn_unawaited_coroutine) STRUCT_FOR_ID(_xoptions) STRUCT_FOR_ID(a) STRUCT_FOR_ID(abs_tol) STRUCT_FOR_ID(access) + STRUCT_FOR_ID(aclose) STRUCT_FOR_ID(add) STRUCT_FOR_ID(add_done_callback) STRUCT_FOR_ID(after_in_child) @@ -250,8 +280,13 @@ struct _Py_global_strings { STRUCT_FOR_ID(argdefs) STRUCT_FOR_ID(arguments) STRUCT_FOR_ID(argv) + STRUCT_FOR_ID(as_integer_ratio) + STRUCT_FOR_ID(asend) + STRUCT_FOR_ID(ast) + STRUCT_FOR_ID(athrow) STRUCT_FOR_ID(attribute) STRUCT_FOR_ID(authorizer_callback) + STRUCT_FOR_ID(autocommit) STRUCT_FOR_ID(b) STRUCT_FOR_ID(backtick) STRUCT_FOR_ID(base) @@ -259,6 +294,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(big) STRUCT_FOR_ID(binary_form) STRUCT_FOR_ID(block) + STRUCT_FOR_ID(bound) STRUCT_FOR_ID(buffer) STRUCT_FOR_ID(buffer_callback) STRUCT_FOR_ID(buffer_size) @@ -269,6 +305,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(byteorder) STRUCT_FOR_ID(bytes) STRUCT_FOR_ID(bytes_per_sep) + STRUCT_FOR_ID(c) STRUCT_FOR_ID(c_call) STRUCT_FOR_ID(c_exception) STRUCT_FOR_ID(c_return) @@ -278,6 +315,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(call) STRUCT_FOR_ID(call_exception_handler) STRUCT_FOR_ID(call_soon) + STRUCT_FOR_ID(callback) STRUCT_FOR_ID(cancel) STRUCT_FOR_ID(capath) STRUCT_FOR_ID(category) @@ -310,14 +348,18 @@ struct _Py_global_strings { STRUCT_FOR_ID(code) STRUCT_FOR_ID(command) STRUCT_FOR_ID(comment_factory) + STRUCT_FOR_ID(compile_mode) STRUCT_FOR_ID(consts) STRUCT_FOR_ID(context) + STRUCT_FOR_ID(contravariant) STRUCT_FOR_ID(cookie) STRUCT_FOR_ID(copy) STRUCT_FOR_ID(copyreg) STRUCT_FOR_ID(coro) STRUCT_FOR_ID(count) + STRUCT_FOR_ID(covariant) STRUCT_FOR_ID(cwd) + STRUCT_FOR_ID(d) STRUCT_FOR_ID(data) STRUCT_FOR_ID(database) STRUCT_FOR_ID(decode) @@ -344,7 +386,8 @@ struct _Py_global_strings { STRUCT_FOR_ID(dont_inherit) STRUCT_FOR_ID(dst) STRUCT_FOR_ID(dst_dir_fd) - STRUCT_FOR_ID(duration) + STRUCT_FOR_ID(e) + STRUCT_FOR_ID(eager_start) STRUCT_FOR_ID(effective_ids) STRUCT_FOR_ID(element_factory) STRUCT_FOR_ID(encode) @@ -353,16 +396,18 @@ struct _Py_global_strings { STRUCT_FOR_ID(end_lineno) STRUCT_FOR_ID(end_offset) STRUCT_FOR_ID(endpos) + STRUCT_FOR_ID(entrypoint) STRUCT_FOR_ID(env) STRUCT_FOR_ID(errors) STRUCT_FOR_ID(event) STRUCT_FOR_ID(eventmask) - STRUCT_FOR_ID(exc_type) - STRUCT_FOR_ID(exc_value) STRUCT_FOR_ID(excepthook) STRUCT_FOR_ID(exception) + STRUCT_FOR_ID(existing_file_name) STRUCT_FOR_ID(exp) STRUCT_FOR_ID(extend) + STRUCT_FOR_ID(extra_tokens) + STRUCT_FOR_ID(f) STRUCT_FOR_ID(facility) STRUCT_FOR_ID(factory) STRUCT_FOR_ID(false) @@ -386,11 +431,14 @@ struct _Py_global_strings { STRUCT_FOR_ID(flush) STRUCT_FOR_ID(follow_symlinks) STRUCT_FOR_ID(format) - STRUCT_FOR_ID(frequency) + STRUCT_FOR_ID(from_param) STRUCT_FOR_ID(fromlist) + STRUCT_FOR_ID(fromtimestamp) + STRUCT_FOR_ID(fromutc) STRUCT_FOR_ID(fset) STRUCT_FOR_ID(func) STRUCT_FOR_ID(future) + STRUCT_FOR_ID(g) STRUCT_FOR_ID(generation) STRUCT_FOR_ID(genexpr) STRUCT_FOR_ID(get) @@ -404,6 +452,7 @@ struct 
_Py_global_strings { STRUCT_FOR_ID(globals) STRUCT_FOR_ID(groupindex) STRUCT_FOR_ID(groups) + STRUCT_FOR_ID(h) STRUCT_FOR_ID(handle) STRUCT_FOR_ID(hash_name) STRUCT_FOR_ID(header) @@ -412,6 +461,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(hook) STRUCT_FOR_ID(id) STRUCT_FOR_ID(ident) + STRUCT_FOR_ID(identity_hint) STRUCT_FOR_ID(ignore) STRUCT_FOR_ID(imag) STRUCT_FOR_ID(importlib) @@ -419,6 +469,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(incoming) STRUCT_FOR_ID(indexgroup) STRUCT_FOR_ID(inf) + STRUCT_FOR_ID(infer_variance) STRUCT_FOR_ID(inheritable) STRUCT_FOR_ID(initial) STRUCT_FOR_ID(initial_bytes) @@ -431,8 +482,11 @@ struct _Py_global_strings { STRUCT_FOR_ID(instructions) STRUCT_FOR_ID(intern) STRUCT_FOR_ID(intersection) + STRUCT_FOR_ID(interval) + STRUCT_FOR_ID(is_running) STRUCT_FOR_ID(isatty) STRUCT_FOR_ID(isinstance) + STRUCT_FOR_ID(isoformat) STRUCT_FOR_ID(isolation_level) STRUCT_FOR_ID(istext) STRUCT_FOR_ID(item) @@ -447,8 +501,12 @@ struct _Py_global_strings { STRUCT_FOR_ID(keyfile) STRUCT_FOR_ID(keys) STRUCT_FOR_ID(kind) + STRUCT_FOR_ID(kw) + STRUCT_FOR_ID(kw1) + STRUCT_FOR_ID(kw2) STRUCT_FOR_ID(lambda) STRUCT_FOR_ID(last) + STRUCT_FOR_ID(last_exc) STRUCT_FOR_ID(last_node) STRUCT_FOR_ID(last_traceback) STRUCT_FOR_ID(last_type) @@ -481,6 +539,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(memlimit) STRUCT_FOR_ID(message) STRUCT_FOR_ID(metaclass) + STRUCT_FOR_ID(metadata) STRUCT_FOR_ID(method) STRUCT_FOR_ID(mod) STRUCT_FOR_ID(mode) @@ -501,14 +560,17 @@ struct _Py_global_strings { STRUCT_FOR_ID(namespaces) STRUCT_FOR_ID(narg) STRUCT_FOR_ID(ndigits) + STRUCT_FOR_ID(new_file_name) STRUCT_FOR_ID(new_limit) STRUCT_FOR_ID(newline) STRUCT_FOR_ID(newlines) STRUCT_FOR_ID(next) + STRUCT_FOR_ID(nlocals) STRUCT_FOR_ID(node_depth) STRUCT_FOR_ID(node_offset) STRUCT_FOR_ID(ns) STRUCT_FOR_ID(nstype) + STRUCT_FOR_ID(nt) STRUCT_FOR_ID(null) STRUCT_FOR_ID(number) STRUCT_FOR_ID(obj) @@ -527,6 +589,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(optimize) STRUCT_FOR_ID(options) STRUCT_FOR_ID(order) + STRUCT_FOR_ID(origin) STRUCT_FOR_ID(out_fd) STRUCT_FOR_ID(outgoing) STRUCT_FOR_ID(overlapped) @@ -545,10 +608,14 @@ struct _Py_global_strings { STRUCT_FOR_ID(pid) STRUCT_FOR_ID(policy) STRUCT_FOR_ID(pos) + STRUCT_FOR_ID(pos1) + STRUCT_FOR_ID(pos2) + STRUCT_FOR_ID(posix) STRUCT_FOR_ID(print_file_and_line) STRUCT_FOR_ID(priority) STRUCT_FOR_ID(progress) STRUCT_FOR_ID(progress_handler) + STRUCT_FOR_ID(progress_routine) STRUCT_FOR_ID(proto) STRUCT_FOR_ID(protocol) STRUCT_FOR_ID(ps1) @@ -569,6 +636,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(reducer_override) STRUCT_FOR_ID(registry) STRUCT_FOR_ID(rel_tol) + STRUCT_FOR_ID(release) STRUCT_FOR_ID(reload) STRUCT_FOR_ID(repl) STRUCT_FOR_ID(replace) @@ -585,6 +653,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(seek) STRUCT_FOR_ID(seekable) STRUCT_FOR_ID(selectors) + STRUCT_FOR_ID(self) STRUCT_FOR_ID(send) STRUCT_FOR_ID(sep) STRUCT_FOR_ID(sequence) @@ -602,10 +671,10 @@ struct _Py_global_strings { STRUCT_FOR_ID(signed) STRUCT_FOR_ID(size) STRUCT_FOR_ID(sizehint) + STRUCT_FOR_ID(skip_file_prefixes) STRUCT_FOR_ID(sleep) STRUCT_FOR_ID(sock) STRUCT_FOR_ID(sort) - STRUCT_FOR_ID(sound) STRUCT_FOR_ID(source) STRUCT_FOR_ID(source_traceback) STRUCT_FOR_ID(src) @@ -618,8 +687,10 @@ struct _Py_global_strings { STRUCT_FOR_ID(stdin) STRUCT_FOR_ID(stdout) STRUCT_FOR_ID(step) + STRUCT_FOR_ID(steps) STRUCT_FOR_ID(store_name) STRUCT_FOR_ID(strategy) + STRUCT_FOR_ID(strftime) STRUCT_FOR_ID(strict) STRUCT_FOR_ID(strict_mode) STRUCT_FOR_ID(string) @@ -642,9 +713,9 @@ struct 
_Py_global_strings { STRUCT_FOR_ID(throw) STRUCT_FOR_ID(timeout) STRUCT_FOR_ID(times) + STRUCT_FOR_ID(timetuple) STRUCT_FOR_ID(top) STRUCT_FOR_ID(trace_callback) - STRUCT_FOR_ID(traceback) STRUCT_FOR_ID(trailers) STRUCT_FOR_ID(translate) STRUCT_FOR_ID(true) @@ -652,7 +723,9 @@ struct _Py_global_strings { STRUCT_FOR_ID(twice) STRUCT_FOR_ID(txt) STRUCT_FOR_ID(type) + STRUCT_FOR_ID(type_params) STRUCT_FOR_ID(tz) + STRUCT_FOR_ID(tzname) STRUCT_FOR_ID(uid) STRUCT_FOR_ID(unlink) STRUCT_FOR_ID(unraisablehook) @@ -661,6 +734,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(value) STRUCT_FOR_ID(values) STRUCT_FOR_ID(version) + STRUCT_FOR_ID(volume) STRUCT_FOR_ID(warnings) STRUCT_FOR_ID(warnoptions) STRUCT_FOR_ID(wbits) @@ -692,9 +766,9 @@ struct _Py_global_strings { #define _Py_ID(NAME) \ - (_Py_SINGLETON(strings.identifiers._ ## NAME._ascii.ob_base)) + (_Py_SINGLETON(strings.identifiers._py_ ## NAME._ascii.ob_base)) #define _Py_STR(NAME) \ - (_Py_SINGLETON(strings.literals._ ## NAME._ascii.ob_base)) + (_Py_SINGLETON(strings.literals._py_ ## NAME._ascii.ob_base)) /* _Py_DECLARE_STR() should precede all uses of _Py_STR() in a function. diff --git a/Include/internal/pycore_hamt.h b/Include/internal/pycore_hamt.h index 7a674b4ee4ac0e6..d8742c7cb635783 100644 --- a/Include/internal/pycore_hamt.h +++ b/Include/internal/pycore_hamt.h @@ -28,10 +28,6 @@ extern PyTypeObject _PyHamtKeys_Type; extern PyTypeObject _PyHamtValues_Type; extern PyTypeObject _PyHamtItems_Type; -/* runtime lifecycle */ - -void _PyHamt_Fini(PyInterpreterState *); - /* other API */ @@ -53,6 +49,13 @@ typedef struct { } PyHamtObject; +typedef struct { + PyObject_VAR_HEAD + uint32_t b_bitmap; + PyObject *b_array[1]; +} PyHamtNode_Bitmap; + + /* A struct to hold the state of depth-first traverse of the tree. HAMT is an immutable collection. Iterators will hold a strong reference diff --git a/Include/internal/pycore_hashtable.h b/Include/internal/pycore_hashtable.h index 2aa23a24c2830ad..369d49c42bbfccd 100644 --- a/Include/internal/pycore_hashtable.h +++ b/Include/internal/pycore_hashtable.h @@ -18,9 +18,9 @@ typedef struct { _Py_slist_item_t *head; } _Py_slist_t; -#define _Py_SLIST_ITEM_NEXT(ITEM) (((_Py_slist_item_t *)(ITEM))->next) +#define _Py_SLIST_ITEM_NEXT(ITEM) _Py_RVALUE(((_Py_slist_item_t *)(ITEM))->next) -#define _Py_SLIST_HEAD(SLIST) (((_Py_slist_t *)(SLIST))->head) +#define _Py_SLIST_HEAD(SLIST) _Py_RVALUE(((_Py_slist_t *)(SLIST))->head) /* _Py_hashtable: table entry */ @@ -70,6 +70,11 @@ struct _Py_hashtable_t { _Py_hashtable_allocator_t alloc; }; +// Export _Py_hashtable functions for '_testinternalcapi' shared extension +PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new( + _Py_hashtable_hash_func hash_func, + _Py_hashtable_compare_func compare_func); + /* Hash a pointer (void*) */ PyAPI_FUNC(Py_uhash_t) _Py_hashtable_hash_ptr(const void *key); @@ -78,10 +83,6 @@ PyAPI_FUNC(int) _Py_hashtable_compare_direct( const void *key1, const void *key2); -PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new( - _Py_hashtable_hash_func hash_func, - _Py_hashtable_compare_func compare_func); - PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new_full( _Py_hashtable_hash_func hash_func, _Py_hashtable_compare_func compare_func, @@ -106,6 +107,7 @@ PyAPI_FUNC(int) _Py_hashtable_foreach( void *user_data); PyAPI_FUNC(size_t) _Py_hashtable_size(const _Py_hashtable_t *ht); +PyAPI_FUNC(size_t) _Py_hashtable_len(const _Py_hashtable_t *ht); /* Add a new entry to the hash. The key must not be present in the hash table. Return 0 on success, -1 on memory error. 
*/ diff --git a/Include/internal/pycore_identifier.h b/Include/internal/pycore_identifier.h new file mode 100644 index 000000000000000..cda28810a481962 --- /dev/null +++ b/Include/internal/pycore_identifier.h @@ -0,0 +1,20 @@ +/* String Literals: _Py_Identifier API */ + +#ifndef Py_INTERNAL_IDENTIFIER_H +#define Py_INTERNAL_IDENTIFIER_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +extern PyObject* _PyType_LookupId(PyTypeObject *, _Py_Identifier *); +extern PyObject* _PyObject_LookupSpecialId(PyObject *, _Py_Identifier *); +extern int _PyObject_SetAttrId(PyObject *, _Py_Identifier *, PyObject *); + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_IDENTIFIER_H diff --git a/Include/internal/pycore_import.h b/Include/internal/pycore_import.h index aee1f66a3ea1718..117e46bb86285d2 100644 --- a/Include/internal/pycore_import.h +++ b/Include/internal/pycore_import.h @@ -5,21 +5,207 @@ extern "C" { #endif +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_hashtable.h" // _Py_hashtable_t +#include "pycore_time.h" // _PyTime_t + +extern int _PyImport_IsInitialized(PyInterpreterState *); + +// Export for 'pyexpat' shared extension +PyAPI_FUNC(int) _PyImport_SetModule(PyObject *name, PyObject *module); + +extern int _PyImport_SetModuleString(const char *name, PyObject* module); + +extern void _PyImport_AcquireLock(PyInterpreterState *interp); +extern int _PyImport_ReleaseLock(PyInterpreterState *interp); + +extern int _PyImport_FixupBuiltin( + PyObject *mod, + const char *name, /* UTF-8 encoded string */ + PyObject *modules + ); +extern int _PyImport_FixupExtensionObject(PyObject*, PyObject *, + PyObject *, PyObject *); + +// Export for many shared extensions, like '_json' +PyAPI_FUNC(PyObject*) _PyImport_GetModuleAttr(PyObject *, PyObject *); + +// Export for many shared extensions, like '_datetime' +PyAPI_FUNC(PyObject*) _PyImport_GetModuleAttrString(const char *, const char *); + + +struct _import_runtime_state { + /* The builtin modules (defined in config.c). */ + struct _inittab *inittab; + /* The most recent value assigned to a PyModuleDef.m_base.m_index. + This is incremented each time PyModuleDef_Init() is called, + which is just about every time an extension module is imported. + See PyInterpreterState.modules_by_index for more info. */ + Py_ssize_t last_module_index; + struct { + /* A lock to guard the cache. */ + PyThread_type_lock mutex; + /* The actual cache of (filename, name, PyModuleDef) for modules. + Only legacy (single-phase init) extension modules are added + and only if they support multiple initialization (m_size >- 0) + or are imported in the main interpreter. + This is initialized lazily in _PyImport_FixupExtensionObject(). + Modules are added there and looked up in _imp.find_extension(). */ + _Py_hashtable_t *hashtable; + } extensions; + /* Package context -- the full module name for package imports */ + const char * pkgcontext; +}; + +struct _import_state { + /* cached sys.modules dictionary */ + PyObject *modules; + /* This is the list of module objects for all legacy (single-phase init) + extension modules ever loaded in this process (i.e. imported + in this interpreter or in any other). Py_None stands in for + modules that haven't actually been imported in this interpreter. + + A module's index (PyModuleDef.m_base.m_index) is used to look up + the corresponding module object for this interpreter, if any. 
+ (See PyState_FindModule().) When any extension module + is initialized during import, its moduledef gets initialized by + PyModuleDef_Init(), and the first time that happens for each + PyModuleDef, its index gets set to the current value of + a global counter (see _PyRuntimeState.imports.last_module_index). + The entry for that index in this interpreter remains unset until + the module is actually imported here. (Py_None is used as + a placeholder.) Note that multi-phase init modules always get + an index for which there will never be a module set. + + This is initialized lazily in PyState_AddModule(), which is also + where modules get added. */ + PyObject *modules_by_index; + /* importlib module._bootstrap */ + PyObject *importlib; + /* override for config->use_frozen_modules (for tests) + (-1: "off", 1: "on", 0: no override) */ + int override_frozen_modules; + int override_multi_interp_extensions_check; +#ifdef HAVE_DLOPEN + int dlopenflags; +#endif + PyObject *import_func; + /* The global import lock. */ + struct { + PyThread_type_lock mutex; + unsigned long thread; + int level; + } lock; + /* diagnostic info in PyImport_ImportModuleLevelObject() */ + struct { + int import_level; + _PyTime_t accumulated; + int header; + } find_and_load; +}; + +#ifdef HAVE_DLOPEN +# include // RTLD_NOW, RTLD_LAZY +# if HAVE_DECL_RTLD_NOW +# define _Py_DLOPEN_FLAGS RTLD_NOW +# else +# define _Py_DLOPEN_FLAGS RTLD_LAZY +# endif +# define DLOPENFLAGS_INIT .dlopenflags = _Py_DLOPEN_FLAGS, +#else +# define _Py_DLOPEN_FLAGS 0 +# define DLOPENFLAGS_INIT +#endif + +#define IMPORTS_INIT \ + { \ + DLOPENFLAGS_INIT \ + .lock = { \ + .mutex = NULL, \ + .thread = PYTHREAD_INVALID_THREAD_ID, \ + .level = 0, \ + }, \ + .find_and_load = { \ + .header = 1, \ + }, \ + } + +extern void _PyImport_ClearCore(PyInterpreterState *interp); + +extern Py_ssize_t _PyImport_GetNextModuleIndex(void); +extern const char * _PyImport_ResolveNameWithPackageContext(const char *name); +extern const char * _PyImport_SwapPackageContext(const char *newcontext); + +extern int _PyImport_GetDLOpenFlags(PyInterpreterState *interp); +extern void _PyImport_SetDLOpenFlags(PyInterpreterState *interp, int new_val); + +extern PyObject * _PyImport_InitModules(PyInterpreterState *interp); +extern PyObject * _PyImport_GetModules(PyInterpreterState *interp); +extern void _PyImport_ClearModules(PyInterpreterState *interp); + +extern void _PyImport_ClearModulesByIndex(PyInterpreterState *interp); + +extern int _PyImport_InitDefaultImportFunc(PyInterpreterState *interp); +extern int _PyImport_IsDefaultImportFunc( + PyInterpreterState *interp, + PyObject *func); + +extern PyObject * _PyImport_GetImportlibLoader( + PyInterpreterState *interp, + const char *loader_name); +extern PyObject * _PyImport_GetImportlibExternalLoader( + PyInterpreterState *interp, + const char *loader_name); +extern PyObject * _PyImport_BlessMyLoader( + PyInterpreterState *interp, + PyObject *module_globals); +extern PyObject * _PyImport_ImportlibModuleRepr( + PyInterpreterState *interp, + PyObject *module); + + +extern PyStatus _PyImport_Init(void); +extern void _PyImport_Fini(void); +extern void _PyImport_Fini2(void); + +extern PyStatus _PyImport_InitCore( + PyThreadState *tstate, + PyObject *sysmod, + int importlib); +extern PyStatus _PyImport_InitExternal(PyThreadState *tstate); +extern void _PyImport_FiniCore(PyInterpreterState *interp); +extern void _PyImport_FiniExternal(PyInterpreterState *interp); + + #ifdef HAVE_FORK -extern PyStatus _PyImport_ReInitLock(void); +extern 
PyStatus _PyImport_ReInitLock(PyInterpreterState *interp); #endif -extern PyObject* _PyImport_BootstrapImp(PyThreadState *tstate); + + +extern PyObject* _PyImport_GetBuiltinModuleNames(void); struct _module_alias { const char *name; /* ASCII encoded string */ const char *orig; /* ASCII encoded string */ }; -PyAPI_DATA(const struct _frozen *) _PyImport_FrozenBootstrap; -PyAPI_DATA(const struct _frozen *) _PyImport_FrozenStdlib; -PyAPI_DATA(const struct _frozen *) _PyImport_FrozenTest; +// Export these 3 symbols for test_ctypes +PyAPI_DATA(const struct _frozen*) _PyImport_FrozenBootstrap; +PyAPI_DATA(const struct _frozen*) _PyImport_FrozenStdlib; +PyAPI_DATA(const struct _frozen*) _PyImport_FrozenTest; + extern const struct _module_alias * _PyImport_FrozenAliases; +extern int _PyImport_CheckSubinterpIncompatibleExtensionAllowed( + const char *name); + + +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(int) _PyImport_ClearExtension(PyObject *name, PyObject *filename); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_importdl.h b/Include/internal/pycore_importdl.h new file mode 100644 index 000000000000000..c8583582b358ac1 --- /dev/null +++ b/Include/internal/pycore_importdl.h @@ -0,0 +1,57 @@ +#ifndef Py_INTERNAL_IMPORTDL_H +#define Py_INTERNAL_IMPORTDL_H + +#include "patchlevel.h" // PY_MAJOR_VERSION + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +extern const char *_PyImport_DynLoadFiletab[]; + +extern PyObject *_PyImport_LoadDynamicModuleWithSpec(PyObject *spec, FILE *); + +typedef PyObject *(*PyModInitFunction)(void); + +/* Max length of module suffix searched for -- accommodates "module.slb" */ +#define MAXSUFFIXSIZE 12 + +#ifdef MS_WINDOWS +#include +typedef FARPROC dl_funcptr; + +#ifdef _DEBUG +# define PYD_DEBUG_SUFFIX "_d" +#else +# define PYD_DEBUG_SUFFIX "" +#endif + +#ifdef Py_GIL_DISABLED +# define PYD_THREADING_TAG "t" +#else +# define PYD_THREADING_TAG "" +#endif + +#ifdef PYD_PLATFORM_TAG +# define PYD_SOABI "cp" Py_STRINGIFY(PY_MAJOR_VERSION) Py_STRINGIFY(PY_MINOR_VERSION) PYD_THREADING_TAG "-" PYD_PLATFORM_TAG +#else +# define PYD_SOABI "cp" Py_STRINGIFY(PY_MAJOR_VERSION) Py_STRINGIFY(PY_MINOR_VERSION) PYD_THREADING_TAG +#endif + +#define PYD_TAGGED_SUFFIX PYD_DEBUG_SUFFIX "." 
PYD_SOABI ".pyd" +#define PYD_UNTAGGED_SUFFIX PYD_DEBUG_SUFFIX ".pyd" + +#else +typedef void (*dl_funcptr)(void); +#endif + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_IMPORTDL_H */ diff --git a/Include/internal/pycore_initconfig.h b/Include/internal/pycore_initconfig.h index 69f88d7d1d46b8e..c86988234f6a050 100644 --- a/Include/internal/pycore_initconfig.h +++ b/Include/internal/pycore_initconfig.h @@ -22,7 +22,7 @@ struct pyruntimestate; #endif #define _PyStatus_OK() \ - (PyStatus){._type = _PyStatus_TYPE_OK,} + (PyStatus){._type = _PyStatus_TYPE_OK} /* other fields are set to 0 */ #define _PyStatus_ERR(ERR_MSG) \ (PyStatus){ \ @@ -30,7 +30,8 @@ struct pyruntimestate; .func = _PyStatus_GET_FUNC(), \ .err_msg = (ERR_MSG)} /* other fields are set to 0 */ -#define _PyStatus_NO_MEMORY() _PyStatus_ERR("memory allocation failed") +#define _PyStatus_NO_MEMORY_ERRMSG "memory allocation failed" +#define _PyStatus_NO_MEMORY() _PyStatus_ERR(_PyStatus_NO_MEMORY_ERRMSG) #define _PyStatus_EXIT(EXITCODE) \ (PyStatus){ \ ._type = _PyStatus_TYPE_EXIT, \ @@ -44,21 +45,23 @@ struct pyruntimestate; #define _PyStatus_UPDATE_FUNC(err) \ do { (err).func = _PyStatus_GET_FUNC(); } while (0) -PyObject* _PyErr_SetFromPyStatus(PyStatus status); +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(void) _PyErr_SetFromPyStatus(PyStatus status); + /* --- PyWideStringList ------------------------------------------------ */ #define _PyWideStringList_INIT (PyWideStringList){.length = 0, .items = NULL} #ifndef NDEBUG -PyAPI_FUNC(int) _PyWideStringList_CheckConsistency(const PyWideStringList *list); +extern int _PyWideStringList_CheckConsistency(const PyWideStringList *list); #endif -PyAPI_FUNC(void) _PyWideStringList_Clear(PyWideStringList *list); -PyAPI_FUNC(int) _PyWideStringList_Copy(PyWideStringList *list, +extern void _PyWideStringList_Clear(PyWideStringList *list); +extern int _PyWideStringList_Copy(PyWideStringList *list, const PyWideStringList *list2); -PyAPI_FUNC(PyStatus) _PyWideStringList_Extend(PyWideStringList *list, +extern PyStatus _PyWideStringList_Extend(PyWideStringList *list, const PyWideStringList *list2); -PyAPI_FUNC(PyObject*) _PyWideStringList_AsList(const PyWideStringList *list); +extern PyObject* _PyWideStringList_AsList(const PyWideStringList *list); /* --- _PyArgv ---------------------------------------------------- */ @@ -70,28 +73,28 @@ typedef struct _PyArgv { wchar_t * const *wchar_argv; } _PyArgv; -PyAPI_FUNC(PyStatus) _PyArgv_AsWstrList(const _PyArgv *args, +extern PyStatus _PyArgv_AsWstrList(const _PyArgv *args, PyWideStringList *list); /* --- Helper functions ------------------------------------------- */ -PyAPI_FUNC(int) _Py_str_to_int( +extern int _Py_str_to_int( const char *str, int *result); -PyAPI_FUNC(const wchar_t*) _Py_get_xoption( +extern const wchar_t* _Py_get_xoption( const PyWideStringList *xoptions, const wchar_t *name); -PyAPI_FUNC(const char*) _Py_GetEnv( +extern const char* _Py_GetEnv( int use_environment, const char *name); -PyAPI_FUNC(void) _Py_get_env_flag( +extern void _Py_get_env_flag( int use_environment, int *flag, const char *name); /* Py_GetArgcArgv() helper */ -PyAPI_FUNC(void) _Py_ClearArgcArgv(void); +extern void _Py_ClearArgcArgv(void); /* --- _PyPreCmdline ------------------------------------------------- */ @@ -124,7 +127,9 @@ extern PyStatus _PyPreCmdline_Read(_PyPreCmdline *cmdline, /* --- PyPreConfig ----------------------------------------------- */ +// Export for '_testembed' program PyAPI_FUNC(void) 
_PyPreConfig_InitCompatConfig(PyPreConfig *preconfig); + extern void _PyPreConfig_InitFromConfig( PyPreConfig *preconfig, const PyConfig *config); @@ -148,7 +153,9 @@ typedef enum { _PyConfig_INIT_ISOLATED = 3 } _PyConfigInitEnum; +// Export for '_testembed' program PyAPI_FUNC(void) _PyConfig_InitCompatConfig(PyConfig *config); + extern PyStatus _PyConfig_Copy( PyConfig *config, const PyConfig *config2); @@ -163,16 +170,16 @@ extern PyStatus _PyConfig_SetPyArgv( PyConfig *config, const _PyArgv *args); -PyAPI_FUNC(PyObject*) _PyConfig_AsDict(const PyConfig *config); -PyAPI_FUNC(int) _PyConfig_FromDict(PyConfig *config, PyObject *dict); extern void _Py_DumpPathConfig(PyThreadState *tstate); -PyAPI_FUNC(PyObject*) _Py_Get_Getpath_CodeObject(void); - /* --- Function used for testing ---------------------------------- */ +// Export these functions for '_testinternalcapi' shared extension +PyAPI_FUNC(PyObject*) _PyConfig_AsDict(const PyConfig *config); +PyAPI_FUNC(int) _PyConfig_FromDict(PyConfig *config, PyObject *dict); +PyAPI_FUNC(PyObject*) _Py_Get_Getpath_CodeObject(void); PyAPI_FUNC(PyObject*) _Py_GetConfigsAsDict(void); #ifdef __cplusplus diff --git a/Include/internal/pycore_instruments.h b/Include/internal/pycore_instruments.h new file mode 100644 index 000000000000000..eae8371ef7f9b8f --- /dev/null +++ b/Include/internal/pycore_instruments.h @@ -0,0 +1,107 @@ +#ifndef Py_INTERNAL_INSTRUMENT_H +#define Py_INTERNAL_INSTRUMENT_H + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_frame.h" // _PyInterpreterFrame + +#ifdef __cplusplus +extern "C" { +#endif + +#define PY_MONITORING_TOOL_IDS 8 + +/* Local events. + * These require bytecode instrumentation */ + +#define PY_MONITORING_EVENT_PY_START 0 +#define PY_MONITORING_EVENT_PY_RESUME 1 +#define PY_MONITORING_EVENT_PY_RETURN 2 +#define PY_MONITORING_EVENT_PY_YIELD 3 +#define PY_MONITORING_EVENT_CALL 4 +#define PY_MONITORING_EVENT_LINE 5 +#define PY_MONITORING_EVENT_INSTRUCTION 6 +#define PY_MONITORING_EVENT_JUMP 7 +#define PY_MONITORING_EVENT_BRANCH 8 +#define PY_MONITORING_EVENT_STOP_ITERATION 9 + +#define PY_MONITORING_IS_INSTRUMENTED_EVENT(ev) \ + ((ev) < _PY_MONITORING_LOCAL_EVENTS) + +/* Other events, mainly exceptions */ + +#define PY_MONITORING_EVENT_RAISE 10 +#define PY_MONITORING_EVENT_EXCEPTION_HANDLED 11 +#define PY_MONITORING_EVENT_PY_UNWIND 12 +#define PY_MONITORING_EVENT_PY_THROW 13 +#define PY_MONITORING_EVENT_RERAISE 14 + + +/* Ancilliary events */ + +#define PY_MONITORING_EVENT_C_RETURN 15 +#define PY_MONITORING_EVENT_C_RAISE 16 + + +typedef uint32_t _PyMonitoringEventSet; + +/* Tool IDs */ + +/* These are defined in PEP 669 for convenience to avoid clashes */ +#define PY_MONITORING_DEBUGGER_ID 0 +#define PY_MONITORING_COVERAGE_ID 1 +#define PY_MONITORING_PROFILER_ID 2 +#define PY_MONITORING_OPTIMIZER_ID 5 + +/* Internal IDs used to suuport sys.setprofile() and sys.settrace() */ +#define PY_MONITORING_SYS_PROFILE_ID 6 +#define PY_MONITORING_SYS_TRACE_ID 7 + + +PyObject *_PyMonitoring_RegisterCallback(int tool_id, int event_id, PyObject *obj); + +int _PyMonitoring_SetEvents(int tool_id, _PyMonitoringEventSet events); +int _PyMonitoring_SetLocalEvents(PyCodeObject *code, int tool_id, _PyMonitoringEventSet events); +int _PyMonitoring_GetLocalEvents(PyCodeObject *code, int tool_id, _PyMonitoringEventSet *events); + +extern int +_Py_call_instrumentation(PyThreadState *tstate, int event, + _PyInterpreterFrame *frame, _Py_CODEUNIT *instr); + +extern int 
+_Py_call_instrumentation_line(PyThreadState *tstate, _PyInterpreterFrame* frame, + _Py_CODEUNIT *instr, _Py_CODEUNIT *prev); + +extern int +_Py_call_instrumentation_instruction( + PyThreadState *tstate, _PyInterpreterFrame* frame, _Py_CODEUNIT *instr); + +_Py_CODEUNIT * +_Py_call_instrumentation_jump( + PyThreadState *tstate, int event, + _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, _Py_CODEUNIT *target); + +extern int +_Py_call_instrumentation_arg(PyThreadState *tstate, int event, + _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg); + +extern int +_Py_call_instrumentation_2args(PyThreadState *tstate, int event, + _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1); + +extern void +_Py_call_instrumentation_exc2(PyThreadState *tstate, int event, + _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1); + +extern int +_Py_Instrumentation_GetLine(PyCodeObject *code, int index); + +extern PyObject _PyInstrumentation_MISSING; +extern PyObject _PyInstrumentation_DISABLE; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_INSTRUMENT_H */ diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h index 6f8fbeb1ba3e31e..498db8becf114cf 100644 --- a/Include/internal/pycore_interp.h +++ b/Include/internal/pycore_interp.h @@ -8,72 +8,41 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include +#include // bool -#include "pycore_atomic.h" // _Py_atomic_address #include "pycore_ast_state.h" // struct ast_state +#include "pycore_atexit.h" // struct atexit_state +#include "pycore_ceval_state.h" // struct _ceval_state #include "pycore_code.h" // struct callable_cache #include "pycore_context.h" // struct _Py_context_state -#include "pycore_dict.h" // struct _Py_dict_state +#include "pycore_crossinterp.h" // struct _xidregistry +#include "pycore_dict_state.h" // struct _Py_dict_state +#include "pycore_dtoa.h" // struct _dtoa_state #include "pycore_exceptions.h" // struct _Py_exc_state #include "pycore_floatobject.h" // struct _Py_float_state -#include "pycore_genobject.h" // struct _Py_async_gen_state +#include "pycore_function.h" // FUNC_MAX_WATCHERS #include "pycore_gc.h" // struct _gc_runtime_state +#include "pycore_genobject.h" // struct _Py_async_gen_state +#include "pycore_global_objects.h"// struct _Py_interp_cached_objects +#include "pycore_import.h" // struct _import_state +#include "pycore_instruments.h" // _PY_MONITORING_EVENTS #include "pycore_list.h" // struct _Py_list_state +#include "pycore_object_state.h" // struct _py_object_state +#include "pycore_obmalloc.h" // struct _obmalloc_state #include "pycore_tuple.h" // struct _Py_tuple_state -#include "pycore_typeobject.h" // struct type_cache +#include "pycore_typeobject.h" // struct types_state #include "pycore_unicodeobject.h" // struct _Py_unicode_state #include "pycore_warnings.h" // struct _warnings_runtime_state -struct _pending_calls { - PyThread_type_lock lock; - /* Request for running pending calls. */ - _Py_atomic_int calls_to_do; - /* Request for looking at the `async_exc` field of the current - thread state. - Guarded by the GIL. */ - int async_exc; -#define NPENDINGCALLS 32 - struct { - int (*func)(void *); - void *arg; - } calls[NPENDINGCALLS]; - int first; - int last; -}; - -struct _ceval_state { - int recursion_limit; - /* This single variable consolidates all requests to break out of - the fast path in the eval loop. 
*/ - _Py_atomic_int eval_breaker; - /* Request for dropping the GIL */ - _Py_atomic_int gil_drop_request; - /* The GC is ready to be executed */ - _Py_atomic_int gc_scheduled; - struct _pending_calls pending; -}; - - -// atexit state -typedef struct { - PyObject *func; - PyObject *args; - PyObject *kwargs; -} atexit_callback; - -struct atexit_state { - atexit_callback **callbacks; - int ncallbacks; - int callback_len; -}; - struct _Py_long_state { int max_str_digits; }; +/* cross-interpreter data registry */ + + /* interpreter state */ /* PyInterpreterState holds the global state for one of the runtime's @@ -83,12 +52,32 @@ struct _Py_long_state { */ struct _is { + /* This struct countains the eval_breaker, + * which is by far the hottest field in this struct + * and should be placed at the beginning. */ + struct _ceval_state ceval; + PyInterpreterState *next; + int64_t id; + int64_t id_refcount; + int requires_idref; + PyThread_type_lock id_mutex; + + /* Has been initialized to a safe state. + + In order to be effective, this must be set to 0 during or right + after allocation. */ + int _initialized; + int finalizing; + + uintptr_t last_restart_version; struct pythreads { uint64_t next_unique_id; /* The linked list of threads, newest first. */ PyThreadState *head; + /* The thread currently executing in the __main__ module, if any. */ + PyThreadState *main; /* Used in Modules/_threadmodule.c. */ long count; /* Support for runtime thread stack size tuning. @@ -103,36 +92,44 @@ struct _is { Get runtime from tstate: tstate->interp->runtime. */ struct pyruntimestate *runtime; - int64_t id; - int64_t id_refcount; - int requires_idref; - PyThread_type_lock id_mutex; + /* Set by Py_EndInterpreter(). - /* Has been initialized to a safe state. + Use _PyInterpreterState_GetFinalizing() + and _PyInterpreterState_SetFinalizing() + to access it, don't access it directly. */ + PyThreadState* _finalizing; + /* The ID of the OS thread in which we are finalizing. */ + unsigned long _finalizing_id; - In order to be effective, this must be set to 0 during or right - after allocation. */ - int _initialized; - int finalizing; + struct _gc_runtime_state gc; - /* Was this interpreter statically allocated? */ - bool _static; + /* The following fields are here to avoid allocation during init. + The data is exposed through PyInterpreterState pointer fields. + These fields should not be accessed directly outside of init. - struct _ceval_state ceval; - struct _gc_runtime_state gc; + All other PyInterpreterState pointer fields are populated when + needed and default to NULL. + + For now there are some exceptions to that rule, which require + allocation during init. These will be addressed on a case-by-case + basis. Also see _PyRuntimeState regarding the various mutex fields. + */ - // sys.modules dictionary - PyObject *modules; - PyObject *modules_by_index; // Dictionary of the sys module PyObject *sysdict; + // Dictionary of the builtins module PyObject *builtins; - // importlib module - PyObject *importlib; - // override for config->use_frozen_modules (for tests) - // (-1: "off", 1: "on", 0: no override) - int override_frozen_modules; + + struct _import_state imports; + + /* The per-interpreter GIL, which might not be used. */ + struct _gil_runtime_state _gil; + + /* ---------- IMPORTANT --------------------------- + The fields above this line are declared as early as + possible to facilitate out-of-process observability + tools. 
*/ PyObject *codec_search_path; PyObject *codec_search_cache; @@ -140,23 +137,25 @@ struct _is { int codecs_initialized; PyConfig config; -#ifdef HAVE_DLOPEN - int dlopenflags; -#endif unsigned long feature_flags; PyObject *dict; /* Stores per-interpreter state */ + PyObject *sysdict_copy; PyObject *builtins_copy; - PyObject *import_func; // Initialized to _PyEval_EvalFrameDefault(). _PyFrameEvalFunction eval_frame; - PyDict_WatchCallback dict_watchers[DICT_MAX_WATCHERS]; + PyFunction_WatchCallback func_watchers[FUNC_MAX_WATCHERS]; + // One bit is set for each non-NULL entry in func_watchers + uint8_t active_func_watchers; Py_ssize_t co_extra_user_count; freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS]; + /* cross-interpreter data and utils */ + struct _xi_state xi; + #ifdef HAVE_FORK PyObject *before_forkers; PyObject *after_forkers_parent; @@ -166,12 +165,20 @@ struct _is { struct _warnings_runtime_state warnings; struct atexit_state atexit; + struct _obmalloc_state obmalloc; + PyObject *audit_hooks; PyType_WatchCallback type_watchers[TYPE_MAX_WATCHERS]; + PyCode_WatchCallback code_watchers[CODE_MAX_WATCHERS]; + // One bit is set for each non-NULL entry in code_watchers + uint8_t active_code_watchers; + struct _py_object_state object_state; struct _Py_unicode_state unicode; struct _Py_float_state float_state; struct _Py_long_state long_state; + struct _dtoa_state dtoa; + struct _py_func_state func_state; /* Using a cache is very effective since typically only a single slice is created and then deleted again. */ PySliceObject *slice_cache; @@ -186,49 +193,140 @@ struct _is { struct ast_state ast; struct types_state types; struct callable_cache callable_cache; - - /* The following fields are here to avoid allocation during init. - The data is exposed through PyInterpreterState pointer fields. - These fields should not be accessed directly outside of init. - - All other PyInterpreterState pointer fields are populated when - needed and default to NULL. - - For now there are some exceptions to that rule, which require - allocation during init. These will be addressed on a case-by-case - basis. Also see _PyRuntimeState regarding the various mutex fields. - */ - - /* the initial PyInterpreterState.threads.head */ + _PyOptimizerObject *optimizer; + _PyExecutorObject *executor_list_head; + uint16_t optimizer_resume_threshold; + uint16_t optimizer_backedge_threshold; + uint32_t next_func_version; + + _Py_GlobalMonitors monitors; + bool sys_profile_initialized; + bool sys_trace_initialized; + Py_ssize_t sys_profiling_threads; /* Count of threads with c_profilefunc set */ + Py_ssize_t sys_tracing_threads; /* Count of threads with c_tracefunc set */ + PyObject *monitoring_callables[PY_MONITORING_TOOL_IDS][_PY_MONITORING_EVENTS]; + PyObject *monitoring_tool_names[PY_MONITORING_TOOL_IDS]; + + struct _Py_interp_cached_objects cached_objects; + struct _Py_interp_static_objects static_objects; + + /* the initial PyInterpreterState.threads.head */ PyThreadState _initial_thread; + Py_ssize_t _interactive_src_count; }; /* other API */ -extern void _PyInterpreterState_ClearModules(PyInterpreterState *interp); extern void _PyInterpreterState_Clear(PyThreadState *tstate); -/* cross-interpreter data registry */ +static inline PyThreadState* +_PyInterpreterState_GetFinalizing(PyInterpreterState *interp) { + return (PyThreadState*)_Py_atomic_load_ptr_relaxed(&interp->_finalizing); +} -/* For now we use a global registry of shareable classes. 
An - alternative would be to add a tp_* slot for a class's - crossinterpdatafunc. It would be simpler and more efficient. */ +static inline unsigned long +_PyInterpreterState_GetFinalizingID(PyInterpreterState *interp) { + return _Py_atomic_load_ulong_relaxed(&interp->_finalizing_id); +} -struct _xidregitem; +static inline void +_PyInterpreterState_SetFinalizing(PyInterpreterState *interp, PyThreadState *tstate) { + _Py_atomic_store_ptr_relaxed(&interp->_finalizing, tstate); + if (tstate == NULL) { + _Py_atomic_store_ulong_relaxed(&interp->_finalizing_id, 0); + } + else { + // XXX Re-enable this assert once gh-109860 is fixed. + //assert(tstate->thread_id == PyThread_get_thread_ident()); + _Py_atomic_store_ulong_relaxed(&interp->_finalizing_id, + tstate->thread_id); + } +} -struct _xidregitem { - PyTypeObject *cls; - crossinterpdatafunc getdata; - struct _xidregitem *next; -}; -PyAPI_FUNC(PyInterpreterState*) _PyInterpreterState_LookUpID(int64_t); +// Export for the _xxinterpchannels module. +PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpID(int64_t); + +extern int _PyInterpreterState_IDInitref(PyInterpreterState *); +extern int _PyInterpreterState_IDIncref(PyInterpreterState *); +extern void _PyInterpreterState_IDDecref(PyInterpreterState *); + +extern const PyConfig* _PyInterpreterState_GetConfig(PyInterpreterState *interp); + +// Get a copy of the current interpreter configuration. +// +// Return 0 on success. Raise an exception and return -1 on error. +// +// The caller must initialize 'config', using PyConfig_InitPythonConfig() +// for example. +// +// Python must be preinitialized to call this method. +// The caller must hold the GIL. +// +// Once done with the configuration, PyConfig_Clear() must be called to clear +// it. +// +// Export for '_testinternalcapi' shared extension. +PyAPI_FUNC(int) _PyInterpreterState_GetConfigCopy( + struct PyConfig *config); + +// Set the configuration of the current interpreter. +// +// This function should be called during or just after the Python +// initialization. +// +// Update the sys module with the new configuration. If the sys module was +// modified directly after the Python initialization, these changes are lost. +// +// Some configuration like faulthandler or warnoptions can be updated in the +// configuration, but don't reconfigure Python (don't enable/disable +// faulthandler and don't reconfigure warnings filters). +// +// Return 0 on success. Raise an exception and return -1 on error. +// +// The configuration should come from _PyInterpreterState_GetConfigCopy(). +// +// Export for '_testinternalcapi' shared extension. +PyAPI_FUNC(int) _PyInterpreterState_SetConfig( + const struct PyConfig *config); + + +/* +Runtime Feature Flags + +Each flag indicate whether or not a specific runtime feature +is available in a given context. For example, forking the process +might not be allowed in the current interpreter (i.e. os.fork() would fail). +*/ + +/* Set if the interpreter share obmalloc runtime state + with the main interpreter. */ +#define Py_RTFLAGS_USE_MAIN_OBMALLOC (1UL << 5) + +/* Set if import should check a module for subinterpreter support. */ +#define Py_RTFLAGS_MULTI_INTERP_EXTENSIONS (1UL << 8) + +/* Set if threads are allowed. */ +#define Py_RTFLAGS_THREADS (1UL << 10) + +/* Set if daemon threads are allowed. */ +#define Py_RTFLAGS_DAEMON_THREADS (1UL << 11) + +/* Set if os.fork() is allowed. */ +#define Py_RTFLAGS_FORK (1UL << 15) + +/* Set if os.exec*() is allowed. 
*/ +#define Py_RTFLAGS_EXEC (1UL << 16) + +extern int _PyInterpreterState_HasFeature(PyInterpreterState *interp, + unsigned long feature); + +PyAPI_FUNC(PyStatus) _PyInterpreterState_New( + PyThreadState *tstate, + PyInterpreterState **pinterp); -PyAPI_FUNC(int) _PyInterpreterState_IDInitref(PyInterpreterState *); -PyAPI_FUNC(int) _PyInterpreterState_IDIncref(PyInterpreterState *); -PyAPI_FUNC(void) _PyInterpreterState_IDDecref(PyInterpreterState *); #ifdef __cplusplus } diff --git a/Include/internal/pycore_interpreteridobject.h b/Include/internal/pycore_interpreteridobject.h deleted file mode 100644 index 804831e76deaeac..000000000000000 --- a/Include/internal/pycore_interpreteridobject.h +++ /dev/null @@ -1,22 +0,0 @@ -/* Interpreter ID Object */ - -#ifndef Py_INTERNAL_INTERPRETERIDOBJECT_H -#define Py_INTERNAL_INTERPRETERIDOBJECT_H -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef Py_BUILD_CORE -# error "this header requires Py_BUILD_CORE define" -#endif - -PyAPI_DATA(PyTypeObject) _PyInterpreterID_Type; - -PyAPI_FUNC(PyObject *) _PyInterpreterID_New(int64_t); -PyAPI_FUNC(PyObject *) _PyInterpreterState_GetIDObject(PyInterpreterState *); -PyAPI_FUNC(PyInterpreterState *) _PyInterpreterID_LookUp(PyObject *); - -#ifdef __cplusplus -} -#endif -#endif // !Py_INTERNAL_INTERPRETERIDOBJECT_H diff --git a/Include/internal/pycore_intrinsics.h b/Include/internal/pycore_intrinsics.h new file mode 100644 index 000000000000000..3a8dd95cff8e5db --- /dev/null +++ b/Include/internal/pycore_intrinsics.h @@ -0,0 +1,50 @@ +#ifndef Py_INTERNAL_INTRINSIC_H +#define Py_INTERNAL_INTRINSIC_H + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +/* Unary Functions: */ +#define INTRINSIC_1_INVALID 0 +#define INTRINSIC_PRINT 1 +#define INTRINSIC_IMPORT_STAR 2 +#define INTRINSIC_STOPITERATION_ERROR 3 +#define INTRINSIC_ASYNC_GEN_WRAP 4 +#define INTRINSIC_UNARY_POSITIVE 5 +#define INTRINSIC_LIST_TO_TUPLE 6 +#define INTRINSIC_TYPEVAR 7 +#define INTRINSIC_PARAMSPEC 8 +#define INTRINSIC_TYPEVARTUPLE 9 +#define INTRINSIC_SUBSCRIPT_GENERIC 10 +#define INTRINSIC_TYPEALIAS 11 + +#define MAX_INTRINSIC_1 11 + + +/* Binary Functions: */ +#define INTRINSIC_2_INVALID 0 +#define INTRINSIC_PREP_RERAISE_STAR 1 +#define INTRINSIC_TYPEVAR_WITH_BOUND 2 +#define INTRINSIC_TYPEVAR_WITH_CONSTRAINTS 3 +#define INTRINSIC_SET_FUNCTION_TYPE_PARAMS 4 + +#define MAX_INTRINSIC_2 4 + +typedef PyObject *(*intrinsic_func1)(PyThreadState* tstate, PyObject *value); +typedef PyObject *(*intrinsic_func2)(PyThreadState* tstate, PyObject *value1, PyObject *value2); + +typedef struct { + intrinsic_func1 func; + const char *name; +} intrinsic_func1_info; + +typedef struct { + intrinsic_func2 func; + const char *name; +} intrinsic_func2_info; + +extern const intrinsic_func1_info _PyIntrinsics_UnaryFunctions[]; +extern const intrinsic_func2_info _PyIntrinsics_BinaryFunctions[]; + +#endif // !Py_INTERNAL_INTRINSIC_H diff --git a/Include/internal/pycore_list.h b/Include/internal/pycore_list.h index 691d13bc8d9ffab..55d67b32bc8a63e 100644 --- a/Include/internal/pycore_list.h +++ b/Include/internal/pycore_list.h @@ -8,7 +8,9 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "listobject.h" // _PyList_CAST() + +extern PyObject* _PyList_Extend(PyListObject *, PyObject *); +extern void _PyList_DebugMallocStats(FILE *out); /* runtime lifecycle */ @@ -35,7 +37,7 @@ struct _Py_list_state { #endif }; -#define _PyList_ITEMS(op) (_PyList_CAST(op)->ob_item) +#define _PyList_ITEMS(op) 
_Py_RVALUE(_PyList_CAST(op)->ob_item) extern int _PyList_AppendTakeRefListResize(PyListObject *self, PyObject *newitem); @@ -75,6 +77,8 @@ typedef struct { PyListObject *it_seq; /* Set to NULL when iterator is exhausted */ } _PyListIterObject; +extern PyObject *_PyList_FromArraySteal(PyObject *const *src, Py_ssize_t n); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_llist.h b/Include/internal/pycore_llist.h new file mode 100644 index 000000000000000..5fd261da05fa5d0 --- /dev/null +++ b/Include/internal/pycore_llist.h @@ -0,0 +1,107 @@ +// A doubly-linked list that can be embedded in a struct. +// +// Usage: +// struct llist_node head = LLIST_INIT(head); +// typedef struct { +// ... +// struct llist_node node; +// ... +// } MyObj; +// +// llist_insert_tail(&head, &obj->node); +// llist_remove(&obj->node); +// +// struct llist_node *node; +// llist_for_each(node, &head) { +// MyObj *obj = llist_data(node, MyObj, node); +// ... +// } +// + +#ifndef Py_INTERNAL_LLIST_H +#define Py_INTERNAL_LLIST_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "Py_BUILD_CORE must be defined to include this header" +#endif + +struct llist_node { + struct llist_node *next; + struct llist_node *prev; +}; + +// Get the struct containing a node. +#define llist_data(node, type, member) \ + (type*)((char*)node - offsetof(type, member)) + +// Iterate over a list. +#define llist_for_each(node, head) \ + for (node = (head)->next; node != (head); node = node->next) + +// Iterate over a list, but allow removal of the current node. +#define llist_for_each_safe(node, head) \ + for (struct llist_node *_next = (node = (head)->next, node->next); \ + node != (head); node = _next, _next = node->next) + +#define LLIST_INIT(head) { &head, &head } + +static inline void +llist_init(struct llist_node *head) +{ + head->next = head; + head->prev = head; +} + +// Returns 1 if the list is empty, 0 otherwise. +static inline int +llist_empty(struct llist_node *head) +{ + return head->next == head; +} + +// Appends to the tail of the list. +static inline void +llist_insert_tail(struct llist_node *head, struct llist_node *node) +{ + node->prev = head->prev; + node->next = head; + head->prev->next = node; + head->prev = node; +} + +// Remove a node from the list. +static inline void +llist_remove(struct llist_node *node) +{ + struct llist_node *prev = node->prev; + struct llist_node *next = node->next; + prev->next = next; + next->prev = prev; + node->prev = NULL; + node->next = NULL; +} + +// Append all nodes from head2 onto head1. head2 is left empty. +static inline void +llist_concat(struct llist_node *head1, struct llist_node *head2) +{ + if (!llist_empty(head2)) { + head1->prev->next = head2->next; + head2->next->prev = head1->prev; + + head1->prev = head2->prev; + head2->prev->next = head1; + llist_init(head2); + } +} + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_LLIST_H */ diff --git a/Include/internal/pycore_lock.h b/Include/internal/pycore_lock.h new file mode 100644 index 000000000000000..f135cbbc3754fba --- /dev/null +++ b/Include/internal/pycore_lock.h @@ -0,0 +1,202 @@ +// Lightweight locks and other synchronization mechanisms. +// +// These implementations are based on WebKit's WTF::Lock. See +// https://webkit.org/blog/6161/locking-in-webkit/ for a description of the +// design. 
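The embedded doubly-linked list introduced in pycore_llist.h above is intentionally minimal: a struct embeds a `struct llist_node` and the inline helpers do the linking. The sketch below is illustrative only; it assumes it is compiled as part of CPython core (Py_BUILD_CORE defined) so the internal header is available, and `IntItem`/`demo_llist` are hypothetical names invented for the example.

/* Illustrative sketch only: assumes compilation inside CPython core
 * (Py_BUILD_CORE defined) so the internal header is available.
 * IntItem and demo_llist are hypothetical names. */
#include "pycore_llist.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int value;
    struct llist_node node;     /* embedded linkage, no separate allocation */
} IntItem;

static void
demo_llist(void)
{
    struct llist_node head = LLIST_INIT(head);

    /* Append a few items to the tail of the list (error handling omitted). */
    for (int i = 0; i < 3; i++) {
        IntItem *item = (IntItem *)malloc(sizeof(IntItem));
        item->value = i;
        llist_insert_tail(&head, &item->node);
    }

    /* Walk the list; llist_data() recovers the containing struct. */
    struct llist_node *node;
    llist_for_each(node, &head) {
        IntItem *item = llist_data(node, IntItem, node);
        printf("%d\n", item->value);
    }

    /* The _safe variant tolerates removing the current node while iterating. */
    llist_for_each_safe(node, &head) {
        IntItem *item = llist_data(node, IntItem, node);
        llist_remove(node);
        free(item);
    }
    assert(llist_empty(&head));
}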
+#ifndef Py_INTERNAL_LOCK_H +#define Py_INTERNAL_LOCK_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_time.h" // _PyTime_t + + +// A mutex that occupies one byte. The lock can be zero initialized. +// +// Only the two least significant bits are used. The remaining bits should be +// zero: +// 0b00: unlocked +// 0b01: locked +// 0b10: unlocked and has parked threads +// 0b11: locked and has parked threads +// +// Typical initialization: +// PyMutex m = (PyMutex){0}; +// +// Typical usage: +// PyMutex_Lock(&m); +// ... +// PyMutex_Unlock(&m); + +// NOTE: In Py_GIL_DISABLED builds, `struct _PyMutex` is defined in Include/object.h. +// The Py_GIL_DISABLED builds need the definition in Include/object.h for the +// `ob_mutex` field in PyObject. For the default (non-free-threaded) build, +// we define the struct here to avoid exposing it in the public API. +#ifndef Py_GIL_DISABLED +struct _PyMutex { uint8_t v; }; +#endif + +typedef struct _PyMutex PyMutex; + +#define _Py_UNLOCKED 0 +#define _Py_LOCKED 1 +#define _Py_HAS_PARKED 2 +#define _Py_ONCE_INITIALIZED 4 + +// (private) slow path for locking the mutex +PyAPI_FUNC(void) _PyMutex_LockSlow(PyMutex *m); + +// (private) slow path for unlocking the mutex +PyAPI_FUNC(void) _PyMutex_UnlockSlow(PyMutex *m); + +static inline int +PyMutex_LockFast(uint8_t *lock_bits) +{ + uint8_t expected = _Py_UNLOCKED; + return _Py_atomic_compare_exchange_uint8(lock_bits, &expected, _Py_LOCKED); +} + +// Locks the mutex. +// +// If the mutex is currently locked, the calling thread will be parked until +// the mutex is unlocked. If the current thread holds the GIL, then the GIL +// will be released while the thread is parked. +static inline void +PyMutex_Lock(PyMutex *m) +{ + uint8_t expected = _Py_UNLOCKED; + if (!_Py_atomic_compare_exchange_uint8(&m->v, &expected, _Py_LOCKED)) { + _PyMutex_LockSlow(m); + } +} + +// Unlocks the mutex. +static inline void +PyMutex_Unlock(PyMutex *m) +{ + uint8_t expected = _Py_LOCKED; + if (!_Py_atomic_compare_exchange_uint8(&m->v, &expected, _Py_UNLOCKED)) { + _PyMutex_UnlockSlow(m); + } +} + +// Checks if the mutex is currently locked. +static inline int +PyMutex_IsLocked(PyMutex *m) +{ + return (_Py_atomic_load_uint8(&m->v) & _Py_LOCKED) != 0; +} + +typedef enum _PyLockFlags { + // Do not detach/release the GIL when waiting on the lock. + _Py_LOCK_DONT_DETACH = 0, + + // Detach/release the GIL while waiting on the lock. + _PY_LOCK_DETACH = 1, + + // Handle signals if interrupted while waiting on the lock. + _PY_LOCK_HANDLE_SIGNALS = 2, +} _PyLockFlags; + +// Lock a mutex with an optional timeout and additional options. See +// _PyLockFlags for details. +extern PyLockStatus +_PyMutex_LockTimed(PyMutex *m, _PyTime_t timeout_ns, _PyLockFlags flags); + +// Unlock a mutex, returns 0 if the mutex is not locked (used for improved +// error messages). +extern int _PyMutex_TryUnlock(PyMutex *m); + + +// PyEvent is a one-time event notification +typedef struct { + uint8_t v; +} PyEvent; + +// Set the event and notify any waiting threads. +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(void) _PyEvent_Notify(PyEvent *evt); + +// Wait for the event to be set. If the event is already set, then this returns +// immediately. +PyAPI_FUNC(void) PyEvent_Wait(PyEvent *evt); + +// Wait for the event to be set, or until the timeout expires. If the event is +// already set, then this returns immediately. 
Returns 1 if the event was set, +// and 0 if the timeout expired or thread was interrupted. +PyAPI_FUNC(int) PyEvent_WaitTimed(PyEvent *evt, _PyTime_t timeout_ns); + + +// _PyRawMutex implements a word-sized mutex that that does not depend on the +// parking lot API, and therefore can be used in the parking lot +// implementation. +// +// The mutex uses a packed representation: the least significant bit is used to +// indicate whether the mutex is locked or not. The remaining bits are either +// zero or a pointer to a `struct raw_mutex_entry` (see lock.c). +typedef struct { + uintptr_t v; +} _PyRawMutex; + +// Slow paths for lock/unlock +extern void _PyRawMutex_LockSlow(_PyRawMutex *m); +extern void _PyRawMutex_UnlockSlow(_PyRawMutex *m); + +static inline void +_PyRawMutex_Lock(_PyRawMutex *m) +{ + uintptr_t unlocked = _Py_UNLOCKED; + if (_Py_atomic_compare_exchange_uintptr(&m->v, &unlocked, _Py_LOCKED)) { + return; + } + _PyRawMutex_LockSlow(m); +} + +static inline void +_PyRawMutex_Unlock(_PyRawMutex *m) +{ + uintptr_t locked = _Py_LOCKED; + if (_Py_atomic_compare_exchange_uintptr(&m->v, &locked, _Py_UNLOCKED)) { + return; + } + _PyRawMutex_UnlockSlow(m); +} + +// A data structure that can be used to run initialization code once in a +// thread-safe manner. The C++11 equivalent is std::call_once. +typedef struct { + uint8_t v; +} _PyOnceFlag; + +// Type signature for one-time initialization functions. The function should +// return 0 on success and -1 on failure. +typedef int _Py_once_fn_t(void *arg); + +// (private) slow path for one time initialization +PyAPI_FUNC(int) +_PyOnceFlag_CallOnceSlow(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg); + +// Calls `fn` once using `flag`. The `arg` is passed to the call to `fn`. +// +// Returns 0 on success and -1 on failure. +// +// If `fn` returns 0 (success), then subsequent calls immediately return 0. +// If `fn` returns -1 (failure), then subsequent calls will retry the call. +static inline int +_PyOnceFlag_CallOnce(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg) +{ + if (_Py_atomic_load_uint8(&flag->v) == _Py_ONCE_INITIALIZED) { + return 0; + } + return _PyOnceFlag_CallOnceSlow(flag, fn, arg); +} + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_LOCK_H */ diff --git a/Include/internal/pycore_long.h b/Include/internal/pycore_long.h index 30c97b7edc98e15..ec27df9e416c58c 100644 --- a/Include/internal/pycore_long.h +++ b/Include/internal/pycore_long.h @@ -8,7 +8,8 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_global_objects.h" // _PY_NSMALLNEGINTS +#include "pycore_bytesobject.h" // _PyBytesWriter +#include "pycore_global_objects.h"// _PY_NSMALLNEGINTS #include "pycore_runtime.h" // _PyRuntime /* @@ -46,6 +47,16 @@ extern "C" { # error "_PY_LONG_DEFAULT_MAX_STR_DIGITS smaller than threshold." #endif +// _PyLong_NumBits. Return the number of bits needed to represent the +// absolute value of a long. For example, this returns 1 for 1 and -1, 2 +// for 2 and -2, and 2 for 3 and -3. It returns 0 for 0. +// v must not be NULL, and must be a normalized long. +// (size_t)-1 is returned and OverflowError set if the true result doesn't +// fit in a size_t. +// +// Export for 'math' shared extension. +PyAPI_FUNC(size_t) _PyLong_NumBits(PyObject *v); + /* runtime lifecycle */ @@ -63,53 +74,243 @@ extern void _PyLong_FiniTypes(PyInterpreterState *interp); # error "_PY_NSMALLPOSINTS must be greater than or equal to 257" #endif -// Return a borrowed reference to the zero singleton. 
+// Return a reference to the immortal zero singleton. // The function cannot return NULL. static inline PyObject* _PyLong_GetZero(void) { return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS]; } -// Return a borrowed reference to the one singleton. +// Return a reference to the immortal one singleton. // The function cannot return NULL. static inline PyObject* _PyLong_GetOne(void) { return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+1]; } static inline PyObject* _PyLong_FromUnsignedChar(unsigned char i) { - return Py_NewRef((PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+i]); + return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+i]; } -PyObject *_PyLong_Add(PyLongObject *left, PyLongObject *right); -PyObject *_PyLong_Multiply(PyLongObject *left, PyLongObject *right); -PyObject *_PyLong_Subtract(PyLongObject *left, PyLongObject *right); +// _PyLong_Frexp returns a double x and an exponent e such that the +// true value is approximately equal to x * 2**e. e is >= 0. x is +// 0.0 if and only if the input is 0 (in which case, e and x are both +// zeroes); otherwise, 0.5 <= abs(x) < 1.0. On overflow, which is +// possible if the number of bits doesn't fit into a Py_ssize_t, sets +// OverflowError and returns -1.0 for x, 0 for e. +// +// Export for 'math' shared extension +PyAPI_DATA(double) _PyLong_Frexp(PyLongObject *a, Py_ssize_t *e); + +extern PyObject* _PyLong_FromBytes(const char *, Py_ssize_t, int); + +// _PyLong_DivmodNear. Given integers a and b, compute the nearest +// integer q to the exact quotient a / b, rounding to the nearest even integer +// in the case of a tie. Return (q, r), where r = a - q*b. The remainder r +// will satisfy abs(r) <= abs(b)/2, with equality possible only if q is +// even. +// +// Export for '_datetime' shared extension. +PyAPI_DATA(PyObject*) _PyLong_DivmodNear(PyObject *, PyObject *); + +// _PyLong_Format: Convert the long to a string object with given base, +// appending a base prefix of 0[box] if base is 2, 8 or 16. +// Export for '_tkinter' shared extension. +PyAPI_DATA(PyObject*) _PyLong_Format(PyObject *obj, int base); -int _PyLong_AssignValue(PyObject **target, Py_ssize_t value); +// Export for 'math' shared extension +PyAPI_DATA(PyObject*) _PyLong_Rshift(PyObject *, size_t); -/* Used by Python/mystrtoul.c, _PyBytes_FromHex(), - _PyBytes_DecodeEscape(), etc. */ +// Export for 'math' shared extension +PyAPI_DATA(PyObject*) _PyLong_Lshift(PyObject *, size_t); + +extern PyObject* _PyLong_Add(PyLongObject *left, PyLongObject *right); +extern PyObject* _PyLong_Multiply(PyLongObject *left, PyLongObject *right); +extern PyObject* _PyLong_Subtract(PyLongObject *left, PyLongObject *right); + +// Export for 'binascii' shared extension. PyAPI_DATA(unsigned char) _PyLong_DigitValue[256]; /* Format the object based on the format_spec, as defined in PEP 3101 (Advanced String Formatting). 
*/ -PyAPI_FUNC(int) _PyLong_FormatAdvancedWriter( +extern int _PyLong_FormatAdvancedWriter( _PyUnicodeWriter *writer, PyObject *obj, PyObject *format_spec, Py_ssize_t start, Py_ssize_t end); -PyAPI_FUNC(int) _PyLong_FormatWriter( +extern int _PyLong_FormatWriter( _PyUnicodeWriter *writer, PyObject *obj, int base, int alternate); -PyAPI_FUNC(char*) _PyLong_FormatBytesWriter( +extern char* _PyLong_FormatBytesWriter( _PyBytesWriter *writer, char *str, PyObject *obj, int base, int alternate); +// Argument converters used by Argument Clinic + +// Export for 'select' shared extension (Argument Clinic code) +PyAPI_FUNC(int) _PyLong_UnsignedShort_Converter(PyObject *, void *); + +// Export for '_testclinic' shared extension (Argument Clinic code) +PyAPI_FUNC(int) _PyLong_UnsignedInt_Converter(PyObject *, void *); + +// Export for '_blake2' shared extension (Argument Clinic code) +PyAPI_FUNC(int) _PyLong_UnsignedLong_Converter(PyObject *, void *); + +// Export for '_blake2' shared extension (Argument Clinic code) +PyAPI_FUNC(int) _PyLong_UnsignedLongLong_Converter(PyObject *, void *); + +// Export for '_testclinic' shared extension (Argument Clinic code) +PyAPI_FUNC(int) _PyLong_Size_t_Converter(PyObject *, void *); + +/* Long value tag bits: + * 0-1: Sign bits value = (1-sign), ie. negative=2, positive=0, zero=1. + * 2: Reserved for immortality bit + * 3+ Unsigned digit count + */ +#define SIGN_MASK 3 +#define SIGN_ZERO 1 +#define SIGN_NEGATIVE 2 +#define NON_SIZE_BITS 3 + +/* The functions _PyLong_IsCompact and _PyLong_CompactValue are defined + * in Include/cpython/longobject.h, since they need to be inline. + * + * "Compact" values have at least one bit to spare, + * so that addition and subtraction can be performed on the values + * without risk of overflow. + * + * The inline functions need tag bits. + * For readability, rather than do `#define SIGN_MASK _PyLong_SIGN_MASK` + * we define them to the numbers in both places and then assert that + * they're the same. + */ +static_assert(SIGN_MASK == _PyLong_SIGN_MASK, "SIGN_MASK does not match _PyLong_SIGN_MASK"); +static_assert(NON_SIZE_BITS == _PyLong_NON_SIZE_BITS, "NON_SIZE_BITS does not match _PyLong_NON_SIZE_BITS"); + +/* All *compact" values are guaranteed to fit into + * a Py_ssize_t with at least one bit to spare. 
+ * In other words, for 64 bit machines, compact + * will be signed 63 (or fewer) bit values + */ + +/* Return 1 if the argument is compact int */ +static inline int +_PyLong_IsNonNegativeCompact(const PyLongObject* op) { + assert(PyLong_Check(op)); + return op->long_value.lv_tag <= (1 << NON_SIZE_BITS); +} + + +static inline int +_PyLong_BothAreCompact(const PyLongObject* a, const PyLongObject* b) { + assert(PyLong_Check(a)); + assert(PyLong_Check(b)); + return (a->long_value.lv_tag | b->long_value.lv_tag) < (2 << NON_SIZE_BITS); +} + +static inline bool +_PyLong_IsZero(const PyLongObject *op) +{ + return (op->long_value.lv_tag & SIGN_MASK) == SIGN_ZERO; +} + +static inline bool +_PyLong_IsNegative(const PyLongObject *op) +{ + return (op->long_value.lv_tag & SIGN_MASK) == SIGN_NEGATIVE; +} + +static inline bool +_PyLong_IsPositive(const PyLongObject *op) +{ + return (op->long_value.lv_tag & SIGN_MASK) == 0; +} + +static inline Py_ssize_t +_PyLong_DigitCount(const PyLongObject *op) +{ + assert(PyLong_Check(op)); + return op->long_value.lv_tag >> NON_SIZE_BITS; +} + +/* Equivalent to _PyLong_DigitCount(op) * _PyLong_NonCompactSign(op) */ +static inline Py_ssize_t +_PyLong_SignedDigitCount(const PyLongObject *op) +{ + assert(PyLong_Check(op)); + Py_ssize_t sign = 1 - (op->long_value.lv_tag & SIGN_MASK); + return sign * (Py_ssize_t)(op->long_value.lv_tag >> NON_SIZE_BITS); +} + +static inline int +_PyLong_CompactSign(const PyLongObject *op) +{ + assert(PyLong_Check(op)); + assert(_PyLong_IsCompact(op)); + return 1 - (op->long_value.lv_tag & SIGN_MASK); +} + +static inline int +_PyLong_NonCompactSign(const PyLongObject *op) +{ + assert(PyLong_Check(op)); + assert(!_PyLong_IsCompact(op)); + return 1 - (op->long_value.lv_tag & SIGN_MASK); +} + +/* Do a and b have the same sign? */ +static inline int +_PyLong_SameSign(const PyLongObject *a, const PyLongObject *b) +{ + return (a->long_value.lv_tag & SIGN_MASK) == (b->long_value.lv_tag & SIGN_MASK); +} + +#define TAG_FROM_SIGN_AND_SIZE(sign, size) ((1 - (sign)) | ((size) << NON_SIZE_BITS)) + +static inline void +_PyLong_SetSignAndDigitCount(PyLongObject *op, int sign, Py_ssize_t size) +{ + assert(size >= 0); + assert(-1 <= sign && sign <= 1); + assert(sign != 0 || size == 0); + op->long_value.lv_tag = TAG_FROM_SIGN_AND_SIZE(sign, (size_t)size); +} + +static inline void +_PyLong_SetDigitCount(PyLongObject *op, Py_ssize_t size) +{ + assert(size >= 0); + op->long_value.lv_tag = (((size_t)size) << NON_SIZE_BITS) | (op->long_value.lv_tag & SIGN_MASK); +} + +#define NON_SIZE_MASK ~((1 << NON_SIZE_BITS) - 1) + +static inline void +_PyLong_FlipSign(PyLongObject *op) { + unsigned int flipped_sign = 2 - (op->long_value.lv_tag & SIGN_MASK); + op->long_value.lv_tag &= NON_SIZE_MASK; + op->long_value.lv_tag |= flipped_sign; +} + +#define _PyLong_DIGIT_INIT(val) \ + { \ + .ob_base = _PyObject_HEAD_INIT(&PyLong_Type), \ + .long_value = { \ + .lv_tag = TAG_FROM_SIGN_AND_SIZE( \ + (val) == 0 ? 0 : ((val) < 0 ? -1 : 1), \ + (val) == 0 ? 0 : 1), \ + { ((val) >= 0 ? 
(val) : -(val)) }, \ + } \ + } + +#define _PyLong_FALSE_TAG TAG_FROM_SIGN_AND_SIZE(0, 0) +#define _PyLong_TRUE_TAG TAG_FROM_SIGN_AND_SIZE(1, 1) + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_memoryobject.h b/Include/internal/pycore_memoryobject.h new file mode 100644 index 000000000000000..62e204fcbf65339 --- /dev/null +++ b/Include/internal/pycore_memoryobject.h @@ -0,0 +1,20 @@ +#ifndef Py_INTERNAL_MEMORYOBJECT_H +#define Py_INTERNAL_MEMORYOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +extern PyTypeObject _PyManagedBuffer_Type; + +PyObject * +_PyMemoryView_FromBufferProc(PyObject *v, int flags, + getbufferproc bufferproc); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_MEMORYOBJECT_H */ diff --git a/Include/internal/pycore_mimalloc.h b/Include/internal/pycore_mimalloc.h new file mode 100644 index 000000000000000..c29dc82a42762aa --- /dev/null +++ b/Include/internal/pycore_mimalloc.h @@ -0,0 +1,19 @@ +#ifndef Py_INTERNAL_MIMALLOC_H +#define Py_INTERNAL_MIMALLOC_H + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#if defined(MIMALLOC_H) || defined(MIMALLOC_TYPES_H) +# error "pycore_mimalloc.h must be included before mimalloc.h" +#endif + +#include "pycore_pymem.h" +#define MI_DEBUG_UNINIT PYMEM_CLEANBYTE +#define MI_DEBUG_FREED PYMEM_DEADBYTE +#define MI_DEBUG_PADDING PYMEM_FORBIDDENBYTE + +#include "mimalloc.h" + +#endif // Py_INTERNAL_MIMALLOC_H diff --git a/Include/internal/pycore_modsupport.h b/Include/internal/pycore_modsupport.h new file mode 100644 index 000000000000000..3d3cd6722528e93 --- /dev/null +++ b/Include/internal/pycore_modsupport.h @@ -0,0 +1,125 @@ +#ifndef Py_INTERNAL_MODSUPPORT_H +#define Py_INTERNAL_MODSUPPORT_H + +#include "pycore_lock.h" // _PyOnceFlag + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +extern int _PyArg_NoKwnames(const char *funcname, PyObject *kwnames); +#define _PyArg_NoKwnames(funcname, kwnames) \ + ((kwnames) == NULL || _PyArg_NoKwnames((funcname), (kwnames))) + +// Export for '_bz2' shared extension +PyAPI_FUNC(int) _PyArg_NoPositional(const char *funcname, PyObject *args); +#define _PyArg_NoPositional(funcname, args) \ + ((args) == NULL || _PyArg_NoPositional((funcname), (args))) + +// Export for '_asyncio' shared extension +PyAPI_FUNC(int) _PyArg_NoKeywords(const char *funcname, PyObject *kwargs); +#define _PyArg_NoKeywords(funcname, kwargs) \ + ((kwargs) == NULL || _PyArg_NoKeywords((funcname), (kwargs))) + +// Export for 'zlib' shared extension +PyAPI_FUNC(int) _PyArg_CheckPositional(const char *, Py_ssize_t, + Py_ssize_t, Py_ssize_t); +#define _Py_ANY_VARARGS(n) ((n) == PY_SSIZE_T_MAX) +#define _PyArg_CheckPositional(funcname, nargs, min, max) \ + ((!_Py_ANY_VARARGS(max) && (min) <= (nargs) && (nargs) <= (max)) \ + || _PyArg_CheckPositional((funcname), (nargs), (min), (max))) + +extern PyObject ** _Py_VaBuildStack( + PyObject **small_stack, + Py_ssize_t small_stack_len, + const char *format, + va_list va, + Py_ssize_t *p_nargs); + +extern PyObject* _PyModule_CreateInitialized(PyModuleDef*, int apiver); + +// Export for '_curses' shared extension +PyAPI_FUNC(int) _PyArg_ParseStack( + PyObject *const *args, + Py_ssize_t nargs, + const char *format, + ...); + +extern int _PyArg_UnpackStack( + PyObject *const *args, + Py_ssize_t nargs, + const char *name, + Py_ssize_t min, + Py_ssize_t max, + ...); + 
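As a rough illustration of how the inline fast path of _PyArg_CheckPositional() above is meant to be used from a METH_FASTCALL function: the macro performs the cheap argument-count range check inline and only falls back to the exported function when it has to raise the TypeError. This is a sketch under the assumption that it lives in a core source file including this header (Py_BUILD_CORE defined); `demo_fastcall` is a hypothetical function.

/* Sketch only: assumes a CPython core source file where this header is
 * included (Py_BUILD_CORE defined). demo_fastcall is a hypothetical
 * METH_FASTCALL implementation taking one or two positional arguments. */
static PyObject *
demo_fastcall(PyObject *module, PyObject *const *args, Py_ssize_t nargs)
{
    /* Inline range check; the exported _PyArg_CheckPositional() is only
     * called (to format and raise TypeError) when nargs is outside [1, 2]. */
    if (!_PyArg_CheckPositional("demo", nargs, 1, 2)) {
        return NULL;
    }
    PyObject *first = args[0];
    PyObject *second = (nargs > 1) ? args[1] : Py_None;
    return PyTuple_Pack(2, first, second);
}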
+// Export for '_heapq' shared extension +PyAPI_FUNC(void) _PyArg_BadArgument( + const char *fname, + const char *displayname, + const char *expected, + PyObject *arg); + +// --- _PyArg_Parser API --------------------------------------------------- + +typedef struct _PyArg_Parser { + const char *format; + const char * const *keywords; + const char *fname; + const char *custom_msg; + _PyOnceFlag once; /* atomic one-time initialization flag */ + int is_kwtuple_owned; /* does this parser own the kwtuple object? */ + int pos; /* number of positional-only arguments */ + int min; /* minimal number of arguments */ + int max; /* maximal number of positional arguments */ + PyObject *kwtuple; /* tuple of keyword parameter names */ + struct _PyArg_Parser *next; +} _PyArg_Parser; + +// Export for '_testclinic' shared extension +PyAPI_FUNC(int) _PyArg_ParseTupleAndKeywordsFast(PyObject *, PyObject *, + struct _PyArg_Parser *, ...); + +// Export for '_dbm' shared extension +PyAPI_FUNC(int) _PyArg_ParseStackAndKeywords( + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwnames, + struct _PyArg_Parser *, + ...); + +// Export for 'math' shared extension +PyAPI_FUNC(PyObject * const *) _PyArg_UnpackKeywords( + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwargs, + PyObject *kwnames, + struct _PyArg_Parser *parser, + int minpos, + int maxpos, + int minkw, + PyObject **buf); +#define _PyArg_UnpackKeywords(args, nargs, kwargs, kwnames, parser, minpos, maxpos, minkw, buf) \ + (((minkw) == 0 && (kwargs) == NULL && (kwnames) == NULL && \ + (minpos) <= (nargs) && (nargs) <= (maxpos) && (args) != NULL) ? (args) : \ + _PyArg_UnpackKeywords((args), (nargs), (kwargs), (kwnames), (parser), \ + (minpos), (maxpos), (minkw), (buf))) + +// Export for '_testclinic' shared extension +PyAPI_FUNC(PyObject * const *) _PyArg_UnpackKeywordsWithVararg( + PyObject *const *args, Py_ssize_t nargs, + PyObject *kwargs, PyObject *kwnames, + struct _PyArg_Parser *parser, + int minpos, int maxpos, int minkw, + int vararg, PyObject **buf); + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_MODSUPPORT_H + diff --git a/Include/internal/pycore_moduleobject.h b/Include/internal/pycore_moduleobject.h index 76361b8dff113a0..5644bbe5e0552bd 100644 --- a/Include/internal/pycore_moduleobject.h +++ b/Include/internal/pycore_moduleobject.h @@ -8,6 +8,12 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +extern void _PyModule_Clear(PyObject *); +extern void _PyModule_ClearDict(PyObject *); +extern int _PyModuleSpec_IsInitializing(PyObject *); + +extern int _PyModule_IsExtension(PyObject *obj); + typedef struct { PyObject_HEAD PyObject *md_dict; @@ -33,9 +39,12 @@ static inline PyObject* _PyModule_GetDict(PyObject *mod) { PyObject *dict = ((PyModuleObject *)mod) -> md_dict; // _PyModule_GetDict(mod) must not be used after calling module_clear(mod) assert(dict != NULL); - return dict; + return dict; // borrowed reference } +PyObject* _Py_module_getattro_impl(PyModuleObject *m, PyObject *name, int suppress); +PyObject* _Py_module_getattro(PyModuleObject *m, PyObject *name); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_namespace.h b/Include/internal/pycore_namespace.h index cb76f040693d10b..f165cf15319a599 100644 --- a/Include/internal/pycore_namespace.h +++ b/Include/internal/pycore_namespace.h @@ -10,9 +10,10 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -PyAPI_DATA(PyTypeObject) _PyNamespace_Type; +extern PyTypeObject _PyNamespace_Type; 
-PyAPI_FUNC(PyObject *) _PyNamespace_New(PyObject *kwds); +// Export for '_testmultiphase' shared extension +PyAPI_FUNC(PyObject*) _PyNamespace_New(PyObject *kwds); #ifdef __cplusplus } diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h index 8b78f79e950e92f..f413b8451e5ab49 100644 --- a/Include/internal/pycore_object.h +++ b/Include/internal/pycore_object.h @@ -10,49 +10,175 @@ extern "C" { #include #include "pycore_gc.h" // _PyObject_GC_IS_TRACKED() +#include "pycore_emscripten_trampoline.h" // _PyCFunction_TrampolineCall() #include "pycore_interp.h" // PyInterpreterState.gc #include "pycore_pystate.h" // _PyInterpreterState_GET() -#include "pycore_runtime.h" // _PyRuntime -/* This value provides *effective* immortality, meaning the object should never - be deallocated (until runtime finalization). See PEP 683 for more details about - immortality, as well as a proposed mechanism for proper immortality. */ -#define _PyObject_IMMORTAL_REFCNT 999999999 +/* Check if an object is consistent. For example, ensure that the reference + counter is greater than or equal to 1, and ensure that ob_type is not NULL. -#define _PyObject_IMMORTAL_INIT(type) \ - { \ - .ob_refcnt = _PyObject_IMMORTAL_REFCNT, \ - .ob_type = (type), \ + Call _PyObject_AssertFailed() if the object is inconsistent. + + If check_content is zero, only check header fields: reduce the overhead. + + The function always return 1. The return value is just here to be able to + write: + + assert(_PyObject_CheckConsistency(obj, 1)); */ +extern int _PyObject_CheckConsistency(PyObject *op, int check_content); + +extern void _PyDebugAllocatorStats(FILE *out, const char *block_name, + int num_blocks, size_t sizeof_block); + +extern void _PyObject_DebugTypeStats(FILE *out); + +#ifdef Py_TRACE_REFS +// Forget a reference registered by _Py_NewReference(). Function called by +// _Py_Dealloc(). +// +// On a free list, the function can be used before modifying an object to +// remove the object from traced objects. Then _Py_NewReference() or +// _Py_NewReferenceNoTotal() should be called again on the object to trace +// it again. +extern void _Py_ForgetReference(PyObject *); +#endif + +// Export for shared _testinternalcapi extension +PyAPI_FUNC(int) _PyObject_IsFreed(PyObject *); + +/* We need to maintain an internal copy of Py{Var}Object_HEAD_INIT to avoid + designated initializer conflicts in C++20. If we use the deinition in + object.h, we will be mixing designated and non-designated initializers in + pycore objects which is forbiddent in C++20. However, if we then use + designated initializers in object.h then Extensions without designated break. + Furthermore, we can't use designated initializers in Extensions since these + are not supported pre-C++20. 
Thus, keeping an internal copy here is the most + backwards compatible solution */ +#if defined(Py_GIL_DISABLED) +#define _PyObject_HEAD_INIT(type) \ + { \ + .ob_ref_local = _Py_IMMORTAL_REFCNT_LOCAL, \ + .ob_type = (type) \ } -#define _PyVarObject_IMMORTAL_INIT(type, size) \ - { \ - .ob_base = _PyObject_IMMORTAL_INIT(type), \ - .ob_size = size, \ +#else +#define _PyObject_HEAD_INIT(type) \ + { \ + .ob_refcnt = _Py_IMMORTAL_REFCNT, \ + .ob_type = (type) \ + } +#endif +#define _PyVarObject_HEAD_INIT(type, size) \ + { \ + .ob_base = _PyObject_HEAD_INIT(type), \ + .ob_size = size \ } -PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalRefcountErrorFunc( +extern void _Py_NO_RETURN _Py_FatalRefcountErrorFunc( const char *func, const char *message); #define _Py_FatalRefcountError(message) \ _Py_FatalRefcountErrorFunc(__func__, (message)) + +#ifdef Py_REF_DEBUG +/* The symbol is only exposed in the API for the sake of extensions + built against the pre-3.12 stable ABI. */ +PyAPI_DATA(Py_ssize_t) _Py_RefTotal; + +extern void _Py_AddRefTotal(PyInterpreterState *, Py_ssize_t); +extern void _Py_IncRefTotal(PyInterpreterState *); +extern void _Py_DecRefTotal(PyInterpreterState *); + +# define _Py_DEC_REFTOTAL(interp) \ + interp->object_state.reftotal-- +#endif + // Increment reference count by n static inline void _Py_RefcntAdd(PyObject* op, Py_ssize_t n) { + if (_Py_IsImmortal(op)) { + return; + } #ifdef Py_REF_DEBUG - _Py_RefTotal += n; + _Py_AddRefTotal(_PyInterpreterState_GET(), n); #endif +#if !defined(Py_GIL_DISABLED) op->ob_refcnt += n; +#else + if (_Py_IsOwnedByCurrentThread(op)) { + uint32_t local = op->ob_ref_local; + Py_ssize_t refcnt = (Py_ssize_t)local + n; +# if PY_SSIZE_T_MAX > UINT32_MAX + if (refcnt > (Py_ssize_t)UINT32_MAX) { + // Make the object immortal if the 32-bit local reference count + // would overflow. + refcnt = _Py_IMMORTAL_REFCNT_LOCAL; + } +# endif + _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, (uint32_t)refcnt); + } + else { + _Py_atomic_add_ssize(&op->ob_ref_shared, (n << _Py_REF_SHARED_SHIFT)); + } +#endif } #define _Py_RefcntAdd(op, n) _Py_RefcntAdd(_PyObject_CAST(op), n) +static inline void _Py_SetImmortal(PyObject *op) +{ + if (op) { +#ifdef Py_GIL_DISABLED + op->ob_tid = _Py_UNOWNED_TID; + op->ob_ref_local = _Py_IMMORTAL_REFCNT_LOCAL; + op->ob_ref_shared = 0; +#else + op->ob_refcnt = _Py_IMMORTAL_REFCNT; +#endif + } +} +#define _Py_SetImmortal(op) _Py_SetImmortal(_PyObject_CAST(op)) + +// Makes an immortal object mortal again with the specified refcnt. Should only +// be used during runtime finalization. +static inline void _Py_SetMortal(PyObject *op, Py_ssize_t refcnt) +{ + if (op) { + assert(_Py_IsImmortal(op)); +#ifdef Py_GIL_DISABLED + op->ob_tid = _Py_UNOWNED_TID; + op->ob_ref_local = 0; + op->ob_ref_shared = _Py_REF_SHARED(refcnt, _Py_REF_MERGED); +#else + op->ob_refcnt = refcnt; +#endif + } +} + +/* _Py_ClearImmortal() should only be used during runtime finalization. 
*/ +static inline void _Py_ClearImmortal(PyObject *op) +{ + if (op) { + _Py_SetMortal(op, 1); + Py_DECREF(op); + } +} +#define _Py_ClearImmortal(op) \ + do { \ + _Py_ClearImmortal(_PyObject_CAST(op)); \ + op = NULL; \ + } while (0) + +#if !defined(Py_GIL_DISABLED) static inline void _Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct) { + if (_Py_IsImmortal(op)) { + return; + } _Py_DECREF_STAT_INC(); #ifdef Py_REF_DEBUG - _Py_RefTotal--; + _Py_DEC_REFTOTAL(_PyInterpreterState_GET()); #endif if (--op->ob_refcnt != 0) { assert(op->ob_refcnt > 0); @@ -68,9 +194,12 @@ _Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct) static inline void _Py_DECREF_NO_DEALLOC(PyObject *op) { + if (_Py_IsImmortal(op)) { + return; + } _Py_DECREF_STAT_INC(); #ifdef Py_REF_DEBUG - _Py_RefTotal--; + _Py_DEC_REFTOTAL(_PyInterpreterState_GET()); #endif op->ob_refcnt--; #ifdef Py_DEBUG @@ -80,8 +209,44 @@ _Py_DECREF_NO_DEALLOC(PyObject *op) #endif } -PyAPI_FUNC(int) _PyType_CheckConsistency(PyTypeObject *type); -PyAPI_FUNC(int) _PyDict_CheckConsistency(PyObject *mp, int check_content); +#else +// TODO: implement Py_DECREF specializations for Py_GIL_DISABLED build +static inline void +_Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct) +{ + Py_DECREF(op); +} + +static inline void +_Py_DECREF_NO_DEALLOC(PyObject *op) +{ + Py_DECREF(op); +} + +static inline int +_Py_REF_IS_MERGED(Py_ssize_t ob_ref_shared) +{ + return (ob_ref_shared & _Py_REF_SHARED_FLAG_MASK) == _Py_REF_MERGED; +} + +static inline int +_Py_REF_IS_QUEUED(Py_ssize_t ob_ref_shared) +{ + return (ob_ref_shared & _Py_REF_SHARED_FLAG_MASK) == _Py_REF_QUEUED; +} + +// Merge the local and shared reference count fields and add `extra` to the +// refcount when merging. +Py_ssize_t _Py_ExplicitMergeRefcount(PyObject *op, Py_ssize_t extra); +#endif // !defined(Py_GIL_DISABLED) + +#ifdef Py_REF_DEBUG +# undef _Py_DEC_REFTOTAL +#endif + + +extern int _PyType_CheckConsistency(PyTypeObject *type); +extern int _PyDict_CheckConsistency(PyObject *mp, int check_content); /* Update the Python traceback of an object. This function must be called when a memory block is reused from a free list. 
@@ -97,6 +262,9 @@ _PyType_HasFeature(PyTypeObject *type, unsigned long feature) { extern void _PyType_InitCache(PyInterpreterState *interp); +extern PyStatus _PyObject_InitState(PyInterpreterState *interp); +extern void _PyObject_FiniState(PyInterpreterState *interp); +extern bool _PyRefchain_IsTraced(PyInterpreterState *interp, PyObject *obj); /* Inline functions trading binary compatibility for speed: _PyObject_Init() is the fast version of PyObject_Init(), and @@ -118,8 +286,9 @@ static inline void _PyObject_InitVar(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size) { assert(op != NULL); - Py_SET_SIZE(op, size); + assert(typeobj != &PyLong_Type); _PyObject_Init((PyObject *)op, typeobj); + Py_SET_SIZE(op, size); } @@ -208,13 +377,15 @@ static inline void _PyObject_GC_UNTRACK( #endif #ifdef Py_REF_DEBUG +extern void _PyInterpreterState_FinalizeRefTotal(PyInterpreterState *); +extern void _Py_FinalizeRefTotal(_PyRuntimeState *); extern void _PyDebug_PrintTotalRefs(void); #endif #ifdef Py_TRACE_REFS -extern void _Py_AddToAllObjects(PyObject *op, int force); -extern void _Py_PrintReferences(FILE *); -extern void _Py_PrintReferenceAddresses(FILE *); +extern void _Py_AddToAllObjects(PyObject *op); +extern void _Py_PrintReferences(PyInterpreterState *, FILE *); +extern void _Py_PrintReferenceAddresses(PyInterpreterState *, FILE *); #endif @@ -232,8 +403,9 @@ _PyObject_GET_WEAKREFS_LISTPTR(PyObject *op) { if (PyType_Check(op) && ((PyTypeObject *)op)->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { + PyInterpreterState *interp = _PyInterpreterState_GET(); static_builtin_state *state = _PyStaticType_GetState( - (PyTypeObject *)op); + interp, (PyTypeObject *)op); return _PyStaticType_GET_WEAKREFS_LISTPTR(state); } // Essentially _PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET(): @@ -267,9 +439,9 @@ _PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET(PyObject *op) static inline int _PyObject_IS_GC(PyObject *obj) { - return (PyType_IS_GC(Py_TYPE(obj)) - && (Py_TYPE(obj)->tp_is_gc == NULL - || Py_TYPE(obj)->tp_is_gc(obj))); + PyTypeObject *type = Py_TYPE(obj); + return (PyType_IS_GC(type) + && (type->tp_is_gc == NULL || type->tp_is_gc(obj))); } // Fast inlined version of PyType_IS_GC() @@ -290,18 +462,20 @@ extern int _Py_CheckSlotResult( const char *slot_name, int success); -// PyType_Ready() must be called if _PyType_IsReady() is false. -// See also the Py_TPFLAGS_READY flag. 
-#define _PyType_IsReady(type) ((type)->tp_dict != NULL) - // Test if a type supports weak references static inline int _PyType_SUPPORTS_WEAKREFS(PyTypeObject *type) { return (type->tp_weaklistoffset != 0); } extern PyObject* _PyType_AllocNoTrack(PyTypeObject *type, Py_ssize_t nitems); +extern PyObject *_PyType_NewManagedObject(PyTypeObject *type); + +extern PyTypeObject* _PyType_CalculateMetaclass(PyTypeObject *, PyObject *); +extern PyObject* _PyType_GetDocFromInternalDoc(const char *, const char *); +extern PyObject* _PyType_GetTextSignatureFromInternalDoc(const char *, const char *, int); extern int _PyObject_InitializeDict(PyObject *obj); +int _PyObject_InitInlineValues(PyObject *obj, PyTypeObject *tp); extern int _PyObject_StoreInstanceAttribute(PyObject *obj, PyDictValues *values, PyObject *name, PyObject *value); PyObject * _PyObject_GetInstanceAttribute(PyObject *obj, PyDictValues *values, @@ -351,14 +525,18 @@ _PyDictOrValues_SetValues(PyDictOrValues *ptr, PyDictValues *values) extern PyObject ** _PyObject_ComputedDictPointer(PyObject *); extern void _PyObject_FreeInstanceAttributes(PyObject *obj); extern int _PyObject_IsInstanceDictEmpty(PyObject *); -extern int _PyType_HasSubclasses(PyTypeObject *); -extern PyObject* _PyType_GetSubclasses(PyTypeObject *); -// Access macro to the members which are floating "behind" the object -#define _PyHeapType_GET_MEMBERS(etype) \ - ((PyMemberDef *)(((char *)(etype)) + Py_TYPE(etype)->tp_basicsize)) +// Export for 'math' shared extension +PyAPI_FUNC(PyObject*) _PyObject_LookupSpecial(PyObject *, PyObject *); -PyAPI_FUNC(PyObject *) _PyObject_LookupSpecial(PyObject *, PyObject *); +extern int _PyObject_IsAbstract(PyObject *); + +extern int _PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); +extern PyObject* _PyObject_NextNotImplemented(PyObject *); + +// Pickle support. +// Export for '_datetime' shared extension +PyAPI_FUNC(PyObject*) _PyObject_GetState(PyObject *); /* C function call trampolines to mitigate bad function pointer casts. * @@ -372,7 +550,7 @@ PyAPI_FUNC(PyObject *) _PyObject_LookupSpecial(PyObject *, PyObject *); * match. * * Third party code unintentionally rely on problematic fpcasts. The call - * trampoline mitigates common occurences of bad fpcasts on Emscripten. + * trampoline mitigates common occurrences of bad fpcasts on Emscripten. */ #if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE) #define _PyCFunction_TrampolineCall(meth, self, args) \ @@ -387,6 +565,14 @@ extern PyObject* _PyCFunctionWithKeywords_TrampolineCall( (meth)((self), (args), (kw)) #endif // __EMSCRIPTEN__ && PY_CALL_TRAMPOLINE +// Export these 2 symbols for '_pickle' shared extension +PyAPI_DATA(PyTypeObject) _PyNone_Type; +PyAPI_DATA(PyTypeObject) _PyNotImplemented_Type; + +// Maps Py_LT to Py_GT, ..., Py_GE to Py_LE. +// Export for the stable ABI. 
+PyAPI_DATA(int) _Py_SwappedOp[]; + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_object_state.h b/Include/internal/pycore_object_state.h new file mode 100644 index 000000000000000..9eac27b1a9a4e3e --- /dev/null +++ b/Include/internal/pycore_object_state.h @@ -0,0 +1,37 @@ +#ifndef Py_INTERNAL_OBJECT_STATE_H +#define Py_INTERNAL_OBJECT_STATE_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_hashtable.h" // _Py_hashtable_t + +struct _py_object_runtime_state { +#ifdef Py_REF_DEBUG + Py_ssize_t interpreter_leaks; +#endif + int _not_used; +}; + +struct _py_object_state { +#ifdef Py_REF_DEBUG + Py_ssize_t reftotal; +#endif +#ifdef Py_TRACE_REFS + // Hash table storing all objects. The key is the object pointer + // (PyObject*) and the value is always the number 1 (as uintptr_t). + // See _PyRefchain_IsTraced() and _PyRefchain_Trace() functions. + _Py_hashtable_t *refchain; +#endif + int _not_used; +}; + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_OBJECT_STATE_H */ diff --git a/Include/internal/pycore_obmalloc.h b/Include/internal/pycore_obmalloc.h new file mode 100644 index 000000000000000..b0dbf53d4e3d152 --- /dev/null +++ b/Include/internal/pycore_obmalloc.h @@ -0,0 +1,698 @@ +#ifndef Py_INTERNAL_OBMALLOC_H +#define Py_INTERNAL_OBMALLOC_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +typedef unsigned int pymem_uint; /* assuming >= 16 bits */ + +#undef uint +#define uint pymem_uint + + +/* An object allocator for Python. + + Here is an introduction to the layers of the Python memory architecture, + showing where the object allocator is actually used (layer +2), It is + called for every object allocation and deallocation (PyObject_New/Del), + unless the object-specific allocators implement a proprietary allocation + scheme (ex.: ints use a simple free list). This is also the place where + the cyclic garbage collector operates selectively on container objects. + + + Object-specific allocators + _____ ______ ______ ________ + [ int ] [ dict ] [ list ] ... [ string ] Python core | ++3 | <----- Object-specific memory -----> | <-- Non-object memory --> | + _______________________________ | | + [ Python's object allocator ] | | ++2 | ####### Object memory ####### | <------ Internal buffers ------> | + ______________________________________________________________ | + [ Python's raw memory allocator (PyMem_ API) ] | ++1 | <----- Python memory (under PyMem manager's control) ------> | | + __________________________________________________________________ + [ Underlying general-purpose allocator (ex: C library malloc) ] + 0 | <------ Virtual memory allocated for the python process -------> | + + ========================================================================= + _______________________________________________________________________ + [ OS-specific Virtual Memory Manager (VMM) ] +-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> | + __________________________________ __________________________________ + [ ] [ ] +-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> | + +*/ +/*==========================================================================*/ + +/* A fast, special-purpose memory allocator for small blocks, to be used + on top of a general-purpose malloc -- heavily based on previous art. 
*/ + +/* Vladimir Marangozov -- August 2000 */ + +/* + * "Memory management is where the rubber meets the road -- if we do the wrong + * thing at any level, the results will not be good. And if we don't make the + * levels work well together, we are in serious trouble." (1) + * + * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles, + * "Dynamic Storage Allocation: A Survey and Critical Review", + * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995. + */ + +/* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */ + +/*==========================================================================*/ + +/* + * Allocation strategy abstract: + * + * For small requests, the allocator sub-allocates blocks of memory. + * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the + * system's allocator. + * + * Small requests are grouped in size classes spaced 8 bytes apart, due + * to the required valid alignment of the returned address. Requests of + * a particular size are serviced from memory pools of 4K (one VMM page). + * Pools are fragmented on demand and contain free lists of blocks of one + * particular size class. In other words, there is a fixed-size allocator + * for each size class. Free pools are shared by the different allocators + * thus minimizing the space reserved for a particular size class. + * + * This allocation strategy is a variant of what is known as "simple + * segregated storage based on array of free lists". The main drawback of + * simple segregated storage is that we might end up with lot of reserved + * memory for the different free lists, which degenerate in time. To avoid + * this, we partition each free list in pools and we share dynamically the + * reserved space between all free lists. This technique is quite efficient + * for memory intensive programs which allocate mainly small-sized blocks. + * + * For small requests we have the following table: + * + * Request in bytes Size of allocated block Size class idx + * ---------------------------------------------------------------- + * 1-8 8 0 + * 9-16 16 1 + * 17-24 24 2 + * 25-32 32 3 + * 33-40 40 4 + * 41-48 48 5 + * 49-56 56 6 + * 57-64 64 7 + * 65-72 72 8 + * ... ... ... + * 497-504 504 62 + * 505-512 512 63 + * + * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying + * allocator. + */ + +/*==========================================================================*/ + +/* + * -- Main tunable settings section -- + */ + +/* + * Alignment of addresses returned to the user. 8-bytes alignment works + * on most current architectures (with 32-bit or 64-bit address buses). + * The alignment value is also used for grouping small requests in size + * classes spaced ALIGNMENT bytes apart. + * + * You shouldn't change this unless you know what you are doing. + */ + +#if SIZEOF_VOID_P > 4 +#define ALIGNMENT 16 /* must be 2^N */ +#define ALIGNMENT_SHIFT 4 +#else +#define ALIGNMENT 8 /* must be 2^N */ +#define ALIGNMENT_SHIFT 3 +#endif + +/* Return the number of bytes in size class I, as a uint. */ +#define INDEX2SIZE(I) (((pymem_uint)(I) + 1) << ALIGNMENT_SHIFT) + +/* + * Max size threshold below which malloc requests are considered to be + * small enough in order to use preallocated memory pools. You can tune + * this value according to your application behaviour and memory needs. + * + * Note: a size threshold of 512 guarantees that newly created dictionaries + * will be allocated from preallocated memory pools on 64-bit. 
+ * + * The following invariants must hold: + * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512 + * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT + * + * Although not required, for better performance and space efficiency, + * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2. + */ +#define SMALL_REQUEST_THRESHOLD 512 +#define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT) + +/* + * The system's VMM page size can be obtained on most unices with a + * getpagesize() call or deduced from various header files. To make + * things simpler, we assume that it is 4K, which is OK for most systems. + * It is probably better if this is the native page size, but it doesn't + * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page + * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation + * violation fault. 4K is apparently OK for all the platforms that python + * currently targets. + */ +#define SYSTEM_PAGE_SIZE (4 * 1024) + +/* + * Maximum amount of memory managed by the allocator for small requests. + */ +#ifdef WITH_MEMORY_LIMITS +#ifndef SMALL_MEMORY_LIMIT +#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */ +#endif +#endif + +#if !defined(WITH_PYMALLOC_RADIX_TREE) +/* Use radix-tree to track arena memory regions, for address_in_range(). + * Enable by default since it allows larger pool sizes. Can be disabled + * using -DWITH_PYMALLOC_RADIX_TREE=0 */ +#define WITH_PYMALLOC_RADIX_TREE 1 +#endif + +#if SIZEOF_VOID_P > 4 +/* on 64-bit platforms use larger pools and arenas if we can */ +#define USE_LARGE_ARENAS +#if WITH_PYMALLOC_RADIX_TREE +/* large pools only supported if radix-tree is enabled */ +#define USE_LARGE_POOLS +#endif +#endif + +/* + * The allocator sub-allocates blocks of memory (called arenas) aligned + * on a page boundary. This is a reserved virtual address space for the + * current process (obtained through a malloc()/mmap() call). In no way this + * means that the memory arenas will be used entirely. A malloc() is + * usually an address range reservation for bytes, unless all pages within + * this space are referenced subsequently. So malloc'ing big blocks and not + * using them does not mean "wasting memory". It's an addressable range + * wastage... + * + * Arenas are allocated with mmap() on systems supporting anonymous memory + * mappings to reduce heap fragmentation. + */ +#ifdef USE_LARGE_ARENAS +#define ARENA_BITS 20 /* 1 MiB */ +#else +#define ARENA_BITS 18 /* 256 KiB */ +#endif +#define ARENA_SIZE (1 << ARENA_BITS) +#define ARENA_SIZE_MASK (ARENA_SIZE - 1) + +#ifdef WITH_MEMORY_LIMITS +#define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE) +#endif + +/* + * Size of the pools used for small blocks. Must be a power of 2. + */ +#ifdef USE_LARGE_POOLS +#define POOL_BITS 14 /* 16 KiB */ +#else +#define POOL_BITS 12 /* 4 KiB */ +#endif +#define POOL_SIZE (1 << POOL_BITS) +#define POOL_SIZE_MASK (POOL_SIZE - 1) + +#if !WITH_PYMALLOC_RADIX_TREE +#if POOL_SIZE != SYSTEM_PAGE_SIZE +# error "pool size must be equal to system page size" +#endif +#endif + +#define MAX_POOLS_IN_ARENA (ARENA_SIZE / POOL_SIZE) +#if MAX_POOLS_IN_ARENA * POOL_SIZE != ARENA_SIZE +# error "arena size not an exact multiple of pool size" +#endif + +/* + * -- End of tunable settings section -- + */ + +/*==========================================================================*/ + +/* When you say memory, my mind reasons in terms of (pointers to) blocks */ +typedef uint8_t pymem_block; + +/* Pool for small blocks. 
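
   As a worked example of the settings above (illustrative only): on a 64-bit
   build with the radix tree enabled (the default), ARENA_SIZE is
   1 << 20 == 1 MiB and POOL_SIZE is 1 << 14 == 16 KiB; on a 32-bit build they
   are 256 KiB and 4 KiB.  In both cases MAX_POOLS_IN_ARENA works out to
   64 pools per arena.
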
*/ +struct pool_header { + union { pymem_block *_padding; + uint count; } ref; /* number of allocated blocks */ + pymem_block *freeblock; /* pool's free list head */ + struct pool_header *nextpool; /* next pool of this size class */ + struct pool_header *prevpool; /* previous pool "" */ + uint arenaindex; /* index into arenas of base adr */ + uint szidx; /* block size class index */ + uint nextoffset; /* bytes to virgin block */ + uint maxnextoffset; /* largest valid nextoffset */ +}; + +typedef struct pool_header *poolp; + +/* Record keeping for arenas. */ +struct arena_object { + /* The address of the arena, as returned by malloc. Note that 0 + * will never be returned by a successful malloc, and is used + * here to mark an arena_object that doesn't correspond to an + * allocated arena. + */ + uintptr_t address; + + /* Pool-aligned pointer to the next pool to be carved off. */ + pymem_block* pool_address; + + /* The number of available pools in the arena: free pools + never- + * allocated pools. + */ + uint nfreepools; + + /* The total number of pools in the arena, whether or not available. */ + uint ntotalpools; + + /* Singly-linked list of available pools. */ + struct pool_header* freepools; + + /* Whenever this arena_object is not associated with an allocated + * arena, the nextarena member is used to link all unassociated + * arena_objects in the singly-linked `unused_arena_objects` list. + * The prevarena member is unused in this case. + * + * When this arena_object is associated with an allocated arena + * with at least one available pool, both members are used in the + * doubly-linked `usable_arenas` list, which is maintained in + * increasing order of `nfreepools` values. + * + * Else this arena_object is associated with an allocated arena + * all of whose pools are in use. `nextarena` and `prevarena` + * are both meaningless in this case. + */ + struct arena_object* nextarena; + struct arena_object* prevarena; +}; + +#define POOL_OVERHEAD _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT) + +#define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */ + +/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */ +#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE)) + +/* Return total number of blocks in pool of size index I, as a uint. */ +#define NUMBLOCKS(I) ((pymem_uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I)) + +/*==========================================================================*/ + +/* + * Pool table -- headed, circular, doubly-linked lists of partially used pools. + +This is involved. For an index i, usedpools[i+i] is the header for a list of +all partially used pools holding small blocks with "size class idx" i. So +usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size +16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT. + +Blocks within pools are carved out as needed; pool->freeblock points to +the start of a singly-linked list of free blocks within the pool. When a +block is freed, it's inserted at the front of its pool's freeblock list. Note +that the available blocks in a pool are *not* linked all together when a pool +is initialized. Instead only "the first two" (lowest addresses) blocks are +set up, returning the first such block, and setting pool->freeblock to a +one-block list holding the second such block. This is consistent with the fact that +pymalloc strives at all levels (arena, pool, and block) never to touch a piece +of memory until it's actually needed.
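
As a minimal sketch of the fast path (illustrative only; the real logic lives
in Objects/obmalloc.c): while a pool is in the used state, handing out a block
is essentially a free-list pop,

    pymem_block *bp = pool->freeblock;        /* head of the pool's free list */
    pool->freeblock = *(pymem_block **)bp;    /* next free block, or NULL */

and only when freeblock would become NULL does the allocator carve a new
virgin block via nextoffset/maxnextoffset, or retire the pool from the used
list.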
+ +So long as a pool is in the used state, we're certain there *is* a block +available for allocating, and pool->freeblock is not NULL. If pool->freeblock +points to the end of the free list before we've carved the entire pool into +blocks, that means we simply haven't yet gotten to one of the higher-address +blocks. The offset from the pool_header to the start of "the next" virgin +block is stored in the pool_header nextoffset member, and the largest value +of nextoffset that makes sense is stored in the maxnextoffset member when a +pool is initialized. All the blocks in a pool have been passed out at least +once when and only when nextoffset > maxnextoffset. + + +Major obscurity: While the usedpools vector is declared to have poolp +entries, it doesn't really. It really contains two pointers per (conceptual) +poolp entry, the nextpool and prevpool members of a pool_header. The +excruciating initialization code below fools C so that + + usedpool[i+i] + +"acts like" a genuine poolp, but only so long as you only reference its +nextpool and prevpool members. The "- 2*sizeof(pymem_block *)" gibberish is +compensating for that a pool_header's nextpool and prevpool members +immediately follow a pool_header's first two members: + + union { pymem_block *_padding; + uint count; } ref; + pymem_block *freeblock; + +each of which consume sizeof(pymem_block *) bytes. So what usedpools[i+i] really +contains is a fudged-up pointer p such that *if* C believes it's a poolp +pointer, then p->nextpool and p->prevpool are both p (meaning that the headed +circular list is empty). + +It's unclear why the usedpools setup is so convoluted. It could be to +minimize the amount of cache required to hold this heavily-referenced table +(which only *needs* the two interpool pointer members of a pool_header). OTOH, +referencing code has to remember to "double the index" and doing so isn't +free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying +on that C doesn't insert any padding anywhere in a pool_header at or before +the prevpool member. +**************************************************************************** */ + +#define OBMALLOC_USED_POOLS_SIZE (2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8) + +struct _obmalloc_pools { + poolp used[OBMALLOC_USED_POOLS_SIZE]; +}; + + +/*========================================================================== +Arena management. + +`arenas` is a vector of arena_objects. It contains maxarenas entries, some of +which may not be currently used (== they're arena_objects that aren't +currently associated with an allocated arena). Note that arenas proper are +separately malloc'ed. + +Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5, +we do try to free() arenas, and use some mild heuristic strategies to increase +the likelihood that arenas eventually can be freed. + +unused_arena_objects + + This is a singly-linked list of the arena_objects that are currently not + being used (no arena is associated with them). Objects are taken off the + head of the list in new_arena(), and are pushed on the head of the list in + PyObject_Free() when the arena is empty. Key invariant: an arena_object + is on this list if and only if its .address member is 0. + +usable_arenas + + This is a doubly-linked list of the arena_objects associated with arenas + that have pools available. These pools are either waiting to be reused, + or have not been used before. 
The list is sorted to have the most- + allocated arenas first (ascending order based on the nfreepools member). + This means that the next allocation will come from a heavily used arena, + which gives the nearly empty arenas a chance to be returned to the system. + In my unscientific tests this dramatically improved the number of arenas + that could be freed. + +Note that an arena_object associated with an arena all of whose pools are +currently in use isn't on either list. + +Changed in Python 3.8: keeping usable_arenas sorted by number of free pools +used to be done by one-at-a-time linear search when an arena's number of +free pools changed. That could, overall, consume time quadratic in the +number of arenas. That didn't really matter when there were only a few +hundred arenas (typical!), but could be a timing disaster when there were +hundreds of thousands. See bpo-37029. + +Now we have a vector of "search fingers" to eliminate the need to search: +nfp2lasta[nfp] returns the last ("rightmost") arena in usable_arenas +with nfp free pools. This is NULL if and only if there is no arena with +nfp free pools in usable_arenas. +*/ + +/* How many arena_objects do we initially allocate? + * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the + * `arenas` vector. + */ +#define INITIAL_ARENA_OBJECTS 16 + +struct _obmalloc_mgmt { + /* Array of objects used to track chunks of memory (arenas). */ + struct arena_object* arenas; + /* Number of slots currently allocated in the `arenas` vector. */ + uint maxarenas; + + /* The head of the singly-linked, NULL-terminated list of available + * arena_objects. + */ + struct arena_object* unused_arena_objects; + + /* The head of the doubly-linked, NULL-terminated at each end, list of + * arena_objects associated with arenas that have pools available. + */ + struct arena_object* usable_arenas; + + /* nfp2lasta[nfp] is the last arena in usable_arenas with nfp free pools */ + struct arena_object* nfp2lasta[MAX_POOLS_IN_ARENA + 1]; + + /* Number of arenas allocated that haven't been free()'d. */ + size_t narenas_currently_allocated; + + /* Total number of times malloc() called to allocate an arena. */ + size_t ntimes_arena_allocated; + /* High water mark (max value ever seen) for narenas_currently_allocated. */ + size_t narenas_highwater; + + Py_ssize_t raw_allocated_blocks; +}; + + +#if WITH_PYMALLOC_RADIX_TREE +/*==========================================================================*/ +/* radix tree for tracking arena usage. If enabled, used to implement + address_in_range(). + + memory address bit allocation for keys + + 64-bit pointers, IGNORE_BITS=0 and 2^20 arena size: + 15 -> MAP_TOP_BITS + 15 -> MAP_MID_BITS + 14 -> MAP_BOT_BITS + 20 -> ideal aligned arena + ---- + 64 + + 64-bit pointers, IGNORE_BITS=16, and 2^20 arena size: + 16 -> IGNORE_BITS + 10 -> MAP_TOP_BITS + 10 -> MAP_MID_BITS + 8 -> MAP_BOT_BITS + 20 -> ideal aligned arena + ---- + 64 + + 32-bit pointers and 2^18 arena size: + 14 -> MAP_BOT_BITS + 18 -> ideal aligned arena + ---- + 32 + +*/ + +#if SIZEOF_VOID_P == 8 + +/* number of bits in a pointer */ +#define POINTER_BITS 64 + +/* High bits of memory addresses that will be ignored when indexing into the + * radix tree. Setting this to zero is the safe default. For most 64-bit + * machines, setting this to 16 would be safe. The kernel would not give + * user-space virtual memory addresses that have significant information in + * those high bits. 
The main advantage to setting IGNORE_BITS > 0 is that less + * virtual memory will be used for the top and middle radix tree arrays. Those + * arrays are allocated in the BSS segment and so will typically consume real + * memory only if actually accessed. + */ +#define IGNORE_BITS 0 + +/* use the top and mid layers of the radix tree */ +#define USE_INTERIOR_NODES + +#elif SIZEOF_VOID_P == 4 + +#define POINTER_BITS 32 +#define IGNORE_BITS 0 + +#else + + /* Currently this code works for 64-bit or 32-bit pointers only. */ +#error "obmalloc radix tree requires 64-bit or 32-bit pointers." + +#endif /* SIZEOF_VOID_P */ + +/* arena_coverage_t members require this to be true */ +#if ARENA_BITS >= 32 +# error "arena size must be < 2^32" +#endif + +/* the lower bits of the address that are not ignored */ +#define ADDRESS_BITS (POINTER_BITS - IGNORE_BITS) + +#ifdef USE_INTERIOR_NODES +/* number of bits used for MAP_TOP and MAP_MID nodes */ +#define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3) +#else +#define INTERIOR_BITS 0 +#endif + +#define MAP_TOP_BITS INTERIOR_BITS +#define MAP_TOP_LENGTH (1 << MAP_TOP_BITS) +#define MAP_TOP_MASK (MAP_TOP_LENGTH - 1) + +#define MAP_MID_BITS INTERIOR_BITS +#define MAP_MID_LENGTH (1 << MAP_MID_BITS) +#define MAP_MID_MASK (MAP_MID_LENGTH - 1) + +#define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS) +#define MAP_BOT_LENGTH (1 << MAP_BOT_BITS) +#define MAP_BOT_MASK (MAP_BOT_LENGTH - 1) + +#define MAP_BOT_SHIFT ARENA_BITS +#define MAP_MID_SHIFT (MAP_BOT_BITS + MAP_BOT_SHIFT) +#define MAP_TOP_SHIFT (MAP_MID_BITS + MAP_MID_SHIFT) + +#define AS_UINT(p) ((uintptr_t)(p)) +#define MAP_BOT_INDEX(p) ((AS_UINT(p) >> MAP_BOT_SHIFT) & MAP_BOT_MASK) +#define MAP_MID_INDEX(p) ((AS_UINT(p) >> MAP_MID_SHIFT) & MAP_MID_MASK) +#define MAP_TOP_INDEX(p) ((AS_UINT(p) >> MAP_TOP_SHIFT) & MAP_TOP_MASK) + +#if IGNORE_BITS > 0 +/* Return the ignored part of the pointer address. Those bits should be same + * for all valid pointers if IGNORE_BITS is set correctly. + */ +#define HIGH_BITS(p) (AS_UINT(p) >> ADDRESS_BITS) +#else +#define HIGH_BITS(p) 0 +#endif + + +/* This is the leaf of the radix tree. See arena_map_mark_used() for the + * meaning of these members. */ +typedef struct { + int32_t tail_hi; + int32_t tail_lo; +} arena_coverage_t; + +typedef struct arena_map_bot { + /* The members tail_hi and tail_lo are accessed together. So, it + * better to have them as an array of structs, rather than two + * arrays. + */ + arena_coverage_t arenas[MAP_BOT_LENGTH]; +} arena_map_bot_t; + +#ifdef USE_INTERIOR_NODES +typedef struct arena_map_mid { + struct arena_map_bot *ptrs[MAP_MID_LENGTH]; +} arena_map_mid_t; + +typedef struct arena_map_top { + struct arena_map_mid *ptrs[MAP_TOP_LENGTH]; +} arena_map_top_t; +#endif + +struct _obmalloc_usage { + /* The root of radix tree. Note that by initializing like this, the memory + * should be in the BSS. The OS will only memory map pages as the MAP_MID + * nodes get used (OS pages are demand loaded as needed). 
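 *
 * As a worked example derived from the macros above (illustrative only):
 * with 64-bit pointers, IGNORE_BITS == 0 and ARENA_BITS == 20, INTERIOR_BITS
 * is (64 - 20 + 2) / 3 == 15 and MAP_BOT_BITS is 64 - 20 - 2*15 == 14, so an
 * address p splits as
 *
 *     MAP_TOP_INDEX(p) == (p >> 49) & 0x7fff
 *     MAP_MID_INDEX(p) == (p >> 34) & 0x7fff
 *     MAP_BOT_INDEX(p) == (p >> 20) & 0x3fff
 *
 * which matches the 15/15/14/20 bit allocation shown earlier.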
+ */ +#ifdef USE_INTERIOR_NODES + arena_map_top_t arena_map_root; + /* accounting for number of used interior nodes */ + int arena_map_mid_count; + int arena_map_bot_count; +#else + arena_map_bot_t arena_map_root; +#endif +}; + +#endif /* WITH_PYMALLOC_RADIX_TREE */ + + +struct _obmalloc_global_state { + int dump_debug_stats; + Py_ssize_t interpreter_leaks; +}; + +struct _obmalloc_state { + struct _obmalloc_pools pools; + struct _obmalloc_mgmt mgmt; + struct _obmalloc_usage usage; +}; + + +#undef uint + + +/* Allocate memory directly from the O/S virtual memory system, + * where supported. Otherwise fallback on malloc */ +void *_PyObject_VirtualAlloc(size_t size); +void _PyObject_VirtualFree(void *, size_t size); + + +/* This function returns the number of allocated memory blocks, regardless of size */ +extern Py_ssize_t _Py_GetGlobalAllocatedBlocks(void); +#define _Py_GetAllocatedBlocks() \ + _Py_GetGlobalAllocatedBlocks() +extern Py_ssize_t _PyInterpreterState_GetAllocatedBlocks(PyInterpreterState *); +extern void _PyInterpreterState_FinalizeAllocatedBlocks(PyInterpreterState *); + + +#ifdef WITH_PYMALLOC +// Export the symbol for the 3rd party 'guppy3' project +PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out); +#endif + + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_OBMALLOC_H diff --git a/Include/internal/pycore_obmalloc_init.h b/Include/internal/pycore_obmalloc_init.h new file mode 100644 index 000000000000000..8ee72ff2d4126f9 --- /dev/null +++ b/Include/internal/pycore_obmalloc_init.h @@ -0,0 +1,73 @@ +#ifndef Py_INTERNAL_OBMALLOC_INIT_H +#define Py_INTERNAL_OBMALLOC_INIT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +/****************************************************/ +/* the default object allocator's state initializer */ + +#define PTA(pools, x) \ + ((poolp )((uint8_t *)&(pools.used[2*(x)]) - 2*sizeof(pymem_block *))) +#define PT(p, x) PTA(p, x), PTA(p, x) + +#define PT_8(p, start) \ + PT(p, start), \ + PT(p, start+1), \ + PT(p, start+2), \ + PT(p, start+3), \ + PT(p, start+4), \ + PT(p, start+5), \ + PT(p, start+6), \ + PT(p, start+7) + +#if NB_SMALL_SIZE_CLASSES <= 8 +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0) } +#elif NB_SMALL_SIZE_CLASSES <= 16 +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8) } +#elif NB_SMALL_SIZE_CLASSES <= 24 +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16) } +#elif NB_SMALL_SIZE_CLASSES <= 32 +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24) } +#elif NB_SMALL_SIZE_CLASSES <= 40 +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32) } +#elif NB_SMALL_SIZE_CLASSES <= 48 +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40) } +#elif NB_SMALL_SIZE_CLASSES <= 56 +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48) } +#elif NB_SMALL_SIZE_CLASSES <= 64 +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48), PT_8(p, 56) } +#else +# error "NB_SMALL_SIZE_CLASSES should be less than 64" +#endif + +#define _obmalloc_global_state_INIT \ + { \ + .dump_debug_stats = -1, \ + } + +#define _obmalloc_state_INIT(obmalloc) \ + { \ + .pools = { \ + .used = _obmalloc_pools_INIT(obmalloc.pools), \ + }, \ + } + + +#ifdef __cplusplus +} 
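/* Illustrative note on the initializer above: PTA(pools, x) points
 * 2*sizeof(pymem_block *) bytes before pools.used[2*x].  Since nextpool and
 * prevpool are the third and fourth pointer-sized fields of struct
 * pool_header, viewing that address as a poolp makes ->nextpool and
 * ->prevpool alias pools.used[2*x] and pools.used[2*x + 1].  Initializing
 * both slots to PTA(pools, x) therefore leaves every headed circular list
 * empty, i.e. for each size class i:
 *
 *     poolp head = pools.used[2*i];
 *     assert(head->nextpool == head && head->prevpool == head);
 */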
+#endif +#endif // !Py_INTERNAL_OBMALLOC_INIT_H diff --git a/Include/internal/pycore_opcode.h b/Include/internal/pycore_opcode.h deleted file mode 100644 index 59276c8097116f2..000000000000000 --- a/Include/internal/pycore_opcode.h +++ /dev/null @@ -1,589 +0,0 @@ -// Auto-generated by Tools/build/generate_opcode_h.py from Lib/opcode.py - -#ifndef Py_INTERNAL_OPCODE_H -#define Py_INTERNAL_OPCODE_H -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef Py_BUILD_CORE -# error "this header requires Py_BUILD_CORE define" -#endif - -#include "opcode.h" - -extern const uint8_t _PyOpcode_Caches[256]; - -extern const uint8_t _PyOpcode_Deopt[256]; - -#ifdef NEED_OPCODE_TABLES -static const uint32_t _PyOpcode_RelativeJump[9] = { - 0U, - 0U, - 536870912U, - 135118848U, - 4163U, - 0U, - 0U, - 0U, - 48U, -}; -static const uint32_t _PyOpcode_Jump[9] = { - 0U, - 0U, - 536870912U, - 135118848U, - 4163U, - 0U, - 0U, - 0U, - 48U, -}; - -const uint8_t _PyOpcode_Caches[256] = { - [BINARY_SUBSCR] = 4, - [STORE_SUBSCR] = 1, - [UNPACK_SEQUENCE] = 1, - [FOR_ITER] = 1, - [STORE_ATTR] = 4, - [LOAD_ATTR] = 9, - [COMPARE_OP] = 2, - [LOAD_GLOBAL] = 5, - [BINARY_OP] = 1, - [CALL] = 4, -}; - -const uint8_t _PyOpcode_Deopt[256] = { - [ASYNC_GEN_WRAP] = ASYNC_GEN_WRAP, - [BEFORE_ASYNC_WITH] = BEFORE_ASYNC_WITH, - [BEFORE_WITH] = BEFORE_WITH, - [BINARY_OP] = BINARY_OP, - [BINARY_OP_ADAPTIVE] = BINARY_OP, - [BINARY_OP_ADD_FLOAT] = BINARY_OP, - [BINARY_OP_ADD_INT] = BINARY_OP, - [BINARY_OP_ADD_UNICODE] = BINARY_OP, - [BINARY_OP_INPLACE_ADD_UNICODE] = BINARY_OP, - [BINARY_OP_MULTIPLY_FLOAT] = BINARY_OP, - [BINARY_OP_MULTIPLY_INT] = BINARY_OP, - [BINARY_OP_SUBTRACT_FLOAT] = BINARY_OP, - [BINARY_OP_SUBTRACT_INT] = BINARY_OP, - [BINARY_SLICE] = BINARY_SLICE, - [BINARY_SUBSCR] = BINARY_SUBSCR, - [BINARY_SUBSCR_ADAPTIVE] = BINARY_SUBSCR, - [BINARY_SUBSCR_DICT] = BINARY_SUBSCR, - [BINARY_SUBSCR_GETITEM] = BINARY_SUBSCR, - [BINARY_SUBSCR_LIST_INT] = BINARY_SUBSCR, - [BINARY_SUBSCR_TUPLE_INT] = BINARY_SUBSCR, - [BUILD_CONST_KEY_MAP] = BUILD_CONST_KEY_MAP, - [BUILD_LIST] = BUILD_LIST, - [BUILD_MAP] = BUILD_MAP, - [BUILD_SET] = BUILD_SET, - [BUILD_SLICE] = BUILD_SLICE, - [BUILD_STRING] = BUILD_STRING, - [BUILD_TUPLE] = BUILD_TUPLE, - [CACHE] = CACHE, - [CALL] = CALL, - [CALL_ADAPTIVE] = CALL, - [CALL_BOUND_METHOD_EXACT_ARGS] = CALL, - [CALL_BUILTIN_CLASS] = CALL, - [CALL_BUILTIN_FAST_WITH_KEYWORDS] = CALL, - [CALL_FUNCTION_EX] = CALL_FUNCTION_EX, - [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = CALL, - [CALL_NO_KW_BUILTIN_FAST] = CALL, - [CALL_NO_KW_BUILTIN_O] = CALL, - [CALL_NO_KW_ISINSTANCE] = CALL, - [CALL_NO_KW_LEN] = CALL, - [CALL_NO_KW_LIST_APPEND] = CALL, - [CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = CALL, - [CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = CALL, - [CALL_NO_KW_METHOD_DESCRIPTOR_O] = CALL, - [CALL_NO_KW_STR_1] = CALL, - [CALL_NO_KW_TUPLE_1] = CALL, - [CALL_NO_KW_TYPE_1] = CALL, - [CALL_PY_EXACT_ARGS] = CALL, - [CALL_PY_WITH_DEFAULTS] = CALL, - [CHECK_EG_MATCH] = CHECK_EG_MATCH, - [CHECK_EXC_MATCH] = CHECK_EXC_MATCH, - [CLEANUP_THROW] = CLEANUP_THROW, - [COMPARE_OP] = COMPARE_OP, - [COMPARE_OP_ADAPTIVE] = COMPARE_OP, - [COMPARE_OP_FLOAT_JUMP] = COMPARE_OP, - [COMPARE_OP_INT_JUMP] = COMPARE_OP, - [COMPARE_OP_STR_JUMP] = COMPARE_OP, - [CONTAINS_OP] = CONTAINS_OP, - [COPY] = COPY, - [COPY_FREE_VARS] = COPY_FREE_VARS, - [DELETE_ATTR] = DELETE_ATTR, - [DELETE_DEREF] = DELETE_DEREF, - [DELETE_FAST] = DELETE_FAST, - [DELETE_GLOBAL] = DELETE_GLOBAL, - [DELETE_NAME] = DELETE_NAME, - [DELETE_SUBSCR] = DELETE_SUBSCR, - [DICT_MERGE] = 
DICT_MERGE, - [DICT_UPDATE] = DICT_UPDATE, - [END_ASYNC_FOR] = END_ASYNC_FOR, - [END_FOR] = END_FOR, - [EXTENDED_ARG] = EXTENDED_ARG, - [EXTENDED_ARG_QUICK] = EXTENDED_ARG, - [FORMAT_VALUE] = FORMAT_VALUE, - [FOR_ITER] = FOR_ITER, - [FOR_ITER_ADAPTIVE] = FOR_ITER, - [FOR_ITER_LIST] = FOR_ITER, - [FOR_ITER_RANGE] = FOR_ITER, - [GET_AITER] = GET_AITER, - [GET_ANEXT] = GET_ANEXT, - [GET_AWAITABLE] = GET_AWAITABLE, - [GET_ITER] = GET_ITER, - [GET_LEN] = GET_LEN, - [GET_YIELD_FROM_ITER] = GET_YIELD_FROM_ITER, - [IMPORT_FROM] = IMPORT_FROM, - [IMPORT_NAME] = IMPORT_NAME, - [IMPORT_STAR] = IMPORT_STAR, - [IS_OP] = IS_OP, - [JUMP_BACKWARD] = JUMP_BACKWARD, - [JUMP_BACKWARD_NO_INTERRUPT] = JUMP_BACKWARD_NO_INTERRUPT, - [JUMP_FORWARD] = JUMP_FORWARD, - [JUMP_IF_FALSE_OR_POP] = JUMP_IF_FALSE_OR_POP, - [JUMP_IF_TRUE_OR_POP] = JUMP_IF_TRUE_OR_POP, - [KW_NAMES] = KW_NAMES, - [LIST_APPEND] = LIST_APPEND, - [LIST_EXTEND] = LIST_EXTEND, - [LIST_TO_TUPLE] = LIST_TO_TUPLE, - [LOAD_ASSERTION_ERROR] = LOAD_ASSERTION_ERROR, - [LOAD_ATTR] = LOAD_ATTR, - [LOAD_ATTR_ADAPTIVE] = LOAD_ATTR, - [LOAD_ATTR_CLASS] = LOAD_ATTR, - [LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = LOAD_ATTR, - [LOAD_ATTR_INSTANCE_VALUE] = LOAD_ATTR, - [LOAD_ATTR_METHOD_LAZY_DICT] = LOAD_ATTR, - [LOAD_ATTR_METHOD_NO_DICT] = LOAD_ATTR, - [LOAD_ATTR_METHOD_WITH_DICT] = LOAD_ATTR, - [LOAD_ATTR_METHOD_WITH_VALUES] = LOAD_ATTR, - [LOAD_ATTR_MODULE] = LOAD_ATTR, - [LOAD_ATTR_PROPERTY] = LOAD_ATTR, - [LOAD_ATTR_SLOT] = LOAD_ATTR, - [LOAD_ATTR_WITH_HINT] = LOAD_ATTR, - [LOAD_BUILD_CLASS] = LOAD_BUILD_CLASS, - [LOAD_CLASSDEREF] = LOAD_CLASSDEREF, - [LOAD_CLOSURE] = LOAD_CLOSURE, - [LOAD_CONST] = LOAD_CONST, - [LOAD_CONST__LOAD_FAST] = LOAD_CONST, - [LOAD_DEREF] = LOAD_DEREF, - [LOAD_FAST] = LOAD_FAST, - [LOAD_FAST_CHECK] = LOAD_FAST_CHECK, - [LOAD_FAST__LOAD_CONST] = LOAD_FAST, - [LOAD_FAST__LOAD_FAST] = LOAD_FAST, - [LOAD_GLOBAL] = LOAD_GLOBAL, - [LOAD_GLOBAL_ADAPTIVE] = LOAD_GLOBAL, - [LOAD_GLOBAL_BUILTIN] = LOAD_GLOBAL, - [LOAD_GLOBAL_MODULE] = LOAD_GLOBAL, - [LOAD_NAME] = LOAD_NAME, - [MAKE_CELL] = MAKE_CELL, - [MAKE_FUNCTION] = MAKE_FUNCTION, - [MAP_ADD] = MAP_ADD, - [MATCH_CLASS] = MATCH_CLASS, - [MATCH_KEYS] = MATCH_KEYS, - [MATCH_MAPPING] = MATCH_MAPPING, - [MATCH_SEQUENCE] = MATCH_SEQUENCE, - [NOP] = NOP, - [POP_EXCEPT] = POP_EXCEPT, - [POP_JUMP_IF_FALSE] = POP_JUMP_IF_FALSE, - [POP_JUMP_IF_NONE] = POP_JUMP_IF_NONE, - [POP_JUMP_IF_NOT_NONE] = POP_JUMP_IF_NOT_NONE, - [POP_JUMP_IF_TRUE] = POP_JUMP_IF_TRUE, - [POP_TOP] = POP_TOP, - [PREP_RERAISE_STAR] = PREP_RERAISE_STAR, - [PRINT_EXPR] = PRINT_EXPR, - [PUSH_EXC_INFO] = PUSH_EXC_INFO, - [PUSH_NULL] = PUSH_NULL, - [RAISE_VARARGS] = RAISE_VARARGS, - [RERAISE] = RERAISE, - [RESUME] = RESUME, - [RETURN_GENERATOR] = RETURN_GENERATOR, - [RETURN_VALUE] = RETURN_VALUE, - [SEND] = SEND, - [SETUP_ANNOTATIONS] = SETUP_ANNOTATIONS, - [SET_ADD] = SET_ADD, - [SET_UPDATE] = SET_UPDATE, - [STOPITERATION_ERROR] = STOPITERATION_ERROR, - [STORE_ATTR] = STORE_ATTR, - [STORE_ATTR_ADAPTIVE] = STORE_ATTR, - [STORE_ATTR_INSTANCE_VALUE] = STORE_ATTR, - [STORE_ATTR_SLOT] = STORE_ATTR, - [STORE_ATTR_WITH_HINT] = STORE_ATTR, - [STORE_DEREF] = STORE_DEREF, - [STORE_FAST] = STORE_FAST, - [STORE_FAST__LOAD_FAST] = STORE_FAST, - [STORE_FAST__STORE_FAST] = STORE_FAST, - [STORE_GLOBAL] = STORE_GLOBAL, - [STORE_NAME] = STORE_NAME, - [STORE_SLICE] = STORE_SLICE, - [STORE_SUBSCR] = STORE_SUBSCR, - [STORE_SUBSCR_ADAPTIVE] = STORE_SUBSCR, - [STORE_SUBSCR_DICT] = STORE_SUBSCR, - [STORE_SUBSCR_LIST_INT] = STORE_SUBSCR, - [SWAP] = SWAP, - 
[UNARY_INVERT] = UNARY_INVERT, - [UNARY_NEGATIVE] = UNARY_NEGATIVE, - [UNARY_NOT] = UNARY_NOT, - [UNARY_POSITIVE] = UNARY_POSITIVE, - [UNPACK_EX] = UNPACK_EX, - [UNPACK_SEQUENCE] = UNPACK_SEQUENCE, - [UNPACK_SEQUENCE_ADAPTIVE] = UNPACK_SEQUENCE, - [UNPACK_SEQUENCE_LIST] = UNPACK_SEQUENCE, - [UNPACK_SEQUENCE_TUPLE] = UNPACK_SEQUENCE, - [UNPACK_SEQUENCE_TWO_TUPLE] = UNPACK_SEQUENCE, - [WITH_EXCEPT_START] = WITH_EXCEPT_START, - [YIELD_VALUE] = YIELD_VALUE, -}; -#endif // NEED_OPCODE_TABLES - -#ifdef Py_DEBUG -static const char *const _PyOpcode_OpName[263] = { - [CACHE] = "CACHE", - [POP_TOP] = "POP_TOP", - [PUSH_NULL] = "PUSH_NULL", - [BINARY_OP_ADAPTIVE] = "BINARY_OP_ADAPTIVE", - [END_FOR] = "END_FOR", - [BINARY_OP_ADD_FLOAT] = "BINARY_OP_ADD_FLOAT", - [BINARY_OP_ADD_INT] = "BINARY_OP_ADD_INT", - [BINARY_OP_ADD_UNICODE] = "BINARY_OP_ADD_UNICODE", - [BINARY_OP_INPLACE_ADD_UNICODE] = "BINARY_OP_INPLACE_ADD_UNICODE", - [NOP] = "NOP", - [UNARY_POSITIVE] = "UNARY_POSITIVE", - [UNARY_NEGATIVE] = "UNARY_NEGATIVE", - [UNARY_NOT] = "UNARY_NOT", - [BINARY_OP_MULTIPLY_FLOAT] = "BINARY_OP_MULTIPLY_FLOAT", - [BINARY_OP_MULTIPLY_INT] = "BINARY_OP_MULTIPLY_INT", - [UNARY_INVERT] = "UNARY_INVERT", - [BINARY_OP_SUBTRACT_FLOAT] = "BINARY_OP_SUBTRACT_FLOAT", - [BINARY_OP_SUBTRACT_INT] = "BINARY_OP_SUBTRACT_INT", - [BINARY_SUBSCR_ADAPTIVE] = "BINARY_SUBSCR_ADAPTIVE", - [BINARY_SUBSCR_DICT] = "BINARY_SUBSCR_DICT", - [BINARY_SUBSCR_GETITEM] = "BINARY_SUBSCR_GETITEM", - [BINARY_SUBSCR_LIST_INT] = "BINARY_SUBSCR_LIST_INT", - [BINARY_SUBSCR_TUPLE_INT] = "BINARY_SUBSCR_TUPLE_INT", - [CALL_ADAPTIVE] = "CALL_ADAPTIVE", - [CALL_PY_EXACT_ARGS] = "CALL_PY_EXACT_ARGS", - [BINARY_SUBSCR] = "BINARY_SUBSCR", - [BINARY_SLICE] = "BINARY_SLICE", - [STORE_SLICE] = "STORE_SLICE", - [CALL_PY_WITH_DEFAULTS] = "CALL_PY_WITH_DEFAULTS", - [CALL_BOUND_METHOD_EXACT_ARGS] = "CALL_BOUND_METHOD_EXACT_ARGS", - [GET_LEN] = "GET_LEN", - [MATCH_MAPPING] = "MATCH_MAPPING", - [MATCH_SEQUENCE] = "MATCH_SEQUENCE", - [MATCH_KEYS] = "MATCH_KEYS", - [CALL_BUILTIN_CLASS] = "CALL_BUILTIN_CLASS", - [PUSH_EXC_INFO] = "PUSH_EXC_INFO", - [CHECK_EXC_MATCH] = "CHECK_EXC_MATCH", - [CHECK_EG_MATCH] = "CHECK_EG_MATCH", - [CALL_BUILTIN_FAST_WITH_KEYWORDS] = "CALL_BUILTIN_FAST_WITH_KEYWORDS", - [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = "CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS", - [CALL_NO_KW_BUILTIN_FAST] = "CALL_NO_KW_BUILTIN_FAST", - [CALL_NO_KW_BUILTIN_O] = "CALL_NO_KW_BUILTIN_O", - [CALL_NO_KW_ISINSTANCE] = "CALL_NO_KW_ISINSTANCE", - [CALL_NO_KW_LEN] = "CALL_NO_KW_LEN", - [CALL_NO_KW_LIST_APPEND] = "CALL_NO_KW_LIST_APPEND", - [CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = "CALL_NO_KW_METHOD_DESCRIPTOR_FAST", - [CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = "CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS", - [CALL_NO_KW_METHOD_DESCRIPTOR_O] = "CALL_NO_KW_METHOD_DESCRIPTOR_O", - [CALL_NO_KW_STR_1] = "CALL_NO_KW_STR_1", - [WITH_EXCEPT_START] = "WITH_EXCEPT_START", - [GET_AITER] = "GET_AITER", - [GET_ANEXT] = "GET_ANEXT", - [BEFORE_ASYNC_WITH] = "BEFORE_ASYNC_WITH", - [BEFORE_WITH] = "BEFORE_WITH", - [END_ASYNC_FOR] = "END_ASYNC_FOR", - [CLEANUP_THROW] = "CLEANUP_THROW", - [CALL_NO_KW_TUPLE_1] = "CALL_NO_KW_TUPLE_1", - [CALL_NO_KW_TYPE_1] = "CALL_NO_KW_TYPE_1", - [COMPARE_OP_ADAPTIVE] = "COMPARE_OP_ADAPTIVE", - [COMPARE_OP_FLOAT_JUMP] = "COMPARE_OP_FLOAT_JUMP", - [STORE_SUBSCR] = "STORE_SUBSCR", - [DELETE_SUBSCR] = "DELETE_SUBSCR", - [COMPARE_OP_INT_JUMP] = "COMPARE_OP_INT_JUMP", - [STOPITERATION_ERROR] = "STOPITERATION_ERROR", - [COMPARE_OP_STR_JUMP] = "COMPARE_OP_STR_JUMP", - 
[EXTENDED_ARG_QUICK] = "EXTENDED_ARG_QUICK", - [FOR_ITER_ADAPTIVE] = "FOR_ITER_ADAPTIVE", - [FOR_ITER_LIST] = "FOR_ITER_LIST", - [GET_ITER] = "GET_ITER", - [GET_YIELD_FROM_ITER] = "GET_YIELD_FROM_ITER", - [PRINT_EXPR] = "PRINT_EXPR", - [LOAD_BUILD_CLASS] = "LOAD_BUILD_CLASS", - [FOR_ITER_RANGE] = "FOR_ITER_RANGE", - [LOAD_ATTR_ADAPTIVE] = "LOAD_ATTR_ADAPTIVE", - [LOAD_ASSERTION_ERROR] = "LOAD_ASSERTION_ERROR", - [RETURN_GENERATOR] = "RETURN_GENERATOR", - [LOAD_ATTR_CLASS] = "LOAD_ATTR_CLASS", - [LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = "LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN", - [LOAD_ATTR_INSTANCE_VALUE] = "LOAD_ATTR_INSTANCE_VALUE", - [LOAD_ATTR_MODULE] = "LOAD_ATTR_MODULE", - [LOAD_ATTR_PROPERTY] = "LOAD_ATTR_PROPERTY", - [LOAD_ATTR_SLOT] = "LOAD_ATTR_SLOT", - [LIST_TO_TUPLE] = "LIST_TO_TUPLE", - [RETURN_VALUE] = "RETURN_VALUE", - [IMPORT_STAR] = "IMPORT_STAR", - [SETUP_ANNOTATIONS] = "SETUP_ANNOTATIONS", - [LOAD_ATTR_WITH_HINT] = "LOAD_ATTR_WITH_HINT", - [ASYNC_GEN_WRAP] = "ASYNC_GEN_WRAP", - [PREP_RERAISE_STAR] = "PREP_RERAISE_STAR", - [POP_EXCEPT] = "POP_EXCEPT", - [STORE_NAME] = "STORE_NAME", - [DELETE_NAME] = "DELETE_NAME", - [UNPACK_SEQUENCE] = "UNPACK_SEQUENCE", - [FOR_ITER] = "FOR_ITER", - [UNPACK_EX] = "UNPACK_EX", - [STORE_ATTR] = "STORE_ATTR", - [DELETE_ATTR] = "DELETE_ATTR", - [STORE_GLOBAL] = "STORE_GLOBAL", - [DELETE_GLOBAL] = "DELETE_GLOBAL", - [SWAP] = "SWAP", - [LOAD_CONST] = "LOAD_CONST", - [LOAD_NAME] = "LOAD_NAME", - [BUILD_TUPLE] = "BUILD_TUPLE", - [BUILD_LIST] = "BUILD_LIST", - [BUILD_SET] = "BUILD_SET", - [BUILD_MAP] = "BUILD_MAP", - [LOAD_ATTR] = "LOAD_ATTR", - [COMPARE_OP] = "COMPARE_OP", - [IMPORT_NAME] = "IMPORT_NAME", - [IMPORT_FROM] = "IMPORT_FROM", - [JUMP_FORWARD] = "JUMP_FORWARD", - [JUMP_IF_FALSE_OR_POP] = "JUMP_IF_FALSE_OR_POP", - [JUMP_IF_TRUE_OR_POP] = "JUMP_IF_TRUE_OR_POP", - [LOAD_ATTR_METHOD_LAZY_DICT] = "LOAD_ATTR_METHOD_LAZY_DICT", - [POP_JUMP_IF_FALSE] = "POP_JUMP_IF_FALSE", - [POP_JUMP_IF_TRUE] = "POP_JUMP_IF_TRUE", - [LOAD_GLOBAL] = "LOAD_GLOBAL", - [IS_OP] = "IS_OP", - [CONTAINS_OP] = "CONTAINS_OP", - [RERAISE] = "RERAISE", - [COPY] = "COPY", - [LOAD_ATTR_METHOD_NO_DICT] = "LOAD_ATTR_METHOD_NO_DICT", - [BINARY_OP] = "BINARY_OP", - [SEND] = "SEND", - [LOAD_FAST] = "LOAD_FAST", - [STORE_FAST] = "STORE_FAST", - [DELETE_FAST] = "DELETE_FAST", - [LOAD_FAST_CHECK] = "LOAD_FAST_CHECK", - [POP_JUMP_IF_NOT_NONE] = "POP_JUMP_IF_NOT_NONE", - [POP_JUMP_IF_NONE] = "POP_JUMP_IF_NONE", - [RAISE_VARARGS] = "RAISE_VARARGS", - [GET_AWAITABLE] = "GET_AWAITABLE", - [MAKE_FUNCTION] = "MAKE_FUNCTION", - [BUILD_SLICE] = "BUILD_SLICE", - [JUMP_BACKWARD_NO_INTERRUPT] = "JUMP_BACKWARD_NO_INTERRUPT", - [MAKE_CELL] = "MAKE_CELL", - [LOAD_CLOSURE] = "LOAD_CLOSURE", - [LOAD_DEREF] = "LOAD_DEREF", - [STORE_DEREF] = "STORE_DEREF", - [DELETE_DEREF] = "DELETE_DEREF", - [JUMP_BACKWARD] = "JUMP_BACKWARD", - [LOAD_ATTR_METHOD_WITH_DICT] = "LOAD_ATTR_METHOD_WITH_DICT", - [CALL_FUNCTION_EX] = "CALL_FUNCTION_EX", - [LOAD_ATTR_METHOD_WITH_VALUES] = "LOAD_ATTR_METHOD_WITH_VALUES", - [EXTENDED_ARG] = "EXTENDED_ARG", - [LIST_APPEND] = "LIST_APPEND", - [SET_ADD] = "SET_ADD", - [MAP_ADD] = "MAP_ADD", - [LOAD_CLASSDEREF] = "LOAD_CLASSDEREF", - [COPY_FREE_VARS] = "COPY_FREE_VARS", - [YIELD_VALUE] = "YIELD_VALUE", - [RESUME] = "RESUME", - [MATCH_CLASS] = "MATCH_CLASS", - [LOAD_CONST__LOAD_FAST] = "LOAD_CONST__LOAD_FAST", - [LOAD_FAST__LOAD_CONST] = "LOAD_FAST__LOAD_CONST", - [FORMAT_VALUE] = "FORMAT_VALUE", - [BUILD_CONST_KEY_MAP] = "BUILD_CONST_KEY_MAP", - [BUILD_STRING] = "BUILD_STRING", - 
[LOAD_FAST__LOAD_FAST] = "LOAD_FAST__LOAD_FAST", - [LOAD_GLOBAL_ADAPTIVE] = "LOAD_GLOBAL_ADAPTIVE", - [LOAD_GLOBAL_BUILTIN] = "LOAD_GLOBAL_BUILTIN", - [LOAD_GLOBAL_MODULE] = "LOAD_GLOBAL_MODULE", - [LIST_EXTEND] = "LIST_EXTEND", - [SET_UPDATE] = "SET_UPDATE", - [DICT_MERGE] = "DICT_MERGE", - [DICT_UPDATE] = "DICT_UPDATE", - [STORE_ATTR_ADAPTIVE] = "STORE_ATTR_ADAPTIVE", - [STORE_ATTR_INSTANCE_VALUE] = "STORE_ATTR_INSTANCE_VALUE", - [STORE_ATTR_SLOT] = "STORE_ATTR_SLOT", - [STORE_ATTR_WITH_HINT] = "STORE_ATTR_WITH_HINT", - [STORE_FAST__LOAD_FAST] = "STORE_FAST__LOAD_FAST", - [CALL] = "CALL", - [KW_NAMES] = "KW_NAMES", - [STORE_FAST__STORE_FAST] = "STORE_FAST__STORE_FAST", - [STORE_SUBSCR_ADAPTIVE] = "STORE_SUBSCR_ADAPTIVE", - [STORE_SUBSCR_DICT] = "STORE_SUBSCR_DICT", - [STORE_SUBSCR_LIST_INT] = "STORE_SUBSCR_LIST_INT", - [UNPACK_SEQUENCE_ADAPTIVE] = "UNPACK_SEQUENCE_ADAPTIVE", - [UNPACK_SEQUENCE_LIST] = "UNPACK_SEQUENCE_LIST", - [UNPACK_SEQUENCE_TUPLE] = "UNPACK_SEQUENCE_TUPLE", - [UNPACK_SEQUENCE_TWO_TUPLE] = "UNPACK_SEQUENCE_TWO_TUPLE", - [181] = "<181>", - [182] = "<182>", - [183] = "<183>", - [184] = "<184>", - [185] = "<185>", - [186] = "<186>", - [187] = "<187>", - [188] = "<188>", - [189] = "<189>", - [190] = "<190>", - [191] = "<191>", - [192] = "<192>", - [193] = "<193>", - [194] = "<194>", - [195] = "<195>", - [196] = "<196>", - [197] = "<197>", - [198] = "<198>", - [199] = "<199>", - [200] = "<200>", - [201] = "<201>", - [202] = "<202>", - [203] = "<203>", - [204] = "<204>", - [205] = "<205>", - [206] = "<206>", - [207] = "<207>", - [208] = "<208>", - [209] = "<209>", - [210] = "<210>", - [211] = "<211>", - [212] = "<212>", - [213] = "<213>", - [214] = "<214>", - [215] = "<215>", - [216] = "<216>", - [217] = "<217>", - [218] = "<218>", - [219] = "<219>", - [220] = "<220>", - [221] = "<221>", - [222] = "<222>", - [223] = "<223>", - [224] = "<224>", - [225] = "<225>", - [226] = "<226>", - [227] = "<227>", - [228] = "<228>", - [229] = "<229>", - [230] = "<230>", - [231] = "<231>", - [232] = "<232>", - [233] = "<233>", - [234] = "<234>", - [235] = "<235>", - [236] = "<236>", - [237] = "<237>", - [238] = "<238>", - [239] = "<239>", - [240] = "<240>", - [241] = "<241>", - [242] = "<242>", - [243] = "<243>", - [244] = "<244>", - [245] = "<245>", - [246] = "<246>", - [247] = "<247>", - [248] = "<248>", - [249] = "<249>", - [250] = "<250>", - [251] = "<251>", - [252] = "<252>", - [253] = "<253>", - [254] = "<254>", - [DO_TRACING] = "DO_TRACING", - [SETUP_FINALLY] = "SETUP_FINALLY", - [SETUP_CLEANUP] = "SETUP_CLEANUP", - [SETUP_WITH] = "SETUP_WITH", - [POP_BLOCK] = "POP_BLOCK", - [JUMP] = "JUMP", - [JUMP_NO_INTERRUPT] = "JUMP_NO_INTERRUPT", - [LOAD_METHOD] = "LOAD_METHOD", -}; -#endif - -#define EXTRA_CASES \ - case 181: \ - case 182: \ - case 183: \ - case 184: \ - case 185: \ - case 186: \ - case 187: \ - case 188: \ - case 189: \ - case 190: \ - case 191: \ - case 192: \ - case 193: \ - case 194: \ - case 195: \ - case 196: \ - case 197: \ - case 198: \ - case 199: \ - case 200: \ - case 201: \ - case 202: \ - case 203: \ - case 204: \ - case 205: \ - case 206: \ - case 207: \ - case 208: \ - case 209: \ - case 210: \ - case 211: \ - case 212: \ - case 213: \ - case 214: \ - case 215: \ - case 216: \ - case 217: \ - case 218: \ - case 219: \ - case 220: \ - case 221: \ - case 222: \ - case 223: \ - case 224: \ - case 225: \ - case 226: \ - case 227: \ - case 228: \ - case 229: \ - case 230: \ - case 231: \ - case 232: \ - case 233: \ - case 234: \ - case 235: \ - case 236: \ - case 
237: \ - case 238: \ - case 239: \ - case 240: \ - case 241: \ - case 242: \ - case 243: \ - case 244: \ - case 245: \ - case 246: \ - case 247: \ - case 248: \ - case 249: \ - case 250: \ - case 251: \ - case 252: \ - case 253: \ - case 254: \ - ; - -#ifdef __cplusplus -} -#endif -#endif // !Py_INTERNAL_OPCODE_H diff --git a/Include/internal/pycore_opcode_metadata.h b/Include/internal/pycore_opcode_metadata.h new file mode 100644 index 000000000000000..4e45725d3934796 --- /dev/null +++ b/Include/internal/pycore_opcode_metadata.h @@ -0,0 +1,2573 @@ +// This file is generated by Tools/cases_generator/generate_cases.py +// from: +// Python/bytecodes.c +// Do not edit! + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include // bool + + +#define IS_PSEUDO_INSTR(OP) ( \ + ((OP) == LOAD_CLOSURE) || \ + ((OP) == STORE_FAST_MAYBE_NULL) || \ + ((OP) == LOAD_SUPER_METHOD) || \ + ((OP) == LOAD_ZERO_SUPER_METHOD) || \ + ((OP) == LOAD_ZERO_SUPER_ATTR) || \ + ((OP) == LOAD_METHOD) || \ + ((OP) == JUMP) || \ + ((OP) == JUMP_NO_INTERRUPT) || \ + ((OP) == SETUP_FINALLY) || \ + ((OP) == SETUP_CLEANUP) || \ + ((OP) == SETUP_WITH) || \ + ((OP) == POP_BLOCK) || \ + 0) + +#define _EXIT_TRACE 300 +#define _SET_IP 301 +#define _SPECIALIZE_TO_BOOL 302 +#define _TO_BOOL 303 +#define _GUARD_BOTH_INT 304 +#define _BINARY_OP_MULTIPLY_INT 305 +#define _BINARY_OP_ADD_INT 306 +#define _BINARY_OP_SUBTRACT_INT 307 +#define _GUARD_BOTH_FLOAT 308 +#define _BINARY_OP_MULTIPLY_FLOAT 309 +#define _BINARY_OP_ADD_FLOAT 310 +#define _BINARY_OP_SUBTRACT_FLOAT 311 +#define _GUARD_BOTH_UNICODE 312 +#define _BINARY_OP_ADD_UNICODE 313 +#define _BINARY_OP_INPLACE_ADD_UNICODE 314 +#define _SPECIALIZE_BINARY_SUBSCR 315 +#define _BINARY_SUBSCR 316 +#define _SPECIALIZE_STORE_SUBSCR 317 +#define _STORE_SUBSCR 318 +#define _POP_FRAME 319 +#define _SPECIALIZE_SEND 320 +#define _SEND 321 +#define _SPECIALIZE_UNPACK_SEQUENCE 322 +#define _UNPACK_SEQUENCE 323 +#define _SPECIALIZE_STORE_ATTR 324 +#define _STORE_ATTR 325 +#define _SPECIALIZE_LOAD_GLOBAL 326 +#define _LOAD_GLOBAL 327 +#define _GUARD_GLOBALS_VERSION 328 +#define _GUARD_BUILTINS_VERSION 329 +#define _LOAD_GLOBAL_MODULE 330 +#define _LOAD_GLOBAL_BUILTINS 331 +#define _SPECIALIZE_LOAD_SUPER_ATTR 332 +#define _LOAD_SUPER_ATTR 333 +#define _SPECIALIZE_LOAD_ATTR 334 +#define _LOAD_ATTR 335 +#define _GUARD_TYPE_VERSION 336 +#define _CHECK_MANAGED_OBJECT_HAS_VALUES 337 +#define _LOAD_ATTR_INSTANCE_VALUE 338 +#define _CHECK_ATTR_MODULE 339 +#define _LOAD_ATTR_MODULE 340 +#define _CHECK_ATTR_WITH_HINT 341 +#define _LOAD_ATTR_WITH_HINT 342 +#define _LOAD_ATTR_SLOT 343 +#define _CHECK_ATTR_CLASS 344 +#define _LOAD_ATTR_CLASS 345 +#define _GUARD_DORV_VALUES 346 +#define _STORE_ATTR_INSTANCE_VALUE 347 +#define _STORE_ATTR_SLOT 348 +#define _SPECIALIZE_COMPARE_OP 349 +#define _COMPARE_OP 350 +#define _POP_JUMP_IF_FALSE 351 +#define _POP_JUMP_IF_TRUE 352 +#define _IS_NONE 353 +#define _SPECIALIZE_FOR_ITER 354 +#define _FOR_ITER 355 +#define _FOR_ITER_TIER_TWO 356 +#define _ITER_CHECK_LIST 357 +#define _ITER_JUMP_LIST 358 +#define _GUARD_NOT_EXHAUSTED_LIST 359 +#define _ITER_NEXT_LIST 360 +#define _ITER_CHECK_TUPLE 361 +#define _ITER_JUMP_TUPLE 362 +#define _GUARD_NOT_EXHAUSTED_TUPLE 363 +#define _ITER_NEXT_TUPLE 364 +#define _ITER_CHECK_RANGE 365 +#define _ITER_JUMP_RANGE 366 +#define _GUARD_NOT_EXHAUSTED_RANGE 367 +#define _ITER_NEXT_RANGE 368 +#define _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT 369 +#define _GUARD_KEYS_VERSION 370 +#define 
_LOAD_ATTR_METHOD_WITH_VALUES 371 +#define _LOAD_ATTR_METHOD_NO_DICT 372 +#define _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES 373 +#define _LOAD_ATTR_NONDESCRIPTOR_NO_DICT 374 +#define _CHECK_ATTR_METHOD_LAZY_DICT 375 +#define _LOAD_ATTR_METHOD_LAZY_DICT 376 +#define _SPECIALIZE_CALL 377 +#define _CALL 378 +#define _CHECK_CALL_BOUND_METHOD_EXACT_ARGS 379 +#define _INIT_CALL_BOUND_METHOD_EXACT_ARGS 380 +#define _CHECK_PEP_523 381 +#define _CHECK_FUNCTION_EXACT_ARGS 382 +#define _CHECK_STACK_SPACE 383 +#define _INIT_CALL_PY_EXACT_ARGS 384 +#define _PUSH_FRAME 385 +#define _SPECIALIZE_BINARY_OP 386 +#define _BINARY_OP 387 +#define _GUARD_IS_TRUE_POP 388 +#define _GUARD_IS_FALSE_POP 389 +#define _GUARD_IS_NONE_POP 390 +#define _GUARD_IS_NOT_NONE_POP 391 +#define _JUMP_TO_TOP 392 +#define _SAVE_RETURN_OFFSET 393 +#define _INSERT 394 +#define _CHECK_VALIDITY 395 + +extern int _PyOpcode_num_popped(int opcode, int oparg, bool jump); +#ifdef NEED_OPCODE_METADATA +int _PyOpcode_num_popped(int opcode, int oparg, bool jump) { + switch(opcode) { + case NOP: + return 0; + case RESUME: + return 0; + case RESUME_CHECK: + return 0; + case INSTRUMENTED_RESUME: + return 0; + case LOAD_CLOSURE: + return 0; + case LOAD_FAST_CHECK: + return 0; + case LOAD_FAST: + return 0; + case LOAD_FAST_AND_CLEAR: + return 0; + case LOAD_FAST_LOAD_FAST: + return 0; + case LOAD_CONST: + return 0; + case STORE_FAST: + return 1; + case STORE_FAST_MAYBE_NULL: + return 1; + case STORE_FAST_LOAD_FAST: + return 1; + case STORE_FAST_STORE_FAST: + return 2; + case POP_TOP: + return 1; + case PUSH_NULL: + return 0; + case END_FOR: + return 2; + case INSTRUMENTED_END_FOR: + return 2; + case END_SEND: + return 2; + case INSTRUMENTED_END_SEND: + return 2; + case UNARY_NEGATIVE: + return 1; + case UNARY_NOT: + return 1; + case _SPECIALIZE_TO_BOOL: + return 1; + case _TO_BOOL: + return 1; + case TO_BOOL: + return 1; + case TO_BOOL_BOOL: + return 1; + case TO_BOOL_INT: + return 1; + case TO_BOOL_LIST: + return 1; + case TO_BOOL_NONE: + return 1; + case TO_BOOL_STR: + return 1; + case TO_BOOL_ALWAYS_TRUE: + return 1; + case UNARY_INVERT: + return 1; + case _GUARD_BOTH_INT: + return 2; + case _BINARY_OP_MULTIPLY_INT: + return 2; + case _BINARY_OP_ADD_INT: + return 2; + case _BINARY_OP_SUBTRACT_INT: + return 2; + case BINARY_OP_MULTIPLY_INT: + return 2; + case BINARY_OP_ADD_INT: + return 2; + case BINARY_OP_SUBTRACT_INT: + return 2; + case _GUARD_BOTH_FLOAT: + return 2; + case _BINARY_OP_MULTIPLY_FLOAT: + return 2; + case _BINARY_OP_ADD_FLOAT: + return 2; + case _BINARY_OP_SUBTRACT_FLOAT: + return 2; + case BINARY_OP_MULTIPLY_FLOAT: + return 2; + case BINARY_OP_ADD_FLOAT: + return 2; + case BINARY_OP_SUBTRACT_FLOAT: + return 2; + case _GUARD_BOTH_UNICODE: + return 2; + case _BINARY_OP_ADD_UNICODE: + return 2; + case BINARY_OP_ADD_UNICODE: + return 2; + case _BINARY_OP_INPLACE_ADD_UNICODE: + return 2; + case BINARY_OP_INPLACE_ADD_UNICODE: + return 2; + case _SPECIALIZE_BINARY_SUBSCR: + return 2; + case _BINARY_SUBSCR: + return 2; + case BINARY_SUBSCR: + return 2; + case BINARY_SLICE: + return 3; + case STORE_SLICE: + return 4; + case BINARY_SUBSCR_LIST_INT: + return 2; + case BINARY_SUBSCR_STR_INT: + return 2; + case BINARY_SUBSCR_TUPLE_INT: + return 2; + case BINARY_SUBSCR_DICT: + return 2; + case BINARY_SUBSCR_GETITEM: + return 2; + case LIST_APPEND: + return (oparg-1) + 2; + case SET_ADD: + return (oparg-1) + 2; + case _SPECIALIZE_STORE_SUBSCR: + return 2; + case _STORE_SUBSCR: + return 3; + case STORE_SUBSCR: + return 3; + case 
STORE_SUBSCR_LIST_INT: + return 3; + case STORE_SUBSCR_DICT: + return 3; + case DELETE_SUBSCR: + return 2; + case CALL_INTRINSIC_1: + return 1; + case CALL_INTRINSIC_2: + return 2; + case RAISE_VARARGS: + return oparg; + case INTERPRETER_EXIT: + return 1; + case _POP_FRAME: + return 1; + case RETURN_VALUE: + return 1; + case INSTRUMENTED_RETURN_VALUE: + return 1; + case RETURN_CONST: + return 0; + case INSTRUMENTED_RETURN_CONST: + return 0; + case GET_AITER: + return 1; + case GET_ANEXT: + return 1; + case GET_AWAITABLE: + return 1; + case _SPECIALIZE_SEND: + return 2; + case _SEND: + return 2; + case SEND: + return 2; + case SEND_GEN: + return 2; + case INSTRUMENTED_YIELD_VALUE: + return 1; + case YIELD_VALUE: + return 1; + case POP_EXCEPT: + return 1; + case RERAISE: + return oparg + 1; + case END_ASYNC_FOR: + return 2; + case CLEANUP_THROW: + return 3; + case LOAD_ASSERTION_ERROR: + return 0; + case LOAD_BUILD_CLASS: + return 0; + case STORE_NAME: + return 1; + case DELETE_NAME: + return 0; + case _SPECIALIZE_UNPACK_SEQUENCE: + return 1; + case _UNPACK_SEQUENCE: + return 1; + case UNPACK_SEQUENCE: + return 1; + case UNPACK_SEQUENCE_TWO_TUPLE: + return 1; + case UNPACK_SEQUENCE_TUPLE: + return 1; + case UNPACK_SEQUENCE_LIST: + return 1; + case UNPACK_EX: + return 1; + case _SPECIALIZE_STORE_ATTR: + return 1; + case _STORE_ATTR: + return 2; + case STORE_ATTR: + return 2; + case DELETE_ATTR: + return 1; + case STORE_GLOBAL: + return 1; + case DELETE_GLOBAL: + return 0; + case LOAD_LOCALS: + return 0; + case LOAD_FROM_DICT_OR_GLOBALS: + return 1; + case LOAD_NAME: + return 0; + case _SPECIALIZE_LOAD_GLOBAL: + return 0; + case _LOAD_GLOBAL: + return 0; + case LOAD_GLOBAL: + return 0; + case _GUARD_GLOBALS_VERSION: + return 0; + case _GUARD_BUILTINS_VERSION: + return 0; + case _LOAD_GLOBAL_MODULE: + return 0; + case _LOAD_GLOBAL_BUILTINS: + return 0; + case LOAD_GLOBAL_MODULE: + return 0; + case LOAD_GLOBAL_BUILTIN: + return 0; + case DELETE_FAST: + return 0; + case MAKE_CELL: + return 0; + case DELETE_DEREF: + return 0; + case LOAD_FROM_DICT_OR_DEREF: + return 1; + case LOAD_DEREF: + return 0; + case STORE_DEREF: + return 1; + case COPY_FREE_VARS: + return 0; + case BUILD_STRING: + return oparg; + case BUILD_TUPLE: + return oparg; + case BUILD_LIST: + return oparg; + case LIST_EXTEND: + return (oparg-1) + 2; + case SET_UPDATE: + return (oparg-1) + 2; + case BUILD_SET: + return oparg; + case BUILD_MAP: + return oparg*2; + case SETUP_ANNOTATIONS: + return 0; + case BUILD_CONST_KEY_MAP: + return oparg + 1; + case DICT_UPDATE: + return (oparg - 1) + 2; + case DICT_MERGE: + return (oparg - 1) + 5; + case MAP_ADD: + return (oparg - 1) + 3; + case INSTRUMENTED_LOAD_SUPER_ATTR: + return 3; + case _SPECIALIZE_LOAD_SUPER_ATTR: + return 3; + case _LOAD_SUPER_ATTR: + return 3; + case LOAD_SUPER_ATTR: + return 3; + case LOAD_SUPER_METHOD: + return 3; + case LOAD_ZERO_SUPER_METHOD: + return 3; + case LOAD_ZERO_SUPER_ATTR: + return 3; + case LOAD_SUPER_ATTR_ATTR: + return 3; + case LOAD_SUPER_ATTR_METHOD: + return 3; + case _SPECIALIZE_LOAD_ATTR: + return 1; + case _LOAD_ATTR: + return 1; + case LOAD_ATTR: + return 1; + case LOAD_METHOD: + return 1; + case _GUARD_TYPE_VERSION: + return 1; + case _CHECK_MANAGED_OBJECT_HAS_VALUES: + return 1; + case _LOAD_ATTR_INSTANCE_VALUE: + return 1; + case LOAD_ATTR_INSTANCE_VALUE: + return 1; + case _CHECK_ATTR_MODULE: + return 1; + case _LOAD_ATTR_MODULE: + return 1; + case LOAD_ATTR_MODULE: + return 1; + case _CHECK_ATTR_WITH_HINT: + return 1; + case 
_LOAD_ATTR_WITH_HINT: + return 1; + case LOAD_ATTR_WITH_HINT: + return 1; + case _LOAD_ATTR_SLOT: + return 1; + case LOAD_ATTR_SLOT: + return 1; + case _CHECK_ATTR_CLASS: + return 1; + case _LOAD_ATTR_CLASS: + return 1; + case LOAD_ATTR_CLASS: + return 1; + case LOAD_ATTR_PROPERTY: + return 1; + case LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN: + return 1; + case _GUARD_DORV_VALUES: + return 1; + case _STORE_ATTR_INSTANCE_VALUE: + return 2; + case STORE_ATTR_INSTANCE_VALUE: + return 2; + case STORE_ATTR_WITH_HINT: + return 2; + case _STORE_ATTR_SLOT: + return 2; + case STORE_ATTR_SLOT: + return 2; + case _SPECIALIZE_COMPARE_OP: + return 2; + case _COMPARE_OP: + return 2; + case COMPARE_OP: + return 2; + case COMPARE_OP_FLOAT: + return 2; + case COMPARE_OP_INT: + return 2; + case COMPARE_OP_STR: + return 2; + case IS_OP: + return 2; + case CONTAINS_OP: + return 2; + case CHECK_EG_MATCH: + return 2; + case CHECK_EXC_MATCH: + return 2; + case IMPORT_NAME: + return 2; + case IMPORT_FROM: + return 1; + case JUMP_FORWARD: + return 0; + case JUMP_BACKWARD: + return 0; + case JUMP: + return 0; + case JUMP_NO_INTERRUPT: + return 0; + case ENTER_EXECUTOR: + return 0; + case _POP_JUMP_IF_FALSE: + return 1; + case _POP_JUMP_IF_TRUE: + return 1; + case _IS_NONE: + return 1; + case POP_JUMP_IF_TRUE: + return 1; + case POP_JUMP_IF_FALSE: + return 1; + case POP_JUMP_IF_NONE: + return 1; + case POP_JUMP_IF_NOT_NONE: + return 1; + case JUMP_BACKWARD_NO_INTERRUPT: + return 0; + case GET_LEN: + return 1; + case MATCH_CLASS: + return 3; + case MATCH_MAPPING: + return 1; + case MATCH_SEQUENCE: + return 1; + case MATCH_KEYS: + return 2; + case GET_ITER: + return 1; + case GET_YIELD_FROM_ITER: + return 1; + case _SPECIALIZE_FOR_ITER: + return 1; + case _FOR_ITER: + return 1; + case _FOR_ITER_TIER_TWO: + return 1; + case FOR_ITER: + return 1; + case INSTRUMENTED_FOR_ITER: + return 0; + case _ITER_CHECK_LIST: + return 1; + case _ITER_JUMP_LIST: + return 1; + case _GUARD_NOT_EXHAUSTED_LIST: + return 1; + case _ITER_NEXT_LIST: + return 1; + case FOR_ITER_LIST: + return 1; + case _ITER_CHECK_TUPLE: + return 1; + case _ITER_JUMP_TUPLE: + return 1; + case _GUARD_NOT_EXHAUSTED_TUPLE: + return 1; + case _ITER_NEXT_TUPLE: + return 1; + case FOR_ITER_TUPLE: + return 1; + case _ITER_CHECK_RANGE: + return 1; + case _ITER_JUMP_RANGE: + return 1; + case _GUARD_NOT_EXHAUSTED_RANGE: + return 1; + case _ITER_NEXT_RANGE: + return 1; + case FOR_ITER_RANGE: + return 1; + case FOR_ITER_GEN: + return 1; + case BEFORE_ASYNC_WITH: + return 1; + case BEFORE_WITH: + return 1; + case WITH_EXCEPT_START: + return 4; + case SETUP_FINALLY: + return 0; + case SETUP_CLEANUP: + return 0; + case SETUP_WITH: + return 0; + case POP_BLOCK: + return 0; + case PUSH_EXC_INFO: + return 1; + case _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT: + return 1; + case _GUARD_KEYS_VERSION: + return 1; + case _LOAD_ATTR_METHOD_WITH_VALUES: + return 1; + case LOAD_ATTR_METHOD_WITH_VALUES: + return 1; + case _LOAD_ATTR_METHOD_NO_DICT: + return 1; + case LOAD_ATTR_METHOD_NO_DICT: + return 1; + case _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: + return 1; + case LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: + return 1; + case _LOAD_ATTR_NONDESCRIPTOR_NO_DICT: + return 1; + case LOAD_ATTR_NONDESCRIPTOR_NO_DICT: + return 1; + case _CHECK_ATTR_METHOD_LAZY_DICT: + return 1; + case _LOAD_ATTR_METHOD_LAZY_DICT: + return 1; + case LOAD_ATTR_METHOD_LAZY_DICT: + return 1; + case INSTRUMENTED_CALL: + return 0; + case _SPECIALIZE_CALL: + return oparg + 2; + case _CALL: + return oparg + 2; + case CALL: + return 
oparg + 2; + case _CHECK_CALL_BOUND_METHOD_EXACT_ARGS: + return oparg + 2; + case _INIT_CALL_BOUND_METHOD_EXACT_ARGS: + return oparg + 2; + case _CHECK_PEP_523: + return 0; + case _CHECK_FUNCTION_EXACT_ARGS: + return oparg + 2; + case _CHECK_STACK_SPACE: + return oparg + 2; + case _INIT_CALL_PY_EXACT_ARGS: + return oparg + 2; + case _PUSH_FRAME: + return 1; + case CALL_BOUND_METHOD_EXACT_ARGS: + return oparg + 2; + case CALL_PY_EXACT_ARGS: + return oparg + 2; + case CALL_PY_WITH_DEFAULTS: + return oparg + 2; + case CALL_TYPE_1: + return oparg + 2; + case CALL_STR_1: + return oparg + 2; + case CALL_TUPLE_1: + return oparg + 2; + case CALL_ALLOC_AND_ENTER_INIT: + return oparg + 2; + case EXIT_INIT_CHECK: + return 1; + case CALL_BUILTIN_CLASS: + return oparg + 2; + case CALL_BUILTIN_O: + return oparg + 2; + case CALL_BUILTIN_FAST: + return oparg + 2; + case CALL_BUILTIN_FAST_WITH_KEYWORDS: + return oparg + 2; + case CALL_LEN: + return oparg + 2; + case CALL_ISINSTANCE: + return oparg + 2; + case CALL_LIST_APPEND: + return oparg + 2; + case CALL_METHOD_DESCRIPTOR_O: + return oparg + 2; + case CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS: + return oparg + 2; + case CALL_METHOD_DESCRIPTOR_NOARGS: + return oparg + 2; + case CALL_METHOD_DESCRIPTOR_FAST: + return oparg + 2; + case INSTRUMENTED_CALL_KW: + return 0; + case CALL_KW: + return oparg + 3; + case INSTRUMENTED_CALL_FUNCTION_EX: + return 0; + case CALL_FUNCTION_EX: + return ((oparg & 1) ? 1 : 0) + 3; + case MAKE_FUNCTION: + return 1; + case SET_FUNCTION_ATTRIBUTE: + return 2; + case RETURN_GENERATOR: + return 0; + case BUILD_SLICE: + return ((oparg == 3) ? 1 : 0) + 2; + case CONVERT_VALUE: + return 1; + case FORMAT_SIMPLE: + return 1; + case FORMAT_WITH_SPEC: + return 2; + case COPY: + return (oparg-1) + 1; + case _SPECIALIZE_BINARY_OP: + return 2; + case _BINARY_OP: + return 2; + case BINARY_OP: + return 2; + case SWAP: + return (oparg-2) + 2; + case INSTRUMENTED_INSTRUCTION: + return 0; + case INSTRUMENTED_JUMP_FORWARD: + return 0; + case INSTRUMENTED_JUMP_BACKWARD: + return 0; + case INSTRUMENTED_POP_JUMP_IF_TRUE: + return 0; + case INSTRUMENTED_POP_JUMP_IF_FALSE: + return 0; + case INSTRUMENTED_POP_JUMP_IF_NONE: + return 0; + case INSTRUMENTED_POP_JUMP_IF_NOT_NONE: + return 0; + case EXTENDED_ARG: + return 0; + case CACHE: + return 0; + case RESERVED: + return 0; + case _GUARD_IS_TRUE_POP: + return 1; + case _GUARD_IS_FALSE_POP: + return 1; + case _GUARD_IS_NONE_POP: + return 1; + case _GUARD_IS_NOT_NONE_POP: + return 1; + case _JUMP_TO_TOP: + return 0; + case _SET_IP: + return 0; + case _SAVE_RETURN_OFFSET: + return 0; + case _EXIT_TRACE: + return 0; + case _INSERT: + return oparg + 1; + case _CHECK_VALIDITY: + return 0; + default: + return -1; + } +} +#endif // NEED_OPCODE_METADATA + +extern int _PyOpcode_num_pushed(int opcode, int oparg, bool jump); +#ifdef NEED_OPCODE_METADATA +int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { + switch(opcode) { + case NOP: + return 0; + case RESUME: + return 0; + case RESUME_CHECK: + return 0; + case INSTRUMENTED_RESUME: + return 0; + case LOAD_CLOSURE: + return 1; + case LOAD_FAST_CHECK: + return 1; + case LOAD_FAST: + return 1; + case LOAD_FAST_AND_CLEAR: + return 1; + case LOAD_FAST_LOAD_FAST: + return 2; + case LOAD_CONST: + return 1; + case STORE_FAST: + return 0; + case STORE_FAST_MAYBE_NULL: + return 0; + case STORE_FAST_LOAD_FAST: + return 1; + case STORE_FAST_STORE_FAST: + return 0; + case POP_TOP: + return 0; + case PUSH_NULL: + return 1; + case END_FOR: + return 0; + case 
INSTRUMENTED_END_FOR: + return 0; + case END_SEND: + return 1; + case INSTRUMENTED_END_SEND: + return 1; + case UNARY_NEGATIVE: + return 1; + case UNARY_NOT: + return 1; + case _SPECIALIZE_TO_BOOL: + return 1; + case _TO_BOOL: + return 1; + case TO_BOOL: + return 1; + case TO_BOOL_BOOL: + return 1; + case TO_BOOL_INT: + return 1; + case TO_BOOL_LIST: + return 1; + case TO_BOOL_NONE: + return 1; + case TO_BOOL_STR: + return 1; + case TO_BOOL_ALWAYS_TRUE: + return 1; + case UNARY_INVERT: + return 1; + case _GUARD_BOTH_INT: + return 2; + case _BINARY_OP_MULTIPLY_INT: + return 1; + case _BINARY_OP_ADD_INT: + return 1; + case _BINARY_OP_SUBTRACT_INT: + return 1; + case BINARY_OP_MULTIPLY_INT: + return 1; + case BINARY_OP_ADD_INT: + return 1; + case BINARY_OP_SUBTRACT_INT: + return 1; + case _GUARD_BOTH_FLOAT: + return 2; + case _BINARY_OP_MULTIPLY_FLOAT: + return 1; + case _BINARY_OP_ADD_FLOAT: + return 1; + case _BINARY_OP_SUBTRACT_FLOAT: + return 1; + case BINARY_OP_MULTIPLY_FLOAT: + return 1; + case BINARY_OP_ADD_FLOAT: + return 1; + case BINARY_OP_SUBTRACT_FLOAT: + return 1; + case _GUARD_BOTH_UNICODE: + return 2; + case _BINARY_OP_ADD_UNICODE: + return 1; + case BINARY_OP_ADD_UNICODE: + return 1; + case _BINARY_OP_INPLACE_ADD_UNICODE: + return 0; + case BINARY_OP_INPLACE_ADD_UNICODE: + return 0; + case _SPECIALIZE_BINARY_SUBSCR: + return 2; + case _BINARY_SUBSCR: + return 1; + case BINARY_SUBSCR: + return 1; + case BINARY_SLICE: + return 1; + case STORE_SLICE: + return 0; + case BINARY_SUBSCR_LIST_INT: + return 1; + case BINARY_SUBSCR_STR_INT: + return 1; + case BINARY_SUBSCR_TUPLE_INT: + return 1; + case BINARY_SUBSCR_DICT: + return 1; + case BINARY_SUBSCR_GETITEM: + return 1; + case LIST_APPEND: + return (oparg-1) + 1; + case SET_ADD: + return (oparg-1) + 1; + case _SPECIALIZE_STORE_SUBSCR: + return 2; + case _STORE_SUBSCR: + return 0; + case STORE_SUBSCR: + return 0; + case STORE_SUBSCR_LIST_INT: + return 0; + case STORE_SUBSCR_DICT: + return 0; + case DELETE_SUBSCR: + return 0; + case CALL_INTRINSIC_1: + return 1; + case CALL_INTRINSIC_2: + return 1; + case RAISE_VARARGS: + return 0; + case INTERPRETER_EXIT: + return 0; + case _POP_FRAME: + return 0; + case RETURN_VALUE: + return 0; + case INSTRUMENTED_RETURN_VALUE: + return 0; + case RETURN_CONST: + return 0; + case INSTRUMENTED_RETURN_CONST: + return 0; + case GET_AITER: + return 1; + case GET_ANEXT: + return 2; + case GET_AWAITABLE: + return 1; + case _SPECIALIZE_SEND: + return 2; + case _SEND: + return 2; + case SEND: + return 2; + case SEND_GEN: + return 2; + case INSTRUMENTED_YIELD_VALUE: + return 1; + case YIELD_VALUE: + return 1; + case POP_EXCEPT: + return 0; + case RERAISE: + return oparg; + case END_ASYNC_FOR: + return 0; + case CLEANUP_THROW: + return 2; + case LOAD_ASSERTION_ERROR: + return 1; + case LOAD_BUILD_CLASS: + return 1; + case STORE_NAME: + return 0; + case DELETE_NAME: + return 0; + case _SPECIALIZE_UNPACK_SEQUENCE: + return 1; + case _UNPACK_SEQUENCE: + return oparg; + case UNPACK_SEQUENCE: + return oparg; + case UNPACK_SEQUENCE_TWO_TUPLE: + return oparg; + case UNPACK_SEQUENCE_TUPLE: + return oparg; + case UNPACK_SEQUENCE_LIST: + return oparg; + case UNPACK_EX: + return (oparg & 0xFF) + (oparg >> 8) + 1; + case _SPECIALIZE_STORE_ATTR: + return 1; + case _STORE_ATTR: + return 0; + case STORE_ATTR: + return 0; + case DELETE_ATTR: + return 0; + case STORE_GLOBAL: + return 0; + case DELETE_GLOBAL: + return 0; + case LOAD_LOCALS: + return 1; + case LOAD_FROM_DICT_OR_GLOBALS: + return 1; + case LOAD_NAME: + return 
1; + case _SPECIALIZE_LOAD_GLOBAL: + return 0; + case _LOAD_GLOBAL: + return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_GLOBAL: + return (oparg & 1 ? 1 : 0) + 1; + case _GUARD_GLOBALS_VERSION: + return 0; + case _GUARD_BUILTINS_VERSION: + return 0; + case _LOAD_GLOBAL_MODULE: + return ((oparg & 1) ? 1 : 0) + 1; + case _LOAD_GLOBAL_BUILTINS: + return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_GLOBAL_MODULE: + return (oparg & 1 ? 1 : 0) + 1; + case LOAD_GLOBAL_BUILTIN: + return (oparg & 1 ? 1 : 0) + 1; + case DELETE_FAST: + return 0; + case MAKE_CELL: + return 0; + case DELETE_DEREF: + return 0; + case LOAD_FROM_DICT_OR_DEREF: + return 1; + case LOAD_DEREF: + return 1; + case STORE_DEREF: + return 0; + case COPY_FREE_VARS: + return 0; + case BUILD_STRING: + return 1; + case BUILD_TUPLE: + return 1; + case BUILD_LIST: + return 1; + case LIST_EXTEND: + return (oparg-1) + 1; + case SET_UPDATE: + return (oparg-1) + 1; + case BUILD_SET: + return 1; + case BUILD_MAP: + return 1; + case SETUP_ANNOTATIONS: + return 0; + case BUILD_CONST_KEY_MAP: + return 1; + case DICT_UPDATE: + return (oparg - 1) + 1; + case DICT_MERGE: + return (oparg - 1) + 4; + case MAP_ADD: + return (oparg - 1) + 1; + case INSTRUMENTED_LOAD_SUPER_ATTR: + return ((oparg & 1) ? 1 : 0) + 1; + case _SPECIALIZE_LOAD_SUPER_ATTR: + return 3; + case _LOAD_SUPER_ATTR: + return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_SUPER_ATTR: + return (oparg & 1 ? 1 : 0) + 1; + case LOAD_SUPER_METHOD: + return (oparg & 1 ? 1 : 0) + 1; + case LOAD_ZERO_SUPER_METHOD: + return (oparg & 1 ? 1 : 0) + 1; + case LOAD_ZERO_SUPER_ATTR: + return (oparg & 1 ? 1 : 0) + 1; + case LOAD_SUPER_ATTR_ATTR: + return 1; + case LOAD_SUPER_ATTR_METHOD: + return 2; + case _SPECIALIZE_LOAD_ATTR: + return 1; + case _LOAD_ATTR: + return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_ATTR: + return (oparg & 1 ? 1 : 0) + 1; + case LOAD_METHOD: + return (oparg & 1 ? 1 : 0) + 1; + case _GUARD_TYPE_VERSION: + return 1; + case _CHECK_MANAGED_OBJECT_HAS_VALUES: + return 1; + case _LOAD_ATTR_INSTANCE_VALUE: + return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_ATTR_INSTANCE_VALUE: + return (oparg & 1 ? 1 : 0) + 1; + case _CHECK_ATTR_MODULE: + return 1; + case _LOAD_ATTR_MODULE: + return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_ATTR_MODULE: + return (oparg & 1 ? 1 : 0) + 1; + case _CHECK_ATTR_WITH_HINT: + return 1; + case _LOAD_ATTR_WITH_HINT: + return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_ATTR_WITH_HINT: + return (oparg & 1 ? 1 : 0) + 1; + case _LOAD_ATTR_SLOT: + return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_ATTR_SLOT: + return (oparg & 1 ? 1 : 0) + 1; + case _CHECK_ATTR_CLASS: + return 1; + case _LOAD_ATTR_CLASS: + return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_ATTR_CLASS: + return (oparg & 1 ? 
1 : 0) + 1; + case LOAD_ATTR_PROPERTY: + return 1; + case LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN: + return 1; + case _GUARD_DORV_VALUES: + return 1; + case _STORE_ATTR_INSTANCE_VALUE: + return 0; + case STORE_ATTR_INSTANCE_VALUE: + return 0; + case STORE_ATTR_WITH_HINT: + return 0; + case _STORE_ATTR_SLOT: + return 0; + case STORE_ATTR_SLOT: + return 0; + case _SPECIALIZE_COMPARE_OP: + return 2; + case _COMPARE_OP: + return 1; + case COMPARE_OP: + return 1; + case COMPARE_OP_FLOAT: + return 1; + case COMPARE_OP_INT: + return 1; + case COMPARE_OP_STR: + return 1; + case IS_OP: + return 1; + case CONTAINS_OP: + return 1; + case CHECK_EG_MATCH: + return 2; + case CHECK_EXC_MATCH: + return 2; + case IMPORT_NAME: + return 1; + case IMPORT_FROM: + return 2; + case JUMP_FORWARD: + return 0; + case JUMP_BACKWARD: + return 0; + case JUMP: + return 0; + case JUMP_NO_INTERRUPT: + return 0; + case ENTER_EXECUTOR: + return 0; + case _POP_JUMP_IF_FALSE: + return 0; + case _POP_JUMP_IF_TRUE: + return 0; + case _IS_NONE: + return 1; + case POP_JUMP_IF_TRUE: + return 0; + case POP_JUMP_IF_FALSE: + return 0; + case POP_JUMP_IF_NONE: + return 0; + case POP_JUMP_IF_NOT_NONE: + return 0; + case JUMP_BACKWARD_NO_INTERRUPT: + return 0; + case GET_LEN: + return 2; + case MATCH_CLASS: + return 1; + case MATCH_MAPPING: + return 2; + case MATCH_SEQUENCE: + return 2; + case MATCH_KEYS: + return 3; + case GET_ITER: + return 1; + case GET_YIELD_FROM_ITER: + return 1; + case _SPECIALIZE_FOR_ITER: + return 1; + case _FOR_ITER: + return 2; + case _FOR_ITER_TIER_TWO: + return 2; + case FOR_ITER: + return 2; + case INSTRUMENTED_FOR_ITER: + return 0; + case _ITER_CHECK_LIST: + return 1; + case _ITER_JUMP_LIST: + return 1; + case _GUARD_NOT_EXHAUSTED_LIST: + return 1; + case _ITER_NEXT_LIST: + return 2; + case FOR_ITER_LIST: + return 2; + case _ITER_CHECK_TUPLE: + return 1; + case _ITER_JUMP_TUPLE: + return 1; + case _GUARD_NOT_EXHAUSTED_TUPLE: + return 1; + case _ITER_NEXT_TUPLE: + return 2; + case FOR_ITER_TUPLE: + return 2; + case _ITER_CHECK_RANGE: + return 1; + case _ITER_JUMP_RANGE: + return 1; + case _GUARD_NOT_EXHAUSTED_RANGE: + return 1; + case _ITER_NEXT_RANGE: + return 2; + case FOR_ITER_RANGE: + return 2; + case FOR_ITER_GEN: + return 2; + case BEFORE_ASYNC_WITH: + return 2; + case BEFORE_WITH: + return 2; + case WITH_EXCEPT_START: + return 5; + case SETUP_FINALLY: + return 0; + case SETUP_CLEANUP: + return 0; + case SETUP_WITH: + return 0; + case POP_BLOCK: + return 0; + case PUSH_EXC_INFO: + return 2; + case _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT: + return 1; + case _GUARD_KEYS_VERSION: + return 1; + case _LOAD_ATTR_METHOD_WITH_VALUES: + return 2; + case LOAD_ATTR_METHOD_WITH_VALUES: + return 2; + case _LOAD_ATTR_METHOD_NO_DICT: + return 2; + case LOAD_ATTR_METHOD_NO_DICT: + return 2; + case _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: + return 1; + case LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: + return 1; + case _LOAD_ATTR_NONDESCRIPTOR_NO_DICT: + return 1; + case LOAD_ATTR_NONDESCRIPTOR_NO_DICT: + return 1; + case _CHECK_ATTR_METHOD_LAZY_DICT: + return 1; + case _LOAD_ATTR_METHOD_LAZY_DICT: + return 2; + case LOAD_ATTR_METHOD_LAZY_DICT: + return 2; + case INSTRUMENTED_CALL: + return 0; + case _SPECIALIZE_CALL: + return oparg + 2; + case _CALL: + return 1; + case CALL: + return 1; + case _CHECK_CALL_BOUND_METHOD_EXACT_ARGS: + return oparg + 2; + case _INIT_CALL_BOUND_METHOD_EXACT_ARGS: + return oparg + 2; + case _CHECK_PEP_523: + return 0; + case _CHECK_FUNCTION_EXACT_ARGS: + return oparg + 2; + case _CHECK_STACK_SPACE: + 
return oparg + 2; + case _INIT_CALL_PY_EXACT_ARGS: + return 1; + case _PUSH_FRAME: + return 1; + case CALL_BOUND_METHOD_EXACT_ARGS: + return 1; + case CALL_PY_EXACT_ARGS: + return 1; + case CALL_PY_WITH_DEFAULTS: + return 1; + case CALL_TYPE_1: + return 1; + case CALL_STR_1: + return 1; + case CALL_TUPLE_1: + return 1; + case CALL_ALLOC_AND_ENTER_INIT: + return 1; + case EXIT_INIT_CHECK: + return 0; + case CALL_BUILTIN_CLASS: + return 1; + case CALL_BUILTIN_O: + return 1; + case CALL_BUILTIN_FAST: + return 1; + case CALL_BUILTIN_FAST_WITH_KEYWORDS: + return 1; + case CALL_LEN: + return 1; + case CALL_ISINSTANCE: + return 1; + case CALL_LIST_APPEND: + return 1; + case CALL_METHOD_DESCRIPTOR_O: + return 1; + case CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS: + return 1; + case CALL_METHOD_DESCRIPTOR_NOARGS: + return 1; + case CALL_METHOD_DESCRIPTOR_FAST: + return 1; + case INSTRUMENTED_CALL_KW: + return 0; + case CALL_KW: + return 1; + case INSTRUMENTED_CALL_FUNCTION_EX: + return 0; + case CALL_FUNCTION_EX: + return 1; + case MAKE_FUNCTION: + return 1; + case SET_FUNCTION_ATTRIBUTE: + return 1; + case RETURN_GENERATOR: + return 0; + case BUILD_SLICE: + return 1; + case CONVERT_VALUE: + return 1; + case FORMAT_SIMPLE: + return 1; + case FORMAT_WITH_SPEC: + return 1; + case COPY: + return (oparg-1) + 2; + case _SPECIALIZE_BINARY_OP: + return 2; + case _BINARY_OP: + return 1; + case BINARY_OP: + return 1; + case SWAP: + return (oparg-2) + 2; + case INSTRUMENTED_INSTRUCTION: + return 0; + case INSTRUMENTED_JUMP_FORWARD: + return 0; + case INSTRUMENTED_JUMP_BACKWARD: + return 0; + case INSTRUMENTED_POP_JUMP_IF_TRUE: + return 0; + case INSTRUMENTED_POP_JUMP_IF_FALSE: + return 0; + case INSTRUMENTED_POP_JUMP_IF_NONE: + return 0; + case INSTRUMENTED_POP_JUMP_IF_NOT_NONE: + return 0; + case EXTENDED_ARG: + return 0; + case CACHE: + return 0; + case RESERVED: + return 0; + case _GUARD_IS_TRUE_POP: + return 0; + case _GUARD_IS_FALSE_POP: + return 0; + case _GUARD_IS_NONE_POP: + return 0; + case _GUARD_IS_NOT_NONE_POP: + return 0; + case _JUMP_TO_TOP: + return 0; + case _SET_IP: + return 0; + case _SAVE_RETURN_OFFSET: + return 0; + case _EXIT_TRACE: + return 0; + case _INSERT: + return oparg + 1; + case _CHECK_VALIDITY: + return 0; + default: + return -1; + } +} +#endif // NEED_OPCODE_METADATA + +enum InstructionFormat { + INSTR_FMT_IB, + INSTR_FMT_IBC, + INSTR_FMT_IBC0, + INSTR_FMT_IBC00, + INSTR_FMT_IBC000, + INSTR_FMT_IBC0000000, + INSTR_FMT_IBC00000000, + INSTR_FMT_IX, + INSTR_FMT_IXC, + INSTR_FMT_IXC0, + INSTR_FMT_IXC00, + INSTR_FMT_IXC000, +}; + +#define IS_VALID_OPCODE(OP) \ + (((OP) >= 0) && ((OP) < OPCODE_METADATA_SIZE) && \ + (_PyOpcode_opcode_metadata[(OP)].valid_entry)) + +#define HAS_ARG_FLAG (1) +#define HAS_CONST_FLAG (2) +#define HAS_NAME_FLAG (4) +#define HAS_JUMP_FLAG (8) +#define HAS_FREE_FLAG (16) +#define HAS_LOCAL_FLAG (32) +#define HAS_EVAL_BREAK_FLAG (64) +#define HAS_DEOPT_FLAG (128) +#define HAS_ERROR_FLAG (256) +#define HAS_ESCAPES_FLAG (512) +#define OPCODE_HAS_ARG(OP) (_PyOpcode_opcode_metadata[OP].flags & (HAS_ARG_FLAG)) +#define OPCODE_HAS_CONST(OP) (_PyOpcode_opcode_metadata[OP].flags & (HAS_CONST_FLAG)) +#define OPCODE_HAS_NAME(OP) (_PyOpcode_opcode_metadata[OP].flags & (HAS_NAME_FLAG)) +#define OPCODE_HAS_JUMP(OP) (_PyOpcode_opcode_metadata[OP].flags & (HAS_JUMP_FLAG)) +#define OPCODE_HAS_FREE(OP) (_PyOpcode_opcode_metadata[OP].flags & (HAS_FREE_FLAG)) +#define OPCODE_HAS_LOCAL(OP) (_PyOpcode_opcode_metadata[OP].flags & (HAS_LOCAL_FLAG)) +#define OPCODE_HAS_EVAL_BREAK(OP) 
(_PyOpcode_opcode_metadata[OP].flags & (HAS_EVAL_BREAK_FLAG)) +#define OPCODE_HAS_DEOPT(OP) (_PyOpcode_opcode_metadata[OP].flags & (HAS_DEOPT_FLAG)) +#define OPCODE_HAS_ERROR(OP) (_PyOpcode_opcode_metadata[OP].flags & (HAS_ERROR_FLAG)) +#define OPCODE_HAS_ESCAPES(OP) (_PyOpcode_opcode_metadata[OP].flags & (HAS_ESCAPES_FLAG)) + +struct opcode_metadata { + bool valid_entry; + enum InstructionFormat instr_format; + int flags; +}; + +struct opcode_macro_expansion { + int nuops; + struct { int16_t uop; int8_t size; int8_t offset; } uops[12]; +}; + +#define OPARG_FULL 0 +#define OPARG_CACHE_1 1 +#define OPARG_CACHE_2 2 +#define OPARG_CACHE_4 4 +#define OPARG_TOP 5 +#define OPARG_BOTTOM 6 +#define OPARG_SAVE_RETURN_OFFSET 7 + +#define OPCODE_METADATA_FLAGS(OP) (_PyOpcode_opcode_metadata[(OP)].flags & (HAS_ARG_FLAG | HAS_JUMP_FLAG)) +#define SAME_OPCODE_METADATA(OP1, OP2) \ + (OPCODE_METADATA_FLAGS(OP1) == OPCODE_METADATA_FLAGS(OP2)) + +#define OPCODE_METADATA_SIZE 512 +#define OPCODE_UOP_NAME_SIZE 512 +#define OPCODE_MACRO_EXPANSION_SIZE 256 + +extern const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE]; +#ifdef NEED_OPCODE_METADATA +const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = { + [NOP] = { true, INSTR_FMT_IX, 0 }, + [RESUME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [RESUME_CHECK] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [INSTRUMENTED_RESUME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_CLOSURE] = { true, 0, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, + [LOAD_FAST_CHECK] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG | HAS_ERROR_FLAG }, + [LOAD_FAST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, + [LOAD_FAST_AND_CLEAR] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, + [LOAD_FAST_LOAD_FAST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, + [LOAD_CONST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_CONST_FLAG }, + [STORE_FAST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, + [STORE_FAST_MAYBE_NULL] = { true, 0, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, + [STORE_FAST_LOAD_FAST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, + [STORE_FAST_STORE_FAST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, + [POP_TOP] = { true, INSTR_FMT_IX, 0 }, + [PUSH_NULL] = { true, INSTR_FMT_IX, 0 }, + [END_FOR] = { true, INSTR_FMT_IX, 0 }, + [INSTRUMENTED_END_FOR] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [END_SEND] = { true, INSTR_FMT_IX, 0 }, + [INSTRUMENTED_END_SEND] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [UNARY_NEGATIVE] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [UNARY_NOT] = { true, INSTR_FMT_IX, 0 }, + [_SPECIALIZE_TO_BOOL] = { true, INSTR_FMT_IXC, HAS_ESCAPES_FLAG }, + [_TO_BOOL] = { true, INSTR_FMT_IXC0, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [TO_BOOL] = { true, INSTR_FMT_IXC00, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [TO_BOOL_BOOL] = { true, INSTR_FMT_IXC00, HAS_DEOPT_FLAG }, + [TO_BOOL_INT] = { true, INSTR_FMT_IXC00, HAS_DEOPT_FLAG }, + [TO_BOOL_LIST] = { true, INSTR_FMT_IXC00, HAS_DEOPT_FLAG }, + [TO_BOOL_NONE] = { true, INSTR_FMT_IXC00, HAS_DEOPT_FLAG }, + [TO_BOOL_STR] = { true, INSTR_FMT_IXC00, HAS_DEOPT_FLAG }, + [TO_BOOL_ALWAYS_TRUE] = { true, INSTR_FMT_IXC00, HAS_DEOPT_FLAG }, + [UNARY_INVERT] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_GUARD_BOTH_INT] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + 
[_BINARY_OP_MULTIPLY_INT] = { true, INSTR_FMT_IXC, HAS_ERROR_FLAG }, + [_BINARY_OP_ADD_INT] = { true, INSTR_FMT_IXC, HAS_ERROR_FLAG }, + [_BINARY_OP_SUBTRACT_INT] = { true, INSTR_FMT_IXC, HAS_ERROR_FLAG }, + [BINARY_OP_MULTIPLY_INT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG | HAS_ERROR_FLAG }, + [BINARY_OP_ADD_INT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG | HAS_ERROR_FLAG }, + [BINARY_OP_SUBTRACT_INT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG | HAS_ERROR_FLAG }, + [_GUARD_BOTH_FLOAT] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_BINARY_OP_MULTIPLY_FLOAT] = { true, INSTR_FMT_IXC, 0 }, + [_BINARY_OP_ADD_FLOAT] = { true, INSTR_FMT_IXC, 0 }, + [_BINARY_OP_SUBTRACT_FLOAT] = { true, INSTR_FMT_IXC, 0 }, + [BINARY_OP_MULTIPLY_FLOAT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG }, + [BINARY_OP_ADD_FLOAT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG }, + [BINARY_OP_SUBTRACT_FLOAT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG }, + [_GUARD_BOTH_UNICODE] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_BINARY_OP_ADD_UNICODE] = { true, INSTR_FMT_IXC, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BINARY_OP_ADD_UNICODE] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_BINARY_OP_INPLACE_ADD_UNICODE] = { true, INSTR_FMT_IXC, HAS_LOCAL_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BINARY_OP_INPLACE_ADD_UNICODE] = { true, INSTR_FMT_IXC, HAS_LOCAL_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_BINARY_SUBSCR] = { true, INSTR_FMT_IXC, HAS_ESCAPES_FLAG }, + [_BINARY_SUBSCR] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BINARY_SUBSCR] = { true, INSTR_FMT_IXC, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BINARY_SLICE] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [STORE_SLICE] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BINARY_SUBSCR_LIST_INT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG }, + [BINARY_SUBSCR_STR_INT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG }, + [BINARY_SUBSCR_TUPLE_INT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG }, + [BINARY_SUBSCR_DICT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BINARY_SUBSCR_GETITEM] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [LIST_APPEND] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG }, + [SET_ADD] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_STORE_SUBSCR] = { true, INSTR_FMT_IXC, HAS_ESCAPES_FLAG }, + [_STORE_SUBSCR] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [STORE_SUBSCR] = { true, INSTR_FMT_IXC, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [STORE_SUBSCR_LIST_INT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [STORE_SUBSCR_DICT] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [DELETE_SUBSCR] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_INTRINSIC_1] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_INTRINSIC_2] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [RAISE_VARARGS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [INTERPRETER_EXIT] = { true, INSTR_FMT_IX, HAS_ESCAPES_FLAG }, + [_POP_FRAME] = { true, INSTR_FMT_IX, HAS_ESCAPES_FLAG }, + [RETURN_VALUE] = { true, INSTR_FMT_IX, HAS_ESCAPES_FLAG }, + [INSTRUMENTED_RETURN_VALUE] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [RETURN_CONST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_CONST_FLAG | HAS_ESCAPES_FLAG }, + 
[INSTRUMENTED_RETURN_CONST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_CONST_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [GET_AITER] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [GET_ANEXT] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [GET_AWAITABLE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_SEND] = { true, INSTR_FMT_IXC, HAS_ESCAPES_FLAG }, + [_SEND] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [SEND] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [SEND_GEN] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [INSTRUMENTED_YIELD_VALUE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [YIELD_VALUE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [POP_EXCEPT] = { true, INSTR_FMT_IX, HAS_ESCAPES_FLAG }, + [RERAISE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [END_ASYNC_FOR] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CLEANUP_THROW] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_ASSERTION_ERROR] = { true, INSTR_FMT_IX, 0 }, + [LOAD_BUILD_CLASS] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [STORE_NAME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [DELETE_NAME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_UNPACK_SEQUENCE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [_UNPACK_SEQUENCE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [UNPACK_SEQUENCE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [UNPACK_SEQUENCE_TWO_TUPLE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [UNPACK_SEQUENCE_TUPLE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [UNPACK_SEQUENCE_LIST] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [UNPACK_EX] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_STORE_ATTR] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ESCAPES_FLAG }, + [_STORE_ATTR] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [STORE_ATTR] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [DELETE_ATTR] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [STORE_GLOBAL] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [DELETE_GLOBAL] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_LOCALS] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_FROM_DICT_OR_GLOBALS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_NAME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_LOAD_GLOBAL] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ESCAPES_FLAG }, + [_LOAD_GLOBAL] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_GLOBAL] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_GUARD_GLOBALS_VERSION] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG }, + 
[_GUARD_BUILTINS_VERSION] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG }, + [_LOAD_GLOBAL_MODULE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_LOAD_GLOBAL_BUILTINS] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [LOAD_GLOBAL_MODULE] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [LOAD_GLOBAL_BUILTIN] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [DELETE_FAST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG | HAS_ERROR_FLAG }, + [MAKE_CELL] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_FREE_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [DELETE_DEREF] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_FREE_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_FROM_DICT_OR_DEREF] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_FREE_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_DEREF] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_FREE_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [STORE_DEREF] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_FREE_FLAG | HAS_ESCAPES_FLAG }, + [COPY_FREE_VARS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG }, + [BUILD_STRING] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BUILD_TUPLE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BUILD_LIST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LIST_EXTEND] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [SET_UPDATE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BUILD_SET] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BUILD_MAP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [SETUP_ANNOTATIONS] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BUILD_CONST_KEY_MAP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [DICT_UPDATE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [DICT_MERGE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [MAP_ADD] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [INSTRUMENTED_LOAD_SUPER_ATTR] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG }, + [_SPECIALIZE_LOAD_SUPER_ATTR] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [_LOAD_SUPER_ATTR] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_SUPER_ATTR] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_SUPER_METHOD] = { true, 0, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_ZERO_SUPER_METHOD] = { true, 0, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_ZERO_SUPER_ATTR] = { true, 0, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_SUPER_ATTR_ATTR] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_SUPER_ATTR_METHOD] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_LOAD_ATTR] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ESCAPES_FLAG }, + [_LOAD_ATTR] = { true, INSTR_FMT_IBC0000000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_ATTR] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_METHOD] = { true, 0, HAS_ARG_FLAG | HAS_NAME_FLAG | 
HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_GUARD_TYPE_VERSION] = { true, INSTR_FMT_IXC0, HAS_DEOPT_FLAG }, + [_CHECK_MANAGED_OBJECT_HAS_VALUES] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_LOAD_ATTR_INSTANCE_VALUE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [LOAD_ATTR_INSTANCE_VALUE] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_CHECK_ATTR_MODULE] = { true, INSTR_FMT_IXC0, HAS_DEOPT_FLAG }, + [_LOAD_ATTR_MODULE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [LOAD_ATTR_MODULE] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_CHECK_ATTR_WITH_HINT] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [_LOAD_ATTR_WITH_HINT] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_ATTR_WITH_HINT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [_LOAD_ATTR_SLOT] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [LOAD_ATTR_SLOT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_CHECK_ATTR_CLASS] = { true, INSTR_FMT_IXC0, HAS_DEOPT_FLAG }, + [_LOAD_ATTR_CLASS] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG }, + [LOAD_ATTR_CLASS] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [LOAD_ATTR_PROPERTY] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [_GUARD_DORV_VALUES] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_STORE_ATTR_INSTANCE_VALUE] = { true, INSTR_FMT_IXC, HAS_ESCAPES_FLAG }, + [STORE_ATTR_INSTANCE_VALUE] = { true, INSTR_FMT_IXC000, HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [STORE_ATTR_WITH_HINT] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [_STORE_ATTR_SLOT] = { true, INSTR_FMT_IXC, HAS_ESCAPES_FLAG }, + [STORE_ATTR_SLOT] = { true, INSTR_FMT_IXC000, HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_COMPARE_OP] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [_COMPARE_OP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [COMPARE_OP] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [COMPARE_OP_FLOAT] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [COMPARE_OP_INT] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [COMPARE_OP_STR] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [IS_OP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG }, + [CONTAINS_OP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CHECK_EG_MATCH] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CHECK_EXC_MATCH] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [IMPORT_NAME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [IMPORT_FROM] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [JUMP_FORWARD] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [JUMP_BACKWARD] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [JUMP] = { true, 0, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [JUMP_NO_INTERRUPT] = { true, 0, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [ENTER_EXECUTOR] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG | 
HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG }, + [_POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [_POP_JUMP_IF_TRUE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [_IS_NONE] = { true, INSTR_FMT_IX, 0 }, + [POP_JUMP_IF_TRUE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [POP_JUMP_IF_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [POP_JUMP_IF_NOT_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [JUMP_BACKWARD_NO_INTERRUPT] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [GET_LEN] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [MATCH_CLASS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [MATCH_MAPPING] = { true, INSTR_FMT_IX, 0 }, + [MATCH_SEQUENCE] = { true, INSTR_FMT_IX, 0 }, + [MATCH_KEYS] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [GET_ITER] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [GET_YIELD_FROM_ITER] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_FOR_ITER] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [_FOR_ITER] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_FOR_ITER_TIER_TWO] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [FOR_ITER] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [INSTRUMENTED_FOR_ITER] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_ITER_CHECK_LIST] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_ITER_JUMP_LIST] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [_GUARD_NOT_EXHAUSTED_LIST] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_ITER_NEXT_LIST] = { true, INSTR_FMT_IX, 0 }, + [FOR_ITER_LIST] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_DEOPT_FLAG }, + [_ITER_CHECK_TUPLE] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_ITER_JUMP_TUPLE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [_GUARD_NOT_EXHAUSTED_TUPLE] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_ITER_NEXT_TUPLE] = { true, INSTR_FMT_IX, 0 }, + [FOR_ITER_TUPLE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_DEOPT_FLAG }, + [_ITER_CHECK_RANGE] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_ITER_JUMP_RANGE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG }, + [_GUARD_NOT_EXHAUSTED_RANGE] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_ITER_NEXT_RANGE] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [FOR_ITER_RANGE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [FOR_ITER_GEN] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [BEFORE_ASYNC_WITH] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BEFORE_WITH] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [WITH_EXCEPT_START] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [SETUP_FINALLY] = { true, 0, 0 }, + [SETUP_CLEANUP] = { true, 0, 0 }, + [SETUP_WITH] = { true, 0, 0 }, + [POP_BLOCK] = { true, 0, 0 }, + [PUSH_EXC_INFO] = { true, INSTR_FMT_IX, 0 }, + [_GUARD_DORV_VALUES_INST_ATTR_FROM_DICT] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_GUARD_KEYS_VERSION] = { true, INSTR_FMT_IXC0, HAS_DEOPT_FLAG }, + [_LOAD_ATTR_METHOD_WITH_VALUES] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + 
[LOAD_ATTR_METHOD_WITH_VALUES] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [_LOAD_ATTR_METHOD_NO_DICT] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_ATTR_METHOD_NO_DICT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [_LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG }, + [LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG }, + [LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_CHECK_ATTR_METHOD_LAZY_DICT] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_LOAD_ATTR_METHOD_LAZY_DICT] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [LOAD_ATTR_METHOD_LAZY_DICT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [INSTRUMENTED_CALL] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_SPECIALIZE_CALL] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [_CALL] = { true, INSTR_FMT_IBC0, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [_CHECK_CALL_BOUND_METHOD_EXACT_ARGS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_INIT_CALL_BOUND_METHOD_EXACT_ARGS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG }, + [_CHECK_PEP_523] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_CHECK_FUNCTION_EXACT_ARGS] = { true, INSTR_FMT_IBC0, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_CHECK_STACK_SPACE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_INIT_CALL_PY_EXACT_ARGS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [_PUSH_FRAME] = { true, INSTR_FMT_IX, 0 }, + [CALL_BOUND_METHOD_EXACT_ARGS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [CALL_PY_EXACT_ARGS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [CALL_PY_WITH_DEFAULTS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG }, + [CALL_TYPE_1] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [CALL_STR_1] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_TUPLE_1] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_ALLOC_AND_ENTER_INIT] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [EXIT_INIT_CHECK] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_BUILTIN_CLASS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG }, + [CALL_BUILTIN_O] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_BUILTIN_FAST] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_BUILTIN_FAST_WITH_KEYWORDS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_LEN] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_ISINSTANCE] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | 
HAS_ESCAPES_FLAG }, + [CALL_LIST_APPEND] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG }, + [CALL_METHOD_DESCRIPTOR_O] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_METHOD_DESCRIPTOR_NOARGS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_METHOD_DESCRIPTOR_FAST] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [INSTRUMENTED_CALL_KW] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CALL_KW] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [INSTRUMENTED_CALL_FUNCTION_EX] = { true, INSTR_FMT_IX, 0 }, + [CALL_FUNCTION_EX] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [MAKE_FUNCTION] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [SET_FUNCTION_ATTRIBUTE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [RETURN_GENERATOR] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [BUILD_SLICE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [CONVERT_VALUE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG }, + [FORMAT_SIMPLE] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [FORMAT_WITH_SPEC] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [COPY] = { true, INSTR_FMT_IB, HAS_ARG_FLAG }, + [_SPECIALIZE_BINARY_OP] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [_BINARY_OP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG }, + [BINARY_OP] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [SWAP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG }, + [INSTRUMENTED_INSTRUCTION] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, + [INSTRUMENTED_JUMP_FORWARD] = { true, INSTR_FMT_IB, HAS_ARG_FLAG }, + [INSTRUMENTED_JUMP_BACKWARD] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG }, + [INSTRUMENTED_POP_JUMP_IF_TRUE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG }, + [INSTRUMENTED_POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG }, + [INSTRUMENTED_POP_JUMP_IF_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG }, + [INSTRUMENTED_POP_JUMP_IF_NOT_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG }, + [EXTENDED_ARG] = { true, INSTR_FMT_IB, HAS_ARG_FLAG }, + [CACHE] = { true, INSTR_FMT_IX, HAS_ESCAPES_FLAG }, + [RESERVED] = { true, INSTR_FMT_IX, HAS_ESCAPES_FLAG }, + [_GUARD_IS_TRUE_POP] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_GUARD_IS_FALSE_POP] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_GUARD_IS_NONE_POP] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_GUARD_IS_NOT_NONE_POP] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_JUMP_TO_TOP] = { true, INSTR_FMT_IX, HAS_EVAL_BREAK_FLAG }, + [_SET_IP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ESCAPES_FLAG }, + [_SAVE_RETURN_OFFSET] = { true, INSTR_FMT_IB, HAS_ARG_FLAG }, + [_EXIT_TRACE] = { true, INSTR_FMT_IX, 0 }, + [_INSERT] = { true, INSTR_FMT_IB, HAS_ARG_FLAG }, + [_CHECK_VALIDITY] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, +}; +#endif // NEED_OPCODE_METADATA + +extern const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACRO_EXPANSION_SIZE]; +#ifdef 
NEED_OPCODE_METADATA +const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACRO_EXPANSION_SIZE] = { + [NOP] = { .nuops = 1, .uops = { { NOP, 0, 0 } } }, + [RESUME_CHECK] = { .nuops = 1, .uops = { { RESUME_CHECK, 0, 0 } } }, + [LOAD_FAST_CHECK] = { .nuops = 1, .uops = { { LOAD_FAST_CHECK, 0, 0 } } }, + [LOAD_FAST] = { .nuops = 1, .uops = { { LOAD_FAST, 0, 0 } } }, + [LOAD_FAST_AND_CLEAR] = { .nuops = 1, .uops = { { LOAD_FAST_AND_CLEAR, 0, 0 } } }, + [LOAD_FAST_LOAD_FAST] = { .nuops = 2, .uops = { { LOAD_FAST, 5, 0 }, { LOAD_FAST, 6, 0 } } }, + [LOAD_CONST] = { .nuops = 1, .uops = { { LOAD_CONST, 0, 0 } } }, + [STORE_FAST] = { .nuops = 1, .uops = { { STORE_FAST, 0, 0 } } }, + [STORE_FAST_LOAD_FAST] = { .nuops = 2, .uops = { { STORE_FAST, 5, 0 }, { LOAD_FAST, 6, 0 } } }, + [STORE_FAST_STORE_FAST] = { .nuops = 2, .uops = { { STORE_FAST, 5, 0 }, { STORE_FAST, 6, 0 } } }, + [POP_TOP] = { .nuops = 1, .uops = { { POP_TOP, 0, 0 } } }, + [PUSH_NULL] = { .nuops = 1, .uops = { { PUSH_NULL, 0, 0 } } }, + [END_FOR] = { .nuops = 2, .uops = { { POP_TOP, 0, 0 }, { POP_TOP, 0, 0 } } }, + [END_SEND] = { .nuops = 1, .uops = { { END_SEND, 0, 0 } } }, + [UNARY_NEGATIVE] = { .nuops = 1, .uops = { { UNARY_NEGATIVE, 0, 0 } } }, + [UNARY_NOT] = { .nuops = 1, .uops = { { UNARY_NOT, 0, 0 } } }, + [TO_BOOL] = { .nuops = 1, .uops = { { _TO_BOOL, 0, 0 } } }, + [TO_BOOL_BOOL] = { .nuops = 1, .uops = { { TO_BOOL_BOOL, 0, 0 } } }, + [TO_BOOL_INT] = { .nuops = 1, .uops = { { TO_BOOL_INT, 0, 0 } } }, + [TO_BOOL_LIST] = { .nuops = 1, .uops = { { TO_BOOL_LIST, 0, 0 } } }, + [TO_BOOL_NONE] = { .nuops = 1, .uops = { { TO_BOOL_NONE, 0, 0 } } }, + [TO_BOOL_STR] = { .nuops = 1, .uops = { { TO_BOOL_STR, 0, 0 } } }, + [TO_BOOL_ALWAYS_TRUE] = { .nuops = 1, .uops = { { TO_BOOL_ALWAYS_TRUE, 2, 1 } } }, + [UNARY_INVERT] = { .nuops = 1, .uops = { { UNARY_INVERT, 0, 0 } } }, + [BINARY_OP_MULTIPLY_INT] = { .nuops = 2, .uops = { { _GUARD_BOTH_INT, 0, 0 }, { _BINARY_OP_MULTIPLY_INT, 0, 0 } } }, + [BINARY_OP_ADD_INT] = { .nuops = 2, .uops = { { _GUARD_BOTH_INT, 0, 0 }, { _BINARY_OP_ADD_INT, 0, 0 } } }, + [BINARY_OP_SUBTRACT_INT] = { .nuops = 2, .uops = { { _GUARD_BOTH_INT, 0, 0 }, { _BINARY_OP_SUBTRACT_INT, 0, 0 } } }, + [BINARY_OP_MULTIPLY_FLOAT] = { .nuops = 2, .uops = { { _GUARD_BOTH_FLOAT, 0, 0 }, { _BINARY_OP_MULTIPLY_FLOAT, 0, 0 } } }, + [BINARY_OP_ADD_FLOAT] = { .nuops = 2, .uops = { { _GUARD_BOTH_FLOAT, 0, 0 }, { _BINARY_OP_ADD_FLOAT, 0, 0 } } }, + [BINARY_OP_SUBTRACT_FLOAT] = { .nuops = 2, .uops = { { _GUARD_BOTH_FLOAT, 0, 0 }, { _BINARY_OP_SUBTRACT_FLOAT, 0, 0 } } }, + [BINARY_OP_ADD_UNICODE] = { .nuops = 2, .uops = { { _GUARD_BOTH_UNICODE, 0, 0 }, { _BINARY_OP_ADD_UNICODE, 0, 0 } } }, + [BINARY_SUBSCR] = { .nuops = 1, .uops = { { _BINARY_SUBSCR, 0, 0 } } }, + [BINARY_SLICE] = { .nuops = 1, .uops = { { BINARY_SLICE, 0, 0 } } }, + [STORE_SLICE] = { .nuops = 1, .uops = { { STORE_SLICE, 0, 0 } } }, + [BINARY_SUBSCR_LIST_INT] = { .nuops = 1, .uops = { { BINARY_SUBSCR_LIST_INT, 0, 0 } } }, + [BINARY_SUBSCR_STR_INT] = { .nuops = 1, .uops = { { BINARY_SUBSCR_STR_INT, 0, 0 } } }, + [BINARY_SUBSCR_TUPLE_INT] = { .nuops = 1, .uops = { { BINARY_SUBSCR_TUPLE_INT, 0, 0 } } }, + [BINARY_SUBSCR_DICT] = { .nuops = 1, .uops = { { BINARY_SUBSCR_DICT, 0, 0 } } }, + [LIST_APPEND] = { .nuops = 1, .uops = { { LIST_APPEND, 0, 0 } } }, + [SET_ADD] = { .nuops = 1, .uops = { { SET_ADD, 0, 0 } } }, + [STORE_SUBSCR] = { .nuops = 1, .uops = { { _STORE_SUBSCR, 0, 0 } } }, + [STORE_SUBSCR_LIST_INT] = { .nuops = 1, .uops = { { 
STORE_SUBSCR_LIST_INT, 0, 0 } } }, + [STORE_SUBSCR_DICT] = { .nuops = 1, .uops = { { STORE_SUBSCR_DICT, 0, 0 } } }, + [DELETE_SUBSCR] = { .nuops = 1, .uops = { { DELETE_SUBSCR, 0, 0 } } }, + [CALL_INTRINSIC_1] = { .nuops = 1, .uops = { { CALL_INTRINSIC_1, 0, 0 } } }, + [CALL_INTRINSIC_2] = { .nuops = 1, .uops = { { CALL_INTRINSIC_2, 0, 0 } } }, + [RETURN_VALUE] = { .nuops = 1, .uops = { { _POP_FRAME, 0, 0 } } }, + [RETURN_CONST] = { .nuops = 2, .uops = { { LOAD_CONST, 0, 0 }, { _POP_FRAME, 0, 0 } } }, + [GET_AITER] = { .nuops = 1, .uops = { { GET_AITER, 0, 0 } } }, + [GET_ANEXT] = { .nuops = 1, .uops = { { GET_ANEXT, 0, 0 } } }, + [GET_AWAITABLE] = { .nuops = 1, .uops = { { GET_AWAITABLE, 0, 0 } } }, + [POP_EXCEPT] = { .nuops = 1, .uops = { { POP_EXCEPT, 0, 0 } } }, + [LOAD_ASSERTION_ERROR] = { .nuops = 1, .uops = { { LOAD_ASSERTION_ERROR, 0, 0 } } }, + [LOAD_BUILD_CLASS] = { .nuops = 1, .uops = { { LOAD_BUILD_CLASS, 0, 0 } } }, + [STORE_NAME] = { .nuops = 1, .uops = { { STORE_NAME, 0, 0 } } }, + [DELETE_NAME] = { .nuops = 1, .uops = { { DELETE_NAME, 0, 0 } } }, + [UNPACK_SEQUENCE] = { .nuops = 1, .uops = { { _UNPACK_SEQUENCE, 0, 0 } } }, + [UNPACK_SEQUENCE_TWO_TUPLE] = { .nuops = 1, .uops = { { UNPACK_SEQUENCE_TWO_TUPLE, 0, 0 } } }, + [UNPACK_SEQUENCE_TUPLE] = { .nuops = 1, .uops = { { UNPACK_SEQUENCE_TUPLE, 0, 0 } } }, + [UNPACK_SEQUENCE_LIST] = { .nuops = 1, .uops = { { UNPACK_SEQUENCE_LIST, 0, 0 } } }, + [UNPACK_EX] = { .nuops = 1, .uops = { { UNPACK_EX, 0, 0 } } }, + [STORE_ATTR] = { .nuops = 1, .uops = { { _STORE_ATTR, 0, 0 } } }, + [DELETE_ATTR] = { .nuops = 1, .uops = { { DELETE_ATTR, 0, 0 } } }, + [STORE_GLOBAL] = { .nuops = 1, .uops = { { STORE_GLOBAL, 0, 0 } } }, + [DELETE_GLOBAL] = { .nuops = 1, .uops = { { DELETE_GLOBAL, 0, 0 } } }, + [LOAD_LOCALS] = { .nuops = 1, .uops = { { LOAD_LOCALS, 0, 0 } } }, + [LOAD_FROM_DICT_OR_GLOBALS] = { .nuops = 1, .uops = { { LOAD_FROM_DICT_OR_GLOBALS, 0, 0 } } }, + [LOAD_NAME] = { .nuops = 1, .uops = { { LOAD_NAME, 0, 0 } } }, + [LOAD_GLOBAL] = { .nuops = 1, .uops = { { _LOAD_GLOBAL, 0, 0 } } }, + [LOAD_GLOBAL_MODULE] = { .nuops = 2, .uops = { { _GUARD_GLOBALS_VERSION, 1, 1 }, { _LOAD_GLOBAL_MODULE, 1, 3 } } }, + [LOAD_GLOBAL_BUILTIN] = { .nuops = 3, .uops = { { _GUARD_GLOBALS_VERSION, 1, 1 }, { _GUARD_BUILTINS_VERSION, 1, 2 }, { _LOAD_GLOBAL_BUILTINS, 1, 3 } } }, + [DELETE_FAST] = { .nuops = 1, .uops = { { DELETE_FAST, 0, 0 } } }, + [MAKE_CELL] = { .nuops = 1, .uops = { { MAKE_CELL, 0, 0 } } }, + [DELETE_DEREF] = { .nuops = 1, .uops = { { DELETE_DEREF, 0, 0 } } }, + [LOAD_FROM_DICT_OR_DEREF] = { .nuops = 1, .uops = { { LOAD_FROM_DICT_OR_DEREF, 0, 0 } } }, + [LOAD_DEREF] = { .nuops = 1, .uops = { { LOAD_DEREF, 0, 0 } } }, + [STORE_DEREF] = { .nuops = 1, .uops = { { STORE_DEREF, 0, 0 } } }, + [COPY_FREE_VARS] = { .nuops = 1, .uops = { { COPY_FREE_VARS, 0, 0 } } }, + [BUILD_STRING] = { .nuops = 1, .uops = { { BUILD_STRING, 0, 0 } } }, + [BUILD_TUPLE] = { .nuops = 1, .uops = { { BUILD_TUPLE, 0, 0 } } }, + [BUILD_LIST] = { .nuops = 1, .uops = { { BUILD_LIST, 0, 0 } } }, + [LIST_EXTEND] = { .nuops = 1, .uops = { { LIST_EXTEND, 0, 0 } } }, + [SET_UPDATE] = { .nuops = 1, .uops = { { SET_UPDATE, 0, 0 } } }, + [BUILD_SET] = { .nuops = 1, .uops = { { BUILD_SET, 0, 0 } } }, + [BUILD_MAP] = { .nuops = 1, .uops = { { BUILD_MAP, 0, 0 } } }, + [SETUP_ANNOTATIONS] = { .nuops = 1, .uops = { { SETUP_ANNOTATIONS, 0, 0 } } }, + [BUILD_CONST_KEY_MAP] = { .nuops = 1, .uops = { { BUILD_CONST_KEY_MAP, 0, 0 } } }, + [DICT_UPDATE] = { .nuops = 1, .uops = { { 
DICT_UPDATE, 0, 0 } } }, + [DICT_MERGE] = { .nuops = 1, .uops = { { DICT_MERGE, 0, 0 } } }, + [MAP_ADD] = { .nuops = 1, .uops = { { MAP_ADD, 0, 0 } } }, + [LOAD_SUPER_ATTR_ATTR] = { .nuops = 1, .uops = { { LOAD_SUPER_ATTR_ATTR, 0, 0 } } }, + [LOAD_SUPER_ATTR_METHOD] = { .nuops = 1, .uops = { { LOAD_SUPER_ATTR_METHOD, 0, 0 } } }, + [LOAD_ATTR] = { .nuops = 1, .uops = { { _LOAD_ATTR, 0, 0 } } }, + [LOAD_ATTR_INSTANCE_VALUE] = { .nuops = 3, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _CHECK_MANAGED_OBJECT_HAS_VALUES, 0, 0 }, { _LOAD_ATTR_INSTANCE_VALUE, 1, 3 } } }, + [LOAD_ATTR_MODULE] = { .nuops = 2, .uops = { { _CHECK_ATTR_MODULE, 2, 1 }, { _LOAD_ATTR_MODULE, 1, 3 } } }, + [LOAD_ATTR_WITH_HINT] = { .nuops = 3, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _CHECK_ATTR_WITH_HINT, 0, 0 }, { _LOAD_ATTR_WITH_HINT, 1, 3 } } }, + [LOAD_ATTR_SLOT] = { .nuops = 2, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _LOAD_ATTR_SLOT, 1, 3 } } }, + [LOAD_ATTR_CLASS] = { .nuops = 2, .uops = { { _CHECK_ATTR_CLASS, 2, 1 }, { _LOAD_ATTR_CLASS, 4, 5 } } }, + [STORE_ATTR_INSTANCE_VALUE] = { .nuops = 3, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _GUARD_DORV_VALUES, 0, 0 }, { _STORE_ATTR_INSTANCE_VALUE, 1, 3 } } }, + [STORE_ATTR_SLOT] = { .nuops = 2, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _STORE_ATTR_SLOT, 1, 3 } } }, + [COMPARE_OP] = { .nuops = 1, .uops = { { _COMPARE_OP, 0, 0 } } }, + [COMPARE_OP_FLOAT] = { .nuops = 1, .uops = { { COMPARE_OP_FLOAT, 0, 0 } } }, + [COMPARE_OP_INT] = { .nuops = 1, .uops = { { COMPARE_OP_INT, 0, 0 } } }, + [COMPARE_OP_STR] = { .nuops = 1, .uops = { { COMPARE_OP_STR, 0, 0 } } }, + [IS_OP] = { .nuops = 1, .uops = { { IS_OP, 0, 0 } } }, + [CONTAINS_OP] = { .nuops = 1, .uops = { { CONTAINS_OP, 0, 0 } } }, + [CHECK_EG_MATCH] = { .nuops = 1, .uops = { { CHECK_EG_MATCH, 0, 0 } } }, + [CHECK_EXC_MATCH] = { .nuops = 1, .uops = { { CHECK_EXC_MATCH, 0, 0 } } }, + [POP_JUMP_IF_TRUE] = { .nuops = 1, .uops = { { _POP_JUMP_IF_TRUE, 0, 0 } } }, + [POP_JUMP_IF_FALSE] = { .nuops = 1, .uops = { { _POP_JUMP_IF_FALSE, 0, 0 } } }, + [POP_JUMP_IF_NONE] = { .nuops = 2, .uops = { { _IS_NONE, 0, 0 }, { _POP_JUMP_IF_TRUE, 0, 0 } } }, + [POP_JUMP_IF_NOT_NONE] = { .nuops = 2, .uops = { { _IS_NONE, 0, 0 }, { _POP_JUMP_IF_FALSE, 0, 0 } } }, + [GET_LEN] = { .nuops = 1, .uops = { { GET_LEN, 0, 0 } } }, + [MATCH_CLASS] = { .nuops = 1, .uops = { { MATCH_CLASS, 0, 0 } } }, + [MATCH_MAPPING] = { .nuops = 1, .uops = { { MATCH_MAPPING, 0, 0 } } }, + [MATCH_SEQUENCE] = { .nuops = 1, .uops = { { MATCH_SEQUENCE, 0, 0 } } }, + [MATCH_KEYS] = { .nuops = 1, .uops = { { MATCH_KEYS, 0, 0 } } }, + [GET_ITER] = { .nuops = 1, .uops = { { GET_ITER, 0, 0 } } }, + [GET_YIELD_FROM_ITER] = { .nuops = 1, .uops = { { GET_YIELD_FROM_ITER, 0, 0 } } }, + [FOR_ITER] = { .nuops = 1, .uops = { { _FOR_ITER, 0, 0 } } }, + [FOR_ITER_LIST] = { .nuops = 3, .uops = { { _ITER_CHECK_LIST, 0, 0 }, { _ITER_JUMP_LIST, 0, 0 }, { _ITER_NEXT_LIST, 0, 0 } } }, + [FOR_ITER_TUPLE] = { .nuops = 3, .uops = { { _ITER_CHECK_TUPLE, 0, 0 }, { _ITER_JUMP_TUPLE, 0, 0 }, { _ITER_NEXT_TUPLE, 0, 0 } } }, + [FOR_ITER_RANGE] = { .nuops = 3, .uops = { { _ITER_CHECK_RANGE, 0, 0 }, { _ITER_JUMP_RANGE, 0, 0 }, { _ITER_NEXT_RANGE, 0, 0 } } }, + [BEFORE_ASYNC_WITH] = { .nuops = 1, .uops = { { BEFORE_ASYNC_WITH, 0, 0 } } }, + [BEFORE_WITH] = { .nuops = 1, .uops = { { BEFORE_WITH, 0, 0 } } }, + [WITH_EXCEPT_START] = { .nuops = 1, .uops = { { WITH_EXCEPT_START, 0, 0 } } }, + [PUSH_EXC_INFO] = { .nuops = 1, .uops = { { PUSH_EXC_INFO, 0, 0 } } }, + [LOAD_ATTR_METHOD_WITH_VALUES] = { .nuops 
= 4, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT, 0, 0 }, { _GUARD_KEYS_VERSION, 2, 3 }, { _LOAD_ATTR_METHOD_WITH_VALUES, 4, 5 } } }, + [LOAD_ATTR_METHOD_NO_DICT] = { .nuops = 2, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _LOAD_ATTR_METHOD_NO_DICT, 4, 5 } } }, + [LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = { .nuops = 4, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT, 0, 0 }, { _GUARD_KEYS_VERSION, 2, 3 }, { _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES, 4, 5 } } }, + [LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = { .nuops = 2, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _LOAD_ATTR_NONDESCRIPTOR_NO_DICT, 4, 5 } } }, + [LOAD_ATTR_METHOD_LAZY_DICT] = { .nuops = 3, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _CHECK_ATTR_METHOD_LAZY_DICT, 0, 0 }, { _LOAD_ATTR_METHOD_LAZY_DICT, 4, 5 } } }, + [CALL_BOUND_METHOD_EXACT_ARGS] = { .nuops = 8, .uops = { { _CHECK_PEP_523, 0, 0 }, { _CHECK_CALL_BOUND_METHOD_EXACT_ARGS, 0, 0 }, { _INIT_CALL_BOUND_METHOD_EXACT_ARGS, 0, 0 }, { _CHECK_FUNCTION_EXACT_ARGS, 2, 1 }, { _CHECK_STACK_SPACE, 0, 0 }, { _INIT_CALL_PY_EXACT_ARGS, 0, 0 }, { _SAVE_RETURN_OFFSET, 7, 3 }, { _PUSH_FRAME, 0, 0 } } }, + [CALL_PY_EXACT_ARGS] = { .nuops = 6, .uops = { { _CHECK_PEP_523, 0, 0 }, { _CHECK_FUNCTION_EXACT_ARGS, 2, 1 }, { _CHECK_STACK_SPACE, 0, 0 }, { _INIT_CALL_PY_EXACT_ARGS, 0, 0 }, { _SAVE_RETURN_OFFSET, 7, 3 }, { _PUSH_FRAME, 0, 0 } } }, + [CALL_TYPE_1] = { .nuops = 1, .uops = { { CALL_TYPE_1, 0, 0 } } }, + [CALL_STR_1] = { .nuops = 1, .uops = { { CALL_STR_1, 0, 0 } } }, + [CALL_TUPLE_1] = { .nuops = 1, .uops = { { CALL_TUPLE_1, 0, 0 } } }, + [EXIT_INIT_CHECK] = { .nuops = 1, .uops = { { EXIT_INIT_CHECK, 0, 0 } } }, + [CALL_BUILTIN_CLASS] = { .nuops = 1, .uops = { { CALL_BUILTIN_CLASS, 0, 0 } } }, + [CALL_BUILTIN_O] = { .nuops = 1, .uops = { { CALL_BUILTIN_O, 0, 0 } } }, + [CALL_BUILTIN_FAST] = { .nuops = 1, .uops = { { CALL_BUILTIN_FAST, 0, 0 } } }, + [CALL_BUILTIN_FAST_WITH_KEYWORDS] = { .nuops = 1, .uops = { { CALL_BUILTIN_FAST_WITH_KEYWORDS, 0, 0 } } }, + [CALL_LEN] = { .nuops = 1, .uops = { { CALL_LEN, 0, 0 } } }, + [CALL_ISINSTANCE] = { .nuops = 1, .uops = { { CALL_ISINSTANCE, 0, 0 } } }, + [CALL_METHOD_DESCRIPTOR_O] = { .nuops = 1, .uops = { { CALL_METHOD_DESCRIPTOR_O, 0, 0 } } }, + [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = { .nuops = 1, .uops = { { CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS, 0, 0 } } }, + [CALL_METHOD_DESCRIPTOR_NOARGS] = { .nuops = 1, .uops = { { CALL_METHOD_DESCRIPTOR_NOARGS, 0, 0 } } }, + [CALL_METHOD_DESCRIPTOR_FAST] = { .nuops = 1, .uops = { { CALL_METHOD_DESCRIPTOR_FAST, 0, 0 } } }, + [MAKE_FUNCTION] = { .nuops = 1, .uops = { { MAKE_FUNCTION, 0, 0 } } }, + [SET_FUNCTION_ATTRIBUTE] = { .nuops = 1, .uops = { { SET_FUNCTION_ATTRIBUTE, 0, 0 } } }, + [BUILD_SLICE] = { .nuops = 1, .uops = { { BUILD_SLICE, 0, 0 } } }, + [CONVERT_VALUE] = { .nuops = 1, .uops = { { CONVERT_VALUE, 0, 0 } } }, + [FORMAT_SIMPLE] = { .nuops = 1, .uops = { { FORMAT_SIMPLE, 0, 0 } } }, + [FORMAT_WITH_SPEC] = { .nuops = 1, .uops = { { FORMAT_WITH_SPEC, 0, 0 } } }, + [COPY] = { .nuops = 1, .uops = { { COPY, 0, 0 } } }, + [BINARY_OP] = { .nuops = 1, .uops = { { _BINARY_OP, 0, 0 } } }, + [SWAP] = { .nuops = 1, .uops = { { SWAP, 0, 0 } } }, +}; +#endif // NEED_OPCODE_METADATA + +extern const char * const _PyOpcode_uop_name[OPCODE_UOP_NAME_SIZE]; +#ifdef NEED_OPCODE_METADATA +const char * const _PyOpcode_uop_name[OPCODE_UOP_NAME_SIZE] = { + [_EXIT_TRACE] = "_EXIT_TRACE", + [_SET_IP] = "_SET_IP", + [_SPECIALIZE_TO_BOOL] = 
"_SPECIALIZE_TO_BOOL", + [_TO_BOOL] = "_TO_BOOL", + [_GUARD_BOTH_INT] = "_GUARD_BOTH_INT", + [_BINARY_OP_MULTIPLY_INT] = "_BINARY_OP_MULTIPLY_INT", + [_BINARY_OP_ADD_INT] = "_BINARY_OP_ADD_INT", + [_BINARY_OP_SUBTRACT_INT] = "_BINARY_OP_SUBTRACT_INT", + [_GUARD_BOTH_FLOAT] = "_GUARD_BOTH_FLOAT", + [_BINARY_OP_MULTIPLY_FLOAT] = "_BINARY_OP_MULTIPLY_FLOAT", + [_BINARY_OP_ADD_FLOAT] = "_BINARY_OP_ADD_FLOAT", + [_BINARY_OP_SUBTRACT_FLOAT] = "_BINARY_OP_SUBTRACT_FLOAT", + [_GUARD_BOTH_UNICODE] = "_GUARD_BOTH_UNICODE", + [_BINARY_OP_ADD_UNICODE] = "_BINARY_OP_ADD_UNICODE", + [_BINARY_OP_INPLACE_ADD_UNICODE] = "_BINARY_OP_INPLACE_ADD_UNICODE", + [_SPECIALIZE_BINARY_SUBSCR] = "_SPECIALIZE_BINARY_SUBSCR", + [_BINARY_SUBSCR] = "_BINARY_SUBSCR", + [_SPECIALIZE_STORE_SUBSCR] = "_SPECIALIZE_STORE_SUBSCR", + [_STORE_SUBSCR] = "_STORE_SUBSCR", + [_POP_FRAME] = "_POP_FRAME", + [_SPECIALIZE_SEND] = "_SPECIALIZE_SEND", + [_SEND] = "_SEND", + [_SPECIALIZE_UNPACK_SEQUENCE] = "_SPECIALIZE_UNPACK_SEQUENCE", + [_UNPACK_SEQUENCE] = "_UNPACK_SEQUENCE", + [_SPECIALIZE_STORE_ATTR] = "_SPECIALIZE_STORE_ATTR", + [_STORE_ATTR] = "_STORE_ATTR", + [_SPECIALIZE_LOAD_GLOBAL] = "_SPECIALIZE_LOAD_GLOBAL", + [_LOAD_GLOBAL] = "_LOAD_GLOBAL", + [_GUARD_GLOBALS_VERSION] = "_GUARD_GLOBALS_VERSION", + [_GUARD_BUILTINS_VERSION] = "_GUARD_BUILTINS_VERSION", + [_LOAD_GLOBAL_MODULE] = "_LOAD_GLOBAL_MODULE", + [_LOAD_GLOBAL_BUILTINS] = "_LOAD_GLOBAL_BUILTINS", + [_SPECIALIZE_LOAD_SUPER_ATTR] = "_SPECIALIZE_LOAD_SUPER_ATTR", + [_LOAD_SUPER_ATTR] = "_LOAD_SUPER_ATTR", + [_SPECIALIZE_LOAD_ATTR] = "_SPECIALIZE_LOAD_ATTR", + [_LOAD_ATTR] = "_LOAD_ATTR", + [_GUARD_TYPE_VERSION] = "_GUARD_TYPE_VERSION", + [_CHECK_MANAGED_OBJECT_HAS_VALUES] = "_CHECK_MANAGED_OBJECT_HAS_VALUES", + [_LOAD_ATTR_INSTANCE_VALUE] = "_LOAD_ATTR_INSTANCE_VALUE", + [_CHECK_ATTR_MODULE] = "_CHECK_ATTR_MODULE", + [_LOAD_ATTR_MODULE] = "_LOAD_ATTR_MODULE", + [_CHECK_ATTR_WITH_HINT] = "_CHECK_ATTR_WITH_HINT", + [_LOAD_ATTR_WITH_HINT] = "_LOAD_ATTR_WITH_HINT", + [_LOAD_ATTR_SLOT] = "_LOAD_ATTR_SLOT", + [_CHECK_ATTR_CLASS] = "_CHECK_ATTR_CLASS", + [_LOAD_ATTR_CLASS] = "_LOAD_ATTR_CLASS", + [_GUARD_DORV_VALUES] = "_GUARD_DORV_VALUES", + [_STORE_ATTR_INSTANCE_VALUE] = "_STORE_ATTR_INSTANCE_VALUE", + [_STORE_ATTR_SLOT] = "_STORE_ATTR_SLOT", + [_SPECIALIZE_COMPARE_OP] = "_SPECIALIZE_COMPARE_OP", + [_COMPARE_OP] = "_COMPARE_OP", + [_POP_JUMP_IF_FALSE] = "_POP_JUMP_IF_FALSE", + [_POP_JUMP_IF_TRUE] = "_POP_JUMP_IF_TRUE", + [_IS_NONE] = "_IS_NONE", + [_SPECIALIZE_FOR_ITER] = "_SPECIALIZE_FOR_ITER", + [_FOR_ITER] = "_FOR_ITER", + [_FOR_ITER_TIER_TWO] = "_FOR_ITER_TIER_TWO", + [_ITER_CHECK_LIST] = "_ITER_CHECK_LIST", + [_ITER_JUMP_LIST] = "_ITER_JUMP_LIST", + [_GUARD_NOT_EXHAUSTED_LIST] = "_GUARD_NOT_EXHAUSTED_LIST", + [_ITER_NEXT_LIST] = "_ITER_NEXT_LIST", + [_ITER_CHECK_TUPLE] = "_ITER_CHECK_TUPLE", + [_ITER_JUMP_TUPLE] = "_ITER_JUMP_TUPLE", + [_GUARD_NOT_EXHAUSTED_TUPLE] = "_GUARD_NOT_EXHAUSTED_TUPLE", + [_ITER_NEXT_TUPLE] = "_ITER_NEXT_TUPLE", + [_ITER_CHECK_RANGE] = "_ITER_CHECK_RANGE", + [_ITER_JUMP_RANGE] = "_ITER_JUMP_RANGE", + [_GUARD_NOT_EXHAUSTED_RANGE] = "_GUARD_NOT_EXHAUSTED_RANGE", + [_ITER_NEXT_RANGE] = "_ITER_NEXT_RANGE", + [_GUARD_DORV_VALUES_INST_ATTR_FROM_DICT] = "_GUARD_DORV_VALUES_INST_ATTR_FROM_DICT", + [_GUARD_KEYS_VERSION] = "_GUARD_KEYS_VERSION", + [_LOAD_ATTR_METHOD_WITH_VALUES] = "_LOAD_ATTR_METHOD_WITH_VALUES", + [_LOAD_ATTR_METHOD_NO_DICT] = "_LOAD_ATTR_METHOD_NO_DICT", + [_LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = "_LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES", + 
[_LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = "_LOAD_ATTR_NONDESCRIPTOR_NO_DICT", + [_CHECK_ATTR_METHOD_LAZY_DICT] = "_CHECK_ATTR_METHOD_LAZY_DICT", + [_LOAD_ATTR_METHOD_LAZY_DICT] = "_LOAD_ATTR_METHOD_LAZY_DICT", + [_SPECIALIZE_CALL] = "_SPECIALIZE_CALL", + [_CALL] = "_CALL", + [_CHECK_CALL_BOUND_METHOD_EXACT_ARGS] = "_CHECK_CALL_BOUND_METHOD_EXACT_ARGS", + [_INIT_CALL_BOUND_METHOD_EXACT_ARGS] = "_INIT_CALL_BOUND_METHOD_EXACT_ARGS", + [_CHECK_PEP_523] = "_CHECK_PEP_523", + [_CHECK_FUNCTION_EXACT_ARGS] = "_CHECK_FUNCTION_EXACT_ARGS", + [_CHECK_STACK_SPACE] = "_CHECK_STACK_SPACE", + [_INIT_CALL_PY_EXACT_ARGS] = "_INIT_CALL_PY_EXACT_ARGS", + [_PUSH_FRAME] = "_PUSH_FRAME", + [_SPECIALIZE_BINARY_OP] = "_SPECIALIZE_BINARY_OP", + [_BINARY_OP] = "_BINARY_OP", + [_GUARD_IS_TRUE_POP] = "_GUARD_IS_TRUE_POP", + [_GUARD_IS_FALSE_POP] = "_GUARD_IS_FALSE_POP", + [_GUARD_IS_NONE_POP] = "_GUARD_IS_NONE_POP", + [_GUARD_IS_NOT_NONE_POP] = "_GUARD_IS_NOT_NONE_POP", + [_JUMP_TO_TOP] = "_JUMP_TO_TOP", + [_SAVE_RETURN_OFFSET] = "_SAVE_RETURN_OFFSET", + [_INSERT] = "_INSERT", + [_CHECK_VALIDITY] = "_CHECK_VALIDITY", +}; +#endif // NEED_OPCODE_METADATA + +extern const char *const _PyOpcode_OpName[268]; +#ifdef NEED_OPCODE_METADATA +const char *const _PyOpcode_OpName[268] = { + [CACHE] = "CACHE", + [RESERVED] = "RESERVED", + [RESUME] = "RESUME", + [BEFORE_ASYNC_WITH] = "BEFORE_ASYNC_WITH", + [BEFORE_WITH] = "BEFORE_WITH", + [BINARY_OP_INPLACE_ADD_UNICODE] = "BINARY_OP_INPLACE_ADD_UNICODE", + [BINARY_SLICE] = "BINARY_SLICE", + [BINARY_SUBSCR] = "BINARY_SUBSCR", + [CHECK_EG_MATCH] = "CHECK_EG_MATCH", + [CHECK_EXC_MATCH] = "CHECK_EXC_MATCH", + [CLEANUP_THROW] = "CLEANUP_THROW", + [DELETE_SUBSCR] = "DELETE_SUBSCR", + [END_ASYNC_FOR] = "END_ASYNC_FOR", + [END_FOR] = "END_FOR", + [END_SEND] = "END_SEND", + [EXIT_INIT_CHECK] = "EXIT_INIT_CHECK", + [FORMAT_SIMPLE] = "FORMAT_SIMPLE", + [FORMAT_WITH_SPEC] = "FORMAT_WITH_SPEC", + [GET_AITER] = "GET_AITER", + [GET_ANEXT] = "GET_ANEXT", + [GET_ITER] = "GET_ITER", + [GET_LEN] = "GET_LEN", + [GET_YIELD_FROM_ITER] = "GET_YIELD_FROM_ITER", + [INTERPRETER_EXIT] = "INTERPRETER_EXIT", + [LOAD_ASSERTION_ERROR] = "LOAD_ASSERTION_ERROR", + [LOAD_BUILD_CLASS] = "LOAD_BUILD_CLASS", + [LOAD_LOCALS] = "LOAD_LOCALS", + [MAKE_FUNCTION] = "MAKE_FUNCTION", + [MATCH_KEYS] = "MATCH_KEYS", + [MATCH_MAPPING] = "MATCH_MAPPING", + [MATCH_SEQUENCE] = "MATCH_SEQUENCE", + [NOP] = "NOP", + [POP_EXCEPT] = "POP_EXCEPT", + [POP_TOP] = "POP_TOP", + [PUSH_EXC_INFO] = "PUSH_EXC_INFO", + [PUSH_NULL] = "PUSH_NULL", + [RETURN_GENERATOR] = "RETURN_GENERATOR", + [RETURN_VALUE] = "RETURN_VALUE", + [SETUP_ANNOTATIONS] = "SETUP_ANNOTATIONS", + [STORE_SLICE] = "STORE_SLICE", + [STORE_SUBSCR] = "STORE_SUBSCR", + [TO_BOOL] = "TO_BOOL", + [UNARY_INVERT] = "UNARY_INVERT", + [UNARY_NEGATIVE] = "UNARY_NEGATIVE", + [UNARY_NOT] = "UNARY_NOT", + [WITH_EXCEPT_START] = "WITH_EXCEPT_START", + [BINARY_OP] = "BINARY_OP", + [BUILD_CONST_KEY_MAP] = "BUILD_CONST_KEY_MAP", + [BUILD_LIST] = "BUILD_LIST", + [BUILD_MAP] = "BUILD_MAP", + [BUILD_SET] = "BUILD_SET", + [BUILD_SLICE] = "BUILD_SLICE", + [BUILD_STRING] = "BUILD_STRING", + [BUILD_TUPLE] = "BUILD_TUPLE", + [CALL] = "CALL", + [CALL_FUNCTION_EX] = "CALL_FUNCTION_EX", + [CALL_INTRINSIC_1] = "CALL_INTRINSIC_1", + [CALL_INTRINSIC_2] = "CALL_INTRINSIC_2", + [CALL_KW] = "CALL_KW", + [COMPARE_OP] = "COMPARE_OP", + [CONTAINS_OP] = "CONTAINS_OP", + [CONVERT_VALUE] = "CONVERT_VALUE", + [COPY] = "COPY", + [COPY_FREE_VARS] = "COPY_FREE_VARS", + [DELETE_ATTR] = "DELETE_ATTR", + [DELETE_DEREF] = 
"DELETE_DEREF", + [DELETE_FAST] = "DELETE_FAST", + [DELETE_GLOBAL] = "DELETE_GLOBAL", + [DELETE_NAME] = "DELETE_NAME", + [DICT_MERGE] = "DICT_MERGE", + [DICT_UPDATE] = "DICT_UPDATE", + [ENTER_EXECUTOR] = "ENTER_EXECUTOR", + [EXTENDED_ARG] = "EXTENDED_ARG", + [FOR_ITER] = "FOR_ITER", + [GET_AWAITABLE] = "GET_AWAITABLE", + [IMPORT_FROM] = "IMPORT_FROM", + [IMPORT_NAME] = "IMPORT_NAME", + [IS_OP] = "IS_OP", + [JUMP_BACKWARD] = "JUMP_BACKWARD", + [JUMP_BACKWARD_NO_INTERRUPT] = "JUMP_BACKWARD_NO_INTERRUPT", + [JUMP_FORWARD] = "JUMP_FORWARD", + [LIST_APPEND] = "LIST_APPEND", + [LIST_EXTEND] = "LIST_EXTEND", + [LOAD_ATTR] = "LOAD_ATTR", + [LOAD_CONST] = "LOAD_CONST", + [LOAD_DEREF] = "LOAD_DEREF", + [LOAD_FAST] = "LOAD_FAST", + [LOAD_FAST_AND_CLEAR] = "LOAD_FAST_AND_CLEAR", + [LOAD_FAST_CHECK] = "LOAD_FAST_CHECK", + [LOAD_FAST_LOAD_FAST] = "LOAD_FAST_LOAD_FAST", + [LOAD_FROM_DICT_OR_DEREF] = "LOAD_FROM_DICT_OR_DEREF", + [LOAD_FROM_DICT_OR_GLOBALS] = "LOAD_FROM_DICT_OR_GLOBALS", + [LOAD_GLOBAL] = "LOAD_GLOBAL", + [LOAD_NAME] = "LOAD_NAME", + [LOAD_SUPER_ATTR] = "LOAD_SUPER_ATTR", + [MAKE_CELL] = "MAKE_CELL", + [MAP_ADD] = "MAP_ADD", + [MATCH_CLASS] = "MATCH_CLASS", + [POP_JUMP_IF_FALSE] = "POP_JUMP_IF_FALSE", + [POP_JUMP_IF_NONE] = "POP_JUMP_IF_NONE", + [POP_JUMP_IF_NOT_NONE] = "POP_JUMP_IF_NOT_NONE", + [POP_JUMP_IF_TRUE] = "POP_JUMP_IF_TRUE", + [RAISE_VARARGS] = "RAISE_VARARGS", + [RERAISE] = "RERAISE", + [RETURN_CONST] = "RETURN_CONST", + [SEND] = "SEND", + [SET_ADD] = "SET_ADD", + [SET_FUNCTION_ATTRIBUTE] = "SET_FUNCTION_ATTRIBUTE", + [SET_UPDATE] = "SET_UPDATE", + [STORE_ATTR] = "STORE_ATTR", + [STORE_DEREF] = "STORE_DEREF", + [STORE_FAST] = "STORE_FAST", + [STORE_FAST_LOAD_FAST] = "STORE_FAST_LOAD_FAST", + [STORE_FAST_STORE_FAST] = "STORE_FAST_STORE_FAST", + [STORE_GLOBAL] = "STORE_GLOBAL", + [STORE_NAME] = "STORE_NAME", + [SWAP] = "SWAP", + [UNPACK_EX] = "UNPACK_EX", + [UNPACK_SEQUENCE] = "UNPACK_SEQUENCE", + [YIELD_VALUE] = "YIELD_VALUE", + [BINARY_OP_ADD_FLOAT] = "BINARY_OP_ADD_FLOAT", + [BINARY_OP_ADD_INT] = "BINARY_OP_ADD_INT", + [BINARY_OP_ADD_UNICODE] = "BINARY_OP_ADD_UNICODE", + [BINARY_OP_MULTIPLY_FLOAT] = "BINARY_OP_MULTIPLY_FLOAT", + [BINARY_OP_MULTIPLY_INT] = "BINARY_OP_MULTIPLY_INT", + [BINARY_OP_SUBTRACT_FLOAT] = "BINARY_OP_SUBTRACT_FLOAT", + [BINARY_OP_SUBTRACT_INT] = "BINARY_OP_SUBTRACT_INT", + [BINARY_SUBSCR_DICT] = "BINARY_SUBSCR_DICT", + [BINARY_SUBSCR_GETITEM] = "BINARY_SUBSCR_GETITEM", + [BINARY_SUBSCR_LIST_INT] = "BINARY_SUBSCR_LIST_INT", + [BINARY_SUBSCR_STR_INT] = "BINARY_SUBSCR_STR_INT", + [BINARY_SUBSCR_TUPLE_INT] = "BINARY_SUBSCR_TUPLE_INT", + [CALL_ALLOC_AND_ENTER_INIT] = "CALL_ALLOC_AND_ENTER_INIT", + [CALL_BOUND_METHOD_EXACT_ARGS] = "CALL_BOUND_METHOD_EXACT_ARGS", + [CALL_BUILTIN_CLASS] = "CALL_BUILTIN_CLASS", + [CALL_BUILTIN_FAST] = "CALL_BUILTIN_FAST", + [CALL_BUILTIN_FAST_WITH_KEYWORDS] = "CALL_BUILTIN_FAST_WITH_KEYWORDS", + [CALL_BUILTIN_O] = "CALL_BUILTIN_O", + [CALL_ISINSTANCE] = "CALL_ISINSTANCE", + [CALL_LEN] = "CALL_LEN", + [CALL_LIST_APPEND] = "CALL_LIST_APPEND", + [CALL_METHOD_DESCRIPTOR_FAST] = "CALL_METHOD_DESCRIPTOR_FAST", + [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = "CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS", + [CALL_METHOD_DESCRIPTOR_NOARGS] = "CALL_METHOD_DESCRIPTOR_NOARGS", + [CALL_METHOD_DESCRIPTOR_O] = "CALL_METHOD_DESCRIPTOR_O", + [CALL_PY_EXACT_ARGS] = "CALL_PY_EXACT_ARGS", + [CALL_PY_WITH_DEFAULTS] = "CALL_PY_WITH_DEFAULTS", + [CALL_STR_1] = "CALL_STR_1", + [CALL_TUPLE_1] = "CALL_TUPLE_1", + [CALL_TYPE_1] = "CALL_TYPE_1", + 
[COMPARE_OP_FLOAT] = "COMPARE_OP_FLOAT", + [COMPARE_OP_INT] = "COMPARE_OP_INT", + [COMPARE_OP_STR] = "COMPARE_OP_STR", + [FOR_ITER_GEN] = "FOR_ITER_GEN", + [FOR_ITER_LIST] = "FOR_ITER_LIST", + [FOR_ITER_RANGE] = "FOR_ITER_RANGE", + [FOR_ITER_TUPLE] = "FOR_ITER_TUPLE", + [LOAD_ATTR_CLASS] = "LOAD_ATTR_CLASS", + [LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = "LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN", + [LOAD_ATTR_INSTANCE_VALUE] = "LOAD_ATTR_INSTANCE_VALUE", + [LOAD_ATTR_METHOD_LAZY_DICT] = "LOAD_ATTR_METHOD_LAZY_DICT", + [LOAD_ATTR_METHOD_NO_DICT] = "LOAD_ATTR_METHOD_NO_DICT", + [LOAD_ATTR_METHOD_WITH_VALUES] = "LOAD_ATTR_METHOD_WITH_VALUES", + [LOAD_ATTR_MODULE] = "LOAD_ATTR_MODULE", + [LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = "LOAD_ATTR_NONDESCRIPTOR_NO_DICT", + [LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = "LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES", + [LOAD_ATTR_PROPERTY] = "LOAD_ATTR_PROPERTY", + [LOAD_ATTR_SLOT] = "LOAD_ATTR_SLOT", + [LOAD_ATTR_WITH_HINT] = "LOAD_ATTR_WITH_HINT", + [LOAD_GLOBAL_BUILTIN] = "LOAD_GLOBAL_BUILTIN", + [LOAD_GLOBAL_MODULE] = "LOAD_GLOBAL_MODULE", + [LOAD_SUPER_ATTR_ATTR] = "LOAD_SUPER_ATTR_ATTR", + [LOAD_SUPER_ATTR_METHOD] = "LOAD_SUPER_ATTR_METHOD", + [RESUME_CHECK] = "RESUME_CHECK", + [SEND_GEN] = "SEND_GEN", + [STORE_ATTR_INSTANCE_VALUE] = "STORE_ATTR_INSTANCE_VALUE", + [STORE_ATTR_SLOT] = "STORE_ATTR_SLOT", + [STORE_ATTR_WITH_HINT] = "STORE_ATTR_WITH_HINT", + [STORE_SUBSCR_DICT] = "STORE_SUBSCR_DICT", + [STORE_SUBSCR_LIST_INT] = "STORE_SUBSCR_LIST_INT", + [TO_BOOL_ALWAYS_TRUE] = "TO_BOOL_ALWAYS_TRUE", + [TO_BOOL_BOOL] = "TO_BOOL_BOOL", + [TO_BOOL_INT] = "TO_BOOL_INT", + [TO_BOOL_LIST] = "TO_BOOL_LIST", + [TO_BOOL_NONE] = "TO_BOOL_NONE", + [TO_BOOL_STR] = "TO_BOOL_STR", + [UNPACK_SEQUENCE_LIST] = "UNPACK_SEQUENCE_LIST", + [UNPACK_SEQUENCE_TUPLE] = "UNPACK_SEQUENCE_TUPLE", + [UNPACK_SEQUENCE_TWO_TUPLE] = "UNPACK_SEQUENCE_TWO_TUPLE", + [INSTRUMENTED_RESUME] = "INSTRUMENTED_RESUME", + [INSTRUMENTED_END_FOR] = "INSTRUMENTED_END_FOR", + [INSTRUMENTED_END_SEND] = "INSTRUMENTED_END_SEND", + [INSTRUMENTED_RETURN_VALUE] = "INSTRUMENTED_RETURN_VALUE", + [INSTRUMENTED_RETURN_CONST] = "INSTRUMENTED_RETURN_CONST", + [INSTRUMENTED_YIELD_VALUE] = "INSTRUMENTED_YIELD_VALUE", + [INSTRUMENTED_LOAD_SUPER_ATTR] = "INSTRUMENTED_LOAD_SUPER_ATTR", + [INSTRUMENTED_FOR_ITER] = "INSTRUMENTED_FOR_ITER", + [INSTRUMENTED_CALL] = "INSTRUMENTED_CALL", + [INSTRUMENTED_CALL_KW] = "INSTRUMENTED_CALL_KW", + [INSTRUMENTED_CALL_FUNCTION_EX] = "INSTRUMENTED_CALL_FUNCTION_EX", + [INSTRUMENTED_INSTRUCTION] = "INSTRUMENTED_INSTRUCTION", + [INSTRUMENTED_JUMP_FORWARD] = "INSTRUMENTED_JUMP_FORWARD", + [INSTRUMENTED_JUMP_BACKWARD] = "INSTRUMENTED_JUMP_BACKWARD", + [INSTRUMENTED_POP_JUMP_IF_TRUE] = "INSTRUMENTED_POP_JUMP_IF_TRUE", + [INSTRUMENTED_POP_JUMP_IF_FALSE] = "INSTRUMENTED_POP_JUMP_IF_FALSE", + [INSTRUMENTED_POP_JUMP_IF_NONE] = "INSTRUMENTED_POP_JUMP_IF_NONE", + [INSTRUMENTED_POP_JUMP_IF_NOT_NONE] = "INSTRUMENTED_POP_JUMP_IF_NOT_NONE", + [INSTRUMENTED_LINE] = "INSTRUMENTED_LINE", + [JUMP] = "JUMP", + [JUMP_NO_INTERRUPT] = "JUMP_NO_INTERRUPT", + [LOAD_CLOSURE] = "LOAD_CLOSURE", + [LOAD_METHOD] = "LOAD_METHOD", + [LOAD_SUPER_METHOD] = "LOAD_SUPER_METHOD", + [LOAD_ZERO_SUPER_ATTR] = "LOAD_ZERO_SUPER_ATTR", + [LOAD_ZERO_SUPER_METHOD] = "LOAD_ZERO_SUPER_METHOD", + [POP_BLOCK] = "POP_BLOCK", + [SETUP_CLEANUP] = "SETUP_CLEANUP", + [SETUP_FINALLY] = "SETUP_FINALLY", + [SETUP_WITH] = "SETUP_WITH", + [STORE_FAST_MAYBE_NULL] = "STORE_FAST_MAYBE_NULL", +}; +#endif // NEED_OPCODE_METADATA + +extern const uint8_t _PyOpcode_Caches[256]; 
+#ifdef NEED_OPCODE_METADATA +const uint8_t _PyOpcode_Caches[256] = { + [TO_BOOL] = 3, + [BINARY_OP_INPLACE_ADD_UNICODE] = 1, + [BINARY_SUBSCR] = 1, + [STORE_SUBSCR] = 1, + [SEND] = 1, + [UNPACK_SEQUENCE] = 1, + [STORE_ATTR] = 4, + [LOAD_GLOBAL] = 4, + [LOAD_SUPER_ATTR] = 1, + [LOAD_ATTR] = 9, + [COMPARE_OP] = 1, + [JUMP_BACKWARD] = 1, + [POP_JUMP_IF_TRUE] = 1, + [POP_JUMP_IF_FALSE] = 1, + [POP_JUMP_IF_NONE] = 1, + [POP_JUMP_IF_NOT_NONE] = 1, + [FOR_ITER] = 1, + [CALL] = 3, + [BINARY_OP] = 1, +}; +#endif // NEED_OPCODE_METADATA + +extern const uint8_t _PyOpcode_Deopt[256]; +#ifdef NEED_OPCODE_METADATA +const uint8_t _PyOpcode_Deopt[256] = { + [BEFORE_ASYNC_WITH] = BEFORE_ASYNC_WITH, + [BEFORE_WITH] = BEFORE_WITH, + [BINARY_OP] = BINARY_OP, + [BINARY_OP_ADD_FLOAT] = BINARY_OP, + [BINARY_OP_ADD_INT] = BINARY_OP, + [BINARY_OP_ADD_UNICODE] = BINARY_OP, + [BINARY_OP_INPLACE_ADD_UNICODE] = BINARY_OP, + [BINARY_OP_MULTIPLY_FLOAT] = BINARY_OP, + [BINARY_OP_MULTIPLY_INT] = BINARY_OP, + [BINARY_OP_SUBTRACT_FLOAT] = BINARY_OP, + [BINARY_OP_SUBTRACT_INT] = BINARY_OP, + [BINARY_SLICE] = BINARY_SLICE, + [BINARY_SUBSCR] = BINARY_SUBSCR, + [BINARY_SUBSCR_DICT] = BINARY_SUBSCR, + [BINARY_SUBSCR_GETITEM] = BINARY_SUBSCR, + [BINARY_SUBSCR_LIST_INT] = BINARY_SUBSCR, + [BINARY_SUBSCR_STR_INT] = BINARY_SUBSCR, + [BINARY_SUBSCR_TUPLE_INT] = BINARY_SUBSCR, + [BUILD_CONST_KEY_MAP] = BUILD_CONST_KEY_MAP, + [BUILD_LIST] = BUILD_LIST, + [BUILD_MAP] = BUILD_MAP, + [BUILD_SET] = BUILD_SET, + [BUILD_SLICE] = BUILD_SLICE, + [BUILD_STRING] = BUILD_STRING, + [BUILD_TUPLE] = BUILD_TUPLE, + [CACHE] = CACHE, + [CALL] = CALL, + [CALL_ALLOC_AND_ENTER_INIT] = CALL, + [CALL_BOUND_METHOD_EXACT_ARGS] = CALL, + [CALL_BUILTIN_CLASS] = CALL, + [CALL_BUILTIN_FAST] = CALL, + [CALL_BUILTIN_FAST_WITH_KEYWORDS] = CALL, + [CALL_BUILTIN_O] = CALL, + [CALL_FUNCTION_EX] = CALL_FUNCTION_EX, + [CALL_INTRINSIC_1] = CALL_INTRINSIC_1, + [CALL_INTRINSIC_2] = CALL_INTRINSIC_2, + [CALL_ISINSTANCE] = CALL, + [CALL_KW] = CALL_KW, + [CALL_LEN] = CALL, + [CALL_LIST_APPEND] = CALL, + [CALL_METHOD_DESCRIPTOR_FAST] = CALL, + [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = CALL, + [CALL_METHOD_DESCRIPTOR_NOARGS] = CALL, + [CALL_METHOD_DESCRIPTOR_O] = CALL, + [CALL_PY_EXACT_ARGS] = CALL, + [CALL_PY_WITH_DEFAULTS] = CALL, + [CALL_STR_1] = CALL, + [CALL_TUPLE_1] = CALL, + [CALL_TYPE_1] = CALL, + [CHECK_EG_MATCH] = CHECK_EG_MATCH, + [CHECK_EXC_MATCH] = CHECK_EXC_MATCH, + [CLEANUP_THROW] = CLEANUP_THROW, + [COMPARE_OP] = COMPARE_OP, + [COMPARE_OP_FLOAT] = COMPARE_OP, + [COMPARE_OP_INT] = COMPARE_OP, + [COMPARE_OP_STR] = COMPARE_OP, + [CONTAINS_OP] = CONTAINS_OP, + [CONVERT_VALUE] = CONVERT_VALUE, + [COPY] = COPY, + [COPY_FREE_VARS] = COPY_FREE_VARS, + [DELETE_ATTR] = DELETE_ATTR, + [DELETE_DEREF] = DELETE_DEREF, + [DELETE_FAST] = DELETE_FAST, + [DELETE_GLOBAL] = DELETE_GLOBAL, + [DELETE_NAME] = DELETE_NAME, + [DELETE_SUBSCR] = DELETE_SUBSCR, + [DICT_MERGE] = DICT_MERGE, + [DICT_UPDATE] = DICT_UPDATE, + [END_ASYNC_FOR] = END_ASYNC_FOR, + [END_FOR] = END_FOR, + [END_SEND] = END_SEND, + [ENTER_EXECUTOR] = ENTER_EXECUTOR, + [EXIT_INIT_CHECK] = EXIT_INIT_CHECK, + [EXTENDED_ARG] = EXTENDED_ARG, + [FORMAT_SIMPLE] = FORMAT_SIMPLE, + [FORMAT_WITH_SPEC] = FORMAT_WITH_SPEC, + [FOR_ITER] = FOR_ITER, + [FOR_ITER_GEN] = FOR_ITER, + [FOR_ITER_LIST] = FOR_ITER, + [FOR_ITER_RANGE] = FOR_ITER, + [FOR_ITER_TUPLE] = FOR_ITER, + [GET_AITER] = GET_AITER, + [GET_ANEXT] = GET_ANEXT, + [GET_AWAITABLE] = GET_AWAITABLE, + [GET_ITER] = GET_ITER, + [GET_LEN] = GET_LEN, + [GET_YIELD_FROM_ITER] = 
GET_YIELD_FROM_ITER, + [IMPORT_FROM] = IMPORT_FROM, + [IMPORT_NAME] = IMPORT_NAME, + [INSTRUMENTED_CALL] = INSTRUMENTED_CALL, + [INSTRUMENTED_CALL_FUNCTION_EX] = INSTRUMENTED_CALL_FUNCTION_EX, + [INSTRUMENTED_CALL_KW] = INSTRUMENTED_CALL_KW, + [INSTRUMENTED_END_FOR] = INSTRUMENTED_END_FOR, + [INSTRUMENTED_END_SEND] = INSTRUMENTED_END_SEND, + [INSTRUMENTED_FOR_ITER] = INSTRUMENTED_FOR_ITER, + [INSTRUMENTED_INSTRUCTION] = INSTRUMENTED_INSTRUCTION, + [INSTRUMENTED_JUMP_BACKWARD] = INSTRUMENTED_JUMP_BACKWARD, + [INSTRUMENTED_JUMP_FORWARD] = INSTRUMENTED_JUMP_FORWARD, + [INSTRUMENTED_LINE] = INSTRUMENTED_LINE, + [INSTRUMENTED_LOAD_SUPER_ATTR] = INSTRUMENTED_LOAD_SUPER_ATTR, + [INSTRUMENTED_POP_JUMP_IF_FALSE] = INSTRUMENTED_POP_JUMP_IF_FALSE, + [INSTRUMENTED_POP_JUMP_IF_NONE] = INSTRUMENTED_POP_JUMP_IF_NONE, + [INSTRUMENTED_POP_JUMP_IF_NOT_NONE] = INSTRUMENTED_POP_JUMP_IF_NOT_NONE, + [INSTRUMENTED_POP_JUMP_IF_TRUE] = INSTRUMENTED_POP_JUMP_IF_TRUE, + [INSTRUMENTED_RESUME] = INSTRUMENTED_RESUME, + [INSTRUMENTED_RETURN_CONST] = INSTRUMENTED_RETURN_CONST, + [INSTRUMENTED_RETURN_VALUE] = INSTRUMENTED_RETURN_VALUE, + [INSTRUMENTED_YIELD_VALUE] = INSTRUMENTED_YIELD_VALUE, + [INTERPRETER_EXIT] = INTERPRETER_EXIT, + [IS_OP] = IS_OP, + [JUMP_BACKWARD] = JUMP_BACKWARD, + [JUMP_BACKWARD_NO_INTERRUPT] = JUMP_BACKWARD_NO_INTERRUPT, + [JUMP_FORWARD] = JUMP_FORWARD, + [LIST_APPEND] = LIST_APPEND, + [LIST_EXTEND] = LIST_EXTEND, + [LOAD_ASSERTION_ERROR] = LOAD_ASSERTION_ERROR, + [LOAD_ATTR] = LOAD_ATTR, + [LOAD_ATTR_CLASS] = LOAD_ATTR, + [LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = LOAD_ATTR, + [LOAD_ATTR_INSTANCE_VALUE] = LOAD_ATTR, + [LOAD_ATTR_METHOD_LAZY_DICT] = LOAD_ATTR, + [LOAD_ATTR_METHOD_NO_DICT] = LOAD_ATTR, + [LOAD_ATTR_METHOD_WITH_VALUES] = LOAD_ATTR, + [LOAD_ATTR_MODULE] = LOAD_ATTR, + [LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = LOAD_ATTR, + [LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = LOAD_ATTR, + [LOAD_ATTR_PROPERTY] = LOAD_ATTR, + [LOAD_ATTR_SLOT] = LOAD_ATTR, + [LOAD_ATTR_WITH_HINT] = LOAD_ATTR, + [LOAD_BUILD_CLASS] = LOAD_BUILD_CLASS, + [LOAD_CONST] = LOAD_CONST, + [LOAD_DEREF] = LOAD_DEREF, + [LOAD_FAST] = LOAD_FAST, + [LOAD_FAST_AND_CLEAR] = LOAD_FAST_AND_CLEAR, + [LOAD_FAST_CHECK] = LOAD_FAST_CHECK, + [LOAD_FAST_LOAD_FAST] = LOAD_FAST_LOAD_FAST, + [LOAD_FROM_DICT_OR_DEREF] = LOAD_FROM_DICT_OR_DEREF, + [LOAD_FROM_DICT_OR_GLOBALS] = LOAD_FROM_DICT_OR_GLOBALS, + [LOAD_GLOBAL] = LOAD_GLOBAL, + [LOAD_GLOBAL_BUILTIN] = LOAD_GLOBAL, + [LOAD_GLOBAL_MODULE] = LOAD_GLOBAL, + [LOAD_LOCALS] = LOAD_LOCALS, + [LOAD_NAME] = LOAD_NAME, + [LOAD_SUPER_ATTR] = LOAD_SUPER_ATTR, + [LOAD_SUPER_ATTR_ATTR] = LOAD_SUPER_ATTR, + [LOAD_SUPER_ATTR_METHOD] = LOAD_SUPER_ATTR, + [MAKE_CELL] = MAKE_CELL, + [MAKE_FUNCTION] = MAKE_FUNCTION, + [MAP_ADD] = MAP_ADD, + [MATCH_CLASS] = MATCH_CLASS, + [MATCH_KEYS] = MATCH_KEYS, + [MATCH_MAPPING] = MATCH_MAPPING, + [MATCH_SEQUENCE] = MATCH_SEQUENCE, + [NOP] = NOP, + [POP_EXCEPT] = POP_EXCEPT, + [POP_JUMP_IF_FALSE] = POP_JUMP_IF_FALSE, + [POP_JUMP_IF_NONE] = POP_JUMP_IF_NONE, + [POP_JUMP_IF_NOT_NONE] = POP_JUMP_IF_NOT_NONE, + [POP_JUMP_IF_TRUE] = POP_JUMP_IF_TRUE, + [POP_TOP] = POP_TOP, + [PUSH_EXC_INFO] = PUSH_EXC_INFO, + [PUSH_NULL] = PUSH_NULL, + [RAISE_VARARGS] = RAISE_VARARGS, + [RERAISE] = RERAISE, + [RESERVED] = RESERVED, + [RESUME] = RESUME, + [RESUME_CHECK] = RESUME, + [RETURN_CONST] = RETURN_CONST, + [RETURN_GENERATOR] = RETURN_GENERATOR, + [RETURN_VALUE] = RETURN_VALUE, + [SEND] = SEND, + [SEND_GEN] = SEND, + [SETUP_ANNOTATIONS] = SETUP_ANNOTATIONS, + [SET_ADD] = SET_ADD, + 
[SET_FUNCTION_ATTRIBUTE] = SET_FUNCTION_ATTRIBUTE, + [SET_UPDATE] = SET_UPDATE, + [STORE_ATTR] = STORE_ATTR, + [STORE_ATTR_INSTANCE_VALUE] = STORE_ATTR, + [STORE_ATTR_SLOT] = STORE_ATTR, + [STORE_ATTR_WITH_HINT] = STORE_ATTR, + [STORE_DEREF] = STORE_DEREF, + [STORE_FAST] = STORE_FAST, + [STORE_FAST_LOAD_FAST] = STORE_FAST_LOAD_FAST, + [STORE_FAST_STORE_FAST] = STORE_FAST_STORE_FAST, + [STORE_GLOBAL] = STORE_GLOBAL, + [STORE_NAME] = STORE_NAME, + [STORE_SLICE] = STORE_SLICE, + [STORE_SUBSCR] = STORE_SUBSCR, + [STORE_SUBSCR_DICT] = STORE_SUBSCR, + [STORE_SUBSCR_LIST_INT] = STORE_SUBSCR, + [SWAP] = SWAP, + [TO_BOOL] = TO_BOOL, + [TO_BOOL_ALWAYS_TRUE] = TO_BOOL, + [TO_BOOL_BOOL] = TO_BOOL, + [TO_BOOL_INT] = TO_BOOL, + [TO_BOOL_LIST] = TO_BOOL, + [TO_BOOL_NONE] = TO_BOOL, + [TO_BOOL_STR] = TO_BOOL, + [UNARY_INVERT] = UNARY_INVERT, + [UNARY_NEGATIVE] = UNARY_NEGATIVE, + [UNARY_NOT] = UNARY_NOT, + [UNPACK_EX] = UNPACK_EX, + [UNPACK_SEQUENCE] = UNPACK_SEQUENCE, + [UNPACK_SEQUENCE_LIST] = UNPACK_SEQUENCE, + [UNPACK_SEQUENCE_TUPLE] = UNPACK_SEQUENCE, + [UNPACK_SEQUENCE_TWO_TUPLE] = UNPACK_SEQUENCE, + [WITH_EXCEPT_START] = WITH_EXCEPT_START, + [YIELD_VALUE] = YIELD_VALUE, +}; +#endif // NEED_OPCODE_METADATA + +#define EXTRA_CASES \ + case 119: \ + case 120: \ + case 121: \ + case 122: \ + case 123: \ + case 124: \ + case 125: \ + case 126: \ + case 127: \ + case 128: \ + case 129: \ + case 130: \ + case 131: \ + case 132: \ + case 133: \ + case 134: \ + case 135: \ + case 136: \ + case 137: \ + case 138: \ + case 139: \ + case 140: \ + case 141: \ + case 142: \ + case 143: \ + case 144: \ + case 145: \ + case 146: \ + case 147: \ + case 148: \ + case 219: \ + case 220: \ + case 221: \ + case 222: \ + case 223: \ + case 224: \ + case 225: \ + case 226: \ + case 227: \ + case 228: \ + case 229: \ + case 230: \ + case 231: \ + case 232: \ + case 233: \ + case 234: \ + case 235: \ + case 255: \ + ; + diff --git a/Include/internal/pycore_opcode_utils.h b/Include/internal/pycore_opcode_utils.h new file mode 100644 index 000000000000000..208bfb2f75308bc --- /dev/null +++ b/Include/internal/pycore_opcode_utils.h @@ -0,0 +1,73 @@ +#ifndef Py_INTERNAL_OPCODE_UTILS_H +#define Py_INTERNAL_OPCODE_UTILS_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "opcode_ids.h" + +#define MAX_REAL_OPCODE 254 + +#define IS_WITHIN_OPCODE_RANGE(opcode) \ + (((opcode) >= 0 && (opcode) <= MAX_REAL_OPCODE) || \ + IS_PSEUDO_INSTR(opcode)) + +#define IS_BLOCK_PUSH_OPCODE(opcode) \ + ((opcode) == SETUP_FINALLY || \ + (opcode) == SETUP_WITH || \ + (opcode) == SETUP_CLEANUP) + +#define HAS_TARGET(opcode) \ + (OPCODE_HAS_JUMP(opcode) || IS_BLOCK_PUSH_OPCODE(opcode)) + +/* opcodes that must be last in the basicblock */ +#define IS_TERMINATOR_OPCODE(opcode) \ + (OPCODE_HAS_JUMP(opcode) || IS_SCOPE_EXIT_OPCODE(opcode)) + +/* opcodes which are not emitted in codegen stage, only by the assembler */ +#define IS_ASSEMBLER_OPCODE(opcode) \ + ((opcode) == JUMP_FORWARD || \ + (opcode) == JUMP_BACKWARD || \ + (opcode) == JUMP_BACKWARD_NO_INTERRUPT) + +#define IS_BACKWARDS_JUMP_OPCODE(opcode) \ + ((opcode) == JUMP_BACKWARD || \ + (opcode) == JUMP_BACKWARD_NO_INTERRUPT) + +#define IS_UNCONDITIONAL_JUMP_OPCODE(opcode) \ + ((opcode) == JUMP || \ + (opcode) == JUMP_NO_INTERRUPT || \ + (opcode) == JUMP_FORWARD || \ + (opcode) == JUMP_BACKWARD || \ + (opcode) == JUMP_BACKWARD_NO_INTERRUPT) + +#define IS_SCOPE_EXIT_OPCODE(opcode) \ + ((opcode) == RETURN_VALUE || \ 
+ (opcode) == RETURN_CONST || \ + (opcode) == RAISE_VARARGS || \ + (opcode) == RERAISE) + + +/* Flags used in the oparg for MAKE_FUNCTION */ +#define MAKE_FUNCTION_DEFAULTS 0x01 +#define MAKE_FUNCTION_KWDEFAULTS 0x02 +#define MAKE_FUNCTION_ANNOTATIONS 0x04 +#define MAKE_FUNCTION_CLOSURE 0x08 + +/* Values used in the oparg for RESUME */ +#define RESUME_AT_FUNC_START 0 +#define RESUME_AFTER_YIELD 1 +#define RESUME_AFTER_YIELD_FROM 2 +#define RESUME_AFTER_AWAIT 3 + +#define RESUME_OPARG_LOCATION_MASK 0x3 +#define RESUME_OPARG_DEPTH1_MASK 0x4 + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_OPCODE_UTILS_H */ diff --git a/Include/internal/pycore_optimizer.h b/Include/internal/pycore_optimizer.h new file mode 100644 index 000000000000000..b052460b44b7913 --- /dev/null +++ b/Include/internal/pycore_optimizer.h @@ -0,0 +1,25 @@ +#ifndef Py_INTERNAL_OPTIMIZER_H +#define Py_INTERNAL_OPTIMIZER_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_uops.h" // _PyUOpInstruction + +int _Py_uop_analyze_and_optimize(PyCodeObject *code, + _PyUOpInstruction *trace, int trace_len, int curr_stackentries); + +extern PyTypeObject _PyCounterExecutor_Type; +extern PyTypeObject _PyCounterOptimizer_Type; +extern PyTypeObject _PyDefaultOptimizer_Type; +extern PyTypeObject _PyUOpExecutor_Type; +extern PyTypeObject _PyUOpOptimizer_Type; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_OPTIMIZER_H */ diff --git a/Include/internal/pycore_parking_lot.h b/Include/internal/pycore_parking_lot.h new file mode 100644 index 000000000000000..f444da730055e84 --- /dev/null +++ b/Include/internal/pycore_parking_lot.h @@ -0,0 +1,99 @@ +// ParkingLot is an internal API for building efficient synchronization +// primitives like mutexes and events. +// +// The API and name is inspired by WebKit's WTF::ParkingLot, which in turn +// is inspired Linux's futex API. +// See https://webkit.org/blog/6161/locking-in-webkit/. +// +// The core functionality is an atomic "compare-and-sleep" operation along with +// an atomic "wake-up" operation. + +#ifndef Py_INTERNAL_PARKING_LOT_H +#define Py_INTERNAL_PARKING_LOT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_time.h" // _PyTime_t + + +enum { + // The thread was unparked by another thread. + Py_PARK_OK = 0, + + // The value of `address` did not match `expected`. + Py_PARK_AGAIN = -1, + + // The thread was unparked due to a timeout. + Py_PARK_TIMEOUT = -2, + + // The thread was interrupted by a signal. + Py_PARK_INTR = -3, +}; + +// Checks that `*address == *expected` and puts the thread to sleep until an +// unpark operation is called on the same `address`. Otherwise, the function +// returns `Py_PARK_AGAIN`. The comparison behaves like memcmp, but is +// performed atomically with respect to unpark operations. +// +// The `address_size` argument is the size of the data pointed to by the +// `address` and `expected` pointers (i.e., sizeof(*address)). It must be +// 1, 2, 4, or 8. +// +// The `timeout_ns` argument specifies the maximum amount of time to wait, with +// -1 indicating an infinite wait. +// +// `park_arg`, which can be NULL, is passed to the unpark operation. +// +// If `detach` is true, then the thread will detach/release the GIL while +// waiting. 
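For illustration, a one-shot event can be layered on this compare-and-sleep/wake-up pair along the following lines. This is a minimal sketch, not part of the change below: the one_shot_event type, the event_wait()/event_set() helpers, and the _Py_atomic_load_uint8()/_Py_atomic_store_uint8() calls are assumed for the example, and the header itself requires Py_BUILD_CORE.

/* Minimal sketch of a one-shot event built on the ParkingLot API declared
 * below.  Illustrative only; assumes the uint8 atomic helpers are available. */
#include "Python.h"
#include "pycore_parking_lot.h"

typedef struct { uint8_t state; } one_shot_event;   /* 0 = unset, 1 = set */

static void
event_wait(one_shot_event *evt)
{
    uint8_t unset = 0;
    /* Sleep only while the flag still reads 0.  Py_PARK_AGAIN means the
       value changed before the thread went to sleep, so just re-check. */
    while (_Py_atomic_load_uint8(&evt->state) == 0) {
        (void)_PyParkingLot_Park(&evt->state, &unset, sizeof(evt->state),
                                 -1 /* no timeout */, NULL /* park_arg */,
                                 1 /* detach (release the GIL) while waiting */);
    }
}

static void
event_set(one_shot_event *evt)
{
    _Py_atomic_store_uint8(&evt->state, 1);
    _PyParkingLot_UnparkAll(&evt->state);   /* wake every thread parked here */
}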
+// +// Example usage: +// +// if (_Py_atomic_compare_exchange_uint8(address, &expected, new_value)) { +// int res = _PyParkingLot_Park(address, &new_value, sizeof(*address), +// timeout_ns, NULL, 1); +// ... +// } +PyAPI_FUNC(int) +_PyParkingLot_Park(const void *address, const void *expected, + size_t address_size, _PyTime_t timeout_ns, + void *park_arg, int detach); + +// Callback for _PyParkingLot_Unpark: +// +// `arg` is the data of the same name provided to the _PyParkingLot_Unpark() +// call. +// `park_arg` is the data provided to _PyParkingLot_Park() call or NULL if +// no waiting thread was found. +// `has_more_waiters` is true if there are more threads waiting on the same +// address. May be true in cases where threads are waiting on a different +// address that map to the same internal bucket. +typedef void _Py_unpark_fn_t(void *arg, void *park_arg, int has_more_waiters); + +// Unparks a single thread waiting on `address`. +// +// Note that fn() is called regardless of whether a thread was unparked. If +// no threads are waiting on `address` then the `park_arg` argument to fn() +// will be NULL. +// +// Example usage: +// void callback(void *arg, void *park_arg, int has_more_waiters); +// _PyParkingLot_Unpark(address, &callback, arg); +PyAPI_FUNC(void) +_PyParkingLot_Unpark(const void *address, _Py_unpark_fn_t *fn, void *arg); + +// Unparks all threads waiting on `address`. +PyAPI_FUNC(void) _PyParkingLot_UnparkAll(const void *address); + +// Resets the parking lot state after a fork. Forgets all parked threads. +PyAPI_FUNC(void) _PyParkingLot_AfterFork(void); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_PARKING_LOT_H */ diff --git a/Include/internal/pycore_parser.h b/Include/internal/pycore_parser.h index e2de24e2ca97341..067b34c12c4e7f8 100644 --- a/Include/internal/pycore_parser.h +++ b/Include/internal/pycore_parser.h @@ -8,12 +8,46 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif + +#include "pycore_ast.h" // struct _expr +#include "pycore_global_strings.h" // _Py_DECLARE_STR() +#include "pycore_pyarena.h" // PyArena + + +#ifdef Py_DEBUG +#define _PYPEGEN_NSTATISTICS 2000 +#endif + +struct _parser_runtime_state { +#ifdef Py_DEBUG + long memo_statistics[_PYPEGEN_NSTATISTICS]; +#else + int _not_used; +#endif + struct _expr dummy_name; +}; + +_Py_DECLARE_STR(empty, "") +#define _parser_runtime_state_INIT \ + { \ + .dummy_name = { \ + .kind = Name_kind, \ + .v.Name.id = &_Py_STR(empty), \ + .v.Name.ctx = Load, \ + .lineno = 1, \ + .col_offset = 0, \ + .end_lineno = 1, \ + .end_col_offset = 0, \ + }, \ + } + extern struct _mod* _PyParser_ASTFromString( const char *str, PyObject* filename, int mode, PyCompilerFlags *flags, PyArena *arena); + extern struct _mod* _PyParser_ASTFromFile( FILE *fp, PyObject *filename_ob, @@ -24,6 +58,17 @@ extern struct _mod* _PyParser_ASTFromFile( PyCompilerFlags *flags, int *errcode, PyArena *arena); +extern struct _mod* _PyParser_InteractiveASTFromFile( + FILE *fp, + PyObject *filename_ob, + const char *enc, + int mode, + const char *ps1, + const char *ps2, + PyCompilerFlags *flags, + int *errcode, + PyObject **interactive_src, + PyArena *arena); #ifdef __cplusplus } diff --git a/Include/internal/pycore_pathconfig.h b/Include/internal/pycore_pathconfig.h index b8deaa0c3eb067b..a1ce1b19a002832 100644 --- a/Include/internal/pycore_pathconfig.h +++ b/Include/internal/pycore_pathconfig.h @@ -8,7 +8,9 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +// Export for '_testinternalcapi' shared 
extension PyAPI_FUNC(void) _PyPathConfig_ClearGlobal(void); + extern PyStatus _PyPathConfig_ReadGlobal(PyConfig *config); extern PyStatus _PyPathConfig_UpdateGlobal(const PyConfig *config); extern const wchar_t * _PyPathConfig_GetGlobalModuleSearchPath(void); diff --git a/Include/internal/pycore_pyarena.h b/Include/internal/pycore_pyarena.h index d78972a88ca238d..1f07479fb2ca27f 100644 --- a/Include/internal/pycore_pyarena.h +++ b/Include/internal/pycore_pyarena.h @@ -1,5 +1,4 @@ -/* An arena-like memory interface for the compiler. - */ +// An arena-like memory interface for the compiler. #ifndef Py_INTERNAL_PYARENA_H #define Py_INTERNAL_PYARENA_H @@ -13,49 +12,54 @@ extern "C" { typedef struct _arena PyArena; -/* _PyArena_New() and _PyArena_Free() create a new arena and free it, - respectively. Once an arena has been created, it can be used - to allocate memory via _PyArena_Malloc(). Pointers to PyObject can - also be registered with the arena via _PyArena_AddPyObject(), and the - arena will ensure that the PyObjects stay alive at least until - _PyArena_Free() is called. When an arena is freed, all the memory it - allocated is freed, the arena releases internal references to registered - PyObject*, and none of its pointers are valid. - XXX (tim) What does "none of its pointers are valid" mean? Does it - XXX mean that pointers previously obtained via _PyArena_Malloc() are - XXX no longer valid? (That's clearly true, but not sure that's what - XXX the text is trying to say.) - - _PyArena_New() returns an arena pointer. On error, it - returns a negative number and sets an exception. - XXX (tim): Not true. On error, _PyArena_New() actually returns NULL, - XXX and looks like it may or may not set an exception (e.g., if the - XXX internal PyList_New(0) returns NULL, _PyArena_New() passes that on - XXX and an exception is set; OTOH, if the internal - XXX block_new(DEFAULT_BLOCK_SIZE) returns NULL, that's passed on but - XXX an exception is not set in that case). -*/ +// _PyArena_New() and _PyArena_Free() create a new arena and free it, +// respectively. Once an arena has been created, it can be used +// to allocate memory via _PyArena_Malloc(). Pointers to PyObject can +// also be registered with the arena via _PyArena_AddPyObject(), and the +// arena will ensure that the PyObjects stay alive at least until +// _PyArena_Free() is called. When an arena is freed, all the memory it +// allocated is freed, the arena releases internal references to registered +// PyObject*, and none of its pointers are valid. +// XXX (tim) What does "none of its pointers are valid" mean? Does it +// XXX mean that pointers previously obtained via _PyArena_Malloc() are +// XXX no longer valid? (That's clearly true, but not sure that's what +// XXX the text is trying to say.) +// +// _PyArena_New() returns an arena pointer. On error, it +// returns a negative number and sets an exception. +// XXX (tim): Not true. On error, _PyArena_New() actually returns NULL, +// XXX and looks like it may or may not set an exception (e.g., if the +// XXX internal PyList_New(0) returns NULL, _PyArena_New() passes that on +// XXX and an exception is set; OTOH, if the internal +// XXX block_new(DEFAULT_BLOCK_SIZE) returns NULL, that's passed on but +// XXX an exception is not set in that case). 
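In practice the arena lifetime described above looks roughly like the following. This is a hedged sketch, not part of the change below: build_something(), the 256-byte allocation, and the explicit PyErr_NoMemory() call are invented for the example.

/* Minimal sketch of typical arena usage; illustrative only. */
#include "Python.h"
#include "pycore_pyarena.h"   // requires Py_BUILD_CORE

static int
build_something(PyObject *obj)
{
    PyArena *arena = _PyArena_New();
    if (arena == NULL) {
        return -1;
    }
    void *buf = _PyArena_Malloc(arena, 256);
    if (buf == NULL) {
        PyErr_NoMemory();            /* _PyArena_Malloc() does not set one */
        _PyArena_Free(arena);
        return -1;
    }
    /* The arena keeps obj alive at least until _PyArena_Free(). */
    if (_PyArena_AddPyObject(arena, obj) < 0) {
        _PyArena_Free(arena);
        return -1;
    }
    /* ... fill and use buf ... */
    _PyArena_Free(arena);            /* buf and all arena pointers become invalid */
    return 0;
}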
+// +// Export for test_peg_generator PyAPI_FUNC(PyArena*) _PyArena_New(void); + +// Export for test_peg_generator PyAPI_FUNC(void) _PyArena_Free(PyArena *); -/* Mostly like malloc(), return the address of a block of memory spanning - * `size` bytes, or return NULL (without setting an exception) if enough - * new memory can't be obtained. Unlike malloc(0), _PyArena_Malloc() with - * size=0 does not guarantee to return a unique pointer (the pointer - * returned may equal one or more other pointers obtained from - * _PyArena_Malloc()). - * Note that pointers obtained via _PyArena_Malloc() must never be passed to - * the system free() or realloc(), or to any of Python's similar memory- - * management functions. _PyArena_Malloc()-obtained pointers remain valid - * until _PyArena_Free(ar) is called, at which point all pointers obtained - * from the arena `ar` become invalid simultaneously. - */ +// Mostly like malloc(), return the address of a block of memory spanning +// `size` bytes, or return NULL (without setting an exception) if enough +// new memory can't be obtained. Unlike malloc(0), _PyArena_Malloc() with +// size=0 does not guarantee to return a unique pointer (the pointer +// returned may equal one or more other pointers obtained from +// _PyArena_Malloc()). +// Note that pointers obtained via _PyArena_Malloc() must never be passed to +// the system free() or realloc(), or to any of Python's similar memory- +// management functions. _PyArena_Malloc()-obtained pointers remain valid +// until _PyArena_Free(ar) is called, at which point all pointers obtained +// from the arena `ar` become invalid simultaneously. +// +// Export for test_peg_generator PyAPI_FUNC(void*) _PyArena_Malloc(PyArena *, size_t size); -/* This routine isn't a proper arena allocation routine. It takes - * a PyObject* and records it so that it can be DECREFed when the - * arena is freed. - */ +// This routine isn't a proper arena allocation routine. It takes +// a PyObject* and records it so that it can be DECREFed when the +// arena is freed. +// +// Export for test_peg_generator PyAPI_FUNC(int) _PyArena_AddPyObject(PyArena *, PyObject *); #ifdef __cplusplus diff --git a/Include/internal/pycore_pybuffer.h b/Include/internal/pycore_pybuffer.h new file mode 100644 index 000000000000000..3cbc290b2ea3ee5 --- /dev/null +++ b/Include/internal/pycore_pybuffer.h @@ -0,0 +1,21 @@ +#ifndef Py_INTERNAL_PYBUFFER_H +#define Py_INTERNAL_PYBUFFER_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +// Exported for the _xxinterpchannels module. 
+PyAPI_FUNC(int) _PyBuffer_ReleaseInInterpreter( + PyInterpreterState *interp, Py_buffer *view); +PyAPI_FUNC(int) _PyBuffer_ReleaseInInterpreterAndRawFree( + PyInterpreterState *interp, Py_buffer *view); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_PYBUFFER_H */ diff --git a/Include/internal/pycore_pyerrors.h b/Include/internal/pycore_pyerrors.h index 66f37942ef916af..0f16fb894d17e12 100644 --- a/Include/internal/pycore_pyerrors.h +++ b/Include/internal/pycore_pyerrors.h @@ -9,6 +9,59 @@ extern "C" { #endif +/* Error handling definitions */ + +extern _PyErr_StackItem* _PyErr_GetTopmostException(PyThreadState *tstate); +extern PyObject* _PyErr_GetHandledException(PyThreadState *); +extern void _PyErr_SetHandledException(PyThreadState *, PyObject *); +extern void _PyErr_GetExcInfo(PyThreadState *, PyObject **, PyObject **, PyObject **); + +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(void) _PyErr_SetKeyError(PyObject *); + + +// Like PyErr_Format(), but saves current exception as __context__ and +// __cause__. +// Export for '_sqlite3' shared extension. +PyAPI_FUNC(PyObject*) _PyErr_FormatFromCause( + PyObject *exception, + const char *format, /* ASCII-encoded string */ + ... + ); + +extern int _PyException_AddNote( + PyObject *exc, + PyObject *note); + +extern int _PyErr_CheckSignals(void); + +/* Support for adding program text to SyntaxErrors */ + +// Export for test_peg_generator +PyAPI_FUNC(PyObject*) _PyErr_ProgramDecodedTextObject( + PyObject *filename, + int lineno, + const char* encoding); + +extern PyObject* _PyUnicodeTranslateError_Create( + PyObject *object, + Py_ssize_t start, + Py_ssize_t end, + const char *reason /* UTF-8 encoded string */ + ); + +extern void _Py_NO_RETURN _Py_FatalErrorFormat( + const char *func, + const char *format, + ...); + +extern PyObject* _PyErr_SetImportErrorWithNameFrom( + PyObject *, + PyObject *, + PyObject *, + PyObject *); + + /* runtime lifecycle */ extern PyStatus _PyErr_InitTypes(PyInterpreterState *); @@ -20,7 +73,10 @@ extern void _PyErr_FiniTypes(PyInterpreterState *); static inline PyObject* _PyErr_Occurred(PyThreadState *tstate) { assert(tstate != NULL); - return tstate->curexc_type; + if (tstate->current_exception == NULL) { + return NULL; + } + return (PyObject *)Py_TYPE(tstate->current_exception); } static inline void _PyErr_ClearExcState(_PyErr_StackItem *exc_state) @@ -28,78 +84,92 @@ static inline void _PyErr_ClearExcState(_PyErr_StackItem *exc_state) Py_CLEAR(exc_state->exc_value); } -PyAPI_FUNC(PyObject*) _PyErr_StackItemToExcInfoTuple( +extern PyObject* _PyErr_StackItemToExcInfoTuple( _PyErr_StackItem *err_info); -PyAPI_FUNC(void) _PyErr_Fetch( +extern void _PyErr_Fetch( PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **traceback); -PyAPI_FUNC(int) _PyErr_ExceptionMatches( +extern PyObject* _PyErr_GetRaisedException(PyThreadState *tstate); + +extern int _PyErr_ExceptionMatches( PyThreadState *tstate, PyObject *exc); -PyAPI_FUNC(void) _PyErr_Restore( +extern void _PyErr_SetRaisedException(PyThreadState *tstate, PyObject *exc); + +extern void _PyErr_Restore( PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *traceback); -PyAPI_FUNC(void) _PyErr_SetObject( +extern void _PyErr_SetObject( PyThreadState *tstate, PyObject *type, PyObject *value); -PyAPI_FUNC(void) _PyErr_ChainStackItem( - _PyErr_StackItem *exc_info); +extern void _PyErr_ChainStackItem(void); -PyAPI_FUNC(void) _PyErr_Clear(PyThreadState *tstate); +extern void _PyErr_Clear(PyThreadState *tstate); 
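The chained re-raise that _PyErr_FormatFromCause() enables (see the comment earlier in this header) typically looks like the sketch below. It is illustrative only and not part of the change: load_widget() and the hypothetical do_load() helper are assumed to exist and to set an exception on failure.

/* Minimal sketch; illustrative only. */
static PyObject *
load_widget(PyObject *arg)
{
    PyObject *result = do_load(arg);   /* hypothetical helper */
    if (result == NULL) {
        /* The pending exception becomes __context__ and __cause__ of the
           new ImportError; like PyErr_Format(), the call returns NULL. */
        return _PyErr_FormatFromCause(PyExc_ImportError,
                                      "could not load widget from %R", arg);
    }
    return result;
}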
-PyAPI_FUNC(void) _PyErr_SetNone(PyThreadState *tstate, PyObject *exception); +extern void _PyErr_SetNone(PyThreadState *tstate, PyObject *exception); -PyAPI_FUNC(PyObject *) _PyErr_NoMemory(PyThreadState *tstate); +extern PyObject* _PyErr_NoMemory(PyThreadState *tstate); -PyAPI_FUNC(void) _PyErr_SetString( +extern void _PyErr_SetString( PyThreadState *tstate, PyObject *exception, const char *string); -PyAPI_FUNC(PyObject *) _PyErr_Format( +extern PyObject* _PyErr_Format( PyThreadState *tstate, PyObject *exception, const char *format, ...); -PyAPI_FUNC(void) _PyErr_NormalizeException( +extern void _PyErr_NormalizeException( PyThreadState *tstate, PyObject **exc, PyObject **val, PyObject **tb); -PyAPI_FUNC(PyObject *) _PyErr_FormatFromCauseTstate( +extern PyObject* _PyErr_FormatFromCauseTstate( PyThreadState *tstate, PyObject *exception, const char *format, ...); -PyAPI_FUNC(PyObject *) _PyExc_CreateExceptionGroup( +extern PyObject* _PyExc_CreateExceptionGroup( const char *msg, PyObject *excs); -PyAPI_FUNC(PyObject *) _PyExc_PrepReraiseStar( +extern PyObject* _PyExc_PrepReraiseStar( PyObject *orig, PyObject *excs); -PyAPI_FUNC(int) _PyErr_CheckSignalsTstate(PyThreadState *tstate); - -PyAPI_FUNC(void) _Py_DumpExtensionModules(int fd, PyInterpreterState *interp); +extern int _PyErr_CheckSignalsTstate(PyThreadState *tstate); +extern void _Py_DumpExtensionModules(int fd, PyInterpreterState *interp); +extern PyObject* _Py_CalculateSuggestions(PyObject *dir, PyObject *name); extern PyObject* _Py_Offer_Suggestions(PyObject* exception); + +// Export for '_testinternalcapi' shared extension PyAPI_FUNC(Py_ssize_t) _Py_UTF8_Edit_Cost(PyObject *str_a, PyObject *str_b, Py_ssize_t max_cost); +void _PyErr_FormatNote(const char *format, ...); + +/* Context manipulation (PEP 3134) */ + +Py_DEPRECATED(3.12) extern void _PyErr_ChainExceptions(PyObject *, PyObject *, PyObject *); + +// Export for '_zoneinfo' shared extension +PyAPI_FUNC(void) _PyErr_ChainExceptions1(PyObject *); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_pyhash.h b/Include/internal/pycore_pyhash.h index a229f8d8b7f0a2a..c3b72d90de3a694 100644 --- a/Include/internal/pycore_pyhash.h +++ b/Include/internal/pycore_pyhash.h @@ -1,10 +1,95 @@ -#ifndef Py_INTERNAL_HASH_H -#define Py_INTERNAL_HASH_H +#ifndef Py_INTERNAL_PYHASH_H +#define Py_INTERNAL_PYHASH_H #ifndef Py_BUILD_CORE # error "this header requires Py_BUILD_CORE define" #endif -uint64_t _Py_KeyedHash(uint64_t, const char *, Py_ssize_t); +// Similar to _Py_HashPointer(), but don't replace -1 with -2 +extern Py_hash_t _Py_HashPointerRaw(const void*); +// Export for '_datetime' shared extension +PyAPI_FUNC(Py_hash_t) _Py_HashBytes(const void*, Py_ssize_t); + +/* Hash secret + * + * memory layout on 64 bit systems + * cccccccc cccccccc cccccccc uc -- unsigned char[24] + * pppppppp ssssssss ........ fnv -- two Py_hash_t + * k0k0k0k0 k1k1k1k1 ........ siphash -- two uint64_t + * ........ ........ ssssssss djbx33a -- 16 bytes padding + one Py_hash_t + * ........ ........ eeeeeeee pyexpat XML hash salt + * + * memory layout on 32 bit systems + * cccccccc cccccccc cccccccc uc + * ppppssss ........ ........ fnv -- two Py_hash_t + * k0k0k0k0 k1k1k1k1 ........ siphash -- two uint64_t (*) + * ........ ........ ssss.... djbx33a -- 16 bytes padding + one Py_hash_t + * ........ ........ eeee.... pyexpat XML hash salt + * + * (*) The siphash member may not be available on 32 bit platforms without + * an unsigned int64 data type. 
+ */ +typedef union { + /* ensure 24 bytes */ + unsigned char uc[24]; + /* two Py_hash_t for FNV */ + struct { + Py_hash_t prefix; + Py_hash_t suffix; + } fnv; + /* two uint64 for SipHash24 */ + struct { + uint64_t k0; + uint64_t k1; + } siphash; + /* a different (!) Py_hash_t for small string optimization */ + struct { + unsigned char padding[16]; + Py_hash_t suffix; + } djbx33a; + struct { + unsigned char padding[16]; + Py_hash_t hashsalt; + } expat; +} _Py_HashSecret_t; + +// Export for '_elementtree' shared extension +PyAPI_DATA(_Py_HashSecret_t) _Py_HashSecret; + +#ifdef Py_DEBUG +extern int _Py_HashSecret_Initialized; +#endif + + +struct pyhash_runtime_state { + struct { +#ifndef MS_WINDOWS + int fd; + dev_t st_dev; + ino_t st_ino; +#else + // This is a placeholder so the struct isn't empty on Windows. + int _not_used; #endif + } urandom_cache; +}; + +#ifndef MS_WINDOWS +# define _py_urandom_cache_INIT \ + { \ + .fd = -1, \ + } +#else +# define _py_urandom_cache_INIT {0} +#endif + +#define pyhash_state_INIT \ + { \ + .urandom_cache = _py_urandom_cache_INIT, \ + } + + +extern uint64_t _Py_KeyedHash(uint64_t key, const void *src, Py_ssize_t src_sz); + +#endif // !Py_INTERNAL_PYHASH_H diff --git a/Include/internal/pycore_pylifecycle.h b/Include/internal/pycore_pylifecycle.h index b4718b8ade2354a..61e0150e89009ce 100644 --- a/Include/internal/pycore_pylifecycle.h +++ b/Include/internal/pycore_pylifecycle.h @@ -14,10 +14,6 @@ extern "C" { struct _PyArgv; struct pyruntimestate; -/* True if the main interpreter thread exited due to an unhandled - * KeyboardInterrupt exception, suggesting the user pressed ^C. */ -PyAPI_DATA(int) _Py_UnhandledKeyboardInterrupt; - extern int _Py_SetFileSystemEncoding( const char *encoding, const char *errors); @@ -27,14 +23,12 @@ extern PyStatus _PyUnicode_InitEncodings(PyThreadState *tstate); extern int _PyUnicode_EnableLegacyWindowsFSEncoding(void); #endif -PyAPI_FUNC(void) _Py_ClearStandardStreamEncoding(void); - -PyAPI_FUNC(int) _Py_IsLocaleCoercionTarget(const char *ctype_loc); +extern int _Py_IsLocaleCoercionTarget(const char *ctype_loc); /* Various one-time initializers */ +extern void _Py_InitVersion(void); extern PyStatus _PyFaulthandler_Init(int enable); -extern int _PyTraceMalloc_Init(int enable); extern PyObject * _PyBuiltin_Init(PyInterpreterState *interp); extern PyStatus _PySys_Create( PyThreadState *tstate, @@ -42,11 +36,11 @@ extern PyStatus _PySys_Create( extern PyStatus _PySys_ReadPreinitWarnOptions(PyWideStringList *options); extern PyStatus _PySys_ReadPreinitXOptions(PyConfig *config); extern int _PySys_UpdateConfig(PyThreadState *tstate); -extern void _PySys_Fini(PyInterpreterState *interp); +extern void _PySys_FiniTypes(PyInterpreterState *interp); extern int _PyBuiltins_AddExceptions(PyObject * bltinmod); extern PyStatus _Py_HashRandomization_Init(const PyConfig *); -extern PyStatus _PyImportZip_Init(PyThreadState *tstate); +extern PyStatus _PyTime_Init(void); extern PyStatus _PyGC_Init(PyInterpreterState *interp); extern PyStatus _PyAtExit_Init(PyInterpreterState *interp); extern int _Py_Deepfreeze_Init(void); @@ -56,8 +50,6 @@ extern int _Py_Deepfreeze_Init(void); extern int _PySignal_Init(int install_signal_handlers); extern void _PySignal_Fini(void); -extern void _PyImport_Fini(void); -extern void _PyImport_Fini2(void); extern void _PyGC_Fini(PyInterpreterState *interp); extern void _Py_HashRandomization_Fini(void); extern void _PyFaulthandler_Fini(void); @@ -69,34 +61,62 @@ extern void _PyAtExit_Fini(PyInterpreterState *interp); 
extern void _PyThread_FiniType(PyInterpreterState *interp); extern void _Py_Deepfreeze_Fini(void); extern void _PyArg_Fini(void); +extern void _Py_FinalizeAllocatedBlocks(_PyRuntimeState *); -extern PyStatus _PyGILState_Init(_PyRuntimeState *runtime); +extern PyStatus _PyGILState_Init(PyInterpreterState *interp); extern PyStatus _PyGILState_SetTstate(PyThreadState *tstate); extern void _PyGILState_Fini(PyInterpreterState *interp); -PyAPI_FUNC(void) _PyGC_DumpShutdownStats(PyInterpreterState *interp); +extern void _PyGC_DumpShutdownStats(PyInterpreterState *interp); -PyAPI_FUNC(PyStatus) _Py_PreInitializeFromPyArgv( +extern PyStatus _Py_PreInitializeFromPyArgv( const PyPreConfig *src_config, const struct _PyArgv *args); -PyAPI_FUNC(PyStatus) _Py_PreInitializeFromConfig( +extern PyStatus _Py_PreInitializeFromConfig( const PyConfig *config, const struct _PyArgv *args); -PyAPI_FUNC(wchar_t *) _Py_GetStdlibDir(void); +extern wchar_t * _Py_GetStdlibDir(void); -PyAPI_FUNC(int) _Py_HandleSystemExit(int *exitcode_p); +extern int _Py_HandleSystemExit(int *exitcode_p); -PyAPI_FUNC(PyObject*) _PyErr_WriteUnraisableDefaultHook(PyObject *unraisable); +extern PyObject* _PyErr_WriteUnraisableDefaultHook(PyObject *unraisable); -PyAPI_FUNC(void) _PyErr_Print(PyThreadState *tstate); -PyAPI_FUNC(void) _PyErr_Display(PyObject *file, PyObject *exception, +extern void _PyErr_Print(PyThreadState *tstate); +extern void _PyErr_Display(PyObject *file, PyObject *exception, PyObject *value, PyObject *tb); +extern void _PyErr_DisplayException(PyObject *file, PyObject *exc); -PyAPI_FUNC(void) _PyThreadState_DeleteCurrent(PyThreadState *tstate); +extern void _PyThreadState_DeleteCurrent(PyThreadState *tstate); extern void _PyAtExit_Call(PyInterpreterState *interp); +extern int _Py_IsCoreInitialized(void); + +extern int _Py_FdIsInteractive(FILE *fp, PyObject *filename); + +extern const char* _Py_gitidentifier(void); +extern const char* _Py_gitversion(void); + +// Export for '_asyncio' shared extension +PyAPI_FUNC(int) _Py_IsInterpreterFinalizing(PyInterpreterState *interp); + +/* Random */ +extern int _PyOS_URandom(void *buffer, Py_ssize_t size); + +// Export for '_random' shared extension +PyAPI_FUNC(int) _PyOS_URandomNonblock(void *buffer, Py_ssize_t size); + +/* Legacy locale support */ +extern int _Py_CoerceLegacyLocale(int warn); +extern int _Py_LegacyLocaleDetected(int warn); + +// Export for 'readline' shared extension +PyAPI_FUNC(char*) _Py_SetLocaleFromEnv(int category); + +// Export for special main.c string compiling with source tracebacks +int _PyRun_SimpleStringFlagsWithName(const char *command, const char* name, PyCompilerFlags *flags); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_pymath.h b/Include/internal/pycore_pymath.h index 5f3afe4df6865c0..7a4e1c1eb714f73 100644 --- a/Include/internal/pycore_pymath.h +++ b/Include/internal/pycore_pymath.h @@ -56,21 +56,6 @@ static inline void _Py_ADJUST_ERANGE2(double x, double y) } } -// Return the maximum value of integral type *type*. -#define _Py_IntegralTypeMax(type) \ - (_Py_IS_TYPE_SIGNED(type) ? (((((type)1 << (sizeof(type)*CHAR_BIT - 2)) - 1) << 1) + 1) : ~(type)0) - -// Return the minimum value of integral type *type*. -#define _Py_IntegralTypeMin(type) \ - (_Py_IS_TYPE_SIGNED(type) ? -_Py_IntegralTypeMax(type) - 1 : 0) - -// Check whether *v* is in the range of integral type *type*. 
This is most -// useful if *v* is floating-point, since demoting a floating-point *v* to an -// integral type that cannot represent *v*'s integral part is undefined -// behavior. -#define _Py_InIntegralTypeRange(type, v) \ - (_Py_IntegralTypeMin(type) <= v && v <= _Py_IntegralTypeMax(type)) - //--- HAVE_PY_SET_53BIT_PRECISION macro ------------------------------------ // diff --git a/Include/internal/pycore_pymem.h b/Include/internal/pycore_pymem.h index b9eea9d4b30ad16..6b5113714dbeb28 100644 --- a/Include/internal/pycore_pymem.h +++ b/Include/internal/pycore_pymem.h @@ -8,13 +8,47 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pymem.h" // PyMemAllocatorName +// Try to get the allocators name set by _PyMem_SetupAllocators(). +// Return NULL if unknown. +// Export for '_testinternalcapi' shared extension. +PyAPI_FUNC(const char*) _PyMem_GetCurrentAllocatorName(void); + +// strdup() using PyMem_RawMalloc() +extern char* _PyMem_RawStrdup(const char *str); + +// strdup() using PyMem_Malloc(). +// Export for '_pickle ' shared extension. +PyAPI_FUNC(char*) _PyMem_Strdup(const char *str); + +// wcsdup() using PyMem_RawMalloc() +extern wchar_t* _PyMem_RawWcsdup(const wchar_t *str); + +typedef struct { + /* We tag each block with an API ID in order to tag API violations */ + char api_id; + PyMemAllocatorEx alloc; +} debug_alloc_api_t; + +struct _pymem_allocators { + PyThread_type_lock mutex; + struct { + PyMemAllocatorEx raw; + PyMemAllocatorEx mem; + PyMemAllocatorEx obj; + } standard; + struct { + debug_alloc_api_t raw; + debug_alloc_api_t mem; + debug_alloc_api_t obj; + } debug; + PyObjectArenaAllocator obj_arena; +}; /* Set the memory allocator of the specified domain to the default. Save the old allocator into *old_alloc if it's non-NULL. Return on success, or return -1 if the domain is unknown. */ -PyAPI_FUNC(int) _PyMem_SetDefaultAllocator( +extern int _PyMem_SetDefaultAllocator( PyMemAllocatorDomain domain, PyMemAllocatorEx *old_alloc); @@ -60,53 +94,15 @@ static inline int _PyMem_IsPtrFreed(const void *ptr) #endif } -PyAPI_FUNC(int) _PyMem_GetAllocatorName( +extern int _PyMem_GetAllocatorName( const char *name, PyMemAllocatorName *allocator); /* Configure the Python memory allocators. Pass PYMEM_ALLOCATOR_DEFAULT to use default allocators. PYMEM_ALLOCATOR_NOT_SET does nothing. */ -PyAPI_FUNC(int) _PyMem_SetupAllocators(PyMemAllocatorName allocator); - -struct _PyTraceMalloc_Config { - /* Module initialized? - Variable protected by the GIL */ - enum { - TRACEMALLOC_NOT_INITIALIZED, - TRACEMALLOC_INITIALIZED, - TRACEMALLOC_FINALIZED - } initialized; - - /* Is tracemalloc tracing memory allocations? - Variable protected by the GIL */ - int tracing; - - /* limit of the number of frames in a traceback, 1 by default. - Variable protected by the GIL. */ - int max_nframe; -}; - -#define _PyTraceMalloc_Config_INIT \ - {.initialized = TRACEMALLOC_NOT_INITIALIZED, \ - .tracing = 0, \ - .max_nframe = 1} - -PyAPI_DATA(struct _PyTraceMalloc_Config) _Py_tracemalloc_config; +extern int _PyMem_SetupAllocators(PyMemAllocatorName allocator); -/* Allocate memory directly from the O/S virtual memory system, - * where supported. 
Otherwise fallback on malloc */ -void *_PyObject_VirtualAlloc(size_t size); -void _PyObject_VirtualFree(void *, size_t size); - -/* This function returns the number of allocated memory blocks, regardless of size */ -PyAPI_FUNC(Py_ssize_t) _Py_GetAllocatedBlocks(void); - -/* Macros */ -#ifdef WITH_PYMALLOC -// Export the symbol for the 3rd party guppy3 project -PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out); -#endif #ifdef __cplusplus } diff --git a/Include/internal/pycore_pymem_init.h b/Include/internal/pycore_pymem_init.h new file mode 100644 index 000000000000000..11fbe16fa6f1d5f --- /dev/null +++ b/Include/internal/pycore_pymem_init.h @@ -0,0 +1,84 @@ +#ifndef Py_INTERNAL_PYMEM_INIT_H +#define Py_INTERNAL_PYMEM_INIT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +/********************************/ +/* the allocators' initializers */ + +extern void * _PyMem_RawMalloc(void *, size_t); +extern void * _PyMem_RawCalloc(void *, size_t, size_t); +extern void * _PyMem_RawRealloc(void *, void *, size_t); +extern void _PyMem_RawFree(void *, void *); +#define PYRAW_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} + +#if defined(WITH_PYMALLOC) +extern void* _PyObject_Malloc(void *, size_t); +extern void* _PyObject_Calloc(void *, size_t, size_t); +extern void _PyObject_Free(void *, void *); +extern void* _PyObject_Realloc(void *, void *, size_t); +# define PYOBJ_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} +# define PYMEM_ALLOC PYOBJ_ALLOC +#else +# define PYOBJ_ALLOC PYRAW_ALLOC +# define PYMEM_ALLOC PYOBJ_ALLOC +#endif // WITH_PYMALLOC + + +extern void* _PyMem_DebugRawMalloc(void *, size_t); +extern void* _PyMem_DebugRawCalloc(void *, size_t, size_t); +extern void* _PyMem_DebugRawRealloc(void *, void *, size_t); +extern void _PyMem_DebugRawFree(void *, void *); + +extern void* _PyMem_DebugMalloc(void *, size_t); +extern void* _PyMem_DebugCalloc(void *, size_t, size_t); +extern void* _PyMem_DebugRealloc(void *, void *, size_t); +extern void _PyMem_DebugFree(void *, void *); + +#define PYDBGRAW_ALLOC(runtime) \ + {&(runtime).allocators.debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} +#define PYDBGMEM_ALLOC(runtime) \ + {&(runtime).allocators.debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} +#define PYDBGOBJ_ALLOC(runtime) \ + {&(runtime).allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} + +extern void * _PyMem_ArenaAlloc(void *, size_t); +extern void _PyMem_ArenaFree(void *, void *, size_t); + +#ifdef Py_DEBUG +# define _pymem_allocators_standard_INIT(runtime) \ + { \ + PYDBGRAW_ALLOC(runtime), \ + PYDBGMEM_ALLOC(runtime), \ + PYDBGOBJ_ALLOC(runtime), \ + } +#else +# define _pymem_allocators_standard_INIT(runtime) \ + { \ + PYRAW_ALLOC, \ + PYMEM_ALLOC, \ + PYOBJ_ALLOC, \ + } +#endif + +#define _pymem_allocators_debug_INIT \ + { \ + {'r', PYRAW_ALLOC}, \ + {'m', PYMEM_ALLOC}, \ + {'o', PYOBJ_ALLOC}, \ + } + +# define _pymem_allocators_obj_arena_INIT \ + { NULL, _PyMem_ArenaAlloc, _PyMem_ArenaFree } + + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_PYMEM_INIT_H diff --git a/Include/internal/pycore_pystate.h b/Include/internal/pycore_pystate.h index 3d6d400f74dd1de..7fa952e371d7b46 100644 --- a/Include/internal/pycore_pystate.h +++ b/Include/internal/pycore_pystate.h @@ -8,7 +8,34 @@ 
extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_runtime.h" /* PyRuntimeState */ +#include "pycore_runtime.h" // _PyRuntime + + +// Values for PyThreadState.state. A thread must be in the "attached" state +// before calling most Python APIs. If the GIL is enabled, then "attached" +// implies that the thread holds the GIL and "detached" implies that the +// thread does not hold the GIL (or is in the process of releasing it). In +// `--disable-gil` builds, multiple threads may be "attached" to the same +// interpreter at the same time. Only the "bound" thread may perform the +// transitions between "attached" and "detached" on its own PyThreadState. +// +// The "gc" state is used to implement stop-the-world pauses, such as for +// cyclic garbage collection. It is only used in `--disable-gil` builds. It is +// similar to the "detached" state, but only the thread performing a +// stop-the-world pause may transition threads between the "detached" and "gc" +// states. A thread trying to "attach" from the "gc" state will block until +// it is transitioned back to "detached" when the stop-the-world pause is +// complete. +// +// State transition diagram: +// +// (bound thread) (stop-the-world thread) +// [attached] <-> [detached] <-> [gc] +// +// See `_PyThreadState_Attach()` and `_PyThreadState_Detach()`. +#define _Py_THREAD_DETACHED 0 +#define _Py_THREAD_ATTACHED 1 +#define _Py_THREAD_GC 2 /* Check if the current thread is the main thread. @@ -33,6 +60,23 @@ _Py_IsMainInterpreter(PyInterpreterState *interp) return (interp == _PyInterpreterState_Main()); } +static inline int +_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp) +{ + /* bpo-39877: Access _PyRuntime directly rather than using + tstate->interp->runtime to support calls from Python daemon threads. + After Py_Finalize() has been called, tstate can be a dangling pointer: + point to PyThreadState freed memory. */ + return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL && + interp == &_PyRuntime._main_interpreter); +} + +// Export for _xxsubinterpreters module. +PyAPI_FUNC(int) _PyInterpreterState_SetRunningMain(PyInterpreterState *); +PyAPI_FUNC(void) _PyInterpreterState_SetNotRunningMain(PyInterpreterState *); +PyAPI_FUNC(int) _PyInterpreterState_IsRunningMain(PyInterpreterState *); +PyAPI_FUNC(int) _PyInterpreterState_FailIfRunningMain(PyInterpreterState *); + static inline const PyConfig * _Py_GetMainConfig(void) @@ -53,38 +97,56 @@ _Py_ThreadCanHandleSignals(PyInterpreterState *interp) } -/* Only execute pending calls on the main thread. */ -static inline int -_Py_ThreadCanHandlePendingCalls(void) -{ - return _Py_IsMainThread(); -} +/* Variable and static inline functions for in-line access to current thread + and interpreter state */ +#if defined(HAVE_THREAD_LOCAL) && !defined(Py_BUILD_CORE_MODULE) +extern _Py_thread_local PyThreadState *_Py_tss_tstate; +#endif -/* Variable and macro for in-line access to current thread - and interpreter state */ +#ifndef NDEBUG +extern int _PyThreadState_CheckConsistency(PyThreadState *tstate); +#endif -static inline PyThreadState* -_PyRuntimeState_GetThreadState(_PyRuntimeState *runtime) -{ - return (PyThreadState*)_Py_atomic_load_relaxed(&runtime->gilstate.tstate_current); -} +int _PyThreadState_MustExit(PyThreadState *tstate); + +// Export for most shared extensions, used via _PyThreadState_GET() static +// inline function. +PyAPI_FUNC(PyThreadState *) _PyThreadState_GetCurrent(void); /* Get the current Python thread state. 
- Efficient macro reading directly the 'gilstate.tstate_current' atomic - variable. The macro is unsafe: it does not check for error and it can - return NULL. + This function is unsafe: it does not check for error and it can return NULL. The caller must hold the GIL. - See also PyThreadState_Get() and _PyThreadState_UncheckedGet(). */ + See also PyThreadState_Get() and PyThreadState_GetUnchecked(). */ static inline PyThreadState* _PyThreadState_GET(void) { - return _PyRuntimeState_GetThreadState(&_PyRuntime); +#if defined(HAVE_THREAD_LOCAL) && !defined(Py_BUILD_CORE_MODULE) + return _Py_tss_tstate; +#else + return _PyThreadState_GetCurrent(); +#endif } +// Attaches the current thread to the interpreter. +// +// This may block while acquiring the GIL (if the GIL is enabled) or while +// waiting for a stop-the-world pause (if the GIL is disabled). +// +// High-level code should generally call PyEval_RestoreThread() instead, which +// calls this function. +void _PyThreadState_Attach(PyThreadState *tstate); + +// Detaches the current thread from the interpreter. +// +// High-level code should generally call PyEval_SaveThread() instead, which +// calls this function. +void _PyThreadState_Detach(PyThreadState *tstate); + + static inline void _Py_EnsureFuncTstateNotNULL(const char *func, PyThreadState *tstate) { @@ -103,11 +165,11 @@ _Py_EnsureFuncTstateNotNULL(const char *func, PyThreadState *tstate) /* Get the current interpreter state. - The macro is unsafe: it does not check for error and it can return NULL. + The function is unsafe: it does not check for error and it can return NULL. The caller must hold the GIL. - See also _PyInterpreterState_Get() + See also PyInterpreterState_Get() and _PyGILState_GetInterpreterStateUnsafe(). */ static inline PyInterpreterState* _PyInterpreterState_GET(void) { PyThreadState *tstate = _PyThreadState_GET(); @@ -120,47 +182,61 @@ static inline PyInterpreterState* _PyInterpreterState_GET(void) { // PyThreadState functions -PyAPI_FUNC(void) _PyThreadState_SetCurrent(PyThreadState *tstate); -// We keep this around exclusively for stable ABI compatibility. -PyAPI_FUNC(void) _PyThreadState_Init( - PyThreadState *tstate); -PyAPI_FUNC(void) _PyThreadState_DeleteExcept( - _PyRuntimeState *runtime, - PyThreadState *tstate); +extern PyThreadState * _PyThreadState_New( + PyInterpreterState *interp, + int whence); +extern void _PyThreadState_Bind(PyThreadState *tstate); +extern void _PyThreadState_DeleteExcept(PyThreadState *tstate); +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(PyObject*) _PyThreadState_GetDict(PyThreadState *tstate); -static inline void -_PyThreadState_UpdateTracingState(PyThreadState *tstate) -{ - bool use_tracing = - (tstate->tracing == 0) && - (tstate->c_tracefunc != NULL || tstate->c_profilefunc != NULL); - tstate->cframe->use_tracing = (use_tracing ? 255 : 0); -} +/* The implementation of sys._current_exceptions() Returns a dict mapping + thread id to that thread's current exception. 
+*/ +extern PyObject* _PyThread_CurrentExceptions(void); /* Other */ -PyAPI_FUNC(PyThreadState *) _PyThreadState_Swap( - struct _gilstate_runtime_state *gilstate, +extern PyThreadState * _PyThreadState_Swap( + _PyRuntimeState *runtime, PyThreadState *newts); -PyAPI_FUNC(PyStatus) _PyInterpreterState_Enable(_PyRuntimeState *runtime); +extern PyStatus _PyInterpreterState_Enable(_PyRuntimeState *runtime); #ifdef HAVE_FORK extern PyStatus _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime); -extern PyStatus _PyGILState_Reinit(_PyRuntimeState *runtime); extern void _PySignal_AfterFork(void); #endif - +// Export for the stable ABI PyAPI_FUNC(int) _PyState_AddModule( PyThreadState *tstate, PyObject* module, PyModuleDef* def); -PyAPI_FUNC(int) _PyOS_InterruptOccurred(PyThreadState *tstate); +extern int _PyOS_InterruptOccurred(PyThreadState *tstate); + +#define HEAD_LOCK(runtime) \ + PyThread_acquire_lock((runtime)->interpreters.mutex, WAIT_LOCK) +#define HEAD_UNLOCK(runtime) \ + PyThread_release_lock((runtime)->interpreters.mutex) + +// Get the configuration of the current interpreter. +// The caller must hold the GIL. +// Export for test_peg_generator. +PyAPI_FUNC(const PyConfig*) _Py_GetConfig(void); + +// Get the single PyInterpreterState used by this process' GILState +// implementation. +// +// This function doesn't check for error. Return NULL before _PyGILState_Init() +// is called and after _PyGILState_Fini() is called. +// +// See also PyInterpreterState_Get() and _PyInterpreterState_GET(). +extern PyInterpreterState* _PyGILState_GetInterpreterStateUnsafe(void); #ifdef __cplusplus } diff --git a/Include/internal/pycore_pystats.h b/Include/internal/pycore_pystats.h new file mode 100644 index 000000000000000..f8af398a5605861 --- /dev/null +++ b/Include/internal/pycore_pystats.h @@ -0,0 +1,21 @@ +#ifndef Py_INTERNAL_PYSTATS_H +#define Py_INTERNAL_PYSTATS_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#ifdef Py_STATS +extern void _Py_StatsOn(void); +extern void _Py_StatsOff(void); +extern void _Py_StatsClear(void); +extern int _Py_PrintSpecializationStats(int to_file); +#endif + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_PYSTATS_H diff --git a/Include/internal/pycore_pythonrun.h b/Include/internal/pycore_pythonrun.h new file mode 100644 index 000000000000000..0bfc5704dc4c594 --- /dev/null +++ b/Include/internal/pycore_pythonrun.h @@ -0,0 +1,39 @@ +#ifndef Py_INTERNAL_PYTHONRUN_H +#define Py_INTERNAL_PYTHONRUN_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +extern int _PyRun_SimpleFileObject( + FILE *fp, + PyObject *filename, + int closeit, + PyCompilerFlags *flags); + +extern int _PyRun_AnyFileObject( + FILE *fp, + PyObject *filename, + int closeit, + PyCompilerFlags *flags); + +extern int _PyRun_InteractiveLoopObject( + FILE *fp, + PyObject *filename, + PyCompilerFlags *flags); + +extern const char* _Py_SourceAsString( + PyObject *cmd, + const char *funcname, + const char *what, + PyCompilerFlags *cf, + PyObject **cmd_copy); + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_PYTHONRUN_H + diff --git a/Include/internal/pycore_pythread.h b/Include/internal/pycore_pythread.h new file mode 100644 index 000000000000000..9c9a09f60f34418 --- /dev/null +++ b/Include/internal/pycore_pythread.h @@ -0,0 +1,155 @@ +#ifndef Py_INTERNAL_PYTHREAD_H +#define Py_INTERNAL_PYTHREAD_H +#ifdef __cplusplus +extern 
"C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "dynamic_annotations.h" // _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX + +// Get _POSIX_THREADS and _POSIX_SEMAPHORES macros if available +#if (defined(HAVE_UNISTD_H) && !defined(_POSIX_THREADS) \ + && !defined(_POSIX_SEMAPHORES)) +# include // _POSIX_THREADS, _POSIX_SEMAPHORES +#endif +#if (defined(HAVE_PTHREAD_H) && !defined(_POSIX_THREADS) \ + && !defined(_POSIX_SEMAPHORES)) + // This means pthreads are not implemented in libc headers, hence the macro + // not present in . But they still can be implemented as an + // external library (e.g. gnu pth in pthread emulation) +# include // _POSIX_THREADS, _POSIX_SEMAPHORES +#endif +#if !defined(_POSIX_THREADS) && defined(__hpux) && defined(_SC_THREADS) + // Check if we're running on HP-UX and _SC_THREADS is defined. If so, then + // enough of the POSIX threads package is implemented to support Python + // threads. + // + // This is valid for HP-UX 11.23 running on an ia64 system. If needed, add + // a check of __ia64 to verify that we're running on an ia64 system instead + // of a pa-risc system. +# define _POSIX_THREADS +#endif + + +#if defined(_POSIX_THREADS) || defined(HAVE_PTHREAD_STUBS) +# define _USE_PTHREADS +#endif + +#if defined(_USE_PTHREADS) && defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) +// monotonic is supported statically. It doesn't mean it works on runtime. +# define CONDATTR_MONOTONIC +#endif + + +#if defined(HAVE_PTHREAD_STUBS) +#include // bool + +// pthread_key +struct py_stub_tls_entry { + bool in_use; + void *value; +}; +#endif + +struct _pythread_runtime_state { + int initialized; + +#ifdef _USE_PTHREADS + // This matches when thread_pthread.h is used. + struct { + /* NULL when pthread_condattr_setclock(CLOCK_MONOTONIC) is not supported. */ + pthread_condattr_t *ptr; +# ifdef CONDATTR_MONOTONIC + /* The value to which condattr_monotonic is set. */ + pthread_condattr_t val; +# endif + } _condattr_monotonic; + +#endif // USE_PTHREADS + +#if defined(HAVE_PTHREAD_STUBS) + struct { + struct py_stub_tls_entry tls_entries[PTHREAD_KEYS_MAX]; + } stubs; +#endif +}; + + +#ifdef HAVE_FORK +/* Private function to reinitialize a lock at fork in the child process. + Reset the lock to the unlocked state. + Return 0 on success, return -1 on error. */ +extern int _PyThread_at_fork_reinit(PyThread_type_lock *lock); +#endif /* HAVE_FORK */ + + +// unset: -1 seconds, in nanoseconds +#define PyThread_UNSET_TIMEOUT ((_PyTime_t)(-1 * 1000 * 1000 * 1000)) + +// Exported for the _xxinterpchannels module. +PyAPI_FUNC(int) PyThread_ParseTimeoutArg( + PyObject *arg, + int blocking, + PY_TIMEOUT_T *timeout); + +/* Helper to acquire an interruptible lock with a timeout. If the lock acquire + * is interrupted, signal handlers are run, and if they raise an exception, + * PY_LOCK_INTR is returned. Otherwise, PY_LOCK_ACQUIRED or PY_LOCK_FAILURE + * are returned, depending on whether the lock can be acquired within the + * timeout. + */ +// Exported for the _xxinterpchannels module. +PyAPI_FUNC(PyLockStatus) PyThread_acquire_lock_timed_with_retries( + PyThread_type_lock, + PY_TIMEOUT_T microseconds); + +typedef unsigned long long PyThread_ident_t; +typedef Py_uintptr_t PyThread_handle_t; + +#define PY_FORMAT_THREAD_IDENT_T "llu" +#define Py_PARSE_THREAD_IDENT_T "K" + +PyAPI_FUNC(PyThread_ident_t) PyThread_get_thread_ident_ex(void); + +/* Thread joining APIs. 
+ * + * These APIs have a strict contract: + * - Either PyThread_join_thread or PyThread_detach_thread must be called + * exactly once with the given handle. + * - Calling neither PyThread_join_thread nor PyThread_detach_thread results + * in a resource leak until the end of the process. + * - Any other usage, such as calling both PyThread_join_thread and + * PyThread_detach_thread, or calling them more than once (including + * simultaneously), results in undefined behavior. + */ +PyAPI_FUNC(int) PyThread_start_joinable_thread(void (*func)(void *), + void *arg, + PyThread_ident_t* ident, + PyThread_handle_t* handle); +/* + * Join a thread started with `PyThread_start_joinable_thread`. + * This function cannot be interrupted. It returns 0 on success, + * a non-zero value on failure. + */ +PyAPI_FUNC(int) PyThread_join_thread(PyThread_handle_t); +/* + * Detach a thread started with `PyThread_start_joinable_thread`, such + * that its resources are relased as soon as it exits. + * This function cannot be interrupted. It returns 0 on success, + * a non-zero value on failure. + */ +PyAPI_FUNC(int) PyThread_detach_thread(PyThread_handle_t); + +/* + * Obtain the new thread ident and handle in a forked child process. + */ +PyAPI_FUNC(void) PyThread_update_thread_after_fork(PyThread_ident_t* ident, + PyThread_handle_t* handle); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_PYTHREAD_H */ diff --git a/Include/internal/pycore_range.h b/Include/internal/pycore_range.h index 809e89a1e01b60d..bf045ec4fd8332d 100644 --- a/Include/internal/pycore_range.h +++ b/Include/internal/pycore_range.h @@ -10,7 +10,6 @@ extern "C" { typedef struct { PyObject_HEAD - long index; long start; long step; long len; diff --git a/Include/internal/pycore_runtime.h b/Include/internal/pycore_runtime.h index d1fbc09f1ea2064..e6efe8b646e86f8 100644 --- a/Include/internal/pycore_runtime.h +++ b/Include/internal/pycore_runtime.h @@ -8,25 +8,26 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_atomic.h" /* _Py_atomic_address */ -#include "pycore_gil.h" // struct _gil_runtime_state -#include "pycore_global_objects.h" // struct _Py_global_objects +#include "pycore_atexit.h" // struct _atexit_runtime_state +#include "pycore_ceval_state.h" // struct _ceval_runtime_state +#include "pycore_crossinterp.h" // struct _xidregistry +#include "pycore_faulthandler.h" // struct _faulthandler_runtime_state +#include "pycore_floatobject.h" // struct _Py_float_runtime_state +#include "pycore_import.h" // struct _import_runtime_state #include "pycore_interp.h" // PyInterpreterState -#include "pycore_unicodeobject.h" // struct _Py_unicode_runtime_ids +#include "pycore_object_state.h" // struct _py_object_runtime_state +#include "pycore_parser.h" // struct _parser_runtime_state +#include "pycore_pyhash.h" // struct pyhash_runtime_state +#include "pycore_pymem.h" // struct _pymem_allocators +#include "pycore_pythread.h" // struct _pythread_runtime_state +#include "pycore_signal.h" // struct _signals_runtime_state +#include "pycore_time.h" // struct _time_runtime_state +#include "pycore_tracemalloc.h" // struct _tracemalloc_runtime_state +#include "pycore_typeobject.h" // struct _types_runtime_state +#include "pycore_unicodeobject.h" // struct _Py_unicode_runtime_state struct _getargs_runtime_state { - PyThread_type_lock mutex; -}; - -/* ceval state */ - -struct _ceval_runtime_state { - /* Request for checking signals. It is shared by all interpreters (see - bpo-40513). 
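
To make the joinable-thread contract above concrete, here is a hedged sketch of a caller honoring it: the handle returned by PyThread_start_joinable_thread() is consumed exactly once, by either joining or detaching (the worker body and run_worker() are illustrative; the header is internal-only):

#include "Python.h"
#include "pycore_pythread.h"   // internal: PyThread_ident_t, PyThread_handle_t

static void
worker(void *arg)
{
    (void)arg;
    /* ... thread body ... */
}

static int
run_worker(int wait_for_completion)
{
    PyThread_ident_t ident;
    PyThread_handle_t handle;

    if (PyThread_start_joinable_thread(worker, NULL, &ident, &handle) != 0) {
        return -1;                            /* thread could not be started */
    }
    if (wait_for_completion) {
        return PyThread_join_thread(handle);  /* blocks; 0 on success */
    }
    return PyThread_detach_thread(handle);    /* resources freed when it exits */
}
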
Any thread of any interpreter can receive a signal, but only - the main thread of the main interpreter can handle signals: see - _Py_ThreadCanHandleSignals(). */ - _Py_atomic_int signals_pending; - struct _gil_runtime_state gil; + struct _PyArg_Parser *static_parsers; }; /* GIL state */ @@ -35,15 +36,11 @@ struct _gilstate_runtime_state { /* bpo-26558: Flag to disable PyGILState_Check(). If set to non-zero, PyGILState_Check() always return 1. */ int check_enabled; - /* Assuming the current thread holds the GIL, this is the - PyThreadState for the current thread. */ - _Py_atomic_address tstate_current; /* The single PyInterpreterState used by this process' GILState implementation */ /* TODO: Given interp_main, it may be possible to kill this ref */ PyInterpreterState *autoInterpreterState; - Py_tss_t autoTSSkey; }; /* Runtime audit hook state */ @@ -54,12 +51,101 @@ typedef struct _Py_AuditHookEntry { void *userData; } _Py_AuditHookEntry; +typedef struct _Py_DebugOffsets { + char cookie[8]; + uint64_t version; + // Runtime state offset; + struct _runtime_state { + off_t finalizing; + off_t interpreters_head; + } runtime_state; + + // Interpreter state offset; + struct _interpreter_state { + off_t next; + off_t threads_head; + off_t gc; + off_t imports_modules; + off_t sysdict; + off_t builtins; + off_t ceval_gil; + off_t gil_runtime_state_locked; + off_t gil_runtime_state_holder; + } interpreter_state; + + // Thread state offset; + struct _thread_state{ + off_t prev; + off_t next; + off_t interp; + off_t current_frame; + off_t thread_id; + off_t native_thread_id; + } thread_state; + + // InterpreterFrame offset; + struct _interpreter_frame { + off_t previous; + off_t executable; + off_t instr_ptr; + off_t localsplus; + off_t owner; + } interpreter_frame; + + // CFrame offset; + struct _cframe { + off_t current_frame; + off_t previous; + } cframe; + + // Code object offset; + struct _code_object { + off_t filename; + off_t name; + off_t linetable; + off_t firstlineno; + off_t argcount; + off_t localsplusnames; + off_t localspluskinds; + off_t co_code_adaptive; + } code_object; + + // PyObject offset; + struct _pyobject { + off_t ob_type; + } pyobject; + + // PyTypeObject object offset; + struct _type_object { + off_t tp_name; + } type_object; + + // PyTuple object offset; + struct _tuple_object { + off_t ob_item; + } tuple_object; +} _Py_DebugOffsets; + /* Full Python runtime state */ /* _PyRuntimeState holds the global state for the CPython runtime. That data is exposed in the internal API as a static variable (_PyRuntime). */ typedef struct pyruntimestate { + /* This field must be first to facilitate locating it by out of process + * debuggers. Out of process debuggers will use the offsets contained in this + * field to be able to locate other fields in several interpreter structures + * in a way that doesn't require them to know the exact layout of those + * structures. + * + * IMPORTANT: + * This struct is **NOT** backwards compatible between minor version of the + * interpreter and the members, order of members and size can change between + * minor versions. This struct is only guaranteed to be stable between patch + * versions for a given minor version of the interpreter. + */ + _Py_DebugOffsets debug_offsets; + /* Has been initialized to a safe state. In order to be effective, this must be set to 0 during or right @@ -83,7 +169,9 @@ typedef struct pyruntimestate { Use _PyRuntimeState_GetFinalizing() and _PyRuntimeState_SetFinalizing() to access it, don't access it directly. 
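
A hedged sketch of how an out-of-process tool might consume the _Py_DebugOffsets table introduced above. Everything here is illustrative: read_remote() is a hypothetical helper that copies bytes out of the target process, the prefix struct only mirrors the first fields of the table, and locating the _PyRuntime symbol address is platform specific and left out.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>      /* pid_t, off_t */

/* Hypothetical helper: copy `size` bytes from address `addr` in process
 * `pid` into `buf`; returns 0 on success. */
extern int read_remote(pid_t pid, uintptr_t addr, void *buf, size_t size);

/* Prefix of _Py_DebugOffsets, just enough for this sketch. */
typedef struct {
    char cookie[8];
    uint64_t version;
    struct { off_t finalizing; off_t interpreters_head; } runtime_state;
} debug_offsets_prefix;

static int
find_interpreters_head(pid_t pid, uintptr_t runtime_addr, uintptr_t *head_out)
{
    debug_offsets_prefix off;
    if (read_remote(pid, runtime_addr, &off, sizeof(off)) != 0) {
        return -1;
    }
    if (memcmp(off.cookie, "xdebugpy", 8) != 0) {
        return -1;          /* target does not carry debug offsets */
    }
    /* All offsets are relative to the start of _PyRuntime. */
    return read_remote(pid,
                       runtime_addr + (uintptr_t)off.runtime_state.interpreters_head,
                       head_out, sizeof(*head_out));
}
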
*/ - _Py_atomic_address _finalizing; + PyThreadState *_finalizing; + /* The ID of the OS thread in which we are finalizing. */ + unsigned long _finalizing_id; struct pyinterpreters { PyThread_type_lock mutex; @@ -103,21 +191,43 @@ typedef struct pyruntimestate { using a Python int. */ int64_t next_id; } interpreters; - // XXX Remove this field once we have a tp_* slot. - struct _xidregistry { - PyThread_type_lock mutex; - struct _xidregitem *head; - } xidregistry; unsigned long main_thread; -#define NEXITFUNCS 32 - void (*exitfuncs[NEXITFUNCS])(void); - int nexitfuncs; + /* ---------- IMPORTANT --------------------------- + The fields above this line are declared as early as + possible to facilitate out-of-process observability + tools. */ + + /* cross-interpreter data and utils */ + struct _xi_runtime_state xi; + + struct _pymem_allocators allocators; + struct _obmalloc_global_state obmalloc; + struct pyhash_runtime_state pyhash_state; + struct _time_runtime_state time; + struct _pythread_runtime_state threads; + struct _signals_runtime_state signals; + + /* Used for the thread state bound to the current thread. */ + Py_tss_t autoTSSkey; + /* Used instead of PyThreadState.trash when there is not current tstate. */ + Py_tss_t trashTSSkey; + + PyWideStringList orig_argv; + + struct _parser_runtime_state parser; + + struct _atexit_runtime_state atexit; + + struct _import_runtime_state imports; struct _ceval_runtime_state ceval; struct _gilstate_runtime_state gilstate; struct _getargs_runtime_state getargs; + struct _fileutils_state fileutils; + struct _faulthandler_runtime_state faulthandler; + struct _tracemalloc_runtime_state tracemalloc; PyPreConfig preconfig; @@ -125,12 +235,19 @@ typedef struct pyruntimestate { // is called multiple times. Py_OpenCodeHookFunction open_code_hook; void *open_code_userdata; - _Py_AuditHookEntry *audit_hook_head; + struct { + PyThread_type_lock mutex; + _Py_AuditHookEntry *head; + } audit_hooks; - struct _Py_unicode_runtime_ids unicode_ids; + struct _py_object_runtime_state object_state; + struct _Py_float_runtime_state float_state; + struct _Py_unicode_runtime_state unicode_state; + struct _types_runtime_state types; /* All the objects that are shared by the runtime's interpreters. */ - struct _Py_global_objects global_objects; + struct _Py_cached_objects cached_objects; + struct _Py_static_objects static_objects; /* The following fields are here to avoid allocation during init. The data is exposed through _PyRuntimeState pointer fields. @@ -148,15 +265,25 @@ typedef struct pyruntimestate { /* PyInterpreterState.interpreters.main */ PyInterpreterState _main_interpreter; + +#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE) + // Used in "Python/emscripten_trampoline.c" to choose between type + // reflection trampoline and EM_JS trampoline. + bool wasm_type_reflection_available; +#endif + } _PyRuntimeState; /* other API */ +// Export _PyRuntime for shared extensions which use it in static inline +// functions for best performance, like _Py_IsMainThread() or _Py_ID(). +// It's also made accessible for debuggers and profilers. 
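
Since the audit hook storage above now carries its own mutex, it is worth recalling what feeds it: PySys_AddAuditHook(), the public C API, appends entries to this list. A minimal embedder-side sketch (the hook name and the event filtering are illustrative only):

#include <Python.h>
#include <stdio.h>
#include <string.h>

static int
log_open_events(const char *event, PyObject *args, void *userData)
{
    (void)args;
    (void)userData;
    if (strcmp(event, "open") == 0) {
        fprintf(stderr, "audit: open event\n");
    }
    return 0;    /* non-zero would make the audited operation raise */
}

int
install_audit_hook(void)
{
    /* Best installed before Py_Initialize() so early events are also seen. */
    return PySys_AddAuditHook(log_open_events, NULL);
}
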
PyAPI_DATA(_PyRuntimeState) _PyRuntime; -PyAPI_FUNC(PyStatus) _PyRuntimeState_Init(_PyRuntimeState *runtime); -PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *runtime); +extern PyStatus _PyRuntimeState_Init(_PyRuntimeState *runtime); +extern void _PyRuntimeState_Fini(_PyRuntimeState *runtime); #ifdef HAVE_FORK extern PyStatus _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime); @@ -164,19 +291,33 @@ extern PyStatus _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime); /* Initialize _PyRuntimeState. Return NULL on success, or return an error message on failure. */ -PyAPI_FUNC(PyStatus) _PyRuntime_Initialize(void); +extern PyStatus _PyRuntime_Initialize(void); -PyAPI_FUNC(void) _PyRuntime_Finalize(void); +extern void _PyRuntime_Finalize(void); static inline PyThreadState* _PyRuntimeState_GetFinalizing(_PyRuntimeState *runtime) { - return (PyThreadState*)_Py_atomic_load_relaxed(&runtime->_finalizing); + return (PyThreadState*)_Py_atomic_load_ptr_relaxed(&runtime->_finalizing); +} + +static inline unsigned long +_PyRuntimeState_GetFinalizingID(_PyRuntimeState *runtime) { + return _Py_atomic_load_ulong_relaxed(&runtime->_finalizing_id); } static inline void _PyRuntimeState_SetFinalizing(_PyRuntimeState *runtime, PyThreadState *tstate) { - _Py_atomic_store_relaxed(&runtime->_finalizing, (uintptr_t)tstate); + _Py_atomic_store_ptr_relaxed(&runtime->_finalizing, tstate); + if (tstate == NULL) { + _Py_atomic_store_ulong_relaxed(&runtime->_finalizing_id, 0); + } + else { + // XXX Re-enable this assert once gh-109860 is fixed. + //assert(tstate->thread_id == PyThread_get_thread_ident()); + _Py_atomic_store_ulong_relaxed(&runtime->_finalizing_id, + tstate->thread_id); + } } #ifdef __cplusplus diff --git a/Include/internal/pycore_runtime_init.h b/Include/internal/pycore_runtime_init.h index 8dd7a3128c66659..fa5d8114abf0d71 100644 --- a/Include/internal/pycore_runtime_init.h +++ b/Include/internal/pycore_runtime_init.h @@ -8,48 +8,150 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_object.h" +#include "pycore_ceval_state.h" // _PyEval_RUNTIME_PERF_INIT +#include "pycore_faulthandler.h" // _faulthandler_runtime_state_INIT +#include "pycore_floatobject.h" // _py_float_format_unknown +#include "pycore_object.h" // _PyObject_HEAD_INIT +#include "pycore_obmalloc_init.h" // _obmalloc_global_state_INIT +#include "pycore_parser.h" // _parser_runtime_state_INIT +#include "pycore_pyhash.h" // pyhash_state_INIT +#include "pycore_pymem_init.h" // _pymem_allocators_standard_INIT +#include "pycore_runtime_init_generated.h" // _Py_bytes_characters_INIT +#include "pycore_signal.h" // _signals_RUNTIME_INIT +#include "pycore_tracemalloc.h" // _tracemalloc_runtime_state_INIT + + +extern PyTypeObject _PyExc_MemoryError; /* The static initializers defined here should only be used in the runtime init code (in pystate.c and pylifecycle.c). */ - -#define _PyRuntimeState_INIT \ +#define _PyRuntimeState_INIT(runtime) \ { \ - .gilstate = { \ - .check_enabled = 1, \ - /* A TSS key must be initialized with Py_tss_NEEDS_INIT \ - in accordance with the specification. 
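
One consequence of storing the finalizing thread id next to the pointer: code running late in shutdown can ask "is finalization in progress, and is it this thread?" by comparing ids, without dereferencing a PyThreadState that may already be freed from its point of view. A hedged sketch using only the accessors declared above plus PyThread_get_thread_ident() (the helper name is made up):

#include "Python.h"
#include "pycore_runtime.h"   // internal: _PyRuntimeState, _PyRuntimeState_GetFinalizingID()

static inline int
running_in_finalizing_thread(_PyRuntimeState *runtime)
{
    unsigned long id = _PyRuntimeState_GetFinalizingID(runtime);
    return id != 0 && id == PyThread_get_thread_ident();
}
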
*/ \ - .autoTSSkey = Py_tss_NEEDS_INIT, \ + .debug_offsets = { \ + .cookie = "xdebugpy", \ + .version = PY_VERSION_HEX, \ + .runtime_state = { \ + .finalizing = offsetof(_PyRuntimeState, _finalizing), \ + .interpreters_head = offsetof(_PyRuntimeState, interpreters.head), \ + }, \ + .interpreter_state = { \ + .next = offsetof(PyInterpreterState, next), \ + .threads_head = offsetof(PyInterpreterState, threads.head), \ + .gc = offsetof(PyInterpreterState, gc), \ + .imports_modules = offsetof(PyInterpreterState, imports.modules), \ + .sysdict = offsetof(PyInterpreterState, sysdict), \ + .builtins = offsetof(PyInterpreterState, builtins), \ + .ceval_gil = offsetof(PyInterpreterState, ceval.gil), \ + .gil_runtime_state_locked = offsetof(PyInterpreterState, _gil.locked), \ + .gil_runtime_state_holder = offsetof(PyInterpreterState, _gil.last_holder), \ + }, \ + .thread_state = { \ + .prev = offsetof(PyThreadState, prev), \ + .next = offsetof(PyThreadState, next), \ + .interp = offsetof(PyThreadState, interp), \ + .current_frame = offsetof(PyThreadState, current_frame), \ + .thread_id = offsetof(PyThreadState, thread_id), \ + .native_thread_id = offsetof(PyThreadState, native_thread_id), \ + }, \ + .interpreter_frame = { \ + .previous = offsetof(_PyInterpreterFrame, previous), \ + .executable = offsetof(_PyInterpreterFrame, f_executable), \ + .instr_ptr = offsetof(_PyInterpreterFrame, instr_ptr), \ + .localsplus = offsetof(_PyInterpreterFrame, localsplus), \ + .owner = offsetof(_PyInterpreterFrame, owner), \ + }, \ + .code_object = { \ + .filename = offsetof(PyCodeObject, co_filename), \ + .name = offsetof(PyCodeObject, co_name), \ + .linetable = offsetof(PyCodeObject, co_linetable), \ + .firstlineno = offsetof(PyCodeObject, co_firstlineno), \ + .argcount = offsetof(PyCodeObject, co_argcount), \ + .localsplusnames = offsetof(PyCodeObject, co_localsplusnames), \ + .localspluskinds = offsetof(PyCodeObject, co_localspluskinds), \ + .co_code_adaptive = offsetof(PyCodeObject, co_code_adaptive), \ + }, \ + .pyobject = { \ + .ob_type = offsetof(PyObject, ob_type), \ + }, \ + .type_object = { \ + .tp_name = offsetof(PyTypeObject, tp_name), \ + }, \ + .tuple_object = { \ + .ob_item = offsetof(PyTupleObject, ob_item), \ + }, \ + }, \ + .allocators = { \ + .standard = _pymem_allocators_standard_INIT(runtime), \ + .debug = _pymem_allocators_debug_INIT, \ + .obj_arena = _pymem_allocators_obj_arena_INIT, \ }, \ + .obmalloc = _obmalloc_global_state_INIT, \ + .pyhash_state = pyhash_state_INIT, \ + .signals = _signals_RUNTIME_INIT, \ .interpreters = { \ /* This prevents interpreters from getting created \ until _PyInterpreterState_Enable() is called. */ \ .next_id = -1, \ }, \ - .global_objects = _Py_global_objects_INIT, \ - ._main_interpreter = _PyInterpreterState_INIT, \ + .xi = { \ + .registry = { \ + .global = 1, \ + }, \ + }, \ + /* A TSS key must be initialized with Py_tss_NEEDS_INIT \ + in accordance with the specification. 
*/ \ + .autoTSSkey = Py_tss_NEEDS_INIT, \ + .parser = _parser_runtime_state_INIT, \ + .ceval = { \ + .perf = _PyEval_RUNTIME_PERF_INIT, \ + }, \ + .gilstate = { \ + .check_enabled = 1, \ + }, \ + .fileutils = { \ + .force_ascii = -1, \ + }, \ + .faulthandler = _faulthandler_runtime_state_INIT, \ + .tracemalloc = _tracemalloc_runtime_state_INIT, \ + .float_state = { \ + .float_format = _py_float_format_unknown, \ + .double_format = _py_float_format_unknown, \ + }, \ + .types = { \ + .next_version_tag = 1, \ + }, \ + .static_objects = { \ + .singletons = { \ + .small_ints = _Py_small_ints_INIT, \ + .bytes_empty = _PyBytes_SIMPLE_INIT(0, 0), \ + .bytes_characters = _Py_bytes_characters_INIT, \ + .strings = { \ + .literals = _Py_str_literals_INIT, \ + .identifiers = _Py_str_identifiers_INIT, \ + .ascii = _Py_str_ascii_INIT, \ + .latin1 = _Py_str_latin1_INIT, \ + }, \ + .tuple_empty = { \ + .ob_base = _PyVarObject_HEAD_INIT(&PyTuple_Type, 0), \ + }, \ + .hamt_bitmap_node_empty = { \ + .ob_base = _PyVarObject_HEAD_INIT(&_PyHamt_BitmapNode_Type, 0), \ + }, \ + .context_token_missing = { \ + .ob_base = _PyObject_HEAD_INIT(&_PyContextTokenMissing_Type), \ + }, \ + }, \ + }, \ + ._main_interpreter = _PyInterpreterState_INIT(runtime._main_interpreter), \ } -#ifdef HAVE_DLOPEN -# include -# if HAVE_DECL_RTLD_NOW -# define _Py_DLOPEN_FLAGS RTLD_NOW -# else -# define _Py_DLOPEN_FLAGS RTLD_LAZY -# endif -# define DLOPENFLAGS_INIT .dlopenflags = _Py_DLOPEN_FLAGS, -#else -# define _Py_DLOPEN_FLAGS 0 -# define DLOPENFLAGS_INIT -#endif - -#define _PyInterpreterState_INIT \ +#define _PyInterpreterState_INIT(INTERP) \ { \ - ._static = 1, \ .id_refcount = -1, \ - DLOPENFLAGS_INIT \ + .imports = IMPORTS_INIT, \ + .obmalloc = _obmalloc_state_INIT(INTERP.obmalloc), \ .ceval = { \ .recursion_limit = Py_DEFAULT_RECURSION_LIMIT, \ }, \ @@ -62,29 +164,54 @@ extern "C" { { .threshold = 10, }, \ }, \ }, \ + .object_state = _py_object_state_INIT(INTERP), \ + .dtoa = _dtoa_state_INIT(&(INTERP)), \ + .dict_state = _dict_state_INIT, \ + .func_state = { \ + .next_version = 1, \ + }, \ + .types = { \ + .next_version_tag = _Py_TYPE_BASE_VERSION_TAG, \ + }, \ + .static_objects = { \ + .singletons = { \ + ._not_used = 1, \ + .hamt_empty = { \ + .ob_base = _PyObject_HEAD_INIT(&_PyHamt_Type), \ + .h_root = (PyHamtNode*)&_Py_SINGLETON(hamt_bitmap_node_empty), \ + }, \ + .last_resort_memory_error = { \ + _PyObject_HEAD_INIT(&_PyExc_MemoryError), \ + .args = (PyObject*)&_Py_SINGLETON(tuple_empty) \ + }, \ + }, \ + }, \ ._initial_thread = _PyThreadState_INIT, \ } #define _PyThreadState_INIT \ { \ - ._static = 1, \ + ._whence = _PyThreadState_WHENCE_NOTSET, \ .py_recursion_limit = Py_DEFAULT_RECURSION_LIMIT, \ .context_ver = 1, \ } - -// global objects - -#define _PyLong_DIGIT_INIT(val) \ +#ifdef Py_TRACE_REFS +# define _py_object_state_INIT(INTERP) \ { \ - _PyVarObject_IMMORTAL_INIT(&PyLong_Type, \ - ((val) == 0 ? 0 : ((val) > 0 ? 1 : -1))), \ - .ob_digit = { ((val) >= 0 ? 
(val) : -(val)) }, \ + .refchain = NULL, \ } +#else +# define _py_object_state_INIT(INTERP) \ + { 0 } +#endif + + +// global objects #define _PyBytes_SIMPLE_INIT(CH, LEN) \ { \ - _PyVarObject_IMMORTAL_INIT(&PyBytes_Type, (LEN)), \ + _PyVarObject_HEAD_INIT(&PyBytes_Type, (LEN)), \ .ob_shash = -1, \ .ob_sval = { (CH) }, \ } @@ -95,13 +222,14 @@ extern "C" { #define _PyUnicode_ASCII_BASE_INIT(LITERAL, ASCII) \ { \ - .ob_base = _PyObject_IMMORTAL_INIT(&PyUnicode_Type), \ + .ob_base = _PyObject_HEAD_INIT(&PyUnicode_Type), \ .length = sizeof(LITERAL) - 1, \ .hash = -1, \ .state = { \ .kind = 1, \ .compact = 1, \ .ascii = (ASCII), \ + .statically_allocated = 1, \ }, \ } #define _PyASCIIObject_INIT(LITERAL) \ @@ -110,9 +238,9 @@ extern "C" { ._data = (LITERAL) \ } #define INIT_STR(NAME, LITERAL) \ - ._ ## NAME = _PyASCIIObject_INIT(LITERAL) + ._py_ ## NAME = _PyASCIIObject_INIT(LITERAL) #define INIT_ID(NAME) \ - ._ ## NAME = _PyASCIIObject_INIT(#NAME) + ._py_ ## NAME = _PyASCIIObject_INIT(#NAME) #define _PyUnicode_LATIN1_INIT(LITERAL, UTF8) \ { \ ._latin1 = { \ diff --git a/Include/internal/pycore_runtime_init_generated.h b/Include/internal/pycore_runtime_init_generated.h index 30643745941a538..1defa39f816e782 100644 --- a/Include/internal/pycore_runtime_init_generated.h +++ b/Include/internal/pycore_runtime_init_generated.h @@ -8,8386 +8,1521 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +#include "pycore_long.h" // _PyLong_DIGIT_INIT() + + /* The following is auto-generated by Tools/build/generate_global_objects.py. */ -#define _Py_global_objects_INIT { \ - .singletons = { \ - .small_ints = { \ - _PyLong_DIGIT_INIT(-5), \ - _PyLong_DIGIT_INIT(-4), \ - _PyLong_DIGIT_INIT(-3), \ - _PyLong_DIGIT_INIT(-2), \ - _PyLong_DIGIT_INIT(-1), \ - _PyLong_DIGIT_INIT(0), \ - _PyLong_DIGIT_INIT(1), \ - _PyLong_DIGIT_INIT(2), \ - _PyLong_DIGIT_INIT(3), \ - _PyLong_DIGIT_INIT(4), \ - _PyLong_DIGIT_INIT(5), \ - _PyLong_DIGIT_INIT(6), \ - _PyLong_DIGIT_INIT(7), \ - _PyLong_DIGIT_INIT(8), \ - _PyLong_DIGIT_INIT(9), \ - _PyLong_DIGIT_INIT(10), \ - _PyLong_DIGIT_INIT(11), \ - _PyLong_DIGIT_INIT(12), \ - _PyLong_DIGIT_INIT(13), \ - _PyLong_DIGIT_INIT(14), \ - _PyLong_DIGIT_INIT(15), \ - _PyLong_DIGIT_INIT(16), \ - _PyLong_DIGIT_INIT(17), \ - _PyLong_DIGIT_INIT(18), \ - _PyLong_DIGIT_INIT(19), \ - _PyLong_DIGIT_INIT(20), \ - _PyLong_DIGIT_INIT(21), \ - _PyLong_DIGIT_INIT(22), \ - _PyLong_DIGIT_INIT(23), \ - _PyLong_DIGIT_INIT(24), \ - _PyLong_DIGIT_INIT(25), \ - _PyLong_DIGIT_INIT(26), \ - _PyLong_DIGIT_INIT(27), \ - _PyLong_DIGIT_INIT(28), \ - _PyLong_DIGIT_INIT(29), \ - _PyLong_DIGIT_INIT(30), \ - _PyLong_DIGIT_INIT(31), \ - _PyLong_DIGIT_INIT(32), \ - _PyLong_DIGIT_INIT(33), \ - _PyLong_DIGIT_INIT(34), \ - _PyLong_DIGIT_INIT(35), \ - _PyLong_DIGIT_INIT(36), \ - _PyLong_DIGIT_INIT(37), \ - _PyLong_DIGIT_INIT(38), \ - _PyLong_DIGIT_INIT(39), \ - _PyLong_DIGIT_INIT(40), \ - _PyLong_DIGIT_INIT(41), \ - _PyLong_DIGIT_INIT(42), \ - _PyLong_DIGIT_INIT(43), \ - _PyLong_DIGIT_INIT(44), \ - _PyLong_DIGIT_INIT(45), \ - _PyLong_DIGIT_INIT(46), \ - _PyLong_DIGIT_INIT(47), \ - _PyLong_DIGIT_INIT(48), \ - _PyLong_DIGIT_INIT(49), \ - _PyLong_DIGIT_INIT(50), \ - _PyLong_DIGIT_INIT(51), \ - _PyLong_DIGIT_INIT(52), \ - _PyLong_DIGIT_INIT(53), \ - _PyLong_DIGIT_INIT(54), \ - _PyLong_DIGIT_INIT(55), \ - _PyLong_DIGIT_INIT(56), \ - _PyLong_DIGIT_INIT(57), \ - _PyLong_DIGIT_INIT(58), \ - _PyLong_DIGIT_INIT(59), \ - _PyLong_DIGIT_INIT(60), \ - _PyLong_DIGIT_INIT(61), \ - _PyLong_DIGIT_INIT(62), \ - 
_PyLong_DIGIT_INIT(63), \ - _PyLong_DIGIT_INIT(64), \ - _PyLong_DIGIT_INIT(65), \ - _PyLong_DIGIT_INIT(66), \ - _PyLong_DIGIT_INIT(67), \ - _PyLong_DIGIT_INIT(68), \ - _PyLong_DIGIT_INIT(69), \ - _PyLong_DIGIT_INIT(70), \ - _PyLong_DIGIT_INIT(71), \ - _PyLong_DIGIT_INIT(72), \ - _PyLong_DIGIT_INIT(73), \ - _PyLong_DIGIT_INIT(74), \ - _PyLong_DIGIT_INIT(75), \ - _PyLong_DIGIT_INIT(76), \ - _PyLong_DIGIT_INIT(77), \ - _PyLong_DIGIT_INIT(78), \ - _PyLong_DIGIT_INIT(79), \ - _PyLong_DIGIT_INIT(80), \ - _PyLong_DIGIT_INIT(81), \ - _PyLong_DIGIT_INIT(82), \ - _PyLong_DIGIT_INIT(83), \ - _PyLong_DIGIT_INIT(84), \ - _PyLong_DIGIT_INIT(85), \ - _PyLong_DIGIT_INIT(86), \ - _PyLong_DIGIT_INIT(87), \ - _PyLong_DIGIT_INIT(88), \ - _PyLong_DIGIT_INIT(89), \ - _PyLong_DIGIT_INIT(90), \ - _PyLong_DIGIT_INIT(91), \ - _PyLong_DIGIT_INIT(92), \ - _PyLong_DIGIT_INIT(93), \ - _PyLong_DIGIT_INIT(94), \ - _PyLong_DIGIT_INIT(95), \ - _PyLong_DIGIT_INIT(96), \ - _PyLong_DIGIT_INIT(97), \ - _PyLong_DIGIT_INIT(98), \ - _PyLong_DIGIT_INIT(99), \ - _PyLong_DIGIT_INIT(100), \ - _PyLong_DIGIT_INIT(101), \ - _PyLong_DIGIT_INIT(102), \ - _PyLong_DIGIT_INIT(103), \ - _PyLong_DIGIT_INIT(104), \ - _PyLong_DIGIT_INIT(105), \ - _PyLong_DIGIT_INIT(106), \ - _PyLong_DIGIT_INIT(107), \ - _PyLong_DIGIT_INIT(108), \ - _PyLong_DIGIT_INIT(109), \ - _PyLong_DIGIT_INIT(110), \ - _PyLong_DIGIT_INIT(111), \ - _PyLong_DIGIT_INIT(112), \ - _PyLong_DIGIT_INIT(113), \ - _PyLong_DIGIT_INIT(114), \ - _PyLong_DIGIT_INIT(115), \ - _PyLong_DIGIT_INIT(116), \ - _PyLong_DIGIT_INIT(117), \ - _PyLong_DIGIT_INIT(118), \ - _PyLong_DIGIT_INIT(119), \ - _PyLong_DIGIT_INIT(120), \ - _PyLong_DIGIT_INIT(121), \ - _PyLong_DIGIT_INIT(122), \ - _PyLong_DIGIT_INIT(123), \ - _PyLong_DIGIT_INIT(124), \ - _PyLong_DIGIT_INIT(125), \ - _PyLong_DIGIT_INIT(126), \ - _PyLong_DIGIT_INIT(127), \ - _PyLong_DIGIT_INIT(128), \ - _PyLong_DIGIT_INIT(129), \ - _PyLong_DIGIT_INIT(130), \ - _PyLong_DIGIT_INIT(131), \ - _PyLong_DIGIT_INIT(132), \ - _PyLong_DIGIT_INIT(133), \ - _PyLong_DIGIT_INIT(134), \ - _PyLong_DIGIT_INIT(135), \ - _PyLong_DIGIT_INIT(136), \ - _PyLong_DIGIT_INIT(137), \ - _PyLong_DIGIT_INIT(138), \ - _PyLong_DIGIT_INIT(139), \ - _PyLong_DIGIT_INIT(140), \ - _PyLong_DIGIT_INIT(141), \ - _PyLong_DIGIT_INIT(142), \ - _PyLong_DIGIT_INIT(143), \ - _PyLong_DIGIT_INIT(144), \ - _PyLong_DIGIT_INIT(145), \ - _PyLong_DIGIT_INIT(146), \ - _PyLong_DIGIT_INIT(147), \ - _PyLong_DIGIT_INIT(148), \ - _PyLong_DIGIT_INIT(149), \ - _PyLong_DIGIT_INIT(150), \ - _PyLong_DIGIT_INIT(151), \ - _PyLong_DIGIT_INIT(152), \ - _PyLong_DIGIT_INIT(153), \ - _PyLong_DIGIT_INIT(154), \ - _PyLong_DIGIT_INIT(155), \ - _PyLong_DIGIT_INIT(156), \ - _PyLong_DIGIT_INIT(157), \ - _PyLong_DIGIT_INIT(158), \ - _PyLong_DIGIT_INIT(159), \ - _PyLong_DIGIT_INIT(160), \ - _PyLong_DIGIT_INIT(161), \ - _PyLong_DIGIT_INIT(162), \ - _PyLong_DIGIT_INIT(163), \ - _PyLong_DIGIT_INIT(164), \ - _PyLong_DIGIT_INIT(165), \ - _PyLong_DIGIT_INIT(166), \ - _PyLong_DIGIT_INIT(167), \ - _PyLong_DIGIT_INIT(168), \ - _PyLong_DIGIT_INIT(169), \ - _PyLong_DIGIT_INIT(170), \ - _PyLong_DIGIT_INIT(171), \ - _PyLong_DIGIT_INIT(172), \ - _PyLong_DIGIT_INIT(173), \ - _PyLong_DIGIT_INIT(174), \ - _PyLong_DIGIT_INIT(175), \ - _PyLong_DIGIT_INIT(176), \ - _PyLong_DIGIT_INIT(177), \ - _PyLong_DIGIT_INIT(178), \ - _PyLong_DIGIT_INIT(179), \ - _PyLong_DIGIT_INIT(180), \ - _PyLong_DIGIT_INIT(181), \ - _PyLong_DIGIT_INIT(182), \ - _PyLong_DIGIT_INIT(183), \ - _PyLong_DIGIT_INIT(184), \ - _PyLong_DIGIT_INIT(185), \ - _PyLong_DIGIT_INIT(186), 
\ - _PyLong_DIGIT_INIT(187), \ - _PyLong_DIGIT_INIT(188), \ - _PyLong_DIGIT_INIT(189), \ - _PyLong_DIGIT_INIT(190), \ - _PyLong_DIGIT_INIT(191), \ - _PyLong_DIGIT_INIT(192), \ - _PyLong_DIGIT_INIT(193), \ - _PyLong_DIGIT_INIT(194), \ - _PyLong_DIGIT_INIT(195), \ - _PyLong_DIGIT_INIT(196), \ - _PyLong_DIGIT_INIT(197), \ - _PyLong_DIGIT_INIT(198), \ - _PyLong_DIGIT_INIT(199), \ - _PyLong_DIGIT_INIT(200), \ - _PyLong_DIGIT_INIT(201), \ - _PyLong_DIGIT_INIT(202), \ - _PyLong_DIGIT_INIT(203), \ - _PyLong_DIGIT_INIT(204), \ - _PyLong_DIGIT_INIT(205), \ - _PyLong_DIGIT_INIT(206), \ - _PyLong_DIGIT_INIT(207), \ - _PyLong_DIGIT_INIT(208), \ - _PyLong_DIGIT_INIT(209), \ - _PyLong_DIGIT_INIT(210), \ - _PyLong_DIGIT_INIT(211), \ - _PyLong_DIGIT_INIT(212), \ - _PyLong_DIGIT_INIT(213), \ - _PyLong_DIGIT_INIT(214), \ - _PyLong_DIGIT_INIT(215), \ - _PyLong_DIGIT_INIT(216), \ - _PyLong_DIGIT_INIT(217), \ - _PyLong_DIGIT_INIT(218), \ - _PyLong_DIGIT_INIT(219), \ - _PyLong_DIGIT_INIT(220), \ - _PyLong_DIGIT_INIT(221), \ - _PyLong_DIGIT_INIT(222), \ - _PyLong_DIGIT_INIT(223), \ - _PyLong_DIGIT_INIT(224), \ - _PyLong_DIGIT_INIT(225), \ - _PyLong_DIGIT_INIT(226), \ - _PyLong_DIGIT_INIT(227), \ - _PyLong_DIGIT_INIT(228), \ - _PyLong_DIGIT_INIT(229), \ - _PyLong_DIGIT_INIT(230), \ - _PyLong_DIGIT_INIT(231), \ - _PyLong_DIGIT_INIT(232), \ - _PyLong_DIGIT_INIT(233), \ - _PyLong_DIGIT_INIT(234), \ - _PyLong_DIGIT_INIT(235), \ - _PyLong_DIGIT_INIT(236), \ - _PyLong_DIGIT_INIT(237), \ - _PyLong_DIGIT_INIT(238), \ - _PyLong_DIGIT_INIT(239), \ - _PyLong_DIGIT_INIT(240), \ - _PyLong_DIGIT_INIT(241), \ - _PyLong_DIGIT_INIT(242), \ - _PyLong_DIGIT_INIT(243), \ - _PyLong_DIGIT_INIT(244), \ - _PyLong_DIGIT_INIT(245), \ - _PyLong_DIGIT_INIT(246), \ - _PyLong_DIGIT_INIT(247), \ - _PyLong_DIGIT_INIT(248), \ - _PyLong_DIGIT_INIT(249), \ - _PyLong_DIGIT_INIT(250), \ - _PyLong_DIGIT_INIT(251), \ - _PyLong_DIGIT_INIT(252), \ - _PyLong_DIGIT_INIT(253), \ - _PyLong_DIGIT_INIT(254), \ - _PyLong_DIGIT_INIT(255), \ - _PyLong_DIGIT_INIT(256), \ - }, \ - \ - .bytes_empty = _PyBytes_SIMPLE_INIT(0, 0), \ - .bytes_characters = { \ - _PyBytes_CHAR_INIT(0), \ - _PyBytes_CHAR_INIT(1), \ - _PyBytes_CHAR_INIT(2), \ - _PyBytes_CHAR_INIT(3), \ - _PyBytes_CHAR_INIT(4), \ - _PyBytes_CHAR_INIT(5), \ - _PyBytes_CHAR_INIT(6), \ - _PyBytes_CHAR_INIT(7), \ - _PyBytes_CHAR_INIT(8), \ - _PyBytes_CHAR_INIT(9), \ - _PyBytes_CHAR_INIT(10), \ - _PyBytes_CHAR_INIT(11), \ - _PyBytes_CHAR_INIT(12), \ - _PyBytes_CHAR_INIT(13), \ - _PyBytes_CHAR_INIT(14), \ - _PyBytes_CHAR_INIT(15), \ - _PyBytes_CHAR_INIT(16), \ - _PyBytes_CHAR_INIT(17), \ - _PyBytes_CHAR_INIT(18), \ - _PyBytes_CHAR_INIT(19), \ - _PyBytes_CHAR_INIT(20), \ - _PyBytes_CHAR_INIT(21), \ - _PyBytes_CHAR_INIT(22), \ - _PyBytes_CHAR_INIT(23), \ - _PyBytes_CHAR_INIT(24), \ - _PyBytes_CHAR_INIT(25), \ - _PyBytes_CHAR_INIT(26), \ - _PyBytes_CHAR_INIT(27), \ - _PyBytes_CHAR_INIT(28), \ - _PyBytes_CHAR_INIT(29), \ - _PyBytes_CHAR_INIT(30), \ - _PyBytes_CHAR_INIT(31), \ - _PyBytes_CHAR_INIT(32), \ - _PyBytes_CHAR_INIT(33), \ - _PyBytes_CHAR_INIT(34), \ - _PyBytes_CHAR_INIT(35), \ - _PyBytes_CHAR_INIT(36), \ - _PyBytes_CHAR_INIT(37), \ - _PyBytes_CHAR_INIT(38), \ - _PyBytes_CHAR_INIT(39), \ - _PyBytes_CHAR_INIT(40), \ - _PyBytes_CHAR_INIT(41), \ - _PyBytes_CHAR_INIT(42), \ - _PyBytes_CHAR_INIT(43), \ - _PyBytes_CHAR_INIT(44), \ - _PyBytes_CHAR_INIT(45), \ - _PyBytes_CHAR_INIT(46), \ - _PyBytes_CHAR_INIT(47), \ - _PyBytes_CHAR_INIT(48), \ - _PyBytes_CHAR_INIT(49), \ - _PyBytes_CHAR_INIT(50), \ - 
_PyBytes_CHAR_INIT(51), \ - _PyBytes_CHAR_INIT(52), \ - _PyBytes_CHAR_INIT(53), \ - _PyBytes_CHAR_INIT(54), \ - _PyBytes_CHAR_INIT(55), \ - _PyBytes_CHAR_INIT(56), \ - _PyBytes_CHAR_INIT(57), \ - _PyBytes_CHAR_INIT(58), \ - _PyBytes_CHAR_INIT(59), \ - _PyBytes_CHAR_INIT(60), \ - _PyBytes_CHAR_INIT(61), \ - _PyBytes_CHAR_INIT(62), \ - _PyBytes_CHAR_INIT(63), \ - _PyBytes_CHAR_INIT(64), \ - _PyBytes_CHAR_INIT(65), \ - _PyBytes_CHAR_INIT(66), \ - _PyBytes_CHAR_INIT(67), \ - _PyBytes_CHAR_INIT(68), \ - _PyBytes_CHAR_INIT(69), \ - _PyBytes_CHAR_INIT(70), \ - _PyBytes_CHAR_INIT(71), \ - _PyBytes_CHAR_INIT(72), \ - _PyBytes_CHAR_INIT(73), \ - _PyBytes_CHAR_INIT(74), \ - _PyBytes_CHAR_INIT(75), \ - _PyBytes_CHAR_INIT(76), \ - _PyBytes_CHAR_INIT(77), \ - _PyBytes_CHAR_INIT(78), \ - _PyBytes_CHAR_INIT(79), \ - _PyBytes_CHAR_INIT(80), \ - _PyBytes_CHAR_INIT(81), \ - _PyBytes_CHAR_INIT(82), \ - _PyBytes_CHAR_INIT(83), \ - _PyBytes_CHAR_INIT(84), \ - _PyBytes_CHAR_INIT(85), \ - _PyBytes_CHAR_INIT(86), \ - _PyBytes_CHAR_INIT(87), \ - _PyBytes_CHAR_INIT(88), \ - _PyBytes_CHAR_INIT(89), \ - _PyBytes_CHAR_INIT(90), \ - _PyBytes_CHAR_INIT(91), \ - _PyBytes_CHAR_INIT(92), \ - _PyBytes_CHAR_INIT(93), \ - _PyBytes_CHAR_INIT(94), \ - _PyBytes_CHAR_INIT(95), \ - _PyBytes_CHAR_INIT(96), \ - _PyBytes_CHAR_INIT(97), \ - _PyBytes_CHAR_INIT(98), \ - _PyBytes_CHAR_INIT(99), \ - _PyBytes_CHAR_INIT(100), \ - _PyBytes_CHAR_INIT(101), \ - _PyBytes_CHAR_INIT(102), \ - _PyBytes_CHAR_INIT(103), \ - _PyBytes_CHAR_INIT(104), \ - _PyBytes_CHAR_INIT(105), \ - _PyBytes_CHAR_INIT(106), \ - _PyBytes_CHAR_INIT(107), \ - _PyBytes_CHAR_INIT(108), \ - _PyBytes_CHAR_INIT(109), \ - _PyBytes_CHAR_INIT(110), \ - _PyBytes_CHAR_INIT(111), \ - _PyBytes_CHAR_INIT(112), \ - _PyBytes_CHAR_INIT(113), \ - _PyBytes_CHAR_INIT(114), \ - _PyBytes_CHAR_INIT(115), \ - _PyBytes_CHAR_INIT(116), \ - _PyBytes_CHAR_INIT(117), \ - _PyBytes_CHAR_INIT(118), \ - _PyBytes_CHAR_INIT(119), \ - _PyBytes_CHAR_INIT(120), \ - _PyBytes_CHAR_INIT(121), \ - _PyBytes_CHAR_INIT(122), \ - _PyBytes_CHAR_INIT(123), \ - _PyBytes_CHAR_INIT(124), \ - _PyBytes_CHAR_INIT(125), \ - _PyBytes_CHAR_INIT(126), \ - _PyBytes_CHAR_INIT(127), \ - _PyBytes_CHAR_INIT(128), \ - _PyBytes_CHAR_INIT(129), \ - _PyBytes_CHAR_INIT(130), \ - _PyBytes_CHAR_INIT(131), \ - _PyBytes_CHAR_INIT(132), \ - _PyBytes_CHAR_INIT(133), \ - _PyBytes_CHAR_INIT(134), \ - _PyBytes_CHAR_INIT(135), \ - _PyBytes_CHAR_INIT(136), \ - _PyBytes_CHAR_INIT(137), \ - _PyBytes_CHAR_INIT(138), \ - _PyBytes_CHAR_INIT(139), \ - _PyBytes_CHAR_INIT(140), \ - _PyBytes_CHAR_INIT(141), \ - _PyBytes_CHAR_INIT(142), \ - _PyBytes_CHAR_INIT(143), \ - _PyBytes_CHAR_INIT(144), \ - _PyBytes_CHAR_INIT(145), \ - _PyBytes_CHAR_INIT(146), \ - _PyBytes_CHAR_INIT(147), \ - _PyBytes_CHAR_INIT(148), \ - _PyBytes_CHAR_INIT(149), \ - _PyBytes_CHAR_INIT(150), \ - _PyBytes_CHAR_INIT(151), \ - _PyBytes_CHAR_INIT(152), \ - _PyBytes_CHAR_INIT(153), \ - _PyBytes_CHAR_INIT(154), \ - _PyBytes_CHAR_INIT(155), \ - _PyBytes_CHAR_INIT(156), \ - _PyBytes_CHAR_INIT(157), \ - _PyBytes_CHAR_INIT(158), \ - _PyBytes_CHAR_INIT(159), \ - _PyBytes_CHAR_INIT(160), \ - _PyBytes_CHAR_INIT(161), \ - _PyBytes_CHAR_INIT(162), \ - _PyBytes_CHAR_INIT(163), \ - _PyBytes_CHAR_INIT(164), \ - _PyBytes_CHAR_INIT(165), \ - _PyBytes_CHAR_INIT(166), \ - _PyBytes_CHAR_INIT(167), \ - _PyBytes_CHAR_INIT(168), \ - _PyBytes_CHAR_INIT(169), \ - _PyBytes_CHAR_INIT(170), \ - _PyBytes_CHAR_INIT(171), \ - _PyBytes_CHAR_INIT(172), \ - _PyBytes_CHAR_INIT(173), \ - _PyBytes_CHAR_INIT(174), \ - 
_PyBytes_CHAR_INIT(175), \ - _PyBytes_CHAR_INIT(176), \ - _PyBytes_CHAR_INIT(177), \ - _PyBytes_CHAR_INIT(178), \ - _PyBytes_CHAR_INIT(179), \ - _PyBytes_CHAR_INIT(180), \ - _PyBytes_CHAR_INIT(181), \ - _PyBytes_CHAR_INIT(182), \ - _PyBytes_CHAR_INIT(183), \ - _PyBytes_CHAR_INIT(184), \ - _PyBytes_CHAR_INIT(185), \ - _PyBytes_CHAR_INIT(186), \ - _PyBytes_CHAR_INIT(187), \ - _PyBytes_CHAR_INIT(188), \ - _PyBytes_CHAR_INIT(189), \ - _PyBytes_CHAR_INIT(190), \ - _PyBytes_CHAR_INIT(191), \ - _PyBytes_CHAR_INIT(192), \ - _PyBytes_CHAR_INIT(193), \ - _PyBytes_CHAR_INIT(194), \ - _PyBytes_CHAR_INIT(195), \ - _PyBytes_CHAR_INIT(196), \ - _PyBytes_CHAR_INIT(197), \ - _PyBytes_CHAR_INIT(198), \ - _PyBytes_CHAR_INIT(199), \ - _PyBytes_CHAR_INIT(200), \ - _PyBytes_CHAR_INIT(201), \ - _PyBytes_CHAR_INIT(202), \ - _PyBytes_CHAR_INIT(203), \ - _PyBytes_CHAR_INIT(204), \ - _PyBytes_CHAR_INIT(205), \ - _PyBytes_CHAR_INIT(206), \ - _PyBytes_CHAR_INIT(207), \ - _PyBytes_CHAR_INIT(208), \ - _PyBytes_CHAR_INIT(209), \ - _PyBytes_CHAR_INIT(210), \ - _PyBytes_CHAR_INIT(211), \ - _PyBytes_CHAR_INIT(212), \ - _PyBytes_CHAR_INIT(213), \ - _PyBytes_CHAR_INIT(214), \ - _PyBytes_CHAR_INIT(215), \ - _PyBytes_CHAR_INIT(216), \ - _PyBytes_CHAR_INIT(217), \ - _PyBytes_CHAR_INIT(218), \ - _PyBytes_CHAR_INIT(219), \ - _PyBytes_CHAR_INIT(220), \ - _PyBytes_CHAR_INIT(221), \ - _PyBytes_CHAR_INIT(222), \ - _PyBytes_CHAR_INIT(223), \ - _PyBytes_CHAR_INIT(224), \ - _PyBytes_CHAR_INIT(225), \ - _PyBytes_CHAR_INIT(226), \ - _PyBytes_CHAR_INIT(227), \ - _PyBytes_CHAR_INIT(228), \ - _PyBytes_CHAR_INIT(229), \ - _PyBytes_CHAR_INIT(230), \ - _PyBytes_CHAR_INIT(231), \ - _PyBytes_CHAR_INIT(232), \ - _PyBytes_CHAR_INIT(233), \ - _PyBytes_CHAR_INIT(234), \ - _PyBytes_CHAR_INIT(235), \ - _PyBytes_CHAR_INIT(236), \ - _PyBytes_CHAR_INIT(237), \ - _PyBytes_CHAR_INIT(238), \ - _PyBytes_CHAR_INIT(239), \ - _PyBytes_CHAR_INIT(240), \ - _PyBytes_CHAR_INIT(241), \ - _PyBytes_CHAR_INIT(242), \ - _PyBytes_CHAR_INIT(243), \ - _PyBytes_CHAR_INIT(244), \ - _PyBytes_CHAR_INIT(245), \ - _PyBytes_CHAR_INIT(246), \ - _PyBytes_CHAR_INIT(247), \ - _PyBytes_CHAR_INIT(248), \ - _PyBytes_CHAR_INIT(249), \ - _PyBytes_CHAR_INIT(250), \ - _PyBytes_CHAR_INIT(251), \ - _PyBytes_CHAR_INIT(252), \ - _PyBytes_CHAR_INIT(253), \ - _PyBytes_CHAR_INIT(254), \ - _PyBytes_CHAR_INIT(255), \ - }, \ - \ - .strings = { \ - .literals = { \ - INIT_STR(anon_dictcomp, ""), \ - INIT_STR(anon_genexpr, ""), \ - INIT_STR(anon_lambda, ""), \ - INIT_STR(anon_listcomp, ""), \ - INIT_STR(anon_module, ""), \ - INIT_STR(anon_setcomp, ""), \ - INIT_STR(anon_string, ""), \ - INIT_STR(anon_unknown, ""), \ - INIT_STR(close_br, "}"), \ - INIT_STR(comma_sep, ", "), \ - INIT_STR(dbl_close_br, "}}"), \ - INIT_STR(dbl_open_br, "{{"), \ - INIT_STR(dbl_percent, "%%"), \ - INIT_STR(dot, "."), \ - INIT_STR(dot_locals, "."), \ - INIT_STR(empty, ""), \ - INIT_STR(json_decoder, "json.decoder"), \ - INIT_STR(list_err, "list index out of range"), \ - INIT_STR(newline, "\n"), \ - INIT_STR(open_br, "{"), \ - INIT_STR(percent, "%"), \ - INIT_STR(utf_8, "utf-8"), \ - }, \ - .identifiers = { \ - INIT_ID(CANCELLED), \ - INIT_ID(FINISHED), \ - INIT_ID(False), \ - INIT_ID(JSONDecodeError), \ - INIT_ID(PENDING), \ - INIT_ID(Py_Repr), \ - INIT_ID(TextIOWrapper), \ - INIT_ID(True), \ - INIT_ID(WarningMessage), \ - INIT_ID(_), \ - INIT_ID(__IOBase_closed), \ - INIT_ID(__abc_tpflags__), \ - INIT_ID(__abs__), \ - INIT_ID(__abstractmethods__), \ - INIT_ID(__add__), \ - INIT_ID(__aenter__), \ - INIT_ID(__aexit__), \ - 
INIT_ID(__aiter__), \ - INIT_ID(__all__), \ - INIT_ID(__and__), \ - INIT_ID(__anext__), \ - INIT_ID(__annotations__), \ - INIT_ID(__args__), \ - INIT_ID(__asyncio_running_event_loop__), \ - INIT_ID(__await__), \ - INIT_ID(__bases__), \ - INIT_ID(__bool__), \ - INIT_ID(__build_class__), \ - INIT_ID(__builtins__), \ - INIT_ID(__bytes__), \ - INIT_ID(__call__), \ - INIT_ID(__cantrace__), \ - INIT_ID(__class__), \ - INIT_ID(__class_getitem__), \ - INIT_ID(__classcell__), \ - INIT_ID(__complex__), \ - INIT_ID(__contains__), \ - INIT_ID(__copy__), \ - INIT_ID(__del__), \ - INIT_ID(__delattr__), \ - INIT_ID(__delete__), \ - INIT_ID(__delitem__), \ - INIT_ID(__dict__), \ - INIT_ID(__dictoffset__), \ - INIT_ID(__dir__), \ - INIT_ID(__divmod__), \ - INIT_ID(__doc__), \ - INIT_ID(__enter__), \ - INIT_ID(__eq__), \ - INIT_ID(__exit__), \ - INIT_ID(__file__), \ - INIT_ID(__float__), \ - INIT_ID(__floordiv__), \ - INIT_ID(__format__), \ - INIT_ID(__fspath__), \ - INIT_ID(__ge__), \ - INIT_ID(__get__), \ - INIT_ID(__getattr__), \ - INIT_ID(__getattribute__), \ - INIT_ID(__getinitargs__), \ - INIT_ID(__getitem__), \ - INIT_ID(__getnewargs__), \ - INIT_ID(__getnewargs_ex__), \ - INIT_ID(__getstate__), \ - INIT_ID(__gt__), \ - INIT_ID(__hash__), \ - INIT_ID(__iadd__), \ - INIT_ID(__iand__), \ - INIT_ID(__ifloordiv__), \ - INIT_ID(__ilshift__), \ - INIT_ID(__imatmul__), \ - INIT_ID(__imod__), \ - INIT_ID(__import__), \ - INIT_ID(__imul__), \ - INIT_ID(__index__), \ - INIT_ID(__init__), \ - INIT_ID(__init_subclass__), \ - INIT_ID(__instancecheck__), \ - INIT_ID(__int__), \ - INIT_ID(__invert__), \ - INIT_ID(__ior__), \ - INIT_ID(__ipow__), \ - INIT_ID(__irshift__), \ - INIT_ID(__isabstractmethod__), \ - INIT_ID(__isub__), \ - INIT_ID(__iter__), \ - INIT_ID(__itruediv__), \ - INIT_ID(__ixor__), \ - INIT_ID(__le__), \ - INIT_ID(__len__), \ - INIT_ID(__length_hint__), \ - INIT_ID(__lltrace__), \ - INIT_ID(__loader__), \ - INIT_ID(__lshift__), \ - INIT_ID(__lt__), \ - INIT_ID(__main__), \ - INIT_ID(__matmul__), \ - INIT_ID(__missing__), \ - INIT_ID(__mod__), \ - INIT_ID(__module__), \ - INIT_ID(__mro_entries__), \ - INIT_ID(__mul__), \ - INIT_ID(__name__), \ - INIT_ID(__ne__), \ - INIT_ID(__neg__), \ - INIT_ID(__new__), \ - INIT_ID(__newobj__), \ - INIT_ID(__newobj_ex__), \ - INIT_ID(__next__), \ - INIT_ID(__notes__), \ - INIT_ID(__or__), \ - INIT_ID(__orig_class__), \ - INIT_ID(__origin__), \ - INIT_ID(__package__), \ - INIT_ID(__parameters__), \ - INIT_ID(__path__), \ - INIT_ID(__pos__), \ - INIT_ID(__pow__), \ - INIT_ID(__prepare__), \ - INIT_ID(__qualname__), \ - INIT_ID(__radd__), \ - INIT_ID(__rand__), \ - INIT_ID(__rdivmod__), \ - INIT_ID(__reduce__), \ - INIT_ID(__reduce_ex__), \ - INIT_ID(__repr__), \ - INIT_ID(__reversed__), \ - INIT_ID(__rfloordiv__), \ - INIT_ID(__rlshift__), \ - INIT_ID(__rmatmul__), \ - INIT_ID(__rmod__), \ - INIT_ID(__rmul__), \ - INIT_ID(__ror__), \ - INIT_ID(__round__), \ - INIT_ID(__rpow__), \ - INIT_ID(__rrshift__), \ - INIT_ID(__rshift__), \ - INIT_ID(__rsub__), \ - INIT_ID(__rtruediv__), \ - INIT_ID(__rxor__), \ - INIT_ID(__set__), \ - INIT_ID(__set_name__), \ - INIT_ID(__setattr__), \ - INIT_ID(__setitem__), \ - INIT_ID(__setstate__), \ - INIT_ID(__sizeof__), \ - INIT_ID(__slotnames__), \ - INIT_ID(__slots__), \ - INIT_ID(__spec__), \ - INIT_ID(__str__), \ - INIT_ID(__sub__), \ - INIT_ID(__subclasscheck__), \ - INIT_ID(__subclasshook__), \ - INIT_ID(__truediv__), \ - INIT_ID(__trunc__), \ - INIT_ID(__typing_is_unpacked_typevartuple__), \ - INIT_ID(__typing_prepare_subst__), 
\ - INIT_ID(__typing_subst__), \ - INIT_ID(__typing_unpacked_tuple_args__), \ - INIT_ID(__warningregistry__), \ - INIT_ID(__weaklistoffset__), \ - INIT_ID(__weakref__), \ - INIT_ID(__xor__), \ - INIT_ID(_abc_impl), \ - INIT_ID(_annotation), \ - INIT_ID(_asyncio_future_blocking), \ - INIT_ID(_blksize), \ - INIT_ID(_bootstrap), \ - INIT_ID(_dealloc_warn), \ - INIT_ID(_feature_version), \ - INIT_ID(_finalizing), \ - INIT_ID(_find_and_load), \ - INIT_ID(_fix_up_module), \ - INIT_ID(_get_sourcefile), \ - INIT_ID(_handle_fromlist), \ - INIT_ID(_initializing), \ - INIT_ID(_is_text_encoding), \ - INIT_ID(_lock_unlock_module), \ - INIT_ID(_loop), \ - INIT_ID(_showwarnmsg), \ - INIT_ID(_shutdown), \ - INIT_ID(_slotnames), \ - INIT_ID(_uninitialized_submodules), \ - INIT_ID(_warn_unawaited_coroutine), \ - INIT_ID(_xoptions), \ - INIT_ID(a), \ - INIT_ID(abs_tol), \ - INIT_ID(access), \ - INIT_ID(add), \ - INIT_ID(add_done_callback), \ - INIT_ID(after_in_child), \ - INIT_ID(after_in_parent), \ - INIT_ID(aggregate_class), \ - INIT_ID(append), \ - INIT_ID(argdefs), \ - INIT_ID(arguments), \ - INIT_ID(argv), \ - INIT_ID(attribute), \ - INIT_ID(authorizer_callback), \ - INIT_ID(b), \ - INIT_ID(backtick), \ - INIT_ID(base), \ - INIT_ID(before), \ - INIT_ID(big), \ - INIT_ID(binary_form), \ - INIT_ID(block), \ - INIT_ID(buffer), \ - INIT_ID(buffer_callback), \ - INIT_ID(buffer_size), \ - INIT_ID(buffering), \ - INIT_ID(buffers), \ - INIT_ID(bufsize), \ - INIT_ID(builtins), \ - INIT_ID(byteorder), \ - INIT_ID(bytes), \ - INIT_ID(bytes_per_sep), \ - INIT_ID(c_call), \ - INIT_ID(c_exception), \ - INIT_ID(c_return), \ - INIT_ID(cached_statements), \ - INIT_ID(cadata), \ - INIT_ID(cafile), \ - INIT_ID(call), \ - INIT_ID(call_exception_handler), \ - INIT_ID(call_soon), \ - INIT_ID(cancel), \ - INIT_ID(capath), \ - INIT_ID(category), \ - INIT_ID(cb_type), \ - INIT_ID(certfile), \ - INIT_ID(check_same_thread), \ - INIT_ID(clear), \ - INIT_ID(close), \ - INIT_ID(closed), \ - INIT_ID(closefd), \ - INIT_ID(closure), \ - INIT_ID(co_argcount), \ - INIT_ID(co_cellvars), \ - INIT_ID(co_code), \ - INIT_ID(co_consts), \ - INIT_ID(co_exceptiontable), \ - INIT_ID(co_filename), \ - INIT_ID(co_firstlineno), \ - INIT_ID(co_flags), \ - INIT_ID(co_freevars), \ - INIT_ID(co_kwonlyargcount), \ - INIT_ID(co_linetable), \ - INIT_ID(co_name), \ - INIT_ID(co_names), \ - INIT_ID(co_nlocals), \ - INIT_ID(co_posonlyargcount), \ - INIT_ID(co_qualname), \ - INIT_ID(co_stacksize), \ - INIT_ID(co_varnames), \ - INIT_ID(code), \ - INIT_ID(command), \ - INIT_ID(comment_factory), \ - INIT_ID(consts), \ - INIT_ID(context), \ - INIT_ID(cookie), \ - INIT_ID(copy), \ - INIT_ID(copyreg), \ - INIT_ID(coro), \ - INIT_ID(count), \ - INIT_ID(cwd), \ - INIT_ID(data), \ - INIT_ID(database), \ - INIT_ID(decode), \ - INIT_ID(decoder), \ - INIT_ID(default), \ - INIT_ID(defaultaction), \ - INIT_ID(delete), \ - INIT_ID(depth), \ - INIT_ID(detect_types), \ - INIT_ID(deterministic), \ - INIT_ID(device), \ - INIT_ID(dict), \ - INIT_ID(dictcomp), \ - INIT_ID(difference_update), \ - INIT_ID(digest), \ - INIT_ID(digest_size), \ - INIT_ID(digestmod), \ - INIT_ID(dir_fd), \ - INIT_ID(discard), \ - INIT_ID(dispatch_table), \ - INIT_ID(displayhook), \ - INIT_ID(dklen), \ - INIT_ID(doc), \ - INIT_ID(dont_inherit), \ - INIT_ID(dst), \ - INIT_ID(dst_dir_fd), \ - INIT_ID(duration), \ - INIT_ID(effective_ids), \ - INIT_ID(element_factory), \ - INIT_ID(encode), \ - INIT_ID(encoding), \ - INIT_ID(end), \ - INIT_ID(end_lineno), \ - INIT_ID(end_offset), \ - INIT_ID(endpos), \ - 
INIT_ID(env), \ - INIT_ID(errors), \ - INIT_ID(event), \ - INIT_ID(eventmask), \ - INIT_ID(exc_type), \ - INIT_ID(exc_value), \ - INIT_ID(excepthook), \ - INIT_ID(exception), \ - INIT_ID(exp), \ - INIT_ID(extend), \ - INIT_ID(facility), \ - INIT_ID(factory), \ - INIT_ID(false), \ - INIT_ID(family), \ - INIT_ID(fanout), \ - INIT_ID(fd), \ - INIT_ID(fd2), \ - INIT_ID(fdel), \ - INIT_ID(fget), \ - INIT_ID(file), \ - INIT_ID(file_actions), \ - INIT_ID(filename), \ - INIT_ID(fileno), \ - INIT_ID(filepath), \ - INIT_ID(fillvalue), \ - INIT_ID(filters), \ - INIT_ID(final), \ - INIT_ID(find_class), \ - INIT_ID(fix_imports), \ - INIT_ID(flags), \ - INIT_ID(flush), \ - INIT_ID(follow_symlinks), \ - INIT_ID(format), \ - INIT_ID(frequency), \ - INIT_ID(fromlist), \ - INIT_ID(fset), \ - INIT_ID(func), \ - INIT_ID(future), \ - INIT_ID(generation), \ - INIT_ID(genexpr), \ - INIT_ID(get), \ - INIT_ID(get_debug), \ - INIT_ID(get_event_loop), \ - INIT_ID(get_loop), \ - INIT_ID(get_source), \ - INIT_ID(getattr), \ - INIT_ID(getstate), \ - INIT_ID(gid), \ - INIT_ID(globals), \ - INIT_ID(groupindex), \ - INIT_ID(groups), \ - INIT_ID(handle), \ - INIT_ID(hash_name), \ - INIT_ID(header), \ - INIT_ID(headers), \ - INIT_ID(hi), \ - INIT_ID(hook), \ - INIT_ID(id), \ - INIT_ID(ident), \ - INIT_ID(ignore), \ - INIT_ID(imag), \ - INIT_ID(importlib), \ - INIT_ID(in_fd), \ - INIT_ID(incoming), \ - INIT_ID(indexgroup), \ - INIT_ID(inf), \ - INIT_ID(inheritable), \ - INIT_ID(initial), \ - INIT_ID(initial_bytes), \ - INIT_ID(initial_value), \ - INIT_ID(initval), \ - INIT_ID(inner_size), \ - INIT_ID(input), \ - INIT_ID(insert_comments), \ - INIT_ID(insert_pis), \ - INIT_ID(instructions), \ - INIT_ID(intern), \ - INIT_ID(intersection), \ - INIT_ID(isatty), \ - INIT_ID(isinstance), \ - INIT_ID(isolation_level), \ - INIT_ID(istext), \ - INIT_ID(item), \ - INIT_ID(items), \ - INIT_ID(iter), \ - INIT_ID(iterable), \ - INIT_ID(iterations), \ - INIT_ID(join), \ - INIT_ID(jump), \ - INIT_ID(keepends), \ - INIT_ID(key), \ - INIT_ID(keyfile), \ - INIT_ID(keys), \ - INIT_ID(kind), \ - INIT_ID(lambda), \ - INIT_ID(last), \ - INIT_ID(last_node), \ - INIT_ID(last_traceback), \ - INIT_ID(last_type), \ - INIT_ID(last_value), \ - INIT_ID(latin1), \ - INIT_ID(leaf_size), \ - INIT_ID(len), \ - INIT_ID(length), \ - INIT_ID(level), \ - INIT_ID(limit), \ - INIT_ID(line), \ - INIT_ID(line_buffering), \ - INIT_ID(lineno), \ - INIT_ID(listcomp), \ - INIT_ID(little), \ - INIT_ID(lo), \ - INIT_ID(locale), \ - INIT_ID(locals), \ - INIT_ID(logoption), \ - INIT_ID(loop), \ - INIT_ID(mapping), \ - INIT_ID(match), \ - INIT_ID(max_length), \ - INIT_ID(maxdigits), \ - INIT_ID(maxevents), \ - INIT_ID(maxmem), \ - INIT_ID(maxsplit), \ - INIT_ID(maxvalue), \ - INIT_ID(memLevel), \ - INIT_ID(memlimit), \ - INIT_ID(message), \ - INIT_ID(metaclass), \ - INIT_ID(method), \ - INIT_ID(mod), \ - INIT_ID(mode), \ - INIT_ID(module), \ - INIT_ID(module_globals), \ - INIT_ID(modules), \ - INIT_ID(mro), \ - INIT_ID(msg), \ - INIT_ID(mycmp), \ - INIT_ID(n), \ - INIT_ID(n_arg), \ - INIT_ID(n_fields), \ - INIT_ID(n_sequence_fields), \ - INIT_ID(n_unnamed_fields), \ - INIT_ID(name), \ - INIT_ID(name_from), \ - INIT_ID(namespace_separator), \ - INIT_ID(namespaces), \ - INIT_ID(narg), \ - INIT_ID(ndigits), \ - INIT_ID(new_limit), \ - INIT_ID(newline), \ - INIT_ID(newlines), \ - INIT_ID(next), \ - INIT_ID(node_depth), \ - INIT_ID(node_offset), \ - INIT_ID(ns), \ - INIT_ID(nstype), \ - INIT_ID(null), \ - INIT_ID(number), \ - INIT_ID(obj), \ - INIT_ID(object), \ - INIT_ID(offset), 
\ - INIT_ID(offset_dst), \ - INIT_ID(offset_src), \ - INIT_ID(on_type_read), \ - INIT_ID(onceregistry), \ - INIT_ID(only_keys), \ - INIT_ID(oparg), \ - INIT_ID(opcode), \ - INIT_ID(open), \ - INIT_ID(opener), \ - INIT_ID(operation), \ - INIT_ID(optimize), \ - INIT_ID(options), \ - INIT_ID(order), \ - INIT_ID(out_fd), \ - INIT_ID(outgoing), \ - INIT_ID(overlapped), \ - INIT_ID(owner), \ - INIT_ID(p), \ - INIT_ID(pages), \ - INIT_ID(parent), \ - INIT_ID(password), \ - INIT_ID(path), \ - INIT_ID(pattern), \ - INIT_ID(peek), \ - INIT_ID(persistent_id), \ - INIT_ID(persistent_load), \ - INIT_ID(person), \ - INIT_ID(pi_factory), \ - INIT_ID(pid), \ - INIT_ID(policy), \ - INIT_ID(pos), \ - INIT_ID(print_file_and_line), \ - INIT_ID(priority), \ - INIT_ID(progress), \ - INIT_ID(progress_handler), \ - INIT_ID(proto), \ - INIT_ID(protocol), \ - INIT_ID(ps1), \ - INIT_ID(ps2), \ - INIT_ID(query), \ - INIT_ID(quotetabs), \ - INIT_ID(r), \ - INIT_ID(raw), \ - INIT_ID(read), \ - INIT_ID(read1), \ - INIT_ID(readable), \ - INIT_ID(readall), \ - INIT_ID(readinto), \ - INIT_ID(readinto1), \ - INIT_ID(readline), \ - INIT_ID(readonly), \ - INIT_ID(real), \ - INIT_ID(reducer_override), \ - INIT_ID(registry), \ - INIT_ID(rel_tol), \ - INIT_ID(reload), \ - INIT_ID(repl), \ - INIT_ID(replace), \ - INIT_ID(reserved), \ - INIT_ID(reset), \ - INIT_ID(resetids), \ - INIT_ID(return), \ - INIT_ID(reverse), \ - INIT_ID(reversed), \ - INIT_ID(s), \ - INIT_ID(salt), \ - INIT_ID(sched_priority), \ - INIT_ID(scheduler), \ - INIT_ID(seek), \ - INIT_ID(seekable), \ - INIT_ID(selectors), \ - INIT_ID(send), \ - INIT_ID(sep), \ - INIT_ID(sequence), \ - INIT_ID(server_hostname), \ - INIT_ID(server_side), \ - INIT_ID(session), \ - INIT_ID(setcomp), \ - INIT_ID(setpgroup), \ - INIT_ID(setsid), \ - INIT_ID(setsigdef), \ - INIT_ID(setsigmask), \ - INIT_ID(setstate), \ - INIT_ID(shape), \ - INIT_ID(show_cmd), \ - INIT_ID(signed), \ - INIT_ID(size), \ - INIT_ID(sizehint), \ - INIT_ID(sleep), \ - INIT_ID(sock), \ - INIT_ID(sort), \ - INIT_ID(sound), \ - INIT_ID(source), \ - INIT_ID(source_traceback), \ - INIT_ID(src), \ - INIT_ID(src_dir_fd), \ - INIT_ID(stacklevel), \ - INIT_ID(start), \ - INIT_ID(statement), \ - INIT_ID(status), \ - INIT_ID(stderr), \ - INIT_ID(stdin), \ - INIT_ID(stdout), \ - INIT_ID(step), \ - INIT_ID(store_name), \ - INIT_ID(strategy), \ - INIT_ID(strict), \ - INIT_ID(strict_mode), \ - INIT_ID(string), \ - INIT_ID(sub_key), \ - INIT_ID(symmetric_difference_update), \ - INIT_ID(tabsize), \ - INIT_ID(tag), \ - INIT_ID(target), \ - INIT_ID(target_is_directory), \ - INIT_ID(task), \ - INIT_ID(tb_frame), \ - INIT_ID(tb_lasti), \ - INIT_ID(tb_lineno), \ - INIT_ID(tb_next), \ - INIT_ID(tell), \ - INIT_ID(template), \ - INIT_ID(term), \ - INIT_ID(text), \ - INIT_ID(threading), \ - INIT_ID(throw), \ - INIT_ID(timeout), \ - INIT_ID(times), \ - INIT_ID(top), \ - INIT_ID(trace_callback), \ - INIT_ID(traceback), \ - INIT_ID(trailers), \ - INIT_ID(translate), \ - INIT_ID(true), \ - INIT_ID(truncate), \ - INIT_ID(twice), \ - INIT_ID(txt), \ - INIT_ID(type), \ - INIT_ID(tz), \ - INIT_ID(uid), \ - INIT_ID(unlink), \ - INIT_ID(unraisablehook), \ - INIT_ID(uri), \ - INIT_ID(usedforsecurity), \ - INIT_ID(value), \ - INIT_ID(values), \ - INIT_ID(version), \ - INIT_ID(warnings), \ - INIT_ID(warnoptions), \ - INIT_ID(wbits), \ - INIT_ID(week), \ - INIT_ID(weekday), \ - INIT_ID(which), \ - INIT_ID(who), \ - INIT_ID(withdata), \ - INIT_ID(writable), \ - INIT_ID(write), \ - INIT_ID(write_through), \ - INIT_ID(x), \ - INIT_ID(year), \ - 
INIT_ID(zdict), \ - }, \ - .ascii = { \ - _PyASCIIObject_INIT("\x00"), \ - _PyASCIIObject_INIT("\x01"), \ - _PyASCIIObject_INIT("\x02"), \ - _PyASCIIObject_INIT("\x03"), \ - _PyASCIIObject_INIT("\x04"), \ - _PyASCIIObject_INIT("\x05"), \ - _PyASCIIObject_INIT("\x06"), \ - _PyASCIIObject_INIT("\x07"), \ - _PyASCIIObject_INIT("\x08"), \ - _PyASCIIObject_INIT("\x09"), \ - _PyASCIIObject_INIT("\x0a"), \ - _PyASCIIObject_INIT("\x0b"), \ - _PyASCIIObject_INIT("\x0c"), \ - _PyASCIIObject_INIT("\x0d"), \ - _PyASCIIObject_INIT("\x0e"), \ - _PyASCIIObject_INIT("\x0f"), \ - _PyASCIIObject_INIT("\x10"), \ - _PyASCIIObject_INIT("\x11"), \ - _PyASCIIObject_INIT("\x12"), \ - _PyASCIIObject_INIT("\x13"), \ - _PyASCIIObject_INIT("\x14"), \ - _PyASCIIObject_INIT("\x15"), \ - _PyASCIIObject_INIT("\x16"), \ - _PyASCIIObject_INIT("\x17"), \ - _PyASCIIObject_INIT("\x18"), \ - _PyASCIIObject_INIT("\x19"), \ - _PyASCIIObject_INIT("\x1a"), \ - _PyASCIIObject_INIT("\x1b"), \ - _PyASCIIObject_INIT("\x1c"), \ - _PyASCIIObject_INIT("\x1d"), \ - _PyASCIIObject_INIT("\x1e"), \ - _PyASCIIObject_INIT("\x1f"), \ - _PyASCIIObject_INIT("\x20"), \ - _PyASCIIObject_INIT("\x21"), \ - _PyASCIIObject_INIT("\x22"), \ - _PyASCIIObject_INIT("\x23"), \ - _PyASCIIObject_INIT("\x24"), \ - _PyASCIIObject_INIT("\x25"), \ - _PyASCIIObject_INIT("\x26"), \ - _PyASCIIObject_INIT("\x27"), \ - _PyASCIIObject_INIT("\x28"), \ - _PyASCIIObject_INIT("\x29"), \ - _PyASCIIObject_INIT("\x2a"), \ - _PyASCIIObject_INIT("\x2b"), \ - _PyASCIIObject_INIT("\x2c"), \ - _PyASCIIObject_INIT("\x2d"), \ - _PyASCIIObject_INIT("\x2e"), \ - _PyASCIIObject_INIT("\x2f"), \ - _PyASCIIObject_INIT("\x30"), \ - _PyASCIIObject_INIT("\x31"), \ - _PyASCIIObject_INIT("\x32"), \ - _PyASCIIObject_INIT("\x33"), \ - _PyASCIIObject_INIT("\x34"), \ - _PyASCIIObject_INIT("\x35"), \ - _PyASCIIObject_INIT("\x36"), \ - _PyASCIIObject_INIT("\x37"), \ - _PyASCIIObject_INIT("\x38"), \ - _PyASCIIObject_INIT("\x39"), \ - _PyASCIIObject_INIT("\x3a"), \ - _PyASCIIObject_INIT("\x3b"), \ - _PyASCIIObject_INIT("\x3c"), \ - _PyASCIIObject_INIT("\x3d"), \ - _PyASCIIObject_INIT("\x3e"), \ - _PyASCIIObject_INIT("\x3f"), \ - _PyASCIIObject_INIT("\x40"), \ - _PyASCIIObject_INIT("\x41"), \ - _PyASCIIObject_INIT("\x42"), \ - _PyASCIIObject_INIT("\x43"), \ - _PyASCIIObject_INIT("\x44"), \ - _PyASCIIObject_INIT("\x45"), \ - _PyASCIIObject_INIT("\x46"), \ - _PyASCIIObject_INIT("\x47"), \ - _PyASCIIObject_INIT("\x48"), \ - _PyASCIIObject_INIT("\x49"), \ - _PyASCIIObject_INIT("\x4a"), \ - _PyASCIIObject_INIT("\x4b"), \ - _PyASCIIObject_INIT("\x4c"), \ - _PyASCIIObject_INIT("\x4d"), \ - _PyASCIIObject_INIT("\x4e"), \ - _PyASCIIObject_INIT("\x4f"), \ - _PyASCIIObject_INIT("\x50"), \ - _PyASCIIObject_INIT("\x51"), \ - _PyASCIIObject_INIT("\x52"), \ - _PyASCIIObject_INIT("\x53"), \ - _PyASCIIObject_INIT("\x54"), \ - _PyASCIIObject_INIT("\x55"), \ - _PyASCIIObject_INIT("\x56"), \ - _PyASCIIObject_INIT("\x57"), \ - _PyASCIIObject_INIT("\x58"), \ - _PyASCIIObject_INIT("\x59"), \ - _PyASCIIObject_INIT("\x5a"), \ - _PyASCIIObject_INIT("\x5b"), \ - _PyASCIIObject_INIT("\x5c"), \ - _PyASCIIObject_INIT("\x5d"), \ - _PyASCIIObject_INIT("\x5e"), \ - _PyASCIIObject_INIT("\x5f"), \ - _PyASCIIObject_INIT("\x60"), \ - _PyASCIIObject_INIT("\x61"), \ - _PyASCIIObject_INIT("\x62"), \ - _PyASCIIObject_INIT("\x63"), \ - _PyASCIIObject_INIT("\x64"), \ - _PyASCIIObject_INIT("\x65"), \ - _PyASCIIObject_INIT("\x66"), \ - _PyASCIIObject_INIT("\x67"), \ - _PyASCIIObject_INIT("\x68"), \ - _PyASCIIObject_INIT("\x69"), \ - 
_PyASCIIObject_INIT("\x6a"), \ - _PyASCIIObject_INIT("\x6b"), \ - _PyASCIIObject_INIT("\x6c"), \ - _PyASCIIObject_INIT("\x6d"), \ - _PyASCIIObject_INIT("\x6e"), \ - _PyASCIIObject_INIT("\x6f"), \ - _PyASCIIObject_INIT("\x70"), \ - _PyASCIIObject_INIT("\x71"), \ - _PyASCIIObject_INIT("\x72"), \ - _PyASCIIObject_INIT("\x73"), \ - _PyASCIIObject_INIT("\x74"), \ - _PyASCIIObject_INIT("\x75"), \ - _PyASCIIObject_INIT("\x76"), \ - _PyASCIIObject_INIT("\x77"), \ - _PyASCIIObject_INIT("\x78"), \ - _PyASCIIObject_INIT("\x79"), \ - _PyASCIIObject_INIT("\x7a"), \ - _PyASCIIObject_INIT("\x7b"), \ - _PyASCIIObject_INIT("\x7c"), \ - _PyASCIIObject_INIT("\x7d"), \ - _PyASCIIObject_INIT("\x7e"), \ - _PyASCIIObject_INIT("\x7f"), \ - }, \ - .latin1 = { \ - _PyUnicode_LATIN1_INIT("\x80", "\xc2\x80"), \ - _PyUnicode_LATIN1_INIT("\x81", "\xc2\x81"), \ - _PyUnicode_LATIN1_INIT("\x82", "\xc2\x82"), \ - _PyUnicode_LATIN1_INIT("\x83", "\xc2\x83"), \ - _PyUnicode_LATIN1_INIT("\x84", "\xc2\x84"), \ - _PyUnicode_LATIN1_INIT("\x85", "\xc2\x85"), \ - _PyUnicode_LATIN1_INIT("\x86", "\xc2\x86"), \ - _PyUnicode_LATIN1_INIT("\x87", "\xc2\x87"), \ - _PyUnicode_LATIN1_INIT("\x88", "\xc2\x88"), \ - _PyUnicode_LATIN1_INIT("\x89", "\xc2\x89"), \ - _PyUnicode_LATIN1_INIT("\x8a", "\xc2\x8a"), \ - _PyUnicode_LATIN1_INIT("\x8b", "\xc2\x8b"), \ - _PyUnicode_LATIN1_INIT("\x8c", "\xc2\x8c"), \ - _PyUnicode_LATIN1_INIT("\x8d", "\xc2\x8d"), \ - _PyUnicode_LATIN1_INIT("\x8e", "\xc2\x8e"), \ - _PyUnicode_LATIN1_INIT("\x8f", "\xc2\x8f"), \ - _PyUnicode_LATIN1_INIT("\x90", "\xc2\x90"), \ - _PyUnicode_LATIN1_INIT("\x91", "\xc2\x91"), \ - _PyUnicode_LATIN1_INIT("\x92", "\xc2\x92"), \ - _PyUnicode_LATIN1_INIT("\x93", "\xc2\x93"), \ - _PyUnicode_LATIN1_INIT("\x94", "\xc2\x94"), \ - _PyUnicode_LATIN1_INIT("\x95", "\xc2\x95"), \ - _PyUnicode_LATIN1_INIT("\x96", "\xc2\x96"), \ - _PyUnicode_LATIN1_INIT("\x97", "\xc2\x97"), \ - _PyUnicode_LATIN1_INIT("\x98", "\xc2\x98"), \ - _PyUnicode_LATIN1_INIT("\x99", "\xc2\x99"), \ - _PyUnicode_LATIN1_INIT("\x9a", "\xc2\x9a"), \ - _PyUnicode_LATIN1_INIT("\x9b", "\xc2\x9b"), \ - _PyUnicode_LATIN1_INIT("\x9c", "\xc2\x9c"), \ - _PyUnicode_LATIN1_INIT("\x9d", "\xc2\x9d"), \ - _PyUnicode_LATIN1_INIT("\x9e", "\xc2\x9e"), \ - _PyUnicode_LATIN1_INIT("\x9f", "\xc2\x9f"), \ - _PyUnicode_LATIN1_INIT("\xa0", "\xc2\xa0"), \ - _PyUnicode_LATIN1_INIT("\xa1", "\xc2\xa1"), \ - _PyUnicode_LATIN1_INIT("\xa2", "\xc2\xa2"), \ - _PyUnicode_LATIN1_INIT("\xa3", "\xc2\xa3"), \ - _PyUnicode_LATIN1_INIT("\xa4", "\xc2\xa4"), \ - _PyUnicode_LATIN1_INIT("\xa5", "\xc2\xa5"), \ - _PyUnicode_LATIN1_INIT("\xa6", "\xc2\xa6"), \ - _PyUnicode_LATIN1_INIT("\xa7", "\xc2\xa7"), \ - _PyUnicode_LATIN1_INIT("\xa8", "\xc2\xa8"), \ - _PyUnicode_LATIN1_INIT("\xa9", "\xc2\xa9"), \ - _PyUnicode_LATIN1_INIT("\xaa", "\xc2\xaa"), \ - _PyUnicode_LATIN1_INIT("\xab", "\xc2\xab"), \ - _PyUnicode_LATIN1_INIT("\xac", "\xc2\xac"), \ - _PyUnicode_LATIN1_INIT("\xad", "\xc2\xad"), \ - _PyUnicode_LATIN1_INIT("\xae", "\xc2\xae"), \ - _PyUnicode_LATIN1_INIT("\xaf", "\xc2\xaf"), \ - _PyUnicode_LATIN1_INIT("\xb0", "\xc2\xb0"), \ - _PyUnicode_LATIN1_INIT("\xb1", "\xc2\xb1"), \ - _PyUnicode_LATIN1_INIT("\xb2", "\xc2\xb2"), \ - _PyUnicode_LATIN1_INIT("\xb3", "\xc2\xb3"), \ - _PyUnicode_LATIN1_INIT("\xb4", "\xc2\xb4"), \ - _PyUnicode_LATIN1_INIT("\xb5", "\xc2\xb5"), \ - _PyUnicode_LATIN1_INIT("\xb6", "\xc2\xb6"), \ - _PyUnicode_LATIN1_INIT("\xb7", "\xc2\xb7"), \ - _PyUnicode_LATIN1_INIT("\xb8", "\xc2\xb8"), \ - _PyUnicode_LATIN1_INIT("\xb9", "\xc2\xb9"), \ - 
_PyUnicode_LATIN1_INIT("\xba", "\xc2\xba"), \ - _PyUnicode_LATIN1_INIT("\xbb", "\xc2\xbb"), \ - _PyUnicode_LATIN1_INIT("\xbc", "\xc2\xbc"), \ - _PyUnicode_LATIN1_INIT("\xbd", "\xc2\xbd"), \ - _PyUnicode_LATIN1_INIT("\xbe", "\xc2\xbe"), \ - _PyUnicode_LATIN1_INIT("\xbf", "\xc2\xbf"), \ - _PyUnicode_LATIN1_INIT("\xc0", "\xc3\x80"), \ - _PyUnicode_LATIN1_INIT("\xc1", "\xc3\x81"), \ - _PyUnicode_LATIN1_INIT("\xc2", "\xc3\x82"), \ - _PyUnicode_LATIN1_INIT("\xc3", "\xc3\x83"), \ - _PyUnicode_LATIN1_INIT("\xc4", "\xc3\x84"), \ - _PyUnicode_LATIN1_INIT("\xc5", "\xc3\x85"), \ - _PyUnicode_LATIN1_INIT("\xc6", "\xc3\x86"), \ - _PyUnicode_LATIN1_INIT("\xc7", "\xc3\x87"), \ - _PyUnicode_LATIN1_INIT("\xc8", "\xc3\x88"), \ - _PyUnicode_LATIN1_INIT("\xc9", "\xc3\x89"), \ - _PyUnicode_LATIN1_INIT("\xca", "\xc3\x8a"), \ - _PyUnicode_LATIN1_INIT("\xcb", "\xc3\x8b"), \ - _PyUnicode_LATIN1_INIT("\xcc", "\xc3\x8c"), \ - _PyUnicode_LATIN1_INIT("\xcd", "\xc3\x8d"), \ - _PyUnicode_LATIN1_INIT("\xce", "\xc3\x8e"), \ - _PyUnicode_LATIN1_INIT("\xcf", "\xc3\x8f"), \ - _PyUnicode_LATIN1_INIT("\xd0", "\xc3\x90"), \ - _PyUnicode_LATIN1_INIT("\xd1", "\xc3\x91"), \ - _PyUnicode_LATIN1_INIT("\xd2", "\xc3\x92"), \ - _PyUnicode_LATIN1_INIT("\xd3", "\xc3\x93"), \ - _PyUnicode_LATIN1_INIT("\xd4", "\xc3\x94"), \ - _PyUnicode_LATIN1_INIT("\xd5", "\xc3\x95"), \ - _PyUnicode_LATIN1_INIT("\xd6", "\xc3\x96"), \ - _PyUnicode_LATIN1_INIT("\xd7", "\xc3\x97"), \ - _PyUnicode_LATIN1_INIT("\xd8", "\xc3\x98"), \ - _PyUnicode_LATIN1_INIT("\xd9", "\xc3\x99"), \ - _PyUnicode_LATIN1_INIT("\xda", "\xc3\x9a"), \ - _PyUnicode_LATIN1_INIT("\xdb", "\xc3\x9b"), \ - _PyUnicode_LATIN1_INIT("\xdc", "\xc3\x9c"), \ - _PyUnicode_LATIN1_INIT("\xdd", "\xc3\x9d"), \ - _PyUnicode_LATIN1_INIT("\xde", "\xc3\x9e"), \ - _PyUnicode_LATIN1_INIT("\xdf", "\xc3\x9f"), \ - _PyUnicode_LATIN1_INIT("\xe0", "\xc3\xa0"), \ - _PyUnicode_LATIN1_INIT("\xe1", "\xc3\xa1"), \ - _PyUnicode_LATIN1_INIT("\xe2", "\xc3\xa2"), \ - _PyUnicode_LATIN1_INIT("\xe3", "\xc3\xa3"), \ - _PyUnicode_LATIN1_INIT("\xe4", "\xc3\xa4"), \ - _PyUnicode_LATIN1_INIT("\xe5", "\xc3\xa5"), \ - _PyUnicode_LATIN1_INIT("\xe6", "\xc3\xa6"), \ - _PyUnicode_LATIN1_INIT("\xe7", "\xc3\xa7"), \ - _PyUnicode_LATIN1_INIT("\xe8", "\xc3\xa8"), \ - _PyUnicode_LATIN1_INIT("\xe9", "\xc3\xa9"), \ - _PyUnicode_LATIN1_INIT("\xea", "\xc3\xaa"), \ - _PyUnicode_LATIN1_INIT("\xeb", "\xc3\xab"), \ - _PyUnicode_LATIN1_INIT("\xec", "\xc3\xac"), \ - _PyUnicode_LATIN1_INIT("\xed", "\xc3\xad"), \ - _PyUnicode_LATIN1_INIT("\xee", "\xc3\xae"), \ - _PyUnicode_LATIN1_INIT("\xef", "\xc3\xaf"), \ - _PyUnicode_LATIN1_INIT("\xf0", "\xc3\xb0"), \ - _PyUnicode_LATIN1_INIT("\xf1", "\xc3\xb1"), \ - _PyUnicode_LATIN1_INIT("\xf2", "\xc3\xb2"), \ - _PyUnicode_LATIN1_INIT("\xf3", "\xc3\xb3"), \ - _PyUnicode_LATIN1_INIT("\xf4", "\xc3\xb4"), \ - _PyUnicode_LATIN1_INIT("\xf5", "\xc3\xb5"), \ - _PyUnicode_LATIN1_INIT("\xf6", "\xc3\xb6"), \ - _PyUnicode_LATIN1_INIT("\xf7", "\xc3\xb7"), \ - _PyUnicode_LATIN1_INIT("\xf8", "\xc3\xb8"), \ - _PyUnicode_LATIN1_INIT("\xf9", "\xc3\xb9"), \ - _PyUnicode_LATIN1_INIT("\xfa", "\xc3\xba"), \ - _PyUnicode_LATIN1_INIT("\xfb", "\xc3\xbb"), \ - _PyUnicode_LATIN1_INIT("\xfc", "\xc3\xbc"), \ - _PyUnicode_LATIN1_INIT("\xfd", "\xc3\xbd"), \ - _PyUnicode_LATIN1_INIT("\xfe", "\xc3\xbe"), \ - _PyUnicode_LATIN1_INIT("\xff", "\xc3\xbf"), \ - }, \ - }, \ - \ - .tuple_empty = { \ - .ob_base = _PyVarObject_IMMORTAL_INIT(&PyTuple_Type, 0) \ - }, \ - }, \ +#define _Py_small_ints_INIT { \ + _PyLong_DIGIT_INIT(-5), \ + 
_PyLong_DIGIT_INIT(-4), \ + _PyLong_DIGIT_INIT(-3), \ + _PyLong_DIGIT_INIT(-2), \ + _PyLong_DIGIT_INIT(-1), \ + _PyLong_DIGIT_INIT(0), \ + _PyLong_DIGIT_INIT(1), \ + _PyLong_DIGIT_INIT(2), \ + _PyLong_DIGIT_INIT(3), \ + _PyLong_DIGIT_INIT(4), \ + _PyLong_DIGIT_INIT(5), \ + _PyLong_DIGIT_INIT(6), \ + _PyLong_DIGIT_INIT(7), \ + _PyLong_DIGIT_INIT(8), \ + _PyLong_DIGIT_INIT(9), \ + _PyLong_DIGIT_INIT(10), \ + _PyLong_DIGIT_INIT(11), \ + _PyLong_DIGIT_INIT(12), \ + _PyLong_DIGIT_INIT(13), \ + _PyLong_DIGIT_INIT(14), \ + _PyLong_DIGIT_INIT(15), \ + _PyLong_DIGIT_INIT(16), \ + _PyLong_DIGIT_INIT(17), \ + _PyLong_DIGIT_INIT(18), \ + _PyLong_DIGIT_INIT(19), \ + _PyLong_DIGIT_INIT(20), \ + _PyLong_DIGIT_INIT(21), \ + _PyLong_DIGIT_INIT(22), \ + _PyLong_DIGIT_INIT(23), \ + _PyLong_DIGIT_INIT(24), \ + _PyLong_DIGIT_INIT(25), \ + _PyLong_DIGIT_INIT(26), \ + _PyLong_DIGIT_INIT(27), \ + _PyLong_DIGIT_INIT(28), \ + _PyLong_DIGIT_INIT(29), \ + _PyLong_DIGIT_INIT(30), \ + _PyLong_DIGIT_INIT(31), \ + _PyLong_DIGIT_INIT(32), \ + _PyLong_DIGIT_INIT(33), \ + _PyLong_DIGIT_INIT(34), \ + _PyLong_DIGIT_INIT(35), \ + _PyLong_DIGIT_INIT(36), \ + _PyLong_DIGIT_INIT(37), \ + _PyLong_DIGIT_INIT(38), \ + _PyLong_DIGIT_INIT(39), \ + _PyLong_DIGIT_INIT(40), \ + _PyLong_DIGIT_INIT(41), \ + _PyLong_DIGIT_INIT(42), \ + _PyLong_DIGIT_INIT(43), \ + _PyLong_DIGIT_INIT(44), \ + _PyLong_DIGIT_INIT(45), \ + _PyLong_DIGIT_INIT(46), \ + _PyLong_DIGIT_INIT(47), \ + _PyLong_DIGIT_INIT(48), \ + _PyLong_DIGIT_INIT(49), \ + _PyLong_DIGIT_INIT(50), \ + _PyLong_DIGIT_INIT(51), \ + _PyLong_DIGIT_INIT(52), \ + _PyLong_DIGIT_INIT(53), \ + _PyLong_DIGIT_INIT(54), \ + _PyLong_DIGIT_INIT(55), \ + _PyLong_DIGIT_INIT(56), \ + _PyLong_DIGIT_INIT(57), \ + _PyLong_DIGIT_INIT(58), \ + _PyLong_DIGIT_INIT(59), \ + _PyLong_DIGIT_INIT(60), \ + _PyLong_DIGIT_INIT(61), \ + _PyLong_DIGIT_INIT(62), \ + _PyLong_DIGIT_INIT(63), \ + _PyLong_DIGIT_INIT(64), \ + _PyLong_DIGIT_INIT(65), \ + _PyLong_DIGIT_INIT(66), \ + _PyLong_DIGIT_INIT(67), \ + _PyLong_DIGIT_INIT(68), \ + _PyLong_DIGIT_INIT(69), \ + _PyLong_DIGIT_INIT(70), \ + _PyLong_DIGIT_INIT(71), \ + _PyLong_DIGIT_INIT(72), \ + _PyLong_DIGIT_INIT(73), \ + _PyLong_DIGIT_INIT(74), \ + _PyLong_DIGIT_INIT(75), \ + _PyLong_DIGIT_INIT(76), \ + _PyLong_DIGIT_INIT(77), \ + _PyLong_DIGIT_INIT(78), \ + _PyLong_DIGIT_INIT(79), \ + _PyLong_DIGIT_INIT(80), \ + _PyLong_DIGIT_INIT(81), \ + _PyLong_DIGIT_INIT(82), \ + _PyLong_DIGIT_INIT(83), \ + _PyLong_DIGIT_INIT(84), \ + _PyLong_DIGIT_INIT(85), \ + _PyLong_DIGIT_INIT(86), \ + _PyLong_DIGIT_INIT(87), \ + _PyLong_DIGIT_INIT(88), \ + _PyLong_DIGIT_INIT(89), \ + _PyLong_DIGIT_INIT(90), \ + _PyLong_DIGIT_INIT(91), \ + _PyLong_DIGIT_INIT(92), \ + _PyLong_DIGIT_INIT(93), \ + _PyLong_DIGIT_INIT(94), \ + _PyLong_DIGIT_INIT(95), \ + _PyLong_DIGIT_INIT(96), \ + _PyLong_DIGIT_INIT(97), \ + _PyLong_DIGIT_INIT(98), \ + _PyLong_DIGIT_INIT(99), \ + _PyLong_DIGIT_INIT(100), \ + _PyLong_DIGIT_INIT(101), \ + _PyLong_DIGIT_INIT(102), \ + _PyLong_DIGIT_INIT(103), \ + _PyLong_DIGIT_INIT(104), \ + _PyLong_DIGIT_INIT(105), \ + _PyLong_DIGIT_INIT(106), \ + _PyLong_DIGIT_INIT(107), \ + _PyLong_DIGIT_INIT(108), \ + _PyLong_DIGIT_INIT(109), \ + _PyLong_DIGIT_INIT(110), \ + _PyLong_DIGIT_INIT(111), \ + _PyLong_DIGIT_INIT(112), \ + _PyLong_DIGIT_INIT(113), \ + _PyLong_DIGIT_INIT(114), \ + _PyLong_DIGIT_INIT(115), \ + _PyLong_DIGIT_INIT(116), \ + _PyLong_DIGIT_INIT(117), \ + _PyLong_DIGIT_INIT(118), \ + _PyLong_DIGIT_INIT(119), \ + _PyLong_DIGIT_INIT(120), \ + _PyLong_DIGIT_INIT(121), \ + 
_PyLong_DIGIT_INIT(122), \ + _PyLong_DIGIT_INIT(123), \ + _PyLong_DIGIT_INIT(124), \ + _PyLong_DIGIT_INIT(125), \ + _PyLong_DIGIT_INIT(126), \ + _PyLong_DIGIT_INIT(127), \ + _PyLong_DIGIT_INIT(128), \ + _PyLong_DIGIT_INIT(129), \ + _PyLong_DIGIT_INIT(130), \ + _PyLong_DIGIT_INIT(131), \ + _PyLong_DIGIT_INIT(132), \ + _PyLong_DIGIT_INIT(133), \ + _PyLong_DIGIT_INIT(134), \ + _PyLong_DIGIT_INIT(135), \ + _PyLong_DIGIT_INIT(136), \ + _PyLong_DIGIT_INIT(137), \ + _PyLong_DIGIT_INIT(138), \ + _PyLong_DIGIT_INIT(139), \ + _PyLong_DIGIT_INIT(140), \ + _PyLong_DIGIT_INIT(141), \ + _PyLong_DIGIT_INIT(142), \ + _PyLong_DIGIT_INIT(143), \ + _PyLong_DIGIT_INIT(144), \ + _PyLong_DIGIT_INIT(145), \ + _PyLong_DIGIT_INIT(146), \ + _PyLong_DIGIT_INIT(147), \ + _PyLong_DIGIT_INIT(148), \ + _PyLong_DIGIT_INIT(149), \ + _PyLong_DIGIT_INIT(150), \ + _PyLong_DIGIT_INIT(151), \ + _PyLong_DIGIT_INIT(152), \ + _PyLong_DIGIT_INIT(153), \ + _PyLong_DIGIT_INIT(154), \ + _PyLong_DIGIT_INIT(155), \ + _PyLong_DIGIT_INIT(156), \ + _PyLong_DIGIT_INIT(157), \ + _PyLong_DIGIT_INIT(158), \ + _PyLong_DIGIT_INIT(159), \ + _PyLong_DIGIT_INIT(160), \ + _PyLong_DIGIT_INIT(161), \ + _PyLong_DIGIT_INIT(162), \ + _PyLong_DIGIT_INIT(163), \ + _PyLong_DIGIT_INIT(164), \ + _PyLong_DIGIT_INIT(165), \ + _PyLong_DIGIT_INIT(166), \ + _PyLong_DIGIT_INIT(167), \ + _PyLong_DIGIT_INIT(168), \ + _PyLong_DIGIT_INIT(169), \ + _PyLong_DIGIT_INIT(170), \ + _PyLong_DIGIT_INIT(171), \ + _PyLong_DIGIT_INIT(172), \ + _PyLong_DIGIT_INIT(173), \ + _PyLong_DIGIT_INIT(174), \ + _PyLong_DIGIT_INIT(175), \ + _PyLong_DIGIT_INIT(176), \ + _PyLong_DIGIT_INIT(177), \ + _PyLong_DIGIT_INIT(178), \ + _PyLong_DIGIT_INIT(179), \ + _PyLong_DIGIT_INIT(180), \ + _PyLong_DIGIT_INIT(181), \ + _PyLong_DIGIT_INIT(182), \ + _PyLong_DIGIT_INIT(183), \ + _PyLong_DIGIT_INIT(184), \ + _PyLong_DIGIT_INIT(185), \ + _PyLong_DIGIT_INIT(186), \ + _PyLong_DIGIT_INIT(187), \ + _PyLong_DIGIT_INIT(188), \ + _PyLong_DIGIT_INIT(189), \ + _PyLong_DIGIT_INIT(190), \ + _PyLong_DIGIT_INIT(191), \ + _PyLong_DIGIT_INIT(192), \ + _PyLong_DIGIT_INIT(193), \ + _PyLong_DIGIT_INIT(194), \ + _PyLong_DIGIT_INIT(195), \ + _PyLong_DIGIT_INIT(196), \ + _PyLong_DIGIT_INIT(197), \ + _PyLong_DIGIT_INIT(198), \ + _PyLong_DIGIT_INIT(199), \ + _PyLong_DIGIT_INIT(200), \ + _PyLong_DIGIT_INIT(201), \ + _PyLong_DIGIT_INIT(202), \ + _PyLong_DIGIT_INIT(203), \ + _PyLong_DIGIT_INIT(204), \ + _PyLong_DIGIT_INIT(205), \ + _PyLong_DIGIT_INIT(206), \ + _PyLong_DIGIT_INIT(207), \ + _PyLong_DIGIT_INIT(208), \ + _PyLong_DIGIT_INIT(209), \ + _PyLong_DIGIT_INIT(210), \ + _PyLong_DIGIT_INIT(211), \ + _PyLong_DIGIT_INIT(212), \ + _PyLong_DIGIT_INIT(213), \ + _PyLong_DIGIT_INIT(214), \ + _PyLong_DIGIT_INIT(215), \ + _PyLong_DIGIT_INIT(216), \ + _PyLong_DIGIT_INIT(217), \ + _PyLong_DIGIT_INIT(218), \ + _PyLong_DIGIT_INIT(219), \ + _PyLong_DIGIT_INIT(220), \ + _PyLong_DIGIT_INIT(221), \ + _PyLong_DIGIT_INIT(222), \ + _PyLong_DIGIT_INIT(223), \ + _PyLong_DIGIT_INIT(224), \ + _PyLong_DIGIT_INIT(225), \ + _PyLong_DIGIT_INIT(226), \ + _PyLong_DIGIT_INIT(227), \ + _PyLong_DIGIT_INIT(228), \ + _PyLong_DIGIT_INIT(229), \ + _PyLong_DIGIT_INIT(230), \ + _PyLong_DIGIT_INIT(231), \ + _PyLong_DIGIT_INIT(232), \ + _PyLong_DIGIT_INIT(233), \ + _PyLong_DIGIT_INIT(234), \ + _PyLong_DIGIT_INIT(235), \ + _PyLong_DIGIT_INIT(236), \ + _PyLong_DIGIT_INIT(237), \ + _PyLong_DIGIT_INIT(238), \ + _PyLong_DIGIT_INIT(239), \ + _PyLong_DIGIT_INIT(240), \ + _PyLong_DIGIT_INIT(241), \ + _PyLong_DIGIT_INIT(242), \ + _PyLong_DIGIT_INIT(243), \ + 
_PyLong_DIGIT_INIT(244), \ + _PyLong_DIGIT_INIT(245), \ + _PyLong_DIGIT_INIT(246), \ + _PyLong_DIGIT_INIT(247), \ + _PyLong_DIGIT_INIT(248), \ + _PyLong_DIGIT_INIT(249), \ + _PyLong_DIGIT_INIT(250), \ + _PyLong_DIGIT_INIT(251), \ + _PyLong_DIGIT_INIT(252), \ + _PyLong_DIGIT_INIT(253), \ + _PyLong_DIGIT_INIT(254), \ + _PyLong_DIGIT_INIT(255), \ + _PyLong_DIGIT_INIT(256), \ } -static inline void -_PyUnicode_InitStaticStrings(void) { - PyObject *string; - string = &_Py_ID(CANCELLED); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(FINISHED); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(False); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(JSONDecodeError); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(PENDING); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(Py_Repr); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(TextIOWrapper); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(True); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(WarningMessage); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__IOBase_closed); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__abc_tpflags__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__abs__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__abstractmethods__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__add__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__aenter__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__aexit__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__aiter__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__all__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__and__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__anext__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__annotations__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__args__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__asyncio_running_event_loop__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__await__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__bases__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__bool__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__build_class__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__builtins__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__bytes__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__call__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__cantrace__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__class__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__class_getitem__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__classcell__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__complex__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__contains__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__copy__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__del__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__delattr__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__delete__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__delitem__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__dict__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__dictoffset__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__dir__); - PyUnicode_InternInPlace(&string); - string = 
&_Py_ID(__divmod__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__doc__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__enter__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__eq__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__exit__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__file__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__float__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__floordiv__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__format__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__fspath__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__ge__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__get__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__getattr__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__getattribute__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__getinitargs__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__getitem__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__getnewargs__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__getnewargs_ex__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__getstate__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__gt__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__hash__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__iadd__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__iand__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__ifloordiv__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__ilshift__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__imatmul__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__imod__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__import__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__imul__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__index__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__init__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__init_subclass__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__instancecheck__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__int__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__invert__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__ior__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__ipow__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__irshift__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__isabstractmethod__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__isub__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__iter__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__itruediv__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__ixor__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__le__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__len__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__length_hint__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__lltrace__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__loader__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__lshift__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__lt__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__main__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__matmul__); - PyUnicode_InternInPlace(&string); - string = 
&_Py_ID(__missing__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__mod__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__module__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__mro_entries__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__mul__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__name__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__ne__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__neg__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__new__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__newobj__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__newobj_ex__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__next__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__notes__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__or__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__orig_class__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__origin__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__package__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__parameters__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__path__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__pos__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__pow__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__prepare__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__qualname__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__radd__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rand__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rdivmod__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__reduce__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__reduce_ex__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__repr__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__reversed__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rfloordiv__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rlshift__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rmatmul__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rmod__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rmul__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__ror__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__round__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rpow__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rrshift__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rshift__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rsub__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rtruediv__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__rxor__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__set__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__set_name__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__setattr__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__setitem__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__setstate__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__sizeof__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__slotnames__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__slots__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__spec__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__str__); - 
PyUnicode_InternInPlace(&string); - string = &_Py_ID(__sub__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__subclasscheck__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__subclasshook__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__truediv__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__trunc__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__typing_is_unpacked_typevartuple__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__typing_prepare_subst__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__typing_subst__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__typing_unpacked_tuple_args__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__warningregistry__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__weaklistoffset__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__weakref__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(__xor__); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_abc_impl); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_annotation); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_asyncio_future_blocking); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_blksize); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_bootstrap); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_dealloc_warn); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_feature_version); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_finalizing); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_find_and_load); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_fix_up_module); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_get_sourcefile); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_handle_fromlist); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_initializing); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_is_text_encoding); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_lock_unlock_module); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_loop); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_showwarnmsg); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_shutdown); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_slotnames); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_uninitialized_submodules); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_warn_unawaited_coroutine); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(_xoptions); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(a); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(abs_tol); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(access); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(add); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(add_done_callback); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(after_in_child); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(after_in_parent); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(aggregate_class); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(append); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(argdefs); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(arguments); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(argv); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(attribute); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(authorizer_callback); - PyUnicode_InternInPlace(&string); - string = 
&_Py_ID(b); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(backtick); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(base); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(before); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(big); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(binary_form); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(block); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(buffer); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(buffer_callback); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(buffer_size); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(buffering); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(buffers); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(bufsize); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(builtins); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(byteorder); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(bytes); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(bytes_per_sep); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(c_call); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(c_exception); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(c_return); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(cached_statements); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(cadata); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(cafile); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(call); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(call_exception_handler); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(call_soon); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(cancel); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(capath); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(category); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(cb_type); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(certfile); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(check_same_thread); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(clear); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(close); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(closed); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(closefd); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(closure); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_argcount); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_cellvars); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_code); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_consts); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_exceptiontable); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_filename); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_firstlineno); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_flags); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_freevars); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_kwonlyargcount); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_linetable); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_name); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_names); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_nlocals); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_posonlyargcount); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_qualname); - PyUnicode_InternInPlace(&string); - string = 
&_Py_ID(co_stacksize); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(co_varnames); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(code); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(command); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(comment_factory); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(consts); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(context); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(cookie); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(copy); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(copyreg); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(coro); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(count); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(cwd); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(data); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(database); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(decode); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(decoder); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(default); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(defaultaction); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(delete); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(depth); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(detect_types); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(deterministic); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(device); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(dict); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(dictcomp); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(difference_update); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(digest); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(digest_size); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(digestmod); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(dir_fd); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(discard); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(dispatch_table); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(displayhook); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(dklen); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(doc); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(dont_inherit); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(dst); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(dst_dir_fd); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(duration); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(effective_ids); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(element_factory); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(encode); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(encoding); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(end); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(end_lineno); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(end_offset); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(endpos); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(env); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(errors); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(event); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(eventmask); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(exc_type); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(exc_value); - PyUnicode_InternInPlace(&string); - string = 
&_Py_ID(excepthook); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(exception); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(exp); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(extend); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(facility); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(factory); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(false); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(family); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fanout); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fd); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fd2); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fdel); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fget); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(file); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(file_actions); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(filename); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fileno); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(filepath); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fillvalue); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(filters); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(final); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(find_class); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fix_imports); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(flags); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(flush); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(follow_symlinks); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(format); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(frequency); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fromlist); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(fset); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(func); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(future); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(generation); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(genexpr); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(get); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(get_debug); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(get_event_loop); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(get_loop); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(get_source); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(getattr); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(getstate); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(gid); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(globals); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(groupindex); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(groups); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(handle); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(hash_name); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(header); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(headers); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(hi); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(hook); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(id); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(ident); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(ignore); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(imag); - PyUnicode_InternInPlace(&string); - 
string = &_Py_ID(importlib); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(in_fd); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(incoming); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(indexgroup); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(inf); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(inheritable); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(initial); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(initial_bytes); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(initial_value); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(initval); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(inner_size); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(input); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(insert_comments); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(insert_pis); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(instructions); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(intern); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(intersection); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(isatty); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(isinstance); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(isolation_level); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(istext); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(item); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(items); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(iter); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(iterable); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(iterations); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(join); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(jump); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(keepends); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(key); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(keyfile); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(keys); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(kind); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(lambda); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(last); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(last_node); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(last_traceback); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(last_type); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(last_value); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(latin1); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(leaf_size); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(len); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(length); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(level); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(limit); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(line); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(line_buffering); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(lineno); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(listcomp); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(little); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(lo); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(locale); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(locals); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(logoption); - PyUnicode_InternInPlace(&string); - string = 
&_Py_ID(loop); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(mapping); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(match); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(max_length); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(maxdigits); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(maxevents); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(maxmem); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(maxsplit); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(maxvalue); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(memLevel); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(memlimit); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(message); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(metaclass); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(method); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(mod); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(mode); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(module); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(module_globals); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(modules); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(mro); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(msg); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(mycmp); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(n); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(n_arg); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(n_fields); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(n_sequence_fields); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(n_unnamed_fields); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(name); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(name_from); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(namespace_separator); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(namespaces); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(narg); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(ndigits); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(new_limit); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(newline); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(newlines); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(next); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(node_depth); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(node_offset); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(ns); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(nstype); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(null); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(number); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(obj); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(object); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(offset); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(offset_dst); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(offset_src); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(on_type_read); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(onceregistry); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(only_keys); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(oparg); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(opcode); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(open); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(opener); - 
PyUnicode_InternInPlace(&string); - string = &_Py_ID(operation); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(optimize); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(options); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(order); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(out_fd); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(outgoing); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(overlapped); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(owner); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(p); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(pages); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(parent); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(password); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(path); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(pattern); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(peek); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(persistent_id); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(persistent_load); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(person); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(pi_factory); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(pid); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(policy); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(pos); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(print_file_and_line); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(priority); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(progress); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(progress_handler); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(proto); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(protocol); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(ps1); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(ps2); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(query); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(quotetabs); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(r); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(raw); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(read); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(read1); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(readable); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(readall); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(readinto); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(readinto1); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(readline); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(readonly); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(real); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(reducer_override); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(registry); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(rel_tol); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(reload); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(repl); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(replace); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(reserved); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(reset); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(resetids); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(return); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(reverse); - PyUnicode_InternInPlace(&string); - string 
= &_Py_ID(reversed); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(s); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(salt); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(sched_priority); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(scheduler); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(seek); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(seekable); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(selectors); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(send); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(sep); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(sequence); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(server_hostname); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(server_side); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(session); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(setcomp); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(setpgroup); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(setsid); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(setsigdef); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(setsigmask); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(setstate); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(shape); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(show_cmd); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(signed); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(size); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(sizehint); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(sleep); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(sock); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(sort); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(sound); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(source); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(source_traceback); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(src); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(src_dir_fd); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(stacklevel); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(start); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(statement); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(status); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(stderr); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(stdin); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(stdout); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(step); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(store_name); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(strategy); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(strict); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(strict_mode); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(string); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(sub_key); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(symmetric_difference_update); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(tabsize); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(tag); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(target); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(target_is_directory); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(task); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(tb_frame); - PyUnicode_InternInPlace(&string); - string = 
&_Py_ID(tb_lasti); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(tb_lineno); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(tb_next); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(tell); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(template); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(term); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(text); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(threading); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(throw); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(timeout); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(times); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(top); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(trace_callback); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(traceback); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(trailers); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(translate); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(true); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(truncate); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(twice); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(txt); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(type); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(tz); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(uid); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(unlink); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(unraisablehook); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(uri); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(usedforsecurity); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(value); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(values); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(version); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(warnings); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(warnoptions); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(wbits); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(week); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(weekday); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(which); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(who); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(withdata); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(writable); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(write); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(write_through); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(x); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(year); - PyUnicode_InternInPlace(&string); - string = &_Py_ID(zdict); - PyUnicode_InternInPlace(&string); +#define _Py_bytes_characters_INIT { \ + _PyBytes_CHAR_INIT(0), \ + _PyBytes_CHAR_INIT(1), \ + _PyBytes_CHAR_INIT(2), \ + _PyBytes_CHAR_INIT(3), \ + _PyBytes_CHAR_INIT(4), \ + _PyBytes_CHAR_INIT(5), \ + _PyBytes_CHAR_INIT(6), \ + _PyBytes_CHAR_INIT(7), \ + _PyBytes_CHAR_INIT(8), \ + _PyBytes_CHAR_INIT(9), \ + _PyBytes_CHAR_INIT(10), \ + _PyBytes_CHAR_INIT(11), \ + _PyBytes_CHAR_INIT(12), \ + _PyBytes_CHAR_INIT(13), \ + _PyBytes_CHAR_INIT(14), \ + _PyBytes_CHAR_INIT(15), \ + _PyBytes_CHAR_INIT(16), \ + _PyBytes_CHAR_INIT(17), \ + _PyBytes_CHAR_INIT(18), \ + _PyBytes_CHAR_INIT(19), \ + _PyBytes_CHAR_INIT(20), \ + _PyBytes_CHAR_INIT(21), \ + _PyBytes_CHAR_INIT(22), \ + _PyBytes_CHAR_INIT(23), \ + _PyBytes_CHAR_INIT(24), \ 
+ _PyBytes_CHAR_INIT(25), \ + _PyBytes_CHAR_INIT(26), \ + _PyBytes_CHAR_INIT(27), \ + _PyBytes_CHAR_INIT(28), \ + _PyBytes_CHAR_INIT(29), \ + _PyBytes_CHAR_INIT(30), \ + _PyBytes_CHAR_INIT(31), \ + _PyBytes_CHAR_INIT(32), \ + _PyBytes_CHAR_INIT(33), \ + _PyBytes_CHAR_INIT(34), \ + _PyBytes_CHAR_INIT(35), \ + _PyBytes_CHAR_INIT(36), \ + _PyBytes_CHAR_INIT(37), \ + _PyBytes_CHAR_INIT(38), \ + _PyBytes_CHAR_INIT(39), \ + _PyBytes_CHAR_INIT(40), \ + _PyBytes_CHAR_INIT(41), \ + _PyBytes_CHAR_INIT(42), \ + _PyBytes_CHAR_INIT(43), \ + _PyBytes_CHAR_INIT(44), \ + _PyBytes_CHAR_INIT(45), \ + _PyBytes_CHAR_INIT(46), \ + _PyBytes_CHAR_INIT(47), \ + _PyBytes_CHAR_INIT(48), \ + _PyBytes_CHAR_INIT(49), \ + _PyBytes_CHAR_INIT(50), \ + _PyBytes_CHAR_INIT(51), \ + _PyBytes_CHAR_INIT(52), \ + _PyBytes_CHAR_INIT(53), \ + _PyBytes_CHAR_INIT(54), \ + _PyBytes_CHAR_INIT(55), \ + _PyBytes_CHAR_INIT(56), \ + _PyBytes_CHAR_INIT(57), \ + _PyBytes_CHAR_INIT(58), \ + _PyBytes_CHAR_INIT(59), \ + _PyBytes_CHAR_INIT(60), \ + _PyBytes_CHAR_INIT(61), \ + _PyBytes_CHAR_INIT(62), \ + _PyBytes_CHAR_INIT(63), \ + _PyBytes_CHAR_INIT(64), \ + _PyBytes_CHAR_INIT(65), \ + _PyBytes_CHAR_INIT(66), \ + _PyBytes_CHAR_INIT(67), \ + _PyBytes_CHAR_INIT(68), \ + _PyBytes_CHAR_INIT(69), \ + _PyBytes_CHAR_INIT(70), \ + _PyBytes_CHAR_INIT(71), \ + _PyBytes_CHAR_INIT(72), \ + _PyBytes_CHAR_INIT(73), \ + _PyBytes_CHAR_INIT(74), \ + _PyBytes_CHAR_INIT(75), \ + _PyBytes_CHAR_INIT(76), \ + _PyBytes_CHAR_INIT(77), \ + _PyBytes_CHAR_INIT(78), \ + _PyBytes_CHAR_INIT(79), \ + _PyBytes_CHAR_INIT(80), \ + _PyBytes_CHAR_INIT(81), \ + _PyBytes_CHAR_INIT(82), \ + _PyBytes_CHAR_INIT(83), \ + _PyBytes_CHAR_INIT(84), \ + _PyBytes_CHAR_INIT(85), \ + _PyBytes_CHAR_INIT(86), \ + _PyBytes_CHAR_INIT(87), \ + _PyBytes_CHAR_INIT(88), \ + _PyBytes_CHAR_INIT(89), \ + _PyBytes_CHAR_INIT(90), \ + _PyBytes_CHAR_INIT(91), \ + _PyBytes_CHAR_INIT(92), \ + _PyBytes_CHAR_INIT(93), \ + _PyBytes_CHAR_INIT(94), \ + _PyBytes_CHAR_INIT(95), \ + _PyBytes_CHAR_INIT(96), \ + _PyBytes_CHAR_INIT(97), \ + _PyBytes_CHAR_INIT(98), \ + _PyBytes_CHAR_INIT(99), \ + _PyBytes_CHAR_INIT(100), \ + _PyBytes_CHAR_INIT(101), \ + _PyBytes_CHAR_INIT(102), \ + _PyBytes_CHAR_INIT(103), \ + _PyBytes_CHAR_INIT(104), \ + _PyBytes_CHAR_INIT(105), \ + _PyBytes_CHAR_INIT(106), \ + _PyBytes_CHAR_INIT(107), \ + _PyBytes_CHAR_INIT(108), \ + _PyBytes_CHAR_INIT(109), \ + _PyBytes_CHAR_INIT(110), \ + _PyBytes_CHAR_INIT(111), \ + _PyBytes_CHAR_INIT(112), \ + _PyBytes_CHAR_INIT(113), \ + _PyBytes_CHAR_INIT(114), \ + _PyBytes_CHAR_INIT(115), \ + _PyBytes_CHAR_INIT(116), \ + _PyBytes_CHAR_INIT(117), \ + _PyBytes_CHAR_INIT(118), \ + _PyBytes_CHAR_INIT(119), \ + _PyBytes_CHAR_INIT(120), \ + _PyBytes_CHAR_INIT(121), \ + _PyBytes_CHAR_INIT(122), \ + _PyBytes_CHAR_INIT(123), \ + _PyBytes_CHAR_INIT(124), \ + _PyBytes_CHAR_INIT(125), \ + _PyBytes_CHAR_INIT(126), \ + _PyBytes_CHAR_INIT(127), \ + _PyBytes_CHAR_INIT(128), \ + _PyBytes_CHAR_INIT(129), \ + _PyBytes_CHAR_INIT(130), \ + _PyBytes_CHAR_INIT(131), \ + _PyBytes_CHAR_INIT(132), \ + _PyBytes_CHAR_INIT(133), \ + _PyBytes_CHAR_INIT(134), \ + _PyBytes_CHAR_INIT(135), \ + _PyBytes_CHAR_INIT(136), \ + _PyBytes_CHAR_INIT(137), \ + _PyBytes_CHAR_INIT(138), \ + _PyBytes_CHAR_INIT(139), \ + _PyBytes_CHAR_INIT(140), \ + _PyBytes_CHAR_INIT(141), \ + _PyBytes_CHAR_INIT(142), \ + _PyBytes_CHAR_INIT(143), \ + _PyBytes_CHAR_INIT(144), \ + _PyBytes_CHAR_INIT(145), \ + _PyBytes_CHAR_INIT(146), \ + _PyBytes_CHAR_INIT(147), \ + _PyBytes_CHAR_INIT(148), \ + _PyBytes_CHAR_INIT(149), \ + 
_PyBytes_CHAR_INIT(150), \ + _PyBytes_CHAR_INIT(151), \ + _PyBytes_CHAR_INIT(152), \ + _PyBytes_CHAR_INIT(153), \ + _PyBytes_CHAR_INIT(154), \ + _PyBytes_CHAR_INIT(155), \ + _PyBytes_CHAR_INIT(156), \ + _PyBytes_CHAR_INIT(157), \ + _PyBytes_CHAR_INIT(158), \ + _PyBytes_CHAR_INIT(159), \ + _PyBytes_CHAR_INIT(160), \ + _PyBytes_CHAR_INIT(161), \ + _PyBytes_CHAR_INIT(162), \ + _PyBytes_CHAR_INIT(163), \ + _PyBytes_CHAR_INIT(164), \ + _PyBytes_CHAR_INIT(165), \ + _PyBytes_CHAR_INIT(166), \ + _PyBytes_CHAR_INIT(167), \ + _PyBytes_CHAR_INIT(168), \ + _PyBytes_CHAR_INIT(169), \ + _PyBytes_CHAR_INIT(170), \ + _PyBytes_CHAR_INIT(171), \ + _PyBytes_CHAR_INIT(172), \ + _PyBytes_CHAR_INIT(173), \ + _PyBytes_CHAR_INIT(174), \ + _PyBytes_CHAR_INIT(175), \ + _PyBytes_CHAR_INIT(176), \ + _PyBytes_CHAR_INIT(177), \ + _PyBytes_CHAR_INIT(178), \ + _PyBytes_CHAR_INIT(179), \ + _PyBytes_CHAR_INIT(180), \ + _PyBytes_CHAR_INIT(181), \ + _PyBytes_CHAR_INIT(182), \ + _PyBytes_CHAR_INIT(183), \ + _PyBytes_CHAR_INIT(184), \ + _PyBytes_CHAR_INIT(185), \ + _PyBytes_CHAR_INIT(186), \ + _PyBytes_CHAR_INIT(187), \ + _PyBytes_CHAR_INIT(188), \ + _PyBytes_CHAR_INIT(189), \ + _PyBytes_CHAR_INIT(190), \ + _PyBytes_CHAR_INIT(191), \ + _PyBytes_CHAR_INIT(192), \ + _PyBytes_CHAR_INIT(193), \ + _PyBytes_CHAR_INIT(194), \ + _PyBytes_CHAR_INIT(195), \ + _PyBytes_CHAR_INIT(196), \ + _PyBytes_CHAR_INIT(197), \ + _PyBytes_CHAR_INIT(198), \ + _PyBytes_CHAR_INIT(199), \ + _PyBytes_CHAR_INIT(200), \ + _PyBytes_CHAR_INIT(201), \ + _PyBytes_CHAR_INIT(202), \ + _PyBytes_CHAR_INIT(203), \ + _PyBytes_CHAR_INIT(204), \ + _PyBytes_CHAR_INIT(205), \ + _PyBytes_CHAR_INIT(206), \ + _PyBytes_CHAR_INIT(207), \ + _PyBytes_CHAR_INIT(208), \ + _PyBytes_CHAR_INIT(209), \ + _PyBytes_CHAR_INIT(210), \ + _PyBytes_CHAR_INIT(211), \ + _PyBytes_CHAR_INIT(212), \ + _PyBytes_CHAR_INIT(213), \ + _PyBytes_CHAR_INIT(214), \ + _PyBytes_CHAR_INIT(215), \ + _PyBytes_CHAR_INIT(216), \ + _PyBytes_CHAR_INIT(217), \ + _PyBytes_CHAR_INIT(218), \ + _PyBytes_CHAR_INIT(219), \ + _PyBytes_CHAR_INIT(220), \ + _PyBytes_CHAR_INIT(221), \ + _PyBytes_CHAR_INIT(222), \ + _PyBytes_CHAR_INIT(223), \ + _PyBytes_CHAR_INIT(224), \ + _PyBytes_CHAR_INIT(225), \ + _PyBytes_CHAR_INIT(226), \ + _PyBytes_CHAR_INIT(227), \ + _PyBytes_CHAR_INIT(228), \ + _PyBytes_CHAR_INIT(229), \ + _PyBytes_CHAR_INIT(230), \ + _PyBytes_CHAR_INIT(231), \ + _PyBytes_CHAR_INIT(232), \ + _PyBytes_CHAR_INIT(233), \ + _PyBytes_CHAR_INIT(234), \ + _PyBytes_CHAR_INIT(235), \ + _PyBytes_CHAR_INIT(236), \ + _PyBytes_CHAR_INIT(237), \ + _PyBytes_CHAR_INIT(238), \ + _PyBytes_CHAR_INIT(239), \ + _PyBytes_CHAR_INIT(240), \ + _PyBytes_CHAR_INIT(241), \ + _PyBytes_CHAR_INIT(242), \ + _PyBytes_CHAR_INIT(243), \ + _PyBytes_CHAR_INIT(244), \ + _PyBytes_CHAR_INIT(245), \ + _PyBytes_CHAR_INIT(246), \ + _PyBytes_CHAR_INIT(247), \ + _PyBytes_CHAR_INIT(248), \ + _PyBytes_CHAR_INIT(249), \ + _PyBytes_CHAR_INIT(250), \ + _PyBytes_CHAR_INIT(251), \ + _PyBytes_CHAR_INIT(252), \ + _PyBytes_CHAR_INIT(253), \ + _PyBytes_CHAR_INIT(254), \ + _PyBytes_CHAR_INIT(255), \ } -#ifdef Py_DEBUG -static inline void -_PyStaticObjects_CheckRefcnt(void) { - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -5]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -5]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -4]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -4]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -3]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -3]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -2]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -2]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -1]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + -1]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 0]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 0]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 1]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 1]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 2]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 2]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 3]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 3]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 4]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 4]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 5]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 5]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 6]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 6]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 7]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 7]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 8]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 8]); - 
Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 9]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 9]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 10]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 10]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 11]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 11]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 12]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 12]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 13]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 13]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 14]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 14]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 15]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 15]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 16]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 16]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 17]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 17]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 18]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 18]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 19]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 19]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 20]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 20]); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 21]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 21]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 22]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 22]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 23]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 23]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 24]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 24]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 25]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 25]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 26]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 26]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 27]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 27]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 28]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 28]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 29]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 29]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 30]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 30]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 31]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 31]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 32]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 32]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 33]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 33]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 34]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 34]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 35]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 35]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 36]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 36]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 37]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 37]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 38]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 38]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 39]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 39]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 40]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 40]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 41]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 41]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 42]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 42]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 43]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 43]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 44]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 44]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 45]) < 
_PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 45]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 46]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 46]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 47]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 47]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 48]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 48]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 49]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 49]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 50]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 50]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 51]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 51]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 52]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 52]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 53]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 53]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 54]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 54]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 55]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 55]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 56]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 56]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 57]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 57]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 58]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 58]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 59]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 59]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 60]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 60]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 61]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 61]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 62]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 62]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 63]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 63]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 64]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 64]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 65]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 65]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 66]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 66]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 67]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 67]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 68]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 68]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 69]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 69]); - 
Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 70]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 70]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 71]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 71]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 72]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 72]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 73]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 73]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 74]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 74]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 75]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 75]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 76]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 76]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 77]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 77]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 78]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 78]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 79]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 79]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 80]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 80]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 81]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 81]); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 82]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 82]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 83]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 83]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 84]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 84]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 85]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 85]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 86]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 86]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 87]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 87]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 88]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 88]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 89]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 89]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 90]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 90]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 91]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 91]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 92]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 92]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 93]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 93]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 94]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 94]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 95]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 95]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 96]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 96]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 97]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 97]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 98]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 98]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 99]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 99]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 100]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 100]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 101]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 101]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 102]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 102]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 103]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 103]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 104]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 104]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 105]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 105]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 106]) < 
_PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 106]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 107]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 107]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 108]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 108]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 109]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 109]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 110]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 110]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 111]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 111]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 112]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 112]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 113]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 113]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 114]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 114]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 115]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 115]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 116]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 116]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 117]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 117]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 118]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 118]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 119]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 119]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 120]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 120]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 121]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 121]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 122]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 122]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 123]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 123]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 124]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 124]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 125]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 125]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 126]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 126]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 127]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 127]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 129]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 129]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 130]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 130]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 131]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 131]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 132]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 132]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 133]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 133]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 134]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 134]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 135]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 135]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 136]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 136]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 137]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 137]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 138]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 138]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 139]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 139]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 140]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 140]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 141]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 141]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 142]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 
142]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 143]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 143]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 144]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 144]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 145]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 145]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 146]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 146]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 147]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 147]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 148]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 148]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 149]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 149]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 150]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 150]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 151]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 151]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 152]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 152]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 153]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 153]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 154]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 154]); - Py_FatalError("immortal object has less 
refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 155]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 155]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 156]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 156]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 157]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 157]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 158]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 158]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 159]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 159]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 160]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 160]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 161]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 161]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 162]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 162]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 163]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 163]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 164]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 164]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 165]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 165]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 166]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 166]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); 
- }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 167]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 167]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 168]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 168]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 169]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 169]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 170]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 170]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 171]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 171]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 172]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 172]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 173]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 173]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 174]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 174]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 175]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 175]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 176]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 176]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 177]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 177]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 178]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 178]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 179]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 179]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 180]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 180]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 181]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 181]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 182]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 182]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 183]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 183]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 184]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 184]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 185]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 185]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 186]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 186]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 187]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 187]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 188]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 188]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 189]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 189]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 190]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 190]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 
191]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 191]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 192]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 192]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 193]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 193]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 194]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 194]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 195]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 195]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 196]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 196]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 197]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 197]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 198]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 198]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 199]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 199]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 200]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 200]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 201]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 201]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 202]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 202]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 203]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 203]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 204]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 204]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 205]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 205]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 206]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 206]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 207]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 207]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 208]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 208]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 209]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 209]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 210]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 210]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 211]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 211]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 212]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 212]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 213]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 213]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 214]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 214]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 215]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject 
*)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 215]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 216]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 216]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 217]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 217]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 218]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 218]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 219]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 219]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 220]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 220]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 221]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 221]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 222]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 222]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 223]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 223]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 224]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 224]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 225]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 225]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 226]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 226]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 227]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 
227]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 228]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 228]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 229]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 229]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 230]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 230]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 231]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 231]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 232]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 232]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 233]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 233]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 234]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 234]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 235]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 235]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 236]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 236]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 237]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 237]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 238]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 238]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 239]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 239]); - Py_FatalError("immortal object has less 
refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 240]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 240]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 241]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 241]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 242]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 242]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 243]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 243]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 244]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 244]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 245]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 245]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 246]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 246]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 247]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 247]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 248]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 248]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 249]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 249]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 250]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 250]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 251]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 251]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); 
- }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 252]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 252]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 253]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 253]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 254]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 254]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 255]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 255]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 256]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + 256]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_empty)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_empty)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[0]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[0]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[1]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[1]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[2]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[2]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[3]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[3]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[4]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[4]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[5]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[5]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[6]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[6]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(bytes_characters)[7]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[7]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[8]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[8]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[9]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[9]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[10]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[10]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[11]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[11]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[12]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[12]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[13]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[13]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[14]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[14]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[15]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[15]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[16]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[16]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[17]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[17]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[18]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[18]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[19]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[19]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[20]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[20]); - 
Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[21]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[21]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[22]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[22]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[23]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[23]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[24]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[24]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[25]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[25]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[26]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[26]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[27]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[27]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[28]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[28]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[29]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[29]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[30]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[30]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[31]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[31]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[32]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[32]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[33]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[33]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(bytes_characters)[34]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[34]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[35]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[35]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[36]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[36]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[37]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[37]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[38]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[38]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[39]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[39]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[40]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[40]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[41]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[41]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[42]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[42]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[43]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[43]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[44]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[44]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[45]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[45]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[46]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[46]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[47]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[47]); - 
Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[48]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[48]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[49]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[49]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[50]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[50]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[51]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[51]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[52]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[52]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[53]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[53]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[54]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[54]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[55]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[55]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[56]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[56]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[57]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[57]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[58]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[58]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[59]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[59]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[60]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[60]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(bytes_characters)[61]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[61]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[62]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[62]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[63]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[63]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[64]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[64]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[65]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[65]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[66]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[66]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[67]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[67]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[68]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[68]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[69]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[69]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[70]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[70]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[71]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[71]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[72]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[72]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[73]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[73]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[74]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[74]); - 
Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[75]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[75]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[76]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[76]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[77]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[77]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[78]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[78]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[79]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[79]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[80]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[80]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[81]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[81]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[82]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[82]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[83]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[83]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[84]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[84]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[85]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[85]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[86]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[86]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[87]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[87]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(bytes_characters)[88]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[88]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[89]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[89]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[90]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[90]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[91]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[91]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[92]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[92]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[93]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[93]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[94]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[94]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[95]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[95]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[96]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[96]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[97]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[97]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[98]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[98]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[99]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[99]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[100]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[100]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[101]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[101]); - 
Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[102]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[102]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[103]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[103]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[104]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[104]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[105]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[105]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[106]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[106]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[107]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[107]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[108]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[108]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[109]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[109]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[110]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[110]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[111]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[111]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[112]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[112]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[113]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[113]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(bytes_characters)[114]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(bytes_characters)[114]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(bytes_characters)[115]) < _PyObject_IMMORTAL_REFCNT) { ... };
[deleted hunk continues: the same generated refcount check is repeated verbatim for _Py_SINGLETON(bytes_characters)[115] through [255], for each _Py_STR singleton (anon_dictcomp, anon_genexpr, anon_lambda, anon_listcomp, anon_module, anon_setcomp, anon_string, anon_unknown, close_br, comma_sep, dbl_close_br, dbl_open_br, dbl_percent, dot, dot_locals, empty, json_decoder, list_err, newline, open_br, percent, utf_8), and for each _Py_ID identifier from CANCELLED through __set__; each check calls _PyObject_Dump() on the object and Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT") if Py_REFCNT() has fallen below _PyObject_IMMORTAL_REFCNT] - if
(Py_REFCNT((PyObject *)&_Py_ID(__set_name__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__set_name__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__setattr__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__setattr__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__setitem__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__setitem__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__setstate__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__setstate__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__sizeof__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__sizeof__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__slotnames__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__slotnames__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__slots__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__slots__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__spec__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__spec__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__str__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__str__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__sub__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__sub__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__subclasscheck__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__subclasscheck__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__subclasshook__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__subclasshook__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__truediv__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__truediv__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__trunc__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__trunc__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__typing_is_unpacked_typevartuple__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__typing_is_unpacked_typevartuple__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__typing_prepare_subst__)) 
< _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__typing_prepare_subst__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__typing_subst__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__typing_subst__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__typing_unpacked_tuple_args__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__typing_unpacked_tuple_args__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__warningregistry__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__warningregistry__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__weaklistoffset__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__weaklistoffset__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__weakref__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__weakref__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(__xor__)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(__xor__)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_abc_impl)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_abc_impl)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_annotation)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_annotation)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_asyncio_future_blocking)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_asyncio_future_blocking)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_blksize)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_blksize)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_bootstrap)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_bootstrap)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_dealloc_warn)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_dealloc_warn)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_feature_version)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_feature_version)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_finalizing)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_finalizing)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_ID(_find_and_load)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_find_and_load)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_fix_up_module)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_fix_up_module)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_get_sourcefile)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_get_sourcefile)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_handle_fromlist)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_handle_fromlist)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_initializing)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_initializing)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_is_text_encoding)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_is_text_encoding)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_lock_unlock_module)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_lock_unlock_module)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_loop)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_loop)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_showwarnmsg)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_showwarnmsg)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_shutdown)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_shutdown)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_slotnames)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_slotnames)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_uninitialized_submodules)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_uninitialized_submodules)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_warn_unawaited_coroutine)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_warn_unawaited_coroutine)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(_xoptions)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(_xoptions)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(a)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(a)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(abs_tol)) < 
_PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(abs_tol)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(access)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(access)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(add)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(add)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(add_done_callback)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(add_done_callback)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(after_in_child)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(after_in_child)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(after_in_parent)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(after_in_parent)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(aggregate_class)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(aggregate_class)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(append)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(append)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(argdefs)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(argdefs)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(arguments)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(arguments)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(argv)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(argv)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(attribute)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(attribute)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(authorizer_callback)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(authorizer_callback)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(b)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(b)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(backtick)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(backtick)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(base)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(base)); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(before)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(before)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(big)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(big)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(binary_form)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(binary_form)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(block)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(block)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(buffer)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(buffer)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(buffer_callback)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(buffer_callback)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(buffer_size)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(buffer_size)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(buffering)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(buffering)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(buffers)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(buffers)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(bufsize)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(bufsize)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(builtins)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(builtins)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(byteorder)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(byteorder)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(bytes)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(bytes)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(bytes_per_sep)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(bytes_per_sep)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(c_call)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(c_call)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(c_exception)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(c_exception)); - Py_FatalError("immortal 
object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(c_return)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(c_return)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(cached_statements)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(cached_statements)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(cadata)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(cadata)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(cafile)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(cafile)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(call)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(call)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(call_exception_handler)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(call_exception_handler)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(call_soon)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(call_soon)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(cancel)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(cancel)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(capath)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(capath)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(category)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(category)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(cb_type)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(cb_type)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(certfile)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(certfile)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(check_same_thread)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(check_same_thread)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(clear)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(clear)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(close)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(close)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(closed)) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_ID(closed)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(closefd)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(closefd)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(closure)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(closure)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_argcount)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_argcount)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_cellvars)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_cellvars)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_code)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_code)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_consts)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_consts)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_exceptiontable)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_exceptiontable)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_filename)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_filename)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_firstlineno)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_firstlineno)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_flags)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_flags)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_freevars)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_freevars)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_kwonlyargcount)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_kwonlyargcount)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_linetable)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_linetable)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_name)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_name)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_names)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_names)); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_nlocals)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_nlocals)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_posonlyargcount)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_posonlyargcount)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_qualname)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_qualname)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_stacksize)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_stacksize)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(co_varnames)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(co_varnames)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(code)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(code)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(command)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(command)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(comment_factory)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(comment_factory)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(consts)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(consts)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(context)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(context)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(cookie)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(cookie)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(copy)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(copy)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(copyreg)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(copyreg)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(coro)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(coro)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(count)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(count)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(cwd)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(cwd)); - Py_FatalError("immortal 
object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(data)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(data)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(database)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(database)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(decode)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(decode)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(decoder)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(decoder)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(default)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(default)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(defaultaction)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(defaultaction)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(delete)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(delete)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(depth)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(depth)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(detect_types)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(detect_types)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(deterministic)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(deterministic)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(device)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(device)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(dict)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(dict)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(dictcomp)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(dictcomp)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(difference_update)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(difference_update)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(digest)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(digest)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(digest_size)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject 
*)&_Py_ID(digest_size)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(digestmod)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(digestmod)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(dir_fd)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(dir_fd)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(discard)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(discard)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(dispatch_table)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(dispatch_table)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(displayhook)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(displayhook)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(dklen)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(dklen)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(doc)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(doc)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(dont_inherit)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(dont_inherit)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(dst)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(dst)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(dst_dir_fd)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(dst_dir_fd)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(duration)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(duration)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(effective_ids)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(effective_ids)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(element_factory)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(element_factory)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(encode)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(encode)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(encoding)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(encoding)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(end)) < 
_PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(end)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(end_lineno)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(end_lineno)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(end_offset)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(end_offset)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(endpos)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(endpos)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(env)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(env)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(errors)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(errors)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(event)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(event)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(eventmask)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(eventmask)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(exc_type)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(exc_type)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(exc_value)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(exc_value)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(excepthook)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(excepthook)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(exception)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(exception)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(exp)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(exp)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(extend)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(extend)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(facility)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(facility)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(factory)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(factory)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(false)) < 
_PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(false)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(family)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(family)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fanout)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fanout)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fd)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fd)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fd2)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fd2)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fdel)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fdel)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fget)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fget)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(file)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(file)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(file_actions)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(file_actions)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(filename)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(filename)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fileno)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fileno)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(filepath)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(filepath)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fillvalue)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fillvalue)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(filters)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(filters)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(final)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(final)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(find_class)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(find_class)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fix_imports)) < 
_PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fix_imports)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(flags)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(flags)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(flush)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(flush)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(follow_symlinks)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(follow_symlinks)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(format)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(format)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(frequency)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(frequency)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fromlist)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fromlist)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(fset)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(fset)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(func)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(func)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(future)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(future)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(generation)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(generation)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(genexpr)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(genexpr)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(get)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(get)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(get_debug)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(get_debug)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(get_event_loop)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(get_event_loop)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(get_loop)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(get_loop)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_ID(get_source)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(get_source)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(getattr)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(getattr)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(getstate)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(getstate)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(gid)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(gid)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(globals)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(globals)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(groupindex)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(groupindex)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(groups)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(groups)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(handle)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(handle)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(hash_name)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(hash_name)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(header)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(header)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(headers)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(headers)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(hi)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(hi)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(hook)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(hook)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(id)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(id)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(ident)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(ident)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(ignore)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(ignore)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(imag)) < 
_PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(imag)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(importlib)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(importlib)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(in_fd)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(in_fd)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(incoming)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(incoming)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(indexgroup)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(indexgroup)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(inf)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(inf)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(inheritable)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(inheritable)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(initial)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(initial)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(initial_bytes)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(initial_bytes)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(initial_value)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(initial_value)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(initval)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(initval)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(inner_size)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(inner_size)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(input)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(input)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(insert_comments)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(insert_comments)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(insert_pis)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(insert_pis)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(instructions)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(instructions)); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(intern)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(intern)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(intersection)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(intersection)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(isatty)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(isatty)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(isinstance)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(isinstance)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(isolation_level)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(isolation_level)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(istext)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(istext)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(item)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(item)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(items)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(items)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(iter)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(iter)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(iterable)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(iterable)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(iterations)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(iterations)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(join)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(join)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(jump)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(jump)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(keepends)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(keepends)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(key)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(key)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(keyfile)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(keyfile)); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(keys)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(keys)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(kind)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(kind)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(lambda)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(lambda)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(last)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(last)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(last_node)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(last_node)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(last_traceback)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(last_traceback)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(last_type)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(last_type)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(last_value)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(last_value)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(latin1)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(latin1)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(leaf_size)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(leaf_size)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(len)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(len)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(length)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(length)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(level)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(level)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(limit)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(limit)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(line)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(line)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(line_buffering)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(line_buffering)); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(lineno)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(lineno)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(listcomp)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(listcomp)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(little)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(little)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(lo)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(lo)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(locale)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(locale)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(locals)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(locals)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(logoption)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(logoption)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(loop)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(loop)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(mapping)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(mapping)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(match)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(match)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(max_length)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(max_length)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(maxdigits)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(maxdigits)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(maxevents)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(maxevents)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(maxmem)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(maxmem)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(maxsplit)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(maxsplit)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(maxvalue)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(maxvalue)); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(memLevel)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(memLevel)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(memlimit)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(memlimit)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(message)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(message)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(metaclass)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(metaclass)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(method)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(method)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(mod)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(mod)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(mode)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(mode)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(module)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(module)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(module_globals)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(module_globals)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(modules)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(modules)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(mro)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(mro)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(msg)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(msg)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(mycmp)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(mycmp)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(n)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(n)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(n_arg)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(n_arg)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(n_fields)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(n_fields)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - 
}; - if (Py_REFCNT((PyObject *)&_Py_ID(n_sequence_fields)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(n_sequence_fields)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(n_unnamed_fields)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(n_unnamed_fields)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(name)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(name)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(name_from)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(name_from)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(namespace_separator)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(namespace_separator)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(namespaces)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(namespaces)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(narg)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(narg)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(ndigits)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(ndigits)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(new_limit)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(new_limit)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(newline)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(newline)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(newlines)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(newlines)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(next)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(next)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(node_depth)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(node_depth)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(node_offset)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(node_offset)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(ns)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(ns)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(nstype)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(nstype)); - Py_FatalError("immortal object 
has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(null)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(null)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(number)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(number)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(obj)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(obj)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(object)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(object)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(offset)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(offset)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(offset_dst)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(offset_dst)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(offset_src)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(offset_src)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(on_type_read)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(on_type_read)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(onceregistry)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(onceregistry)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(only_keys)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(only_keys)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(oparg)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(oparg)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(opcode)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(opcode)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(open)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(open)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(opener)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(opener)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(operation)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(operation)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(optimize)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(optimize)); - Py_FatalError("immortal 
object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(options)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(options)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(order)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(order)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(out_fd)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(out_fd)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(outgoing)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(outgoing)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(overlapped)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(overlapped)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(owner)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(owner)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(p)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(p)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(pages)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(pages)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(parent)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(parent)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(password)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(password)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(path)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(path)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(pattern)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(pattern)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(peek)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(peek)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(persistent_id)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(persistent_id)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(persistent_load)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(persistent_load)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(person)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(person)); - Py_FatalError("immortal object 
has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(pi_factory)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(pi_factory)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(pid)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(pid)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(policy)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(policy)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(pos)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(pos)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(print_file_and_line)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(print_file_and_line)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(priority)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(priority)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(progress)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(progress)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(progress_handler)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(progress_handler)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(proto)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(proto)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(protocol)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(protocol)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(ps1)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(ps1)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(ps2)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(ps2)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(query)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(query)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(quotetabs)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(quotetabs)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(r)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(r)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(raw)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(raw)); - Py_FatalError("immortal object has 
less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(read)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(read)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(read1)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(read1)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(readable)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(readable)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(readall)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(readall)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(readinto)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(readinto)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(readinto1)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(readinto1)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(readline)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(readline)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(readonly)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(readonly)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(real)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(real)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(reducer_override)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(reducer_override)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(registry)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(registry)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(rel_tol)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(rel_tol)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(reload)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(reload)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(repl)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(repl)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(replace)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(replace)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(reserved)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(reserved)); - Py_FatalError("immortal 
object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(reset)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(reset)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(resetids)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(resetids)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(return)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(return)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(reverse)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(reverse)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(reversed)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(reversed)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(s)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(s)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(salt)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(salt)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(sched_priority)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(sched_priority)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(scheduler)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(scheduler)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(seek)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(seek)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(seekable)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(seekable)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(selectors)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(selectors)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(send)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(send)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(sep)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(sep)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(sequence)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(sequence)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(server_hostname)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(server_hostname)); - Py_FatalError("immortal 
object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(server_side)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(server_side)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(session)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(session)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(setcomp)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(setcomp)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(setpgroup)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(setpgroup)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(setsid)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(setsid)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(setsigdef)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(setsigdef)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(setsigmask)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(setsigmask)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(setstate)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(setstate)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(shape)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(shape)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(show_cmd)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(show_cmd)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(signed)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(signed)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(size)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(size)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(sizehint)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(sizehint)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(sleep)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(sleep)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(sock)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(sock)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(sort)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(sort)); - Py_FatalError("immortal 
object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(sound)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(sound)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(source)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(source)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(source_traceback)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(source_traceback)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(src)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(src)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(src_dir_fd)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(src_dir_fd)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(stacklevel)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(stacklevel)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(start)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(start)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(statement)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(statement)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(status)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(status)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(stderr)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(stderr)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(stdin)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(stdin)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(stdout)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(stdout)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(step)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(step)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(store_name)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(store_name)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(strategy)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(strategy)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(strict)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(strict)); - 
Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(strict_mode)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(strict_mode)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(string)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(string)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(sub_key)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(sub_key)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(symmetric_difference_update)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(symmetric_difference_update)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(tabsize)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(tabsize)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(tag)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(tag)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(target)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(target)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(target_is_directory)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(target_is_directory)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(task)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(task)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(tb_frame)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(tb_frame)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(tb_lasti)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(tb_lasti)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(tb_lineno)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(tb_lineno)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(tb_next)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(tb_next)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(tell)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(tell)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(template)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(template)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(term)) < _PyObject_IMMORTAL_REFCNT) 
{ - _PyObject_Dump((PyObject *)&_Py_ID(term)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(text)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(text)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(threading)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(threading)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(throw)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(throw)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(timeout)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(timeout)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(times)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(times)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(top)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(top)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(trace_callback)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(trace_callback)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(traceback)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(traceback)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(trailers)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(trailers)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(translate)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(translate)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(true)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(true)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(truncate)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(truncate)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(twice)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(twice)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(txt)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(txt)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(type)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(type)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(tz)) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_ID(tz)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(uid)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(uid)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(unlink)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(unlink)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(unraisablehook)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(unraisablehook)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(uri)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(uri)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(usedforsecurity)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(usedforsecurity)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(value)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(value)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(values)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(values)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(version)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(version)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(warnings)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(warnings)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(warnoptions)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(warnoptions)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(wbits)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(wbits)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(week)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(week)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(weekday)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(weekday)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(which)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(which)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(who)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(who)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(withdata)) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_ID(withdata)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(writable)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(writable)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(write)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(write)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(write_through)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(write_through)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(x)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(x)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(year)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(year)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_ID(zdict)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_ID(zdict)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[0]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[0]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[1]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[1]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[2]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[2]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[3]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[3]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[4]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[4]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[5]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[5]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[6]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[6]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[7]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[7]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(strings).ascii[8]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[8]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[9]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[9]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[10]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[10]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[11]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[11]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[12]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[12]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[13]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[13]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[14]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[14]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[15]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[15]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[16]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[16]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[17]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[17]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[18]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[18]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[19]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[19]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[20]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[20]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[21]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[21]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; 
- if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[22]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[22]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[23]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[23]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[24]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[24]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[25]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[25]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[26]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[26]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[27]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[27]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[28]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[28]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[29]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[29]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[30]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[30]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[31]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[31]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[32]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[32]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[33]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[33]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[34]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[34]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[35]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[35]); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[36]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[36]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[37]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[37]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[38]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[38]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[39]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[39]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[40]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[40]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[41]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[41]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[42]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[42]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[43]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[43]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[44]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[44]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[45]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[45]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[46]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[46]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[47]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[47]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[48]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[48]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[49]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[49]); - Py_FatalError("immortal object 
has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[50]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[50]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[51]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[51]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[52]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[52]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[53]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[53]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[54]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[54]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[55]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[55]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[56]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[56]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[57]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[57]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[58]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[58]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[59]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[59]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[60]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[60]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[61]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[61]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[62]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[62]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[63]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[63]); - 
Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[64]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[64]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[65]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[65]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[66]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[66]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[67]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[67]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[68]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[68]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[69]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[69]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[70]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[70]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[71]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[71]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[72]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[72]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[73]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[73]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[74]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[74]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[75]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[75]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[76]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[76]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[77]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject 
*)&_Py_SINGLETON(strings).ascii[77]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[78]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[78]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[79]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[79]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[80]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[80]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[81]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[81]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[82]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[82]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[83]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[83]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[84]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[84]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[85]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[85]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[86]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[86]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[87]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[87]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[88]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[88]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[89]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[89]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[90]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[90]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[91]) < _PyObject_IMMORTAL_REFCNT) 
{ - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[91]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[92]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[92]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[93]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[93]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[94]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[94]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[95]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[95]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[96]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[96]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[97]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[97]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[98]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[98]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[99]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[99]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[100]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[100]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[101]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[101]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[102]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[102]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[103]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[103]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[104]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[104]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject 
*)&_Py_SINGLETON(strings).ascii[105]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[105]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[106]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[106]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[107]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[107]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[108]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[108]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[109]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[109]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[110]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[110]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[111]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[111]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[112]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[112]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[113]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[113]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[114]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[114]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[115]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[115]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[116]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[116]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[117]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[117]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[118]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[118]); - Py_FatalError("immortal object has less refcnt than expected 
_PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[119]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[119]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[120]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[120]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[121]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[121]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[122]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[122]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[123]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[123]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[124]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[124]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[125]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[125]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[126]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[126]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).ascii[127]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).ascii[127]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[128 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[128 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[129 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[129 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[130 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[130 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[131 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[131 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[132 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[132 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[133 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[133 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[134 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[134 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[135 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[135 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[136 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[136 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[137 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[137 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[138 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[138 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[139 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[139 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[140 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[140 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[141 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[141 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[142 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[142 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[143 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[143 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[144 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[144 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[145 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[145 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[146 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[146 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[147 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[147 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[148 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[148 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[149 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[149 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[150 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[150 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[151 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[151 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[152 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[152 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[153 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[153 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[154 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[154 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[155 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[155 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[156 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[156 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[157 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[157 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[158 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[158 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[159 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[159 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[160 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[160 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[161 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[161 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[162 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[162 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[163 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[163 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[164 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[164 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[165 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[165 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[166 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[166 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[167 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[167 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[168 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[168 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[169 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[169 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[170 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[170 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[171 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[171 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[172 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[172 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[173 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[173 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[174 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[174 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[175 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[175 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[176 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[176 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[177 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[177 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[178 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[178 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[179 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[179 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[180 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[180 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[181 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[181 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[182 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[182 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[183 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[183 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[184 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[184 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[185 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[185 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[186 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[186 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[187 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[187 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[188 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[188 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[189 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[189 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[190 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[190 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[191 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[191 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[192 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[192 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[193 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[193 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[194 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[194 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[195 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[195 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[196 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[196 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[197 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[197 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[198 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[198 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[199 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[199 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[200 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[200 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[201 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[201 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[202 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[202 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[203 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[203 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[204 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[204 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[205 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[205 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[206 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[206 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[207 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[207 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[208 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[208 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[209 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[209 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[210 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[210 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[211 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[211 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[212 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[212 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[213 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[213 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[214 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[214 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[215 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[215 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[216 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[216 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[217 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[217 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[218 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[218 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[219 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[219 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[220 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[220 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[221 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[221 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[222 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[222 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[223 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[223 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[224 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[224 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[225 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[225 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[226 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[226 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[227 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[227 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[228 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[228 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[229 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[229 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[230 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[230 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[231 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[231 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[232 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[232 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[233 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[233 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[234 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[234 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[235 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[235 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[236 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[236 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[237 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[237 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[238 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[238 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[239 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[239 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[240 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[240 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[241 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[241 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[242 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[242 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[243 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[243 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[244 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[244 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[245 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[245 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[246 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[246 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[247 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[247 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[248 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[248 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[249 - 128]) < _PyObject_IMMORTAL_REFCNT) { - 
_PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[249 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[250 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[250 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[251 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[251 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[252 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[252 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[253 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[253 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[254 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[254 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(strings).latin1[255 - 128]) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(strings).latin1[255 - 128]); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; - if (Py_REFCNT((PyObject *)&_Py_SINGLETON(tuple_empty)) < _PyObject_IMMORTAL_REFCNT) { - _PyObject_Dump((PyObject *)&_Py_SINGLETON(tuple_empty)); - Py_FatalError("immortal object has less refcnt than expected _PyObject_IMMORTAL_REFCNT"); - }; +#define _Py_str_literals_INIT { \ + INIT_STR(anon_dictcomp, ""), \ + INIT_STR(anon_genexpr, ""), \ + INIT_STR(anon_lambda, ""), \ + INIT_STR(anon_listcomp, ""), \ + INIT_STR(anon_module, ""), \ + INIT_STR(anon_null, ""), \ + INIT_STR(anon_setcomp, ""), \ + INIT_STR(anon_string, ""), \ + INIT_STR(anon_unknown, ""), \ + INIT_STR(close_br, "}"), \ + INIT_STR(dbl_close_br, "}}"), \ + INIT_STR(dbl_open_br, "{{"), \ + INIT_STR(dbl_percent, "%%"), \ + INIT_STR(defaults, ".defaults"), \ + INIT_STR(dot, "."), \ + INIT_STR(dot_locals, "."), \ + INIT_STR(empty, ""), \ + INIT_STR(generic_base, ".generic_base"), \ + INIT_STR(json_decoder, "json.decoder"), \ + INIT_STR(kwdefaults, ".kwdefaults"), \ + INIT_STR(list_err, "list index out of range"), \ + INIT_STR(newline, "\n"), \ + INIT_STR(open_br, "{"), \ + INIT_STR(percent, "%"), \ + INIT_STR(type_params, ".type_params"), \ + INIT_STR(utf_8, "utf-8"), \ +} + +#define _Py_str_identifiers_INIT { \ + INIT_ID(CANCELLED), \ + INIT_ID(FINISHED), \ + INIT_ID(False), \ + INIT_ID(JSONDecodeError), \ + INIT_ID(PENDING), \ + INIT_ID(Py_Repr), \ + INIT_ID(TextIOWrapper), \ + INIT_ID(True), \ + INIT_ID(WarningMessage), \ + INIT_ID(_), \ + INIT_ID(_WindowsConsoleIO), \ + INIT_ID(__IOBase_closed), \ + INIT_ID(__abc_tpflags__), \ + INIT_ID(__abs__), \ + INIT_ID(__abstractmethods__), \ + INIT_ID(__add__), \ + INIT_ID(__aenter__), \ + INIT_ID(__aexit__), \ + INIT_ID(__aiter__), \ + INIT_ID(__all__), \ + INIT_ID(__and__), \ + INIT_ID(__anext__), \ + 
INIT_ID(__annotations__), \ + INIT_ID(__args__), \ + INIT_ID(__asyncio_running_event_loop__), \ + INIT_ID(__await__), \ + INIT_ID(__bases__), \ + INIT_ID(__bool__), \ + INIT_ID(__buffer__), \ + INIT_ID(__build_class__), \ + INIT_ID(__builtins__), \ + INIT_ID(__bytes__), \ + INIT_ID(__call__), \ + INIT_ID(__cantrace__), \ + INIT_ID(__class__), \ + INIT_ID(__class_getitem__), \ + INIT_ID(__classcell__), \ + INIT_ID(__classdict__), \ + INIT_ID(__classdictcell__), \ + INIT_ID(__complex__), \ + INIT_ID(__contains__), \ + INIT_ID(__copy__), \ + INIT_ID(__ctypes_from_outparam__), \ + INIT_ID(__del__), \ + INIT_ID(__delattr__), \ + INIT_ID(__delete__), \ + INIT_ID(__delitem__), \ + INIT_ID(__dict__), \ + INIT_ID(__dictoffset__), \ + INIT_ID(__dir__), \ + INIT_ID(__divmod__), \ + INIT_ID(__doc__), \ + INIT_ID(__enter__), \ + INIT_ID(__eq__), \ + INIT_ID(__exit__), \ + INIT_ID(__file__), \ + INIT_ID(__float__), \ + INIT_ID(__floordiv__), \ + INIT_ID(__format__), \ + INIT_ID(__fspath__), \ + INIT_ID(__ge__), \ + INIT_ID(__get__), \ + INIT_ID(__getattr__), \ + INIT_ID(__getattribute__), \ + INIT_ID(__getinitargs__), \ + INIT_ID(__getitem__), \ + INIT_ID(__getnewargs__), \ + INIT_ID(__getnewargs_ex__), \ + INIT_ID(__getstate__), \ + INIT_ID(__gt__), \ + INIT_ID(__hash__), \ + INIT_ID(__iadd__), \ + INIT_ID(__iand__), \ + INIT_ID(__ifloordiv__), \ + INIT_ID(__ilshift__), \ + INIT_ID(__imatmul__), \ + INIT_ID(__imod__), \ + INIT_ID(__import__), \ + INIT_ID(__imul__), \ + INIT_ID(__index__), \ + INIT_ID(__init__), \ + INIT_ID(__init_subclass__), \ + INIT_ID(__instancecheck__), \ + INIT_ID(__int__), \ + INIT_ID(__invert__), \ + INIT_ID(__ior__), \ + INIT_ID(__ipow__), \ + INIT_ID(__irshift__), \ + INIT_ID(__isabstractmethod__), \ + INIT_ID(__isub__), \ + INIT_ID(__iter__), \ + INIT_ID(__itruediv__), \ + INIT_ID(__ixor__), \ + INIT_ID(__le__), \ + INIT_ID(__len__), \ + INIT_ID(__length_hint__), \ + INIT_ID(__lltrace__), \ + INIT_ID(__loader__), \ + INIT_ID(__lshift__), \ + INIT_ID(__lt__), \ + INIT_ID(__main__), \ + INIT_ID(__match_args__), \ + INIT_ID(__matmul__), \ + INIT_ID(__missing__), \ + INIT_ID(__mod__), \ + INIT_ID(__module__), \ + INIT_ID(__mro_entries__), \ + INIT_ID(__mul__), \ + INIT_ID(__name__), \ + INIT_ID(__ne__), \ + INIT_ID(__neg__), \ + INIT_ID(__new__), \ + INIT_ID(__newobj__), \ + INIT_ID(__newobj_ex__), \ + INIT_ID(__next__), \ + INIT_ID(__notes__), \ + INIT_ID(__or__), \ + INIT_ID(__orig_class__), \ + INIT_ID(__origin__), \ + INIT_ID(__package__), \ + INIT_ID(__parameters__), \ + INIT_ID(__path__), \ + INIT_ID(__pos__), \ + INIT_ID(__pow__), \ + INIT_ID(__prepare__), \ + INIT_ID(__qualname__), \ + INIT_ID(__radd__), \ + INIT_ID(__rand__), \ + INIT_ID(__rdivmod__), \ + INIT_ID(__reduce__), \ + INIT_ID(__reduce_ex__), \ + INIT_ID(__release_buffer__), \ + INIT_ID(__repr__), \ + INIT_ID(__reversed__), \ + INIT_ID(__rfloordiv__), \ + INIT_ID(__rlshift__), \ + INIT_ID(__rmatmul__), \ + INIT_ID(__rmod__), \ + INIT_ID(__rmul__), \ + INIT_ID(__ror__), \ + INIT_ID(__round__), \ + INIT_ID(__rpow__), \ + INIT_ID(__rrshift__), \ + INIT_ID(__rshift__), \ + INIT_ID(__rsub__), \ + INIT_ID(__rtruediv__), \ + INIT_ID(__rxor__), \ + INIT_ID(__set__), \ + INIT_ID(__set_name__), \ + INIT_ID(__setattr__), \ + INIT_ID(__setitem__), \ + INIT_ID(__setstate__), \ + INIT_ID(__sizeof__), \ + INIT_ID(__slotnames__), \ + INIT_ID(__slots__), \ + INIT_ID(__spec__), \ + INIT_ID(__str__), \ + INIT_ID(__sub__), \ + INIT_ID(__subclasscheck__), \ + INIT_ID(__subclasshook__), \ + INIT_ID(__truediv__), \ + 
INIT_ID(__trunc__), \ + INIT_ID(__type_params__), \ + INIT_ID(__typing_is_unpacked_typevartuple__), \ + INIT_ID(__typing_prepare_subst__), \ + INIT_ID(__typing_subst__), \ + INIT_ID(__typing_unpacked_tuple_args__), \ + INIT_ID(__warningregistry__), \ + INIT_ID(__weaklistoffset__), \ + INIT_ID(__weakref__), \ + INIT_ID(__xor__), \ + INIT_ID(_abc_impl), \ + INIT_ID(_abstract_), \ + INIT_ID(_active), \ + INIT_ID(_annotation), \ + INIT_ID(_anonymous_), \ + INIT_ID(_argtypes_), \ + INIT_ID(_as_parameter_), \ + INIT_ID(_asyncio_future_blocking), \ + INIT_ID(_blksize), \ + INIT_ID(_bootstrap), \ + INIT_ID(_check_retval_), \ + INIT_ID(_dealloc_warn), \ + INIT_ID(_feature_version), \ + INIT_ID(_fields_), \ + INIT_ID(_finalizing), \ + INIT_ID(_find_and_load), \ + INIT_ID(_fix_up_module), \ + INIT_ID(_flags_), \ + INIT_ID(_get_sourcefile), \ + INIT_ID(_handle_fromlist), \ + INIT_ID(_initializing), \ + INIT_ID(_io), \ + INIT_ID(_is_text_encoding), \ + INIT_ID(_length_), \ + INIT_ID(_limbo), \ + INIT_ID(_lock_unlock_module), \ + INIT_ID(_loop), \ + INIT_ID(_needs_com_addref_), \ + INIT_ID(_pack_), \ + INIT_ID(_restype_), \ + INIT_ID(_showwarnmsg), \ + INIT_ID(_shutdown), \ + INIT_ID(_slotnames), \ + INIT_ID(_strptime_datetime), \ + INIT_ID(_swappedbytes_), \ + INIT_ID(_type_), \ + INIT_ID(_uninitialized_submodules), \ + INIT_ID(_warn_unawaited_coroutine), \ + INIT_ID(_xoptions), \ + INIT_ID(a), \ + INIT_ID(abs_tol), \ + INIT_ID(access), \ + INIT_ID(aclose), \ + INIT_ID(add), \ + INIT_ID(add_done_callback), \ + INIT_ID(after_in_child), \ + INIT_ID(after_in_parent), \ + INIT_ID(aggregate_class), \ + INIT_ID(append), \ + INIT_ID(argdefs), \ + INIT_ID(arguments), \ + INIT_ID(argv), \ + INIT_ID(as_integer_ratio), \ + INIT_ID(asend), \ + INIT_ID(ast), \ + INIT_ID(athrow), \ + INIT_ID(attribute), \ + INIT_ID(authorizer_callback), \ + INIT_ID(autocommit), \ + INIT_ID(b), \ + INIT_ID(backtick), \ + INIT_ID(base), \ + INIT_ID(before), \ + INIT_ID(big), \ + INIT_ID(binary_form), \ + INIT_ID(block), \ + INIT_ID(bound), \ + INIT_ID(buffer), \ + INIT_ID(buffer_callback), \ + INIT_ID(buffer_size), \ + INIT_ID(buffering), \ + INIT_ID(buffers), \ + INIT_ID(bufsize), \ + INIT_ID(builtins), \ + INIT_ID(byteorder), \ + INIT_ID(bytes), \ + INIT_ID(bytes_per_sep), \ + INIT_ID(c), \ + INIT_ID(c_call), \ + INIT_ID(c_exception), \ + INIT_ID(c_return), \ + INIT_ID(cached_statements), \ + INIT_ID(cadata), \ + INIT_ID(cafile), \ + INIT_ID(call), \ + INIT_ID(call_exception_handler), \ + INIT_ID(call_soon), \ + INIT_ID(callback), \ + INIT_ID(cancel), \ + INIT_ID(capath), \ + INIT_ID(category), \ + INIT_ID(cb_type), \ + INIT_ID(certfile), \ + INIT_ID(check_same_thread), \ + INIT_ID(clear), \ + INIT_ID(close), \ + INIT_ID(closed), \ + INIT_ID(closefd), \ + INIT_ID(closure), \ + INIT_ID(co_argcount), \ + INIT_ID(co_cellvars), \ + INIT_ID(co_code), \ + INIT_ID(co_consts), \ + INIT_ID(co_exceptiontable), \ + INIT_ID(co_filename), \ + INIT_ID(co_firstlineno), \ + INIT_ID(co_flags), \ + INIT_ID(co_freevars), \ + INIT_ID(co_kwonlyargcount), \ + INIT_ID(co_linetable), \ + INIT_ID(co_name), \ + INIT_ID(co_names), \ + INIT_ID(co_nlocals), \ + INIT_ID(co_posonlyargcount), \ + INIT_ID(co_qualname), \ + INIT_ID(co_stacksize), \ + INIT_ID(co_varnames), \ + INIT_ID(code), \ + INIT_ID(command), \ + INIT_ID(comment_factory), \ + INIT_ID(compile_mode), \ + INIT_ID(consts), \ + INIT_ID(context), \ + INIT_ID(contravariant), \ + INIT_ID(cookie), \ + INIT_ID(copy), \ + INIT_ID(copyreg), \ + INIT_ID(coro), \ + INIT_ID(count), \ + INIT_ID(covariant), \ + 
INIT_ID(cwd), \ + INIT_ID(d), \ + INIT_ID(data), \ + INIT_ID(database), \ + INIT_ID(decode), \ + INIT_ID(decoder), \ + INIT_ID(default), \ + INIT_ID(defaultaction), \ + INIT_ID(delete), \ + INIT_ID(depth), \ + INIT_ID(detect_types), \ + INIT_ID(deterministic), \ + INIT_ID(device), \ + INIT_ID(dict), \ + INIT_ID(dictcomp), \ + INIT_ID(difference_update), \ + INIT_ID(digest), \ + INIT_ID(digest_size), \ + INIT_ID(digestmod), \ + INIT_ID(dir_fd), \ + INIT_ID(discard), \ + INIT_ID(dispatch_table), \ + INIT_ID(displayhook), \ + INIT_ID(dklen), \ + INIT_ID(doc), \ + INIT_ID(dont_inherit), \ + INIT_ID(dst), \ + INIT_ID(dst_dir_fd), \ + INIT_ID(e), \ + INIT_ID(eager_start), \ + INIT_ID(effective_ids), \ + INIT_ID(element_factory), \ + INIT_ID(encode), \ + INIT_ID(encoding), \ + INIT_ID(end), \ + INIT_ID(end_lineno), \ + INIT_ID(end_offset), \ + INIT_ID(endpos), \ + INIT_ID(entrypoint), \ + INIT_ID(env), \ + INIT_ID(errors), \ + INIT_ID(event), \ + INIT_ID(eventmask), \ + INIT_ID(excepthook), \ + INIT_ID(exception), \ + INIT_ID(existing_file_name), \ + INIT_ID(exp), \ + INIT_ID(extend), \ + INIT_ID(extra_tokens), \ + INIT_ID(f), \ + INIT_ID(facility), \ + INIT_ID(factory), \ + INIT_ID(false), \ + INIT_ID(family), \ + INIT_ID(fanout), \ + INIT_ID(fd), \ + INIT_ID(fd2), \ + INIT_ID(fdel), \ + INIT_ID(fget), \ + INIT_ID(file), \ + INIT_ID(file_actions), \ + INIT_ID(filename), \ + INIT_ID(fileno), \ + INIT_ID(filepath), \ + INIT_ID(fillvalue), \ + INIT_ID(filters), \ + INIT_ID(final), \ + INIT_ID(find_class), \ + INIT_ID(fix_imports), \ + INIT_ID(flags), \ + INIT_ID(flush), \ + INIT_ID(follow_symlinks), \ + INIT_ID(format), \ + INIT_ID(from_param), \ + INIT_ID(fromlist), \ + INIT_ID(fromtimestamp), \ + INIT_ID(fromutc), \ + INIT_ID(fset), \ + INIT_ID(func), \ + INIT_ID(future), \ + INIT_ID(g), \ + INIT_ID(generation), \ + INIT_ID(genexpr), \ + INIT_ID(get), \ + INIT_ID(get_debug), \ + INIT_ID(get_event_loop), \ + INIT_ID(get_loop), \ + INIT_ID(get_source), \ + INIT_ID(getattr), \ + INIT_ID(getstate), \ + INIT_ID(gid), \ + INIT_ID(globals), \ + INIT_ID(groupindex), \ + INIT_ID(groups), \ + INIT_ID(h), \ + INIT_ID(handle), \ + INIT_ID(hash_name), \ + INIT_ID(header), \ + INIT_ID(headers), \ + INIT_ID(hi), \ + INIT_ID(hook), \ + INIT_ID(id), \ + INIT_ID(ident), \ + INIT_ID(identity_hint), \ + INIT_ID(ignore), \ + INIT_ID(imag), \ + INIT_ID(importlib), \ + INIT_ID(in_fd), \ + INIT_ID(incoming), \ + INIT_ID(indexgroup), \ + INIT_ID(inf), \ + INIT_ID(infer_variance), \ + INIT_ID(inheritable), \ + INIT_ID(initial), \ + INIT_ID(initial_bytes), \ + INIT_ID(initial_value), \ + INIT_ID(initval), \ + INIT_ID(inner_size), \ + INIT_ID(input), \ + INIT_ID(insert_comments), \ + INIT_ID(insert_pis), \ + INIT_ID(instructions), \ + INIT_ID(intern), \ + INIT_ID(intersection), \ + INIT_ID(interval), \ + INIT_ID(is_running), \ + INIT_ID(isatty), \ + INIT_ID(isinstance), \ + INIT_ID(isoformat), \ + INIT_ID(isolation_level), \ + INIT_ID(istext), \ + INIT_ID(item), \ + INIT_ID(items), \ + INIT_ID(iter), \ + INIT_ID(iterable), \ + INIT_ID(iterations), \ + INIT_ID(join), \ + INIT_ID(jump), \ + INIT_ID(keepends), \ + INIT_ID(key), \ + INIT_ID(keyfile), \ + INIT_ID(keys), \ + INIT_ID(kind), \ + INIT_ID(kw), \ + INIT_ID(kw1), \ + INIT_ID(kw2), \ + INIT_ID(lambda), \ + INIT_ID(last), \ + INIT_ID(last_exc), \ + INIT_ID(last_node), \ + INIT_ID(last_traceback), \ + INIT_ID(last_type), \ + INIT_ID(last_value), \ + INIT_ID(latin1), \ + INIT_ID(leaf_size), \ + INIT_ID(len), \ + INIT_ID(length), \ + INIT_ID(level), \ + INIT_ID(limit), \ + 
INIT_ID(line), \ + INIT_ID(line_buffering), \ + INIT_ID(lineno), \ + INIT_ID(listcomp), \ + INIT_ID(little), \ + INIT_ID(lo), \ + INIT_ID(locale), \ + INIT_ID(locals), \ + INIT_ID(logoption), \ + INIT_ID(loop), \ + INIT_ID(mapping), \ + INIT_ID(match), \ + INIT_ID(max_length), \ + INIT_ID(maxdigits), \ + INIT_ID(maxevents), \ + INIT_ID(maxmem), \ + INIT_ID(maxsplit), \ + INIT_ID(maxvalue), \ + INIT_ID(memLevel), \ + INIT_ID(memlimit), \ + INIT_ID(message), \ + INIT_ID(metaclass), \ + INIT_ID(metadata), \ + INIT_ID(method), \ + INIT_ID(mod), \ + INIT_ID(mode), \ + INIT_ID(module), \ + INIT_ID(module_globals), \ + INIT_ID(modules), \ + INIT_ID(mro), \ + INIT_ID(msg), \ + INIT_ID(mycmp), \ + INIT_ID(n), \ + INIT_ID(n_arg), \ + INIT_ID(n_fields), \ + INIT_ID(n_sequence_fields), \ + INIT_ID(n_unnamed_fields), \ + INIT_ID(name), \ + INIT_ID(name_from), \ + INIT_ID(namespace_separator), \ + INIT_ID(namespaces), \ + INIT_ID(narg), \ + INIT_ID(ndigits), \ + INIT_ID(new_file_name), \ + INIT_ID(new_limit), \ + INIT_ID(newline), \ + INIT_ID(newlines), \ + INIT_ID(next), \ + INIT_ID(nlocals), \ + INIT_ID(node_depth), \ + INIT_ID(node_offset), \ + INIT_ID(ns), \ + INIT_ID(nstype), \ + INIT_ID(nt), \ + INIT_ID(null), \ + INIT_ID(number), \ + INIT_ID(obj), \ + INIT_ID(object), \ + INIT_ID(offset), \ + INIT_ID(offset_dst), \ + INIT_ID(offset_src), \ + INIT_ID(on_type_read), \ + INIT_ID(onceregistry), \ + INIT_ID(only_keys), \ + INIT_ID(oparg), \ + INIT_ID(opcode), \ + INIT_ID(open), \ + INIT_ID(opener), \ + INIT_ID(operation), \ + INIT_ID(optimize), \ + INIT_ID(options), \ + INIT_ID(order), \ + INIT_ID(origin), \ + INIT_ID(out_fd), \ + INIT_ID(outgoing), \ + INIT_ID(overlapped), \ + INIT_ID(owner), \ + INIT_ID(p), \ + INIT_ID(pages), \ + INIT_ID(parent), \ + INIT_ID(password), \ + INIT_ID(path), \ + INIT_ID(pattern), \ + INIT_ID(peek), \ + INIT_ID(persistent_id), \ + INIT_ID(persistent_load), \ + INIT_ID(person), \ + INIT_ID(pi_factory), \ + INIT_ID(pid), \ + INIT_ID(policy), \ + INIT_ID(pos), \ + INIT_ID(pos1), \ + INIT_ID(pos2), \ + INIT_ID(posix), \ + INIT_ID(print_file_and_line), \ + INIT_ID(priority), \ + INIT_ID(progress), \ + INIT_ID(progress_handler), \ + INIT_ID(progress_routine), \ + INIT_ID(proto), \ + INIT_ID(protocol), \ + INIT_ID(ps1), \ + INIT_ID(ps2), \ + INIT_ID(query), \ + INIT_ID(quotetabs), \ + INIT_ID(r), \ + INIT_ID(raw), \ + INIT_ID(read), \ + INIT_ID(read1), \ + INIT_ID(readable), \ + INIT_ID(readall), \ + INIT_ID(readinto), \ + INIT_ID(readinto1), \ + INIT_ID(readline), \ + INIT_ID(readonly), \ + INIT_ID(real), \ + INIT_ID(reducer_override), \ + INIT_ID(registry), \ + INIT_ID(rel_tol), \ + INIT_ID(release), \ + INIT_ID(reload), \ + INIT_ID(repl), \ + INIT_ID(replace), \ + INIT_ID(reserved), \ + INIT_ID(reset), \ + INIT_ID(resetids), \ + INIT_ID(return), \ + INIT_ID(reverse), \ + INIT_ID(reversed), \ + INIT_ID(s), \ + INIT_ID(salt), \ + INIT_ID(sched_priority), \ + INIT_ID(scheduler), \ + INIT_ID(seek), \ + INIT_ID(seekable), \ + INIT_ID(selectors), \ + INIT_ID(self), \ + INIT_ID(send), \ + INIT_ID(sep), \ + INIT_ID(sequence), \ + INIT_ID(server_hostname), \ + INIT_ID(server_side), \ + INIT_ID(session), \ + INIT_ID(setcomp), \ + INIT_ID(setpgroup), \ + INIT_ID(setsid), \ + INIT_ID(setsigdef), \ + INIT_ID(setsigmask), \ + INIT_ID(setstate), \ + INIT_ID(shape), \ + INIT_ID(show_cmd), \ + INIT_ID(signed), \ + INIT_ID(size), \ + INIT_ID(sizehint), \ + INIT_ID(skip_file_prefixes), \ + INIT_ID(sleep), \ + INIT_ID(sock), \ + INIT_ID(sort), \ + INIT_ID(source), \ + 
INIT_ID(source_traceback), \ + INIT_ID(src), \ + INIT_ID(src_dir_fd), \ + INIT_ID(stacklevel), \ + INIT_ID(start), \ + INIT_ID(statement), \ + INIT_ID(status), \ + INIT_ID(stderr), \ + INIT_ID(stdin), \ + INIT_ID(stdout), \ + INIT_ID(step), \ + INIT_ID(steps), \ + INIT_ID(store_name), \ + INIT_ID(strategy), \ + INIT_ID(strftime), \ + INIT_ID(strict), \ + INIT_ID(strict_mode), \ + INIT_ID(string), \ + INIT_ID(sub_key), \ + INIT_ID(symmetric_difference_update), \ + INIT_ID(tabsize), \ + INIT_ID(tag), \ + INIT_ID(target), \ + INIT_ID(target_is_directory), \ + INIT_ID(task), \ + INIT_ID(tb_frame), \ + INIT_ID(tb_lasti), \ + INIT_ID(tb_lineno), \ + INIT_ID(tb_next), \ + INIT_ID(tell), \ + INIT_ID(template), \ + INIT_ID(term), \ + INIT_ID(text), \ + INIT_ID(threading), \ + INIT_ID(throw), \ + INIT_ID(timeout), \ + INIT_ID(times), \ + INIT_ID(timetuple), \ + INIT_ID(top), \ + INIT_ID(trace_callback), \ + INIT_ID(trailers), \ + INIT_ID(translate), \ + INIT_ID(true), \ + INIT_ID(truncate), \ + INIT_ID(twice), \ + INIT_ID(txt), \ + INIT_ID(type), \ + INIT_ID(type_params), \ + INIT_ID(tz), \ + INIT_ID(tzname), \ + INIT_ID(uid), \ + INIT_ID(unlink), \ + INIT_ID(unraisablehook), \ + INIT_ID(uri), \ + INIT_ID(usedforsecurity), \ + INIT_ID(value), \ + INIT_ID(values), \ + INIT_ID(version), \ + INIT_ID(volume), \ + INIT_ID(warnings), \ + INIT_ID(warnoptions), \ + INIT_ID(wbits), \ + INIT_ID(week), \ + INIT_ID(weekday), \ + INIT_ID(which), \ + INIT_ID(who), \ + INIT_ID(withdata), \ + INIT_ID(writable), \ + INIT_ID(write), \ + INIT_ID(write_through), \ + INIT_ID(x), \ + INIT_ID(year), \ + INIT_ID(zdict), \ +} + +#define _Py_str_ascii_INIT { \ + _PyASCIIObject_INIT("\x00"), \ + _PyASCIIObject_INIT("\x01"), \ + _PyASCIIObject_INIT("\x02"), \ + _PyASCIIObject_INIT("\x03"), \ + _PyASCIIObject_INIT("\x04"), \ + _PyASCIIObject_INIT("\x05"), \ + _PyASCIIObject_INIT("\x06"), \ + _PyASCIIObject_INIT("\x07"), \ + _PyASCIIObject_INIT("\x08"), \ + _PyASCIIObject_INIT("\x09"), \ + _PyASCIIObject_INIT("\x0a"), \ + _PyASCIIObject_INIT("\x0b"), \ + _PyASCIIObject_INIT("\x0c"), \ + _PyASCIIObject_INIT("\x0d"), \ + _PyASCIIObject_INIT("\x0e"), \ + _PyASCIIObject_INIT("\x0f"), \ + _PyASCIIObject_INIT("\x10"), \ + _PyASCIIObject_INIT("\x11"), \ + _PyASCIIObject_INIT("\x12"), \ + _PyASCIIObject_INIT("\x13"), \ + _PyASCIIObject_INIT("\x14"), \ + _PyASCIIObject_INIT("\x15"), \ + _PyASCIIObject_INIT("\x16"), \ + _PyASCIIObject_INIT("\x17"), \ + _PyASCIIObject_INIT("\x18"), \ + _PyASCIIObject_INIT("\x19"), \ + _PyASCIIObject_INIT("\x1a"), \ + _PyASCIIObject_INIT("\x1b"), \ + _PyASCIIObject_INIT("\x1c"), \ + _PyASCIIObject_INIT("\x1d"), \ + _PyASCIIObject_INIT("\x1e"), \ + _PyASCIIObject_INIT("\x1f"), \ + _PyASCIIObject_INIT("\x20"), \ + _PyASCIIObject_INIT("\x21"), \ + _PyASCIIObject_INIT("\x22"), \ + _PyASCIIObject_INIT("\x23"), \ + _PyASCIIObject_INIT("\x24"), \ + _PyASCIIObject_INIT("\x25"), \ + _PyASCIIObject_INIT("\x26"), \ + _PyASCIIObject_INIT("\x27"), \ + _PyASCIIObject_INIT("\x28"), \ + _PyASCIIObject_INIT("\x29"), \ + _PyASCIIObject_INIT("\x2a"), \ + _PyASCIIObject_INIT("\x2b"), \ + _PyASCIIObject_INIT("\x2c"), \ + _PyASCIIObject_INIT("\x2d"), \ + _PyASCIIObject_INIT("\x2e"), \ + _PyASCIIObject_INIT("\x2f"), \ + _PyASCIIObject_INIT("\x30"), \ + _PyASCIIObject_INIT("\x31"), \ + _PyASCIIObject_INIT("\x32"), \ + _PyASCIIObject_INIT("\x33"), \ + _PyASCIIObject_INIT("\x34"), \ + _PyASCIIObject_INIT("\x35"), \ + _PyASCIIObject_INIT("\x36"), \ + _PyASCIIObject_INIT("\x37"), \ + _PyASCIIObject_INIT("\x38"), \ + 
_PyASCIIObject_INIT("\x39"), \ + _PyASCIIObject_INIT("\x3a"), \ + _PyASCIIObject_INIT("\x3b"), \ + _PyASCIIObject_INIT("\x3c"), \ + _PyASCIIObject_INIT("\x3d"), \ + _PyASCIIObject_INIT("\x3e"), \ + _PyASCIIObject_INIT("\x3f"), \ + _PyASCIIObject_INIT("\x40"), \ + _PyASCIIObject_INIT("\x41"), \ + _PyASCIIObject_INIT("\x42"), \ + _PyASCIIObject_INIT("\x43"), \ + _PyASCIIObject_INIT("\x44"), \ + _PyASCIIObject_INIT("\x45"), \ + _PyASCIIObject_INIT("\x46"), \ + _PyASCIIObject_INIT("\x47"), \ + _PyASCIIObject_INIT("\x48"), \ + _PyASCIIObject_INIT("\x49"), \ + _PyASCIIObject_INIT("\x4a"), \ + _PyASCIIObject_INIT("\x4b"), \ + _PyASCIIObject_INIT("\x4c"), \ + _PyASCIIObject_INIT("\x4d"), \ + _PyASCIIObject_INIT("\x4e"), \ + _PyASCIIObject_INIT("\x4f"), \ + _PyASCIIObject_INIT("\x50"), \ + _PyASCIIObject_INIT("\x51"), \ + _PyASCIIObject_INIT("\x52"), \ + _PyASCIIObject_INIT("\x53"), \ + _PyASCIIObject_INIT("\x54"), \ + _PyASCIIObject_INIT("\x55"), \ + _PyASCIIObject_INIT("\x56"), \ + _PyASCIIObject_INIT("\x57"), \ + _PyASCIIObject_INIT("\x58"), \ + _PyASCIIObject_INIT("\x59"), \ + _PyASCIIObject_INIT("\x5a"), \ + _PyASCIIObject_INIT("\x5b"), \ + _PyASCIIObject_INIT("\x5c"), \ + _PyASCIIObject_INIT("\x5d"), \ + _PyASCIIObject_INIT("\x5e"), \ + _PyASCIIObject_INIT("\x5f"), \ + _PyASCIIObject_INIT("\x60"), \ + _PyASCIIObject_INIT("\x61"), \ + _PyASCIIObject_INIT("\x62"), \ + _PyASCIIObject_INIT("\x63"), \ + _PyASCIIObject_INIT("\x64"), \ + _PyASCIIObject_INIT("\x65"), \ + _PyASCIIObject_INIT("\x66"), \ + _PyASCIIObject_INIT("\x67"), \ + _PyASCIIObject_INIT("\x68"), \ + _PyASCIIObject_INIT("\x69"), \ + _PyASCIIObject_INIT("\x6a"), \ + _PyASCIIObject_INIT("\x6b"), \ + _PyASCIIObject_INIT("\x6c"), \ + _PyASCIIObject_INIT("\x6d"), \ + _PyASCIIObject_INIT("\x6e"), \ + _PyASCIIObject_INIT("\x6f"), \ + _PyASCIIObject_INIT("\x70"), \ + _PyASCIIObject_INIT("\x71"), \ + _PyASCIIObject_INIT("\x72"), \ + _PyASCIIObject_INIT("\x73"), \ + _PyASCIIObject_INIT("\x74"), \ + _PyASCIIObject_INIT("\x75"), \ + _PyASCIIObject_INIT("\x76"), \ + _PyASCIIObject_INIT("\x77"), \ + _PyASCIIObject_INIT("\x78"), \ + _PyASCIIObject_INIT("\x79"), \ + _PyASCIIObject_INIT("\x7a"), \ + _PyASCIIObject_INIT("\x7b"), \ + _PyASCIIObject_INIT("\x7c"), \ + _PyASCIIObject_INIT("\x7d"), \ + _PyASCIIObject_INIT("\x7e"), \ + _PyASCIIObject_INIT("\x7f"), \ +} + +#define _Py_str_latin1_INIT { \ + _PyUnicode_LATIN1_INIT("\x80", "\xc2\x80"), \ + _PyUnicode_LATIN1_INIT("\x81", "\xc2\x81"), \ + _PyUnicode_LATIN1_INIT("\x82", "\xc2\x82"), \ + _PyUnicode_LATIN1_INIT("\x83", "\xc2\x83"), \ + _PyUnicode_LATIN1_INIT("\x84", "\xc2\x84"), \ + _PyUnicode_LATIN1_INIT("\x85", "\xc2\x85"), \ + _PyUnicode_LATIN1_INIT("\x86", "\xc2\x86"), \ + _PyUnicode_LATIN1_INIT("\x87", "\xc2\x87"), \ + _PyUnicode_LATIN1_INIT("\x88", "\xc2\x88"), \ + _PyUnicode_LATIN1_INIT("\x89", "\xc2\x89"), \ + _PyUnicode_LATIN1_INIT("\x8a", "\xc2\x8a"), \ + _PyUnicode_LATIN1_INIT("\x8b", "\xc2\x8b"), \ + _PyUnicode_LATIN1_INIT("\x8c", "\xc2\x8c"), \ + _PyUnicode_LATIN1_INIT("\x8d", "\xc2\x8d"), \ + _PyUnicode_LATIN1_INIT("\x8e", "\xc2\x8e"), \ + _PyUnicode_LATIN1_INIT("\x8f", "\xc2\x8f"), \ + _PyUnicode_LATIN1_INIT("\x90", "\xc2\x90"), \ + _PyUnicode_LATIN1_INIT("\x91", "\xc2\x91"), \ + _PyUnicode_LATIN1_INIT("\x92", "\xc2\x92"), \ + _PyUnicode_LATIN1_INIT("\x93", "\xc2\x93"), \ + _PyUnicode_LATIN1_INIT("\x94", "\xc2\x94"), \ + _PyUnicode_LATIN1_INIT("\x95", "\xc2\x95"), \ + _PyUnicode_LATIN1_INIT("\x96", "\xc2\x96"), \ + _PyUnicode_LATIN1_INIT("\x97", "\xc2\x97"), \ + 
_PyUnicode_LATIN1_INIT("\x98", "\xc2\x98"), \ + _PyUnicode_LATIN1_INIT("\x99", "\xc2\x99"), \ + _PyUnicode_LATIN1_INIT("\x9a", "\xc2\x9a"), \ + _PyUnicode_LATIN1_INIT("\x9b", "\xc2\x9b"), \ + _PyUnicode_LATIN1_INIT("\x9c", "\xc2\x9c"), \ + _PyUnicode_LATIN1_INIT("\x9d", "\xc2\x9d"), \ + _PyUnicode_LATIN1_INIT("\x9e", "\xc2\x9e"), \ + _PyUnicode_LATIN1_INIT("\x9f", "\xc2\x9f"), \ + _PyUnicode_LATIN1_INIT("\xa0", "\xc2\xa0"), \ + _PyUnicode_LATIN1_INIT("\xa1", "\xc2\xa1"), \ + _PyUnicode_LATIN1_INIT("\xa2", "\xc2\xa2"), \ + _PyUnicode_LATIN1_INIT("\xa3", "\xc2\xa3"), \ + _PyUnicode_LATIN1_INIT("\xa4", "\xc2\xa4"), \ + _PyUnicode_LATIN1_INIT("\xa5", "\xc2\xa5"), \ + _PyUnicode_LATIN1_INIT("\xa6", "\xc2\xa6"), \ + _PyUnicode_LATIN1_INIT("\xa7", "\xc2\xa7"), \ + _PyUnicode_LATIN1_INIT("\xa8", "\xc2\xa8"), \ + _PyUnicode_LATIN1_INIT("\xa9", "\xc2\xa9"), \ + _PyUnicode_LATIN1_INIT("\xaa", "\xc2\xaa"), \ + _PyUnicode_LATIN1_INIT("\xab", "\xc2\xab"), \ + _PyUnicode_LATIN1_INIT("\xac", "\xc2\xac"), \ + _PyUnicode_LATIN1_INIT("\xad", "\xc2\xad"), \ + _PyUnicode_LATIN1_INIT("\xae", "\xc2\xae"), \ + _PyUnicode_LATIN1_INIT("\xaf", "\xc2\xaf"), \ + _PyUnicode_LATIN1_INIT("\xb0", "\xc2\xb0"), \ + _PyUnicode_LATIN1_INIT("\xb1", "\xc2\xb1"), \ + _PyUnicode_LATIN1_INIT("\xb2", "\xc2\xb2"), \ + _PyUnicode_LATIN1_INIT("\xb3", "\xc2\xb3"), \ + _PyUnicode_LATIN1_INIT("\xb4", "\xc2\xb4"), \ + _PyUnicode_LATIN1_INIT("\xb5", "\xc2\xb5"), \ + _PyUnicode_LATIN1_INIT("\xb6", "\xc2\xb6"), \ + _PyUnicode_LATIN1_INIT("\xb7", "\xc2\xb7"), \ + _PyUnicode_LATIN1_INIT("\xb8", "\xc2\xb8"), \ + _PyUnicode_LATIN1_INIT("\xb9", "\xc2\xb9"), \ + _PyUnicode_LATIN1_INIT("\xba", "\xc2\xba"), \ + _PyUnicode_LATIN1_INIT("\xbb", "\xc2\xbb"), \ + _PyUnicode_LATIN1_INIT("\xbc", "\xc2\xbc"), \ + _PyUnicode_LATIN1_INIT("\xbd", "\xc2\xbd"), \ + _PyUnicode_LATIN1_INIT("\xbe", "\xc2\xbe"), \ + _PyUnicode_LATIN1_INIT("\xbf", "\xc2\xbf"), \ + _PyUnicode_LATIN1_INIT("\xc0", "\xc3\x80"), \ + _PyUnicode_LATIN1_INIT("\xc1", "\xc3\x81"), \ + _PyUnicode_LATIN1_INIT("\xc2", "\xc3\x82"), \ + _PyUnicode_LATIN1_INIT("\xc3", "\xc3\x83"), \ + _PyUnicode_LATIN1_INIT("\xc4", "\xc3\x84"), \ + _PyUnicode_LATIN1_INIT("\xc5", "\xc3\x85"), \ + _PyUnicode_LATIN1_INIT("\xc6", "\xc3\x86"), \ + _PyUnicode_LATIN1_INIT("\xc7", "\xc3\x87"), \ + _PyUnicode_LATIN1_INIT("\xc8", "\xc3\x88"), \ + _PyUnicode_LATIN1_INIT("\xc9", "\xc3\x89"), \ + _PyUnicode_LATIN1_INIT("\xca", "\xc3\x8a"), \ + _PyUnicode_LATIN1_INIT("\xcb", "\xc3\x8b"), \ + _PyUnicode_LATIN1_INIT("\xcc", "\xc3\x8c"), \ + _PyUnicode_LATIN1_INIT("\xcd", "\xc3\x8d"), \ + _PyUnicode_LATIN1_INIT("\xce", "\xc3\x8e"), \ + _PyUnicode_LATIN1_INIT("\xcf", "\xc3\x8f"), \ + _PyUnicode_LATIN1_INIT("\xd0", "\xc3\x90"), \ + _PyUnicode_LATIN1_INIT("\xd1", "\xc3\x91"), \ + _PyUnicode_LATIN1_INIT("\xd2", "\xc3\x92"), \ + _PyUnicode_LATIN1_INIT("\xd3", "\xc3\x93"), \ + _PyUnicode_LATIN1_INIT("\xd4", "\xc3\x94"), \ + _PyUnicode_LATIN1_INIT("\xd5", "\xc3\x95"), \ + _PyUnicode_LATIN1_INIT("\xd6", "\xc3\x96"), \ + _PyUnicode_LATIN1_INIT("\xd7", "\xc3\x97"), \ + _PyUnicode_LATIN1_INIT("\xd8", "\xc3\x98"), \ + _PyUnicode_LATIN1_INIT("\xd9", "\xc3\x99"), \ + _PyUnicode_LATIN1_INIT("\xda", "\xc3\x9a"), \ + _PyUnicode_LATIN1_INIT("\xdb", "\xc3\x9b"), \ + _PyUnicode_LATIN1_INIT("\xdc", "\xc3\x9c"), \ + _PyUnicode_LATIN1_INIT("\xdd", "\xc3\x9d"), \ + _PyUnicode_LATIN1_INIT("\xde", "\xc3\x9e"), \ + _PyUnicode_LATIN1_INIT("\xdf", "\xc3\x9f"), \ + _PyUnicode_LATIN1_INIT("\xe0", "\xc3\xa0"), \ + _PyUnicode_LATIN1_INIT("\xe1", "\xc3\xa1"), \ + 
_PyUnicode_LATIN1_INIT("\xe2", "\xc3\xa2"), \ + _PyUnicode_LATIN1_INIT("\xe3", "\xc3\xa3"), \ + _PyUnicode_LATIN1_INIT("\xe4", "\xc3\xa4"), \ + _PyUnicode_LATIN1_INIT("\xe5", "\xc3\xa5"), \ + _PyUnicode_LATIN1_INIT("\xe6", "\xc3\xa6"), \ + _PyUnicode_LATIN1_INIT("\xe7", "\xc3\xa7"), \ + _PyUnicode_LATIN1_INIT("\xe8", "\xc3\xa8"), \ + _PyUnicode_LATIN1_INIT("\xe9", "\xc3\xa9"), \ + _PyUnicode_LATIN1_INIT("\xea", "\xc3\xaa"), \ + _PyUnicode_LATIN1_INIT("\xeb", "\xc3\xab"), \ + _PyUnicode_LATIN1_INIT("\xec", "\xc3\xac"), \ + _PyUnicode_LATIN1_INIT("\xed", "\xc3\xad"), \ + _PyUnicode_LATIN1_INIT("\xee", "\xc3\xae"), \ + _PyUnicode_LATIN1_INIT("\xef", "\xc3\xaf"), \ + _PyUnicode_LATIN1_INIT("\xf0", "\xc3\xb0"), \ + _PyUnicode_LATIN1_INIT("\xf1", "\xc3\xb1"), \ + _PyUnicode_LATIN1_INIT("\xf2", "\xc3\xb2"), \ + _PyUnicode_LATIN1_INIT("\xf3", "\xc3\xb3"), \ + _PyUnicode_LATIN1_INIT("\xf4", "\xc3\xb4"), \ + _PyUnicode_LATIN1_INIT("\xf5", "\xc3\xb5"), \ + _PyUnicode_LATIN1_INIT("\xf6", "\xc3\xb6"), \ + _PyUnicode_LATIN1_INIT("\xf7", "\xc3\xb7"), \ + _PyUnicode_LATIN1_INIT("\xf8", "\xc3\xb8"), \ + _PyUnicode_LATIN1_INIT("\xf9", "\xc3\xb9"), \ + _PyUnicode_LATIN1_INIT("\xfa", "\xc3\xba"), \ + _PyUnicode_LATIN1_INIT("\xfb", "\xc3\xbb"), \ + _PyUnicode_LATIN1_INIT("\xfc", "\xc3\xbc"), \ + _PyUnicode_LATIN1_INIT("\xfd", "\xc3\xbd"), \ + _PyUnicode_LATIN1_INIT("\xfe", "\xc3\xbe"), \ + _PyUnicode_LATIN1_INIT("\xff", "\xc3\xbf"), \ } -#endif /* End auto-generated code */ + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_semaphore.h b/Include/internal/pycore_semaphore.h new file mode 100644 index 000000000000000..4c37df7b39a48a3 --- /dev/null +++ b/Include/internal/pycore_semaphore.h @@ -0,0 +1,66 @@ +// The _PySemaphore API a simplified cross-platform semaphore used to implement +// wakeup/sleep. +#ifndef Py_INTERNAL_SEMAPHORE_H +#define Py_INTERNAL_SEMAPHORE_H + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_pythread.h" // _POSIX_SEMAPHORES +#include "pycore_time.h" // _PyTime_t + +#ifdef MS_WINDOWS +# define WIN32_LEAN_AND_MEAN +# include +#elif defined(HAVE_PTHREAD_H) +# include +#elif defined(HAVE_PTHREAD_STUBS) +# include "cpython/pthread_stubs.h" +#else +# error "Require native threads. See https://bugs.python.org/issue31370" +#endif + +#if (defined(_POSIX_SEMAPHORES) && (_POSIX_SEMAPHORES+0) != -1 && \ + defined(HAVE_SEM_TIMEDWAIT)) +# define _Py_USE_SEMAPHORES +# include +#endif + + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _PySemaphore { +#if defined(MS_WINDOWS) + HANDLE platform_sem; +#elif defined(_Py_USE_SEMAPHORES) + sem_t platform_sem; +#else + pthread_mutex_t mutex; + pthread_cond_t cond; + int counter; +#endif +} _PySemaphore; + +// Puts the current thread to sleep until _PySemaphore_Wakeup() is called. +// If `detach` is true, then the thread will detach/release the GIL while +// sleeping. +PyAPI_FUNC(int) +_PySemaphore_Wait(_PySemaphore *sema, _PyTime_t timeout_ns, int detach); + +// Wakes up a single thread waiting on sema. Note that _PySemaphore_Wakeup() +// can be called before _PySemaphore_Wait(). 
+PyAPI_FUNC(void) +_PySemaphore_Wakeup(_PySemaphore *sema); + +// Initializes/destroys a semaphore +PyAPI_FUNC(void) _PySemaphore_Init(_PySemaphore *sema); +PyAPI_FUNC(void) _PySemaphore_Destroy(_PySemaphore *sema); + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_SEMAPHORE_H */ diff --git a/Include/internal/pycore_setobject.h b/Include/internal/pycore_setobject.h new file mode 100644 index 000000000000000..34a00e6d45fe698 --- /dev/null +++ b/Include/internal/pycore_setobject.h @@ -0,0 +1,27 @@ +#ifndef Py_INTERNAL_SETOBJECT_H +#define Py_INTERNAL_SETOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +// Export for '_pickle' shared extension +PyAPI_FUNC(int) _PySet_NextEntry( + PyObject *set, + Py_ssize_t *pos, + PyObject **key, + Py_hash_t *hash); + +// Export for '_pickle' shared extension +PyAPI_FUNC(int) _PySet_Update(PyObject *set, PyObject *iterable); + +// Export for the gdb plugin's (python-gdb.py) benefit +PyAPI_DATA(PyObject *) _PySet_Dummy; + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_SETOBJECT_H diff --git a/Include/internal/pycore_signal.h b/Include/internal/pycore_signal.h index b921dd170e9f6fe..47213a34ab77b52 100644 --- a/Include/internal/pycore_signal.h +++ b/Include/internal/pycore_signal.h @@ -10,7 +10,12 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include // NSIG +#include // NSIG + + +// Restore signals that the interpreter has called SIG_IGN on to SIG_DFL. +// Export for '_posixsubprocess' shared extension. +PyAPI_FUNC(void) _Py_RestoreSignals(void); #ifdef _SIG_MAXSIG // gh-91145: On FreeBSD, defines NSIG as 32: it doesn't include @@ -29,6 +34,74 @@ extern "C" { # define Py_NSIG 64 // Use a reasonable default value #endif +#define INVALID_FD (-1) + +struct _signals_runtime_state { + struct { + // tripped and func should be accessed using atomic ops. + int tripped; + PyObject* func; + } handlers[Py_NSIG]; + + volatile struct { +#ifdef MS_WINDOWS + /* This would be "SOCKET fd" if were always included. + It isn't so we must cast to SOCKET where appropriate. */ + volatile int fd; +#elif defined(__VXWORKS__) + int fd; +#else + sig_atomic_t fd; +#endif + + int warn_on_full_buffer; +#ifdef MS_WINDOWS + int use_send; +#endif + } wakeup; + + /* Speed up sigcheck() when none tripped. + is_tripped should be accessed using atomic ops. */ + int is_tripped; + + /* These objects necessarily belong to the main interpreter. */ + PyObject *default_handler; + PyObject *ignore_handler; + +#ifdef MS_WINDOWS + /* This would be "HANDLE sigint_event" if were always included. + It isn't so we must cast to HANDLE everywhere "sigint_event" is used. */ + void *sigint_event; +#endif + + /* True if the main interpreter thread exited due to an unhandled + * KeyboardInterrupt exception, suggesting the user pressed ^C. */ + int unhandled_keyboard_interrupt; +}; + +#ifdef MS_WINDOWS +# define _signals_WAKEUP_INIT \ + {.fd = INVALID_FD, .warn_on_full_buffer = 1, .use_send = 0} +#else +# define _signals_WAKEUP_INIT \ + {.fd = INVALID_FD, .warn_on_full_buffer = 1} +#endif + +#define _signals_RUNTIME_INIT \ + { \ + .wakeup = _signals_WAKEUP_INIT, \ + } + + +// Export for '_multiprocessing' shared extension +PyAPI_FUNC(int) _PyOS_IsMainThread(void); + +#ifdef MS_WINDOWS +// is not included by Python.h so use void* instead of HANDLE. 
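
The _PySemaphore API introduced in pycore_semaphore.h above is only declared, not demonstrated, so here is a minimal sketch of how the wait/wakeup pair is intended to be used across two threads. The helper names (producer_signal, consumer_wait, example_setup_and_teardown) and the reading of _PySemaphore_Wait()'s return value as "woken vs. timed out" are assumptions for illustration; only the _PySemaphore_* calls, the nanosecond timeout, and the detach flag come from the header itself.

    /* Hypothetical usage sketch -- not part of the change. */
    #include "pycore_semaphore.h"

    static _PySemaphore sema;

    static void
    producer_signal(void)
    {
        /* Documented as safe even if the waiter has not yet reached
           _PySemaphore_Wait(). */
        _PySemaphore_Wakeup(&sema);
    }

    static int
    consumer_wait(_PyTime_t timeout_ns)
    {
        /* detach=1: release the GIL while blocked, per the comment above.
           Return value interpreted here as non-zero when woken (assumption). */
        return _PySemaphore_Wait(&sema, timeout_ns, 1);
    }

    static void
    example_setup_and_teardown(void)
    {
        _PySemaphore_Init(&sema);
        /* ... hand &sema to the producer and consumer threads ... */
        _PySemaphore_Destroy(&sema);
    }
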
+// Export for '_multiprocessing' shared extension +PyAPI_FUNC(void*) _PyOS_SigintEvent(void); +#endif + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_strhex.h b/Include/internal/pycore_strhex.h index f427b4d695bd299..225f423912f2c27 100644 --- a/Include/internal/pycore_strhex.h +++ b/Include/internal/pycore_strhex.h @@ -9,21 +9,24 @@ extern "C" { #endif // Returns a str() containing the hex representation of argbuf. +// Export for '_hashlib' shared extension. PyAPI_FUNC(PyObject*) _Py_strhex(const char* argbuf, const Py_ssize_t arglen); // Returns a bytes() containing the ASCII hex representation of argbuf. -PyAPI_FUNC(PyObject*) _Py_strhex_bytes( +extern PyObject* _Py_strhex_bytes( const char* argbuf, const Py_ssize_t arglen); // These variants include support for a separator between every N bytes: -PyAPI_FUNC(PyObject*) _Py_strhex_with_sep( +extern PyObject* _Py_strhex_with_sep( const char* argbuf, const Py_ssize_t arglen, PyObject* sep, const int bytes_per_group); + +// Export for 'binascii' shared extension PyAPI_FUNC(PyObject*) _Py_strhex_bytes_with_sep( const char* argbuf, const Py_ssize_t arglen, diff --git a/Include/internal/pycore_structseq.h b/Include/internal/pycore_structseq.h index d10a921c55ff8bc..5cff165627502bd 100644 --- a/Include/internal/pycore_structseq.h +++ b/Include/internal/pycore_structseq.h @@ -11,23 +11,28 @@ extern "C" { /* other API */ -PyAPI_FUNC(PyTypeObject *) _PyStructSequence_NewType( +// Export for '_curses' shared extension +PyAPI_FUNC(PyTypeObject*) _PyStructSequence_NewType( PyStructSequence_Desc *desc, unsigned long tp_flags); -PyAPI_FUNC(int) _PyStructSequence_InitBuiltinWithFlags( +extern int _PyStructSequence_InitBuiltinWithFlags( + PyInterpreterState *interp, PyTypeObject *type, PyStructSequence_Desc *desc, unsigned long tp_flags); static inline int -_PyStructSequence_InitBuiltin(PyTypeObject *type, +_PyStructSequence_InitBuiltin(PyInterpreterState *interp, + PyTypeObject *type, PyStructSequence_Desc *desc) { - return _PyStructSequence_InitBuiltinWithFlags(type, desc, 0); + return _PyStructSequence_InitBuiltinWithFlags(interp, type, desc, 0); } -extern void _PyStructSequence_FiniType(PyTypeObject *type); +extern void _PyStructSequence_FiniBuiltin( + PyInterpreterState *interp, + PyTypeObject *type); #ifdef __cplusplus } diff --git a/Include/internal/pycore_symtable.h b/Include/internal/pycore_symtable.h index 8532646ce7d95cb..1d782ca2c96e055 100644 --- a/Include/internal/pycore_symtable.h +++ b/Include/internal/pycore_symtable.h @@ -10,8 +10,17 @@ extern "C" { struct _mod; // Type defined in pycore_ast.h -typedef enum _block_type { FunctionBlock, ClassBlock, ModuleBlock, AnnotationBlock } - _Py_block_ty; +typedef enum _block_type { + FunctionBlock, ClassBlock, ModuleBlock, + // Used for annotations if 'from __future__ import annotations' is active. + // Annotation blocks cannot bind names and are not evaluated. + AnnotationBlock, + // Used for generics and type aliases. These work mostly like functions + // (see PEP 695 for details). The three different blocks function identically; + // they are different enum entries only so that error messages can be more + // precise. 
+ TypeVarBoundBlock, TypeAliasBlock, TypeParamBlock +} _Py_block_ty; typedef enum _comprehension_type { NoComprehension = 0, @@ -49,7 +58,7 @@ typedef struct _symtable_entry { PyObject *ste_varnames; /* list of function parameters */ PyObject *ste_children; /* list of child blocks */ PyObject *ste_directives;/* locations of global and nonlocal statements */ - _Py_block_ty ste_type; /* module, class, function or annotation */ + _Py_block_ty ste_type; int ste_nested; /* true if block is nested */ unsigned ste_free : 1; /* true if block has free variables */ unsigned ste_child_free : 1; /* true if a child block has free vars, @@ -64,7 +73,12 @@ typedef struct _symtable_entry { unsigned ste_needs_class_closure : 1; /* for class scopes, true if a closure over __class__ should be created */ + unsigned ste_needs_classdict : 1; /* for class scopes, true if a closure + over the class dict should be created */ + unsigned ste_comp_inlined : 1; /* true if this comprehension is inlined */ unsigned ste_comp_iter_target : 1; /* true if visiting comprehension target */ + unsigned ste_can_see_class_scope : 1; /* true if this block can see names bound in an + enclosing class scope */ int ste_comp_iter_expr; /* non-zero if visiting a comprehension range expression */ int ste_lineno; /* first line of block */ int ste_col_offset; /* offset of first line of block */ @@ -81,15 +95,18 @@ extern PyTypeObject PySTEntry_Type; extern long _PyST_GetSymbol(PySTEntryObject *, PyObject *); extern int _PyST_GetScope(PySTEntryObject *, PyObject *); +extern int _PyST_IsFunctionLike(PySTEntryObject *); extern struct symtable* _PySymtable_Build( struct _mod *mod, PyObject *filename, PyFutureFeatures *future); -PyAPI_FUNC(PySTEntryObject *) PySymtable_Lookup(struct symtable *, void *); +extern PySTEntryObject* _PySymtable_Lookup(struct symtable *, void *); extern void _PySymtable_Free(struct symtable *); +extern PyObject* _Py_Mangle(PyObject *p, PyObject *name); + /* Flags for def-use information */ #define DEF_GLOBAL 1 /* global stmt */ @@ -102,14 +119,16 @@ extern void _PySymtable_Free(struct symtable *); #define DEF_IMPORT 2<<6 /* assignment occurred via import */ #define DEF_ANNOT 2<<7 /* this name is annotated */ #define DEF_COMP_ITER 2<<8 /* this name is a comprehension iteration variable */ +#define DEF_TYPE_PARAM 2<<9 /* this name is a type parameter */ +#define DEF_COMP_CELL 2<<10 /* this name is a cell in an inlined comprehension */ #define DEF_BOUND (DEF_LOCAL | DEF_PARAM | DEF_IMPORT) /* GLOBAL_EXPLICIT and GLOBAL_IMPLICIT are used internally by the symbol table. GLOBAL is returned from PyST_GetScope() for either of them. - It is stored in ste_symbols at bits 12-15. + It is stored in ste_symbols at bits 13-16. 
*/ -#define SCOPE_OFFSET 11 +#define SCOPE_OFFSET 12 #define SCOPE_MASK (DEF_GLOBAL | DEF_LOCAL | DEF_PARAM | DEF_NONLOCAL) #define LOCAL 1 @@ -128,6 +147,11 @@ extern struct symtable* _Py_SymtableStringObjectFlags( int start, PyCompilerFlags *flags); +int _PyFuture_FromAST( + struct _mod * mod, + PyObject *filename, + PyFutureFeatures* futures); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_sysmodule.h b/Include/internal/pycore_sysmodule.h index 10d092cdc30a2c5..9b8eafd3d6cfd3f 100644 --- a/Include/internal/pycore_sysmodule.h +++ b/Include/internal/pycore_sysmodule.h @@ -8,17 +8,26 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -PyAPI_FUNC(int) _PySys_Audit( +// Export for '_pickle' shared extension +PyAPI_FUNC(PyObject*) _PySys_GetAttr(PyThreadState *tstate, PyObject *name); + +// Export for '_pickle' shared extension +PyAPI_FUNC(size_t) _PySys_GetSizeOf(PyObject *); + +extern int _PySys_Audit( PyThreadState *tstate, const char *event, const char *argFormat, ...); -/* We want minimal exposure of this function, so use extern rather than - PyAPI_FUNC() to not export the symbol. */ +// _PySys_ClearAuditHooks() must not be exported: use extern rather than +// PyAPI_FUNC(). We want minimal exposure of this function. extern void _PySys_ClearAuditHooks(PyThreadState *tstate); -PyAPI_FUNC(int) _PySys_SetAttr(PyObject *, PyObject *); +extern int _PySys_SetAttr(PyObject *, PyObject *); + +extern int _PySys_ClearAttrString(PyInterpreterState *interp, + const char *name, int verbose); #ifdef __cplusplus } diff --git a/Include/internal/pycore_time.h b/Include/internal/pycore_time.h new file mode 100644 index 000000000000000..46713f91d190ffc --- /dev/null +++ b/Include/internal/pycore_time.h @@ -0,0 +1,371 @@ +// The _PyTime_t API is written to use timestamp and timeout values stored in +// various formats and to read clocks. +// +// The _PyTime_t type is an integer to support directly common arithmetic +// operations like t1 + t2. +// +// The _PyTime_t API supports a resolution of 1 nanosecond. The _PyTime_t type +// is signed to support negative timestamps. The supported range is around +// [-292.3 years; +292.3 years]. Using the Unix epoch (January 1st, 1970), the +// supported date range is around [1677-09-21; 2262-04-11]. +// +// Formats: +// +// * seconds +// * seconds as a floating pointer number (C double) +// * milliseconds (10^-3 seconds) +// * microseconds (10^-6 seconds) +// * 100 nanoseconds (10^-7 seconds) +// * nanoseconds (10^-9 seconds) +// * timeval structure, 1 microsecond resolution (10^-6 seconds) +// * timespec structure, 1 nanosecond resolution (10^-9 seconds) +// +// Integer overflows are detected and raise OverflowError. Conversion to a +// resolution worse than 1 nanosecond is rounded correctly with the requested +// rounding mode. There are 4 rounding modes: floor (towards -inf), ceiling +// (towards +inf), half even and up (away from zero). +// +// Some functions clamp the result in the range [_PyTime_MIN; _PyTime_MAX], so +// the caller doesn't have to handle errors and doesn't need to hold the GIL. +// For example, _PyTime_Add(t1, t2) computes t1+t2 and clamp the result on +// overflow. +// +// Clocks: +// +// * System clock +// * Monotonic clock +// * Performance counter +// +// Operations like (t * k / q) with integers are implemented in a way to reduce +// the risk of integer overflow. 
Such operation is used to convert a clock +// value expressed in ticks with a frequency to _PyTime_t, like +// QueryPerformanceCounter() with QueryPerformanceFrequency(). + +#ifndef Py_INTERNAL_TIME_H +#define Py_INTERNAL_TIME_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +struct _time_runtime_state { +#ifdef HAVE_TIMES + int ticks_per_second_initialized; + long ticks_per_second; +#else + int _not_used; +#endif +}; + + +#ifdef __clang__ +struct timeval; +#endif + +// _PyTime_t: Python timestamp with subsecond precision. It can be used to +// store a duration, and so indirectly a date (related to another date, like +// UNIX epoch). +typedef int64_t _PyTime_t; +// _PyTime_MIN nanoseconds is around -292.3 years +#define _PyTime_MIN INT64_MIN +// _PyTime_MAX nanoseconds is around +292.3 years +#define _PyTime_MAX INT64_MAX +#define _SIZEOF_PYTIME_T 8 + +typedef enum { + // Round towards minus infinity (-inf). + // For example, used to read a clock. + _PyTime_ROUND_FLOOR=0, + + // Round towards infinity (+inf). + // For example, used for timeout to wait "at least" N seconds. + _PyTime_ROUND_CEILING=1, + + // Round to nearest with ties going to nearest even integer. + // For example, used to round from a Python float. + _PyTime_ROUND_HALF_EVEN=2, + + // Round away from zero + // For example, used for timeout. _PyTime_ROUND_CEILING rounds + // -1e-9 to 0 milliseconds which causes bpo-31786 issue. + // _PyTime_ROUND_UP rounds -1e-9 to -1 millisecond which keeps + // the timeout sign as expected. select.poll(timeout) must block + // for negative values. + _PyTime_ROUND_UP=3, + + // _PyTime_ROUND_TIMEOUT (an alias for _PyTime_ROUND_UP) should be + // used for timeouts. + _PyTime_ROUND_TIMEOUT = _PyTime_ROUND_UP +} _PyTime_round_t; + + +// Convert a time_t to a PyLong. +// Export for '_testinternalcapi' shared extension +PyAPI_FUNC(PyObject*) _PyLong_FromTime_t(time_t sec); + +// Convert a PyLong to a time_t. +// Export for '_datetime' shared extension +PyAPI_FUNC(time_t) _PyLong_AsTime_t(PyObject *obj); + +// Convert a number of seconds, int or float, to time_t. +// Export for '_datetime' shared extension. +PyAPI_FUNC(int) _PyTime_ObjectToTime_t( + PyObject *obj, + time_t *sec, + _PyTime_round_t); + +// Convert a number of seconds, int or float, to a timeval structure. +// usec is in the range [0; 999999] and rounded towards zero. +// For example, -1.2 is converted to (-2, 800000). +// Export for '_datetime' shared extension. +PyAPI_FUNC(int) _PyTime_ObjectToTimeval( + PyObject *obj, + time_t *sec, + long *usec, + _PyTime_round_t); + +// Convert a number of seconds, int or float, to a timespec structure. +// nsec is in the range [0; 999999999] and rounded towards zero. +// For example, -1.2 is converted to (-2, 800000000). +// Export for '_testinternalcapi' shared extension. +PyAPI_FUNC(int) _PyTime_ObjectToTimespec( + PyObject *obj, + time_t *sec, + long *nsec, + _PyTime_round_t); + + +// Create a timestamp from a number of seconds. +// Export for '_socket' shared extension. +PyAPI_FUNC(_PyTime_t) _PyTime_FromSeconds(int seconds); + +// Create a timestamp from a number of seconds in double. +// Export for '_socket' shared extension. +PyAPI_FUNC(_PyTime_t) _PyTime_FromSecondsDouble(double seconds, _PyTime_round_t round); + +// Macro to create a timestamp from a number of seconds, no integer overflow. +// Only use the macro for small values, prefer _PyTime_FromSeconds(). 
+#define _PYTIME_FROMSECONDS(seconds) \ + ((_PyTime_t)(seconds) * (1000 * 1000 * 1000)) + +// Create a timestamp from a number of nanoseconds. +// Export for '_testinternalcapi' shared extension. +PyAPI_FUNC(_PyTime_t) _PyTime_FromNanoseconds(_PyTime_t ns); + +// Create a timestamp from a number of microseconds. +// Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. +extern _PyTime_t _PyTime_FromMicrosecondsClamp(_PyTime_t us); + +// Create a timestamp from nanoseconds (Python int). +// Export for '_lsprof' shared extension. +PyAPI_FUNC(int) _PyTime_FromNanosecondsObject(_PyTime_t *t, + PyObject *obj); + +// Convert a number of seconds (Python float or int) to a timestamp. +// Raise an exception and return -1 on error, return 0 on success. +// Export for '_socket' shared extension. +PyAPI_FUNC(int) _PyTime_FromSecondsObject(_PyTime_t *t, + PyObject *obj, + _PyTime_round_t round); + +// Convert a number of milliseconds (Python float or int, 10^-3) to a timestamp. +// Raise an exception and return -1 on error, return 0 on success. +// Export for 'select' shared extension. +PyAPI_FUNC(int) _PyTime_FromMillisecondsObject(_PyTime_t *t, + PyObject *obj, + _PyTime_round_t round); + +// Convert a timestamp to a number of seconds as a C double. +// Export for '_socket' shared extension. +PyAPI_FUNC(double) _PyTime_AsSecondsDouble(_PyTime_t t); + +// Convert timestamp to a number of milliseconds (10^-3 seconds). +// Export for '_ssl' shared extension. +PyAPI_FUNC(_PyTime_t) _PyTime_AsMilliseconds(_PyTime_t t, + _PyTime_round_t round); + +// Convert timestamp to a number of microseconds (10^-6 seconds). +// Export for '_queue' shared extension. +PyAPI_FUNC(_PyTime_t) _PyTime_AsMicroseconds(_PyTime_t t, + _PyTime_round_t round); + +// Convert timestamp to a number of nanoseconds (10^-9 seconds). +extern _PyTime_t _PyTime_AsNanoseconds(_PyTime_t t); + +#ifdef MS_WINDOWS +// Convert timestamp to a number of 100 nanoseconds (10^-7 seconds). +extern _PyTime_t _PyTime_As100Nanoseconds(_PyTime_t t, + _PyTime_round_t round); +#endif + +// Convert timestamp to a number of nanoseconds (10^-9 seconds) as a Python int +// object. +// Export for '_testinternalcapi' shared extension. +PyAPI_FUNC(PyObject*) _PyTime_AsNanosecondsObject(_PyTime_t t); + +#ifndef MS_WINDOWS +// Create a timestamp from a timeval structure. +// Raise an exception and return -1 on overflow, return 0 on success. +extern int _PyTime_FromTimeval(_PyTime_t *tp, struct timeval *tv); +#endif + +// Convert a timestamp to a timeval structure (microsecond resolution). +// tv_usec is always positive. +// Raise an exception and return -1 if the conversion overflowed, +// return 0 on success. +// Export for 'select' shared extension. +PyAPI_FUNC(int) _PyTime_AsTimeval(_PyTime_t t, + struct timeval *tv, + _PyTime_round_t round); + +// Similar to _PyTime_AsTimeval() but don't raise an exception on overflow. +// On overflow, clamp tv_sec to _PyTime_t min/max. +// Export for 'select' shared extension. +PyAPI_FUNC(void) _PyTime_AsTimeval_clamp(_PyTime_t t, + struct timeval *tv, + _PyTime_round_t round); + +// Convert a timestamp to a number of seconds (secs) and microseconds (us). +// us is always positive. This function is similar to _PyTime_AsTimeval() +// except that secs is always a time_t type, whereas the timeval structure +// uses a C long for tv_sec on Windows. +// Raise an exception and return -1 if the conversion overflowed, +// return 0 on success. +// Export for '_datetime' shared extension. 
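
To make the conversion and rounding rules above concrete, here is a small sketch of turning a user-supplied timeout (a Python int or float in seconds) into whole milliseconds for a poll()-style call. timeout_to_ms() is an invented helper; the calls themselves and the choice of _PyTime_ROUND_TIMEOUT (the rounding mode the comments recommend for timeouts, so a tiny positive value does not collapse to zero and a tiny negative one keeps its sign) come from the declarations above.

    /* Hypothetical usage sketch -- not part of the change. */
    #include "Python.h"
    #include "pycore_time.h"

    static int
    timeout_to_ms(PyObject *seconds_obj, _PyTime_t *ms)
    {
        _PyTime_t t;
        if (_PyTime_FromSecondsObject(&t, seconds_obj,
                                      _PyTime_ROUND_TIMEOUT) < 0) {
            return -1;   /* exception (e.g. OverflowError) already set */
        }
        *ms = _PyTime_AsMilliseconds(t, _PyTime_ROUND_TIMEOUT);
        return 0;
    }
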
+PyAPI_FUNC(int) _PyTime_AsTimevalTime_t( + _PyTime_t t, + time_t *secs, + int *us, + _PyTime_round_t round); + +#if defined(HAVE_CLOCK_GETTIME) || defined(HAVE_KQUEUE) +// Create a timestamp from a timespec structure. +// Raise an exception and return -1 on overflow, return 0 on success. +extern int _PyTime_FromTimespec(_PyTime_t *tp, const struct timespec *ts); + +// Convert a timestamp to a timespec structure (nanosecond resolution). +// tv_nsec is always positive. +// Raise an exception and return -1 on error, return 0 on success. +// Export for '_testinternalcapi' shared extension. +PyAPI_FUNC(int) _PyTime_AsTimespec(_PyTime_t t, struct timespec *ts); + +// Similar to _PyTime_AsTimespec() but don't raise an exception on overflow. +// On overflow, clamp tv_sec to _PyTime_t min/max. +// Export for '_testinternalcapi' shared extension. +PyAPI_FUNC(void) _PyTime_AsTimespec_clamp(_PyTime_t t, struct timespec *ts); +#endif + + +// Compute t1 + t2. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. +extern _PyTime_t _PyTime_Add(_PyTime_t t1, _PyTime_t t2); + +// Compute ticks * mul / div. +// Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. +// The caller must ensure that ((div - 1) * mul) cannot overflow. +extern _PyTime_t _PyTime_MulDiv(_PyTime_t ticks, + _PyTime_t mul, + _PyTime_t div); + +// Structure used by time.get_clock_info() +typedef struct { + const char *implementation; + int monotonic; + int adjustable; + double resolution; +} _Py_clock_info_t; + +// Get the current time from the system clock. +// +// If the internal clock fails, silently ignore the error and return 0. +// On integer overflow, silently ignore the overflow and clamp the clock to +// [_PyTime_MIN; _PyTime_MAX]. +// +// Use _PyTime_GetSystemClockWithInfo() to check for failure. +// Export for '_random' shared extension. +PyAPI_FUNC(_PyTime_t) _PyTime_GetSystemClock(void); + +// Get the current time from the system clock. +// On success, set *t and *info (if not NULL), and return 0. +// On error, raise an exception and return -1. +extern int _PyTime_GetSystemClockWithInfo( + _PyTime_t *t, + _Py_clock_info_t *info); + +// Get the time of a monotonic clock, i.e. a clock that cannot go backwards. +// The clock is not affected by system clock updates. The reference point of +// the returned value is undefined, so that only the difference between the +// results of consecutive calls is valid. +// +// If the internal clock fails, silently ignore the error and return 0. +// On integer overflow, silently ignore the overflow and clamp the clock to +// [_PyTime_MIN; _PyTime_MAX]. +// +// Use _PyTime_GetMonotonicClockWithInfo() to check for failure. +// Export for '_random' shared extension. +PyAPI_FUNC(_PyTime_t) _PyTime_GetMonotonicClock(void); + +// Get the time of a monotonic clock, i.e. a clock that cannot go backwards. +// The clock is not affected by system clock updates. The reference point of +// the returned value is undefined, so that only the difference between the +// results of consecutive calls is valid. +// +// Fill info (if set) with information of the function used to get the time. +// +// Return 0 on success, raise an exception and return -1 on error. +// Export for '_testsinglephase' shared extension. +PyAPI_FUNC(int) _PyTime_GetMonotonicClockWithInfo( + _PyTime_t *t, + _Py_clock_info_t *info); + + +// Converts a timestamp to the Gregorian time, using the local time zone. +// Return 0 on success, raise an exception and return -1 on error. +// Export for '_datetime' shared extension. 
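
The clock functions above are easiest to see in combination, so here is a brief sketch of timing an operation with the monotonic clock, which the comments recommend for intervals because only the difference between two readings is meaningful. timed_call() and report_duration are invented for illustration; the arithmetic relies on the documented fact that _PyTime_t is a plain integer holding nanoseconds.

    /* Hypothetical usage sketch -- not part of the change. */
    #include "pycore_time.h"

    static void
    timed_call(void (*op)(void), void (*report_duration)(double seconds))
    {
        _PyTime_t start = _PyTime_GetMonotonicClock();
        op();
        _PyTime_t end = _PyTime_GetMonotonicClock();
        /* _PyTime_t is an int64_t of nanoseconds, so ordinary subtraction
           gives the elapsed time directly. */
        _PyTime_t elapsed = end - start;
        report_duration(_PyTime_AsSecondsDouble(elapsed));
    }
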
+PyAPI_FUNC(int) _PyTime_localtime(time_t t, struct tm *tm); + +// Converts a timestamp to the Gregorian time, assuming UTC. +// Return 0 on success, raise an exception and return -1 on error. +// Export for '_datetime' shared extension. +PyAPI_FUNC(int) _PyTime_gmtime(time_t t, struct tm *tm); + +// Get the performance counter: clock with the highest available resolution to +// measure a short duration. +// +// If the internal clock fails, silently ignore the error and return 0. +// On integer overflow, silently ignore the overflow and clamp the clock to +// [_PyTime_MIN; _PyTime_MAX]. +// +// Use _PyTime_GetPerfCounterWithInfo() to check for failure. +// Export for '_lsprof' shared extension. +PyAPI_FUNC(_PyTime_t) _PyTime_GetPerfCounter(void); + +// Get the performance counter: clock with the highest available resolution to +// measure a short duration. +// +// Fill info (if set) with information of the function used to get the time. +// +// Return 0 on success, raise an exception and return -1 on error. +extern int _PyTime_GetPerfCounterWithInfo( + _PyTime_t *t, + _Py_clock_info_t *info); + + +// Create a deadline. +// Pseudo code: _PyTime_GetMonotonicClock() + timeout. +// Export for '_ssl' shared extension. +PyAPI_FUNC(_PyTime_t) _PyDeadline_Init(_PyTime_t timeout); + +// Get remaining time from a deadline. +// Pseudo code: deadline - _PyTime_GetMonotonicClock(). +// Export for '_ssl' shared extension. +PyAPI_FUNC(_PyTime_t) _PyDeadline_Get(_PyTime_t deadline); + + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_TIME_H diff --git a/Include/internal/pycore_token.h b/Include/internal/pycore_token.h index 95459ab9f7d004b..571cd6249f28126 100644 --- a/Include/internal/pycore_token.h +++ b/Include/internal/pycore_token.h @@ -1,4 +1,4 @@ -/* Auto-generated by Tools/build/generate_token.py */ +// Auto-generated by Tools/build/generate_token.py /* Token types */ #ifndef Py_INTERNAL_TOKEN_H @@ -67,14 +67,18 @@ extern "C" { #define RARROW 51 #define ELLIPSIS 52 #define COLONEQUAL 53 -#define OP 54 -#define AWAIT 55 -#define ASYNC 56 -#define TYPE_IGNORE 57 -#define TYPE_COMMENT 58 -#define SOFT_KEYWORD 59 -#define ERRORTOKEN 60 -#define N_TOKENS 64 +#define EXCLAMATION 54 +#define OP 55 +#define TYPE_IGNORE 56 +#define TYPE_COMMENT 57 +#define SOFT_KEYWORD 58 +#define FSTRING_START 59 +#define FSTRING_MIDDLE 60 +#define FSTRING_END 61 +#define COMMENT 62 +#define NL 63 +#define ERRORTOKEN 64 +#define N_TOKENS 66 #define NT_OFFSET 256 /* Special definitions for cooperation with parser */ @@ -86,9 +90,11 @@ extern "C" { (x) == NEWLINE || \ (x) == INDENT || \ (x) == DEDENT) +#define ISSTRINGLIT(x) ((x) == STRING || \ + (x) == FSTRING_MIDDLE) -// Symbols exported for test_peg_generator +// Export these 4 symbols for 'test_peg_generator' PyAPI_DATA(const char * const) _PyParser_TokenNames[]; /* Token names */ PyAPI_FUNC(int) _PyToken_OneChar(int); PyAPI_FUNC(int) _PyToken_TwoChars(int, int); diff --git a/Include/internal/pycore_traceback.h b/Include/internal/pycore_traceback.h index c393b2c136f2de1..10922bff98bd4bc 100644 --- a/Include/internal/pycore_traceback.h +++ b/Include/internal/pycore_traceback.h @@ -8,6 +8,12 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +// Export for '_ctypes' shared extension +PyAPI_FUNC(int) _Py_DisplaySourceLine(PyObject *, PyObject *, int, int, int *, PyObject **); + +// Export for 'pyexact' shared extension +PyAPI_FUNC(void) _PyTraceback_Add(const char *, const char *, int); + /* Write the Python traceback into the file 'fd'. 
For example: Traceback (most recent call first): @@ -25,7 +31,7 @@ extern "C" { This function is signal safe. */ -PyAPI_FUNC(void) _Py_DumpTraceback( +extern void _Py_DumpTraceback( int fd, PyThreadState *tstate); @@ -52,7 +58,7 @@ PyAPI_FUNC(void) _Py_DumpTraceback( This function is signal safe. */ -PyAPI_FUNC(const char*) _Py_DumpTracebackThreads( +extern const char* _Py_DumpTracebackThreads( int fd, PyInterpreterState *interp, PyThreadState *current_tstate); @@ -64,23 +70,23 @@ PyAPI_FUNC(const char*) _Py_DumpTracebackThreads( string which is not ready (PyUnicode_WCHAR_KIND). This function is signal safe. */ -PyAPI_FUNC(void) _Py_DumpASCII(int fd, PyObject *text); +extern void _Py_DumpASCII(int fd, PyObject *text); /* Format an integer as decimal into the file descriptor fd. This function is signal safe. */ -PyAPI_FUNC(void) _Py_DumpDecimal( +extern void _Py_DumpDecimal( int fd, size_t value); /* Format an integer as hexadecimal with width digits into fd file descriptor. The function is signal safe. */ -PyAPI_FUNC(void) _Py_DumpHexadecimal( +extern void _Py_DumpHexadecimal( int fd, uintptr_t value, Py_ssize_t width); -PyAPI_FUNC(PyObject*) _PyTraceBack_FromFrame( +extern PyObject* _PyTraceBack_FromFrame( PyObject *tb_next, PyFrameObject *frame); @@ -89,11 +95,10 @@ PyAPI_FUNC(PyObject*) _PyTraceBack_FromFrame( /* Write the traceback tb to file f. Prefix each line with indent spaces followed by the margin (if it is not NULL). */ -PyAPI_FUNC(int) _PyTraceBack_Print_Indented( - PyObject *tb, int indent, const char* margin, - const char *header_margin, const char *header, PyObject *f); -PyAPI_FUNC(int) _Py_WriteIndentedMargin(int, const char*, PyObject *); -PyAPI_FUNC(int) _Py_WriteIndent(int, PyObject *); +extern int _PyTraceBack_Print( + PyObject *tb, const char *header, PyObject *f); +extern int _Py_WriteIndentedMargin(int, const char*, PyObject *); +extern int _Py_WriteIndent(int, PyObject *); #ifdef __cplusplus } diff --git a/Include/internal/pycore_tracemalloc.h b/Include/internal/pycore_tracemalloc.h new file mode 100644 index 000000000000000..7ddc5bac5d10af3 --- /dev/null +++ b/Include/internal/pycore_tracemalloc.h @@ -0,0 +1,170 @@ +#ifndef Py_INTERNAL_TRACEMALLOC_H +#define Py_INTERNAL_TRACEMALLOC_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_hashtable.h" // _Py_hashtable_t + + +/* Trace memory blocks allocated by PyMem_RawMalloc() */ +#define TRACE_RAW_MALLOC + + +struct _PyTraceMalloc_Config { + /* Module initialized? + Variable protected by the GIL */ + enum { + TRACEMALLOC_NOT_INITIALIZED, + TRACEMALLOC_INITIALIZED, + TRACEMALLOC_FINALIZED + } initialized; + + /* Is tracemalloc tracing memory allocations? + Variable protected by the GIL */ + int tracing; + + /* limit of the number of frames in a traceback, 1 by default. + Variable protected by the GIL. */ + int max_nframe; +}; + + +/* Pack the frame_t structure to reduce the memory footprint on 64-bit + architectures: 12 bytes instead of 16. 
*/ +#if defined(_MSC_VER) +#pragma pack(push, 4) +#endif + +struct +#ifdef __GNUC__ +__attribute__((packed)) +#endif +tracemalloc_frame { + /* filename cannot be NULL: "" is used if the Python frame + filename is NULL */ + PyObject *filename; + unsigned int lineno; +}; +#ifdef _MSC_VER +#pragma pack(pop) +#endif + +struct tracemalloc_traceback { + Py_uhash_t hash; + /* Number of frames stored */ + uint16_t nframe; + /* Total number of frames the traceback had */ + uint16_t total_nframe; + struct tracemalloc_frame frames[1]; +}; + + +struct _tracemalloc_runtime_state { + struct _PyTraceMalloc_Config config; + + /* Protected by the GIL */ + struct { + PyMemAllocatorEx mem; + PyMemAllocatorEx raw; + PyMemAllocatorEx obj; + } allocators; + +#if defined(TRACE_RAW_MALLOC) + PyThread_type_lock tables_lock; +#endif + /* Size in bytes of currently traced memory. + Protected by TABLES_LOCK(). */ + size_t traced_memory; + /* Peak size in bytes of traced memory. + Protected by TABLES_LOCK(). */ + size_t peak_traced_memory; + /* Hash table used as a set to intern filenames: + PyObject* => PyObject*. + Protected by the GIL */ + _Py_hashtable_t *filenames; + /* Buffer to store a new traceback in traceback_new(). + Protected by the GIL. */ + struct tracemalloc_traceback *traceback; + /* Hash table used as a set to intern tracebacks: + traceback_t* => traceback_t* + Protected by the GIL */ + _Py_hashtable_t *tracebacks; + /* pointer (void*) => trace (trace_t*). + Protected by TABLES_LOCK(). */ + _Py_hashtable_t *traces; + /* domain (unsigned int) => traces (_Py_hashtable_t). + Protected by TABLES_LOCK(). */ + _Py_hashtable_t *domains; + + struct tracemalloc_traceback empty_traceback; + + Py_tss_t reentrant_key; +}; + +#define _tracemalloc_runtime_state_INIT \ + { \ + .config = { \ + .initialized = TRACEMALLOC_NOT_INITIALIZED, \ + .tracing = 0, \ + .max_nframe = 1, \ + }, \ + .reentrant_key = Py_tss_NEEDS_INIT, \ + } + + +// Get the traceback where a memory block was allocated. +// +// Return a tuple of (filename: str, lineno: int) tuples. +// +// Return None if the tracemalloc module is disabled or if the memory block +// is not tracked by tracemalloc. +// +// Raise an exception and return NULL on error. +// +// Export for '_testinternalcapi' shared extension. 
+PyAPI_FUNC(PyObject*) _PyTraceMalloc_GetTraceback( + unsigned int domain, + uintptr_t ptr); + +/* Return non-zero if tracemalloc is tracing */ +extern int _PyTraceMalloc_IsTracing(void); + +/* Clear the tracemalloc traces */ +extern void _PyTraceMalloc_ClearTraces(void); + +/* Get the tracemalloc traces */ +extern PyObject* _PyTraceMalloc_GetTraces(void); + +/* Get the tracemalloc traceback for an object */ +extern PyObject* _PyTraceMalloc_GetObjectTraceback(PyObject *obj); + +/* Initialize tracemalloc */ +extern int _PyTraceMalloc_Init(void); + +/* Start tracemalloc */ +extern int _PyTraceMalloc_Start(int max_nframe); + +/* Stop tracemalloc */ +extern void _PyTraceMalloc_Stop(void); + +/* Get the tracemalloc traceback limit */ +extern int _PyTraceMalloc_GetTracebackLimit(void); + +/* Get the memory usage of tracemalloc in bytes */ +extern size_t _PyTraceMalloc_GetMemory(void); + +/* Get the current size and peak size of traced memory blocks as a 2-tuple */ +extern PyObject* _PyTraceMalloc_GetTracedMemory(void); + +/* Set the peak size of traced memory blocks to the current size */ +extern void _PyTraceMalloc_ResetPeak(void); + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_TRACEMALLOC_H diff --git a/Include/internal/pycore_tuple.h b/Include/internal/pycore_tuple.h index 1efe4fa2bdef943..4fa7a12206bcb21 100644 --- a/Include/internal/pycore_tuple.h +++ b/Include/internal/pycore_tuple.h @@ -8,13 +8,12 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "tupleobject.h" /* _PyTuple_CAST() */ - +extern void _PyTuple_MaybeUntrack(PyObject *); +extern void _PyTuple_DebugMallocStats(FILE *out); /* runtime lifecycle */ extern PyStatus _PyTuple_InitGlobalObjects(PyInterpreterState *); -extern PyStatus _PyTuple_InitTypes(PyInterpreterState *); extern void _PyTuple_Fini(PyInterpreterState *); @@ -62,11 +61,18 @@ struct _Py_tuple_state { #endif }; -#define _PyTuple_ITEMS(op) (_PyTuple_CAST(op)->ob_item) +#define _PyTuple_ITEMS(op) _Py_RVALUE(_PyTuple_CAST(op)->ob_item) extern PyObject *_PyTuple_FromArray(PyObject *const *, Py_ssize_t); extern PyObject *_PyTuple_FromArraySteal(PyObject *const *, Py_ssize_t); + +typedef struct { + PyObject_HEAD + Py_ssize_t it_index; + PyTupleObject *it_seq; /* Set to NULL when iterator is exhausted */ +} _PyTupleIterObject; + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_typeobject.h b/Include/internal/pycore_typeobject.h index 5e7aca1b9f5ae98..bbf8544b09f0fb7 100644 --- a/Include/internal/pycore_typeobject.h +++ b/Include/internal/pycore_typeobject.h @@ -8,15 +8,21 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +#include "pycore_moduleobject.h" // PyModuleObject -/* runtime lifecycle */ -extern PyStatus _PyTypes_InitTypes(PyInterpreterState *); -extern void _PyTypes_FiniTypes(PyInterpreterState *); -extern void _PyTypes_Fini(PyInterpreterState *); +/* state */ +#define _Py_TYPE_BASE_VERSION_TAG (2<<16) +#define _Py_MAX_GLOBAL_TYPE_VERSION_TAG (_Py_TYPE_BASE_VERSION_TAG - 1) + +struct _types_runtime_state { + /* Used to set PyTypeObject.tp_version_tag for core static types. */ + // bpo-42745: next_version_tag remains shared by all interpreters + // because of static types. + unsigned int next_version_tag; +}; -/* other API */ // Type attribute lookup cache: speed up attribute and method lookups, // see _PyType_Lookup().
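The new pycore_tracemalloc.h API above is only reachable from code built with Py_BUILD_CORE (plus the few shared extensions the export comments name). The following is a minimal illustrative sketch, not part of the patch, of how such a consumer might drive it; the trace_report() helper name and the assumption that _PyTraceMalloc_Start() follows the usual 0/-1-with-exception-set convention are mine, and the caller is assumed to hold the GIL.

/* Sketch only: exercise the internal tracemalloc API declared above. */
#include "Python.h"
#include "pycore_tracemalloc.h"   // internal header added by this patch

static int
trace_report(void)
{
    // Start tracing with a 1-frame traceback limit (assumed 0/-1 convention).
    if (_PyTraceMalloc_Start(1) < 0) {
        return -1;
    }

    // ... allocate some objects here ...

    // (current, peak) sizes of traced memory blocks, as a 2-tuple.
    PyObject *sizes = _PyTraceMalloc_GetTracedMemory();
    if (sizes == NULL) {
        _PyTraceMalloc_Stop();
        return -1;
    }
    PySys_FormatStdout("traced memory: %R (tracemalloc overhead: %zu bytes)\n",
                       sizes, _PyTraceMalloc_GetMemory());
    Py_DECREF(sizes);

    _PyTraceMalloc_Stop();   // stop tracing and release the internal tables
    return 0;
}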
@@ -27,15 +33,9 @@ struct type_cache_entry { }; #define MCACHE_SIZE_EXP 12 -#define MCACHE_STATS 0 struct type_cache { struct type_cache_entry hashtable[1 << MCACHE_SIZE_EXP]; -#if MCACHE_STATS - size_t hits; - size_t misses; - size_t collisions; -#endif }; /* For now we hard-code this to a value for which we are confident @@ -44,6 +44,11 @@ struct type_cache { typedef struct { PyTypeObject *type; + int readying; + int ready; + // XXX tp_dict can probably be statically allocated, + // instead of dynamically and stored on the interpreter. + PyObject *tp_dict; PyObject *tp_subclasses; /* We never clean up weakrefs for static builtin types since they will effectively never get triggered. However, there @@ -52,6 +57,36 @@ typedef struct { PyObject *tp_weaklist; } static_builtin_state; +struct types_state { + /* Used to set PyTypeObject.tp_version_tag. + It starts at _Py_MAX_GLOBAL_TYPE_VERSION_TAG + 1, + where all those lower numbers are used for core static types. */ + unsigned int next_version_tag; + + struct type_cache type_cache; + size_t num_builtins_initialized; + static_builtin_state builtins[_Py_MAX_STATIC_BUILTIN_TYPES]; +}; + + +/* runtime lifecycle */ + +extern PyStatus _PyTypes_InitTypes(PyInterpreterState *); +extern void _PyTypes_FiniTypes(PyInterpreterState *); +extern void _PyTypes_Fini(PyInterpreterState *); + + +/* other API */ + +/* Length of array of slotdef pointers used to store slots with the + same __name__. There should be at most MAX_EQUIV-1 slotdef entries with + the same __name__, for any __name__. Since that's a static property, it is + appropriate to declare fixed-size arrays for this. */ +#define MAX_EQUIV 10 + +typedef struct wrapperbase pytype_slotdef; + + static inline PyObject ** _PyStaticType_GET_WEAKREFS_LISTPTR(static_builtin_state *state) { @@ -59,21 +94,59 @@ _PyStaticType_GET_WEAKREFS_LISTPTR(static_builtin_state *state) return &state->tp_weaklist; } -struct types_state { - struct type_cache type_cache; - size_t num_builtins_initialized; - static_builtin_state builtins[_Py_MAX_STATIC_BUILTIN_TYPES]; -}; +/* Like PyType_GetModuleState, but skips verification + * that type is a heap type with an associated module */ +static inline void * +_PyType_GetModuleState(PyTypeObject *type) +{ + assert(PyType_Check(type)); + assert(type->tp_flags & Py_TPFLAGS_HEAPTYPE); + PyHeapTypeObject *et = (PyHeapTypeObject *)type; + assert(et->ht_module); + PyModuleObject *mod = (PyModuleObject *)(et->ht_module); + assert(mod != NULL); + return mod->md_state; +} + + +extern int _PyStaticType_InitBuiltin(PyInterpreterState *, PyTypeObject *type); +extern static_builtin_state * _PyStaticType_GetState(PyInterpreterState *, PyTypeObject *); +extern void _PyStaticType_ClearWeakRefs(PyInterpreterState *, PyTypeObject *type); +extern void _PyStaticType_Dealloc(PyInterpreterState *, PyTypeObject *); + +// Export for 'math' shared extension, used via _PyType_IsReady() static inline +// function +PyAPI_FUNC(PyObject *) _PyType_GetDict(PyTypeObject *); + +extern PyObject * _PyType_GetBases(PyTypeObject *type); +extern PyObject * _PyType_GetMRO(PyTypeObject *type); +extern PyObject* _PyType_GetSubclasses(PyTypeObject *); +extern int _PyType_HasSubclasses(PyTypeObject *); + +// PyType_Ready() must be called if _PyType_IsReady() is false. +// See also the Py_TPFLAGS_READY flag. 
+static inline int +_PyType_IsReady(PyTypeObject *type) +{ + return _PyType_GetDict(type) != NULL; +} + +extern PyObject* _Py_type_getattro_impl(PyTypeObject *type, PyObject *name, + int *suppress_missing_attribute); +extern PyObject* _Py_type_getattro(PyTypeObject *type, PyObject *name); + +extern PyObject* _Py_slot_tp_getattro(PyObject *self, PyObject *name); +extern PyObject* _Py_slot_tp_getattr_hook(PyObject *self, PyObject *name); + +extern PyTypeObject _PyBufferWrapper_Type; +extern PyObject* _PySuper_Lookup(PyTypeObject *su_type, PyObject *su_obj, + PyObject *name, int *meth_found); -extern int _PyStaticType_InitBuiltin(PyTypeObject *type); -extern static_builtin_state * _PyStaticType_GetState(PyTypeObject *); -extern void _PyStaticType_ClearWeakRefs(PyTypeObject *type); -extern void _PyStaticType_Dealloc(PyTypeObject *type); +// This is exported for the _testinternalcapi module. +PyAPI_FUNC(PyObject *) _PyType_GetModuleName(PyTypeObject *); -PyObject *_Py_slot_tp_getattro(PyObject *self, PyObject *name); -PyObject *_Py_slot_tp_getattr_hook(PyObject *self, PyObject *name); #ifdef __cplusplus } diff --git a/Include/internal/pycore_typevarobject.h b/Include/internal/pycore_typevarobject.h new file mode 100644 index 000000000000000..c9fa97d68207571 --- /dev/null +++ b/Include/internal/pycore_typevarobject.h @@ -0,0 +1,24 @@ +#ifndef Py_INTERNAL_TYPEVAROBJECT_H +#define Py_INTERNAL_TYPEVAROBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +extern PyObject *_Py_make_typevar(PyObject *, PyObject *, PyObject *); +extern PyObject *_Py_make_paramspec(PyThreadState *, PyObject *); +extern PyObject *_Py_make_typevartuple(PyThreadState *, PyObject *); +extern PyObject *_Py_make_typealias(PyThreadState *, PyObject *); +extern PyObject *_Py_subscript_generic(PyThreadState *, PyObject *); +extern int _Py_initialize_generic(PyInterpreterState *); +extern void _Py_clear_generic_types(PyInterpreterState *); + +extern PyTypeObject _PyTypeAlias_Type; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_TYPEVAROBJECT_H */ diff --git a/Include/internal/pycore_unicodeobject.h b/Include/internal/pycore_unicodeobject.h index 63bf04b3e1b872c..a0d00af92e0f5d7 100644 --- a/Include/internal/pycore_unicodeobject.h +++ b/Include/internal/pycore_unicodeobject.h @@ -9,10 +9,262 @@ extern "C" { #endif #include "pycore_fileutils.h" // _Py_error_handler +#include "pycore_identifier.h" // _Py_Identifier +#include "pycore_ucnhash.h" // _PyUnicode_Name_CAPI -void _PyUnicode_ExactDealloc(PyObject *op); +/* --- Characters Type APIs ----------------------------------------------- */ -/* runtime lifecycle */ +extern int _PyUnicode_IsXidStart(Py_UCS4 ch); +extern int _PyUnicode_IsXidContinue(Py_UCS4 ch); +extern int _PyUnicode_ToLowerFull(Py_UCS4 ch, Py_UCS4 *res); +extern int _PyUnicode_ToTitleFull(Py_UCS4 ch, Py_UCS4 *res); +extern int _PyUnicode_ToUpperFull(Py_UCS4 ch, Py_UCS4 *res); +extern int _PyUnicode_ToFoldedFull(Py_UCS4 ch, Py_UCS4 *res); +extern int _PyUnicode_IsCaseIgnorable(Py_UCS4 ch); +extern int _PyUnicode_IsCased(Py_UCS4 ch); + +/* --- Unicode API -------------------------------------------------------- */ + +// Export for '_json' shared extension +PyAPI_FUNC(int) _PyUnicode_CheckConsistency( + PyObject *op, + int check_content); + +extern void _PyUnicode_ExactDealloc(PyObject *op); +extern Py_ssize_t _PyUnicode_InternedSize(void); + +// Get a copy of a Unicode string. +// Export for '_datetime' shared extension. 
+PyAPI_FUNC(PyObject*) _PyUnicode_Copy( + PyObject *unicode); + +/* Unsafe version of PyUnicode_Fill(): don't check arguments and so may crash + if parameters are invalid (e.g. if length is longer than the string). */ +extern void _PyUnicode_FastFill( + PyObject *unicode, + Py_ssize_t start, + Py_ssize_t length, + Py_UCS4 fill_char + ); + +/* Unsafe version of PyUnicode_CopyCharacters(): don't check arguments and so + may crash if parameters are invalid (e.g. if the output string + is too short). */ +extern void _PyUnicode_FastCopyCharacters( + PyObject *to, + Py_ssize_t to_start, + PyObject *from, + Py_ssize_t from_start, + Py_ssize_t how_many + ); + +/* Create a new string from a buffer of ASCII characters. + WARNING: Don't check if the string contains any non-ASCII character. */ +extern PyObject* _PyUnicode_FromASCII( + const char *buffer, + Py_ssize_t size); + +/* Compute the maximum character of the substring unicode[start:end]. + Return 127 for an empty string. */ +extern Py_UCS4 _PyUnicode_FindMaxChar ( + PyObject *unicode, + Py_ssize_t start, + Py_ssize_t end); + +/* --- _PyUnicodeWriter API ----------------------------------------------- */ + +/* Format the object based on the format_spec, as defined in PEP 3101 + (Advanced String Formatting). */ +extern int _PyUnicode_FormatAdvancedWriter( + _PyUnicodeWriter *writer, + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end); + +/* --- UTF-7 Codecs ------------------------------------------------------- */ + +extern PyObject* _PyUnicode_EncodeUTF7( + PyObject *unicode, /* Unicode object */ + int base64SetO, /* Encode RFC2152 Set O characters in base64 */ + int base64WhiteSpace, /* Encode whitespace (sp, ht, nl, cr) in base64 */ + const char *errors); /* error handling */ + +/* --- UTF-8 Codecs ------------------------------------------------------- */ + +// Export for '_tkinter' shared extension. +PyAPI_FUNC(PyObject*) _PyUnicode_AsUTF8String( + PyObject *unicode, + const char *errors); + +/* --- UTF-32 Codecs ------------------------------------------------------ */ + +// Export for '_tkinter' shared extension +PyAPI_FUNC(PyObject*) _PyUnicode_EncodeUTF32( + PyObject *object, /* Unicode object */ + const char *errors, /* error handling */ + int byteorder); /* byteorder to use 0=BOM+native;-1=LE,1=BE */ + +/* --- UTF-16 Codecs ------------------------------------------------------ */ + +// Returns a Python string object holding the UTF-16 encoded value of +// the Unicode data. +// +// If byteorder is not 0, output is written according to the following +// byte order: +// +// byteorder == -1: little endian +// byteorder == 0: native byte order (writes a BOM mark) +// byteorder == 1: big endian +// +// If byteorder is 0, the output string will always start with the +// Unicode BOM mark (U+FEFF). In the other two modes, no BOM mark is +// prepended. +// +// Export for '_tkinter' shared extension +PyAPI_FUNC(PyObject*) _PyUnicode_EncodeUTF16( + PyObject* unicode, /* Unicode object */ + const char *errors, /* error handling */ + int byteorder); /* byteorder to use 0=BOM+native;-1=LE,1=BE */ + +/* --- Unicode-Escape Codecs ---------------------------------------------- */ + +/* Variant of PyUnicode_DecodeUnicodeEscape that supports partial decoding. 
 */ +extern PyObject* _PyUnicode_DecodeUnicodeEscapeStateful( + const char *string, /* Unicode-Escape encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + Py_ssize_t *consumed); /* bytes consumed */ + +// Helper for PyUnicode_DecodeUnicodeEscape that detects invalid escape +// chars. +// Export for test_peg_generator. +PyAPI_FUNC(PyObject*) _PyUnicode_DecodeUnicodeEscapeInternal( + const char *string, /* Unicode-Escape encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + Py_ssize_t *consumed, /* bytes consumed */ + const char **first_invalid_escape); /* on return, points to first + invalid escaped char in + string. */ + +/* --- Raw-Unicode-Escape Codecs ---------------------------------------------- */ + +/* Variant of PyUnicode_DecodeRawUnicodeEscape that supports partial decoding. */ +extern PyObject* _PyUnicode_DecodeRawUnicodeEscapeStateful( + const char *string, /* Unicode-Escape encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + Py_ssize_t *consumed); /* bytes consumed */ + +/* --- Latin-1 Codecs ----------------------------------------------------- */ + +extern PyObject* _PyUnicode_AsLatin1String( + PyObject* unicode, + const char* errors); + +/* --- ASCII Codecs ------------------------------------------------------- */ + +extern PyObject* _PyUnicode_AsASCIIString( + PyObject* unicode, + const char* errors); + +/* --- Character Map Codecs ----------------------------------------------- */ + +/* Translate a Unicode object by applying a character mapping table to + it and return the resulting Unicode object. + + The mapping table must map Unicode ordinal integers to Unicode strings, + Unicode ordinal integers or None (causing deletion of the character). + + Mapping tables may be dictionaries or sequences. Unmapped character + ordinals (ones which cause a LookupError) are left untouched and + are copied as-is. +*/ +extern PyObject* _PyUnicode_EncodeCharmap( + PyObject *unicode, /* Unicode object */ + PyObject *mapping, /* encoding mapping */ + const char *errors); /* error handling */ + +/* --- Decimal Encoder ---------------------------------------------------- */ + +// Converts a Unicode object holding a decimal value to an ASCII string +// for use in the int, float and complex parsers. +// Transforms code points that have the decimal digit property to the +// corresponding ASCII digit code points. Transforms spaces to ASCII. +// Transforms code points starting from the first non-ASCII code point that +// is neither a decimal digit nor a space to the end into '?'. +// +// Export for '_testinternalcapi' shared extension. +PyAPI_FUNC(PyObject*) _PyUnicode_TransformDecimalAndSpaceToASCII( + PyObject *unicode); /* Unicode object */ + +/* --- Methods & Slots ---------------------------------------------------- */ + +extern PyObject* _PyUnicode_JoinArray( + PyObject *separator, + PyObject *const *items, + Py_ssize_t seqlen + ); + +/* Test whether a unicode string is equal to an ASCII identifier. Return 1 if + true, 0 otherwise. The right argument must be an ASCII identifier. + Any error that occurs inside will be cleared before return. */ +extern int _PyUnicode_EqualToASCIIId( + PyObject *left, /* Left string */ + _Py_Identifier *right /* Right identifier */ + ); + +// Test whether a unicode string is equal to an ASCII string. Return 1 if true, +// 0 otherwise. The right argument must be an ASCII-encoded string. +// Any error that occurs inside will be cleared before return.
+// Export for '_ctypes' shared extension +PyAPI_FUNC(int) _PyUnicode_EqualToASCIIString( + PyObject *left, + const char *right /* ASCII-encoded string */ + ); + +/* Externally visible for str.strip(unicode) */ +extern PyObject* _PyUnicode_XStrip( + PyObject *self, + int striptype, + PyObject *sepobj + ); + + +/* Using explicit passed-in values, insert the thousands grouping + into the string pointed to by buffer. For the argument descriptions, + see Objects/stringlib/localeutil.h */ +extern Py_ssize_t _PyUnicode_InsertThousandsGrouping( + _PyUnicodeWriter *writer, + Py_ssize_t n_buffer, + PyObject *digits, + Py_ssize_t d_pos, + Py_ssize_t n_digits, + Py_ssize_t min_width, + const char *grouping, + PyObject *thousands_sep, + Py_UCS4 *maxchar); + +/* --- Misc functions ----------------------------------------------------- */ + +extern PyObject* _PyUnicode_FormatLong(PyObject *, int, int, int); + +/* Fast equality check when the inputs are known to be exact unicode types + and where the hash values are equal (i.e. a very probable match) */ +extern int _PyUnicode_EQ(PyObject *, PyObject *); + +// Equality check. +// Export for '_pickle' shared extension. +PyAPI_FUNC(int) _PyUnicode_Equal(PyObject *, PyObject *); + +extern int _PyUnicode_WideCharString_Converter(PyObject *, void *); +extern int _PyUnicode_WideCharString_Opt_Converter(PyObject *, void *); + +// Export for test_peg_generator +PyAPI_FUNC(Py_ssize_t) _PyUnicode_ScanIdentifier(PyObject *); + +/* --- Runtime lifecycle -------------------------------------------------- */ extern void _PyUnicode_InitState(PyInterpreterState *); extern PyStatus _PyUnicode_InitGlobalObjects(PyInterpreterState *); @@ -22,7 +274,7 @@ extern void _PyUnicode_FiniTypes(PyInterpreterState *); extern PyTypeObject _PyUnicodeASCIIIter_Type; -/* other API */ +/* --- Other API ---------------------------------------------------------- */ struct _Py_unicode_runtime_ids { PyThread_type_lock lock; @@ -31,6 +283,10 @@ struct _Py_unicode_runtime_ids { Py_ssize_t next_index; }; +struct _Py_unicode_runtime_state { + struct _Py_unicode_runtime_ids ids; +}; + /* fs_codec.encoding is initialized to NULL. Later, it is set to a non-NULL string by _PyUnicode_InitEncodings(). */ struct _Py_unicode_fs_codec { @@ -48,12 +304,19 @@ struct _Py_unicode_ids { struct _Py_unicode_state { struct _Py_unicode_fs_codec fs_codec; + _PyUnicode_Name_CAPI *ucnhash_capi; + // Unicode identifiers (_Py_Identifier): see _PyUnicode_FromId() struct _Py_unicode_ids ids; }; +extern void _PyUnicode_InternInPlace(PyInterpreterState *interp, PyObject **p); extern void _PyUnicode_ClearInterned(PyInterpreterState *interp); +// Like PyUnicode_AsUTF8(), but check for embedded null characters. +// Export for '_sqlite3' shared extension. +PyAPI_FUNC(const char *) _PyUnicode_AsUTF8NoNUL(PyObject *); + #ifdef __cplusplus } diff --git a/Include/internal/pycore_unicodeobject_generated.h b/Include/internal/pycore_unicodeobject_generated.h new file mode 100644 index 000000000000000..be9baa3eebecfcf --- /dev/null +++ b/Include/internal/pycore_unicodeobject_generated.h @@ -0,0 +1,2096 @@ +#ifndef Py_INTERNAL_UNICODEOBJECT_GENERATED_H +#define Py_INTERNAL_UNICODEOBJECT_GENERATED_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +/* The following is auto-generated by Tools/build/generate_global_objects.py. 
*/ +static inline void +_PyUnicode_InitStaticStrings(PyInterpreterState *interp) { + PyObject *string; + string = &_Py_ID(CANCELLED); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(FINISHED); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(False); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(JSONDecodeError); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(PENDING); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(Py_Repr); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(TextIOWrapper); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(True); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(WarningMessage); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_WindowsConsoleIO); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__IOBase_closed); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__abc_tpflags__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__abs__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__abstractmethods__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__add__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__aenter__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__aexit__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__aiter__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__all__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__and__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__anext__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__annotations__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__args__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__asyncio_running_event_loop__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__await__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__bases__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + 
string = &_Py_ID(__bool__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__buffer__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__build_class__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__builtins__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__bytes__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__call__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__cantrace__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__class__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__class_getitem__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__classcell__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__classdict__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__classdictcell__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__complex__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__contains__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__copy__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__ctypes_from_outparam__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__del__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__delattr__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__delete__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__delitem__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__dict__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__dictoffset__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__dir__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__divmod__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__doc__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__enter__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__eq__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__exit__); + assert(_PyUnicode_CheckConsistency(string, 1)); + 
_PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__file__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__float__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__floordiv__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__format__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__fspath__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__ge__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__get__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__getattr__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__getattribute__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__getinitargs__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__getitem__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__getnewargs__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__getnewargs_ex__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__getstate__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__gt__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__hash__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__iadd__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__iand__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__ifloordiv__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__ilshift__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__imatmul__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__imod__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__import__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__imul__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__index__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__init__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__init_subclass__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__instancecheck__); + 
assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__int__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__invert__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__ior__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__ipow__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__irshift__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__isabstractmethod__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__isub__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__iter__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__itruediv__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__ixor__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__le__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__len__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__length_hint__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__lltrace__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__loader__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__lshift__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__lt__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__main__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__match_args__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__matmul__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__missing__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__mod__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__module__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__mro_entries__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__mul__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__name__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__ne__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__neg__); + 
assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__new__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__newobj__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__newobj_ex__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__next__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__notes__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__or__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__orig_class__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__origin__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__package__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__parameters__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__path__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__pos__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__pow__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__prepare__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__qualname__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__radd__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rand__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rdivmod__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__reduce__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__reduce_ex__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__release_buffer__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__repr__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__reversed__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rfloordiv__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rlshift__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rmatmul__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rmod__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = 
&_Py_ID(__rmul__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__ror__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__round__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rpow__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rrshift__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rshift__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rsub__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rtruediv__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__rxor__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__set__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__set_name__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__setattr__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__setitem__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__setstate__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__sizeof__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__slotnames__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__slots__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__spec__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__str__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__sub__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__subclasscheck__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__subclasshook__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__truediv__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__trunc__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__type_params__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__typing_is_unpacked_typevartuple__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__typing_prepare_subst__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__typing_subst__); + assert(_PyUnicode_CheckConsistency(string, 1)); + 
_PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__typing_unpacked_tuple_args__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__warningregistry__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__weaklistoffset__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__weakref__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(__xor__); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_abc_impl); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_abstract_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_active); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_annotation); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_anonymous_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_argtypes_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_as_parameter_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_asyncio_future_blocking); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_blksize); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_bootstrap); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_check_retval_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_dealloc_warn); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_feature_version); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_fields_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_finalizing); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_find_and_load); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_fix_up_module); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_flags_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_get_sourcefile); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_handle_fromlist); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_initializing); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_io); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string 
= &_Py_ID(_is_text_encoding); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_length_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_limbo); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_lock_unlock_module); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_loop); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_needs_com_addref_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_pack_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_restype_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_showwarnmsg); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_shutdown); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_slotnames); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_strptime_datetime); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_swappedbytes_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_type_); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_uninitialized_submodules); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_warn_unawaited_coroutine); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(_xoptions); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(a); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(abs_tol); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(access); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(aclose); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(add); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(add_done_callback); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(after_in_child); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(after_in_parent); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(aggregate_class); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(append); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(argdefs); + assert(_PyUnicode_CheckConsistency(string, 1)); + 
_PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(arguments); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(argv); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(as_integer_ratio); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(asend); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(ast); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(athrow); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(attribute); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(authorizer_callback); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(autocommit); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(b); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(backtick); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(base); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(before); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(big); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(binary_form); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(block); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(bound); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(buffer); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(buffer_callback); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(buffer_size); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(buffering); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(buffers); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(bufsize); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(builtins); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(byteorder); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(bytes); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(bytes_per_sep); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(c); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = 
&_Py_ID(c_call); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(c_exception); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(c_return); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(cached_statements); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(cadata); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(cafile); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(call); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(call_exception_handler); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(call_soon); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(callback); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(cancel); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(capath); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(category); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(cb_type); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(certfile); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(check_same_thread); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(clear); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(close); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(closed); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(closefd); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(closure); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_argcount); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_cellvars); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_code); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_consts); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_exceptiontable); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_filename); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_firstlineno); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_flags); 
+ assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_freevars); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_kwonlyargcount); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_linetable); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_name); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_names); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_nlocals); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_posonlyargcount); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_qualname); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_stacksize); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(co_varnames); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(code); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(command); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(comment_factory); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(compile_mode); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(consts); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(context); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(contravariant); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(cookie); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(copy); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(copyreg); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(coro); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(count); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(covariant); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(cwd); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(d); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(data); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(database); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(decode); + 
assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(decoder); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(default); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(defaultaction); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(delete); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(depth); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(detect_types); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(deterministic); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(device); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(dict); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(dictcomp); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(difference_update); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(digest); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(digest_size); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(digestmod); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(dir_fd); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(discard); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(dispatch_table); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(displayhook); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(dklen); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(doc); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(dont_inherit); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(dst); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(dst_dir_fd); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(e); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(eager_start); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(effective_ids); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(element_factory); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(encode); + 
assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(encoding); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(end); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(end_lineno); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(end_offset); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(endpos); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(entrypoint); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(env); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(errors); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(event); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(eventmask); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(excepthook); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(exception); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(existing_file_name); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(exp); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(extend); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(extra_tokens); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(f); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(facility); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(factory); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(false); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(family); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fanout); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fd); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fd2); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fdel); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fget); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(file); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(file_actions); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, 
&string); + string = &_Py_ID(filename); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fileno); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(filepath); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fillvalue); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(filters); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(final); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(find_class); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fix_imports); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(flags); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(flush); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(follow_symlinks); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(format); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(from_param); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fromlist); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fromtimestamp); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fromutc); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(fset); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(func); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(future); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(g); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(generation); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(genexpr); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(get); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(get_debug); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(get_event_loop); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(get_loop); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(get_source); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(getattr); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(getstate); + 
assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(gid); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(globals); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(groupindex); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(groups); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(h); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(handle); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(hash_name); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(header); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(headers); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(hi); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(hook); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(id); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(ident); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(identity_hint); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(ignore); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(imag); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(importlib); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(in_fd); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(incoming); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(indexgroup); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(inf); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(infer_variance); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(inheritable); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(initial); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(initial_bytes); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(initial_value); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(initval); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(inner_size); + assert(_PyUnicode_CheckConsistency(string, 1)); + 
_PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(input); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(insert_comments); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(insert_pis); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(instructions); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(intern); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(intersection); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(interval); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(is_running); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(isatty); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(isinstance); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(isoformat); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(isolation_level); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(istext); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(item); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(items); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(iter); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(iterable); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(iterations); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(join); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(jump); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(keepends); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(key); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(keyfile); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(keys); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(kind); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(kw); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(kw1); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(kw2); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(lambda); + 
assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(last); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(last_exc); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(last_node); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(last_traceback); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(last_type); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(last_value); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(latin1); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(leaf_size); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(len); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(length); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(level); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(limit); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(line); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(line_buffering); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(lineno); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(listcomp); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(little); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(lo); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(locale); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(locals); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(logoption); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(loop); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(mapping); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(match); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(max_length); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(maxdigits); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(maxevents); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(maxmem); + assert(_PyUnicode_CheckConsistency(string, 1)); + 
_PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(maxsplit); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(maxvalue); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(memLevel); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(memlimit); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(message); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(metaclass); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(metadata); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(method); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(mod); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(mode); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(module); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(module_globals); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(modules); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(mro); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(msg); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(mycmp); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(n); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(n_arg); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(n_fields); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(n_sequence_fields); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(n_unnamed_fields); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(name); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(name_from); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(namespace_separator); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(namespaces); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(narg); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(ndigits); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(new_file_name); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = 
&_Py_ID(new_limit); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(newline); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(newlines); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(next); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(nlocals); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(node_depth); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(node_offset); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(ns); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(nstype); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(nt); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(null); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(number); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(obj); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(object); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(offset); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(offset_dst); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(offset_src); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(on_type_read); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(onceregistry); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(only_keys); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(oparg); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(opcode); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(open); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(opener); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(operation); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(optimize); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(options); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(order); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(origin); + assert(_PyUnicode_CheckConsistency(string, 1)); + 
_PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(out_fd); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(outgoing); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(overlapped); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(owner); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(p); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(pages); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(parent); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(password); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(path); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(pattern); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(peek); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(persistent_id); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(persistent_load); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(person); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(pi_factory); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(pid); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(policy); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(pos); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(pos1); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(pos2); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(posix); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(print_file_and_line); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(priority); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(progress); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(progress_handler); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(progress_routine); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(proto); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(protocol); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = 
&_Py_ID(ps1); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(ps2); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(query); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(quotetabs); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(r); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(raw); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(read); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(read1); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(readable); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(readall); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(readinto); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(readinto1); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(readline); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(readonly); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(real); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(reducer_override); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(registry); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(rel_tol); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(release); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(reload); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(repl); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(replace); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(reserved); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(reset); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(resetids); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(return); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(reverse); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(reversed); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(s); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, 
&string); + string = &_Py_ID(salt); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(sched_priority); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(scheduler); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(seek); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(seekable); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(selectors); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(self); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(send); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(sep); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(sequence); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(server_hostname); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(server_side); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(session); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(setcomp); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(setpgroup); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(setsid); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(setsigdef); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(setsigmask); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(setstate); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(shape); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(show_cmd); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(signed); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(size); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(sizehint); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(skip_file_prefixes); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(sleep); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(sock); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(sort); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(source); + 
assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(source_traceback); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(src); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(src_dir_fd); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(stacklevel); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(start); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(statement); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(status); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(stderr); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(stdin); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(stdout); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(step); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(steps); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(store_name); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(strategy); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(strftime); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(strict); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(strict_mode); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(string); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(sub_key); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(symmetric_difference_update); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(tabsize); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(tag); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(target); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(target_is_directory); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(task); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(tb_frame); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(tb_lasti); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(tb_lineno); + assert(_PyUnicode_CheckConsistency(string, 
1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(tb_next); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(tell); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(template); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(term); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(text); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(threading); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(throw); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(timeout); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(times); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(timetuple); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(top); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(trace_callback); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(trailers); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(translate); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(true); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(truncate); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(twice); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(txt); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(type); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(type_params); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(tz); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(tzname); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(uid); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(unlink); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(unraisablehook); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(uri); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(usedforsecurity); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(value); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(values); + 
assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(version);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(volume);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(warnings);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(warnoptions);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(wbits);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(week);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(weekday);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(which);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(who);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(withdata);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(writable);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(write);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(write_through);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(x);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(year);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+    string = &_Py_ID(zdict);
+    assert(_PyUnicode_CheckConsistency(string, 1));
+    _PyUnicode_InternInPlace(interp, &string);
+}
+/* End auto-generated code */
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_UNICODEOBJECT_GENERATED_H */
diff --git a/Include/internal/pycore_uops.h b/Include/internal/pycore_uops.h
new file mode 100644
index 000000000000000..e2b94894681f44d
--- /dev/null
+++ b/Include/internal/pycore_uops.h
@@ -0,0 +1,35 @@
+#ifndef Py_INTERNAL_UOPS_H
+#define Py_INTERNAL_UOPS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_frame.h"  // _PyInterpreterFrame
+
+#define _Py_UOP_MAX_TRACE_LENGTH 512
+
+typedef struct {
+    uint16_t opcode;
+    uint16_t oparg;
+    uint32_t target;
+    uint64_t operand;  // A cache entry
+} _PyUOpInstruction;
+
+typedef struct {
+    _PyExecutorObject base;
+    _PyUOpInstruction trace[1];
+} _PyUOpExecutorObject;
+
+_PyInterpreterFrame *_PyUopExecute(
+    _PyExecutorObject *executor,
+    _PyInterpreterFrame *frame,
+    PyObject **stack_pointer);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_UOPS_H */
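[Editorial note, not part of the diff] The new Include/internal/pycore_uops.h above gives the micro-op (uop) trace its in-memory form: _PyUOpExecutorObject embeds a flexible trace[] of _PyUOpInstruction entries, each carrying a uop opcode, its oparg, a bytecode target offset, and a 64-bit operand used as a cache slot, with _Py_UOP_MAX_TRACE_LENGTH bounding the trace. The sketch below is hypothetical Py_BUILD_CORE-only debugging code; dump_uop_trace() and its explicit length parameter are not part of the header (the structs shown do not record the trace length).

    /* Hypothetical sketch: print a uop trace using only the fields declared
       in pycore_uops.h. The caller is assumed to know the trace length. */
    #include <assert.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include "Python.h"
    #include "pycore_uops.h"   // requires Py_BUILD_CORE

    static void
    dump_uop_trace(const _PyUOpExecutorObject *exec, Py_ssize_t length)
    {
        assert(length <= _Py_UOP_MAX_TRACE_LENGTH);
        for (Py_ssize_t i = 0; i < length; i++) {
            const _PyUOpInstruction *inst = &exec->trace[i];
            printf("%zd: opcode=%u oparg=%u target=%u operand=%" PRIu64 "\n",
                   i, (unsigned)inst->opcode, (unsigned)inst->oparg,
                   (unsigned)inst->target, (uint64_t)inst->operand);
        }
    }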
diff --git a/Include/internal/pycore_warnings.h b/Include/internal/pycore_warnings.h
index efb4f1cd7eac80a..9785d7cc467de2e 100644
--- a/Include/internal/pycore_warnings.h
+++ b/Include/internal/pycore_warnings.h
@@ -19,9 +19,10 @@ struct _warnings_runtime_state {
 extern int _PyWarnings_InitState(PyInterpreterState *interp);
-PyAPI_FUNC(PyObject*) _PyWarnings_Init(void);
+extern PyObject* _PyWarnings_Init(void);
 extern void _PyErr_WarnUnawaitedCoroutine(PyObject *coro);
+extern void _PyErr_WarnUnawaitedAgenMethod(PyAsyncGenObject *agen, PyObject *method);
 #ifdef __cplusplus
 }
diff --git a/Include/internal/pycore_weakref.h b/Include/internal/pycore_weakref.h
new file mode 100644
index 000000000000000..eacbe14c9032890
--- /dev/null
+++ b/Include/internal/pycore_weakref.h
@@ -0,0 +1,64 @@
+#ifndef Py_INTERNAL_WEAKREF_H
+#define Py_INTERNAL_WEAKREF_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_critical_section.h"  // Py_BEGIN_CRITICAL_SECTION()
+
+static inline PyObject* _PyWeakref_GET_REF(PyObject *ref_obj) {
+    assert(PyWeakref_Check(ref_obj));
+    PyWeakReference *ref = _Py_CAST(PyWeakReference*, ref_obj);
+    PyObject *obj = ref->wr_object;
+
+    if (obj == Py_None) {
+        // clear_weakref() was called
+        return NULL;
+    }
+
+    // Explanation for the Py_REFCNT() check: when a weakref's target is part
+    // of a long chain of deallocations which triggers the trashcan mechanism,
+    // clearing the weakrefs can be delayed long after the target's refcount
+    // has dropped to zero. In the meantime, code accessing the weakref will
+    // be able to "see" the target object even though it is supposed to be
+    // unreachable. See issue gh-60806.
+    Py_ssize_t refcnt = Py_REFCNT(obj);
+    if (refcnt == 0) {
+        return NULL;
+    }
+
+    assert(refcnt > 0);
+    return Py_NewRef(obj);
+}
+
+static inline int _PyWeakref_IS_DEAD(PyObject *ref_obj) {
+    assert(PyWeakref_Check(ref_obj));
+    int is_dead;
+    Py_BEGIN_CRITICAL_SECTION(ref_obj);
+    PyWeakReference *ref = _Py_CAST(PyWeakReference*, ref_obj);
+    PyObject *obj = ref->wr_object;
+    if (obj == Py_None) {
+        // clear_weakref() was called
+        is_dead = 1;
+    }
+    else {
+        // See _PyWeakref_GET_REF() for the rationale of this test
+        is_dead = (Py_REFCNT(obj) == 0);
+    }
+    Py_END_CRITICAL_SECTION();
+    return is_dead;
+}
+
+extern Py_ssize_t _PyWeakref_GetWeakrefCount(PyWeakReference *head);
+
+extern void _PyWeakref_ClearRef(PyWeakReference *self);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_WEAKREF_H */
+
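[Editorial note, not part of the diff] The _PyWeakref_GET_REF() helper added above returns a new strong reference to the referent, or NULL once the referent is dead (including the gh-60806 window where the refcount has already hit zero but the weakref has not yet been cleared), so callers own the result and must release it. A hedged usage sketch; call_if_alive() is a hypothetical, core-only helper:

    /* Hypothetical Py_BUILD_CORE-only helper: call the weakref's referent
       if it is still alive. Returns 1 if called, 0 if dead, -1 on error. */
    #include "Python.h"
    #include "pycore_weakref.h"

    static int
    call_if_alive(PyObject *weakref)
    {
        PyObject *obj = _PyWeakref_GET_REF(weakref);  // new strong reference or NULL
        if (obj == NULL) {
            return 0;                                 // referent already gone
        }
        PyObject *res = PyObject_CallNoArgs(obj);
        Py_DECREF(obj);                               // drop the reference taken above
        if (res == NULL) {
            return -1;
        }
        Py_DECREF(res);
        return 1;
    }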
#endif diff --git a/Include/iterobject.h b/Include/iterobject.h index fff30f7176fdeb7..e69d09719bb4d12 100644 --- a/Include/iterobject.h +++ b/Include/iterobject.h @@ -7,9 +7,6 @@ extern "C" { PyAPI_DATA(PyTypeObject) PySeqIter_Type; PyAPI_DATA(PyTypeObject) PyCallIter_Type; -#ifdef Py_BUILD_CORE -extern PyTypeObject _PyAnextAwaitable_Type; -#endif #define PySeqIter_Check(op) Py_IS_TYPE((op), &PySeqIter_Type) diff --git a/Include/longobject.h b/Include/longobject.h index e559e238ae5a355..7393254cd24a9bf 100644 --- a/Include/longobject.h +++ b/Include/longobject.h @@ -18,12 +18,18 @@ PyAPI_FUNC(PyObject *) PyLong_FromUnsignedLong(unsigned long); PyAPI_FUNC(PyObject *) PyLong_FromSize_t(size_t); PyAPI_FUNC(PyObject *) PyLong_FromSsize_t(Py_ssize_t); PyAPI_FUNC(PyObject *) PyLong_FromDouble(double); + PyAPI_FUNC(long) PyLong_AsLong(PyObject *); PyAPI_FUNC(long) PyLong_AsLongAndOverflow(PyObject *, int *); PyAPI_FUNC(Py_ssize_t) PyLong_AsSsize_t(PyObject *); PyAPI_FUNC(size_t) PyLong_AsSize_t(PyObject *); PyAPI_FUNC(unsigned long) PyLong_AsUnsignedLong(PyObject *); PyAPI_FUNC(unsigned long) PyLong_AsUnsignedLongMask(PyObject *); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000 +PyAPI_FUNC(int) PyLong_AsInt(PyObject *); +#endif + PyAPI_FUNC(PyObject *) PyLong_GetInfo(void); /* It may be useful in the future. I've added it in the PyInt -> PyLong diff --git a/Include/memoryobject.h b/Include/memoryobject.h index 19aec679a5fad16..2c9146aa2b5b06e 100644 --- a/Include/memoryobject.h +++ b/Include/memoryobject.h @@ -6,20 +6,10 @@ extern "C" { #endif -#ifndef Py_LIMITED_API -PyAPI_DATA(PyTypeObject) _PyManagedBuffer_Type; -#endif PyAPI_DATA(PyTypeObject) PyMemoryView_Type; #define PyMemoryView_Check(op) Py_IS_TYPE((op), &PyMemoryView_Type) -#ifndef Py_LIMITED_API -/* Get a pointer to the memoryview's private copy of the exporter's buffer. */ -#define PyMemoryView_GET_BUFFER(op) (&((PyMemoryViewObject *)(op))->view) -/* Get a pointer to the exporting object (this may be NULL!). */ -#define PyMemoryView_GET_BASE(op) (((PyMemoryViewObject *)(op))->view.obj) -#endif - PyAPI_FUNC(PyObject *) PyMemoryView_FromObject(PyObject *base); #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 PyAPI_FUNC(PyObject *) PyMemoryView_FromMemory(char *mem, Py_ssize_t size, @@ -32,38 +22,10 @@ PyAPI_FUNC(PyObject *) PyMemoryView_GetContiguous(PyObject *base, int buffertype, char order); - -/* The structs are declared here so that macros can work, but they shouldn't - be considered public. Don't access their fields directly, use the macros - and functions instead! 
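A short sketch of the PyLong_AsInt() function declared above; unlike PyLong_AsLong() plus a manual range check, it raises OverflowError when the value does not fit in a C int. The method function below is hypothetical.

    #include "Python.h"

    static PyObject *
    double_it(PyObject *self, PyObject *arg)
    {
        int n = PyLong_AsInt(arg);          // new in the hunk above
        if (n == -1 && PyErr_Occurred()) {
            return NULL;                    // TypeError or OverflowError
        }
        return PyLong_FromLong(2L * n);
    }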
*/ #ifndef Py_LIMITED_API -#define _Py_MANAGED_BUFFER_RELEASED 0x001 /* access to exporter blocked */ -#define _Py_MANAGED_BUFFER_FREE_FORMAT 0x002 /* free format */ -typedef struct { - PyObject_HEAD - int flags; /* state flags */ - Py_ssize_t exports; /* number of direct memoryview exports */ - Py_buffer master; /* snapshot buffer obtained from the original exporter */ -} _PyManagedBufferObject; - - -/* memoryview state flags */ -#define _Py_MEMORYVIEW_RELEASED 0x001 /* access to master buffer blocked */ -#define _Py_MEMORYVIEW_C 0x002 /* C-contiguous layout */ -#define _Py_MEMORYVIEW_FORTRAN 0x004 /* Fortran contiguous layout */ -#define _Py_MEMORYVIEW_SCALAR 0x008 /* scalar: ndim = 0 */ -#define _Py_MEMORYVIEW_PIL 0x010 /* PIL-style layout */ - -typedef struct { - PyObject_VAR_HEAD - _PyManagedBufferObject *mbuf; /* managed buffer */ - Py_hash_t hash; /* hash value for read-only views */ - int flags; /* state flags */ - Py_ssize_t exports; /* number of buffer re-exports */ - Py_buffer view; /* private copy of the exporter's view */ - PyObject *weakreflist; - Py_ssize_t ob_array[1]; /* shape, strides, suboffsets */ -} PyMemoryViewObject; +# define Py_CPYTHON_MEMORYOBJECT_H +# include "cpython/memoryobject.h" +# undef Py_CPYTHON_MEMORYOBJECT_H #endif #ifdef __cplusplus diff --git a/Include/methodobject.h b/Include/methodobject.h index 72af5ad933df7fd..2381e8482b82a8b 100644 --- a/Include/methodobject.h +++ b/Include/methodobject.h @@ -49,8 +49,6 @@ PyAPI_FUNC(PyCFunction) PyCFunction_GetFunction(PyObject *); PyAPI_FUNC(PyObject *) PyCFunction_GetSelf(PyObject *); PyAPI_FUNC(int) PyCFunction_GetFlags(PyObject *); -Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyCFunction_Call(PyObject *, PyObject *, PyObject *); - struct PyMethodDef { const char *ml_name; /* The name of the built-in function/method */ PyCFunction ml_meth; /* The C function that implements it */ diff --git a/Include/modsupport.h b/Include/modsupport.h index 4e369bd56b4d203..450cc99c404c170 100644 --- a/Include/modsupport.h +++ b/Include/modsupport.h @@ -1,3 +1,4 @@ +// Module support interface #ifndef Py_MODSUPPORT_H #define Py_MODSUPPORT_H @@ -5,47 +6,34 @@ extern "C" { #endif -/* Module support interface */ - -#include // va_list - -/* If PY_SSIZE_T_CLEAN is defined, each functions treats #-specifier - to mean Py_ssize_t */ -#ifdef PY_SSIZE_T_CLEAN -#define PyArg_Parse _PyArg_Parse_SizeT -#define PyArg_ParseTuple _PyArg_ParseTuple_SizeT -#define PyArg_ParseTupleAndKeywords _PyArg_ParseTupleAndKeywords_SizeT -#define PyArg_VaParse _PyArg_VaParse_SizeT -#define PyArg_VaParseTupleAndKeywords _PyArg_VaParseTupleAndKeywords_SizeT -#define Py_BuildValue _Py_BuildValue_SizeT -#define Py_VaBuildValue _Py_VaBuildValue_SizeT -#endif - -/* Due to a glitch in 3.2, the _SizeT versions weren't exported from the DLL. 
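With the PY_SSIZE_T_CLEAN alias machinery removed above, the '#' length formats are taken to always expect Py_ssize_t. A minimal sketch under that assumption; the function is hypothetical.

    #include "Python.h"

    static PyObject *
    byte_length(PyObject *self, PyObject *args)
    {
        const char *data;
        Py_ssize_t size;                    // '#' now always means Py_ssize_t
        if (!PyArg_ParseTuple(args, "s#", &data, &size)) {
            return NULL;
        }
        return PyLong_FromSsize_t(size);
    }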
*/ -#if !defined(PY_SSIZE_T_CLEAN) || !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 PyAPI_FUNC(int) PyArg_Parse(PyObject *, const char *, ...); PyAPI_FUNC(int) PyArg_ParseTuple(PyObject *, const char *, ...); PyAPI_FUNC(int) PyArg_ParseTupleAndKeywords(PyObject *, PyObject *, - const char *, char **, ...); + const char *, char **, ...); PyAPI_FUNC(int) PyArg_VaParse(PyObject *, const char *, va_list); PyAPI_FUNC(int) PyArg_VaParseTupleAndKeywords(PyObject *, PyObject *, - const char *, char **, va_list); -#endif + const char *, char **, va_list); + PyAPI_FUNC(int) PyArg_ValidateKeywordArguments(PyObject *); PyAPI_FUNC(int) PyArg_UnpackTuple(PyObject *, const char *, Py_ssize_t, Py_ssize_t, ...); PyAPI_FUNC(PyObject *) Py_BuildValue(const char *, ...); -PyAPI_FUNC(PyObject *) _Py_BuildValue_SizeT(const char *, ...); - - PyAPI_FUNC(PyObject *) Py_VaBuildValue(const char *, va_list); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030a0000 // Add an attribute with name 'name' and value 'obj' to the module 'mod. -// On success, return 0 on success. +// On success, return 0. // On error, raise an exception and return -1. PyAPI_FUNC(int) PyModule_AddObjectRef(PyObject *mod, const char *name, PyObject *value); +#endif /* Py_LIMITED_API */ + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000 +// Similar to PyModule_AddObjectRef() but steal a reference to 'value'. +PyAPI_FUNC(int) PyModule_Add(PyObject *mod, const char *name, PyObject *value); +#endif /* Py_LIMITED_API */ -// Similar to PyModule_AddObjectRef() but steal a reference to 'obj' -// (Py_DECREF(obj)) on success (if it returns 0). +// Similar to PyModule_AddObjectRef() and PyModule_Add() but steal +// a reference to 'value' on success and only on success. +// Errorprone. Should not be used in new code. PyAPI_FUNC(int) PyModule_AddObject(PyObject *mod, const char *, PyObject *value); PyAPI_FUNC(int) PyModule_AddIntConstant(PyObject *, const char *, long); @@ -120,14 +108,6 @@ PyAPI_FUNC(int) PyModule_ExecDef(PyObject *module, PyModuleDef *def); #define PYTHON_ABI_VERSION 3 #define PYTHON_ABI_STRING "3" -#ifdef Py_TRACE_REFS - /* When we are tracing reference counts, rename module creation functions so - modules compiled with incompatible settings will generate a - link-time error. 
*/ - #define PyModule_Create2 PyModule_Create2TraceRefs - #define PyModule_FromDefAndSpec2 PyModule_FromDefAndSpec2TraceRefs -#endif - PyAPI_FUNC(PyObject *) PyModule_Create2(PyModuleDef*, int apiver); #ifdef Py_LIMITED_API @@ -154,12 +134,6 @@ PyAPI_FUNC(PyObject *) PyModule_FromDefAndSpec2(PyModuleDef *def, #endif /* New in 3.5 */ -#ifndef Py_LIMITED_API -# define Py_CPYTHON_MODSUPPORT_H -# include "cpython/modsupport.h" -# undef Py_CPYTHON_MODSUPPORT_H -#endif - #ifdef __cplusplus } #endif diff --git a/Include/moduleobject.h b/Include/moduleobject.h index fbb2c5ae79444b1..42b87cc4e910126 100644 --- a/Include/moduleobject.h +++ b/Include/moduleobject.h @@ -27,11 +27,6 @@ PyAPI_FUNC(PyObject *) PyModule_GetNameObject(PyObject *); PyAPI_FUNC(const char *) PyModule_GetName(PyObject *); Py_DEPRECATED(3.2) PyAPI_FUNC(const char *) PyModule_GetFilename(PyObject *); PyAPI_FUNC(PyObject *) PyModule_GetFilenameObject(PyObject *); -#ifndef Py_LIMITED_API -PyAPI_FUNC(void) _PyModule_Clear(PyObject *); -PyAPI_FUNC(void) _PyModule_ClearDict(PyObject *); -PyAPI_FUNC(int) _PyModuleSpec_IsInitializing(PyObject *); -#endif PyAPI_FUNC(PyModuleDef*) PyModule_GetDef(PyObject*); PyAPI_FUNC(void*) PyModule_GetState(PyObject*); @@ -43,8 +38,22 @@ PyAPI_DATA(PyTypeObject) PyModuleDef_Type; typedef struct PyModuleDef_Base { PyObject_HEAD + /* The function used to re-initialize the module. + This is only set for legacy (single-phase init) extension modules + and only used for those that support multiple initializations + (m_size >= 0). + It is set by _PyImport_LoadDynamicModuleWithSpec() + and _imp.create_builtin(). */ PyObject* (*m_init)(void); + /* The module's index into its interpreter's modules_by_index cache. + This is set for all extension modules but only used for legacy ones. + (See PyInterpreterState.modules_by_index for more info.) + It is set by PyModuleDef_Init(). */ Py_ssize_t m_index; + /* A copy of the module's __dict__ after the first time it was loaded. + This is only set/used for legacy modules that do not support + multiple initializations. + It is set by _PyImport_FixupExtensionObject(). */ PyObject* m_copy; } PyModuleDef_Base; @@ -64,13 +73,23 @@ struct PyModuleDef_Slot { #define Py_mod_create 1 #define Py_mod_exec 2 +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030c0000 +# define Py_mod_multiple_interpreters 3 +#endif #ifndef Py_LIMITED_API -#define _Py_mod_LAST_SLOT 2 +#define _Py_mod_LAST_SLOT 3 #endif #endif /* New in 3.5 */ +/* for Py_mod_multiple_interpreters: */ +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030c0000 +# define Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED ((void *)0) +# define Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED ((void *)1) +# define Py_MOD_PER_INTERPRETER_GIL_SUPPORTED ((void *)2) +#endif + struct PyModuleDef { PyModuleDef_Base m_base; const char* m_name; @@ -83,12 +102,6 @@ struct PyModuleDef { freefunc m_free; }; - -// Internal C API -#ifdef Py_BUILD_CORE -extern int _PyModule_IsExtension(PyObject *obj); -#endif - #ifdef __cplusplus } #endif diff --git a/Include/object.h b/Include/object.h index 75624fe8c77a514..6b70a4948444767 100644 --- a/Include/object.h +++ b/Include/object.h @@ -51,39 +51,102 @@ A standard interface exists for objects that contain an array of items whose size is determined when the object is allocated. */ -#include "pystats.h" - /* Py_DEBUG implies Py_REF_DEBUG. 
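A sketch of a module definition exercising the additions above: the Py_mod_multiple_interpreters slot advertising per-interpreter GIL support, and PyModule_Add(), which steals a reference to its value whether or not it succeeds. The module name and exec function are hypothetical.

    #include "Python.h"

    static int
    example_exec(PyObject *mod)
    {
        // PyModule_Add() steals the reference even on failure, so no manual
        // Py_DECREF is needed; a NULL value simply makes it return -1.
        if (PyModule_Add(mod, "answer", PyLong_FromLong(42)) < 0) {
            return -1;
        }
        return 0;
    }

    static PyModuleDef_Slot example_slots[] = {
        {Py_mod_exec, example_exec},
        {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
        {0, NULL}
    };

    static struct PyModuleDef example_def = {
        PyModuleDef_HEAD_INIT,
        .m_name = "_example",
        .m_size = 0,
        .m_slots = example_slots,
    };

    PyMODINIT_FUNC
    PyInit__example(void)
    {
        return PyModuleDef_Init(&example_def);
    }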
*/ #if defined(Py_DEBUG) && !defined(Py_REF_DEBUG) # define Py_REF_DEBUG #endif -#if defined(Py_LIMITED_API) && defined(Py_TRACE_REFS) -# error Py_LIMITED_API is incompatible with Py_TRACE_REFS -#endif +/* PyObject_HEAD defines the initial segment of every PyObject. */ +#define PyObject_HEAD PyObject ob_base; -#ifdef Py_TRACE_REFS -/* Define pointers to support a doubly-linked list of all live heap objects. */ -#define _PyObject_HEAD_EXTRA \ - PyObject *_ob_next; \ - PyObject *_ob_prev; +/* +Immortalization: + +The following indicates the immortalization strategy depending on the amount +of available bits in the reference count field. All strategies are backwards +compatible but the specific reference count value or immortalization check +might change depending on the specializations for the underlying system. -#define _PyObject_EXTRA_INIT _Py_NULL, _Py_NULL, +Proper deallocation of immortal instances requires distinguishing between +statically allocated immortal instances vs those promoted by the runtime to be +immortal. The latter should be the only instances that require +cleanup during runtime finalization. +*/ + +#if SIZEOF_VOID_P > 4 +/* +In 64+ bit systems, an object will be marked as immortal by setting all of the +lower 32 bits of the reference count field, which is equal to: 0xFFFFFFFF + +Using the lower 32 bits makes the value backwards compatible by allowing +C-Extensions without the updated checks in Py_INCREF and Py_DECREF to safely +increase and decrease the objects reference count. The object would lose its +immortality, but the execution would still be correct. + +Reference count increases will use saturated arithmetic, taking advantage of +having all the lower 32 bits set, which will avoid the reference count to go +beyond the refcount limit. Immortality checks for reference count decreases will +be done by checking the bit sign flag in the lower 32 bits. +*/ +#define _Py_IMMORTAL_REFCNT UINT_MAX #else -# define _PyObject_HEAD_EXTRA -# define _PyObject_EXTRA_INIT -#endif +/* +In 32 bit systems, an object will be marked as immortal by setting all of the +lower 30 bits of the reference count field, which is equal to: 0x3FFFFFFF -/* PyObject_HEAD defines the initial segment of every PyObject. */ -#define PyObject_HEAD PyObject ob_base; +Using the lower 30 bits makes the value backwards compatible by allowing +C-Extensions without the updated checks in Py_INCREF and Py_DECREF to safely +increase and decrease the objects reference count. The object would lose its +immortality, but the execution would still be correct. + +Reference count increases and decreases will first go through an immortality +check by comparing the reference count field to the immortality reference count. +*/ +#define _Py_IMMORTAL_REFCNT (UINT_MAX >> 2) +#endif -#define PyObject_HEAD_INIT(type) \ - { _PyObject_EXTRA_INIT \ - 1, (type) }, +// Py_GIL_DISABLED builds indicate immortal objects using `ob_ref_local`, which is +// always 32-bits. +#ifdef Py_GIL_DISABLED +#define _Py_IMMORTAL_REFCNT_LOCAL UINT32_MAX +#endif -#define PyVarObject_HEAD_INIT(type, size) \ - { PyObject_HEAD_INIT(type) (size) }, +// Kept for backward compatibility. It was needed by Py_TRACE_REFS build. +#define _PyObject_EXTRA_INIT + +// Make all internal uses of PyObject_HEAD_INIT immortal while preserving the +// C-API expectation that the refcnt will be set to 1. 
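A small illustration of the immortalization scheme described above, assuming an extension built against these headers: reference-count operations on an immortal object such as Py_None leave its count pinned at the immortal value.

    #include <assert.h>
    #include "Python.h"

    static void
    demo_immortal_refcnt(void)
    {
        Py_ssize_t before = Py_REFCNT(Py_None);
        Py_INCREF(Py_None);
        Py_DECREF(Py_None);
        // Still the saturated/immortal value: the INCREF/DECREF pair is a no-op.
        assert(Py_REFCNT(Py_None) == before);
    }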
+#if defined(Py_GIL_DISABLED) +#define PyObject_HEAD_INIT(type) \ + { \ + 0, \ + 0, \ + { 0 }, \ + 0, \ + _Py_IMMORTAL_REFCNT_LOCAL, \ + 0, \ + (type), \ + }, +#elif defined(Py_BUILD_CORE) +#define PyObject_HEAD_INIT(type) \ + { \ + { _Py_IMMORTAL_REFCNT }, \ + (type) \ + }, +#else +#define PyObject_HEAD_INIT(type) \ + { \ + { 1 }, \ + (type) \ + }, +#endif /* Py_BUILD_CORE */ + +#define PyVarObject_HEAD_INIT(type, size) \ + { \ + PyObject_HEAD_INIT(type) \ + (size) \ + }, /* PyObject_VAR_HEAD defines the initial segment of all variable-size * container objects. These end with a declaration of an array with 1 @@ -99,11 +162,65 @@ whose size is determined when the object is allocated. * by hand. Similarly every pointer to a variable-size Python object can, * in addition, be cast to PyVarObject*. */ +#ifndef Py_GIL_DISABLED +struct _object { +#if (defined(__GNUC__) || defined(__clang__)) \ + && !(defined __STDC_VERSION__ && __STDC_VERSION__ >= 201112L) + // On C99 and older, anonymous union is a GCC and clang extension + __extension__ +#endif +#ifdef _MSC_VER + // Ignore MSC warning C4201: "nonstandard extension used: + // nameless struct/union" + __pragma(warning(push)) + __pragma(warning(disable: 4201)) +#endif + union { + Py_ssize_t ob_refcnt; +#if SIZEOF_VOID_P > 4 + PY_UINT32_T ob_refcnt_split[2]; +#endif + }; +#ifdef _MSC_VER + __pragma(warning(pop)) +#endif + + PyTypeObject *ob_type; +}; +#else +// Objects that are not owned by any thread use a thread id (tid) of zero. +// This includes both immortal objects and objects whose reference count +// fields have been merged. +#define _Py_UNOWNED_TID 0 + +// The shared reference count uses the two least-significant bits to store +// flags. The remaining bits are used to store the reference count. +#define _Py_REF_SHARED_SHIFT 2 +#define _Py_REF_SHARED_FLAG_MASK 0x3 + +// The shared flags are initialized to zero. +#define _Py_REF_SHARED_INIT 0x0 +#define _Py_REF_MAYBE_WEAKREF 0x1 +#define _Py_REF_QUEUED 0x2 +#define _Py_REF_MERGED 0x3 + +// Create a shared field from a refcnt and desired flags +#define _Py_REF_SHARED(refcnt, flags) (((refcnt) << _Py_REF_SHARED_SHIFT) + (flags)) + +// NOTE: In non-free-threaded builds, `struct _PyMutex` is defined in +// pycore_lock.h. See pycore_lock.h for more details. +struct _PyMutex { uint8_t v; }; + struct _object { - _PyObject_HEAD_EXTRA - Py_ssize_t ob_refcnt; + uintptr_t ob_tid; // thread id (or zero) + uint16_t _padding; + struct _PyMutex ob_mutex; // per-object lock + uint8_t ob_gc_bits; // gc-related state + uint32_t ob_ref_local; // local reference count + Py_ssize_t ob_ref_shared; // shared (atomic) reference count PyTypeObject *ob_type; }; +#endif /* Cast argument to PyObject* type. 
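A sketch of how the reworked PyObject_HEAD_INIT / PyVarObject_HEAD_INIT initializers above are consumed: a statically allocated type initializes ob_base exactly as before, and core builds now make such statics immortal. The type below is hypothetical.

    #include "Python.h"

    typedef struct {
        PyObject_HEAD
        double value;
    } ExampleObject;

    static PyTypeObject Example_Type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "example.Example",
        .tp_basicsize = sizeof(ExampleObject),
        .tp_flags = Py_TPFLAGS_DEFAULT,
        .tp_new = PyType_GenericNew,
    };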
*/ #define _PyObject_CAST(op) _Py_CAST(PyObject*, (op)) @@ -121,9 +238,54 @@ typedef struct { PyAPI_FUNC(int) Py_Is(PyObject *x, PyObject *y); #define Py_Is(x, y) ((x) == (y)) +#if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API) +static inline uintptr_t +_Py_ThreadId(void) +{ + uintptr_t tid; +#if defined(_MSC_VER) && defined(_M_X64) + tid = __readgsqword(48); +#elif defined(_MSC_VER) && defined(_M_IX86) + tid = __readfsdword(24); +#elif defined(_MSC_VER) && defined(_M_ARM64) + tid = __getReg(18); +#elif defined(__i386__) + __asm__("movl %%gs:0, %0" : "=r" (tid)); // 32-bit always uses GS +#elif defined(__MACH__) && defined(__x86_64__) + __asm__("movq %%gs:0, %0" : "=r" (tid)); // x86_64 macOSX uses GS +#elif defined(__x86_64__) + __asm__("movq %%fs:0, %0" : "=r" (tid)); // x86_64 Linux, BSD uses FS +#elif defined(__arm__) + __asm__ ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tid)); +#elif defined(__aarch64__) && defined(__APPLE__) + __asm__ ("mrs %0, tpidrro_el0" : "=r" (tid)); +#elif defined(__aarch64__) + __asm__ ("mrs %0, tpidr_el0" : "=r" (tid)); +#else + # error "define _Py_ThreadId for this platform" +#endif + return tid; +} + +static inline Py_ALWAYS_INLINE int +_Py_IsOwnedByCurrentThread(PyObject *ob) +{ + return ob->ob_tid == _Py_ThreadId(); +} +#endif static inline Py_ssize_t Py_REFCNT(PyObject *ob) { +#if !defined(Py_GIL_DISABLED) return ob->ob_refcnt; +#else + uint32_t local = _Py_atomic_load_uint32_relaxed(&ob->ob_ref_local); + if (local == _Py_IMMORTAL_REFCNT_LOCAL) { + return _Py_IMMORTAL_REFCNT; + } + Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&ob->ob_ref_shared); + return _Py_STATIC_CAST(Py_ssize_t, local) + + Py_ARITHMETIC_RIGHT_SHIFT(Py_ssize_t, shared, _Py_REF_SHARED_SHIFT); +#endif } #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030b0000 # define Py_REFCNT(ob) Py_REFCNT(_PyObject_CAST(ob)) @@ -138,8 +300,13 @@ static inline PyTypeObject* Py_TYPE(PyObject *ob) { # define Py_TYPE(ob) Py_TYPE(_PyObject_CAST(ob)) #endif +PyAPI_DATA(PyTypeObject) PyLong_Type; +PyAPI_DATA(PyTypeObject) PyBool_Type; + // bpo-39573: The Py_SET_SIZE() function must be used to set an object size. static inline Py_ssize_t Py_SIZE(PyObject *ob) { + assert(ob->ob_type != &PyLong_Type); + assert(ob->ob_type != &PyBool_Type); PyVarObject *var_ob = _PyVarObject_CAST(ob); return var_ob->ob_size; } @@ -147,6 +314,17 @@ static inline Py_ssize_t Py_SIZE(PyObject *ob) { # define Py_SIZE(ob) Py_SIZE(_PyObject_CAST(ob)) #endif +static inline Py_ALWAYS_INLINE int _Py_IsImmortal(PyObject *op) +{ +#if defined(Py_GIL_DISABLED) + return op->ob_ref_local == _Py_IMMORTAL_REFCNT_LOCAL; +#elif SIZEOF_VOID_P > 4 + return _Py_CAST(PY_INT32_T, op->ob_refcnt) < 0; +#else + return op->ob_refcnt == _Py_IMMORTAL_REFCNT; +#endif +} +#define _Py_IsImmortal(op) _Py_IsImmortal(_PyObject_CAST(op)) static inline int Py_IS_TYPE(PyObject *ob, PyTypeObject *type) { return Py_TYPE(ob) == type; @@ -156,8 +334,41 @@ static inline int Py_IS_TYPE(PyObject *ob, PyTypeObject *type) { #endif +// Py_SET_REFCNT() implementation for stable ABI +PyAPI_FUNC(void) _Py_SetRefcnt(PyObject *ob, Py_ssize_t refcnt); + static inline void Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt) { +#if defined(Py_LIMITED_API) && Py_LIMITED_API+0 >= 0x030d0000 + // Stable ABI implements Py_SET_REFCNT() as a function call + // on limited C API version 3.13 and newer. + _Py_SetRefcnt(ob, refcnt); +#else + // This immortal check is for code that is unaware of immortal objects. 
+ // The runtime tracks these objects and we should avoid as much + // as possible having extensions inadvertently change the refcnt + // of an immortalized object. + if (_Py_IsImmortal(ob)) { + return; + } +#ifndef Py_GIL_DISABLED ob->ob_refcnt = refcnt; +#else + if (_Py_IsOwnedByCurrentThread(ob)) { + // Set local refcount to desired refcount and shared refcount to zero, + // but preserve the shared refcount flags. + assert(refcnt < UINT32_MAX); + ob->ob_ref_local = _Py_STATIC_CAST(uint32_t, refcnt); + ob->ob_ref_shared &= _Py_REF_SHARED_FLAG_MASK; + } + else { + // Set local refcount to zero and shared refcount to desired refcount. + // Mark the object as merged. + ob->ob_tid = _Py_UNOWNED_TID; + ob->ob_ref_local = 0; + ob->ob_ref_shared = _Py_REF_SHARED(refcnt, _Py_REF_MERGED); + } +#endif // Py_GIL_DISABLED +#endif // Py_LIMITED_API+0 < 0x030d0000 } #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030b0000 # define Py_SET_REFCNT(ob, refcnt) Py_SET_REFCNT(_PyObject_CAST(ob), (refcnt)) @@ -171,8 +382,9 @@ static inline void Py_SET_TYPE(PyObject *ob, PyTypeObject *type) { # define Py_SET_TYPE(ob, type) Py_SET_TYPE(_PyObject_CAST(ob), type) #endif - static inline void Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size) { + assert(ob->ob_base.ob_type != &PyLong_Type); + assert(ob->ob_base.ob_type != &PyBool_Type); ob->ob_size = size; } #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030b0000 @@ -264,6 +476,8 @@ PyAPI_FUNC(PyObject *) PyType_GetQualName(PyTypeObject *); #endif #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030C0000 PyAPI_FUNC(PyObject *) PyType_FromMetaclass(PyTypeObject*, PyObject*, PyType_Spec*, PyObject*); +PyAPI_FUNC(void *) PyObject_GetTypeData(PyObject *obj, PyTypeObject *cls); +PyAPI_FUNC(Py_ssize_t) PyType_GetTypeDataSize(PyTypeObject *cls); #endif /* Generic type check */ @@ -298,10 +512,20 @@ PyAPI_FUNC(PyObject *) PyObject_RichCompare(PyObject *, PyObject *, int); PyAPI_FUNC(int) PyObject_RichCompareBool(PyObject *, PyObject *, int); PyAPI_FUNC(PyObject *) PyObject_GetAttrString(PyObject *, const char *); PyAPI_FUNC(int) PyObject_SetAttrString(PyObject *, const char *, PyObject *); +PyAPI_FUNC(int) PyObject_DelAttrString(PyObject *v, const char *name); PyAPI_FUNC(int) PyObject_HasAttrString(PyObject *, const char *); PyAPI_FUNC(PyObject *) PyObject_GetAttr(PyObject *, PyObject *); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000 +PyAPI_FUNC(int) PyObject_GetOptionalAttr(PyObject *, PyObject *, PyObject **); +PyAPI_FUNC(int) PyObject_GetOptionalAttrString(PyObject *, const char *, PyObject **); +#endif PyAPI_FUNC(int) PyObject_SetAttr(PyObject *, PyObject *, PyObject *); +PyAPI_FUNC(int) PyObject_DelAttr(PyObject *v, PyObject *name); PyAPI_FUNC(int) PyObject_HasAttr(PyObject *, PyObject *); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000 +PyAPI_FUNC(int) PyObject_HasAttrWithError(PyObject *, PyObject *); +PyAPI_FUNC(int) PyObject_HasAttrStringWithError(PyObject *, const char *); +#endif PyAPI_FUNC(PyObject *) PyObject_SelfIter(PyObject *); PyAPI_FUNC(PyObject *) PyObject_GenericGetAttr(PyObject *, PyObject *); PyAPI_FUNC(int) PyObject_GenericSetAttr(PyObject *, PyObject *, PyObject *); @@ -322,12 +546,6 @@ PyAPI_FUNC(void) PyObject_ClearWeakRefs(PyObject *); */ PyAPI_FUNC(PyObject *) PyObject_Dir(PyObject *); -/* Pickle support. 
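A sketch of the new PyObject_GetOptionalAttr() family declared above; it folds the usual PyObject_HasAttr() + PyObject_GetAttr() dance into one call and distinguishes a missing attribute from a real error. The surrounding function and default value are hypothetical.

    #include "Python.h"

    static PyObject *
    get_name_or_default(PyObject *obj)
    {
        PyObject *value;
        int rc = PyObject_GetOptionalAttrString(obj, "name", &value);
        if (rc < 0) {
            return NULL;                                  // exception already set
        }
        if (rc == 0) {
            value = PyUnicode_FromString("<anonymous>");  // attribute missing
        }
        return value;                                     // new reference
    }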
*/ -#ifndef Py_LIMITED_API -PyAPI_FUNC(PyObject *) _PyObject_GetState(PyObject *); -#endif - - /* Helpers for printing recursive container types */ PyAPI_FUNC(int) Py_ReprEnter(PyObject *); PyAPI_FUNC(void) Py_ReprLeave(PyObject *); @@ -430,6 +648,9 @@ given type object has a specified feature. // subject itself (rather than a mapped attribute on it): #define _Py_TPFLAGS_MATCH_SELF (1UL << 22) +/* Items (ob_size*tp_itemsize) are found at the end of an instance's memory */ +#define Py_TPFLAGS_ITEMS_AT_END (1UL << 23) + /* These flags are used to determine if a type is a subclass. */ #define Py_TPFLAGS_LONG_SUBCLASS (1UL << 24) #define Py_TPFLAGS_LIST_SUBCLASS (1UL << 25) @@ -489,11 +710,12 @@ decision that's up to the implementer of each new type so if you want, you can count such references to the type object.) */ -#ifdef Py_REF_DEBUG -PyAPI_DATA(Py_ssize_t) _Py_RefTotal; +#if defined(Py_REF_DEBUG) && !defined(Py_LIMITED_API) PyAPI_FUNC(void) _Py_NegativeRefcount(const char *filename, int lineno, PyObject *op); -#endif /* Py_REF_DEBUG */ +PyAPI_FUNC(void) _Py_INCREF_IncRefTotal(void); +PyAPI_FUNC(void) _Py_DECREF_DecRefTotal(void); +#endif // Py_REF_DEBUG && !Py_LIMITED_API PyAPI_FUNC(void) _Py_Dealloc(PyObject *); @@ -509,54 +731,157 @@ PyAPI_FUNC(void) Py_DecRef(PyObject *); PyAPI_FUNC(void) _Py_IncRef(PyObject *); PyAPI_FUNC(void) _Py_DecRef(PyObject *); -static inline void Py_INCREF(PyObject *op) +static inline Py_ALWAYS_INLINE void Py_INCREF(PyObject *op) { -#if defined(Py_REF_DEBUG) && defined(Py_LIMITED_API) && Py_LIMITED_API+0 >= 0x030A0000 - // Stable ABI for Python 3.10 built in debug mode. +#if defined(Py_LIMITED_API) && (Py_LIMITED_API+0 >= 0x030c0000 || defined(Py_REF_DEBUG)) + // Stable ABI implements Py_INCREF() as a function call on limited C API + // version 3.12 and newer, and on Python built in debug mode. _Py_IncRef() + // was added to Python 3.10.0a7, use Py_IncRef() on older Python versions. + // Py_IncRef() accepts NULL whereas _Py_IncRef() doesn't. +# if Py_LIMITED_API+0 >= 0x030a00A7 _Py_IncRef(op); +# else + Py_IncRef(op); +# endif #else - _Py_INCREF_STAT_INC(); // Non-limited C API and limited C API for Python 3.9 and older access // directly PyObject.ob_refcnt. 
+#if defined(Py_GIL_DISABLED) + uint32_t local = _Py_atomic_load_uint32_relaxed(&op->ob_ref_local); + uint32_t new_local = local + 1; + if (new_local == 0) { + return; + } + if (_Py_IsOwnedByCurrentThread(op)) { + _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, new_local); + } + else { + _Py_atomic_add_ssize(&op->ob_ref_shared, (1 << _Py_REF_SHARED_SHIFT)); + } +#elif SIZEOF_VOID_P > 4 + // Portable saturated add, branching on the carry flag and set low bits + PY_UINT32_T cur_refcnt = op->ob_refcnt_split[PY_BIG_ENDIAN]; + PY_UINT32_T new_refcnt = cur_refcnt + 1; + if (new_refcnt == 0) { + return; + } + op->ob_refcnt_split[PY_BIG_ENDIAN] = new_refcnt; +#else + // Explicitly check immortality against the immortal value + if (_Py_IsImmortal(op)) { + return; + } + op->ob_refcnt++; +#endif + _Py_INCREF_STAT_INC(); #ifdef Py_REF_DEBUG - _Py_RefTotal++; + _Py_INCREF_IncRefTotal(); #endif - op->ob_refcnt++; #endif } #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030b0000 # define Py_INCREF(op) Py_INCREF(_PyObject_CAST(op)) #endif -#if defined(Py_REF_DEBUG) && defined(Py_LIMITED_API) && Py_LIMITED_API+0 >= 0x030A0000 -// Stable ABI for limited C API version 3.10 of Python debug build + +#if !defined(Py_LIMITED_API) && defined(Py_GIL_DISABLED) +// Implements Py_DECREF on objects not owned by the current thread. +PyAPI_FUNC(void) _Py_DecRefShared(PyObject *); +PyAPI_FUNC(void) _Py_DecRefSharedDebug(PyObject *, const char *, int); + +// Called from Py_DECREF by the owning thread when the local refcount reaches +// zero. The call will deallocate the object if the shared refcount is also +// zero. Otherwise, the thread gives up ownership and merges the reference +// count fields. +PyAPI_FUNC(void) _Py_MergeZeroLocalRefcount(PyObject *); +#endif + +#if defined(Py_LIMITED_API) && (Py_LIMITED_API+0 >= 0x030c0000 || defined(Py_REF_DEBUG)) +// Stable ABI implements Py_DECREF() as a function call on limited C API +// version 3.12 and newer, and on Python built in debug mode. _Py_DecRef() was +// added to Python 3.10.0a7, use Py_DecRef() on older Python versions. +// Py_DecRef() accepts NULL whereas _Py_IncRef() doesn't. 
static inline void Py_DECREF(PyObject *op) { +# if Py_LIMITED_API+0 >= 0x030a00A7 _Py_DecRef(op); +# else + Py_DecRef(op); +# endif } #define Py_DECREF(op) Py_DECREF(_PyObject_CAST(op)) -#elif defined(Py_REF_DEBUG) +#elif defined(Py_GIL_DISABLED) && defined(Py_REF_DEBUG) static inline void Py_DECREF(const char *filename, int lineno, PyObject *op) { + uint32_t local = _Py_atomic_load_uint32_relaxed(&op->ob_ref_local); + if (local == _Py_IMMORTAL_REFCNT_LOCAL) { + return; + } _Py_DECREF_STAT_INC(); - _Py_RefTotal--; - if (--op->ob_refcnt != 0) { - if (op->ob_refcnt < 0) { + _Py_DECREF_DecRefTotal(); + if (_Py_IsOwnedByCurrentThread(op)) { + if (local == 0) { _Py_NegativeRefcount(filename, lineno, op); } + local--; + _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, local); + if (local == 0) { + _Py_MergeZeroLocalRefcount(op); + } } else { - _Py_Dealloc(op); + _Py_DecRefSharedDebug(op, filename, lineno); } } #define Py_DECREF(op) Py_DECREF(__FILE__, __LINE__, _PyObject_CAST(op)) -#else +#elif defined(Py_GIL_DISABLED) static inline void Py_DECREF(PyObject *op) { + uint32_t local = _Py_atomic_load_uint32_relaxed(&op->ob_ref_local); + if (local == _Py_IMMORTAL_REFCNT_LOCAL) { + return; + } _Py_DECREF_STAT_INC(); + if (_Py_IsOwnedByCurrentThread(op)) { + local--; + _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, local); + if (local == 0) { + _Py_MergeZeroLocalRefcount(op); + } + } + else { + _Py_DecRefShared(op); + } +} +#define Py_DECREF(op) Py_DECREF(_PyObject_CAST(op)) + +#elif defined(Py_REF_DEBUG) +static inline void Py_DECREF(const char *filename, int lineno, PyObject *op) +{ + if (op->ob_refcnt <= 0) { + _Py_NegativeRefcount(filename, lineno, op); + } + if (_Py_IsImmortal(op)) { + return; + } + _Py_DECREF_STAT_INC(); + _Py_DECREF_DecRefTotal(); + if (--op->ob_refcnt == 0) { + _Py_Dealloc(op); + } +} +#define Py_DECREF(op) Py_DECREF(__FILE__, __LINE__, _PyObject_CAST(op)) + +#else +static inline Py_ALWAYS_INLINE void Py_DECREF(PyObject *op) +{ // Non-limited C API and limited C API for Python 3.9 and older access // directly PyObject.ob_refcnt. + if (_Py_IsImmortal(op)) { + return; + } + _Py_DECREF_STAT_INC(); if (--op->ob_refcnt == 0) { _Py_Dealloc(op); } @@ -598,15 +923,44 @@ static inline void Py_DECREF(PyObject *op) * one of those can't cause problems -- but in part that relies on that * Python integers aren't currently weakly referencable. Best practice is * to use Py_CLEAR() even if you can't think of a reason for why you need to. + * + * gh-98724: Use a temporary variable to only evaluate the macro argument once, + * to avoid the duplication of side effects if the argument has side effects. + * + * gh-99701: If the PyObject* type is used with casting arguments to PyObject*, + * the code can be miscompiled with strict aliasing because of type punning. + * With strict aliasing, a compiler considers that two pointers of different + * types cannot read or write the same memory which enables optimization + * opportunities. + * + * If available, use _Py_TYPEOF() to use the 'op' type for temporary variables, + * and so avoid type punning. Otherwise, use memcpy() which causes type erasure + * and so prevents the compiler to reuse an old cached 'op' value after + * Py_CLEAR(). 
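A short sketch of the strengthened Py_CLEAR() contract explained above (and implemented just below): the argument is now evaluated exactly once, so even an expression with side effects is safe, and the usual tp_clear pattern is unchanged. The struct and index expression are hypothetical.

    #include "Python.h"

    typedef struct {
        PyObject_HEAD
        PyObject *cache;
        PyObject *items[8];
    } HolderObject;

    static int
    holder_clear(HolderObject *self)
    {
        Py_CLEAR(self->cache);
        for (Py_ssize_t i = 0; i < 8; ) {
            Py_CLEAR(self->items[i++]);   // 'i++' is evaluated exactly once
        }
        return 0;
    }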
*/ -#define Py_CLEAR(op) \ - do { \ - PyObject *_py_tmp = _PyObject_CAST(op); \ - if (_py_tmp != NULL) { \ - (op) = NULL; \ - Py_DECREF(_py_tmp); \ - } \ +#ifdef _Py_TYPEOF +#define Py_CLEAR(op) \ + do { \ + _Py_TYPEOF(op)* _tmp_op_ptr = &(op); \ + _Py_TYPEOF(op) _tmp_old_op = (*_tmp_op_ptr); \ + if (_tmp_old_op != NULL) { \ + *_tmp_op_ptr = _Py_NULL; \ + Py_DECREF(_tmp_old_op); \ + } \ + } while (0) +#else +#define Py_CLEAR(op) \ + do { \ + PyObject **_tmp_op_ptr = _Py_CAST(PyObject**, &(op)); \ + PyObject *_tmp_old_op = (*_tmp_op_ptr); \ + if (_tmp_old_op != NULL) { \ + PyObject *_null_ptr = _Py_NULL; \ + memcpy(_tmp_op_ptr, &_null_ptr, sizeof(PyObject*)); \ + Py_DECREF(_tmp_old_op); \ + } \ } while (0) +#endif + /* Function to use in case the object pointer can be NULL: */ static inline void Py_XINCREF(PyObject *op) @@ -663,8 +1017,6 @@ static inline PyObject* _Py_XNewRef(PyObject *obj) /* _Py_NoneStruct is an object of undefined type which can be used in contexts where NULL (nil) is not suitable (since NULL often means 'error'). - -Don't forget to apply Py_INCREF() when returning this value!!! */ PyAPI_DATA(PyObject) _Py_NoneStruct; /* Don't use this directly */ #define Py_None (&_Py_NoneStruct) @@ -674,7 +1026,7 @@ PyAPI_FUNC(int) Py_IsNone(PyObject *x); #define Py_IsNone(x) Py_Is((x), Py_None) /* Macro for returning Py_None from a function */ -#define Py_RETURN_NONE return Py_NewRef(Py_None) +#define Py_RETURN_NONE return Py_None /* Py_NotImplemented is a singleton used to signal that an operation is @@ -684,7 +1036,7 @@ PyAPI_DATA(PyObject) _Py_NotImplementedStruct; /* Don't use this directly */ #define Py_NotImplemented (&_Py_NotImplementedStruct) /* Macro for returning Py_NotImplemented from a function */ -#define Py_RETURN_NOTIMPLEMENTED return Py_NewRef(Py_NotImplemented) +#define Py_RETURN_NOTIMPLEMENTED return Py_NotImplemented /* Rich comparison opcodes */ #define Py_LT 0 diff --git a/Include/objimpl.h b/Include/objimpl.h index dde8df34835328e..967e2776767756e 100644 --- a/Include/objimpl.h +++ b/Include/objimpl.h @@ -1,12 +1,8 @@ -/* The PyObject_ memory family: high-level object memory interfaces. - See pymem.h for the low-level PyMem_ family. -*/ +// The PyObject_ memory family: high-level object memory interfaces. +// See pymem.h for the low-level PyMem_ family. #ifndef Py_OBJIMPL_H #define Py_OBJIMPL_H - -#include "pymem.h" - #ifdef __cplusplus extern "C" { #endif @@ -157,6 +153,25 @@ PyAPI_FUNC(int) PyGC_Enable(void); PyAPI_FUNC(int) PyGC_Disable(void); PyAPI_FUNC(int) PyGC_IsEnabled(void); + +#if !defined(Py_LIMITED_API) +/* Visit all live GC-capable objects, similar to gc.get_objects(None). The + * supplied callback is called on every such object with the void* arg set + * to the supplied arg. Returning 0 from the callback ends iteration, returning + * 1 allows iteration to continue. Returning any other value may result in + * undefined behaviour. + * + * If new objects are (de)allocated by the callback it is undefined if they + * will be visited. + + * Garbage collection is disabled during operation. Explicitly running a + * collection in the callback may lead to undefined behaviour e.g. visiting the + * same objects multiple times or not at all. 
+ */ +typedef int (*gcvisitobjects_t)(PyObject*, void*); +PyAPI_FUNC(void) PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void* arg); +#endif + /* Test if a type has a GC head */ #define PyType_IS_GC(t) PyType_HasFeature((t), Py_TPFLAGS_HAVE_GC) @@ -212,4 +227,4 @@ PyAPI_FUNC(int) PyObject_GC_IsFinalized(PyObject *); #ifdef __cplusplus } #endif -#endif /* !Py_OBJIMPL_H */ +#endif // !Py_OBJIMPL_H diff --git a/Include/opcode.h b/Include/opcode.h index 4efa35779fedc11..2619b690019acc3 100644 --- a/Include/opcode.h +++ b/Include/opcode.h @@ -1,216 +1,11 @@ -// Auto-generated by Tools/build/generate_opcode_h.py from Lib/opcode.py - #ifndef Py_OPCODE_H #define Py_OPCODE_H #ifdef __cplusplus extern "C" { #endif +#include "opcode_ids.h" -/* Instruction opcodes for compiled code */ -#define CACHE 0 -#define POP_TOP 1 -#define PUSH_NULL 2 -#define END_FOR 4 -#define NOP 9 -#define UNARY_POSITIVE 10 -#define UNARY_NEGATIVE 11 -#define UNARY_NOT 12 -#define UNARY_INVERT 15 -#define BINARY_SUBSCR 25 -#define BINARY_SLICE 26 -#define STORE_SLICE 27 -#define GET_LEN 30 -#define MATCH_MAPPING 31 -#define MATCH_SEQUENCE 32 -#define MATCH_KEYS 33 -#define PUSH_EXC_INFO 35 -#define CHECK_EXC_MATCH 36 -#define CHECK_EG_MATCH 37 -#define WITH_EXCEPT_START 49 -#define GET_AITER 50 -#define GET_ANEXT 51 -#define BEFORE_ASYNC_WITH 52 -#define BEFORE_WITH 53 -#define END_ASYNC_FOR 54 -#define CLEANUP_THROW 55 -#define STORE_SUBSCR 60 -#define DELETE_SUBSCR 61 -#define STOPITERATION_ERROR 63 -#define GET_ITER 68 -#define GET_YIELD_FROM_ITER 69 -#define PRINT_EXPR 70 -#define LOAD_BUILD_CLASS 71 -#define LOAD_ASSERTION_ERROR 74 -#define RETURN_GENERATOR 75 -#define LIST_TO_TUPLE 82 -#define RETURN_VALUE 83 -#define IMPORT_STAR 84 -#define SETUP_ANNOTATIONS 85 -#define ASYNC_GEN_WRAP 87 -#define PREP_RERAISE_STAR 88 -#define POP_EXCEPT 89 -#define HAVE_ARGUMENT 90 -#define STORE_NAME 90 -#define DELETE_NAME 91 -#define UNPACK_SEQUENCE 92 -#define FOR_ITER 93 -#define UNPACK_EX 94 -#define STORE_ATTR 95 -#define DELETE_ATTR 96 -#define STORE_GLOBAL 97 -#define DELETE_GLOBAL 98 -#define SWAP 99 -#define LOAD_CONST 100 -#define LOAD_NAME 101 -#define BUILD_TUPLE 102 -#define BUILD_LIST 103 -#define BUILD_SET 104 -#define BUILD_MAP 105 -#define LOAD_ATTR 106 -#define COMPARE_OP 107 -#define IMPORT_NAME 108 -#define IMPORT_FROM 109 -#define JUMP_FORWARD 110 -#define JUMP_IF_FALSE_OR_POP 111 -#define JUMP_IF_TRUE_OR_POP 112 -#define POP_JUMP_IF_FALSE 114 -#define POP_JUMP_IF_TRUE 115 -#define LOAD_GLOBAL 116 -#define IS_OP 117 -#define CONTAINS_OP 118 -#define RERAISE 119 -#define COPY 120 -#define BINARY_OP 122 -#define SEND 123 -#define LOAD_FAST 124 -#define STORE_FAST 125 -#define DELETE_FAST 126 -#define LOAD_FAST_CHECK 127 -#define POP_JUMP_IF_NOT_NONE 128 -#define POP_JUMP_IF_NONE 129 -#define RAISE_VARARGS 130 -#define GET_AWAITABLE 131 -#define MAKE_FUNCTION 132 -#define BUILD_SLICE 133 -#define JUMP_BACKWARD_NO_INTERRUPT 134 -#define MAKE_CELL 135 -#define LOAD_CLOSURE 136 -#define LOAD_DEREF 137 -#define STORE_DEREF 138 -#define DELETE_DEREF 139 -#define JUMP_BACKWARD 140 -#define CALL_FUNCTION_EX 142 -#define EXTENDED_ARG 144 -#define LIST_APPEND 145 -#define SET_ADD 146 -#define MAP_ADD 147 -#define LOAD_CLASSDEREF 148 -#define COPY_FREE_VARS 149 -#define YIELD_VALUE 150 -#define RESUME 151 -#define MATCH_CLASS 152 -#define FORMAT_VALUE 155 -#define BUILD_CONST_KEY_MAP 156 -#define BUILD_STRING 157 -#define LIST_EXTEND 162 -#define SET_UPDATE 163 -#define DICT_MERGE 164 -#define DICT_UPDATE 165 
-#define CALL 171 -#define KW_NAMES 172 -#define MIN_PSEUDO_OPCODE 256 -#define SETUP_FINALLY 256 -#define SETUP_CLEANUP 257 -#define SETUP_WITH 258 -#define POP_BLOCK 259 -#define JUMP 260 -#define JUMP_NO_INTERRUPT 261 -#define LOAD_METHOD 262 -#define MAX_PSEUDO_OPCODE 262 -#define BINARY_OP_ADAPTIVE 3 -#define BINARY_OP_ADD_FLOAT 5 -#define BINARY_OP_ADD_INT 6 -#define BINARY_OP_ADD_UNICODE 7 -#define BINARY_OP_INPLACE_ADD_UNICODE 8 -#define BINARY_OP_MULTIPLY_FLOAT 13 -#define BINARY_OP_MULTIPLY_INT 14 -#define BINARY_OP_SUBTRACT_FLOAT 16 -#define BINARY_OP_SUBTRACT_INT 17 -#define BINARY_SUBSCR_ADAPTIVE 18 -#define BINARY_SUBSCR_DICT 19 -#define BINARY_SUBSCR_GETITEM 20 -#define BINARY_SUBSCR_LIST_INT 21 -#define BINARY_SUBSCR_TUPLE_INT 22 -#define CALL_ADAPTIVE 23 -#define CALL_PY_EXACT_ARGS 24 -#define CALL_PY_WITH_DEFAULTS 28 -#define CALL_BOUND_METHOD_EXACT_ARGS 29 -#define CALL_BUILTIN_CLASS 34 -#define CALL_BUILTIN_FAST_WITH_KEYWORDS 38 -#define CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS 39 -#define CALL_NO_KW_BUILTIN_FAST 40 -#define CALL_NO_KW_BUILTIN_O 41 -#define CALL_NO_KW_ISINSTANCE 42 -#define CALL_NO_KW_LEN 43 -#define CALL_NO_KW_LIST_APPEND 44 -#define CALL_NO_KW_METHOD_DESCRIPTOR_FAST 45 -#define CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS 46 -#define CALL_NO_KW_METHOD_DESCRIPTOR_O 47 -#define CALL_NO_KW_STR_1 48 -#define CALL_NO_KW_TUPLE_1 56 -#define CALL_NO_KW_TYPE_1 57 -#define COMPARE_OP_ADAPTIVE 58 -#define COMPARE_OP_FLOAT_JUMP 59 -#define COMPARE_OP_INT_JUMP 62 -#define COMPARE_OP_STR_JUMP 64 -#define EXTENDED_ARG_QUICK 65 -#define FOR_ITER_ADAPTIVE 66 -#define FOR_ITER_LIST 67 -#define FOR_ITER_RANGE 72 -#define LOAD_ATTR_ADAPTIVE 73 -#define LOAD_ATTR_CLASS 76 -#define LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN 77 -#define LOAD_ATTR_INSTANCE_VALUE 78 -#define LOAD_ATTR_MODULE 79 -#define LOAD_ATTR_PROPERTY 80 -#define LOAD_ATTR_SLOT 81 -#define LOAD_ATTR_WITH_HINT 86 -#define LOAD_ATTR_METHOD_LAZY_DICT 113 -#define LOAD_ATTR_METHOD_NO_DICT 121 -#define LOAD_ATTR_METHOD_WITH_DICT 141 -#define LOAD_ATTR_METHOD_WITH_VALUES 143 -#define LOAD_CONST__LOAD_FAST 153 -#define LOAD_FAST__LOAD_CONST 154 -#define LOAD_FAST__LOAD_FAST 158 -#define LOAD_GLOBAL_ADAPTIVE 159 -#define LOAD_GLOBAL_BUILTIN 160 -#define LOAD_GLOBAL_MODULE 161 -#define STORE_ATTR_ADAPTIVE 166 -#define STORE_ATTR_INSTANCE_VALUE 167 -#define STORE_ATTR_SLOT 168 -#define STORE_ATTR_WITH_HINT 169 -#define STORE_FAST__LOAD_FAST 170 -#define STORE_FAST__STORE_FAST 173 -#define STORE_SUBSCR_ADAPTIVE 174 -#define STORE_SUBSCR_DICT 175 -#define STORE_SUBSCR_LIST_INT 176 -#define UNPACK_SEQUENCE_ADAPTIVE 177 -#define UNPACK_SEQUENCE_LIST 178 -#define UNPACK_SEQUENCE_TUPLE 179 -#define UNPACK_SEQUENCE_TWO_TUPLE 180 -#define DO_TRACING 255 - -#define HAS_ARG(op) ((((op) >= HAVE_ARGUMENT) && (!IS_PSEUDO_OPCODE(op)))\ - || ((op) == JUMP) \ - || ((op) == JUMP_NO_INTERRUPT) \ - || ((op) == LOAD_METHOD) \ - ) - -#define HAS_CONST(op) (false\ - || ((op) == LOAD_CONST) \ - || ((op) == KW_NAMES) \ - ) #define NB_ADD 0 #define NB_AND 1 @@ -239,8 +34,7 @@ extern "C" { #define NB_INPLACE_TRUE_DIVIDE 24 #define NB_INPLACE_XOR 25 - -#define IS_PSEUDO_OPCODE(op) (((op) >= MIN_PSEUDO_OPCODE) && ((op) <= MAX_PSEUDO_OPCODE)) +#define NB_OPARG_LAST 25 #ifdef __cplusplus } diff --git a/Include/opcode_ids.h b/Include/opcode_ids.h new file mode 100644 index 000000000000000..ba25bd459c1bcde --- /dev/null +++ b/Include/opcode_ids.h @@ -0,0 +1,239 @@ +// This file is generated by Tools/cases_generator/generate_cases.py +// from: +// 
Python/bytecodes.c +// Do not edit! + +#ifndef Py_OPCODE_IDS_H +#define Py_OPCODE_IDS_H +#ifdef __cplusplus +extern "C" { +#endif + +/* Instruction opcodes for compiled code */ +#define CACHE 0 +#define BEFORE_ASYNC_WITH 1 +#define BEFORE_WITH 2 +#define BINARY_OP_INPLACE_ADD_UNICODE 3 +#define BINARY_SLICE 4 +#define BINARY_SUBSCR 5 +#define CHECK_EG_MATCH 6 +#define CHECK_EXC_MATCH 7 +#define CLEANUP_THROW 8 +#define DELETE_SUBSCR 9 +#define END_ASYNC_FOR 10 +#define END_FOR 11 +#define END_SEND 12 +#define EXIT_INIT_CHECK 13 +#define FORMAT_SIMPLE 14 +#define FORMAT_WITH_SPEC 15 +#define GET_AITER 16 +#define RESERVED 17 +#define GET_ANEXT 18 +#define GET_ITER 19 +#define GET_LEN 20 +#define GET_YIELD_FROM_ITER 21 +#define INTERPRETER_EXIT 22 +#define LOAD_ASSERTION_ERROR 23 +#define LOAD_BUILD_CLASS 24 +#define LOAD_LOCALS 25 +#define MAKE_FUNCTION 26 +#define MATCH_KEYS 27 +#define MATCH_MAPPING 28 +#define MATCH_SEQUENCE 29 +#define NOP 30 +#define POP_EXCEPT 31 +#define POP_TOP 32 +#define PUSH_EXC_INFO 33 +#define PUSH_NULL 34 +#define RETURN_GENERATOR 35 +#define RETURN_VALUE 36 +#define SETUP_ANNOTATIONS 37 +#define STORE_SLICE 38 +#define STORE_SUBSCR 39 +#define TO_BOOL 40 +#define UNARY_INVERT 41 +#define UNARY_NEGATIVE 42 +#define UNARY_NOT 43 +#define WITH_EXCEPT_START 44 +#define HAVE_ARGUMENT 45 +#define BINARY_OP 45 +#define BUILD_CONST_KEY_MAP 46 +#define BUILD_LIST 47 +#define BUILD_MAP 48 +#define BUILD_SET 49 +#define BUILD_SLICE 50 +#define BUILD_STRING 51 +#define BUILD_TUPLE 52 +#define CALL 53 +#define CALL_FUNCTION_EX 54 +#define CALL_INTRINSIC_1 55 +#define CALL_INTRINSIC_2 56 +#define CALL_KW 57 +#define COMPARE_OP 58 +#define CONTAINS_OP 59 +#define CONVERT_VALUE 60 +#define COPY 61 +#define COPY_FREE_VARS 62 +#define DELETE_ATTR 63 +#define DELETE_DEREF 64 +#define DELETE_FAST 65 +#define DELETE_GLOBAL 66 +#define DELETE_NAME 67 +#define DICT_MERGE 68 +#define DICT_UPDATE 69 +#define ENTER_EXECUTOR 70 +#define EXTENDED_ARG 71 +#define FOR_ITER 72 +#define GET_AWAITABLE 73 +#define IMPORT_FROM 74 +#define IMPORT_NAME 75 +#define IS_OP 76 +#define JUMP_BACKWARD 77 +#define JUMP_BACKWARD_NO_INTERRUPT 78 +#define JUMP_FORWARD 79 +#define LIST_APPEND 80 +#define LIST_EXTEND 81 +#define LOAD_ATTR 82 +#define LOAD_CONST 83 +#define LOAD_DEREF 84 +#define LOAD_FAST 85 +#define LOAD_FAST_AND_CLEAR 86 +#define LOAD_FAST_CHECK 87 +#define LOAD_FAST_LOAD_FAST 88 +#define LOAD_FROM_DICT_OR_DEREF 89 +#define LOAD_FROM_DICT_OR_GLOBALS 90 +#define LOAD_GLOBAL 91 +#define LOAD_NAME 92 +#define LOAD_SUPER_ATTR 93 +#define MAKE_CELL 94 +#define MAP_ADD 95 +#define MATCH_CLASS 96 +#define POP_JUMP_IF_FALSE 97 +#define POP_JUMP_IF_NONE 98 +#define POP_JUMP_IF_NOT_NONE 99 +#define POP_JUMP_IF_TRUE 100 +#define RAISE_VARARGS 101 +#define RERAISE 102 +#define RETURN_CONST 103 +#define SEND 104 +#define SET_ADD 105 +#define SET_FUNCTION_ATTRIBUTE 106 +#define SET_UPDATE 107 +#define STORE_ATTR 108 +#define STORE_DEREF 109 +#define STORE_FAST 110 +#define STORE_FAST_LOAD_FAST 111 +#define STORE_FAST_STORE_FAST 112 +#define STORE_GLOBAL 113 +#define STORE_NAME 114 +#define SWAP 115 +#define UNPACK_EX 116 +#define UNPACK_SEQUENCE 117 +#define YIELD_VALUE 118 +#define RESUME 149 +#define BINARY_OP_ADD_FLOAT 150 +#define BINARY_OP_ADD_INT 151 +#define BINARY_OP_ADD_UNICODE 152 +#define BINARY_OP_MULTIPLY_FLOAT 153 +#define BINARY_OP_MULTIPLY_INT 154 +#define BINARY_OP_SUBTRACT_FLOAT 155 +#define BINARY_OP_SUBTRACT_INT 156 +#define BINARY_SUBSCR_DICT 157 +#define BINARY_SUBSCR_GETITEM 158 
+#define BINARY_SUBSCR_LIST_INT 159 +#define BINARY_SUBSCR_STR_INT 160 +#define BINARY_SUBSCR_TUPLE_INT 161 +#define CALL_ALLOC_AND_ENTER_INIT 162 +#define CALL_BOUND_METHOD_EXACT_ARGS 163 +#define CALL_BUILTIN_CLASS 164 +#define CALL_BUILTIN_FAST 165 +#define CALL_BUILTIN_FAST_WITH_KEYWORDS 166 +#define CALL_BUILTIN_O 167 +#define CALL_ISINSTANCE 168 +#define CALL_LEN 169 +#define CALL_LIST_APPEND 170 +#define CALL_METHOD_DESCRIPTOR_FAST 171 +#define CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS 172 +#define CALL_METHOD_DESCRIPTOR_NOARGS 173 +#define CALL_METHOD_DESCRIPTOR_O 174 +#define CALL_PY_EXACT_ARGS 175 +#define CALL_PY_WITH_DEFAULTS 176 +#define CALL_STR_1 177 +#define CALL_TUPLE_1 178 +#define CALL_TYPE_1 179 +#define COMPARE_OP_FLOAT 180 +#define COMPARE_OP_INT 181 +#define COMPARE_OP_STR 182 +#define FOR_ITER_GEN 183 +#define FOR_ITER_LIST 184 +#define FOR_ITER_RANGE 185 +#define FOR_ITER_TUPLE 186 +#define LOAD_ATTR_CLASS 187 +#define LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN 188 +#define LOAD_ATTR_INSTANCE_VALUE 189 +#define LOAD_ATTR_METHOD_LAZY_DICT 190 +#define LOAD_ATTR_METHOD_NO_DICT 191 +#define LOAD_ATTR_METHOD_WITH_VALUES 192 +#define LOAD_ATTR_MODULE 193 +#define LOAD_ATTR_NONDESCRIPTOR_NO_DICT 194 +#define LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES 195 +#define LOAD_ATTR_PROPERTY 196 +#define LOAD_ATTR_SLOT 197 +#define LOAD_ATTR_WITH_HINT 198 +#define LOAD_GLOBAL_BUILTIN 199 +#define LOAD_GLOBAL_MODULE 200 +#define LOAD_SUPER_ATTR_ATTR 201 +#define LOAD_SUPER_ATTR_METHOD 202 +#define RESUME_CHECK 203 +#define SEND_GEN 204 +#define STORE_ATTR_INSTANCE_VALUE 205 +#define STORE_ATTR_SLOT 206 +#define STORE_ATTR_WITH_HINT 207 +#define STORE_SUBSCR_DICT 208 +#define STORE_SUBSCR_LIST_INT 209 +#define TO_BOOL_ALWAYS_TRUE 210 +#define TO_BOOL_BOOL 211 +#define TO_BOOL_INT 212 +#define TO_BOOL_LIST 213 +#define TO_BOOL_NONE 214 +#define TO_BOOL_STR 215 +#define UNPACK_SEQUENCE_LIST 216 +#define UNPACK_SEQUENCE_TUPLE 217 +#define UNPACK_SEQUENCE_TWO_TUPLE 218 +#define MIN_INSTRUMENTED_OPCODE 236 +#define INSTRUMENTED_RESUME 236 +#define INSTRUMENTED_END_FOR 237 +#define INSTRUMENTED_END_SEND 238 +#define INSTRUMENTED_RETURN_VALUE 239 +#define INSTRUMENTED_RETURN_CONST 240 +#define INSTRUMENTED_YIELD_VALUE 241 +#define INSTRUMENTED_LOAD_SUPER_ATTR 242 +#define INSTRUMENTED_FOR_ITER 243 +#define INSTRUMENTED_CALL 244 +#define INSTRUMENTED_CALL_KW 245 +#define INSTRUMENTED_CALL_FUNCTION_EX 246 +#define INSTRUMENTED_INSTRUCTION 247 +#define INSTRUMENTED_JUMP_FORWARD 248 +#define INSTRUMENTED_JUMP_BACKWARD 249 +#define INSTRUMENTED_POP_JUMP_IF_TRUE 250 +#define INSTRUMENTED_POP_JUMP_IF_FALSE 251 +#define INSTRUMENTED_POP_JUMP_IF_NONE 252 +#define INSTRUMENTED_POP_JUMP_IF_NOT_NONE 253 +#define INSTRUMENTED_LINE 254 +#define JUMP 256 +#define JUMP_NO_INTERRUPT 257 +#define LOAD_CLOSURE 258 +#define LOAD_METHOD 259 +#define LOAD_SUPER_METHOD 260 +#define LOAD_ZERO_SUPER_ATTR 261 +#define LOAD_ZERO_SUPER_METHOD 262 +#define POP_BLOCK 263 +#define SETUP_CLEANUP 264 +#define SETUP_FINALLY 265 +#define SETUP_WITH 266 +#define STORE_FAST_MAYBE_NULL 267 + +#ifdef __cplusplus +} +#endif +#endif /* !Py_OPCODE_IDS_H */ diff --git a/Include/osdefs.h b/Include/osdefs.h index 3243944a1483e95..2599e87a9d7c4b8 100644 --- a/Include/osdefs.h +++ b/Include/osdefs.h @@ -1,51 +1,57 @@ +// Operating system dependencies. 
+// +// Define constants: +// +// - ALTSEP +// - DELIM +// - MAXPATHLEN +// - SEP + #ifndef Py_OSDEFS_H #define Py_OSDEFS_H #ifdef __cplusplus extern "C" { #endif - -/* Operating system dependencies */ - #ifdef MS_WINDOWS -#define SEP L'\\' -#define ALTSEP L'/' -#define MAXPATHLEN 256 -#define DELIM L';' +# define SEP L'\\' +# define ALTSEP L'/' +# define MAXPATHLEN 256 +# define DELIM L';' #endif #ifdef __VXWORKS__ -#define DELIM L';' +# define DELIM L';' #endif /* Filename separator */ #ifndef SEP -#define SEP L'/' +# define SEP L'/' #endif /* Max pathname length */ #ifdef __hpux -#include -#include -#ifndef PATH_MAX -#define PATH_MAX MAXPATHLEN -#endif +# include +# include +# ifndef PATH_MAX +# define PATH_MAX MAXPATHLEN +# endif #endif #ifndef MAXPATHLEN -#if defined(PATH_MAX) && PATH_MAX > 1024 -#define MAXPATHLEN PATH_MAX -#else -#define MAXPATHLEN 1024 -#endif +# if defined(PATH_MAX) && PATH_MAX > 1024 +# define MAXPATHLEN PATH_MAX +# else +# define MAXPATHLEN 1024 +# endif #endif /* Search path entry delimiter */ #ifndef DELIM -#define DELIM L':' +# define DELIM L':' #endif #ifdef __cplusplus } #endif -#endif /* !Py_OSDEFS_H */ +#endif // !Py_OSDEFS_H diff --git a/Include/patchlevel.h b/Include/patchlevel.h index 6827225c1914123..fad79ecfda7b284 100644 --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -17,13 +17,13 @@ /* Version parsed out into numeric values */ /*--start constants--*/ #define PY_MAJOR_VERSION 3 -#define PY_MINOR_VERSION 12 +#define PY_MINOR_VERSION 13 #define PY_MICRO_VERSION 0 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_ALPHA -#define PY_RELEASE_SERIAL 1 +#define PY_RELEASE_SERIAL 2 /* Version as a string */ -#define PY_VERSION "3.12.0a1+" +#define PY_VERSION "3.13.0a2+" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Include/pyatomic.h b/Include/pyatomic.h new file mode 100644 index 000000000000000..2ce2c81cf5251a9 --- /dev/null +++ b/Include/pyatomic.h @@ -0,0 +1,16 @@ +#ifndef Py_ATOMIC_H +#define Py_ATOMIC_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +# define Py_CPYTHON_ATOMIC_H +# include "cpython/pyatomic.h" +# undef Py_CPYTHON_ATOMIC_H +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_ATOMIC_H */ diff --git a/Include/pybuffer.h b/Include/pybuffer.h index bbac60972f51276..ca1c6058d9052c5 100644 --- a/Include/pybuffer.h +++ b/Include/pybuffer.h @@ -104,7 +104,7 @@ PyAPI_FUNC(void) PyBuffer_Release(Py_buffer *view); /* Maximum number of dimensions */ #define PyBUF_MAX_NDIM 64 -/* Flags for getting buffers */ +/* Flags for getting buffers. Keep these in sync with inspect.BufferFlags. */ #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 diff --git a/Include/pycapsule.h b/Include/pycapsule.h index 929a9a685259fb3..666b9f867396705 100644 --- a/Include/pycapsule.h +++ b/Include/pycapsule.h @@ -52,7 +52,6 @@ PyAPI_FUNC(void *) PyCapsule_Import( const char *name, /* UTF-8 encoded string */ int no_block); - #ifdef __cplusplus } #endif diff --git a/Include/pydtrace.h b/Include/pydtrace.h index 75f8e7f70979c52..e197d36694537b1 100644 --- a/Include/pydtrace.h +++ b/Include/pydtrace.h @@ -12,7 +12,7 @@ extern "C" { /* pydtrace_probes.h, on systems with DTrace, is auto-generated to include `PyDTrace_{PROBE}` and `PyDTrace_{PROBE}_ENABLED()` macros for every probe - defined in pydtrace_provider.d. + defined in pydtrace.d. Calling these functions must be guarded by a `PyDTrace_{PROBE}_ENABLED()` check to minimize performance impact when probing is off. 
For example: diff --git a/Include/pyerrors.h b/Include/pyerrors.h index d5ac6af5b32c6cd..5d0028c116e2d86 100644 --- a/Include/pyerrors.h +++ b/Include/pyerrors.h @@ -1,13 +1,11 @@ +// Error handling definitions + #ifndef Py_ERRORS_H #define Py_ERRORS_H #ifdef __cplusplus extern "C" { #endif -#include // va_list - -/* Error handling definitions */ - PyAPI_FUNC(void) PyErr_SetNone(PyObject *); PyAPI_FUNC(void) PyErr_SetObject(PyObject *, PyObject *); PyAPI_FUNC(void) PyErr_SetString( @@ -18,6 +16,8 @@ PyAPI_FUNC(PyObject *) PyErr_Occurred(void); PyAPI_FUNC(void) PyErr_Clear(void); PyAPI_FUNC(void) PyErr_Fetch(PyObject **, PyObject **, PyObject **); PyAPI_FUNC(void) PyErr_Restore(PyObject *, PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyErr_GetRaisedException(void); +PyAPI_FUNC(void) PyErr_SetRaisedException(PyObject *); #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030b0000 PyAPI_FUNC(PyObject*) PyErr_GetHandledException(void); PyAPI_FUNC(void) PyErr_SetHandledException(PyObject *); @@ -51,6 +51,10 @@ PyAPI_FUNC(void) PyException_SetCause(PyObject *, PyObject *); PyAPI_FUNC(PyObject *) PyException_GetContext(PyObject *); PyAPI_FUNC(void) PyException_SetContext(PyObject *, PyObject *); + +PyAPI_FUNC(PyObject *) PyException_GetArgs(PyObject *); +PyAPI_FUNC(void) PyException_SetArgs(PyObject *, PyObject *); + /* */ #define PyExceptionClass_Check(x) \ diff --git a/Include/pyhash.h b/Include/pyhash.h index 182d223fab1cac4..3e23e2758808d79 100644 --- a/Include/pyhash.h +++ b/Include/pyhash.h @@ -1,100 +1,10 @@ #ifndef Py_HASH_H - #define Py_HASH_H #ifdef __cplusplus extern "C" { #endif -/* Helpers for hash functions */ -#ifndef Py_LIMITED_API -PyAPI_FUNC(Py_hash_t) _Py_HashDouble(PyObject *, double); -PyAPI_FUNC(Py_hash_t) _Py_HashPointer(const void*); -// Similar to _Py_HashPointer(), but don't replace -1 with -2 -PyAPI_FUNC(Py_hash_t) _Py_HashPointerRaw(const void*); -PyAPI_FUNC(Py_hash_t) _Py_HashBytes(const void*, Py_ssize_t); -#endif - -/* Prime multiplier used in string and various other hashes. */ -#define _PyHASH_MULTIPLIER 1000003UL /* 0xf4243 */ - -/* Parameters used for the numeric hash implementation. See notes for - _Py_HashDouble in Python/pyhash.c. Numeric hashes are based on - reduction modulo the prime 2**_PyHASH_BITS - 1. */ - -#if SIZEOF_VOID_P >= 8 -# define _PyHASH_BITS 61 -#else -# define _PyHASH_BITS 31 -#endif - -#define _PyHASH_MODULUS (((size_t)1 << _PyHASH_BITS) - 1) -#define _PyHASH_INF 314159 -#define _PyHASH_IMAG _PyHASH_MULTIPLIER - - -/* hash secret - * - * memory layout on 64 bit systems - * cccccccc cccccccc cccccccc uc -- unsigned char[24] - * pppppppp ssssssss ........ fnv -- two Py_hash_t - * k0k0k0k0 k1k1k1k1 ........ siphash -- two uint64_t - * ........ ........ ssssssss djbx33a -- 16 bytes padding + one Py_hash_t - * ........ ........ eeeeeeee pyexpat XML hash salt - * - * memory layout on 32 bit systems - * cccccccc cccccccc cccccccc uc - * ppppssss ........ ........ fnv -- two Py_hash_t - * k0k0k0k0 k1k1k1k1 ........ siphash -- two uint64_t (*) - * ........ ........ ssss.... djbx33a -- 16 bytes padding + one Py_hash_t - * ........ ........ eeee.... pyexpat XML hash salt - * - * (*) The siphash member may not be available on 32 bit platforms without - * an unsigned int64 data type. 
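A sketch of the new exception-as-object API declared in pyerrors.h above; it replaces the PyErr_Fetch()/PyErr_Restore() triple with a single owned exception object. The cleanup callback in the middle is hypothetical.

    #include "Python.h"

    static void
    run_cleanup_preserving_error(void (*cleanup)(void))
    {
        // Take ownership of any pending exception and clear the error indicator.
        PyObject *exc = PyErr_GetRaisedException();
        cleanup();                           // must not run with a live exception
        if (exc != NULL) {
            PyErr_SetRaisedException(exc);   // restore it; steals the reference
        }
    }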
- */ -#ifndef Py_LIMITED_API -typedef union { - /* ensure 24 bytes */ - unsigned char uc[24]; - /* two Py_hash_t for FNV */ - struct { - Py_hash_t prefix; - Py_hash_t suffix; - } fnv; - /* two uint64 for SipHash24 */ - struct { - uint64_t k0; - uint64_t k1; - } siphash; - /* a different (!) Py_hash_t for small string optimization */ - struct { - unsigned char padding[16]; - Py_hash_t suffix; - } djbx33a; - struct { - unsigned char padding[16]; - Py_hash_t hashsalt; - } expat; -} _Py_HashSecret_t; -PyAPI_DATA(_Py_HashSecret_t) _Py_HashSecret; - -#ifdef Py_DEBUG -PyAPI_DATA(int) _Py_HashSecret_Initialized; -#endif - - -/* hash function definition */ -typedef struct { - Py_hash_t (*const hash)(const void *, Py_ssize_t); - const char *name; - const int hash_bits; - const int seed_bits; -} PyHash_FuncDef; - -PyAPI_FUNC(PyHash_FuncDef*) PyHash_GetFuncDef(void); -#endif - - -/* cutoff for small string DJBX33A optimization in range [1, cutoff). +/* Cutoff for small string DJBX33A optimization in range [1, cutoff). * * About 50% of the strings in a typical Python application are smaller than * 6 to 7 chars. However DJBX33A is vulnerable to hash collision attacks. @@ -112,7 +22,7 @@ PyAPI_FUNC(PyHash_FuncDef*) PyHash_GetFuncDef(void); #endif /* Py_HASH_CUTOFF */ -/* hash algorithm selection +/* Hash algorithm selection * * The values for Py_HASH_* are hard-coded in the * configure script. @@ -137,8 +47,13 @@ PyAPI_FUNC(PyHash_FuncDef*) PyHash_GetFuncDef(void); # endif /* uint64_t && uint32_t && aligned */ #endif /* Py_HASH_ALGORITHM */ +#ifndef Py_LIMITED_API +# define Py_CPYTHON_HASH_H +# include "cpython/pyhash.h" +# undef Py_CPYTHON_HASH_H +#endif + #ifdef __cplusplus } #endif - -#endif /* !Py_HASH_H */ +#endif // !Py_HASH_H diff --git a/Include/pylifecycle.h b/Include/pylifecycle.h index e4c3b09c963fe8a..c1e2bc5e3233584 100644 --- a/Include/pylifecycle.h +++ b/Include/pylifecycle.h @@ -34,18 +34,12 @@ PyAPI_FUNC(int) Py_Main(int argc, wchar_t **argv); PyAPI_FUNC(int) Py_BytesMain(int argc, char **argv); /* In pathconfig.c */ -Py_DEPRECATED(3.11) PyAPI_FUNC(void) Py_SetProgramName(const wchar_t *); -PyAPI_FUNC(wchar_t *) Py_GetProgramName(void); - -Py_DEPRECATED(3.11) PyAPI_FUNC(void) Py_SetPythonHome(const wchar_t *); -PyAPI_FUNC(wchar_t *) Py_GetPythonHome(void); - -PyAPI_FUNC(wchar_t *) Py_GetProgramFullPath(void); - -PyAPI_FUNC(wchar_t *) Py_GetPrefix(void); -PyAPI_FUNC(wchar_t *) Py_GetExecPrefix(void); -PyAPI_FUNC(wchar_t *) Py_GetPath(void); -Py_DEPRECATED(3.11) PyAPI_FUNC(void) Py_SetPath(const wchar_t *); +Py_DEPRECATED(3.13) PyAPI_FUNC(wchar_t *) Py_GetProgramName(void); +Py_DEPRECATED(3.13) PyAPI_FUNC(wchar_t *) Py_GetPythonHome(void); +Py_DEPRECATED(3.13) PyAPI_FUNC(wchar_t *) Py_GetProgramFullPath(void); +Py_DEPRECATED(3.13) PyAPI_FUNC(wchar_t *) Py_GetPrefix(void); +Py_DEPRECATED(3.13) PyAPI_FUNC(wchar_t *) Py_GetExecPrefix(void); +Py_DEPRECATED(3.13) PyAPI_FUNC(wchar_t *) Py_GetPath(void); #ifdef MS_WINDOWS int _Py_CheckPython3(void); #endif @@ -66,6 +60,10 @@ PyAPI_FUNC(PyOS_sighandler_t) PyOS_setsig(int, PyOS_sighandler_t); PyAPI_DATA(const unsigned long) Py_Version; #endif +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030D0000 +PyAPI_FUNC(int) Py_IsFinalizing(void); +#endif + #ifndef Py_LIMITED_API # define Py_CPYTHON_PYLIFECYCLE_H # include "cpython/pylifecycle.h" diff --git a/Include/pymacconfig.h b/Include/pymacconfig.h index 00459a03b980be6..806e41955efd7fa 100644 --- a/Include/pymacconfig.h +++ b/Include/pymacconfig.h @@ -1,90 +1,82 @@ -#ifndef PYMACCONFIG_H 
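A sketch of the newly exposed Py_IsFinalizing() from pylifecycle.h above, as a background extension thread might use it before calling into the interpreter; the check is advisory, since finalization can still begin afterwards. The surrounding function is hypothetical.

    #include "Python.h"

    static void
    background_tick(void)
    {
        if (Py_IsFinalizing()) {
            return;              // runtime is shutting down; don't take the GIL
        }
        PyGILState_STATE st = PyGILState_Ensure();
        /* ... call into Python here ... */
        PyGILState_Release(st);
    }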
-#define PYMACCONFIG_H - /* - * This file moves some of the autoconf magic to compile-time - * when building on MacOSX. This is needed for building 4-way - * universal binaries and for 64-bit universal binaries because - * the values redefined below aren't configure-time constant but - * only compile-time constant in these scenarios. - */ +// This file moves some of the autoconf magic to compile-time when building on +// macOS. This is needed for building 4-way universal binaries and for 64-bit +// universal binaries because the values redefined below aren't configure-time +// constant but only compile-time constant in these scenarios. -#if defined(__APPLE__) +#ifndef PY_MACCONFIG_H +#define PY_MACCONFIG_H +#ifdef __APPLE__ -# undef SIZEOF_LONG -# undef SIZEOF_PTHREAD_T -# undef SIZEOF_SIZE_T -# undef SIZEOF_TIME_T -# undef SIZEOF_VOID_P -# undef SIZEOF__BOOL -# undef SIZEOF_UINTPTR_T -# undef SIZEOF_PTHREAD_T -# undef WORDS_BIGENDIAN -# undef DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754 -# undef DOUBLE_IS_BIG_ENDIAN_IEEE754 -# undef DOUBLE_IS_LITTLE_ENDIAN_IEEE754 -# undef HAVE_GCC_ASM_FOR_X87 +#undef SIZEOF_LONG +#undef SIZEOF_PTHREAD_T +#undef SIZEOF_SIZE_T +#undef SIZEOF_TIME_T +#undef SIZEOF_VOID_P +#undef SIZEOF__BOOL +#undef SIZEOF_UINTPTR_T +#undef SIZEOF_PTHREAD_T +#undef WORDS_BIGENDIAN +#undef DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754 +#undef DOUBLE_IS_BIG_ENDIAN_IEEE754 +#undef DOUBLE_IS_LITTLE_ENDIAN_IEEE754 +#undef HAVE_GCC_ASM_FOR_X87 -# undef VA_LIST_IS_ARRAY -# if defined(__LP64__) && defined(__x86_64__) -# define VA_LIST_IS_ARRAY 1 -# endif - -# undef HAVE_LARGEFILE_SUPPORT -# ifndef __LP64__ -# define HAVE_LARGEFILE_SUPPORT 1 -# endif - -# undef SIZEOF_LONG -# ifdef __LP64__ -# define SIZEOF__BOOL 1 -# define SIZEOF__BOOL 1 -# define SIZEOF_LONG 8 -# define SIZEOF_PTHREAD_T 8 -# define SIZEOF_SIZE_T 8 -# define SIZEOF_TIME_T 8 -# define SIZEOF_VOID_P 8 -# define SIZEOF_UINTPTR_T 8 -# define SIZEOF_PTHREAD_T 8 -# else -# ifdef __ppc__ -# define SIZEOF__BOOL 4 -# else -# define SIZEOF__BOOL 1 -# endif -# define SIZEOF_LONG 4 -# define SIZEOF_PTHREAD_T 4 -# define SIZEOF_SIZE_T 4 -# define SIZEOF_TIME_T 4 -# define SIZEOF_VOID_P 4 -# define SIZEOF_UINTPTR_T 4 -# define SIZEOF_PTHREAD_T 4 -# endif +#undef VA_LIST_IS_ARRAY +#if defined(__LP64__) && defined(__x86_64__) +# define VA_LIST_IS_ARRAY 1 +#endif -# if defined(__LP64__) - /* MacOSX 10.4 (the first release to support 64-bit code - * at all) only supports 64-bit in the UNIX layer. - * Therefore suppress the toolbox-glue in 64-bit mode. - */ +#undef HAVE_LARGEFILE_SUPPORT +#ifndef __LP64__ +# define HAVE_LARGEFILE_SUPPORT 1 +#endif - /* In 64-bit mode setpgrp always has no arguments, in 32-bit - * mode that depends on the compilation environment - */ -# undef SETPGRP_HAVE_ARG +#undef SIZEOF_LONG +#ifdef __LP64__ +# define SIZEOF__BOOL 1 +# define SIZEOF__BOOL 1 +# define SIZEOF_LONG 8 +# define SIZEOF_PTHREAD_T 8 +# define SIZEOF_SIZE_T 8 +# define SIZEOF_TIME_T 8 +# define SIZEOF_VOID_P 8 +# define SIZEOF_UINTPTR_T 8 +# define SIZEOF_PTHREAD_T 8 +#else +# ifdef __ppc__ +# define SIZEOF__BOOL 4 +# else +# define SIZEOF__BOOL 1 +# endif +# define SIZEOF_LONG 4 +# define SIZEOF_PTHREAD_T 4 +# define SIZEOF_SIZE_T 4 +# define SIZEOF_TIME_T 4 +# define SIZEOF_VOID_P 4 +# define SIZEOF_UINTPTR_T 4 +# define SIZEOF_PTHREAD_T 4 +#endif -# endif +// macOS 10.4 (the first release to support 64-bit code +// at all) only supports 64-bit in the UNIX layer. +// Therefore suppress the toolbox-glue in 64-bit mode. 
+// +// In 64-bit mode setpgrp always has no arguments, in 32-bit +// mode that depends on the compilation environment +#if defined(__LP64__) +# undef SETPGRP_HAVE_ARG +#endif #ifdef __BIG_ENDIAN__ -#define WORDS_BIGENDIAN 1 -#define DOUBLE_IS_BIG_ENDIAN_IEEE754 +# define WORDS_BIGENDIAN 1 +# define DOUBLE_IS_BIG_ENDIAN_IEEE754 #else -#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754 -#endif /* __BIG_ENDIAN */ +# define DOUBLE_IS_LITTLE_ENDIAN_IEEE754 +#endif #ifdef __i386__ -# define HAVE_GCC_ASM_FOR_X87 +# define HAVE_GCC_ASM_FOR_X87 #endif - -#endif /* defined(_APPLE__) */ - -#endif /* PYMACCONFIG_H */ +#endif // __APPLE__ +#endif // !PY_MACCONFIG_H diff --git a/Include/pymacro.h b/Include/pymacro.h index e37cda44c5ebf13..9d264fe6eea1d44 100644 --- a/Include/pymacro.h +++ b/Include/pymacro.h @@ -3,20 +3,23 @@ // gh-91782: On FreeBSD 12, if the _POSIX_C_SOURCE and _XOPEN_SOURCE macros are // defined, disables C11 support and does not define -// the static_assert() macro. Define the static_assert() macro in Python until -// suports C11: +// the static_assert() macro. // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=255290 -#if defined(__FreeBSD__) && !defined(static_assert) -# define static_assert _Static_assert -#endif - -// static_assert is defined in glibc from version 2.16. Before it requires -// compiler support (gcc >= 4.6) and is called _Static_assert. -// In C++ 11 static_assert is a keyword, redefining is undefined behaviour. -#if (defined(__GLIBC__) \ - && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 16)) \ - && !(defined(__cplusplus) && __cplusplus >= 201103L) \ - && !defined(static_assert)) +// +// macOS <= 10.10 doesn't define static_assert in assert.h at all despite +// having C11 compiler support. +// +// static_assert is defined in glibc from version 2.16. Compiler support for +// the C11 _Static_assert keyword is in gcc >= 4.6. +// +// MSVC makes static_assert a keyword in C11-17, contrary to the standards. +// +// In C++11 and C2x, static_assert is a keyword, redefining is undefined +// behaviour. So only define if building as C (if __STDC_VERSION__ is defined), +// not C++, and only for C11-17. +#if !defined(static_assert) && (defined(__GNUC__) || defined(__clang__)) \ + && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ + && __STDC_VERSION__ <= 201710L # define static_assert _Static_assert #endif @@ -115,6 +118,15 @@ */ #if defined(__GNUC__) || defined(__clang__) # define Py_UNUSED(name) _unused_ ## name __attribute__((unused)) +#elif defined(_MSC_VER) + // Disable warning C4100: unreferenced formal parameter, + // declare the parameter, + // restore old compiler warnings. +# define Py_UNUSED(name) \ + __pragma(warning(push)) \ + __pragma(warning(suppress: 4100)) \ + _unused_ ## name \ + __pragma(warning(pop)) #else # define Py_UNUSED(name) _unused_ ## name #endif diff --git a/Include/pymath.h b/Include/pymath.h index 772b67e4977563e..4c1e3d9984894b0 100644 --- a/Include/pymath.h +++ b/Include/pymath.h @@ -39,27 +39,24 @@ // Return 1 if float or double arg is neither infinite nor NAN, else 0. #define Py_IS_FINITE(X) isfinite(X) -/* HUGE_VAL is supposed to expand to a positive double infinity. Python - * uses Py_HUGE_VAL instead because some platforms are broken in this - * respect. We used to embed code in pyport.h to try to worm around that, - * but different platforms are broken in conflicting ways. 
If you're on - * a platform where HUGE_VAL is defined incorrectly, fiddle your Python - * config to #define Py_HUGE_VAL to something that works on your platform. +// Py_INFINITY: Value that evaluates to a positive double infinity. +#ifndef Py_INFINITY +# define Py_INFINITY ((double)INFINITY) +#endif + +/* Py_HUGE_VAL should always be the same as Py_INFINITY. But historically + * this was not reliable and Python did not require IEEE floats and C99 + * conformity. Prefer Py_INFINITY for new code. */ #ifndef Py_HUGE_VAL # define Py_HUGE_VAL HUGE_VAL #endif -// Py_NAN: Value that evaluates to a quiet Not-a-Number (NaN). +/* Py_NAN: Value that evaluates to a quiet Not-a-Number (NaN). The sign is + * undefined and normally not relevant, but e.g. fixed for float("nan"). + */ #if !defined(Py_NAN) -# if _Py__has_builtin(__builtin_nan) - // Built-in implementation of the ISO C99 function nan(): quiet NaN. -# define Py_NAN (__builtin_nan("")) -#else - // Use C99 NAN constant: quiet Not-A-Number. - // NAN is a float, Py_NAN is a double: cast to double. # define Py_NAN ((double)NAN) -# endif #endif #endif /* Py_PYMATH_H */ diff --git a/Include/pymem.h b/Include/pymem.h index e882645757bfd39..a80da99e1dd7fc5 100644 --- a/Include/pymem.h +++ b/Include/pymem.h @@ -1,12 +1,8 @@ -/* The PyMem_ family: low-level memory allocation interfaces. - See objimpl.h for the PyObject_ memory family. -*/ +// The PyMem_ family: low-level memory allocation interfaces. +// See objimpl.h for the PyObject_ memory family. #ifndef Py_PYMEM_H #define Py_PYMEM_H - -#include "pyport.h" - #ifdef __cplusplus extern "C" { #endif @@ -91,6 +87,17 @@ PyAPI_FUNC(void) PyMem_Free(void *ptr); #define PyMem_DEL(p) PyMem_Free((p)) +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000 +// Memory allocator which doesn't require the GIL to be held. +// Usually, it's just a thin wrapper to functions of the standard C library: +// malloc(), calloc(), realloc() and free(). The difference is that +// tracemalloc can track these memory allocations. +PyAPI_FUNC(void *) PyMem_RawMalloc(size_t size); +PyAPI_FUNC(void *) PyMem_RawCalloc(size_t nelem, size_t elsize); +PyAPI_FUNC(void *) PyMem_RawRealloc(void *ptr, size_t new_size); +PyAPI_FUNC(void) PyMem_RawFree(void *ptr); +#endif + #ifndef Py_LIMITED_API # define Py_CPYTHON_PYMEM_H # include "cpython/pymem.h" @@ -100,5 +107,4 @@ PyAPI_FUNC(void) PyMem_Free(void *ptr); #ifdef __cplusplus } #endif - -#endif /* !Py_PYMEM_H */ +#endif // !Py_PYMEM_H diff --git a/Include/pyport.h b/Include/pyport.h index b3ff2f4882e90f6..abb526d503fddd7 100644 --- a/Include/pyport.h +++ b/Include/pyport.h @@ -1,13 +1,8 @@ #ifndef Py_PYPORT_H #define Py_PYPORT_H -#include "pyconfig.h" /* include for defines */ - -#include - -#include #ifndef UCHAR_MAX -# error "limits.h must define UCHAR_MAX" +# error " header must define UCHAR_MAX" #endif #if UCHAR_MAX != 255 # error "Python's source code assumes C's unsigned char is an 8-bit type" @@ -24,9 +19,10 @@ #define _Py_CAST(type, expr) ((type)(expr)) // Static inline functions should use _Py_NULL rather than using directly NULL -// to prevent C++ compiler warnings. On C++11 and newer, _Py_NULL is defined as -// nullptr. -#if defined(__cplusplus) && __cplusplus >= 201103 +// to prevent C++ compiler warnings. On C23 and newer and on C++11 and newer, +// _Py_NULL is defined as nullptr. 
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L) \ + || (defined(__cplusplus) && __cplusplus >= 201103) # define _Py_NULL nullptr #else # define _Py_NULL NULL @@ -184,69 +180,10 @@ typedef Py_ssize_t Py_ssize_clean_t; # define Py_LOCAL_INLINE(type) static inline type #endif -// bpo-28126: Py_MEMCPY is kept for backwards compatibility, #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030b0000 # define Py_MEMCPY memcpy #endif -#ifdef HAVE_IEEEFP_H -#include /* needed for 'finite' declaration on some platforms */ -#endif - -#include /* Moved here from the math section, before extern "C" */ - -/******************************************** - * WRAPPER FOR and/or * - ********************************************/ - -#ifdef HAVE_SYS_TIME_H -#include -#endif -#include - -/****************************** - * WRAPPER FOR * - ******************************/ - -/* NB caller must include */ - -#ifdef HAVE_SYS_SELECT_H -#include -#endif /* !HAVE_SYS_SELECT_H */ - -/******************************* - * stat() and fstat() fiddling * - *******************************/ - -#ifdef HAVE_SYS_STAT_H -#include -#elif defined(HAVE_STAT_H) -#include -#endif - -#ifndef S_IFMT -/* VisualAge C/C++ Failed to Define MountType Field in sys/stat.h */ -#define S_IFMT 0170000 -#endif - -#ifndef S_IFLNK -/* Windows doesn't define S_IFLNK but posixmodule.c maps - * IO_REPARSE_TAG_SYMLINK to S_IFLNK */ -# define S_IFLNK 0120000 -#endif - -#ifndef S_ISREG -#define S_ISREG(x) (((x) & S_IFMT) == S_IFREG) -#endif - -#ifndef S_ISDIR -#define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR) -#endif - -#ifndef S_ISCHR -#define S_ISCHR(x) (((x) & S_IFMT) == S_IFCHR) -#endif - #ifdef __cplusplus /* Move this down here since some C++ #include's don't like to be included inside an extern "C" */ @@ -319,6 +256,15 @@ extern "C" { #define Py_DEPRECATED(VERSION_UNUSED) #endif +// _Py_DEPRECATED_EXTERNALLY(version) +// Deprecated outside CPython core. +#ifdef Py_BUILD_CORE +#define _Py_DEPRECATED_EXTERNALLY(VERSION_UNUSED) +#else +#define _Py_DEPRECATED_EXTERNALLY(version) Py_DEPRECATED(version) +#endif + + #if defined(__clang__) #define _Py_COMP_DIAG_PUSH _Pragma("clang diagnostic push") #define _Py_COMP_DIAG_IGNORE_DEPR_DECLS \ @@ -409,141 +355,15 @@ extern "C" { # define Py_NO_INLINE #endif -/************************************************************************** -Prototypes that are missing from the standard include files on some systems -(and possibly only some versions of such systems.) - -Please be conservative with adding new ones, document them and enclose them -in platform-specific #ifdefs. -**************************************************************************/ - -#ifdef SOLARIS -/* Unchecked */ -extern int gethostname(char *, int); -#endif - -#ifdef HAVE__GETPTY -#include /* we need to import mode_t */ -extern char * _getpty(int *, int, mode_t, int); -#endif - -/* On QNX 6, struct termio must be declared by including sys/termio.h - if TCGETA, TCSETA, TCSETAW, or TCSETAF are used. sys/termio.h must - be included before termios.h or it will generate an error. */ -#if defined(HAVE_SYS_TERMIO_H) && !defined(__hpux) -#include -#endif - - -/* On 4.4BSD-descendants, ctype functions serves the whole range of - * wchar_t character set rather than single byte code points only. - * This characteristic can break some operations of string object - * including str.upper() and str.split() on UTF-8 locales. This - * workaround was provided by Tim Robbins of FreeBSD project. 
- */ - -#if defined(__APPLE__) -# define _PY_PORT_CTYPE_UTF8_ISSUE -#endif - -#ifdef _PY_PORT_CTYPE_UTF8_ISSUE -#ifndef __cplusplus - /* The workaround below is unsafe in C++ because - * the defines these symbols as real functions, - * with a slightly different signature. - * See issue #10910 - */ -#include -#include -#undef isalnum -#define isalnum(c) iswalnum(btowc(c)) -#undef isalpha -#define isalpha(c) iswalpha(btowc(c)) -#undef islower -#define islower(c) iswlower(btowc(c)) -#undef isspace -#define isspace(c) iswspace(btowc(c)) -#undef isupper -#define isupper(c) iswupper(btowc(c)) -#undef tolower -#define tolower(c) towlower(btowc(c)) -#undef toupper -#define toupper(c) towupper(btowc(c)) -#endif -#endif - - -/* Declarations for symbol visibility. - - PyAPI_FUNC(type): Declares a public Python API function and return type - PyAPI_DATA(type): Declares public Python data and its type - PyMODINIT_FUNC: A Python module init function. If these functions are - inside the Python core, they are private to the core. - If in an extension module, it may be declared with - external linkage depending on the platform. - - As a number of platforms support/require "__declspec(dllimport/dllexport)", - we support a HAVE_DECLSPEC_DLL macro to save duplication. -*/ - -/* - All windows ports, except cygwin, are handled in PC/pyconfig.h. - - Cygwin is the only other autoconf platform requiring special - linkage handling and it uses __declspec(). -*/ -#if defined(__CYGWIN__) -# define HAVE_DECLSPEC_DLL -#endif - #include "exports.h" -/* only get special linkage if built as shared or platform is Cygwin */ -#if defined(Py_ENABLE_SHARED) || defined(__CYGWIN__) -# if defined(HAVE_DECLSPEC_DLL) -# if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) -# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE -# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE - /* module init functions inside the core need no external linkage */ - /* except for Cygwin to handle embedding */ -# if defined(__CYGWIN__) -# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject* -# else /* __CYGWIN__ */ -# define PyMODINIT_FUNC PyObject* -# endif /* __CYGWIN__ */ -# else /* Py_BUILD_CORE */ - /* Building an extension module, or an embedded situation */ - /* public Python functions and data are imported */ - /* Under Cygwin, auto-import functions to prevent compilation */ - /* failures similar to those described at the bottom of 4.1: */ - /* http://docs.python.org/extending/windows.html#a-cookbook-approach */ -# if !defined(__CYGWIN__) -# define PyAPI_FUNC(RTYPE) Py_IMPORTED_SYMBOL RTYPE -# endif /* !__CYGWIN__ */ -# define PyAPI_DATA(RTYPE) extern Py_IMPORTED_SYMBOL RTYPE - /* module init functions outside the core must be exported */ -# if defined(__cplusplus) -# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject* -# else /* __cplusplus */ -# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject* -# endif /* __cplusplus */ -# endif /* Py_BUILD_CORE */ -# endif /* HAVE_DECLSPEC_DLL */ -#endif /* Py_ENABLE_SHARED */ - -/* If no external linkage macros defined by now, create defaults */ -#ifndef PyAPI_FUNC -# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE -#endif -#ifndef PyAPI_DATA -# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE -#endif -#ifndef PyMODINIT_FUNC -# if defined(__cplusplus) -# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject* -# else /* __cplusplus */ -# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject* -# endif /* __cplusplus */ +#ifdef Py_LIMITED_API + // The internal C API must not 
be used with the limited C API: make sure + // that Py_BUILD_CORE macro is not defined in this case. These 3 macros are + // used by exports.h, so only undefine them afterwards. +# undef Py_BUILD_CORE +# undef Py_BUILD_CORE_BUILTIN +# undef Py_BUILD_CORE_MODULE #endif /* limits.h constants that may be missing */ @@ -650,10 +470,33 @@ extern char * _getpty(int *, int, mode_t, int); # define WITH_THREAD #endif -/* Check that ALT_SOABI is consistent with Py_TRACE_REFS: - ./configure --with-trace-refs should must be used to define Py_TRACE_REFS */ -#if defined(ALT_SOABI) && defined(Py_TRACE_REFS) -# error "Py_TRACE_REFS ABI is not compatible with release and debug ABI" +/* Some WebAssembly platforms do not provide a working pthread implementation. + * Thread support is stubbed and any attempt to create a new thread fails. + */ +#if (!defined(HAVE_PTHREAD_STUBS) && \ + (!defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__))) +# define Py_CAN_START_THREADS 1 +#endif + +#ifdef WITH_THREAD +# ifdef Py_BUILD_CORE +# ifdef HAVE_THREAD_LOCAL +# error "HAVE_THREAD_LOCAL is already defined" +# endif +# define HAVE_THREAD_LOCAL 1 +# ifdef thread_local +# define _Py_thread_local thread_local +# elif __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) +# define _Py_thread_local _Thread_local +# elif defined(_MSC_VER) /* AKA NT_THREADS */ +# define _Py_thread_local __declspec(thread) +# elif defined(__GNUC__) /* includes clang */ +# define _Py_thread_local __thread +# else + // fall back to the PyThread_tss_*() API, or ignore. +# undef HAVE_THREAD_LOCAL +# endif +# endif #endif #if defined(__ANDROID__) || defined(__VXWORKS__) @@ -698,6 +541,15 @@ extern char * _getpty(int *, int, mode_t, int); # define _Py__has_builtin(x) 0 #endif +// _Py_TYPEOF(expr) gets the type of an expression. +// +// Example: _Py_TYPEOF(x) x_copy = (x); +// +// The macro is only defined if GCC or clang compiler is used. +#if defined(__GNUC__) || defined(__clang__) +# define _Py_TYPEOF(expr) __typeof__(expr) +#endif + /* A convenient way for code to know if sanitizers are enabled. */ #if defined(__has_feature) @@ -717,4 +569,25 @@ extern char * _getpty(int *, int, mode_t, int); # endif #endif + +/* AIX has __bool__ redefined in it's system header file. */ +#if defined(_AIX) && defined(__bool__) +#undef __bool__ +#endif + +// Make sure we have maximum alignment, even if the current compiler +// does not support max_align_t. Note that: +// - Autoconf reports alignment of unknown types to 0. +// - 'long double' has maximum alignment on *most* platforms, +// looks like the best we can do for pre-C11 compilers. +// - The value is tested, see test_alignof_max_align_t +#if !defined(ALIGNOF_MAX_ALIGN_T) || ALIGNOF_MAX_ALIGN_T == 0 +# undef ALIGNOF_MAX_ALIGN_T +# define ALIGNOF_MAX_ALIGN_T _Alignof(long double) +#endif + +#if defined(__sgi) && !defined(_SGI_MP_SOURCE) +# define _SGI_MP_SOURCE +#endif + #endif /* Py_PYPORT_H */ diff --git a/Include/pystate.h b/Include/pystate.h index e6b4de979c87b88..727b8fbfffe0e67 100644 --- a/Include/pystate.h +++ b/Include/pystate.h @@ -56,7 +56,7 @@ PyAPI_FUNC(void) PyThreadState_Delete(PyThreadState *); The caller must hold the GIL. - See also _PyThreadState_UncheckedGet() and _PyThreadState_GET(). */ + See also PyThreadState_GetUnchecked() and _PyThreadState_GET(). 
*/ PyAPI_FUNC(PyThreadState *) PyThreadState_Get(void); // Alias to PyThreadState_Get() diff --git a/Include/pystats.h b/Include/pystats.h index 87e92aa4f05fa62..acfa32201711e07 100644 --- a/Include/pystats.h +++ b/Include/pystats.h @@ -1,4 +1,9 @@ - +// Statistics on Python performance (public API). +// +// Define _Py_INCREF_STAT_INC() and _Py_DECREF_STAT_INC() used by Py_INCREF() +// and Py_DECREF(). +// +// See Include/cpython/pystats.h for the full API. #ifndef Py_PYSTATS_H #define Py_PYSTATS_H @@ -6,100 +11,16 @@ extern "C" { #endif -#ifdef Py_STATS - -#define SPECIALIZATION_FAILURE_KINDS 32 - -/* Stats for determining who is calling PyEval_EvalFrame */ -#define EVAL_CALL_TOTAL 0 -#define EVAL_CALL_VECTOR 1 -#define EVAL_CALL_GENERATOR 2 -#define EVAL_CALL_LEGACY 3 -#define EVAL_CALL_FUNCTION_VECTORCALL 4 -#define EVAL_CALL_BUILD_CLASS 5 -#define EVAL_CALL_SLOT 6 -#define EVAL_CALL_FUNCTION_EX 7 -#define EVAL_CALL_API 8 -#define EVAL_CALL_METHOD 9 - -#define EVAL_CALL_KINDS 10 - -typedef struct _specialization_stats { - uint64_t success; - uint64_t failure; - uint64_t hit; - uint64_t deferred; - uint64_t miss; - uint64_t deopt; - uint64_t failure_kinds[SPECIALIZATION_FAILURE_KINDS]; -} SpecializationStats; - -typedef struct _opcode_stats { - SpecializationStats specialization; - uint64_t execution_count; - uint64_t pair_count[256]; -} OpcodeStats; - -typedef struct _call_stats { - uint64_t inlined_py_calls; - uint64_t pyeval_calls; - uint64_t frames_pushed; - uint64_t frame_objects_created; - uint64_t eval_calls[EVAL_CALL_KINDS]; -} CallStats; - -typedef struct _object_stats { - uint64_t increfs; - uint64_t decrefs; - uint64_t interpreter_increfs; - uint64_t interpreter_decrefs; - uint64_t allocations; - uint64_t allocations512; - uint64_t allocations4k; - uint64_t allocations_big; - uint64_t frees; - uint64_t to_freelist; - uint64_t from_freelist; - uint64_t new_values; - uint64_t dict_materialized_on_request; - uint64_t dict_materialized_new_key; - uint64_t dict_materialized_too_big; - uint64_t dict_materialized_str_subclass; -} ObjectStats; - -typedef struct _stats { - OpcodeStats opcode_stats[256]; - CallStats call_stats; - ObjectStats object_stats; -} PyStats; - - -PyAPI_DATA(PyStats) _py_stats_struct; -PyAPI_DATA(PyStats *) _py_stats; - -extern void _Py_StatsClear(void); -extern void _Py_PrintSpecializationStats(int to_file); - -#ifdef _PY_INTERPRETER - -#define _Py_INCREF_STAT_INC() do { if (_py_stats) _py_stats->object_stats.interpreter_increfs++; } while (0) -#define _Py_DECREF_STAT_INC() do { if (_py_stats) _py_stats->object_stats.interpreter_decrefs++; } while (0) - -#else - -#define _Py_INCREF_STAT_INC() do { if (_py_stats) _py_stats->object_stats.increfs++; } while (0) -#define _Py_DECREF_STAT_INC() do { if (_py_stats) _py_stats->object_stats.decrefs++; } while (0) - -#endif - +#if defined(Py_STATS) && !defined(Py_LIMITED_API) +# define Py_CPYTHON_PYSTATS_H +# include "cpython/pystats.h" +# undef Py_CPYTHON_PYSTATS_H #else - -#define _Py_INCREF_STAT_INC() ((void)0) -#define _Py_DECREF_STAT_INC() ((void)0) - +# define _Py_INCREF_STAT_INC() ((void)0) +# define _Py_DECREF_STAT_INC() ((void)0) #endif // !Py_STATS #ifdef __cplusplus } #endif -#endif /* !Py_PYSTATs_H */ +#endif // !Py_PYSTATS_H diff --git a/Include/pystrtod.h b/Include/pystrtod.h index fa056d17b6395fc..e83d245eb623afa 100644 --- a/Include/pystrtod.h +++ b/Include/pystrtod.h @@ -18,15 +18,6 @@ PyAPI_FUNC(char *) PyOS_double_to_string(double val, int flags, int *type); -#ifndef Py_LIMITED_API -PyAPI_FUNC(PyObject 
*) _Py_string_to_number_with_underscores( - const char *str, Py_ssize_t len, const char *what, PyObject *obj, void *arg, - PyObject *(*innerfunc)(const char *, Py_ssize_t, void *)); - -PyAPI_FUNC(double) _Py_parse_inf_or_nan(const char *p, char **endptr); -#endif - - /* PyOS_double_to_string's "flags" parameter can be set to 0 or more of: */ #define Py_DTSF_SIGN 0x01 /* always add the sign */ #define Py_DTSF_ADD_DOT_0 0x02 /* if the result is an integer add ".0" */ diff --git a/Include/pythonrun.h b/Include/pythonrun.h index 1b208b734ab1bfe..154c7450cb934f9 100644 --- a/Include/pythonrun.h +++ b/Include/pythonrun.h @@ -13,6 +13,10 @@ PyAPI_FUNC(void) PyErr_Print(void); PyAPI_FUNC(void) PyErr_PrintEx(int); PyAPI_FUNC(void) PyErr_Display(PyObject *, PyObject *, PyObject *); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030C0000 +PyAPI_FUNC(void) PyErr_DisplayException(PyObject *); +#endif + /* Stuff with no proper home (yet) */ PyAPI_DATA(int) (*PyOS_InputHook)(void); diff --git a/Include/pythread.h b/Include/pythread.h index 63714437c496b74..a3216c51d661659 100644 --- a/Include/pythread.h +++ b/Include/pythread.h @@ -21,7 +21,8 @@ PyAPI_FUNC(void) _Py_NO_RETURN PyThread_exit_thread(void); PyAPI_FUNC(unsigned long) PyThread_get_thread_ident(void); #if (defined(__APPLE__) || defined(__linux__) || defined(_WIN32) \ - || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) \ + || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \ + || defined(__OpenBSD__) || defined(__NetBSD__) \ || defined(__DragonFly__) || defined(_AIX)) #define PY_HAVE_THREAD_NATIVE_ID PyAPI_FUNC(unsigned long) PyThread_get_thread_native_id(void); @@ -33,42 +34,18 @@ PyAPI_FUNC(int) PyThread_acquire_lock(PyThread_type_lock, int); #define WAIT_LOCK 1 #define NOWAIT_LOCK 0 -/* PY_TIMEOUT_T is the integral type used to specify timeouts when waiting - on a lock (see PyThread_acquire_lock_timed() below). - PY_TIMEOUT_MAX is the highest usable value (in microseconds) of that - type, and depends on the system threading API. - - NOTE: this isn't the same value as `_thread.TIMEOUT_MAX`. The _thread - module exposes a higher-level API, with timeouts expressed in seconds - and floating-point numbers allowed. -*/ +// PY_TIMEOUT_T is the integral type used to specify timeouts when waiting +// on a lock (see PyThread_acquire_lock_timed() below). #define PY_TIMEOUT_T long long -#if defined(_POSIX_THREADS) - /* PyThread_acquire_lock_timed() uses _PyTime_FromNanoseconds(us * 1000), - convert microseconds to nanoseconds. */ -# define PY_TIMEOUT_MAX (LLONG_MAX / 1000) -#elif defined (NT_THREADS) - // WaitForSingleObject() accepts timeout in milliseconds in the range - // [0; 0xFFFFFFFE] (DWORD type). INFINITE value (0xFFFFFFFF) means no - // timeout. 0xFFFFFFFE milliseconds is around 49.7 days. -# if 0xFFFFFFFELL * 1000 < LLONG_MAX -# define PY_TIMEOUT_MAX (0xFFFFFFFELL * 1000) -# else -# define PY_TIMEOUT_MAX LLONG_MAX -# endif -#else -# define PY_TIMEOUT_MAX LLONG_MAX -#endif - /* If microseconds == 0, the call is non-blocking: it returns immediately even when the lock can't be acquired. If microseconds > 0, the call waits up to the specified duration. If microseconds < 0, the call waits until success (or abnormal failure) - microseconds must be less than PY_TIMEOUT_MAX. Behaviour otherwise is - undefined. + If *microseconds* is greater than PY_TIMEOUT_MAX, clamp the timeout to + PY_TIMEOUT_MAX microseconds. If intr_flag is true and the acquire is interrupted by a signal, then the call will return PY_LOCK_INTR. 
The caller may reattempt to acquire the diff --git a/Include/structmember.h b/Include/structmember.h index 65a777d5f52117c..f6e8fd829892f41 100644 --- a/Include/structmember.h +++ b/Include/structmember.h @@ -5,69 +5,50 @@ extern "C" { #endif -/* Interface to map C struct members to Python object attributes */ - -#include /* For offsetof */ - -/* An array of PyMemberDef structures defines the name, type and offset - of selected members of a C structure. These can be read by - PyMember_GetOne() and set by PyMember_SetOne() (except if their READONLY - flag is set). The array must be terminated with an entry whose name - pointer is NULL. */ - -struct PyMemberDef { - const char *name; - int type; - Py_ssize_t offset; - int flags; - const char *doc; -}; +/* Interface to map C struct members to Python object attributes + * + * This header is deprecated: new code should not use stuff from here. + * New definitions are in descrobject.h. + * + * However, there's nothing wrong with old code continuing to use it, + * and there's not much mainenance overhead in maintaining a few aliases. + * So, don't be too eager to convert old code. + * + * It uses names not prefixed with Py_. + * It is also *not* included from Python.h and must be included individually. + */ + +#include /* For offsetof (not always provided by Python.h) */ /* Types */ -#define T_SHORT 0 -#define T_INT 1 -#define T_LONG 2 -#define T_FLOAT 3 -#define T_DOUBLE 4 -#define T_STRING 5 -#define T_OBJECT 6 -/* XXX the ordering here is weird for binary compatibility */ -#define T_CHAR 7 /* 1-character string */ -#define T_BYTE 8 /* 8-bit signed int */ -/* unsigned variants: */ -#define T_UBYTE 9 -#define T_USHORT 10 -#define T_UINT 11 -#define T_ULONG 12 - -/* Added by Jack: strings contained in the structure */ -#define T_STRING_INPLACE 13 - -/* Added by Lillo: bools contained in the structure (assumed char) */ -#define T_BOOL 14 - -#define T_OBJECT_EX 16 /* Like T_OBJECT, but raises AttributeError - when the value is NULL, instead of - converting to None. 
*/ -#define T_LONGLONG 17 -#define T_ULONGLONG 18 - -#define T_PYSSIZET 19 /* Py_ssize_t */ -#define T_NONE 20 /* Value is always None */ - +#define T_SHORT Py_T_SHORT +#define T_INT Py_T_INT +#define T_LONG Py_T_LONG +#define T_FLOAT Py_T_FLOAT +#define T_DOUBLE Py_T_DOUBLE +#define T_STRING Py_T_STRING +#define T_OBJECT _Py_T_OBJECT +#define T_CHAR Py_T_CHAR +#define T_BYTE Py_T_BYTE +#define T_UBYTE Py_T_UBYTE +#define T_USHORT Py_T_USHORT +#define T_UINT Py_T_UINT +#define T_ULONG Py_T_ULONG +#define T_STRING_INPLACE Py_T_STRING_INPLACE +#define T_BOOL Py_T_BOOL +#define T_OBJECT_EX Py_T_OBJECT_EX +#define T_LONGLONG Py_T_LONGLONG +#define T_ULONGLONG Py_T_ULONGLONG +#define T_PYSSIZET Py_T_PYSSIZET +#define T_NONE _Py_T_NONE /* Flags */ -#define READONLY 1 -#define READ_RESTRICTED 2 -#define PY_WRITE_RESTRICTED 4 +#define READONLY Py_READONLY +#define PY_AUDIT_READ Py_AUDIT_READ +#define READ_RESTRICTED Py_AUDIT_READ +#define PY_WRITE_RESTRICTED _Py_WRITE_RESTRICTED #define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED) -#define PY_AUDIT_READ READ_RESTRICTED - -/* Current API, use this */ -PyAPI_FUNC(PyObject *) PyMember_GetOne(const char *, PyMemberDef *); -PyAPI_FUNC(int) PyMember_SetOne(char *, PyMemberDef *, PyObject *); - #ifdef __cplusplus } diff --git a/Include/structseq.h b/Include/structseq.h index 968711556119581..29e24fee54e6135 100644 --- a/Include/structseq.h +++ b/Include/structseq.h @@ -31,18 +31,15 @@ PyAPI_FUNC(PyTypeObject*) PyStructSequence_NewType(PyStructSequence_Desc *desc); PyAPI_FUNC(PyObject *) PyStructSequence_New(PyTypeObject* type); +PyAPI_FUNC(void) PyStructSequence_SetItem(PyObject*, Py_ssize_t, PyObject*); +PyAPI_FUNC(PyObject*) PyStructSequence_GetItem(PyObject*, Py_ssize_t); + #ifndef Py_LIMITED_API typedef PyTupleObject PyStructSequence; - -/* Macro, *only* to be used to fill in brand new objects */ -#define PyStructSequence_SET_ITEM(op, i, v) PyTuple_SET_ITEM((op), (i), (v)) - -#define PyStructSequence_GET_ITEM(op, i) PyTuple_GET_ITEM((op), (i)) +#define PyStructSequence_SET_ITEM PyStructSequence_SetItem +#define PyStructSequence_GET_ITEM PyStructSequence_GetItem #endif -PyAPI_FUNC(void) PyStructSequence_SetItem(PyObject*, Py_ssize_t, PyObject*); -PyAPI_FUNC(PyObject*) PyStructSequence_GetItem(PyObject*, Py_ssize_t); - #ifdef __cplusplus } #endif diff --git a/Include/sysmodule.h b/Include/sysmodule.h index b5087119b1cae7d..7b14f72ee2e4945 100644 --- a/Include/sysmodule.h +++ b/Include/sysmodule.h @@ -1,6 +1,3 @@ - -/* System module interface */ - #ifndef Py_SYSMODULE_H #define Py_SYSMODULE_H #ifdef __cplusplus @@ -10,10 +7,6 @@ extern "C" { PyAPI_FUNC(PyObject *) PySys_GetObject(const char *); PyAPI_FUNC(int) PySys_SetObject(const char *, PyObject *); -Py_DEPRECATED(3.11) PyAPI_FUNC(void) PySys_SetArgv(int, wchar_t **); -Py_DEPRECATED(3.11) PyAPI_FUNC(void) PySys_SetArgvEx(int, wchar_t **, int); -Py_DEPRECATED(3.11) PyAPI_FUNC(void) PySys_SetPath(const wchar_t *); - PyAPI_FUNC(void) PySys_WriteStdout(const char *format, ...) Py_GCC_ATTRIBUTE((format(printf, 1, 2))); PyAPI_FUNC(void) PySys_WriteStderr(const char *format, ...) @@ -21,14 +14,21 @@ PyAPI_FUNC(void) PySys_WriteStderr(const char *format, ...) 
PyAPI_FUNC(void) PySys_FormatStdout(const char *format, ...); PyAPI_FUNC(void) PySys_FormatStderr(const char *format, ...); -PyAPI_FUNC(void) PySys_ResetWarnOptions(void); -Py_DEPRECATED(3.11) PyAPI_FUNC(void) PySys_AddWarnOption(const wchar_t *); -Py_DEPRECATED(3.11) PyAPI_FUNC(void) PySys_AddWarnOptionUnicode(PyObject *); -Py_DEPRECATED(3.11) PyAPI_FUNC(int) PySys_HasWarnOptions(void); +Py_DEPRECATED(3.13) PyAPI_FUNC(void) PySys_ResetWarnOptions(void); -Py_DEPRECATED(3.11) PyAPI_FUNC(void) PySys_AddXOption(const wchar_t *); PyAPI_FUNC(PyObject *) PySys_GetXOptions(void); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000 +PyAPI_FUNC(int) PySys_Audit( + const char *event, + const char *argFormat, + ...); + +PyAPI_FUNC(int) PySys_AuditTuple( + const char *event, + PyObject *args); +#endif + #ifndef Py_LIMITED_API # define Py_CPYTHON_SYSMODULE_H # include "cpython/sysmodule.h" diff --git a/Include/tracemalloc.h b/Include/tracemalloc.h deleted file mode 100644 index bd14217c199c3cb..000000000000000 --- a/Include/tracemalloc.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef Py_TRACEMALLOC_H -#define Py_TRACEMALLOC_H - -#ifndef Py_LIMITED_API -/* Track an allocated memory block in the tracemalloc module. - Return 0 on success, return -1 on error (failed to allocate memory to store - the trace). - - Return -2 if tracemalloc is disabled. - - If memory block is already tracked, update the existing trace. */ -PyAPI_FUNC(int) PyTraceMalloc_Track( - unsigned int domain, - uintptr_t ptr, - size_t size); - -/* Untrack an allocated memory block in the tracemalloc module. - Do nothing if the block was not tracked. - - Return -2 if tracemalloc is disabled, otherwise return 0. */ -PyAPI_FUNC(int) PyTraceMalloc_Untrack( - unsigned int domain, - uintptr_t ptr); - -/* Get the traceback where a memory block was allocated. - - Return a tuple of (filename: str, lineno: int) tuples. - - Return None if the tracemalloc module is disabled or if the memory block - is not tracked by tracemalloc. - - Raise an exception and return NULL on error. */ -PyAPI_FUNC(PyObject*) _PyTraceMalloc_GetTraceback( - unsigned int domain, - uintptr_t ptr); -#endif - -#endif /* !Py_TRACEMALLOC_H */ diff --git a/Include/unicodeobject.h b/Include/unicodeobject.h index 74474f5bb8f976f..dee00715b3c51d5 100644 --- a/Include/unicodeobject.h +++ b/Include/unicodeobject.h @@ -1,8 +1,6 @@ #ifndef Py_UNICODEOBJECT_H #define Py_UNICODEOBJECT_H -#include // va_list - /* Unicode implementation based on original code by Fredrik Lundh, @@ -55,8 +53,6 @@ Copyright (c) Corporation for National Research Initiatives. * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * -------------------------------------------------------------------- */ -#include - /* === Internal API ======================================================= */ /* --- Internal Unicode Format -------------------------------------------- */ @@ -93,10 +89,6 @@ Copyright (c) Corporation for National Research Initiatives. # endif #endif -#ifdef HAVE_WCHAR_H -# include -#endif - /* Py_UCS4 and Py_UCS2 are typedefs for the respective unicode representations. */ typedef uint32_t Py_UCS4; @@ -626,7 +618,7 @@ PyAPI_FUNC(PyObject*) PyUnicode_AsLatin1String( /* --- ASCII Codecs ------------------------------------------------------- - Only 7-bit ASCII data is excepted. All other codes generate errors. + Only 7-bit ASCII data is expected. All other codes generate errors. 
*/ @@ -965,6 +957,15 @@ PyAPI_FUNC(int) PyUnicode_CompareWithASCIIString( const char *right /* ASCII-encoded string */ ); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030D0000 +/* Compare a Unicode object with UTF-8 encoded C string. + Return 1 if they are equal, or 0 otherwise. + This function does not raise exceptions. */ + +PyAPI_FUNC(int) PyUnicode_EqualToUTF8(PyObject *, const char *); +PyAPI_FUNC(int) PyUnicode_EqualToUTF8AndSize(PyObject *, const char *, Py_ssize_t); +#endif + /* Rich compare two strings and return one of the following: - NULL in case an exception was raised diff --git a/Include/weakrefobject.h b/Include/weakrefobject.h index 8e1fa1b9286a77c..727ba6934bbacbb 100644 --- a/Include/weakrefobject.h +++ b/Include/weakrefobject.h @@ -27,7 +27,8 @@ PyAPI_FUNC(PyObject *) PyWeakref_NewRef(PyObject *ob, PyObject *callback); PyAPI_FUNC(PyObject *) PyWeakref_NewProxy(PyObject *ob, PyObject *callback); -PyAPI_FUNC(PyObject *) PyWeakref_GetObject(PyObject *ref); +Py_DEPRECATED(3.13) PyAPI_FUNC(PyObject *) PyWeakref_GetObject(PyObject *ref); +PyAPI_FUNC(int) PyWeakref_GetRef(PyObject *ref, PyObject **pobj); #ifndef Py_LIMITED_API diff --git a/LICENSE b/LICENSE index 9838d443f9fc02b..f26bcf4d2de6eb1 100644 --- a/LICENSE +++ b/LICENSE @@ -84,7 +84,7 @@ analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation; +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. diff --git a/Lib/_aix_support.py b/Lib/_aix_support.py index 1d8482ff3825071..dadc75c2bf4200e 100644 --- a/Lib/_aix_support.py +++ b/Lib/_aix_support.py @@ -3,12 +3,24 @@ import sys import sysconfig -try: - import subprocess -except ImportError: # pragma: no cover - # _aix_support is used in distutils by setup.py to build C extensions, - # before subprocess dependencies like _posixsubprocess are available. - import _bootsubprocess as subprocess + +# Taken from _osx_support _read_output function +def _read_cmd_output(commandstring, capture_stderr=False): + """Output from successful command execution or None""" + # Similar to os.popen(commandstring, "r").read(), + # but without actually using os.popen because that + # function is not usable during python bootstrap. 
+ import os + import contextlib + fp = open("/tmp/_aix_support.%s"%( + os.getpid(),), "w+b") + + with contextlib.closing(fp) as fp: + if capture_stderr: + cmd = "%s >'%s' 2>&1" % (commandstring, fp.name) + else: + cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + return fp.read() if not os.system(cmd) else None def _aix_tag(vrtl, bd): @@ -36,7 +48,12 @@ def _aix_bos_rte(): If no builddate is found give a value that will satisfy pep425 related queries """ # All AIX systems to have lslpp installed in this location - out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.rte"]) + # subprocess may not be available during python bootstrap + try: + import subprocess + out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.rte"]) + except ImportError: + out = _read_cmd_output("/usr/bin/lslpp -Lqc bos.rte") out = out.decode("utf-8") out = out.strip().split(":") # type: ignore _bd = int(out[-1]) if out[-1] != '' else 9988 diff --git a/Lib/_collections_abc.py b/Lib/_collections_abc.py index c62233b81a5c95c..601107d2d867718 100644 --- a/Lib/_collections_abc.py +++ b/Lib/_collections_abc.py @@ -49,7 +49,7 @@ def _f(): pass "Mapping", "MutableMapping", "MappingView", "KeysView", "ItemsView", "ValuesView", "Sequence", "MutableSequence", - "ByteString", + "ByteString", "Buffer", ] # This module has been renamed from collections.abc to _collections_abc to @@ -439,6 +439,21 @@ def __subclasshook__(cls, C): return NotImplemented +class Buffer(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __buffer__(self, flags: int, /) -> memoryview: + raise NotImplementedError + + @classmethod + def __subclasshook__(cls, C): + if cls is Buffer: + return _check_methods(C, "__buffer__") + return NotImplemented + + class _CallableGenericAlias(GenericAlias): """ Represent `Callable[argtypes, resulttype]`. @@ -481,15 +496,8 @@ def __getitem__(self, item): # rather than the default types.GenericAlias object. Most of the # code is copied from typing's _GenericAlias and the builtin # types.GenericAlias. - if not isinstance(item, tuple): item = (item,) - # A special case in PEP 612 where if X = Callable[P, int], - # then X[int, str] == X[[int, str]]. - if (len(self.__parameters__) == 1 - and _is_param_expr(self.__parameters__[0]) - and item and not _is_param_expr(item[0])): - item = (item,) new_args = super().__getitem__(item).__args__ @@ -517,9 +525,8 @@ def _type_repr(obj): Copied from :mod:`typing` since collections.abc shouldn't depend on that module. + (Keep this roughly in sync with the typing version.) """ - if isinstance(obj, GenericAlias): - return repr(obj) if isinstance(obj, type): if obj.__module__ == 'builtins': return obj.__qualname__ @@ -1064,8 +1071,27 @@ def count(self, value): Sequence.register(range) Sequence.register(memoryview) +class _DeprecateByteStringMeta(ABCMeta): + def __new__(cls, name, bases, namespace, **kwargs): + if name != "ByteString": + import warnings + + warnings._deprecated( + "collections.abc.ByteString", + remove=(3, 14), + ) + return super().__new__(cls, name, bases, namespace, **kwargs) + + def __instancecheck__(cls, instance): + import warnings + + warnings._deprecated( + "collections.abc.ByteString", + remove=(3, 14), + ) + return super().__instancecheck__(instance) -class ByteString(Sequence): +class ByteString(Sequence, metaclass=_DeprecateByteStringMeta): """This unifies bytes and bytearray. XXX Should add all their methods. 
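
As a quick illustration of the Lib/_collections_abc.py hunk above — not part of the patch itself — the new Buffer ABC recognises buffer exporters purely through its __subclasshook__, so no registration or explicit subclassing is needed. The sketch below assumes Python 3.12+ (where PEP 688 exposes __buffer__ from Python code) and uses a made-up ChunkedBuffer class purely for demonstration:

    # Illustrative sketch only (not part of the patch above). ChunkedBuffer is
    # a hypothetical class; it shows how collections.abc.Buffer matches any
    # type that defines __buffer__.
    from collections.abc import Buffer

    class ChunkedBuffer:
        """Hypothetical buffer exporter used only for this example."""

        def __init__(self, data: bytes) -> None:
            self._data = bytearray(data)

        def __buffer__(self, flags: int, /) -> memoryview:
            # A fuller implementation would honour the requested flags;
            # returning a plain view of the backing storage is enough here.
            return memoryview(self._data)

    assert isinstance(ChunkedBuffer(b"abc"), Buffer)  # matched via __buffer__
    assert issubclass(bytes, Buffer)                  # builtin exporters qualify too
    assert not isinstance("text", Buffer)             # str exports no buffer

The _DeprecateByteStringMeta in the same hunk additionally makes isinstance checks against collections.abc.ByteString (and any new subclass of it) emit a DeprecationWarning ahead of its scheduled removal in 3.14.
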
diff --git a/Lib/_compat_pickle.py b/Lib/_compat_pickle.py index 65a94b6b1bdfd53..439f8c02f4b586a 100644 --- a/Lib/_compat_pickle.py +++ b/Lib/_compat_pickle.py @@ -22,7 +22,6 @@ 'tkMessageBox': 'tkinter.messagebox', 'ScrolledText': 'tkinter.scrolledtext', 'Tkconstants': 'tkinter.constants', - 'Tix': 'tkinter.tix', 'ttk': 'tkinter.ttk', 'Tkinter': 'tkinter', 'markupbase': '_markupbase', diff --git a/Lib/_opcode_metadata.py b/Lib/_opcode_metadata.py new file mode 100644 index 000000000000000..5dd06ae487dfcfe --- /dev/null +++ b/Lib/_opcode_metadata.py @@ -0,0 +1,335 @@ +# This file is generated by Tools/cases_generator/generate_cases.py +# from: +# Python/bytecodes.c +# Do not edit! + +_specializations = { + "RESUME": [ + "RESUME_CHECK", + ], + "TO_BOOL": [ + "TO_BOOL_ALWAYS_TRUE", + "TO_BOOL_BOOL", + "TO_BOOL_INT", + "TO_BOOL_LIST", + "TO_BOOL_NONE", + "TO_BOOL_STR", + ], + "BINARY_OP": [ + "BINARY_OP_MULTIPLY_INT", + "BINARY_OP_ADD_INT", + "BINARY_OP_SUBTRACT_INT", + "BINARY_OP_MULTIPLY_FLOAT", + "BINARY_OP_ADD_FLOAT", + "BINARY_OP_SUBTRACT_FLOAT", + "BINARY_OP_ADD_UNICODE", + ], + "BINARY_SUBSCR": [ + "BINARY_SUBSCR_DICT", + "BINARY_SUBSCR_GETITEM", + "BINARY_SUBSCR_LIST_INT", + "BINARY_SUBSCR_STR_INT", + "BINARY_SUBSCR_TUPLE_INT", + ], + "STORE_SUBSCR": [ + "STORE_SUBSCR_DICT", + "STORE_SUBSCR_LIST_INT", + ], + "SEND": [ + "SEND_GEN", + ], + "UNPACK_SEQUENCE": [ + "UNPACK_SEQUENCE_TWO_TUPLE", + "UNPACK_SEQUENCE_TUPLE", + "UNPACK_SEQUENCE_LIST", + ], + "STORE_ATTR": [ + "STORE_ATTR_INSTANCE_VALUE", + "STORE_ATTR_SLOT", + "STORE_ATTR_WITH_HINT", + ], + "LOAD_GLOBAL": [ + "LOAD_GLOBAL_MODULE", + "LOAD_GLOBAL_BUILTIN", + ], + "LOAD_SUPER_ATTR": [ + "LOAD_SUPER_ATTR_ATTR", + "LOAD_SUPER_ATTR_METHOD", + ], + "LOAD_ATTR": [ + "LOAD_ATTR_INSTANCE_VALUE", + "LOAD_ATTR_MODULE", + "LOAD_ATTR_WITH_HINT", + "LOAD_ATTR_SLOT", + "LOAD_ATTR_CLASS", + "LOAD_ATTR_PROPERTY", + "LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN", + "LOAD_ATTR_METHOD_WITH_VALUES", + "LOAD_ATTR_METHOD_NO_DICT", + "LOAD_ATTR_METHOD_LAZY_DICT", + "LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES", + "LOAD_ATTR_NONDESCRIPTOR_NO_DICT", + ], + "COMPARE_OP": [ + "COMPARE_OP_FLOAT", + "COMPARE_OP_INT", + "COMPARE_OP_STR", + ], + "FOR_ITER": [ + "FOR_ITER_LIST", + "FOR_ITER_TUPLE", + "FOR_ITER_RANGE", + "FOR_ITER_GEN", + ], + "CALL": [ + "CALL_BOUND_METHOD_EXACT_ARGS", + "CALL_PY_EXACT_ARGS", + "CALL_PY_WITH_DEFAULTS", + "CALL_TYPE_1", + "CALL_STR_1", + "CALL_TUPLE_1", + "CALL_BUILTIN_CLASS", + "CALL_BUILTIN_O", + "CALL_BUILTIN_FAST", + "CALL_BUILTIN_FAST_WITH_KEYWORDS", + "CALL_LEN", + "CALL_ISINSTANCE", + "CALL_LIST_APPEND", + "CALL_METHOD_DESCRIPTOR_O", + "CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS", + "CALL_METHOD_DESCRIPTOR_NOARGS", + "CALL_METHOD_DESCRIPTOR_FAST", + "CALL_ALLOC_AND_ENTER_INIT", + ], +} + +# An irregular case: +_specializations["BINARY_OP"].append("BINARY_OP_INPLACE_ADD_UNICODE") + +_specialized_opmap = { + 'BINARY_OP_INPLACE_ADD_UNICODE': 3, + 'BINARY_OP_ADD_FLOAT': 150, + 'BINARY_OP_ADD_INT': 151, + 'BINARY_OP_ADD_UNICODE': 152, + 'BINARY_OP_MULTIPLY_FLOAT': 153, + 'BINARY_OP_MULTIPLY_INT': 154, + 'BINARY_OP_SUBTRACT_FLOAT': 155, + 'BINARY_OP_SUBTRACT_INT': 156, + 'BINARY_SUBSCR_DICT': 157, + 'BINARY_SUBSCR_GETITEM': 158, + 'BINARY_SUBSCR_LIST_INT': 159, + 'BINARY_SUBSCR_STR_INT': 160, + 'BINARY_SUBSCR_TUPLE_INT': 161, + 'CALL_ALLOC_AND_ENTER_INIT': 162, + 'CALL_BOUND_METHOD_EXACT_ARGS': 163, + 'CALL_BUILTIN_CLASS': 164, + 'CALL_BUILTIN_FAST': 165, + 'CALL_BUILTIN_FAST_WITH_KEYWORDS': 166, + 'CALL_BUILTIN_O': 167, + 'CALL_ISINSTANCE': 
168, + 'CALL_LEN': 169, + 'CALL_LIST_APPEND': 170, + 'CALL_METHOD_DESCRIPTOR_FAST': 171, + 'CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS': 172, + 'CALL_METHOD_DESCRIPTOR_NOARGS': 173, + 'CALL_METHOD_DESCRIPTOR_O': 174, + 'CALL_PY_EXACT_ARGS': 175, + 'CALL_PY_WITH_DEFAULTS': 176, + 'CALL_STR_1': 177, + 'CALL_TUPLE_1': 178, + 'CALL_TYPE_1': 179, + 'COMPARE_OP_FLOAT': 180, + 'COMPARE_OP_INT': 181, + 'COMPARE_OP_STR': 182, + 'FOR_ITER_GEN': 183, + 'FOR_ITER_LIST': 184, + 'FOR_ITER_RANGE': 185, + 'FOR_ITER_TUPLE': 186, + 'LOAD_ATTR_CLASS': 187, + 'LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN': 188, + 'LOAD_ATTR_INSTANCE_VALUE': 189, + 'LOAD_ATTR_METHOD_LAZY_DICT': 190, + 'LOAD_ATTR_METHOD_NO_DICT': 191, + 'LOAD_ATTR_METHOD_WITH_VALUES': 192, + 'LOAD_ATTR_MODULE': 193, + 'LOAD_ATTR_NONDESCRIPTOR_NO_DICT': 194, + 'LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES': 195, + 'LOAD_ATTR_PROPERTY': 196, + 'LOAD_ATTR_SLOT': 197, + 'LOAD_ATTR_WITH_HINT': 198, + 'LOAD_GLOBAL_BUILTIN': 199, + 'LOAD_GLOBAL_MODULE': 200, + 'LOAD_SUPER_ATTR_ATTR': 201, + 'LOAD_SUPER_ATTR_METHOD': 202, + 'RESUME_CHECK': 203, + 'SEND_GEN': 204, + 'STORE_ATTR_INSTANCE_VALUE': 205, + 'STORE_ATTR_SLOT': 206, + 'STORE_ATTR_WITH_HINT': 207, + 'STORE_SUBSCR_DICT': 208, + 'STORE_SUBSCR_LIST_INT': 209, + 'TO_BOOL_ALWAYS_TRUE': 210, + 'TO_BOOL_BOOL': 211, + 'TO_BOOL_INT': 212, + 'TO_BOOL_LIST': 213, + 'TO_BOOL_NONE': 214, + 'TO_BOOL_STR': 215, + 'UNPACK_SEQUENCE_LIST': 216, + 'UNPACK_SEQUENCE_TUPLE': 217, + 'UNPACK_SEQUENCE_TWO_TUPLE': 218, +} + +opmap = { + 'CACHE': 0, + 'BEFORE_ASYNC_WITH': 1, + 'BEFORE_WITH': 2, + 'BINARY_SLICE': 4, + 'BINARY_SUBSCR': 5, + 'CHECK_EG_MATCH': 6, + 'CHECK_EXC_MATCH': 7, + 'CLEANUP_THROW': 8, + 'DELETE_SUBSCR': 9, + 'END_ASYNC_FOR': 10, + 'END_FOR': 11, + 'END_SEND': 12, + 'EXIT_INIT_CHECK': 13, + 'FORMAT_SIMPLE': 14, + 'FORMAT_WITH_SPEC': 15, + 'GET_AITER': 16, + 'RESERVED': 17, + 'GET_ANEXT': 18, + 'GET_ITER': 19, + 'GET_LEN': 20, + 'GET_YIELD_FROM_ITER': 21, + 'INTERPRETER_EXIT': 22, + 'LOAD_ASSERTION_ERROR': 23, + 'LOAD_BUILD_CLASS': 24, + 'LOAD_LOCALS': 25, + 'MAKE_FUNCTION': 26, + 'MATCH_KEYS': 27, + 'MATCH_MAPPING': 28, + 'MATCH_SEQUENCE': 29, + 'NOP': 30, + 'POP_EXCEPT': 31, + 'POP_TOP': 32, + 'PUSH_EXC_INFO': 33, + 'PUSH_NULL': 34, + 'RETURN_GENERATOR': 35, + 'RETURN_VALUE': 36, + 'SETUP_ANNOTATIONS': 37, + 'STORE_SLICE': 38, + 'STORE_SUBSCR': 39, + 'TO_BOOL': 40, + 'UNARY_INVERT': 41, + 'UNARY_NEGATIVE': 42, + 'UNARY_NOT': 43, + 'WITH_EXCEPT_START': 44, + 'BINARY_OP': 45, + 'BUILD_CONST_KEY_MAP': 46, + 'BUILD_LIST': 47, + 'BUILD_MAP': 48, + 'BUILD_SET': 49, + 'BUILD_SLICE': 50, + 'BUILD_STRING': 51, + 'BUILD_TUPLE': 52, + 'CALL': 53, + 'CALL_FUNCTION_EX': 54, + 'CALL_INTRINSIC_1': 55, + 'CALL_INTRINSIC_2': 56, + 'CALL_KW': 57, + 'COMPARE_OP': 58, + 'CONTAINS_OP': 59, + 'CONVERT_VALUE': 60, + 'COPY': 61, + 'COPY_FREE_VARS': 62, + 'DELETE_ATTR': 63, + 'DELETE_DEREF': 64, + 'DELETE_FAST': 65, + 'DELETE_GLOBAL': 66, + 'DELETE_NAME': 67, + 'DICT_MERGE': 68, + 'DICT_UPDATE': 69, + 'ENTER_EXECUTOR': 70, + 'EXTENDED_ARG': 71, + 'FOR_ITER': 72, + 'GET_AWAITABLE': 73, + 'IMPORT_FROM': 74, + 'IMPORT_NAME': 75, + 'IS_OP': 76, + 'JUMP_BACKWARD': 77, + 'JUMP_BACKWARD_NO_INTERRUPT': 78, + 'JUMP_FORWARD': 79, + 'LIST_APPEND': 80, + 'LIST_EXTEND': 81, + 'LOAD_ATTR': 82, + 'LOAD_CONST': 83, + 'LOAD_DEREF': 84, + 'LOAD_FAST': 85, + 'LOAD_FAST_AND_CLEAR': 86, + 'LOAD_FAST_CHECK': 87, + 'LOAD_FAST_LOAD_FAST': 88, + 'LOAD_FROM_DICT_OR_DEREF': 89, + 'LOAD_FROM_DICT_OR_GLOBALS': 90, + 'LOAD_GLOBAL': 91, + 'LOAD_NAME': 92, + 'LOAD_SUPER_ATTR': 
93, + 'MAKE_CELL': 94, + 'MAP_ADD': 95, + 'MATCH_CLASS': 96, + 'POP_JUMP_IF_FALSE': 97, + 'POP_JUMP_IF_NONE': 98, + 'POP_JUMP_IF_NOT_NONE': 99, + 'POP_JUMP_IF_TRUE': 100, + 'RAISE_VARARGS': 101, + 'RERAISE': 102, + 'RETURN_CONST': 103, + 'SEND': 104, + 'SET_ADD': 105, + 'SET_FUNCTION_ATTRIBUTE': 106, + 'SET_UPDATE': 107, + 'STORE_ATTR': 108, + 'STORE_DEREF': 109, + 'STORE_FAST': 110, + 'STORE_FAST_LOAD_FAST': 111, + 'STORE_FAST_STORE_FAST': 112, + 'STORE_GLOBAL': 113, + 'STORE_NAME': 114, + 'SWAP': 115, + 'UNPACK_EX': 116, + 'UNPACK_SEQUENCE': 117, + 'YIELD_VALUE': 118, + 'RESUME': 149, + 'INSTRUMENTED_RESUME': 236, + 'INSTRUMENTED_END_FOR': 237, + 'INSTRUMENTED_END_SEND': 238, + 'INSTRUMENTED_RETURN_VALUE': 239, + 'INSTRUMENTED_RETURN_CONST': 240, + 'INSTRUMENTED_YIELD_VALUE': 241, + 'INSTRUMENTED_LOAD_SUPER_ATTR': 242, + 'INSTRUMENTED_FOR_ITER': 243, + 'INSTRUMENTED_CALL': 244, + 'INSTRUMENTED_CALL_KW': 245, + 'INSTRUMENTED_CALL_FUNCTION_EX': 246, + 'INSTRUMENTED_INSTRUCTION': 247, + 'INSTRUMENTED_JUMP_FORWARD': 248, + 'INSTRUMENTED_JUMP_BACKWARD': 249, + 'INSTRUMENTED_POP_JUMP_IF_TRUE': 250, + 'INSTRUMENTED_POP_JUMP_IF_FALSE': 251, + 'INSTRUMENTED_POP_JUMP_IF_NONE': 252, + 'INSTRUMENTED_POP_JUMP_IF_NOT_NONE': 253, + 'INSTRUMENTED_LINE': 254, + 'JUMP': 256, + 'JUMP_NO_INTERRUPT': 257, + 'LOAD_CLOSURE': 258, + 'LOAD_METHOD': 259, + 'LOAD_SUPER_METHOD': 260, + 'LOAD_ZERO_SUPER_ATTR': 261, + 'LOAD_ZERO_SUPER_METHOD': 262, + 'POP_BLOCK': 263, + 'SETUP_CLEANUP': 264, + 'SETUP_FINALLY': 265, + 'SETUP_WITH': 266, + 'STORE_FAST_MAYBE_NULL': 267, +} +MIN_INSTRUMENTED_OPCODE = 236 +HAVE_ARGUMENT = 45 diff --git a/Lib/_pydatetime.py b/Lib/_pydatetime.py new file mode 100644 index 000000000000000..bca2acf1fc88cf9 --- /dev/null +++ b/Lib/_pydatetime.py @@ -0,0 +1,2649 @@ +"""Concrete date/time and related types. + +See http://www.iana.org/time-zones/repository/tz-link.html for +time zone and DST data sources. +""" + +__all__ = ("date", "datetime", "time", "timedelta", "timezone", "tzinfo", + "MINYEAR", "MAXYEAR", "UTC") + + +import time as _time +import math as _math +import sys +from operator import index as _index + +def _cmp(x, y): + return 0 if x == y else 1 if x > y else -1 + +def _get_class_module(self): + module_name = self.__class__.__module__ + if module_name == '_pydatetime': + return 'datetime' + else: + return module_name + +MINYEAR = 1 +MAXYEAR = 9999 +_MAXORDINAL = 3652059 # date.max.toordinal() + +# Utility functions, adapted from Python's Demo/classes/Dates.py, which +# also assumes the current Gregorian calendar indefinitely extended in +# both directions. Difference: Dates.py calls January 1 of year 0 day +# number 1. The code here calls January 1 of year 1 day number 1. This is +# to match the definition of the "proleptic Gregorian" calendar in Dershowitz +# and Reingold's "Calendrical Calculations", where it's the base calendar +# for all computations. See the book for algorithms for converting between +# proleptic Gregorian ordinals and many other calendar systems. + +# -1 is a placeholder for indexing purposes. +_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + +_DAYS_BEFORE_MONTH = [-1] # -1 is a placeholder for indexing purposes. +dbm = 0 +for dim in _DAYS_IN_MONTH[1:]: + _DAYS_BEFORE_MONTH.append(dbm) + dbm += dim +del dbm, dim + +def _is_leap(year): + "year -> 1 if leap year, else 0." + return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) + +def _days_before_year(year): + "year -> number of days before January 1st of year." 
+ y = year - 1 + return y*365 + y//4 - y//100 + y//400 + +def _days_in_month(year, month): + "year, month -> number of days in that month in that year." + assert 1 <= month <= 12, month + if month == 2 and _is_leap(year): + return 29 + return _DAYS_IN_MONTH[month] + +def _days_before_month(year, month): + "year, month -> number of days in year preceding first day of month." + assert 1 <= month <= 12, 'month must be in 1..12' + return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year)) + +def _ymd2ord(year, month, day): + "year, month, day -> ordinal, considering 01-Jan-0001 as day 1." + assert 1 <= month <= 12, 'month must be in 1..12' + dim = _days_in_month(year, month) + assert 1 <= day <= dim, ('day must be in 1..%d' % dim) + return (_days_before_year(year) + + _days_before_month(year, month) + + day) + +_DI400Y = _days_before_year(401) # number of days in 400 years +_DI100Y = _days_before_year(101) # " " " " 100 " +_DI4Y = _days_before_year(5) # " " " " 4 " + +# A 4-year cycle has an extra leap day over what we'd get from pasting +# together 4 single years. +assert _DI4Y == 4 * 365 + 1 + +# Similarly, a 400-year cycle has an extra leap day over what we'd get from +# pasting together 4 100-year cycles. +assert _DI400Y == 4 * _DI100Y + 1 + +# OTOH, a 100-year cycle has one fewer leap day than we'd get from +# pasting together 25 4-year cycles. +assert _DI100Y == 25 * _DI4Y - 1 + +def _ord2ymd(n): + "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." + + # n is a 1-based index, starting at 1-Jan-1. The pattern of leap years + # repeats exactly every 400 years. The basic strategy is to find the + # closest 400-year boundary at or before n, then work with the offset + # from that boundary to n. Life is much clearer if we subtract 1 from + # n first -- then the values of n at 400-year boundaries are exactly + # those divisible by _DI400Y: + # + # D M Y n n-1 + # -- --- ---- ---------- ---------------- + # 31 Dec -400 -_DI400Y -_DI400Y -1 + # 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary + # ... + # 30 Dec 000 -1 -2 + # 31 Dec 000 0 -1 + # 1 Jan 001 1 0 400-year boundary + # 2 Jan 001 2 1 + # 3 Jan 001 3 2 + # ... + # 31 Dec 400 _DI400Y _DI400Y -1 + # 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary + n -= 1 + n400, n = divmod(n, _DI400Y) + year = n400 * 400 + 1 # ..., -399, 1, 401, ... + + # Now n is the (non-negative) offset, in days, from January 1 of year, to + # the desired date. Now compute how many 100-year cycles precede n. + # Note that it's possible for n100 to equal 4! In that case 4 full + # 100-year cycles precede the desired day, which implies the desired + # day is December 31 at the end of a 400-year cycle. + n100, n = divmod(n, _DI100Y) + + # Now compute how many 4-year cycles precede it. + n4, n = divmod(n, _DI4Y) + + # And now how many single years. Again n1 can be 4, and again meaning + # that the desired day is December 31 at the end of the 4-year cycle. + n1, n = divmod(n, 365) + + year += n100 * 100 + n4 * 4 + n1 + if n1 == 4 or n100 == 4: + assert n == 0 + return year-1, 12, 31 + + # Now the year is correct, and n is the offset from January 1. We find + # the month via an estimate that's either exact or one too large. 
+ leapyear = n1 == 3 and (n4 != 24 or n100 == 3) + assert leapyear == _is_leap(year) + month = (n + 50) >> 5 + preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear) + if preceding > n: # estimate is too large + month -= 1 + preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear) + n -= preceding + assert 0 <= n < _days_in_month(year, month) + + # Now the year and month are correct, and n is the offset from the + # start of that month: we're done! + return year, month, n+1 + +# Month and day names. For localized versions, see the calendar module. +_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] +_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + + +def _build_struct_time(y, m, d, hh, mm, ss, dstflag): + wday = (_ymd2ord(y, m, d) + 6) % 7 + dnum = _days_before_month(y, m) + d + return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag)) + +def _format_time(hh, mm, ss, us, timespec='auto'): + specs = { + 'hours': '{:02d}', + 'minutes': '{:02d}:{:02d}', + 'seconds': '{:02d}:{:02d}:{:02d}', + 'milliseconds': '{:02d}:{:02d}:{:02d}.{:03d}', + 'microseconds': '{:02d}:{:02d}:{:02d}.{:06d}' + } + + if timespec == 'auto': + # Skip trailing microseconds when us==0. + timespec = 'microseconds' if us else 'seconds' + elif timespec == 'milliseconds': + us //= 1000 + try: + fmt = specs[timespec] + except KeyError: + raise ValueError('Unknown timespec value') + else: + return fmt.format(hh, mm, ss, us) + +def _format_offset(off, sep=':'): + s = '' + if off is not None: + if off.days < 0: + sign = "-" + off = -off + else: + sign = "+" + hh, mm = divmod(off, timedelta(hours=1)) + mm, ss = divmod(mm, timedelta(minutes=1)) + s += "%s%02d%s%02d" % (sign, hh, sep, mm) + if ss or ss.microseconds: + s += "%s%02d" % (sep, ss.seconds) + + if ss.microseconds: + s += '.%06d' % ss.microseconds + return s + +# Correctly substitute for %z and %Z escapes in strftime formats. +def _wrap_strftime(object, format, timetuple): + # Don't call utcoffset() or tzname() unless actually needed. + freplace = None # the string to use for %f + zreplace = None # the string to use for %z + colonzreplace = None # the string to use for %:z + Zreplace = None # the string to use for %Z + + # Scan format for %z, %:z and %Z escapes, replacing as needed. 
+ newformat = [] + push = newformat.append + i, n = 0, len(format) + while i < n: + ch = format[i] + i += 1 + if ch == '%': + if i < n: + ch = format[i] + i += 1 + if ch == 'f': + if freplace is None: + freplace = '%06d' % getattr(object, + 'microsecond', 0) + newformat.append(freplace) + elif ch == 'z': + if zreplace is None: + if hasattr(object, "utcoffset"): + zreplace = _format_offset(object.utcoffset(), sep="") + else: + zreplace = "" + assert '%' not in zreplace + newformat.append(zreplace) + elif ch == ':': + if i < n: + ch2 = format[i] + i += 1 + if ch2 == 'z': + if colonzreplace is None: + if hasattr(object, "utcoffset"): + colonzreplace = _format_offset(object.utcoffset(), sep=":") + else: + colonzreplace = "" + assert '%' not in colonzreplace + newformat.append(colonzreplace) + else: + push('%') + push(ch) + push(ch2) + elif ch == 'Z': + if Zreplace is None: + Zreplace = "" + if hasattr(object, "tzname"): + s = object.tzname() + if s is not None: + # strftime is going to have at this: escape % + Zreplace = s.replace('%', '%%') + newformat.append(Zreplace) + else: + push('%') + push(ch) + else: + push('%') + else: + push(ch) + newformat = "".join(newformat) + return _time.strftime(newformat, timetuple) + +# Helpers for parsing the result of isoformat() +def _is_ascii_digit(c): + return c in "0123456789" + +def _find_isoformat_datetime_separator(dtstr): + # See the comment in _datetimemodule.c:_find_isoformat_datetime_separator + len_dtstr = len(dtstr) + if len_dtstr == 7: + return 7 + + assert len_dtstr > 7 + date_separator = "-" + week_indicator = "W" + + if dtstr[4] == date_separator: + if dtstr[5] == week_indicator: + if len_dtstr < 8: + raise ValueError("Invalid ISO string") + if len_dtstr > 8 and dtstr[8] == date_separator: + if len_dtstr == 9: + raise ValueError("Invalid ISO string") + if len_dtstr > 10 and _is_ascii_digit(dtstr[10]): + # This is as far as we need to resolve the ambiguity for + # the moment - if we have YYYY-Www-##, the separator is + # either a hyphen at 8 or a number at 10. + # + # We'll assume it's a hyphen at 8 because it's way more + # likely that someone will use a hyphen as a separator than + # a number, but at this point it's really best effort + # because this is an extension of the spec anyway. + # TODO(pganssle): Document this + return 8 + return 10 + else: + # YYYY-Www (8) + return 8 + else: + # YYYY-MM-DD (10) + return 10 + else: + if dtstr[4] == week_indicator: + # YYYYWww (7) or YYYYWwwd (8) + idx = 7 + while idx < len_dtstr: + if not _is_ascii_digit(dtstr[idx]): + break + idx += 1 + + if idx < 9: + return idx + + if idx % 2 == 0: + # If the index of the last number is even, it's YYYYWwwd + return 7 + else: + return 8 + else: + # YYYYMMDD (8) + return 8 + + +def _parse_isoformat_date(dtstr): + # It is assumed that this is an ASCII-only string of lengths 7, 8 or 10, + # see the comment on Modules/_datetimemodule.c:_find_isoformat_datetime_separator + assert len(dtstr) in (7, 8, 10) + year = int(dtstr[0:4]) + has_sep = dtstr[4] == '-' + + pos = 4 + has_sep + if dtstr[pos:pos + 1] == "W": + # YYYY-?Www-?D? 
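+        # For example, both '2020-W01-1' and '2020W011' reach this branch
+        # and map to the Gregorian date (2019, 12, 30) via
+        # _isoweek_to_gregorian() below.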
+ pos += 1 + weekno = int(dtstr[pos:pos + 2]) + pos += 2 + + dayno = 1 + if len(dtstr) > pos: + if (dtstr[pos:pos + 1] == '-') != has_sep: + raise ValueError("Inconsistent use of dash separator") + + pos += has_sep + + dayno = int(dtstr[pos:pos + 1]) + + return list(_isoweek_to_gregorian(year, weekno, dayno)) + else: + month = int(dtstr[pos:pos + 2]) + pos += 2 + if (dtstr[pos:pos + 1] == "-") != has_sep: + raise ValueError("Inconsistent use of dash separator") + + pos += has_sep + day = int(dtstr[pos:pos + 2]) + + return [year, month, day] + + +_FRACTION_CORRECTION = [100000, 10000, 1000, 100, 10] + + +def _parse_hh_mm_ss_ff(tstr): + # Parses things of the form HH[:?MM[:?SS[{.,}fff[fff]]]] + len_str = len(tstr) + + time_comps = [0, 0, 0, 0] + pos = 0 + for comp in range(0, 3): + if (len_str - pos) < 2: + raise ValueError("Incomplete time component") + + time_comps[comp] = int(tstr[pos:pos+2]) + + pos += 2 + next_char = tstr[pos:pos+1] + + if comp == 0: + has_sep = next_char == ':' + + if not next_char or comp >= 2: + break + + if has_sep and next_char != ':': + raise ValueError("Invalid time separator: %c" % next_char) + + pos += has_sep + + if pos < len_str: + if tstr[pos] not in '.,': + raise ValueError("Invalid microsecond component") + else: + pos += 1 + + len_remainder = len_str - pos + + if len_remainder >= 6: + to_parse = 6 + else: + to_parse = len_remainder + + time_comps[3] = int(tstr[pos:(pos+to_parse)]) + if to_parse < 6: + time_comps[3] *= _FRACTION_CORRECTION[to_parse-1] + if (len_remainder > to_parse + and not all(map(_is_ascii_digit, tstr[(pos+to_parse):]))): + raise ValueError("Non-digit values in unparsed fraction") + + return time_comps + +def _parse_isoformat_time(tstr): + # Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]] + len_str = len(tstr) + if len_str < 2: + raise ValueError("Isoformat time too short") + + # This is equivalent to re.search('[+-Z]', tstr), but faster + tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1 or tstr.find('Z') + 1) + timestr = tstr[:tz_pos-1] if tz_pos > 0 else tstr + + time_comps = _parse_hh_mm_ss_ff(timestr) + + tzi = None + if tz_pos == len_str and tstr[-1] == 'Z': + tzi = timezone.utc + elif tz_pos > 0: + tzstr = tstr[tz_pos:] + + # Valid time zone strings are: + # HH len: 2 + # HHMM len: 4 + # HH:MM len: 5 + # HHMMSS len: 6 + # HHMMSS.f+ len: 7+ + # HH:MM:SS len: 8 + # HH:MM:SS.f+ len: 10+ + + if len(tzstr) in (0, 1, 3): + raise ValueError("Malformed time zone string") + + tz_comps = _parse_hh_mm_ss_ff(tzstr) + + if all(x == 0 for x in tz_comps): + tzi = timezone.utc + else: + tzsign = -1 if tstr[tz_pos - 1] == '-' else 1 + + td = timedelta(hours=tz_comps[0], minutes=tz_comps[1], + seconds=tz_comps[2], microseconds=tz_comps[3]) + + tzi = timezone(tzsign * td) + + time_comps.append(tzi) + + return time_comps + +# tuple[int, int, int] -> tuple[int, int, int] version of date.fromisocalendar +def _isoweek_to_gregorian(year, week, day): + # Year is bounded this way because 9999-12-31 is (9999, 52, 5) + if not MINYEAR <= year <= MAXYEAR: + raise ValueError(f"Year is out of range: {year}") + + if not 0 < week < 53: + out_of_range = True + + if week == 53: + # ISO years have 53 weeks in them on years starting with a + # Thursday and leap years starting on a Wednesday + first_weekday = _ymd2ord(year, 1, 1) % 7 + if (first_weekday == 4 or (first_weekday == 3 and + _is_leap(year))): + out_of_range = False + + if out_of_range: + raise ValueError(f"Invalid week: {week}") + + if not 0 < day < 8: + raise ValueError(f"Invalid 
weekday: {day} (range is [1, 7])") + + # Now compute the offset from (Y, 1, 1) in days: + day_offset = (week - 1) * 7 + (day - 1) + + # Calculate the ordinal day for monday, week 1 + day_1 = _isoweek1monday(year) + ord_day = day_1 + day_offset + + return _ord2ymd(ord_day) + + +# Just raise TypeError if the arg isn't None or a string. +def _check_tzname(name): + if name is not None and not isinstance(name, str): + raise TypeError("tzinfo.tzname() must return None or string, " + "not '%s'" % type(name)) + +# name is the offset-producing method, "utcoffset" or "dst". +# offset is what it returned. +# If offset isn't None or timedelta, raises TypeError. +# If offset is None, returns None. +# Else offset is checked for being in range. +# If it is, its integer value is returned. Else ValueError is raised. +def _check_utc_offset(name, offset): + assert name in ("utcoffset", "dst") + if offset is None: + return + if not isinstance(offset, timedelta): + raise TypeError("tzinfo.%s() must return None " + "or timedelta, not '%s'" % (name, type(offset))) + if not -timedelta(1) < offset < timedelta(1): + raise ValueError("%s()=%s, must be strictly between " + "-timedelta(hours=24) and timedelta(hours=24)" % + (name, offset)) + +def _check_date_fields(year, month, day): + year = _index(year) + month = _index(month) + day = _index(day) + if not MINYEAR <= year <= MAXYEAR: + raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year) + if not 1 <= month <= 12: + raise ValueError('month must be in 1..12', month) + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + raise ValueError('day must be in 1..%d' % dim, day) + return year, month, day + +def _check_time_fields(hour, minute, second, microsecond, fold): + hour = _index(hour) + minute = _index(minute) + second = _index(second) + microsecond = _index(microsecond) + if not 0 <= hour <= 23: + raise ValueError('hour must be in 0..23', hour) + if not 0 <= minute <= 59: + raise ValueError('minute must be in 0..59', minute) + if not 0 <= second <= 59: + raise ValueError('second must be in 0..59', second) + if not 0 <= microsecond <= 999999: + raise ValueError('microsecond must be in 0..999999', microsecond) + if fold not in (0, 1): + raise ValueError('fold must be either 0 or 1', fold) + return hour, minute, second, microsecond, fold + +def _check_tzinfo_arg(tz): + if tz is not None and not isinstance(tz, tzinfo): + raise TypeError("tzinfo argument must be None or of a tzinfo subclass") + +def _cmperror(x, y): + raise TypeError("can't compare '%s' to '%s'" % ( + type(x).__name__, type(y).__name__)) + +def _divide_and_round(a, b): + """divide a by b and round result to the nearest integer + + When the ratio is exactly half-way between two integers, + the even integer is returned. + """ + # Based on the reference implementation for divmod_near + # in Objects/longobject.c. + q, r = divmod(a, b) + # round up if either r / b > 0.5, or r / b == 0.5 and q is odd. + # The expression r / b > 0.5 is equivalent to 2 * r > b if b is + # positive, 2 * r < b if b negative. + r *= 2 + greater_than_half = r > b if b > 0 else r < b + if greater_than_half or r == b and q % 2 == 1: + q += 1 + + return q + + +class timedelta: + """Represent the difference between two datetime objects. 
+ + Supported operators: + + - add, subtract timedelta + - unary plus, minus, abs + - compare to timedelta + - multiply, divide by int + + In addition, datetime supports subtraction of two datetime objects + returning a timedelta, and addition or subtraction of a datetime + and a timedelta giving a datetime. + + Representation: (days, seconds, microseconds). + """ + # The representation of (days, seconds, microseconds) was chosen + # arbitrarily; the exact rationale originally specified in the docstring + # was "Because I felt like it." + + __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' + + def __new__(cls, days=0, seconds=0, microseconds=0, + milliseconds=0, minutes=0, hours=0, weeks=0): + # Doing this efficiently and accurately in C is going to be difficult + # and error-prone, due to ubiquitous overflow possibilities, and that + # C double doesn't have enough bits of precision to represent + # microseconds over 10K years faithfully. The code here tries to make + # explicit where go-fast assumptions can be relied on, in order to + # guide the C implementation; it's way more convoluted than speed- + # ignoring auto-overflow-to-long idiomatic Python could be. + + # XXX Check that all inputs are ints or floats. + + # Final values, all integer. + # s and us fit in 32-bit signed ints; d isn't bounded. + d = s = us = 0 + + # Normalize everything to days, seconds, microseconds. + days += weeks*7 + seconds += minutes*60 + hours*3600 + microseconds += milliseconds*1000 + + # Get rid of all fractions, and normalize s and us. + # Take a deep breath . + if isinstance(days, float): + dayfrac, days = _math.modf(days) + daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.)) + assert daysecondswhole == int(daysecondswhole) # can't overflow + s = int(daysecondswhole) + assert days == int(days) + d = int(days) + else: + daysecondsfrac = 0.0 + d = days + assert isinstance(daysecondsfrac, float) + assert abs(daysecondsfrac) <= 1.0 + assert isinstance(d, int) + assert abs(s) <= 24 * 3600 + # days isn't referenced again before redefinition + + if isinstance(seconds, float): + secondsfrac, seconds = _math.modf(seconds) + assert seconds == int(seconds) + seconds = int(seconds) + secondsfrac += daysecondsfrac + assert abs(secondsfrac) <= 2.0 + else: + secondsfrac = daysecondsfrac + # daysecondsfrac isn't referenced again + assert isinstance(secondsfrac, float) + assert abs(secondsfrac) <= 2.0 + + assert isinstance(seconds, int) + days, seconds = divmod(seconds, 24*3600) + d += days + s += int(seconds) # can't overflow + assert isinstance(s, int) + assert abs(s) <= 2 * 24 * 3600 + # seconds isn't referenced again before redefinition + + usdouble = secondsfrac * 1e6 + assert abs(usdouble) < 2.1e6 # exact value not critical + # secondsfrac isn't referenced again + + if isinstance(microseconds, float): + microseconds = round(microseconds + usdouble) + seconds, microseconds = divmod(microseconds, 1000000) + days, seconds = divmod(seconds, 24*3600) + d += days + s += seconds + else: + microseconds = int(microseconds) + seconds, microseconds = divmod(microseconds, 1000000) + days, seconds = divmod(seconds, 24*3600) + d += days + s += seconds + microseconds = round(microseconds + usdouble) + assert isinstance(s, int) + assert isinstance(microseconds, int) + assert abs(s) <= 3 * 24 * 3600 + assert abs(microseconds) < 3.1e6 + + # Just a little bit of carrying possible for microseconds and seconds. 
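+        # Informal example of the overall normalization: timedelta(hours=25,
+        # microseconds=1_500_000) ends up with days=1, seconds=3601,
+        # microseconds=500000 after this final carry.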
+ seconds, us = divmod(microseconds, 1000000) + s += seconds + days, s = divmod(s, 24*3600) + d += days + + assert isinstance(d, int) + assert isinstance(s, int) and 0 <= s < 24*3600 + assert isinstance(us, int) and 0 <= us < 1000000 + + if abs(d) > 999999999: + raise OverflowError("timedelta # of days is too large: %d" % d) + + self = object.__new__(cls) + self._days = d + self._seconds = s + self._microseconds = us + self._hashcode = -1 + return self + + def __repr__(self): + args = [] + if self._days: + args.append("days=%d" % self._days) + if self._seconds: + args.append("seconds=%d" % self._seconds) + if self._microseconds: + args.append("microseconds=%d" % self._microseconds) + if not args: + args.append('0') + return "%s.%s(%s)" % (_get_class_module(self), + self.__class__.__qualname__, + ', '.join(args)) + + def __str__(self): + mm, ss = divmod(self._seconds, 60) + hh, mm = divmod(mm, 60) + s = "%d:%02d:%02d" % (hh, mm, ss) + if self._days: + def plural(n): + return n, abs(n) != 1 and "s" or "" + s = ("%d day%s, " % plural(self._days)) + s + if self._microseconds: + s = s + ".%06d" % self._microseconds + return s + + def total_seconds(self): + """Total seconds in the duration.""" + return ((self.days * 86400 + self.seconds) * 10**6 + + self.microseconds) / 10**6 + + # Read-only field accessors + @property + def days(self): + """days""" + return self._days + + @property + def seconds(self): + """seconds""" + return self._seconds + + @property + def microseconds(self): + """microseconds""" + return self._microseconds + + def __add__(self, other): + if isinstance(other, timedelta): + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta(self._days + other._days, + self._seconds + other._seconds, + self._microseconds + other._microseconds) + return NotImplemented + + __radd__ = __add__ + + def __sub__(self, other): + if isinstance(other, timedelta): + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta(self._days - other._days, + self._seconds - other._seconds, + self._microseconds - other._microseconds) + return NotImplemented + + def __rsub__(self, other): + if isinstance(other, timedelta): + return -self + other + return NotImplemented + + def __neg__(self): + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta(-self._days, + -self._seconds, + -self._microseconds) + + def __pos__(self): + return self + + def __abs__(self): + if self._days < 0: + return -self + else: + return self + + def __mul__(self, other): + if isinstance(other, int): + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta(self._days * other, + self._seconds * other, + self._microseconds * other) + if isinstance(other, float): + usec = self._to_microseconds() + a, b = other.as_integer_ratio() + return timedelta(0, 0, _divide_and_round(usec * a, b)) + return NotImplemented + + __rmul__ = __mul__ + + def _to_microseconds(self): + return ((self._days * (24*3600) + self._seconds) * 1000000 + + self._microseconds) + + def __floordiv__(self, other): + if not isinstance(other, (int, timedelta)): + return NotImplemented + usec = self._to_microseconds() + if isinstance(other, timedelta): + return usec // other._to_microseconds() + if isinstance(other, int): + return timedelta(0, 0, usec // other) + + def __truediv__(self, other): + if not isinstance(other, (int, float, timedelta)): + return 
NotImplemented + usec = self._to_microseconds() + if isinstance(other, timedelta): + return usec / other._to_microseconds() + if isinstance(other, int): + return timedelta(0, 0, _divide_and_round(usec, other)) + if isinstance(other, float): + a, b = other.as_integer_ratio() + return timedelta(0, 0, _divide_and_round(b * usec, a)) + + def __mod__(self, other): + if isinstance(other, timedelta): + r = self._to_microseconds() % other._to_microseconds() + return timedelta(0, 0, r) + return NotImplemented + + def __divmod__(self, other): + if isinstance(other, timedelta): + q, r = divmod(self._to_microseconds(), + other._to_microseconds()) + return q, timedelta(0, 0, r) + return NotImplemented + + # Comparisons of timedelta objects with other. + + def __eq__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) == 0 + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) <= 0 + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) < 0 + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) >= 0 + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, timedelta): + return self._cmp(other) > 0 + else: + return NotImplemented + + def _cmp(self, other): + assert isinstance(other, timedelta) + return _cmp(self._getstate(), other._getstate()) + + def __hash__(self): + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode + + def __bool__(self): + return (self._days != 0 or + self._seconds != 0 or + self._microseconds != 0) + + # Pickle support. + + def _getstate(self): + return (self._days, self._seconds, self._microseconds) + + def __reduce__(self): + return (self.__class__, self._getstate()) + +timedelta.min = timedelta(-999999999) +timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, + microseconds=999999) +timedelta.resolution = timedelta(microseconds=1) + +class date: + """Concrete date type. + + Constructors: + + __new__() + fromtimestamp() + today() + fromordinal() + + Operators: + + __repr__, __str__ + __eq__, __le__, __lt__, __ge__, __gt__, __hash__ + __add__, __radd__, __sub__ (add/radd only with timedelta arg) + + Methods: + + timetuple() + toordinal() + weekday() + isoweekday(), isocalendar(), isoformat() + ctime() + strftime() + + Properties (readonly): + year, month, day + """ + __slots__ = '_year', '_month', '_day', '_hashcode' + + def __new__(cls, year, month=None, day=None): + """Constructor. + + Arguments: + + year, month, day (required, base 1) + """ + if (month is None and + isinstance(year, (bytes, str)) and len(year) == 4 and + 1 <= ord(year[2:3]) <= 12): + # Pickle support + if isinstance(year, str): + try: + year = year.encode('latin1') + except UnicodeEncodeError: + # More informative error message. + raise ValueError( + "Failed to encode latin1 string when unpickling " + "a date object. " + "pickle.load(data, encoding='latin1') is assumed.") + self = object.__new__(cls) + self.__setstate(year) + self._hashcode = -1 + return self + year, month, day = _check_date_fields(year, month, day) + self = object.__new__(cls) + self._year = year + self._month = month + self._day = day + self._hashcode = -1 + return self + + # Additional constructors + + @classmethod + def fromtimestamp(cls, t): + "Construct a date from a POSIX timestamp (like time.time())." 
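+        # For example, date.fromtimestamp(0) is the local date of the Unix
+        # epoch: 1970-01-01 in UTC and zones east of it, but 1969-12-31 in
+        # zones west of Greenwich, depending on the local timezone.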
+ y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t) + return cls(y, m, d) + + @classmethod + def today(cls): + "Construct a date from time.time()." + t = _time.time() + return cls.fromtimestamp(t) + + @classmethod + def fromordinal(cls, n): + """Construct a date from a proleptic Gregorian ordinal. + + January 1 of year 1 is day 1. Only the year, month and day are + non-zero in the result. + """ + y, m, d = _ord2ymd(n) + return cls(y, m, d) + + @classmethod + def fromisoformat(cls, date_string): + """Construct a date from a string in ISO 8601 format.""" + if not isinstance(date_string, str): + raise TypeError('fromisoformat: argument must be str') + + if len(date_string) not in (7, 8, 10): + raise ValueError(f'Invalid isoformat string: {date_string!r}') + + try: + return cls(*_parse_isoformat_date(date_string)) + except Exception: + raise ValueError(f'Invalid isoformat string: {date_string!r}') + + @classmethod + def fromisocalendar(cls, year, week, day): + """Construct a date from the ISO year, week number and weekday. + + This is the inverse of the date.isocalendar() function""" + return cls(*_isoweek_to_gregorian(year, week, day)) + + # Conversions to string + + def __repr__(self): + """Convert to formal string, for repr(). + + >>> d = date(2010, 1, 1) + >>> repr(d) + 'datetime.date(2010, 1, 1)' + """ + return "%s.%s(%d, %d, %d)" % (_get_class_module(self), + self.__class__.__qualname__, + self._year, + self._month, + self._day) + # XXX These shouldn't depend on time.localtime(), because that + # clips the usable dates to [1970 .. 2038). At least ctime() is + # easily done without using strftime() -- that's better too because + # strftime("%c", ...) is locale specific. + + + def ctime(self): + "Return ctime() style string." + weekday = self.toordinal() % 7 or 7 + return "%s %s %2d 00:00:00 %04d" % ( + _DAYNAMES[weekday], + _MONTHNAMES[self._month], + self._day, self._year) + + def strftime(self, format): + """ + Format using strftime(). + + Example: "%d/%m/%Y, %H:%M:%S" + """ + return _wrap_strftime(self, format, self.timetuple()) + + def __format__(self, fmt): + if not isinstance(fmt, str): + raise TypeError("must be str, not %s" % type(fmt).__name__) + if len(fmt) != 0: + return self.strftime(fmt) + return str(self) + + def isoformat(self): + """Return the date formatted according to ISO. + + This is 'YYYY-MM-DD'. + + References: + - http://www.w3.org/TR/NOTE-datetime + - http://www.cl.cam.ac.uk/~mgk25/iso-time.html + """ + return "%04d-%02d-%02d" % (self._year, self._month, self._day) + + __str__ = isoformat + + # Read-only field accessors + @property + def year(self): + """year (1-9999)""" + return self._year + + @property + def month(self): + """month (1-12)""" + return self._month + + @property + def day(self): + """day (1-31)""" + return self._day + + # Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__, + # __hash__ (and helpers) + + def timetuple(self): + "Return local time tuple compatible with time.localtime()." + return _build_struct_time(self._year, self._month, self._day, + 0, 0, 0, -1) + + def toordinal(self): + """Return proleptic Gregorian ordinal for the year, month and day. + + January 1 of year 1 is day 1. Only the year, month and day values + contribute to the result. 
+ """ + return _ymd2ord(self._year, self._month, self._day) + + def replace(self, year=None, month=None, day=None): + """Return a new date with new values for the specified fields.""" + if year is None: + year = self._year + if month is None: + month = self._month + if day is None: + day = self._day + return type(self)(year, month, day) + + __replace__ = replace + + # Comparisons of date objects with other. + + def __eq__(self, other): + if isinstance(other, date): + return self._cmp(other) == 0 + return NotImplemented + + def __le__(self, other): + if isinstance(other, date): + return self._cmp(other) <= 0 + return NotImplemented + + def __lt__(self, other): + if isinstance(other, date): + return self._cmp(other) < 0 + return NotImplemented + + def __ge__(self, other): + if isinstance(other, date): + return self._cmp(other) >= 0 + return NotImplemented + + def __gt__(self, other): + if isinstance(other, date): + return self._cmp(other) > 0 + return NotImplemented + + def _cmp(self, other): + assert isinstance(other, date) + y, m, d = self._year, self._month, self._day + y2, m2, d2 = other._year, other._month, other._day + return _cmp((y, m, d), (y2, m2, d2)) + + def __hash__(self): + "Hash." + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode + + # Computations + + def __add__(self, other): + "Add a date to a timedelta." + if isinstance(other, timedelta): + o = self.toordinal() + other.days + if 0 < o <= _MAXORDINAL: + return type(self).fromordinal(o) + raise OverflowError("result out of range") + return NotImplemented + + __radd__ = __add__ + + def __sub__(self, other): + """Subtract two dates, or a date and a timedelta.""" + if isinstance(other, timedelta): + return self + timedelta(-other.days) + if isinstance(other, date): + days1 = self.toordinal() + days2 = other.toordinal() + return timedelta(days1 - days2) + return NotImplemented + + def weekday(self): + "Return day of the week, where Monday == 0 ... Sunday == 6." + return (self.toordinal() + 6) % 7 + + # Day-of-the-week and week-of-the-year, according to ISO + + def isoweekday(self): + "Return day of the week, where Monday == 1 ... Sunday == 7." + # 1-Jan-0001 is a Monday + return self.toordinal() % 7 or 7 + + def isocalendar(self): + """Return a named tuple containing ISO year, week number, and weekday. + + The first ISO week of the year is the (Mon-Sun) week + containing the year's first Thursday; everything else derives + from that. + + The first week is 1; Monday is 1 ... Sunday is 7. + + ISO calendar algorithm taken from + http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm + (used with permission) + """ + year = self._year + week1monday = _isoweek1monday(year) + today = _ymd2ord(self._year, self._month, self._day) + # Internally, week and day have origin 0 + week, day = divmod(today - week1monday, 7) + if week < 0: + year -= 1 + week1monday = _isoweek1monday(year) + week, day = divmod(today - week1monday, 7) + elif week >= 52: + if today >= _isoweek1monday(year+1): + year += 1 + week = 0 + return _IsoCalendarDate(year, week+1, day+1) + + # Pickle support. 
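+    # A rough sketch of the packed form used below: the year is split into
+    # two bytes, so for example
+    #
+    #     date(2024, 3, 1)._getstate() == (bytes([7, 232, 3, 1]),)
+    #
+    # since 2024 == 7*256 + 232.  __setstate() reverses the packing.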
+ + def _getstate(self): + yhi, ylo = divmod(self._year, 256) + return bytes([yhi, ylo, self._month, self._day]), + + def __setstate(self, string): + yhi, ylo, self._month, self._day = string + self._year = yhi * 256 + ylo + + def __reduce__(self): + return (self.__class__, self._getstate()) + +_date_class = date # so functions w/ args named "date" can get at the class + +date.min = date(1, 1, 1) +date.max = date(9999, 12, 31) +date.resolution = timedelta(days=1) + + +class tzinfo: + """Abstract base class for time zone info classes. + + Subclasses must override the tzname(), utcoffset() and dst() methods. + """ + __slots__ = () + + def tzname(self, dt): + "datetime -> string name of time zone." + raise NotImplementedError("tzinfo subclass must override tzname()") + + def utcoffset(self, dt): + "datetime -> timedelta, positive for east of UTC, negative for west of UTC" + raise NotImplementedError("tzinfo subclass must override utcoffset()") + + def dst(self, dt): + """datetime -> DST offset as timedelta, positive for east of UTC. + + Return 0 if DST not in effect. utcoffset() must include the DST + offset. + """ + raise NotImplementedError("tzinfo subclass must override dst()") + + def fromutc(self, dt): + "datetime in UTC -> datetime in local time." + + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + dtoff = dt.utcoffset() + if dtoff is None: + raise ValueError("fromutc() requires a non-None utcoffset() " + "result") + + # See the long comment block at the end of this file for an + # explanation of this algorithm. + dtdst = dt.dst() + if dtdst is None: + raise ValueError("fromutc() requires a non-None dst() result") + delta = dtoff - dtdst + if delta: + dt += delta + dtdst = dt.dst() + if dtdst is None: + raise ValueError("fromutc(): dt.dst gave inconsistent " + "results; cannot convert") + return dt + dtdst + + # Pickle support. + + def __reduce__(self): + getinitargs = getattr(self, "__getinitargs__", None) + if getinitargs: + args = getinitargs() + else: + args = () + return (self.__class__, args, self.__getstate__()) + + +class IsoCalendarDate(tuple): + + def __new__(cls, year, week, weekday, /): + return super().__new__(cls, (year, week, weekday)) + + @property + def year(self): + return self[0] + + @property + def week(self): + return self[1] + + @property + def weekday(self): + return self[2] + + def __reduce__(self): + # This code is intended to pickle the object without making the + # class public. See https://bugs.python.org/msg352381 + return (tuple, (tuple(self),)) + + def __repr__(self): + return (f'{self.__class__.__name__}' + f'(year={self[0]}, week={self[1]}, weekday={self[2]})') + + +_IsoCalendarDate = IsoCalendarDate +del IsoCalendarDate +_tzinfo_class = tzinfo + +class time: + """Time with time zone. + + Constructors: + + __new__() + + Operators: + + __repr__, __str__ + __eq__, __le__, __lt__, __ge__, __gt__, __hash__ + + Methods: + + strftime() + isoformat() + utcoffset() + tzname() + dst() + + Properties (readonly): + hour, minute, second, microsecond, tzinfo, fold + """ + __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold' + + def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0): + """Constructor. 
+ + Arguments: + + hour, minute (required) + second, microsecond (default to zero) + tzinfo (default to None) + fold (keyword only, default to zero) + """ + if (isinstance(hour, (bytes, str)) and len(hour) == 6 and + ord(hour[0:1])&0x7F < 24): + # Pickle support + if isinstance(hour, str): + try: + hour = hour.encode('latin1') + except UnicodeEncodeError: + # More informative error message. + raise ValueError( + "Failed to encode latin1 string when unpickling " + "a time object. " + "pickle.load(data, encoding='latin1') is assumed.") + self = object.__new__(cls) + self.__setstate(hour, minute or None) + self._hashcode = -1 + return self + hour, minute, second, microsecond, fold = _check_time_fields( + hour, minute, second, microsecond, fold) + _check_tzinfo_arg(tzinfo) + self = object.__new__(cls) + self._hour = hour + self._minute = minute + self._second = second + self._microsecond = microsecond + self._tzinfo = tzinfo + self._hashcode = -1 + self._fold = fold + return self + + # Read-only field accessors + @property + def hour(self): + """hour (0-23)""" + return self._hour + + @property + def minute(self): + """minute (0-59)""" + return self._minute + + @property + def second(self): + """second (0-59)""" + return self._second + + @property + def microsecond(self): + """microsecond (0-999999)""" + return self._microsecond + + @property + def tzinfo(self): + """timezone info object""" + return self._tzinfo + + @property + def fold(self): + return self._fold + + # Standard conversions, __hash__ (and helpers) + + # Comparisons of time objects with other. + + def __eq__(self, other): + if isinstance(other, time): + return self._cmp(other, allow_mixed=True) == 0 + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, time): + return self._cmp(other) <= 0 + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, time): + return self._cmp(other) < 0 + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, time): + return self._cmp(other) >= 0 + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, time): + return self._cmp(other) > 0 + else: + return NotImplemented + + def _cmp(self, other, allow_mixed=False): + assert isinstance(other, time) + mytz = self._tzinfo + ottz = other._tzinfo + myoff = otoff = None + + if mytz is ottz: + base_compare = True + else: + myoff = self.utcoffset() + otoff = other.utcoffset() + base_compare = myoff == otoff + + if base_compare: + return _cmp((self._hour, self._minute, self._second, + self._microsecond), + (other._hour, other._minute, other._second, + other._microsecond)) + if myoff is None or otoff is None: + if allow_mixed: + return 2 # arbitrary non-zero value + else: + raise TypeError("cannot compare naive and aware times") + myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1) + othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1) + return _cmp((myhhmm, self._second, self._microsecond), + (othhmm, other._second, other._microsecond)) + + def __hash__(self): + """Hash.""" + if self._hashcode == -1: + if self.fold: + t = self.replace(fold=0) + else: + t = self + tzoff = t.utcoffset() + if not tzoff: # zero or None + self._hashcode = hash(t._getstate()[0]) + else: + h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, + timedelta(hours=1)) + assert not m % timedelta(minutes=1), "whole minute" + m //= timedelta(minutes=1) + if 0 <= h < 24: + self._hashcode = hash(time(h, m, self.second, 
self.microsecond)) + else: + self._hashcode = hash((h, m, self.second, self.microsecond)) + return self._hashcode + + # Conversion to string + + def _tzstr(self): + """Return formatted timezone offset (+xx:xx) or an empty string.""" + off = self.utcoffset() + return _format_offset(off) + + def __repr__(self): + """Convert to formal string, for repr().""" + if self._microsecond != 0: + s = ", %d, %d" % (self._second, self._microsecond) + elif self._second != 0: + s = ", %d" % self._second + else: + s = "" + s= "%s.%s(%d, %d%s)" % (_get_class_module(self), + self.__class__.__qualname__, + self._hour, self._minute, s) + if self._tzinfo is not None: + assert s[-1:] == ")" + s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" + if self._fold: + assert s[-1:] == ")" + s = s[:-1] + ", fold=1)" + return s + + def isoformat(self, timespec='auto'): + """Return the time formatted according to ISO. + + The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional + part is omitted if self.microsecond == 0. + + The optional argument timespec specifies the number of additional + terms of the time to include. Valid options are 'auto', 'hours', + 'minutes', 'seconds', 'milliseconds' and 'microseconds'. + """ + s = _format_time(self._hour, self._minute, self._second, + self._microsecond, timespec) + tz = self._tzstr() + if tz: + s += tz + return s + + __str__ = isoformat + + @classmethod + def fromisoformat(cls, time_string): + """Construct a time from a string in one of the ISO 8601 formats.""" + if not isinstance(time_string, str): + raise TypeError('fromisoformat: argument must be str') + + # The spec actually requires that time-only ISO 8601 strings start with + # T, but the extended format allows this to be omitted as long as there + # is no ambiguity with date strings. + time_string = time_string.removeprefix('T') + + try: + return cls(*_parse_isoformat_time(time_string)) + except Exception: + raise ValueError(f'Invalid isoformat string: {time_string!r}') + + def strftime(self, format): + """Format using strftime(). The date part of the timestamp passed + to underlying strftime should not be used. + """ + # The year must be >= 1000 else Python's strftime implementation + # can raise a bogus exception. + timetuple = (1900, 1, 1, + self._hour, self._minute, self._second, + 0, 1, -1) + return _wrap_strftime(self, format, timetuple) + + def __format__(self, fmt): + if not isinstance(fmt, str): + raise TypeError("must be str, not %s" % type(fmt).__name__) + if len(fmt) != 0: + return self.strftime(fmt) + return str(self) + + # Timezone functions + + def utcoffset(self): + """Return the timezone offset as timedelta, positive east of UTC + (negative west of UTC).""" + if self._tzinfo is None: + return None + offset = self._tzinfo.utcoffset(None) + _check_utc_offset("utcoffset", offset) + return offset + + def tzname(self): + """Return the timezone name. + + Note that the name is 100% informational -- there's no requirement that + it mean anything in particular. For example, "GMT", "UTC", "-500", + "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. + """ + if self._tzinfo is None: + return None + name = self._tzinfo.tzname(None) + _check_tzname(name) + return name + + def dst(self): + """Return 0 if DST is not in effect, or the DST offset (as timedelta + positive eastward) if DST is in effect. 
+ + This is purely informational; the DST offset has already been added to + the UTC offset returned by utcoffset() if applicable, so there's no + need to consult dst() unless you're interested in displaying the DST + info. + """ + if self._tzinfo is None: + return None + offset = self._tzinfo.dst(None) + _check_utc_offset("dst", offset) + return offset + + def replace(self, hour=None, minute=None, second=None, microsecond=None, + tzinfo=True, *, fold=None): + """Return a new time with new values for the specified fields.""" + if hour is None: + hour = self.hour + if minute is None: + minute = self.minute + if second is None: + second = self.second + if microsecond is None: + microsecond = self.microsecond + if tzinfo is True: + tzinfo = self.tzinfo + if fold is None: + fold = self._fold + return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold) + + __replace__ = replace + + # Pickle support. + + def _getstate(self, protocol=3): + us2, us3 = divmod(self._microsecond, 256) + us1, us2 = divmod(us2, 256) + h = self._hour + if self._fold and protocol > 3: + h += 128 + basestate = bytes([h, self._minute, self._second, + us1, us2, us3]) + if self._tzinfo is None: + return (basestate,) + else: + return (basestate, self._tzinfo) + + def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") + h, self._minute, self._second, us1, us2, us3 = string + if h > 127: + self._fold = 1 + self._hour = h - 128 + else: + self._fold = 0 + self._hour = h + self._microsecond = (((us1 << 8) | us2) << 8) | us3 + self._tzinfo = tzinfo + + def __reduce_ex__(self, protocol): + return (self.__class__, self._getstate(protocol)) + + def __reduce__(self): + return self.__reduce_ex__(2) + +_time_class = time # so functions w/ args named "time" can get at the class + +time.min = time(0, 0, 0) +time.max = time(23, 59, 59, 999999) +time.resolution = timedelta(microseconds=1) + + +class datetime(date): + """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]]) + + The year, month and day arguments are required. tzinfo may be None, or an + instance of a tzinfo subclass. The remaining arguments may be ints. + """ + __slots__ = time.__slots__ + + def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0, + microsecond=0, tzinfo=None, *, fold=0): + if (isinstance(year, (bytes, str)) and len(year) == 10 and + 1 <= ord(year[2:3])&0x7F <= 12): + # Pickle support + if isinstance(year, str): + try: + year = bytes(year, 'latin1') + except UnicodeEncodeError: + # More informative error message. + raise ValueError( + "Failed to encode latin1 string when unpickling " + "a datetime object. 
" + "pickle.load(data, encoding='latin1') is assumed.") + self = object.__new__(cls) + self.__setstate(year, month) + self._hashcode = -1 + return self + year, month, day = _check_date_fields(year, month, day) + hour, minute, second, microsecond, fold = _check_time_fields( + hour, minute, second, microsecond, fold) + _check_tzinfo_arg(tzinfo) + self = object.__new__(cls) + self._year = year + self._month = month + self._day = day + self._hour = hour + self._minute = minute + self._second = second + self._microsecond = microsecond + self._tzinfo = tzinfo + self._hashcode = -1 + self._fold = fold + return self + + # Read-only field accessors + @property + def hour(self): + """hour (0-23)""" + return self._hour + + @property + def minute(self): + """minute (0-59)""" + return self._minute + + @property + def second(self): + """second (0-59)""" + return self._second + + @property + def microsecond(self): + """microsecond (0-999999)""" + return self._microsecond + + @property + def tzinfo(self): + """timezone info object""" + return self._tzinfo + + @property + def fold(self): + return self._fold + + @classmethod + def _fromtimestamp(cls, t, utc, tz): + """Construct a datetime from a POSIX timestamp (like time.time()). + + A timezone info object may be passed in as well. + """ + frac, t = _math.modf(t) + us = round(frac * 1e6) + if us >= 1000000: + t += 1 + us -= 1000000 + elif us < 0: + t -= 1 + us += 1000000 + + converter = _time.gmtime if utc else _time.localtime + y, m, d, hh, mm, ss, weekday, jday, dst = converter(t) + ss = min(ss, 59) # clamp out leap seconds if the platform has them + result = cls(y, m, d, hh, mm, ss, us, tz) + if tz is None and not utc: + # As of version 2015f max fold in IANA database is + # 23 hours at 1969-09-30 13:00:00 in Kwajalein. + # Let's probe 24 hours in the past to detect a transition: + max_fold_seconds = 24 * 3600 + + # On Windows localtime_s throws an OSError for negative values, + # thus we can't perform fold detection for values of time less + # than the max time fold. See comments in _datetimemodule's + # version of this method for more details. + if t < max_fold_seconds and sys.platform.startswith("win"): + return result + + y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6] + probe1 = cls(y, m, d, hh, mm, ss, us, tz) + trans = result - probe1 - timedelta(0, max_fold_seconds) + if trans.days < 0: + y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6] + probe2 = cls(y, m, d, hh, mm, ss, us, tz) + if probe2 == result: + result._fold = 1 + elif tz is not None: + result = tz.fromutc(result) + return result + + @classmethod + def fromtimestamp(cls, timestamp, tz=None): + """Construct a datetime from a POSIX timestamp (like time.time()). + + A timezone info object may be passed in as well. + """ + _check_tzinfo_arg(tz) + + return cls._fromtimestamp(timestamp, tz is not None, tz) + + @classmethod + def utcfromtimestamp(cls, t): + """Construct a naive UTC datetime from a POSIX timestamp.""" + import warnings + warnings.warn("datetime.utcfromtimestamp() is deprecated and scheduled " + "for removal in a future version. Use timezone-aware " + "objects to represent datetimes in UTC: " + "datetime.datetime.fromtimestamp(t, datetime.UTC).", + DeprecationWarning, + stacklevel=2) + return cls._fromtimestamp(t, True, None) + + @classmethod + def now(cls, tz=None): + "Construct a datetime from time.time() and optional time zone info." 
+ t = _time.time() + return cls.fromtimestamp(t, tz) + + @classmethod + def utcnow(cls): + "Construct a UTC datetime from time.time()." + import warnings + warnings.warn("datetime.utcnow() is deprecated and scheduled for " + "removal in a future version. Instead, Use timezone-aware " + "objects to represent datetimes in UTC: " + "datetime.datetime.now(datetime.UTC).", + DeprecationWarning, + stacklevel=2) + t = _time.time() + return cls._fromtimestamp(t, True, None) + + @classmethod + def combine(cls, date, time, tzinfo=True): + "Construct a datetime from a given date and a given time." + if not isinstance(date, _date_class): + raise TypeError("date argument must be a date instance") + if not isinstance(time, _time_class): + raise TypeError("time argument must be a time instance") + if tzinfo is True: + tzinfo = time.tzinfo + return cls(date.year, date.month, date.day, + time.hour, time.minute, time.second, time.microsecond, + tzinfo, fold=time.fold) + + @classmethod + def fromisoformat(cls, date_string): + """Construct a datetime from a string in one of the ISO 8601 formats.""" + if not isinstance(date_string, str): + raise TypeError('fromisoformat: argument must be str') + + if len(date_string) < 7: + raise ValueError(f'Invalid isoformat string: {date_string!r}') + + # Split this at the separator + try: + separator_location = _find_isoformat_datetime_separator(date_string) + dstr = date_string[0:separator_location] + tstr = date_string[(separator_location+1):] + + date_components = _parse_isoformat_date(dstr) + except ValueError: + raise ValueError( + f'Invalid isoformat string: {date_string!r}') from None + + if tstr: + try: + time_components = _parse_isoformat_time(tstr) + except ValueError: + raise ValueError( + f'Invalid isoformat string: {date_string!r}') from None + else: + time_components = [0, 0, 0, 0, None] + + return cls(*(date_components + time_components)) + + def timetuple(self): + "Return local time tuple compatible with time.localtime()." + dst = self.dst() + if dst is None: + dst = -1 + elif dst: + dst = 1 + else: + dst = 0 + return _build_struct_time(self.year, self.month, self.day, + self.hour, self.minute, self.second, + dst) + + def _mktime(self): + """Return integer POSIX timestamp.""" + epoch = datetime(1970, 1, 1) + max_fold_seconds = 24 * 3600 + t = (self - epoch) // timedelta(0, 1) + def local(u): + y, m, d, hh, mm, ss = _time.localtime(u)[:6] + return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1) + + # Our goal is to solve t = local(u) for u. + a = local(t) - t + u1 = t - a + t1 = local(u1) + if t1 == t: + # We found one solution, but it may not be the one we need. + # Look for an earlier solution (if `fold` is 0), or a + # later one (if `fold` is 1). + u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold] + b = local(u2) - u2 + if a == b: + return u1 + else: + b = t1 - u1 + assert a != b + u2 = t - b + t2 = local(u2) + if t2 == t: + return u2 + if t1 == t: + return u1 + # We have found both offsets a and b, but neither t - a nor t - b is + # a solution. This means t is in the gap. + return (max, min)[self.fold](u1, u2) + + + def timestamp(self): + "Return POSIX timestamp as float" + if self._tzinfo is None: + s = self._mktime() + return s + self.microsecond / 1e6 + else: + return (self - _EPOCH).total_seconds() + + def utctimetuple(self): + "Return UTC time tuple compatible with time.gmtime()." 
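+        # Informal example: for an aware datetime at 12:00 with utcoffset()
+        # == timedelta(hours=-5), the tuple below describes 17:00 of the
+        # same day, and tm_isdst is always 0 because the result is in UTC.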
+ offset = self.utcoffset() + if offset: + self -= offset + y, m, d = self.year, self.month, self.day + hh, mm, ss = self.hour, self.minute, self.second + return _build_struct_time(y, m, d, hh, mm, ss, 0) + + def date(self): + "Return the date part." + return date(self._year, self._month, self._day) + + def time(self): + "Return the time part, with tzinfo None." + return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold) + + def timetz(self): + "Return the time part, with same tzinfo." + return time(self.hour, self.minute, self.second, self.microsecond, + self._tzinfo, fold=self.fold) + + def replace(self, year=None, month=None, day=None, hour=None, + minute=None, second=None, microsecond=None, tzinfo=True, + *, fold=None): + """Return a new datetime with new values for the specified fields.""" + if year is None: + year = self.year + if month is None: + month = self.month + if day is None: + day = self.day + if hour is None: + hour = self.hour + if minute is None: + minute = self.minute + if second is None: + second = self.second + if microsecond is None: + microsecond = self.microsecond + if tzinfo is True: + tzinfo = self.tzinfo + if fold is None: + fold = self.fold + return type(self)(year, month, day, hour, minute, second, + microsecond, tzinfo, fold=fold) + + __replace__ = replace + + def _local_timezone(self): + if self.tzinfo is None: + ts = self._mktime() + # Detect gap + ts2 = self.replace(fold=1-self.fold)._mktime() + if ts2 != ts: # This happens in a gap or a fold + if (ts2 > ts) == self.fold: + ts = ts2 + else: + ts = (self - _EPOCH) // timedelta(seconds=1) + localtm = _time.localtime(ts) + local = datetime(*localtm[:6]) + # Extract TZ data + gmtoff = localtm.tm_gmtoff + zone = localtm.tm_zone + return timezone(timedelta(seconds=gmtoff), zone) + + def astimezone(self, tz=None): + if tz is None: + tz = self._local_timezone() + elif not isinstance(tz, tzinfo): + raise TypeError("tz argument must be an instance of tzinfo") + + mytz = self.tzinfo + if mytz is None: + mytz = self._local_timezone() + myoffset = mytz.utcoffset(self) + else: + myoffset = mytz.utcoffset(self) + if myoffset is None: + mytz = self.replace(tzinfo=None)._local_timezone() + myoffset = mytz.utcoffset(self) + + if tz is mytz: + return self + + # Convert self to UTC, and attach the new time zone object. + utc = (self - myoffset).replace(tzinfo=tz) + + # Convert from UTC to tz's local time. + return tz.fromutc(utc) + + # Ways to produce a string. + + def ctime(self): + "Return ctime() style string." + weekday = self.toordinal() % 7 or 7 + return "%s %s %2d %02d:%02d:%02d %04d" % ( + _DAYNAMES[weekday], + _MONTHNAMES[self._month], + self._day, + self._hour, self._minute, self._second, + self._year) + + def isoformat(self, sep='T', timespec='auto'): + """Return the time formatted according to ISO. + + The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'. + By default, the fractional part is omitted if self.microsecond == 0. + + If self.tzinfo is not None, the UTC offset is also attached, giving + giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'. + + Optional argument sep specifies the separator between date and + time, default 'T'. + + The optional argument timespec specifies the number of additional + terms of the time to include. Valid options are 'auto', 'hours', + 'minutes', 'seconds', 'milliseconds' and 'microseconds'. 
+ """ + s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) + + _format_time(self._hour, self._minute, self._second, + self._microsecond, timespec)) + + off = self.utcoffset() + tz = _format_offset(off) + if tz: + s += tz + + return s + + def __repr__(self): + """Convert to formal string, for repr().""" + L = [self._year, self._month, self._day, # These are never zero + self._hour, self._minute, self._second, self._microsecond] + if L[-1] == 0: + del L[-1] + if L[-1] == 0: + del L[-1] + s = "%s.%s(%s)" % (_get_class_module(self), + self.__class__.__qualname__, + ", ".join(map(str, L))) + if self._tzinfo is not None: + assert s[-1:] == ")" + s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" + if self._fold: + assert s[-1:] == ")" + s = s[:-1] + ", fold=1)" + return s + + def __str__(self): + "Convert to string, for str()." + return self.isoformat(sep=' ') + + @classmethod + def strptime(cls, date_string, format): + 'string, format -> new datetime parsed from a string (like time.strptime()).' + import _strptime + return _strptime._strptime_datetime(cls, date_string, format) + + def utcoffset(self): + """Return the timezone offset as timedelta positive east of UTC (negative west of + UTC).""" + if self._tzinfo is None: + return None + offset = self._tzinfo.utcoffset(self) + _check_utc_offset("utcoffset", offset) + return offset + + def tzname(self): + """Return the timezone name. + + Note that the name is 100% informational -- there's no requirement that + it mean anything in particular. For example, "GMT", "UTC", "-500", + "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. + """ + if self._tzinfo is None: + return None + name = self._tzinfo.tzname(self) + _check_tzname(name) + return name + + def dst(self): + """Return 0 if DST is not in effect, or the DST offset (as timedelta + positive eastward) if DST is in effect. + + This is purely informational; the DST offset has already been added to + the UTC offset returned by utcoffset() if applicable, so there's no + need to consult dst() unless you're interested in displaying the DST + info. + """ + if self._tzinfo is None: + return None + offset = self._tzinfo.dst(self) + _check_utc_offset("dst", offset) + return offset + + # Comparisons of datetime objects with other. 
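+    # Note, by way of example: mixing aware and naive datetimes makes ==
+    # return False (via allow_mixed in _cmp), while <, <=, > and >= raise
+    # TypeError for the same pair.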
+ + def __eq__(self, other): + if isinstance(other, datetime): + return self._cmp(other, allow_mixed=True) == 0 + elif not isinstance(other, date): + return NotImplemented + else: + return False + + def __le__(self, other): + if isinstance(other, datetime): + return self._cmp(other) <= 0 + elif not isinstance(other, date): + return NotImplemented + else: + _cmperror(self, other) + + def __lt__(self, other): + if isinstance(other, datetime): + return self._cmp(other) < 0 + elif not isinstance(other, date): + return NotImplemented + else: + _cmperror(self, other) + + def __ge__(self, other): + if isinstance(other, datetime): + return self._cmp(other) >= 0 + elif not isinstance(other, date): + return NotImplemented + else: + _cmperror(self, other) + + def __gt__(self, other): + if isinstance(other, datetime): + return self._cmp(other) > 0 + elif not isinstance(other, date): + return NotImplemented + else: + _cmperror(self, other) + + def _cmp(self, other, allow_mixed=False): + assert isinstance(other, datetime) + mytz = self._tzinfo + ottz = other._tzinfo + myoff = otoff = None + + if mytz is ottz: + base_compare = True + else: + myoff = self.utcoffset() + otoff = other.utcoffset() + # Assume that allow_mixed means that we are called from __eq__ + if allow_mixed: + if myoff != self.replace(fold=not self.fold).utcoffset(): + return 2 + if otoff != other.replace(fold=not other.fold).utcoffset(): + return 2 + base_compare = myoff == otoff + + if base_compare: + return _cmp((self._year, self._month, self._day, + self._hour, self._minute, self._second, + self._microsecond), + (other._year, other._month, other._day, + other._hour, other._minute, other._second, + other._microsecond)) + if myoff is None or otoff is None: + if allow_mixed: + return 2 # arbitrary non-zero value + else: + raise TypeError("cannot compare naive and aware datetimes") + # XXX What follows could be done more efficiently... + diff = self - other # this will take offsets into account + if diff.days < 0: + return -1 + return diff and 1 or 0 + + def __add__(self, other): + "Add a datetime and a timedelta." + if not isinstance(other, timedelta): + return NotImplemented + delta = timedelta(self.toordinal(), + hours=self._hour, + minutes=self._minute, + seconds=self._second, + microseconds=self._microsecond) + delta += other + hour, rem = divmod(delta.seconds, 3600) + minute, second = divmod(rem, 60) + if 0 < delta.days <= _MAXORDINAL: + return type(self).combine(date.fromordinal(delta.days), + time(hour, minute, second, + delta.microseconds, + tzinfo=self._tzinfo)) + raise OverflowError("result out of range") + + __radd__ = __add__ + + def __sub__(self, other): + "Subtract two datetimes, or a datetime and a timedelta." 
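+        # For instance, datetime(2020, 1, 2) - datetime(2020, 1, 1) is
+        # timedelta(days=1), while subtracting a naive datetime from an
+        # aware one (or vice versa) hits the TypeError below.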
+ if not isinstance(other, datetime): + if isinstance(other, timedelta): + return self + -other + return NotImplemented + + days1 = self.toordinal() + days2 = other.toordinal() + secs1 = self._second + self._minute * 60 + self._hour * 3600 + secs2 = other._second + other._minute * 60 + other._hour * 3600 + base = timedelta(days1 - days2, + secs1 - secs2, + self._microsecond - other._microsecond) + if self._tzinfo is other._tzinfo: + return base + myoff = self.utcoffset() + otoff = other.utcoffset() + if myoff == otoff: + return base + if myoff is None or otoff is None: + raise TypeError("cannot mix naive and timezone-aware time") + return base + otoff - myoff + + def __hash__(self): + if self._hashcode == -1: + if self.fold: + t = self.replace(fold=0) + else: + t = self + tzoff = t.utcoffset() + if tzoff is None: + self._hashcode = hash(t._getstate()[0]) + else: + days = _ymd2ord(self.year, self.month, self.day) + seconds = self.hour * 3600 + self.minute * 60 + self.second + self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff) + return self._hashcode + + # Pickle support. + + def _getstate(self, protocol=3): + yhi, ylo = divmod(self._year, 256) + us2, us3 = divmod(self._microsecond, 256) + us1, us2 = divmod(us2, 256) + m = self._month + if self._fold and protocol > 3: + m += 128 + basestate = bytes([yhi, ylo, m, self._day, + self._hour, self._minute, self._second, + us1, us2, us3]) + if self._tzinfo is None: + return (basestate,) + else: + return (basestate, self._tzinfo) + + def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") + (yhi, ylo, m, self._day, self._hour, + self._minute, self._second, us1, us2, us3) = string + if m > 127: + self._fold = 1 + self._month = m - 128 + else: + self._fold = 0 + self._month = m + self._year = yhi * 256 + ylo + self._microsecond = (((us1 << 8) | us2) << 8) | us3 + self._tzinfo = tzinfo + + def __reduce_ex__(self, protocol): + return (self.__class__, self._getstate(protocol)) + + def __reduce__(self): + return self.__reduce_ex__(2) + + +datetime.min = datetime(1, 1, 1) +datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999) +datetime.resolution = timedelta(microseconds=1) + + +def _isoweek1monday(year): + # Helper to calculate the day number of the Monday starting week 1 + # XXX This could be done more efficiently + THURSDAY = 3 + firstday = _ymd2ord(year, 1, 1) + firstweekday = (firstday + 6) % 7 # See weekday() above + week1monday = firstday - firstweekday + if firstweekday > THURSDAY: + week1monday += 7 + return week1monday + + +class timezone(tzinfo): + __slots__ = '_offset', '_name' + + # Sentinel value to disallow None + _Omitted = object() + def __new__(cls, offset, name=_Omitted): + if not isinstance(offset, timedelta): + raise TypeError("offset must be a timedelta") + if name is cls._Omitted: + if not offset: + return cls.utc + name = None + elif not isinstance(name, str): + raise TypeError("name must be a string") + if not cls._minoffset <= offset <= cls._maxoffset: + raise ValueError("offset must be a timedelta " + "strictly between -timedelta(hours=24) and " + "timedelta(hours=24).") + return cls._create(offset, name) + + @classmethod + def _create(cls, offset, name=None): + self = tzinfo.__new__(cls) + self._offset = offset + self._name = name + return self + + def __getinitargs__(self): + """pickle support""" + if self._name is None: + return (self._offset,) + return (self._offset, self._name) + + def __eq__(self, other): + 
if isinstance(other, timezone): + return self._offset == other._offset + return NotImplemented + + def __hash__(self): + return hash(self._offset) + + def __repr__(self): + """Convert to formal string, for repr(). + + >>> tz = timezone.utc + >>> repr(tz) + 'datetime.timezone.utc' + >>> tz = timezone(timedelta(hours=-5), 'EST') + >>> repr(tz) + "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')" + """ + if self is self.utc: + return 'datetime.timezone.utc' + if self._name is None: + return "%s.%s(%r)" % (_get_class_module(self), + self.__class__.__qualname__, + self._offset) + return "%s.%s(%r, %r)" % (_get_class_module(self), + self.__class__.__qualname__, + self._offset, self._name) + + def __str__(self): + return self.tzname(None) + + def utcoffset(self, dt): + if isinstance(dt, datetime) or dt is None: + return self._offset + raise TypeError("utcoffset() argument must be a datetime instance" + " or None") + + def tzname(self, dt): + if isinstance(dt, datetime) or dt is None: + if self._name is None: + return self._name_from_offset(self._offset) + return self._name + raise TypeError("tzname() argument must be a datetime instance" + " or None") + + def dst(self, dt): + if isinstance(dt, datetime) or dt is None: + return None + raise TypeError("dst() argument must be a datetime instance" + " or None") + + def fromutc(self, dt): + if isinstance(dt, datetime): + if dt.tzinfo is not self: + raise ValueError("fromutc: dt.tzinfo " + "is not self") + return dt + self._offset + raise TypeError("fromutc() argument must be a datetime instance" + " or None") + + _maxoffset = timedelta(hours=24, microseconds=-1) + _minoffset = -_maxoffset + + @staticmethod + def _name_from_offset(delta): + if not delta: + return 'UTC' + if delta < timedelta(0): + sign = '-' + delta = -delta + else: + sign = '+' + hours, rest = divmod(delta, timedelta(hours=1)) + minutes, rest = divmod(rest, timedelta(minutes=1)) + seconds = rest.seconds + microseconds = rest.microseconds + if microseconds: + return (f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}' + f'.{microseconds:06d}') + if seconds: + return f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}' + return f'UTC{sign}{hours:02d}:{minutes:02d}' + +UTC = timezone.utc = timezone._create(timedelta(0)) + +# bpo-37642: These attributes are rounded to the nearest minute for backwards +# compatibility, even though the constructor will accept a wider range of +# values. This may change in the future. +timezone.min = timezone._create(-timedelta(hours=23, minutes=59)) +timezone.max = timezone._create(timedelta(hours=23, minutes=59)) +_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc) + +# Some time zone algebra. For a datetime x, let +# x.n = x stripped of its timezone -- its naive time. +# x.o = x.utcoffset(), and assuming that doesn't raise an exception or +# return None +# x.d = x.dst(), and assuming that doesn't raise an exception or +# return None +# x.s = x's standard offset, x.o - x.d +# +# Now some derived rules, where k is a duration (timedelta). +# +# 1. x.o = x.s + x.d +# This follows from the definition of x.s. +# +# 2. If x and y have the same tzinfo member, x.s = y.s. +# This is actually a requirement, an assumption we need to make about +# sane tzinfo classes. +# +# 3. The naive UTC time corresponding to x is x.n - x.o. +# This is again a requirement for a sane tzinfo class. +# +# 4. (x+k).s = x.s +# This follows from #2, and that datetime.timetz+timedelta preserves tzinfo. +# +# 5. (x+k).n = x.n + k +# Again follows from how arithmetic is defined. 
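A minimal sketch of the notation above, using a fixed-offset zone from the public datetime API; the specific date and the -5 hour offset are assumptions chosen only for illustration:

    from datetime import datetime, timedelta, timezone

    est = timezone(timedelta(hours=-5), 'EST')    # fixed offset, so dst() is None
    x = datetime(2023, 6, 1, 12, 0, tzinfo=est)

    x_n = x.replace(tzinfo=None)      # x.n, the naive wall time
    x_o = x.utcoffset()               # x.o == timedelta(hours=-5)
    x_d = x.dst() or timedelta(0)     # x.d (None for a fixed-offset zone)
    x_s = x_o - x_d                   # x.s, the standard offset

    # Rule 3: the naive UTC time corresponding to x is x.n - x.o.
    assert x_n - x_o == x.astimezone(timezone.utc).replace(tzinfo=None)

For a fixed-offset zone x.d is zero, so x.o == x.s and rule 1 holds trivially; the derivation that follows is only interesting for hybrid (DST-aware) tzinfo classes.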
+# +# Now we can explain tz.fromutc(x). Let's assume it's an interesting case +# (meaning that the various tzinfo methods exist, and don't blow up or return +# None when called). +# +# The function wants to return a datetime y with timezone tz, equivalent to x. +# x is already in UTC. +# +# By #3, we want +# +# y.n - y.o = x.n [1] +# +# The algorithm starts by attaching tz to x.n, and calling that y. So +# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1] +# becomes true; in effect, we want to solve [2] for k: +# +# (y+k).n - (y+k).o = x.n [2] +# +# By #1, this is the same as +# +# (y+k).n - ((y+k).s + (y+k).d) = x.n [3] +# +# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start. +# Substituting that into [3], +# +# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving +# k - (y+k).s - (y+k).d = 0; rearranging, +# k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so +# k = y.s - (y+k).d +# +# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we +# approximate k by ignoring the (y+k).d term at first. Note that k can't be +# very large, since all offset-returning methods return a duration of magnitude +# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must +# be 0, so ignoring it has no consequence then. +# +# In any case, the new value is +# +# z = y + y.s [4] +# +# It's helpful to step back at look at [4] from a higher level: it's simply +# mapping from UTC to tz's standard time. +# +# At this point, if +# +# z.n - z.o = x.n [5] +# +# we have an equivalent time, and are almost done. The insecurity here is +# at the start of daylight time. Picture US Eastern for concreteness. The wall +# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good +# sense then. The docs ask that an Eastern tzinfo class consider such a time to +# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST +# on the day DST starts. We want to return the 1:MM EST spelling because that's +# the only spelling that makes sense on the local wall clock. +# +# In fact, if [5] holds at this point, we do have the standard-time spelling, +# but that takes a bit of proof. We first prove a stronger result. What's the +# difference between the LHS and RHS of [5]? Let +# +# diff = x.n - (z.n - z.o) [6] +# +# Now +# z.n = by [4] +# (y + y.s).n = by #5 +# y.n + y.s = since y.n = x.n +# x.n + y.s = since z and y are have the same tzinfo member, +# y.s = z.s by #2 +# x.n + z.s +# +# Plugging that back into [6] gives +# +# diff = +# x.n - ((x.n + z.s) - z.o) = expanding +# x.n - x.n - z.s + z.o = cancelling +# - z.s + z.o = by #2 +# z.d +# +# So diff = z.d. +# +# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time +# spelling we wanted in the endcase described above. We're done. Contrarily, +# if z.d = 0, then we have a UTC equivalent, and are also done. +# +# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to +# add to z (in effect, z is in tz's standard time, and we need to shift the +# local clock into tz's daylight time). +# +# Let +# +# z' = z + z.d = z + diff [7] +# +# and we can again ask whether +# +# z'.n - z'.o = x.n [8] +# +# If so, we're done. If not, the tzinfo class is insane, according to the +# assumptions we've made. This also requires a bit of proof. 
As before, let's +# compute the difference between the LHS and RHS of [8] (and skipping some of +# the justifications for the kinds of substitutions we've done several times +# already): +# +# diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7] +# x.n - (z.n + diff - z'.o) = replacing diff via [6] +# x.n - (z.n + x.n - (z.n - z.o) - z'.o) = +# x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n +# - z.n + z.n - z.o + z'.o = cancel z.n +# - z.o + z'.o = #1 twice +# -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo +# z'.d - z.d +# +# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal, +# we've found the UTC-equivalent so are done. In fact, we stop with [7] and +# return z', not bothering to compute z'.d. +# +# How could z.d and z'd differ? z' = z + z.d [7], so merely moving z' by +# a dst() offset, and starting *from* a time already in DST (we know z.d != 0), +# would have to change the result dst() returns: we start in DST, and moving +# a little further into it takes us out of DST. +# +# There isn't a sane case where this can happen. The closest it gets is at +# the end of DST, where there's an hour in UTC with no spelling in a hybrid +# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During +# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM +# UTC) because the docs insist on that, but 0:MM is taken as being in daylight +# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local +# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in +# standard time. Since that's what the local clock *does*, we want to map both +# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous +# in local time, but so it goes -- it's the way the local clock works. +# +# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0, +# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going. +# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8] +# (correctly) concludes that z' is not UTC-equivalent to x. +# +# Because we know z.d said z was in daylight time (else [5] would have held and +# we would have stopped then), and we know z.d != z'.d (else [8] would have held +# and we have stopped then), and there are only 2 possible values dst() can +# return in Eastern, it follows that z'.d must be 0 (which it is in the example, +# but the reasoning doesn't depend on the example -- it depends on there being +# two possible dst() outcomes, one zero and the other non-zero). Therefore +# z' must be in standard time, and is the spelling we want in this case. +# +# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is +# concerned (because it takes z' as being in standard time rather than the +# daylight time we intend here), but returning it gives the real-life "local +# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into +# tz. +# +# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with +# the 1:MM standard time spelling we want. +# +# So how can this break? One of the assumptions must be violated. Two +# possibilities: +# +# 1) [2] effectively says that y.s is invariant across all y belong to a given +# time zone. This isn't true if, for political reasons or continental drift, +# a region decides to change its base offset from UTC. +# +# 2) There may be versions of "double daylight" time where the tail end of +# the analysis gives up a step too early. 
I haven't thought about that +# enough to say. +# +# In any case, it's clear that the default fromutc() is strong enough to handle +# "almost all" time zones: so long as the standard offset is invariant, it +# doesn't matter if daylight time transition points change from year to year, or +# if daylight time is skipped in some years; it doesn't matter how large or +# small dst() may get within its bounds; and it doesn't even matter if some +# perverse time zone returns a negative dst()). So a breaking case must be +# pretty bizarre, and a tzinfo subclass can override fromutc() if it is. diff --git a/Lib/_pydecimal.py b/Lib/_pydecimal.py index f9d6c9901f1f310..2692f2fcba45bf2 100644 --- a/Lib/_pydecimal.py +++ b/Lib/_pydecimal.py @@ -159,7 +159,7 @@ try: from collections import namedtuple as _namedtuple - DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent') + DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent', module='decimal') except ImportError: DecimalTuple = lambda *args: args diff --git a/Lib/_pyio.py b/Lib/_pyio.py index 163cf9de279ff05..32698abac78d25a 100644 --- a/Lib/_pyio.py +++ b/Lib/_pyio.py @@ -33,11 +33,8 @@ # Rebind for compatibility BlockingIOError = BlockingIOError -# Does io.IOBase finalizer log the exception if the close() method fails? -# The exception is ignored silently by default in release build. -_IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode) # Does open() check its 'errors' argument? -_CHECK_ERRORS = _IOBASE_EMITS_UNRAISABLE +_CHECK_ERRORS = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode) def text_encoding(encoding, stacklevel=2): @@ -416,18 +413,9 @@ def __del__(self): if closed: return - if _IOBASE_EMITS_UNRAISABLE: - self.close() - else: - # The try/except block is in case this is called at program - # exit time, when it's possible that globals have already been - # deleted, and then the close() call might fail. Since - # there's nothing we can do about such failures and they annoy - # the end users, we suppress the traceback. - try: - self.close() - except: - pass + # If close() fails, the caller logs the exception with + # sys.unraisablehook. close() must be called at the end at __del__(). + self.close() ### Inquiries ### @@ -638,10 +626,7 @@ def read(self, size=-1): def readall(self): """Read until EOF, using multiple read() call.""" res = bytearray() - while True: - data = self.read(DEFAULT_BUFFER_SIZE) - if not data: - break + while data := self.read(DEFAULT_BUFFER_SIZE): res += data if res: return bytes(res) diff --git a/Lib/_pylong.py b/Lib/_pylong.py index e0d4e9042193e25..936346e187ff699 100644 --- a/Lib/_pylong.py +++ b/Lib/_pylong.py @@ -12,12 +12,9 @@ tricky or non-obvious code is not worth it. For people looking for maximum performance, they should use something like gmpy2.""" -import sys import re import decimal -_DEBUG = False - def int_to_decimal(n): """Asymptotically fast conversion of an 'int' to Decimal.""" @@ -32,9 +29,6 @@ def int_to_decimal(n): # "clever" recursive way. If we want a string representation, we # apply str to _that_. 
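The comment above only gestures at the "clever" recursive way; a minimal sketch of the divide-and-conquer idea (an illustration under the stated assumptions, not the module's actual implementation, which lies outside this hunk) might look like:

    import decimal

    def int_to_decimal_sketch(n):
        D = decimal.Decimal

        def inner(m, bits):
            if bits <= 1024:                 # small enough to convert directly
                return D(m)
            half = bits // 2
            hi, lo = m >> half, m & ((1 << half) - 1)
            # Recombine in Decimal so the heavy lifting is Decimal multiplication.
            return inner(hi, bits - half) * D(2) ** half + inner(lo, half)

        # Work in a context wide enough that every operation above stays exact.
        with decimal.localcontext() as ctx:
            ctx.prec = decimal.MAX_PREC
            ctx.Emax = decimal.MAX_EMAX
            if n < 0:
                return -inner(-n, (-n).bit_length())
            return inner(n, n.bit_length())

Applying str() to the returned Decimal then yields the decimal string; the real code tunes the base-case size and splitting strategy rather than using the fixed 1024-bit cutoff assumed here.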
- if _DEBUG: - print('int_to_decimal', n.bit_length(), file=sys.stderr) - D = decimal.Decimal D2 = D(2) @@ -141,8 +135,6 @@ def inner(a, b): def int_from_string(s): """Asymptotically fast version of PyLong_FromString(), conversion of a string of decimal digits into an 'int'.""" - if _DEBUG: - print('int_from_string', len(s), file=sys.stderr) # PyLong_FromString() has already removed leading +/-, checked for invalid # use of underscore characters, checked that string consists of only digits # and underscores, and stripped leading whitespace. The input can still @@ -281,8 +273,6 @@ def int_divmod(a, b): """Asymptotically fast replacement for divmod, for 'int'. Its time complexity is O(n**1.58), where n = #bits(a) + #bits(b). """ - if _DEBUG: - print('int_divmod', a.bit_length(), b.bit_length(), file=sys.stderr) if b == 0: raise ZeroDivisionError elif b < 0: diff --git a/Lib/_strptime.py b/Lib/_strptime.py index b97dfcce1e8e4d7..77ccdc9e1d789b5 100644 --- a/Lib/_strptime.py +++ b/Lib/_strptime.py @@ -290,22 +290,6 @@ def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon): return 1 + days_to_week + day_of_week -def _calc_julian_from_V(iso_year, iso_week, iso_weekday): - """Calculate the Julian day based on the ISO 8601 year, week, and weekday. - ISO weeks start on Mondays, with week 01 being the week containing 4 Jan. - ISO week days range from 1 (Monday) to 7 (Sunday). - """ - correction = datetime_date(iso_year, 1, 4).isoweekday() + 3 - ordinal = (iso_week * 7) + iso_weekday - correction - # ordinal may be negative or 0 now, which means the date is in the previous - # calendar year - if ordinal < 1: - ordinal += datetime_date(iso_year, 1, 1).toordinal() - iso_year -= 1 - ordinal -= datetime_date(iso_year, 1, 1).toordinal() - return iso_year, ordinal - - def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): """Return a 2-tuple consisting of a time struct and an int containing the number of microseconds based on the input string and the @@ -483,7 +467,8 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): else: tz = value break - # Deal with the cases where ambiguities arize + + # Deal with the cases where ambiguities arise # don't assume default values for ISO week/year if year is None and iso_year is not None: if iso_week is None or weekday is None: @@ -511,7 +496,6 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): elif year is None: year = 1900 - # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian is None and weekday is not None: @@ -520,7 +504,10 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, week_starts_Mon) elif iso_year is not None and iso_week is not None: - year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1) + datetime_result = datetime_date.fromisocalendar(iso_year, iso_week, weekday + 1) + year = datetime_result.year + month = datetime_result.month + day = datetime_result.day if julian is not None and julian <= 0: year -= 1 yday = 366 if calendar.isleap(year) else 365 diff --git a/Lib/abc.py b/Lib/abc.py index 42048ddb855381f..f8a4e11ce9c3b1e 100644 --- a/Lib/abc.py +++ b/Lib/abc.py @@ -18,7 +18,7 @@ class that has a metaclass derived from ABCMeta cannot be class C(metaclass=ABCMeta): @abstractmethod - def my_abstract_method(self, ...): + def my_abstract_method(self, arg1, arg2, argN): ... 
""" funcobj.__isabstractmethod__ = True diff --git a/Lib/aifc.py b/Lib/aifc.py deleted file mode 100644 index 5254987e22bc167..000000000000000 --- a/Lib/aifc.py +++ /dev/null @@ -1,984 +0,0 @@ -"""Stuff to parse AIFF-C and AIFF files. - -Unless explicitly stated otherwise, the description below is true -both for AIFF-C files and AIFF files. - -An AIFF-C file has the following structure. - - +-----------------+ - | FORM | - +-----------------+ - | | - +----+------------+ - | | AIFC | - | +------------+ - | | | - | | . | - | | . | - | | . | - +----+------------+ - -An AIFF file has the string "AIFF" instead of "AIFC". - -A chunk consists of an identifier (4 bytes) followed by a size (4 bytes, -big endian order), followed by the data. The size field does not include -the size of the 8 byte header. - -The following chunk types are recognized. - - FVER - (AIFF-C only). - MARK - <# of markers> (2 bytes) - list of markers: - (2 bytes, must be > 0) - (4 bytes) - ("pstring") - COMM - <# of channels> (2 bytes) - <# of sound frames> (4 bytes) - (2 bytes) - (10 bytes, IEEE 80-bit extended - floating point) - in AIFF-C files only: - (4 bytes) - ("pstring") - SSND - (4 bytes, not used by this program) - (4 bytes, not used by this program) - - -A pstring consists of 1 byte length, a string of characters, and 0 or 1 -byte pad to make the total length even. - -Usage. - -Reading AIFF files: - f = aifc.open(file, 'r') -where file is either the name of a file or an open file pointer. -The open file pointer must have methods read(), seek(), and close(). -In some types of audio files, if the setpos() method is not used, -the seek() method is not necessary. - -This returns an instance of a class with the following public methods: - getnchannels() -- returns number of audio channels (1 for - mono, 2 for stereo) - getsampwidth() -- returns sample width in bytes - getframerate() -- returns sampling frequency - getnframes() -- returns number of audio frames - getcomptype() -- returns compression type ('NONE' for AIFF files) - getcompname() -- returns human-readable version of - compression type ('not compressed' for AIFF files) - getparams() -- returns a namedtuple consisting of all of the - above in the above order - getmarkers() -- get the list of marks in the audio file or None - if there are no marks - getmark(id) -- get mark with the specified id (raises an error - if the mark does not exist) - readframes(n) -- returns at most n frames of audio - rewind() -- rewind to the beginning of the audio stream - setpos(pos) -- seek to the specified position - tell() -- return the current position - close() -- close the instance (make it unusable) -The position returned by tell(), the position given to setpos() and -the position of marks are all compatible and have nothing to do with -the actual position in the file. -The close() method is called automatically when the class instance -is destroyed. - -Writing AIFF files: - f = aifc.open(file, 'w') -where file is either the name of a file or an open file pointer. -The open file pointer must have methods write(), tell(), seek(), and -close(). 
- -This returns an instance of a class with the following public methods: - aiff() -- create an AIFF file (AIFF-C default) - aifc() -- create an AIFF-C file - setnchannels(n) -- set the number of channels - setsampwidth(n) -- set the sample width - setframerate(n) -- set the frame rate - setnframes(n) -- set the number of frames - setcomptype(type, name) - -- set the compression type and the - human-readable compression type - setparams(tuple) - -- set all parameters at once - setmark(id, pos, name) - -- add specified mark to the list of marks - tell() -- return current position in output file (useful - in combination with setmark()) - writeframesraw(data) - -- write audio frames without pathing up the - file header - writeframes(data) - -- write audio frames and patch up the file header - close() -- patch up the file header and close the - output file -You should set the parameters before the first writeframesraw or -writeframes. The total number of frames does not need to be set, -but when it is set to the correct value, the header does not have to -be patched up. -It is best to first set all parameters, perhaps possibly the -compression type, and then write audio frames using writeframesraw. -When all frames have been written, either call writeframes(b'') or -close() to patch up the sizes in the header. -Marks can be added anytime. If there are any marks, you must call -close() after all frames have been written. -The close() method is called automatically when the class instance -is destroyed. - -When a file is opened with the extension '.aiff', an AIFF file is -written, otherwise an AIFF-C file is written. This default can be -changed by calling aiff() or aifc() before the first writeframes or -writeframesraw. -""" - -import struct -import builtins -import warnings - -__all__ = ["Error", "open"] - - -warnings._deprecated(__name__, remove=(3, 13)) - - -class Error(Exception): - pass - -_AIFC_version = 0xA2805140 # Version 1 of AIFF-C - -def _read_long(file): - try: - return struct.unpack('>l', file.read(4))[0] - except struct.error: - raise EOFError from None - -def _read_ulong(file): - try: - return struct.unpack('>L', file.read(4))[0] - except struct.error: - raise EOFError from None - -def _read_short(file): - try: - return struct.unpack('>h', file.read(2))[0] - except struct.error: - raise EOFError from None - -def _read_ushort(file): - try: - return struct.unpack('>H', file.read(2))[0] - except struct.error: - raise EOFError from None - -def _read_string(file): - length = ord(file.read(1)) - if length == 0: - data = b'' - else: - data = file.read(length) - if length & 1 == 0: - dummy = file.read(1) - return data - -_HUGE_VAL = 1.79769313486231e+308 # See - -def _read_float(f): # 10 bytes - expon = _read_short(f) # 2 bytes - sign = 1 - if expon < 0: - sign = -1 - expon = expon + 0x8000 - himant = _read_ulong(f) # 4 bytes - lomant = _read_ulong(f) # 4 bytes - if expon == himant == lomant == 0: - f = 0.0 - elif expon == 0x7FFF: - f = _HUGE_VAL - else: - expon = expon - 16383 - f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63) - return sign * f - -def _write_short(f, x): - f.write(struct.pack('>h', x)) - -def _write_ushort(f, x): - f.write(struct.pack('>H', x)) - -def _write_long(f, x): - f.write(struct.pack('>l', x)) - -def _write_ulong(f, x): - f.write(struct.pack('>L', x)) - -def _write_string(f, s): - if len(s) > 255: - raise ValueError("string exceeds maximum pstring length") - f.write(struct.pack('B', len(s))) - f.write(s) - if len(s) & 1 == 0: - f.write(b'\x00') - -def 
_write_float(f, x): - import math - if x < 0: - sign = 0x8000 - x = x * -1 - else: - sign = 0 - if x == 0: - expon = 0 - himant = 0 - lomant = 0 - else: - fmant, expon = math.frexp(x) - if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN - expon = sign|0x7FFF - himant = 0 - lomant = 0 - else: # Finite - expon = expon + 16382 - if expon < 0: # denormalized - fmant = math.ldexp(fmant, expon) - expon = 0 - expon = expon | sign - fmant = math.ldexp(fmant, 32) - fsmant = math.floor(fmant) - himant = int(fsmant) - fmant = math.ldexp(fmant - fsmant, 32) - fsmant = math.floor(fmant) - lomant = int(fsmant) - _write_ushort(f, expon) - _write_ulong(f, himant) - _write_ulong(f, lomant) - -with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - from chunk import Chunk -from collections import namedtuple - -_aifc_params = namedtuple('_aifc_params', - 'nchannels sampwidth framerate nframes comptype compname') - -_aifc_params.nchannels.__doc__ = 'Number of audio channels (1 for mono, 2 for stereo)' -_aifc_params.sampwidth.__doc__ = 'Sample width in bytes' -_aifc_params.framerate.__doc__ = 'Sampling frequency' -_aifc_params.nframes.__doc__ = 'Number of audio frames' -_aifc_params.comptype.__doc__ = 'Compression type ("NONE" for AIFF files)' -_aifc_params.compname.__doc__ = ("""\ -A human-readable version of the compression type -('not compressed' for AIFF files)""") - - -class Aifc_read: - # Variables used in this class: - # - # These variables are available to the user though appropriate - # methods of this class: - # _file -- the open file with methods read(), close(), and seek() - # set through the __init__() method - # _nchannels -- the number of audio channels - # available through the getnchannels() method - # _nframes -- the number of audio frames - # available through the getnframes() method - # _sampwidth -- the number of bytes per audio sample - # available through the getsampwidth() method - # _framerate -- the sampling frequency - # available through the getframerate() method - # _comptype -- the AIFF-C compression type ('NONE' if AIFF) - # available through the getcomptype() method - # _compname -- the human-readable AIFF-C compression type - # available through the getcomptype() method - # _markers -- the marks in the audio file - # available through the getmarkers() and getmark() - # methods - # _soundpos -- the position in the audio stream - # available through the tell() method, set through the - # setpos() method - # - # These variables are used internally only: - # _version -- the AIFF-C version number - # _decomp -- the decompressor from builtin module cl - # _comm_chunk_read -- 1 iff the COMM chunk has been read - # _aifc -- 1 iff reading an AIFF-C file - # _ssnd_seek_needed -- 1 iff positioned correctly in audio - # file for readframes() - # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk - # _framesize -- size of one frame in the file - - _file = None # Set here since __del__ checks it - - def initfp(self, file): - self._version = 0 - self._convert = None - self._markers = [] - self._soundpos = 0 - self._file = file - chunk = Chunk(file) - if chunk.getname() != b'FORM': - raise Error('file does not start with FORM id') - formdata = chunk.read(4) - if formdata == b'AIFF': - self._aifc = 0 - elif formdata == b'AIFC': - self._aifc = 1 - else: - raise Error('not an AIFF or AIFF-C file') - self._comm_chunk_read = 0 - self._ssnd_chunk = None - while 1: - self._ssnd_seek_needed = 1 - try: - chunk = Chunk(self._file) - except 
EOFError: - break - chunkname = chunk.getname() - if chunkname == b'COMM': - self._read_comm_chunk(chunk) - self._comm_chunk_read = 1 - elif chunkname == b'SSND': - self._ssnd_chunk = chunk - dummy = chunk.read(8) - self._ssnd_seek_needed = 0 - elif chunkname == b'FVER': - self._version = _read_ulong(chunk) - elif chunkname == b'MARK': - self._readmark(chunk) - chunk.skip() - if not self._comm_chunk_read or not self._ssnd_chunk: - raise Error('COMM chunk and/or SSND chunk missing') - - def __init__(self, f): - if isinstance(f, str): - file_object = builtins.open(f, 'rb') - try: - self.initfp(file_object) - except: - file_object.close() - raise - else: - # assume it is an open file object already - self.initfp(f) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - # - # User visible methods. - # - def getfp(self): - return self._file - - def rewind(self): - self._ssnd_seek_needed = 1 - self._soundpos = 0 - - def close(self): - file = self._file - if file is not None: - self._file = None - file.close() - - def tell(self): - return self._soundpos - - def getnchannels(self): - return self._nchannels - - def getnframes(self): - return self._nframes - - def getsampwidth(self): - return self._sampwidth - - def getframerate(self): - return self._framerate - - def getcomptype(self): - return self._comptype - - def getcompname(self): - return self._compname - -## def getversion(self): -## return self._version - - def getparams(self): - return _aifc_params(self.getnchannels(), self.getsampwidth(), - self.getframerate(), self.getnframes(), - self.getcomptype(), self.getcompname()) - - def getmarkers(self): - if len(self._markers) == 0: - return None - return self._markers - - def getmark(self, id): - for marker in self._markers: - if id == marker[0]: - return marker - raise Error('marker {0!r} does not exist'.format(id)) - - def setpos(self, pos): - if pos < 0 or pos > self._nframes: - raise Error('position not in range') - self._soundpos = pos - self._ssnd_seek_needed = 1 - - def readframes(self, nframes): - if self._ssnd_seek_needed: - self._ssnd_chunk.seek(0) - dummy = self._ssnd_chunk.read(8) - pos = self._soundpos * self._framesize - if pos: - self._ssnd_chunk.seek(pos + 8) - self._ssnd_seek_needed = 0 - if nframes == 0: - return b'' - data = self._ssnd_chunk.read(nframes * self._framesize) - if self._convert and data: - data = self._convert(data) - self._soundpos = self._soundpos + len(data) // (self._nchannels - * self._sampwidth) - return data - - # - # Internal methods. 
- # - - def _alaw2lin(self, data): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=DeprecationWarning) - import audioop - return audioop.alaw2lin(data, 2) - - def _ulaw2lin(self, data): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=DeprecationWarning) - import audioop - return audioop.ulaw2lin(data, 2) - - def _adpcm2lin(self, data): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=DeprecationWarning) - import audioop - if not hasattr(self, '_adpcmstate'): - # first time - self._adpcmstate = None - data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate) - return data - - def _sowt2lin(self, data): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=DeprecationWarning) - import audioop - return audioop.byteswap(data, 2) - - def _read_comm_chunk(self, chunk): - self._nchannels = _read_short(chunk) - self._nframes = _read_long(chunk) - self._sampwidth = (_read_short(chunk) + 7) // 8 - self._framerate = int(_read_float(chunk)) - if self._sampwidth <= 0: - raise Error('bad sample width') - if self._nchannels <= 0: - raise Error('bad # of channels') - self._framesize = self._nchannels * self._sampwidth - if self._aifc: - #DEBUG: SGI's soundeditor produces a bad size :-( - kludge = 0 - if chunk.chunksize == 18: - kludge = 1 - warnings.warn('Warning: bad COMM chunk size') - chunk.chunksize = 23 - #DEBUG end - self._comptype = chunk.read(4) - #DEBUG start - if kludge: - length = ord(chunk.file.read(1)) - if length & 1 == 0: - length = length + 1 - chunk.chunksize = chunk.chunksize + length - chunk.file.seek(-1, 1) - #DEBUG end - self._compname = _read_string(chunk) - if self._comptype != b'NONE': - if self._comptype == b'G722': - self._convert = self._adpcm2lin - elif self._comptype in (b'ulaw', b'ULAW'): - self._convert = self._ulaw2lin - elif self._comptype in (b'alaw', b'ALAW'): - self._convert = self._alaw2lin - elif self._comptype in (b'sowt', b'SOWT'): - self._convert = self._sowt2lin - else: - raise Error('unsupported compression type') - self._sampwidth = 2 - else: - self._comptype = b'NONE' - self._compname = b'not compressed' - - def _readmark(self, chunk): - nmarkers = _read_short(chunk) - # Some files appear to contain invalid counts. - # Cope with this by testing for EOF. 
- try: - for i in range(nmarkers): - id = _read_short(chunk) - pos = _read_long(chunk) - name = _read_string(chunk) - if pos or name: - # some files appear to have - # dummy markers consisting of - # a position 0 and name '' - self._markers.append((id, pos, name)) - except EOFError: - w = ('Warning: MARK chunk contains only %s marker%s instead of %s' % - (len(self._markers), '' if len(self._markers) == 1 else 's', - nmarkers)) - warnings.warn(w) - -class Aifc_write: - # Variables used in this class: - # - # These variables are user settable through appropriate methods - # of this class: - # _file -- the open file with methods write(), close(), tell(), seek() - # set through the __init__() method - # _comptype -- the AIFF-C compression type ('NONE' in AIFF) - # set through the setcomptype() or setparams() method - # _compname -- the human-readable AIFF-C compression type - # set through the setcomptype() or setparams() method - # _nchannels -- the number of audio channels - # set through the setnchannels() or setparams() method - # _sampwidth -- the number of bytes per audio sample - # set through the setsampwidth() or setparams() method - # _framerate -- the sampling frequency - # set through the setframerate() or setparams() method - # _nframes -- the number of audio frames written to the header - # set through the setnframes() or setparams() method - # _aifc -- whether we're writing an AIFF-C file or an AIFF file - # set through the aifc() method, reset through the - # aiff() method - # - # These variables are used internally only: - # _version -- the AIFF-C version number - # _comp -- the compressor from builtin module cl - # _nframeswritten -- the number of audio frames actually written - # _datalength -- the size of the audio samples written to the header - # _datawritten -- the size of the audio samples actually written - - _file = None # Set here since __del__ checks it - - def __init__(self, f): - if isinstance(f, str): - file_object = builtins.open(f, 'wb') - try: - self.initfp(file_object) - except: - file_object.close() - raise - - # treat .aiff file extensions as non-compressed audio - if f.endswith('.aiff'): - self._aifc = 0 - else: - # assume it is an open file object already - self.initfp(f) - - def initfp(self, file): - self._file = file - self._version = _AIFC_version - self._comptype = b'NONE' - self._compname = b'not compressed' - self._convert = None - self._nchannels = 0 - self._sampwidth = 0 - self._framerate = 0 - self._nframes = 0 - self._nframeswritten = 0 - self._datawritten = 0 - self._datalength = 0 - self._markers = [] - self._marklength = 0 - self._aifc = 1 # AIFF-C is default - - def __del__(self): - self.close() - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - # - # User visible methods. 
- # - def aiff(self): - if self._nframeswritten: - raise Error('cannot change parameters after starting to write') - self._aifc = 0 - - def aifc(self): - if self._nframeswritten: - raise Error('cannot change parameters after starting to write') - self._aifc = 1 - - def setnchannels(self, nchannels): - if self._nframeswritten: - raise Error('cannot change parameters after starting to write') - if nchannels < 1: - raise Error('bad # of channels') - self._nchannels = nchannels - - def getnchannels(self): - if not self._nchannels: - raise Error('number of channels not set') - return self._nchannels - - def setsampwidth(self, sampwidth): - if self._nframeswritten: - raise Error('cannot change parameters after starting to write') - if sampwidth < 1 or sampwidth > 4: - raise Error('bad sample width') - self._sampwidth = sampwidth - - def getsampwidth(self): - if not self._sampwidth: - raise Error('sample width not set') - return self._sampwidth - - def setframerate(self, framerate): - if self._nframeswritten: - raise Error('cannot change parameters after starting to write') - if framerate <= 0: - raise Error('bad frame rate') - self._framerate = framerate - - def getframerate(self): - if not self._framerate: - raise Error('frame rate not set') - return self._framerate - - def setnframes(self, nframes): - if self._nframeswritten: - raise Error('cannot change parameters after starting to write') - self._nframes = nframes - - def getnframes(self): - return self._nframeswritten - - def setcomptype(self, comptype, compname): - if self._nframeswritten: - raise Error('cannot change parameters after starting to write') - if comptype not in (b'NONE', b'ulaw', b'ULAW', - b'alaw', b'ALAW', b'G722', b'sowt', b'SOWT'): - raise Error('unsupported compression type') - self._comptype = comptype - self._compname = compname - - def getcomptype(self): - return self._comptype - - def getcompname(self): - return self._compname - -## def setversion(self, version): -## if self._nframeswritten: -## raise Error, 'cannot change parameters after starting to write' -## self._version = version - - def setparams(self, params): - nchannels, sampwidth, framerate, nframes, comptype, compname = params - if self._nframeswritten: - raise Error('cannot change parameters after starting to write') - if comptype not in (b'NONE', b'ulaw', b'ULAW', - b'alaw', b'ALAW', b'G722', b'sowt', b'SOWT'): - raise Error('unsupported compression type') - self.setnchannels(nchannels) - self.setsampwidth(sampwidth) - self.setframerate(framerate) - self.setnframes(nframes) - self.setcomptype(comptype, compname) - - def getparams(self): - if not self._nchannels or not self._sampwidth or not self._framerate: - raise Error('not all parameters set') - return _aifc_params(self._nchannels, self._sampwidth, self._framerate, - self._nframes, self._comptype, self._compname) - - def setmark(self, id, pos, name): - if id <= 0: - raise Error('marker ID must be > 0') - if pos < 0: - raise Error('marker position must be >= 0') - if not isinstance(name, bytes): - raise Error('marker name must be bytes') - for i in range(len(self._markers)): - if id == self._markers[i][0]: - self._markers[i] = id, pos, name - return - self._markers.append((id, pos, name)) - - def getmark(self, id): - for marker in self._markers: - if id == marker[0]: - return marker - raise Error('marker {0!r} does not exist'.format(id)) - - def getmarkers(self): - if len(self._markers) == 0: - return None - return self._markers - - def tell(self): - return self._nframeswritten - - def 
writeframesraw(self, data): - if not isinstance(data, (bytes, bytearray)): - data = memoryview(data).cast('B') - self._ensure_header_written(len(data)) - nframes = len(data) // (self._sampwidth * self._nchannels) - if self._convert: - data = self._convert(data) - self._file.write(data) - self._nframeswritten = self._nframeswritten + nframes - self._datawritten = self._datawritten + len(data) - - def writeframes(self, data): - self.writeframesraw(data) - if self._nframeswritten != self._nframes or \ - self._datalength != self._datawritten: - self._patchheader() - - def close(self): - if self._file is None: - return - try: - self._ensure_header_written(0) - if self._datawritten & 1: - # quick pad to even size - self._file.write(b'\x00') - self._datawritten = self._datawritten + 1 - self._writemarkers() - if self._nframeswritten != self._nframes or \ - self._datalength != self._datawritten or \ - self._marklength: - self._patchheader() - finally: - # Prevent ref cycles - self._convert = None - f = self._file - self._file = None - f.close() - - # - # Internal methods. - # - - def _lin2alaw(self, data): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=DeprecationWarning) - import audioop - return audioop.lin2alaw(data, 2) - - def _lin2ulaw(self, data): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=DeprecationWarning) - import audioop - return audioop.lin2ulaw(data, 2) - - def _lin2adpcm(self, data): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=DeprecationWarning) - import audioop - if not hasattr(self, '_adpcmstate'): - self._adpcmstate = None - data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate) - return data - - def _lin2sowt(self, data): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=DeprecationWarning) - import audioop - return audioop.byteswap(data, 2) - - def _ensure_header_written(self, datasize): - if not self._nframeswritten: - if self._comptype in (b'ULAW', b'ulaw', - b'ALAW', b'alaw', b'G722', - b'sowt', b'SOWT'): - if not self._sampwidth: - self._sampwidth = 2 - if self._sampwidth != 2: - raise Error('sample width must be 2 when compressing ' - 'with ulaw/ULAW, alaw/ALAW, sowt/SOWT ' - 'or G7.22 (ADPCM)') - if not self._nchannels: - raise Error('# channels not specified') - if not self._sampwidth: - raise Error('sample width not specified') - if not self._framerate: - raise Error('sampling rate not specified') - self._write_header(datasize) - - def _init_compression(self): - if self._comptype == b'G722': - self._convert = self._lin2adpcm - elif self._comptype in (b'ulaw', b'ULAW'): - self._convert = self._lin2ulaw - elif self._comptype in (b'alaw', b'ALAW'): - self._convert = self._lin2alaw - elif self._comptype in (b'sowt', b'SOWT'): - self._convert = self._lin2sowt - - def _write_header(self, initlength): - if self._aifc and self._comptype != b'NONE': - self._init_compression() - self._file.write(b'FORM') - if not self._nframes: - self._nframes = initlength // (self._nchannels * self._sampwidth) - self._datalength = self._nframes * self._nchannels * self._sampwidth - if self._datalength & 1: - self._datalength = self._datalength + 1 - if self._aifc: - if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'): - self._datalength = self._datalength // 2 - if self._datalength & 1: - self._datalength = self._datalength + 1 - elif self._comptype == b'G722': - self._datalength = (self._datalength + 3) // 4 - if self._datalength & 1: - 
self._datalength = self._datalength + 1 - try: - self._form_length_pos = self._file.tell() - except (AttributeError, OSError): - self._form_length_pos = None - commlength = self._write_form_length(self._datalength) - if self._aifc: - self._file.write(b'AIFC') - self._file.write(b'FVER') - _write_ulong(self._file, 4) - _write_ulong(self._file, self._version) - else: - self._file.write(b'AIFF') - self._file.write(b'COMM') - _write_ulong(self._file, commlength) - _write_short(self._file, self._nchannels) - if self._form_length_pos is not None: - self._nframes_pos = self._file.tell() - _write_ulong(self._file, self._nframes) - if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'): - _write_short(self._file, 8) - else: - _write_short(self._file, self._sampwidth * 8) - _write_float(self._file, self._framerate) - if self._aifc: - self._file.write(self._comptype) - _write_string(self._file, self._compname) - self._file.write(b'SSND') - if self._form_length_pos is not None: - self._ssnd_length_pos = self._file.tell() - _write_ulong(self._file, self._datalength + 8) - _write_ulong(self._file, 0) - _write_ulong(self._file, 0) - - def _write_form_length(self, datalength): - if self._aifc: - commlength = 18 + 5 + len(self._compname) - if commlength & 1: - commlength = commlength + 1 - verslength = 12 - else: - commlength = 18 - verslength = 0 - _write_ulong(self._file, 4 + verslength + self._marklength + \ - 8 + commlength + 16 + datalength) - return commlength - - def _patchheader(self): - curpos = self._file.tell() - if self._datawritten & 1: - datalength = self._datawritten + 1 - self._file.write(b'\x00') - else: - datalength = self._datawritten - if datalength == self._datalength and \ - self._nframes == self._nframeswritten and \ - self._marklength == 0: - self._file.seek(curpos, 0) - return - self._file.seek(self._form_length_pos, 0) - dummy = self._write_form_length(datalength) - self._file.seek(self._nframes_pos, 0) - _write_ulong(self._file, self._nframeswritten) - self._file.seek(self._ssnd_length_pos, 0) - _write_ulong(self._file, datalength + 8) - self._file.seek(curpos, 0) - self._nframes = self._nframeswritten - self._datalength = datalength - - def _writemarkers(self): - if len(self._markers) == 0: - return - self._file.write(b'MARK') - length = 2 - for marker in self._markers: - id, pos, name = marker - length = length + len(name) + 1 + 6 - if len(name) & 1 == 0: - length = length + 1 - _write_ulong(self._file, length) - self._marklength = length + 8 - _write_short(self._file, len(self._markers)) - for marker in self._markers: - id, pos, name = marker - _write_short(self._file, id) - _write_ulong(self._file, pos) - _write_string(self._file, name) - -def open(f, mode=None): - if mode is None: - if hasattr(f, 'mode'): - mode = f.mode - else: - mode = 'rb' - if mode in ('r', 'rb'): - return Aifc_read(f) - elif mode in ('w', 'wb'): - return Aifc_write(f) - else: - raise Error("mode must be 'r', 'rb', 'w', or 'wb'") - - -if __name__ == '__main__': - import sys - if not sys.argv[1:]: - sys.argv.append('/usr/demos/data/audio/bach.aiff') - fn = sys.argv[1] - with open(fn, 'r') as f: - print("Reading", fn) - print("nchannels =", f.getnchannels()) - print("nframes =", f.getnframes()) - print("sampwidth =", f.getsampwidth()) - print("framerate =", f.getframerate()) - print("comptype =", f.getcomptype()) - print("compname =", f.getcompname()) - if sys.argv[2:]: - gn = sys.argv[2] - print("Writing", gn) - with open(gn, 'w') as g: - g.setparams(f.getparams()) - while 1: - data = 
f.readframes(1024) - if not data: - break - g.writeframes(data) - print("Done.") diff --git a/Lib/argparse.py b/Lib/argparse.py index d2dcfdf5682f7c1..a32884db80d1ea1 100644 --- a/Lib/argparse.py +++ b/Lib/argparse.py @@ -89,8 +89,6 @@ import re as _re import sys as _sys -import warnings - from gettext import gettext as _, ngettext SUPPRESS = '==SUPPRESS==' @@ -345,21 +343,22 @@ def _format_usage(self, usage, actions, groups, prefix): def get_lines(parts, indent, prefix=None): lines = [] line = [] + indent_length = len(indent) if prefix is not None: line_len = len(prefix) - 1 else: - line_len = len(indent) - 1 + line_len = indent_length - 1 for part in parts: if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) line = [] - line_len = len(indent) - 1 + line_len = indent_length - 1 line.append(part) line_len += len(part) + 1 if line: lines.append(indent + ' '.join(line)) if prefix is not None: - lines[0] = lines[0][len(indent):] + lines[0] = lines[0][indent_length:] return lines # if prog is short, follow it with optionals or positionals @@ -403,10 +402,18 @@ def _format_actions_usage(self, actions, groups): except ValueError: continue else: - end = start + len(group._group_actions) + group_action_count = len(group._group_actions) + end = start + group_action_count if actions[start:end] == group._group_actions: + + suppressed_actions_count = 0 for action in group._group_actions: group_actions.add(action) + if action.help is SUPPRESS: + suppressed_actions_count += 1 + + exposed_actions_count = group_action_count - suppressed_actions_count + if not group.required: if start in inserts: inserts[start] += ' [' @@ -416,7 +423,7 @@ def _format_actions_usage(self, actions, groups): inserts[end] += ']' else: inserts[end] = ']' - else: + elif exposed_actions_count > 1: if start in inserts: inserts[start] += ' (' else: @@ -490,7 +497,6 @@ def _format_actions_usage(self, actions, groups): text = _re.sub(r'(%s) ' % open, r'\1', text) text = _re.sub(r' (%s)' % close, r'\1', text) text = _re.sub(r'%s *%s' % (open, close), r'', text) - text = _re.sub(r'\(([^|]*)\)', r'\1', text) text = text.strip() # return the text @@ -875,16 +881,19 @@ def __call__(self, parser, namespace, values, option_string=None): raise NotImplementedError(_('.__call__() not defined')) +# FIXME: remove together with `BooleanOptionalAction` deprecated arguments. +_deprecated_default = object() + class BooleanOptionalAction(Action): def __init__(self, option_strings, dest, default=None, - type=None, - choices=None, + type=_deprecated_default, + choices=_deprecated_default, required=False, help=None, - metavar=None): + metavar=_deprecated_default): _option_strings = [] for option_string in option_strings: @@ -894,6 +903,25 @@ def __init__(self, option_string = '--no-' + option_string[2:] _option_strings.append(option_string) + # We need `_deprecated` special value to ban explicit arguments that + # match default value. 
Like: + # parser.add_argument('-f', action=BooleanOptionalAction, type=int) + for field_name in ('type', 'choices', 'metavar'): + if locals()[field_name] is not _deprecated_default: + import warnings + warnings._deprecated( + field_name, + "{name!r} is deprecated as of Python 3.12 and will be " + "removed in Python {remove}.", + remove=(3, 14)) + + if type is _deprecated_default: + type = None + if choices is _deprecated_default: + choices = None + if metavar is _deprecated_default: + metavar = None + super().__init__( option_strings=_option_strings, dest=dest, @@ -1499,6 +1527,8 @@ def _add_container_actions(self, container): title_group_map = {} for group in self._action_groups: if group.title in title_group_map: + # This branch could happen if a derived class added + # groups with duplicated titles in __init__ msg = _('cannot merge actions - two groups are named %r') raise ValueError(msg % (group.title)) title_group_map[group.title] = group @@ -1669,6 +1699,7 @@ def _remove_action(self, action): self._group_actions.remove(action) def add_argument_group(self, *args, **kwargs): + import warnings warnings.warn( "Nesting argument groups is deprecated.", category=DeprecationWarning, @@ -1697,6 +1728,7 @@ def _remove_action(self, action): self._group_actions.remove(action) def add_mutually_exclusive_group(self, *args, **kwargs): + import warnings warnings.warn( "Nesting mutually exclusive groups is deprecated.", category=DeprecationWarning, @@ -1782,13 +1814,11 @@ def identity(string): # add parent arguments and defaults for parent in parents: + if not isinstance(parent, ArgumentParser): + raise TypeError('parents must be a list of ArgumentParser') self._add_container_actions(parent) - try: - defaults = parent._defaults - except AttributeError: - pass - else: - self._defaults.update(defaults) + defaults = parent._defaults + self._defaults.update(defaults) # ======================= # Pretty __repr__ methods @@ -1997,7 +2027,11 @@ def consume_optional(start_index): # arguments, try to parse more single-dash options out # of the tail of the option string chars = self.prefix_chars - if arg_count == 0 and option_string[1] not in chars: + if ( + arg_count == 0 + and option_string[1] not in chars + and explicit_arg != '' + ): action_tuples.append((action, [], option_string)) char = option_string[0] option_string = char + explicit_arg[0] @@ -2594,9 +2628,11 @@ def print_help(self, file=None): def _print_message(self, message, file=None): if message: - if file is None: - file = _sys.stderr - file.write(message) + file = file or _sys.stderr + try: + file.write(message) + except (AttributeError, OSError): + pass # =============== # Exiting methods diff --git a/Lib/ast.py b/Lib/ast.py index 1a94e9368c161a0..f7888d18859ae48 100644 --- a/Lib/ast.py +++ b/Lib/ast.py @@ -25,19 +25,22 @@ :license: Python License. """ import sys +import re from _ast import * from contextlib import contextmanager, nullcontext from enum import IntEnum, auto, _simple_enum def parse(source, filename='', mode='exec', *, - type_comments=False, feature_version=None): + type_comments=False, feature_version=None, optimize=-1): """ Parse the source into an AST node. Equivalent to compile(source, filename, mode, PyCF_ONLY_AST). Pass type_comments=True to get back type comments where the syntax allows. 
""" flags = PyCF_ONLY_AST + if optimize > 0: + flags |= PyCF_OPTIMIZED_AST if type_comments: flags |= PyCF_TYPE_COMMENTS if feature_version is None: @@ -49,7 +52,7 @@ def parse(source, filename='', mode='exec', *, feature_version = minor # Else it should be an int giving the minor version for 3.x. return compile(source, filename, mode, flags, - _feature_version=feature_version) + _feature_version=feature_version, optimize=optimize) def literal_eval(node_or_string): @@ -237,6 +240,12 @@ def increment_lineno(node, n=1): location in a file. """ for child in walk(node): + # TypeIgnore is a special case where lineno is not an attribute + # but rather a field of the node itself. + if isinstance(child, TypeIgnore): + child.lineno = getattr(child, 'lineno', 0) + n + continue + if 'lineno' in child._attributes: child.lineno = getattr(child, 'lineno', 0) + n if ( @@ -287,9 +296,7 @@ def get_docstring(node, clean=True): if not(node.body and isinstance(node.body[0], Expr)): return None node = node.body[0].value - if isinstance(node, Str): - text = node.s - elif isinstance(node, Constant) and isinstance(node.value, str): + if isinstance(node, Constant) and isinstance(node.value, str): text = node.value else: return None @@ -299,28 +306,17 @@ def get_docstring(node, clean=True): return text -def _splitlines_no_ff(source): +_line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))") +def _splitlines_no_ff(source, maxlines=None): """Split a string into lines ignoring form feed and other chars. This mimics how the Python parser splits source code. """ - idx = 0 lines = [] - next_line = '' - while idx < len(source): - c = source[idx] - next_line += c - idx += 1 - # Keep \r\n together - if c == '\r' and idx < len(source) and source[idx] == '\n': - next_line += '\n' - idx += 1 - if c in '\r\n': - lines.append(next_line) - next_line = '' - - if next_line: - lines.append(next_line) + for lineno, match in enumerate(_line_pattern.finditer(source), 1): + if maxlines is not None and lineno > maxlines: + break + lines.append(match[0]) return lines @@ -354,7 +350,7 @@ def get_source_segment(source, node, *, padded=False): except AttributeError: return None - lines = _splitlines_no_ff(source) + lines = _splitlines_no_ff(source, maxlines=end_lineno+1) if end_lineno == lineno: return lines[lineno].encode()[col_offset:end_col_offset].decode() @@ -503,20 +499,52 @@ def generic_visit(self, node): return node +_DEPRECATED_VALUE_ALIAS_MESSAGE = ( + "{name} is deprecated and will be removed in Python {remove}; use value instead" +) +_DEPRECATED_CLASS_MESSAGE = ( + "{name} is deprecated and will be removed in Python {remove}; " + "use ast.Constant instead" +) + + # If the ast module is loaded more than once, only add deprecated methods once if not hasattr(Constant, 'n'): # The following code is for backward compatibility. # It will be removed in future. - def _getter(self): + def _n_getter(self): + """Deprecated. Use value instead.""" + import warnings + warnings._deprecated( + "Attribute n", message=_DEPRECATED_VALUE_ALIAS_MESSAGE, remove=(3, 14) + ) + return self.value + + def _n_setter(self, value): + import warnings + warnings._deprecated( + "Attribute n", message=_DEPRECATED_VALUE_ALIAS_MESSAGE, remove=(3, 14) + ) + self.value = value + + def _s_getter(self): """Deprecated. 
Use value instead.""" + import warnings + warnings._deprecated( + "Attribute s", message=_DEPRECATED_VALUE_ALIAS_MESSAGE, remove=(3, 14) + ) return self.value - def _setter(self, value): + def _s_setter(self, value): + import warnings + warnings._deprecated( + "Attribute s", message=_DEPRECATED_VALUE_ALIAS_MESSAGE, remove=(3, 14) + ) self.value = value - Constant.n = property(_getter, _setter) - Constant.s = property(_getter, _setter) + Constant.n = property(_n_getter, _n_setter) + Constant.s = property(_s_getter, _s_setter) class _ABC(type): @@ -524,6 +552,13 @@ def __init__(cls, *args): cls.__doc__ = """Deprecated AST node class. Use ast.Constant instead""" def __instancecheck__(cls, inst): + if cls in _const_types: + import warnings + warnings._deprecated( + f"ast.{cls.__qualname__}", + message=_DEPRECATED_CLASS_MESSAGE, + remove=(3, 14) + ) if not isinstance(inst, Constant): return False if cls in _const_types: @@ -547,6 +582,10 @@ def _new(cls, *args, **kwargs): if pos < len(args): raise TypeError(f"{cls.__name__} got multiple values for argument {key!r}") if cls in _const_types: + import warnings + warnings._deprecated( + f"ast.{cls.__qualname__}", message=_DEPRECATED_CLASS_MESSAGE, remove=(3, 14) + ) return Constant(*args, **kwargs) return Constant.__new__(cls, *args, **kwargs) @@ -569,10 +608,19 @@ class Ellipsis(Constant, metaclass=_ABC): _fields = () def __new__(cls, *args, **kwargs): - if cls is Ellipsis: + if cls is _ast_Ellipsis: + import warnings + warnings._deprecated( + "ast.Ellipsis", message=_DEPRECATED_CLASS_MESSAGE, remove=(3, 14) + ) return Constant(..., *args, **kwargs) return Constant.__new__(cls, *args, **kwargs) +# Keep another reference to Ellipsis in the global namespace +# so it can be referenced in Ellipsis.__new__ +# (The original "Ellipsis" name is removed from the global namespace later on) +_ast_Ellipsis = Ellipsis + _const_types = { Num: (int, float, complex), Str: (str,), @@ -1005,6 +1053,8 @@ def visit_ClassDef(self, node): self.fill("@") self.traverse(deco) self.fill("class " + node.name) + if hasattr(node, "type_params"): + self._type_params_helper(node.type_params) with self.delimit_if("(", ")", condition = node.bases or node.keywords): comma = False for e in node.bases: @@ -1036,6 +1086,8 @@ def _function_helper(self, node, fill_suffix): self.traverse(deco) def_str = fill_suffix + " " + node.name self.fill(def_str) + if hasattr(node, "type_params"): + self._type_params_helper(node.type_params) with self.delimit("(", ")"): self.traverse(node.args) if node.returns: @@ -1044,6 +1096,30 @@ def _function_helper(self, node, fill_suffix): with self.block(extra=self.get_type_comment(node)): self._write_docstring_and_traverse_body(node) + def _type_params_helper(self, type_params): + if type_params is not None and len(type_params) > 0: + with self.delimit("[", "]"): + self.interleave(lambda: self.write(", "), self.traverse, type_params) + + def visit_TypeVar(self, node): + self.write(node.name) + if node.bound: + self.write(": ") + self.traverse(node.bound) + + def visit_TypeVarTuple(self, node): + self.write("*" + node.name) + + def visit_ParamSpec(self, node): + self.write("**" + node.name) + + def visit_TypeAlias(self, node): + self.fill("type ") + self.traverse(node.name) + self._type_params_helper(node.type_params) + self.write(" = ") + self.traverse(node.value) + def visit_For(self, node): self._for_helper("for ", node) @@ -1149,17 +1225,7 @@ def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES): def visit_JoinedStr(self, node): 
self.write("f") - if self._avoid_backslashes: - with self.buffered() as buffer: - self._write_fstring_inner(node) - return self._write_str_avoiding_backslashes("".join(buffer)) - - # If we don't need to avoid backslashes globally (i.e., we only need - # to avoid them inside FormattedValues), it's cosmetically preferred - # to use escaped whitespace. That is, it's preferred to use backslashes - # for cases like: f"{x}\n". To accomplish this, we keep track of what - # in our buffer corresponds to FormattedValues and what corresponds to - # Constant parts of the f-string, and allow escapes accordingly. + fstring_parts = [] for value in node.values: with self.buffered() as buffer: @@ -1170,25 +1236,49 @@ def visit_JoinedStr(self, node): new_fstring_parts = [] quote_types = list(_ALL_QUOTES) + fallback_to_repr = False for value, is_constant in fstring_parts: - value, quote_types = self._str_literal_helper( - value, - quote_types=quote_types, - escape_special_whitespace=is_constant, - ) + if is_constant: + value, new_quote_types = self._str_literal_helper( + value, + quote_types=quote_types, + escape_special_whitespace=True, + ) + if set(new_quote_types).isdisjoint(quote_types): + fallback_to_repr = True + break + quote_types = new_quote_types + elif "\n" in value: + quote_types = [q for q in quote_types if q in _MULTI_QUOTES] + assert quote_types new_fstring_parts.append(value) + if fallback_to_repr: + # If we weren't able to find a quote type that works for all parts + # of the JoinedStr, fallback to using repr and triple single quotes. + quote_types = ["'''"] + new_fstring_parts.clear() + for value, is_constant in fstring_parts: + if is_constant: + value = repr('"' + value) # force repr to use single quotes + expected_prefix = "'\"" + assert value.startswith(expected_prefix), repr(value) + value = value[len(expected_prefix):-1] + new_fstring_parts.append(value) + value = "".join(new_fstring_parts) quote_type = quote_types[0] self.write(f"{quote_type}{value}{quote_type}") - def _write_fstring_inner(self, node): + def _write_fstring_inner(self, node, scape_newlines=False): if isinstance(node, JoinedStr): # for both the f-string itself, and format_spec for value in node.values: - self._write_fstring_inner(value) + self._write_fstring_inner(value, scape_newlines=scape_newlines) elif isinstance(node, Constant) and isinstance(node.value, str): value = node.value.replace("{", "{{").replace("}", "}}") + if scape_newlines: + value = value.replace("\n", "\\n") self.write(value) elif isinstance(node, FormattedValue): self.visit_FormattedValue(node) @@ -1197,16 +1287,12 @@ def _write_fstring_inner(self, node): def visit_FormattedValue(self, node): def unparse_inner(inner): - unparser = type(self)(_avoid_backslashes=True) + unparser = type(self)() unparser.set_precedence(_Precedence.TEST.next(), inner) return unparser.visit(inner) with self.delimit("{", "}"): expr = unparse_inner(node.value) - if "\\" in expr: - raise ValueError( - "Unable to avoid backslash in f-string expression part" - ) if expr.startswith("{"): # Separate pair of opening brackets as "{ {" self.write(" ") @@ -1215,7 +1301,10 @@ def unparse_inner(inner): self.write(f"!{chr(node.conversion)}") if node.format_spec: self.write(":") - self._write_fstring_inner(node.format_spec) + self._write_fstring_inner( + node.format_spec, + scape_newlines=True + ) def visit_Name(self, node): self.write(node.id) @@ -1703,6 +1792,22 @@ def unparse(ast_obj): return unparser.visit(ast_obj) +_deprecated_globals = { + name: globals().pop(name) + for name in 
('Num', 'Str', 'Bytes', 'NameConstant', 'Ellipsis') +} + +def __getattr__(name): + if name in _deprecated_globals: + globals()[name] = value = _deprecated_globals[name] + import warnings + warnings._deprecated( + f"ast.{name}", message=_DEPRECATED_CLASS_MESSAGE, remove=(3, 14) + ) + return value + raise AttributeError(f"module 'ast' has no attribute '{name}'") + + def main(): import argparse diff --git a/Lib/asyncio/__init__.py b/Lib/asyncio/__init__.py index fed16ec7c67fac3..03165a425eb7d25 100644 --- a/Lib/asyncio/__init__.py +++ b/Lib/asyncio/__init__.py @@ -34,6 +34,7 @@ streams.__all__ + subprocess.__all__ + tasks.__all__ + + taskgroups.__all__ + threads.__all__ + timeouts.__all__ + transports.__all__) diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py index c8a2f9f25634ef2..416c732298d9a94 100644 --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -305,7 +305,7 @@ def _wakeup(self): self._waiters = None for waiter in waiters: if not waiter.done(): - waiter.set_result(waiter) + waiter.set_result(None) def _start_serving(self): if self._serving: @@ -377,7 +377,27 @@ async def serve_forever(self): self._serving_forever_fut = None async def wait_closed(self): - if self._sockets is None or self._waiters is None: + """Wait until server is closed and all connections are dropped. + + - If the server is not closed, wait. + - If it is closed, but there are still active connections, wait. + + Anyone waiting here will be unblocked once both conditions + (server is closed and all connections have been dropped) + have become true, in either order. + + Historical note: In 3.11 and before, this was broken, returning + immediately if the server was already closed, even if there + were still active connections. An attempted fix in 3.12.0 was + still broken, returning immediately if the server was still + open and there were no active connections. Hopefully in 3.12.1 + we have it right. + """ + # Waiters are unblocked by self._wakeup(), which is called + # from two places: self.close() and self._detach(), but only + # when both conditions have become true. To signal that this + # has happened, self._wakeup() sets self._waiters to None. + if self._waiters is None: return waiter = self._loop.create_future() self._waiters.append(waiter) @@ -400,6 +420,8 @@ def __init__(self): self._clock_resolution = time.get_clock_info('monotonic').resolution self._exception_handler = None self.set_debug(coroutines._is_debug_mode()) + # The preserved state of async generator hooks. + self._old_agen_hooks = None # In debug mode, if the execution of a callback or a step of a task # exceed this duration in seconds, the slow callback/task is logged. self.slow_callback_duration = 0.1 @@ -443,7 +465,7 @@ def create_task(self, coro, *, name=None, context=None): else: task = self._task_factory(self, coro, context=context) - tasks._set_task_name(task, name) + task.set_name(name) return task @@ -601,29 +623,52 @@ def _check_running(self): raise RuntimeError( 'Cannot run the event loop while another loop is running') - def run_forever(self): - """Run until stop() is called.""" + def _run_forever_setup(self): + """Prepare the run loop to process events. + + This method exists so that custom custom event loop subclasses (e.g., event loops + that integrate a GUI event loop with Python's event loop) have access to all the + loop setup logic. 
+ """ self._check_closed() self._check_running() self._set_coroutine_origin_tracking(self._debug) - old_agen_hooks = sys.get_asyncgen_hooks() - try: - self._thread_id = threading.get_ident() - sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook, - finalizer=self._asyncgen_finalizer_hook) + self._old_agen_hooks = sys.get_asyncgen_hooks() + self._thread_id = threading.get_ident() + sys.set_asyncgen_hooks( + firstiter=self._asyncgen_firstiter_hook, + finalizer=self._asyncgen_finalizer_hook + ) + + events._set_running_loop(self) - events._set_running_loop(self) + def _run_forever_cleanup(self): + """Clean up after an event loop finishes the looping over events. + + This method exists so that custom custom event loop subclasses (e.g., event loops + that integrate a GUI event loop with Python's event loop) have access to all the + loop cleanup logic. + """ + self._stopping = False + self._thread_id = None + events._set_running_loop(None) + self._set_coroutine_origin_tracking(False) + # Restore any pre-existing async generator hooks. + if self._old_agen_hooks is not None: + sys.set_asyncgen_hooks(*self._old_agen_hooks) + self._old_agen_hooks = None + + def run_forever(self): + """Run until stop() is called.""" + try: + self._run_forever_setup() while True: self._run_once() if self._stopping: break finally: - self._stopping = False - self._thread_id = None - events._set_running_loop(None) - self._set_coroutine_origin_tracking(False) - sys.set_asyncgen_hooks(*old_agen_hooks) + self._run_forever_cleanup() def run_until_complete(self, future): """Run until the Future is done. @@ -727,7 +772,7 @@ def call_later(self, delay, callback, *args, context=None): always relative to the current time. Each callback will be called exactly once. If two callbacks - are scheduled for exactly the same time, it undefined which + are scheduled for exactly the same time, it is undefined which will be called first. Any positional arguments after the callback will be passed to @@ -961,7 +1006,10 @@ async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None): sock = socket.socket(family=family, type=type_, proto=proto) sock.setblocking(False) if local_addr_infos is not None: - for _, _, _, _, laddr in local_addr_infos: + for lfamily, _, _, _, laddr in local_addr_infos: + # skip local addresses of different family + if lfamily != family: + continue try: sock.bind(laddr) break @@ -974,7 +1022,10 @@ async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None): exc = OSError(exc.errno, msg) my_exceptions.append(exc) else: # all bind attempts failed - raise my_exceptions.pop() + if my_exceptions: + raise my_exceptions.pop() + else: + raise OSError(f"no matching local address with {family=} found") await self.sock_connect(sock, address) return sock except OSError as exc: @@ -986,6 +1037,8 @@ async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None): if sock is not None: sock.close() raise + finally: + exceptions = my_exceptions = None async def create_connection( self, protocol_factory, host=None, port=None, @@ -1084,19 +1137,22 @@ async def create_connection( if sock is None: exceptions = [exc for sub in exceptions for exc in sub] - if all_errors: - raise ExceptionGroup("create_connection failed", exceptions) - if len(exceptions) == 1: - raise exceptions[0] - else: - # If they all have the same str(), raise one. 
- model = str(exceptions[0]) - if all(str(exc) == model for exc in exceptions): + try: + if all_errors: + raise ExceptionGroup("create_connection failed", exceptions) + if len(exceptions) == 1: raise exceptions[0] - # Raise a combined exception so the user can see all - # the various error messages. - raise OSError('Multiple exceptions: {}'.format( - ', '.join(str(exc) for exc in exceptions))) + else: + # If they all have the same str(), raise one. + model = str(exceptions[0]) + if all(str(exc) == model for exc in exceptions): + raise exceptions[0] + # Raise a combined exception so the user can see all + # the various error messages. + raise OSError('Multiple exceptions: {}'.format( + ', '.join(str(exc) for exc in exceptions))) + finally: + exceptions = None else: if sock is None: @@ -1846,12 +1902,9 @@ def call_exception_handler(self, context): exc_info=True) def _add_callback(self, handle): - """Add a Handle to _scheduled (TimerHandle) or _ready.""" - assert isinstance(handle, events.Handle), 'A Handle is required here' - if handle._cancelled: - return - assert not isinstance(handle, events.TimerHandle) - self._ready.append(handle) + """Add a Handle to _ready.""" + if not handle._cancelled: + self._ready.append(handle) def _add_callback_signalsafe(self, handle): """Like _add_callback() but called from a signal handler.""" @@ -1899,11 +1952,16 @@ def _run_once(self): timeout = 0 elif self._scheduled: # Compute the desired timeout. - when = self._scheduled[0]._when - timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT) + timeout = self._scheduled[0]._when - self.time() + if timeout > MAXIMUM_SELECT_TIMEOUT: + timeout = MAXIMUM_SELECT_TIMEOUT + elif timeout < 0: + timeout = 0 event_list = self._selector.select(timeout) self._process_events(event_list) + # Needed to break cycles when an exception occurs. + event_list = None # Handle 'later' callbacks that are ready. end_time = self.time() + self._clock_resolution diff --git a/Lib/asyncio/base_subprocess.py b/Lib/asyncio/base_subprocess.py index e15bb4141fc02ad..4c9b0dd5653c0c5 100644 --- a/Lib/asyncio/base_subprocess.py +++ b/Lib/asyncio/base_subprocess.py @@ -215,9 +215,6 @@ def _process_exited(self, returncode): # object. On Python 3.6, it is required to avoid a ResourceWarning. self._proc.returncode = returncode self._call(self._protocol.process_exited) - for p in self._pipes.values(): - if p is not None: - p.pipe.close() self._try_finish() diff --git a/Lib/asyncio/base_tasks.py b/Lib/asyncio/base_tasks.py index 26298e638cbf0d8..c907b6834137325 100644 --- a/Lib/asyncio/base_tasks.py +++ b/Lib/asyncio/base_tasks.py @@ -15,11 +15,13 @@ def _task_repr_info(task): info.insert(1, 'name=%r' % task.get_name()) - coro = coroutines._format_coroutine(task._coro) - info.insert(2, f'coro=<{coro}>') - if task._fut_waiter is not None: - info.insert(3, f'wait_for={task._fut_waiter!r}') + info.insert(2, f'wait_for={task._fut_waiter!r}') + + if task._coro: + coro = coroutines._format_coroutine(task._coro) + info.insert(2, f'coro=<{coro}>') + return info diff --git a/Lib/asyncio/coroutines.py b/Lib/asyncio/coroutines.py index 7fda0e449d500a0..ab4f30eb51ba2c6 100644 --- a/Lib/asyncio/coroutines.py +++ b/Lib/asyncio/coroutines.py @@ -25,8 +25,7 @@ def iscoroutinefunction(func): # Prioritize native coroutine check to speed-up # asyncio.iscoroutine. 
-_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType, - collections.abc.Coroutine) +_COROUTINE_TYPES = (types.CoroutineType, collections.abc.Coroutine) _iscoroutine_typecache = set() diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py index a327ba54a323a8e..0ccf85105e6673f 100644 --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -13,6 +13,7 @@ import contextvars import os +import signal import socket import subprocess import sys @@ -616,7 +617,7 @@ class AbstractEventLoopPolicy: def get_event_loop(self): """Get the event loop for the current context. - Returns an event loop object implementing the BaseEventLoop interface, + Returns an event loop object implementing the AbstractEventLoop interface, or raises an exception in case no event loop has been set for the current context and the current policy does not specify to create one. @@ -674,6 +675,23 @@ def get_event_loop(self): if (self._local._loop is None and not self._local._set_called and threading.current_thread() is threading.main_thread()): + stacklevel = 2 + try: + f = sys._getframe(1) + except AttributeError: + pass + else: + # Move up the call stack so that the warning is attached + # to the line outside asyncio itself. + while f: + module = f.f_globals.get('__name__') + if not (module == 'asyncio' or module.startswith('asyncio.')): + break + f = f.f_back + stacklevel += 1 + import warnings + warnings.warn('There is no current event loop', + DeprecationWarning, stacklevel=stacklevel) self.set_event_loop(self.new_event_loop()) if self._local._loop is None: @@ -785,16 +803,9 @@ def get_event_loop(): the result of `get_event_loop_policy().get_event_loop()` call. """ # NOTE: this function is implemented in C (see _asynciomodule.c) - return _py__get_event_loop() - - -def _get_event_loop(stacklevel=3): current_loop = _get_running_loop() if current_loop is not None: return current_loop - import warnings - warnings.warn('There is no current event loop', - DeprecationWarning, stacklevel=stacklevel) return get_event_loop_policy().get_event_loop() @@ -824,7 +835,6 @@ def set_child_watcher(watcher): _py__set_running_loop = _set_running_loop _py_get_running_loop = get_running_loop _py_get_event_loop = get_event_loop -_py__get_event_loop = _get_event_loop try: @@ -832,7 +842,7 @@ def set_child_watcher(watcher): # functions in asyncio. Pure Python implementation is # about 4 times slower than C-accelerated. from _asyncio import (_get_running_loop, _set_running_loop, - get_running_loop, get_event_loop, _get_event_loop) + get_running_loop, get_event_loop) except ImportError: pass else: @@ -841,4 +851,14 @@ def set_child_watcher(watcher): _c__set_running_loop = _set_running_loop _c_get_running_loop = get_running_loop _c_get_event_loop = get_event_loop - _c__get_event_loop = _get_event_loop + + +if hasattr(os, 'fork'): + def on_fork(): + # Reset the loop and wakeupfd in the forked child process. + if _event_loop_policy is not None: + _event_loop_policy._local = BaseDefaultEventLoopPolicy._Local() + _set_running_loop(None) + signal.set_wakeup_fd(-1) + + os.register_at_fork(after_in_child=on_fork) diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py index 3a6b44a09108695..97fc4e3fcb60ee2 100644 --- a/Lib/asyncio/futures.py +++ b/Lib/asyncio/futures.py @@ -77,7 +77,7 @@ def __init__(self, *, loop=None): the default event loop. 
""" if loop is None: - self._loop = events._get_event_loop() + self._loop = events.get_event_loop() else: self._loop = loop self._callbacks = [] @@ -413,7 +413,7 @@ def wrap_future(future, *, loop=None): assert isinstance(future, concurrent.futures.Future), \ f'concurrent.futures.Future is expected, got {future!r}' if loop is None: - loop = events._get_event_loop() + loop = events.get_event_loop() new_future = loop.create_future() _chain_future(future, new_future) return new_future diff --git a/Lib/asyncio/proactor_events.py b/Lib/asyncio/proactor_events.py index c6aab408fc74107..1e2a730cf368a99 100644 --- a/Lib/asyncio/proactor_events.py +++ b/Lib/asyncio/proactor_events.py @@ -288,7 +288,8 @@ def _loop_reading(self, fut=None): # we got end-of-file so no need to reschedule a new read return - data = self._data[:length] + # It's a new slice so make it immutable so protocols upstream don't have problems + data = bytes(memoryview(self._data)[:length]) else: # the future will be replaced by next proactor.recv call fut.cancel() diff --git a/Lib/asyncio/runners.py b/Lib/asyncio/runners.py index b1c4dbd76197214..1b89236599aad70 100644 --- a/Lib/asyncio/runners.py +++ b/Lib/asyncio/runners.py @@ -157,7 +157,7 @@ def _on_sigint(self, signum, frame, main_task): raise KeyboardInterrupt() -def run(main, *, debug=None): +def run(main, *, debug=None, loop_factory=None): """Execute the coroutine and return the result. This function runs the passed coroutine, taking care of @@ -190,7 +190,7 @@ async def main(): raise RuntimeError( "asyncio.run() cannot be called from a running event loop") - with Runner(debug=debug) as runner: + with Runner(debug=debug, loop_factory=loop_factory) as runner: return runner.run(main) diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py index bfa4590154f3721..d521b4e2e255a9f 100644 --- a/Lib/asyncio/selector_events.py +++ b/Lib/asyncio/selector_events.py @@ -9,6 +9,8 @@ import collections import errno import functools +import itertools +import os import selectors import socket import warnings @@ -28,6 +30,14 @@ from . 
import trsock from .log import logger +_HAS_SENDMSG = hasattr(socket.socket, 'sendmsg') + +if _HAS_SENDMSG: + try: + SC_IOV_MAX = os.sysconf('SC_IOV_MAX') + except OSError: + # Fallback to send + _HAS_SENDMSG = False def _test_selector_event(selector, fd, event): # Test if the selector is monitoring 'event' events @@ -264,9 +274,8 @@ def _ensure_fd_no_transport(self, fd): def _add_reader(self, fd, callback, *args): self._check_closed() handle = events.Handle(callback, args, self, None) - try: - key = self._selector.get_key(fd) - except KeyError: + key = self._selector.get_map().get(fd) + if key is None: self._selector.register(fd, selectors.EVENT_READ, (handle, None)) else: @@ -280,30 +289,27 @@ def _add_reader(self, fd, callback, *args): def _remove_reader(self, fd): if self.is_closed(): return False - try: - key = self._selector.get_key(fd) - except KeyError: + key = self._selector.get_map().get(fd) + if key is None: return False + mask, (reader, writer) = key.events, key.data + mask &= ~selectors.EVENT_READ + if not mask: + self._selector.unregister(fd) else: - mask, (reader, writer) = key.events, key.data - mask &= ~selectors.EVENT_READ - if not mask: - self._selector.unregister(fd) - else: - self._selector.modify(fd, mask, (None, writer)) + self._selector.modify(fd, mask, (None, writer)) - if reader is not None: - reader.cancel() - return True - else: - return False + if reader is not None: + reader.cancel() + return True + else: + return False def _add_writer(self, fd, callback, *args): self._check_closed() handle = events.Handle(callback, args, self, None) - try: - key = self._selector.get_key(fd) - except KeyError: + key = self._selector.get_map().get(fd) + if key is None: self._selector.register(fd, selectors.EVENT_WRITE, (None, handle)) else: @@ -318,24 +324,22 @@ def _remove_writer(self, fd): """Remove a writer callback.""" if self.is_closed(): return False - try: - key = self._selector.get_key(fd) - except KeyError: + key = self._selector.get_map().get(fd) + if key is None: return False + mask, (reader, writer) = key.events, key.data + # Remove both writer and connector. + mask &= ~selectors.EVENT_WRITE + if not mask: + self._selector.unregister(fd) else: - mask, (reader, writer) = key.events, key.data - # Remove both writer and connector. - mask &= ~selectors.EVENT_WRITE - if not mask: - self._selector.unregister(fd) - else: - self._selector.modify(fd, mask, (reader, None)) + self._selector.modify(fd, mask, (reader, None)) - if writer is not None: - writer.cancel() - return True - else: - return False + if writer is not None: + writer.cancel() + return True + else: + return False def add_reader(self, fd, callback, *args): """Add a reader callback.""" @@ -633,7 +637,11 @@ async def sock_connect(self, sock, address): fut = self.create_future() self._sock_connect(fut, sock, address) - return await fut + try: + return await fut + finally: + # Needed to break cycles when an exception occurs. + fut = None def _sock_connect(self, fut, sock, address): fd = sock.fileno() @@ -655,6 +663,8 @@ def _sock_connect(self, fut, sock, address): fut.set_exception(exc) else: fut.set_result(None) + finally: + fut = None def _sock_write_done(self, fd, fut, handle=None): if handle is None or not handle.cancelled(): @@ -678,6 +688,8 @@ def _sock_connect_cb(self, fut, sock, address): fut.set_exception(exc) else: fut.set_result(None) + finally: + fut = None async def sock_accept(self, sock): """Accept a connection. 
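# Illustrative sketch, not part of the diff above: the _add_reader/_remove_reader
# and _add_writer/_remove_writer rewrites replace try/except KeyError around
# selector.get_key(fd) with a dict-style .get() on selector.get_map(), so the
# "fd not registered yet" case is handled without raising and catching KeyError.
# A standalone version of the same lookup pattern:
import selectors
import socket

sel = selectors.DefaultSelector()
rsock, wsock = socket.socketpair()
fd = rsock.fileno()

key = sel.get_map().get(fd)          # None when fd is not registered yet
if key is None:
    sel.register(fd, selectors.EVENT_READ, data=None)
else:
    sel.modify(fd, key.events | selectors.EVENT_READ, data=key.data)

sel.close()
rsock.close()
wsock.close()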
@@ -749,8 +761,6 @@ class _SelectorTransport(transports._FlowControlMixin, max_size = 256 * 1024 # Buffer size passed to recv(). - _buffer_factory = bytearray # Constructs initial value for self._buffer. - # Attribute used in the destructor: it must be set even if the constructor # is not called (see _SelectorSslTransport which may start by raising an # exception) @@ -775,9 +785,11 @@ def __init__(self, loop, sock, protocol, extra=None, server=None): self.set_protocol(protocol) self._server = server - self._buffer = self._buffer_factory() + self._buffer = collections.deque() self._conn_lost = 0 # Set when call to connection_lost scheduled. self._closing = False # Set when close() called. + self._paused = False # Set when pause_reading() called + if self._server is not None: self._server._attach() loop._transports[self._sock_fd] = self @@ -823,6 +835,25 @@ def get_protocol(self): def is_closing(self): return self._closing + def is_reading(self): + return not self.is_closing() and not self._paused + + def pause_reading(self): + if not self.is_reading(): + return + self._paused = True + self._loop._remove_reader(self._sock_fd) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if self._closing or not self._paused: + return + self._paused = False + self._add_reader(self._sock_fd, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + def close(self): if self._closing: return @@ -879,12 +910,11 @@ def _call_connection_lost(self, exc): self._server = None def get_write_buffer_size(self): - return len(self._buffer) + return sum(map(len, self._buffer)) def _add_reader(self, fd, callback, *args): - if self._closing: + if not self.is_reading(): return - self._loop._add_reader(fd, callback, *args) @@ -899,9 +929,11 @@ def __init__(self, loop, sock, protocol, waiter=None, self._read_ready_cb = None super().__init__(loop, sock, protocol, extra, server) self._eof = False - self._paused = False self._empty_waiter = None - + if _HAS_SENDMSG: + self._write_ready = self._write_sendmsg + else: + self._write_ready = self._write_send # Disable the Nagle algorithm -- small writes will be # sent without waiting for the TCP ACK. This generally # decreases the latency (in some cases significantly.) @@ -924,25 +956,6 @@ def set_protocol(self, protocol): super().set_protocol(protocol) - def is_reading(self): - return not self._paused and not self._closing - - def pause_reading(self): - if self._closing or self._paused: - return - self._paused = True - self._loop._remove_reader(self._sock_fd) - if self._loop.get_debug(): - logger.debug("%r pauses reading", self) - - def resume_reading(self): - if self._closing or not self._paused: - return - self._paused = False - self._add_reader(self._sock_fd, self._read_ready) - if self._loop.get_debug(): - logger.debug("%r resumes reading", self) - def _read_ready(self): self._read_ready_cb() @@ -1058,23 +1071,68 @@ def write(self, data): self._fatal_error(exc, 'Fatal write error on socket transport') return else: - data = data[n:] + data = memoryview(data)[n:] if not data: return # Not all was written; register write handler. self._loop._add_writer(self._sock_fd, self._write_ready) # Add it to the buffer. 
- self._buffer.extend(data) + self._buffer.append(data) self._maybe_pause_protocol() - def _write_ready(self): + def _get_sendmsg_buffer(self): + return itertools.islice(self._buffer, SC_IOV_MAX) + + def _write_sendmsg(self): assert self._buffer, 'Data should not be empty' + if self._conn_lost: + return + try: + nbytes = self._sock.sendmsg(self._get_sendmsg_buffer()) + self._adjust_leftover_buffer(nbytes) + except (BlockingIOError, InterruptedError): + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._loop._remove_writer(self._sock_fd) + self._buffer.clear() + self._fatal_error(exc, 'Fatal write error on socket transport') + if self._empty_waiter is not None: + self._empty_waiter.set_exception(exc) + else: + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._empty_waiter is not None: + self._empty_waiter.set_result(None) + if self._closing: + self._call_connection_lost(None) + elif self._eof: + self._sock.shutdown(socket.SHUT_WR) + def _adjust_leftover_buffer(self, nbytes: int) -> None: + buffer = self._buffer + while nbytes: + b = buffer.popleft() + b_len = len(b) + if b_len <= nbytes: + nbytes -= b_len + else: + buffer.appendleft(b[nbytes:]) + break + + def _write_send(self): + assert self._buffer, 'Data should not be empty' if self._conn_lost: return try: - n = self._sock.send(self._buffer) + buffer = self._buffer.popleft() + n = self._sock.send(buffer) + if n != len(buffer): + # Not all data was written + self._buffer.appendleft(buffer[n:]) except (BlockingIOError, InterruptedError): pass except (SystemExit, KeyboardInterrupt): @@ -1086,8 +1144,6 @@ def _write_ready(self): if self._empty_waiter is not None: self._empty_waiter.set_exception(exc) else: - if n: - del self._buffer[:n] self._maybe_resume_protocol() # May append to buffer. if not self._buffer: self._loop._remove_writer(self._sock_fd) @@ -1105,6 +1161,19 @@ def write_eof(self): if not self._buffer: self._sock.shutdown(socket.SHUT_WR) + def writelines(self, list_of_data): + if self._eof: + raise RuntimeError('Cannot call writelines() after write_eof()') + if self._empty_waiter is not None: + raise RuntimeError('unable to writelines; sendfile is in progress') + if not list_of_data: + return + self._buffer.extend([memoryview(data) for data in list_of_data]) + self._write_ready() + # If the entire buffer couldn't be written, register a write handler + if self._buffer: + self._loop._add_writer(self._sock_fd, self._write_ready) + def can_write_eof(self): return True @@ -1125,6 +1194,11 @@ def _make_empty_waiter(self): def _reset_empty_waiter(self): self._empty_waiter = None + def close(self): + self._read_ready_cb = None + self._write_ready = None + super().close() + class _SelectorDatagramTransport(_SelectorTransport, transports.DatagramTransport): diff --git a/Lib/asyncio/sslproto.py b/Lib/asyncio/sslproto.py index 5cb5cd35883f07d..3eb65a8a08b5a05 100644 --- a/Lib/asyncio/sslproto.py +++ b/Lib/asyncio/sslproto.py @@ -199,12 +199,6 @@ def get_read_buffer_size(self): """Return the current size of the read buffer.""" return self._ssl_protocol._get_read_buffer_size() - def get_write_buffer_limits(self): - """Get the high and low watermarks for write flow control. 
- Return a tuple (low, high) where low and high are - positive number of bytes.""" - return self._ssl_protocol._transport.get_write_buffer_limits() - @property def _protocol_paused(self): # Required for sendfile fallback pause_writing/resume_writing logic @@ -250,7 +244,8 @@ def abort(self): called with None as its argument. """ self._closed = True - self._ssl_protocol._abort() + if self._ssl_protocol is not None: + self._ssl_protocol._abort() def _force_close(self, exc): self._closed = True @@ -544,7 +539,7 @@ def _start_handshake(self): # start handshake timeout count down self._handshake_timeout_handle = \ self._loop.call_later(self._ssl_handshake_timeout, - lambda: self._check_handshake_timeout()) + self._check_handshake_timeout) self._do_handshake() @@ -624,7 +619,7 @@ def _start_shutdown(self): self._set_state(SSLProtocolState.FLUSHING) self._shutdown_timeout_handle = self._loop.call_later( self._ssl_shutdown_timeout, - lambda: self._check_shutdown_timeout() + self._check_shutdown_timeout ) self._do_flush() @@ -763,7 +758,7 @@ def _do_read__buffered(self): else: break else: - self._loop.call_soon(lambda: self._do_read()) + self._loop.call_soon(self._do_read) except SSLAgainErrors: pass if offset > 0: diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py index c4d837a1170819d..ffb48b9cd6cb70a 100644 --- a/Lib/asyncio/streams.py +++ b/Lib/asyncio/streams.py @@ -5,6 +5,7 @@ import collections import socket import sys +import warnings import weakref if hasattr(socket, 'AF_UNIX'): @@ -66,9 +67,8 @@ async def start_server(client_connected_cb, host=None, port=None, *, positional host and port, with various optional keyword arguments following. The return value is the same as loop.create_server(). - Additional optional keyword arguments are loop (to set the event loop - instance to use) and limit (to set the buffer limit passed to the - StreamReader). + Additional optional keyword argument is limit (to set the buffer + limit passed to the StreamReader). The return value is the same as loop.create_server(), i.e. a Server object which can be used to stop the service. 
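# Illustrative usage sketch for the start_server() API whose docstring is adjusted
# above (not part of the diff; the echo handler and the address are assumptions):
import asyncio

async def handle_echo(reader, writer):
    data = await reader.readline()
    writer.write(data)
    await writer.drain()
    writer.close()
    await writer.wait_closed()

async def main():
    server = await asyncio.start_server(handle_echo, "127.0.0.1", 8888)
    async with server:
        await server.serve_forever()

# asyncio.run(main())  # serves until cancelled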
@@ -125,7 +125,7 @@ class FlowControlMixin(protocols.Protocol): def __init__(self, loop=None): if loop is None: - self._loop = events._get_event_loop(stacklevel=4) + self._loop = events.get_event_loop() else: self._loop = loop self._paused = False @@ -245,7 +245,19 @@ def connection_made(self, transport): res = self._client_connected_cb(reader, self._stream_writer) if coroutines.iscoroutine(res): + def callback(task): + exc = task.exception() + if exc is not None: + self._loop.call_exception_handler({ + 'message': 'Unhandled exception in client_connected_cb', + 'exception': exc, + 'transport': transport, + }) + transport.close() + self._task = self._loop.create_task(res) + self._task.add_done_callback(callback) + self._strong_reader = None def connection_lost(self, exc): @@ -378,7 +390,8 @@ async def drain(self): async def start_tls(self, sslcontext, *, server_hostname=None, - ssl_handshake_timeout=None): + ssl_handshake_timeout=None, + ssl_shutdown_timeout=None): """Upgrade an existing stream-based connection to TLS.""" server_side = self._protocol._client_connected_cb is not None protocol = self._protocol @@ -386,10 +399,18 @@ async def start_tls(self, sslcontext, *, new_transport = await self._loop.start_tls( # type: ignore self._transport, protocol, sslcontext, server_side=server_side, server_hostname=server_hostname, - ssl_handshake_timeout=ssl_handshake_timeout) + ssl_handshake_timeout=ssl_handshake_timeout, + ssl_shutdown_timeout=ssl_shutdown_timeout) self._transport = new_transport protocol._replace_writer(self) + def __del__(self, warnings=warnings): + if not self._transport.is_closing(): + if self._loop.is_closed(): + warnings.warn("loop is closed", ResourceWarning) + else: + self.close() + warnings.warn(f"unclosed {self!r}", ResourceWarning) class StreamReader: @@ -404,7 +425,7 @@ def __init__(self, limit=_DEFAULT_LIMIT, loop=None): self._limit = limit if loop is None: - self._loop = events._get_event_loop() + self._loop = events.get_event_loop() else: self._loop = loop self._buffer = bytearray() @@ -647,16 +668,17 @@ async def readuntil(self, separator=b'\n'): async def read(self, n=-1): """Read up to `n` bytes from the stream. - If n is not provided, or set to -1, read until EOF and return all read - bytes. If the EOF was received and the internal buffer is empty, return - an empty bytes object. + If `n` is not provided or set to -1, + read until EOF, then return all read bytes. + If EOF was received and the internal buffer is empty, + return an empty bytes object. - If n is zero, return empty bytes object immediately. + If `n` is 0, return an empty bytes object immediately. - If n is positive, this function try to read `n` bytes, and may return - less or equal bytes than requested, but at least one byte. If EOF was - received before any byte is read, this function returns empty byte - object. + If `n` is positive, return at most `n` available bytes + as soon as at least 1 byte is available in the internal buffer. + If EOF is received before any byte is read, return an empty + bytes object. Returned value is not limited with limit, configured at stream creation. 
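# Illustrative sketch of the read() semantics restated in the docstring above
# (not part of the diff; the host, port and request bytes are assumptions):
import asyncio

async def fetch_some(host="example.com", port=80):
    reader, writer = await asyncio.open_connection(host, port)
    writer.write(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    await writer.drain()
    first = await reader.read(64)   # at most 64 bytes, as soon as any data is buffered
    rest = await reader.read()      # n=-1 (default): keep reading until EOF
    writer.close()
    await writer.wait_closed()
    return first, rest

# asyncio.run(fetch_some())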
@@ -688,7 +710,7 @@ async def read(self, n=-1): await self._wait_for_data('read') # This will work right even if buffer is less than n bytes - data = bytes(self._buffer[:n]) + data = bytes(memoryview(self._buffer)[:n]) del self._buffer[:n] self._maybe_resume_transport() @@ -730,7 +752,7 @@ async def readexactly(self, n): data = bytes(self._buffer) self._buffer.clear() else: - data = bytes(self._buffer[:n]) + data = bytes(memoryview(self._buffer)[:n]) del self._buffer[:n] self._maybe_resume_transport() return data diff --git a/Lib/asyncio/subprocess.py b/Lib/asyncio/subprocess.py index cd10231f710f113..043359bbd03f8aa 100644 --- a/Lib/asyncio/subprocess.py +++ b/Lib/asyncio/subprocess.py @@ -81,6 +81,9 @@ def pipe_connection_lost(self, fd, exc): self._stdin_closed.set_result(None) else: self._stdin_closed.set_exception(exc) + # Since calling `wait_closed()` is not mandatory, + # we shouldn't log the traceback if this is not awaited. + self._stdin_closed._log_traceback = False return if fd == 1: reader = self.stdout @@ -144,14 +147,17 @@ def kill(self): async def _feed_stdin(self, input): debug = self._loop.get_debug() - self.stdin.write(input) - if debug: - logger.debug( - '%r communicate: feed stdin (%s bytes)', self, len(input)) try: + if input is not None: + self.stdin.write(input) + if debug: + logger.debug( + '%r communicate: feed stdin (%s bytes)', self, len(input)) + await self.stdin.drain() except (BrokenPipeError, ConnectionResetError) as exc: - # communicate() ignores BrokenPipeError and ConnectionResetError + # communicate() ignores BrokenPipeError and ConnectionResetError. + # write() and drain() can raise these exceptions. if debug: logger.debug('%r communicate: stdin got %r', self, exc) @@ -180,7 +186,7 @@ async def _read_stream(self, fd): return output async def communicate(self, input=None): - if input is not None: + if self.stdin is not None: stdin = self._feed_stdin(input) else: stdin = self._noop() diff --git a/Lib/asyncio/taskgroups.py b/Lib/asyncio/taskgroups.py index 911419e1769c17a..91be0decc41c426 100644 --- a/Lib/asyncio/taskgroups.py +++ b/Lib/asyncio/taskgroups.py @@ -2,7 +2,7 @@ # license: PSFL. -__all__ = ["TaskGroup"] +__all__ = ("TaskGroup",) from . import events from . import exceptions @@ -10,7 +10,21 @@ class TaskGroup: + """Asynchronous context manager for managing groups of tasks. + Example use: + + async with asyncio.TaskGroup() as group: + task1 = group.create_task(some_coroutine(...)) + task2 = group.create_task(other_coroutine(...)) + print("Both tasks have completed now.") + + All tasks are awaited when the context manager exits. + + Any exceptions other than `asyncio.CancelledError` raised within + a task will cancel all remaining tasks and wait for them to exit. + The exceptions are then combined and raised as an `ExceptionGroup`. 
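# Illustrative sketch (not part of the diff) of catching the ExceptionGroup that
# TaskGroup raises when child tasks fail, using except* (Python 3.11+); the
# may_fail() coroutine is an assumption:
import asyncio

async def may_fail(i):
    if i == 2:
        raise ValueError(i)
    await asyncio.sleep(0)

async def main():
    try:
        async with asyncio.TaskGroup() as group:
            for i in range(4):
                group.create_task(may_fail(i))
    except* ValueError as eg:
        # eg is an ExceptionGroup holding the ValueError; the remaining tasks
        # were cancelled and awaited before it was raised.
        print("failed:", eg.exceptions)

# asyncio.run(main())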
+ """ def __init__(self): self._entered = False self._exiting = False @@ -40,16 +54,14 @@ def __repr__(self): async def __aenter__(self): if self._entered: raise RuntimeError( - f"TaskGroup {self!r} has been already entered") - self._entered = True - + f"TaskGroup {self!r} has already been entered") if self._loop is None: self._loop = events.get_running_loop() - self._parent_task = tasks.current_task(self._loop) if self._parent_task is None: raise RuntimeError( f'TaskGroup {self!r} cannot determine the parent task') + self._entered = True return self @@ -135,6 +147,10 @@ async def __aexit__(self, et, exc, tb): self._errors = None def create_task(self, coro, *, name=None, context=None): + """Create a new task in this group and return it. + + Similar to `asyncio.create_task`. + """ if not self._entered: raise RuntimeError(f"TaskGroup {self!r} has not been entered") if self._exiting and not self._tasks: @@ -145,9 +161,15 @@ def create_task(self, coro, *, name=None, context=None): task = self._loop.create_task(coro) else: task = self._loop.create_task(coro, context=context) - tasks._set_task_name(task, name) - task.add_done_callback(self._on_task_done) - self._tasks.add(task) + task.set_name(name) + # optimization: Immediately call the done callback if the task is + # already done (e.g. if the coro was able to complete eagerly), + # and skip scheduling a done callback + if task.done(): + self._on_task_done(task) + else: + self._tasks.add(task) + task.add_done_callback(self._on_task_done) return task # Since Python 3.8 Tasks propagate all exceptions correctly, diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py index 571013745aa03aa..e84b21390557beb 100644 --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -6,6 +6,7 @@ 'wait', 'wait_for', 'as_completed', 'sleep', 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe', 'current_task', 'all_tasks', + 'create_eager_task_factory', 'eager_task_factory', '_register_task', '_unregister_task', '_enter_task', '_leave_task', ) @@ -14,8 +15,8 @@ import functools import inspect import itertools +import math import types -import warnings import weakref from types import GenericAlias @@ -24,7 +25,7 @@ from . import events from . import exceptions from . import futures -from .coroutines import _is_coroutine +from . import timeouts # Helper to generate new task names # This uses itertools.count() instead of a "+= 1" operation because the latter @@ -43,57 +44,59 @@ def all_tasks(loop=None): """Return a set of all tasks for the loop.""" if loop is None: loop = events.get_running_loop() - # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another - # thread while we do so. Therefore we cast it to list prior to filtering. The list - # cast itself requires iteration, so we repeat it several times ignoring - # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for - # details. + # capturing the set of eager tasks first, so if an eager task "graduates" + # to a regular task in another thread, we don't risk missing it. + eager_tasks = list(_eager_tasks) + # Looping over the WeakSet isn't safe as it can be updated from another + # thread, therefore we cast it to list prior to filtering. The list cast + # itself requires iteration, so we repeat it several times ignoring + # RuntimeErrors (which are not very likely to occur). + # See issues 34970 and 36607 for details. 
+ scheduled_tasks = None i = 0 while True: try: - tasks = list(_all_tasks) + scheduled_tasks = list(_scheduled_tasks) except RuntimeError: i += 1 if i >= 1000: raise else: break - return {t for t in tasks + return {t for t in itertools.chain(scheduled_tasks, eager_tasks) if futures._get_loop(t) is loop and not t.done()} -def _set_task_name(task, name): - if name is not None: - try: - set_name = task.set_name - except AttributeError: - warnings.warn("Task.set_name() was added in Python 3.8, " - "the method support will be mandatory for third-party " - "task implementations since 3.13.", - DeprecationWarning, stacklevel=3) - else: - set_name(name) - - class Task(futures._PyFuture): # Inherit Python Task implementation # from a Python Future implementation. """A coroutine wrapped in a Future.""" # An important invariant maintained while a Task not done: + # _fut_waiter is either None or a Future. The Future + # can be either done() or not done(). + # The task can be in any of 3 states: # - # - Either _fut_waiter is None, and _step() is scheduled; - # - or _fut_waiter is some Future, and _step() is *not* scheduled. + # - 1: _fut_waiter is not None and not _fut_waiter.done(): + # __step() is *not* scheduled and the Task is waiting for _fut_waiter. + # - 2: (_fut_waiter is None or _fut_waiter.done()) and __step() is scheduled: + # the Task is waiting for __step() to be executed. + # - 3: _fut_waiter is None and __step() is *not* scheduled: + # the Task is currently executing (in __step()). # - # The only transition from the latter to the former is through - # _wakeup(). When _fut_waiter is not None, one of its callbacks - # must be _wakeup(). - - # If False, don't log a message if the task is destroyed whereas its + # * In state 1, one of the callbacks of __fut_waiter must be __wakeup(). + # * The transition from 1 to 2 happens when _fut_waiter becomes done(), + # as it schedules __wakeup() to be called (which calls __step() so + # we way that __step() is scheduled). + # * It transitions from 2 to 3 when __step() is executed, and it clears + # _fut_waiter to None. + + # If False, don't log a message if the task is destroyed while its # status is still pending _log_destroy_pending = True - def __init__(self, coro, *, loop=None, name=None, context=None): + def __init__(self, coro, *, loop=None, name=None, context=None, + eager_start=False): super().__init__(loop=loop) if self._source_traceback: del self._source_traceback[-1] @@ -117,8 +120,11 @@ def __init__(self, coro, *, loop=None, name=None, context=None): else: self._context = context - self._loop.call_soon(self.__step, context=self._context) - _register_task(self) + if eager_start and self._loop.is_running(): + self.__eager_start() + else: + self._loop.call_soon(self.__step, context=self._context) + _register_task(self) def __del__(self): if self._state == futures._PENDING and self._log_destroy_pending: @@ -250,6 +256,25 @@ def uncancel(self): self._num_cancels_requested -= 1 return self._num_cancels_requested + def __eager_start(self): + prev_task = _swap_current_task(self._loop, self) + try: + _register_eager_task(self) + try: + self._context.run(self.__step_run_and_handle_result, None) + finally: + _unregister_eager_task(self) + finally: + try: + curtask = _swap_current_task(self._loop, prev_task) + assert curtask is self + finally: + if self.done(): + self._coro = None + self = None # Needed to break cycles when an exception occurs. 
+ else: + _register_task(self) + def __step(self, exc=None): if self.done(): raise exceptions.InvalidStateError( @@ -258,11 +283,17 @@ def __step(self, exc=None): if not isinstance(exc, exceptions.CancelledError): exc = self._make_cancelled_error() self._must_cancel = False - coro = self._coro self._fut_waiter = None _enter_task(self._loop, self) - # Call either coro.throw(exc) or coro.send(None). + try: + self.__step_run_and_handle_result(exc) + finally: + _leave_task(self._loop, self) + self = None # Needed to break cycles when an exception occurs. + + def __step_run_and_handle_result(self, exc): + coro = self._coro try: if exc is None: # We use the `send` method directly, because coroutines @@ -334,7 +365,6 @@ def __step(self, exc=None): self._loop.call_soon( self.__step, new_exc, context=self._context) finally: - _leave_task(self._loop, self) self = None # Needed to break cycles when an exception occurs. def __wakeup(self, future): @@ -378,7 +408,7 @@ def create_task(coro, *, name=None, context=None): else: task = loop.create_task(coro, context=context) - _set_task_name(task, name) + task.set_name(name) return task @@ -394,8 +424,6 @@ async def wait(fs, *, timeout=None, return_when=ALL_COMPLETED): The fs iterable must not be empty. - Coroutines will be wrapped in Tasks. - Returns two sets of Future: (done, pending). Usage: @@ -437,65 +465,44 @@ async def wait_for(fut, timeout): If the wait is cancelled, the task is also cancelled. + If the task supresses the cancellation and returns a value instead, + that value is returned. + This function is a coroutine. """ - loop = events.get_running_loop() + # The special case for timeout <= 0 is for the following case: + # + # async def test_waitfor(): + # func_started = False + # + # async def func(): + # nonlocal func_started + # func_started = True + # + # try: + # await asyncio.wait_for(func(), 0) + # except asyncio.TimeoutError: + # assert not func_started + # else: + # assert False + # + # asyncio.run(test_waitfor()) - if timeout is None: - return await fut - if timeout <= 0: - fut = ensure_future(fut, loop=loop) + if timeout is not None and timeout <= 0: + fut = ensure_future(fut) if fut.done(): return fut.result() - await _cancel_and_wait(fut, loop=loop) + await _cancel_and_wait(fut) try: return fut.result() except exceptions.CancelledError as exc: - raise exceptions.TimeoutError() from exc - - waiter = loop.create_future() - timeout_handle = loop.call_later(timeout, _release_waiter, waiter) - cb = functools.partial(_release_waiter, waiter) - - fut = ensure_future(fut, loop=loop) - fut.add_done_callback(cb) - - try: - # wait until the future completes or the timeout - try: - await waiter - except exceptions.CancelledError: - if fut.done(): - return fut.result() - else: - fut.remove_done_callback(cb) - # We must ensure that the task is not running - # after wait_for() returns. - # See https://bugs.python.org/issue32751 - await _cancel_and_wait(fut, loop=loop) - raise - - if fut.done(): - return fut.result() - else: - fut.remove_done_callback(cb) - # We must ensure that the task is not running - # after wait_for() returns. 
- # See https://bugs.python.org/issue32751 - await _cancel_and_wait(fut, loop=loop) - # In case task cancellation failed with some - # exception, we should re-raise it - # See https://bugs.python.org/issue40607 - try: - return fut.result() - except exceptions.CancelledError as exc: - raise exceptions.TimeoutError() from exc - finally: - timeout_handle.cancel() + raise TimeoutError from exc + async with timeouts.timeout(timeout): + return await fut async def _wait(fs, timeout, return_when, loop): """Internal helper for wait(). @@ -541,9 +548,10 @@ def _on_completion(f): return done, pending -async def _cancel_and_wait(fut, loop): +async def _cancel_and_wait(fut): """Cancel the *fut* future or task and wait until it completes.""" + loop = events.get_running_loop() waiter = loop.create_future() cb = functools.partial(_release_waiter, waiter) fut.add_done_callback(cb) @@ -582,7 +590,7 @@ def as_completed(fs, *, timeout=None): from .queues import Queue # Import here to avoid circular import problem. done = Queue() - loop = events._get_event_loop() + loop = events.get_event_loop() todo = {ensure_future(f, loop=loop) for f in set(fs)} timeout_handle = None @@ -633,6 +641,9 @@ async def sleep(delay, result=None): await __sleep0() return result + if math.isnan(delay): + raise ValueError("Invalid delay: NaN (not a number)") + loop = events.get_running_loop() future = loop.create_future() h = loop.call_later(delay, @@ -649,46 +660,33 @@ def ensure_future(coro_or_future, *, loop=None): If the argument is a Future, it is returned directly. """ - return _ensure_future(coro_or_future, loop=loop) - - -def _ensure_future(coro_or_future, *, loop=None): if futures.isfuture(coro_or_future): if loop is not None and loop is not futures._get_loop(coro_or_future): raise ValueError('The future belongs to a different loop than ' 'the one specified as the loop argument') return coro_or_future - called_wrap_awaitable = False + should_close = True if not coroutines.iscoroutine(coro_or_future): if inspect.isawaitable(coro_or_future): + async def _wrap_awaitable(awaitable): + return await awaitable + coro_or_future = _wrap_awaitable(coro_or_future) - called_wrap_awaitable = True + should_close = False else: raise TypeError('An asyncio.Future, a coroutine or an awaitable ' 'is required') if loop is None: - loop = events._get_event_loop(stacklevel=4) + loop = events.get_event_loop() try: return loop.create_task(coro_or_future) except RuntimeError: - if not called_wrap_awaitable: + if should_close: coro_or_future.close() raise -@types.coroutine -def _wrap_awaitable(awaitable): - """Helper for asyncio.ensure_future(). - - Wraps awaitable (an object with __await__) into a coroutine - that will later be wrapped in a Task by ensure_future(). - """ - return (yield from awaitable.__await__()) - -_wrap_awaitable._is_coroutine = _is_coroutine - - class _GatheringFuture(futures.Future): """Helper for gather(). @@ -749,7 +747,7 @@ def gather(*coros_or_futures, return_exceptions=False): gather won't cancel any other awaitables. 
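# Illustrative sketch (not part of the diff) of the return_exceptions behaviour
# described in the gather() docstring above; ok() and boom() are assumptions:
import asyncio

async def ok():
    return "ok"

async def boom():
    raise RuntimeError("boom")

async def main():
    results = await asyncio.gather(ok(), boom(), return_exceptions=True)
    # The RuntimeError instance is returned in the result list instead of being
    # raised, and the other awaitable is not cancelled.
    print(results)   # ['ok', RuntimeError('boom')]

# asyncio.run(main())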
""" if not coros_or_futures: - loop = events._get_event_loop() + loop = events.get_event_loop() outer = loop.create_future() outer.set_result([]) return outer @@ -813,11 +811,12 @@ def _done_callback(fut): children = [] nfuts = 0 nfinished = 0 + done_futs = [] loop = None outer = None # bpo-46672 for arg in coros_or_futures: if arg not in arg_to_fut: - fut = _ensure_future(arg, loop=loop) + fut = ensure_future(arg, loop=loop) if loop is None: loop = futures._get_loop(fut) if fut is not arg: @@ -829,7 +828,10 @@ def _done_callback(fut): nfuts += 1 arg_to_fut[arg] = fut - fut.add_done_callback(_done_callback) + if fut.done(): + done_futs.append(fut) + else: + fut.add_done_callback(_done_callback) else: # There's a duplicate Future object in coros_or_futures. @@ -838,6 +840,13 @@ def _done_callback(fut): children.append(fut) outer = _GatheringFuture(children, loop=loop) + # Run done callbacks after GatheringFuture created so any post-processing + # can be performed at this point + # optimization: in the special case that *all* futures finished eagerly, + # this will effectively complete the gather eagerly, with the last + # callback setting the result (or exception) on outer before returning it + for fut in done_futs: + _done_callback(fut) return outer @@ -874,7 +883,7 @@ def shield(arg): weak references to tasks. A task that isn't referenced elsewhere may get garbage collected at any time, even before it's done. """ - inner = _ensure_future(arg) + inner = ensure_future(arg) if inner.done(): # Shortcut. return inner @@ -930,8 +939,40 @@ def callback(): return future -# WeakSet containing all alive tasks. -_all_tasks = weakref.WeakSet() +def create_eager_task_factory(custom_task_constructor): + """Create a function suitable for use as a task factory on an event-loop. + + Example usage: + + loop.set_task_factory( + asyncio.create_eager_task_factory(my_task_constructor)) + + Now, tasks created will be started immediately (rather than being first + scheduled to an event loop). The constructor argument can be any callable + that returns a Task-compatible object and has a signature compatible + with `Task.__init__`; it must have the `eager_start` keyword argument. + + Most applications will use `Task` for `custom_task_constructor` and in + this case there's no need to call `create_eager_task_factory()` + directly. Instead the global `eager_task_factory` instance can be + used. E.g. `loop.set_task_factory(asyncio.eager_task_factory)`. + """ + + def factory(loop, coro, *, name=None, context=None): + return custom_task_constructor( + coro, loop=loop, name=name, context=context, eager_start=True) + + return factory + + +eager_task_factory = create_eager_task_factory(Task) + + +# Collectively these two sets hold references to the complete set of active +# tasks. Eagerly executed tasks use a faster regular set as an optimization +# but may graduate to a WeakSet if the task blocks on IO. +_scheduled_tasks = weakref.WeakSet() +_eager_tasks = set() # Dictionary containing tasks that are currently active in # all running event loops. 
{EventLoop: Task} @@ -939,8 +980,13 @@ def callback(): def _register_task(task): - """Register a new task in asyncio as executed by loop.""" - _all_tasks.add(task) + """Register an asyncio Task scheduled to run on an event loop.""" + _scheduled_tasks.add(task) + + +def _register_eager_task(task): + """Register an asyncio Task about to be eagerly executed.""" + _eager_tasks.add(task) def _enter_task(loop, task): @@ -959,25 +1005,49 @@ def _leave_task(loop, task): del _current_tasks[loop] +def _swap_current_task(loop, task): + prev_task = _current_tasks.get(loop) + if task is None: + del _current_tasks[loop] + else: + _current_tasks[loop] = task + return prev_task + + def _unregister_task(task): - """Unregister a task.""" - _all_tasks.discard(task) + """Unregister a completed, scheduled Task.""" + _scheduled_tasks.discard(task) + + +def _unregister_eager_task(task): + """Unregister a task which finished its first eager step.""" + _eager_tasks.discard(task) +_py_current_task = current_task _py_register_task = _register_task +_py_register_eager_task = _register_eager_task _py_unregister_task = _unregister_task +_py_unregister_eager_task = _unregister_eager_task _py_enter_task = _enter_task _py_leave_task = _leave_task +_py_swap_current_task = _swap_current_task try: - from _asyncio import (_register_task, _unregister_task, - _enter_task, _leave_task, - _all_tasks, _current_tasks) + from _asyncio import (_register_task, _register_eager_task, + _unregister_task, _unregister_eager_task, + _enter_task, _leave_task, _swap_current_task, + _scheduled_tasks, _eager_tasks, _current_tasks, + current_task) except ImportError: pass else: + _c_current_task = current_task _c_register_task = _register_task + _c_register_eager_task = _register_eager_task _c_unregister_task = _unregister_task + _c_unregister_eager_task = _unregister_eager_task _c_enter_task = _enter_task _c_leave_task = _leave_task + _c_swap_current_task = _swap_current_task diff --git a/Lib/asyncio/timeouts.py b/Lib/asyncio/timeouts.py index 94d25535fbc0595..30042abb3ad804d 100644 --- a/Lib/asyncio/timeouts.py +++ b/Lib/asyncio/timeouts.py @@ -25,8 +25,18 @@ class _State(enum.Enum): @final class Timeout: + """Asynchronous context manager for cancelling overdue coroutines. + + Use `timeout()` or `timeout_at()` rather than instantiating this class directly. + """ def __init__(self, when: Optional[float]) -> None: + """Schedule a timeout that will trigger at a given loop time. + + - If `when` is `None`, the timeout will never trigger. + - If `when < loop.time()`, the timeout will trigger on the next + iteration of the event loop. 
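# Illustrative sketch (not part of the diff) of the timeout() / timeout_at()
# usage implied by the Timeout docstrings above; the durations are arbitrary
# assumptions:
import asyncio

async def main():
    try:
        async with asyncio.timeout(0.1):                 # relative deadline
            await asyncio.sleep(10)
    except TimeoutError:
        print("timed out")

    loop = asyncio.get_running_loop()
    async with asyncio.timeout_at(loop.time() + 0.1) as tm:   # absolute deadline
        tm.reschedule(loop.time() + 0.5)                 # push the deadline back
        await asyncio.sleep(0.2)

# asyncio.run(main())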
+ """ self._state = _State.CREATED self._timeout_handler: Optional[events.TimerHandle] = None @@ -34,11 +44,14 @@ def __init__(self, when: Optional[float]) -> None: self._when = when def when(self) -> Optional[float]: + """Return the current deadline.""" return self._when def reschedule(self, when: Optional[float]) -> None: - assert self._state is not _State.CREATED + """Reschedule the timeout.""" if self._state is not _State.ENTERED: + if self._state is _State.CREATED: + raise RuntimeError("Timeout has not been entered") raise RuntimeError( f"Cannot change state of {self._state.value} Timeout", ) @@ -70,10 +83,14 @@ def __repr__(self) -> str: return f"" async def __aenter__(self) -> "Timeout": - self._state = _State.ENTERED - self._task = tasks.current_task() - if self._task is None: + if self._state is not _State.CREATED: + raise RuntimeError("Timeout has already been entered") + task = tasks.current_task() + if task is None: raise RuntimeError("Timeout should be used inside a task") + self._state = _State.ENTERED + self._task = task + self._cancelling = self._task.cancelling() self.reschedule(self._when) return self @@ -92,10 +109,10 @@ async def __aexit__( if self._state is _State.EXPIRING: self._state = _State.EXPIRED - if self._task.uncancel() == 0 and exc_type is exceptions.CancelledError: - # Since there are no outstanding cancel requests, we're + if self._task.uncancel() <= self._cancelling and exc_type is exceptions.CancelledError: + # Since there are no new cancel requests, we're # handling this. - raise TimeoutError + raise TimeoutError from exc_val elif self._state is _State.ENTERED: self._state = _State.EXITED diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py index 2de7a1bfd681553..41ccf1b78fb93b8 100644 --- a/Lib/asyncio/unix_events.py +++ b/Lib/asyncio/unix_events.py @@ -32,6 +32,7 @@ 'FastChildWatcher', 'PidfdChildWatcher', 'MultiLoopChildWatcher', 'ThreadedChildWatcher', 'DefaultEventLoopPolicy', + 'EventLoop', ) @@ -63,6 +64,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop): def __init__(self, selector=None): super().__init__(selector) self._signal_handlers = {} + self._unix_server_sockets = {} def close(self): super().close() @@ -226,8 +228,7 @@ async def _make_subprocess_transport(self, protocol, args, shell, return transp def _child_watcher_callback(self, pid, returncode, transp): - # Skip one iteration for callbacks to be executed - self.call_soon_threadsafe(self.call_soon, transp._process_exited, returncode) + self.call_soon_threadsafe(transp._process_exited, returncode) async def create_unix_connection( self, protocol_factory, path=None, *, @@ -284,7 +285,7 @@ async def create_unix_server( sock=None, backlog=100, ssl=None, ssl_handshake_timeout=None, ssl_shutdown_timeout=None, - start_serving=True): + start_serving=True, cleanup_socket=True): if isinstance(ssl, bool): raise TypeError('ssl argument must be an SSLContext or None') @@ -340,6 +341,15 @@ async def create_unix_server( raise ValueError( f'A UNIX Domain Stream Socket was expected, got {sock!r}') + if cleanup_socket: + path = sock.getsockname() + # Check for abstract socket. `str` and `bytes` paths are supported. 
+ if path[0] not in (0, '\x00'): + try: + self._unix_server_sockets[sock] = os.stat(path).st_ino + except FileNotFoundError: + pass + sock.setblocking(False) server = base_events.Server(self, [sock], protocol_factory, ssl, backlog, ssl_handshake_timeout, @@ -394,6 +404,9 @@ def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno, fut.set_result(total_sent) return + # On 32-bit architectures truncate to 1GiB to avoid OverflowError + blocksize = min(blocksize, sys.maxsize//2 + 1) + try: sent = os.sendfile(fd, fileno, offset, blocksize) except (BlockingIOError, InterruptedError): @@ -457,6 +470,27 @@ def cb(fut): self.remove_writer(fd) fut.add_done_callback(cb) + def _stop_serving(self, sock): + # Is this a unix socket that needs cleanup? + if sock in self._unix_server_sockets: + path = sock.getsockname() + else: + path = None + + super()._stop_serving(sock) + + if path is not None: + prev_ino = self._unix_server_sockets[sock] + del self._unix_server_sockets[sock] + try: + if os.stat(path).st_ino == prev_ino: + os.unlink(path) + except FileNotFoundError: + pass + except OSError as err: + logger.error('Unable to clean up listening UNIX socket ' + '%r: %r', path, err) + class _UnixReadPipeTransport(transports.ReadTransport): @@ -485,13 +519,21 @@ def __init__(self, loop, pipe, protocol, waiter=None, extra=None): self._loop.call_soon(self._protocol.connection_made, self) # only start reading when connection_made() has been called - self._loop.call_soon(self._loop._add_reader, + self._loop.call_soon(self._add_reader, self._fileno, self._read_ready) if waiter is not None: # only wake up the waiter when connection_made() has been called self._loop.call_soon(futures._set_result_unless_cancelled, waiter, None) + def _add_reader(self, fd, callback): + if not self.is_reading(): + return + self._loop._add_reader(fd, callback) + + def is_reading(self): + return not self._paused and not self._closing + def __repr__(self): info = [self.__class__.__name__] if self._pipe is None: @@ -532,7 +574,7 @@ def _read_ready(self): self._loop.call_soon(self._call_connection_lost, None) def pause_reading(self): - if self._closing or self._paused: + if not self.is_reading(): return self._paused = True self._loop._remove_reader(self._fileno) @@ -846,6 +888,13 @@ class AbstractChildWatcher: waitpid(-1), there should be only one active object per process. """ + def __init_subclass__(cls) -> None: + if cls.__module__ != __name__: + warnings._deprecated("AbstractChildWatcher", + "{name!r} is deprecated as of Python 3.12 and will be " + "removed in Python {remove}.", + remove=(3, 14)) + def add_child_handler(self, pid, callback, *args): """Register a new child handler. 
@@ -1353,14 +1402,7 @@ def is_active(self): return True def close(self): - self._join_threads() - - def _join_threads(self): - """Internal: Join all non-daemon threads""" - threads = [thread for thread in list(self._threads.values()) - if thread.is_alive() and not thread.daemon] - for thread in threads: - thread.join() + pass def __enter__(self): return self @@ -1379,7 +1421,7 @@ def __del__(self, _warn=warnings.warn): def add_child_handler(self, pid, callback, *args): loop = events.get_running_loop() thread = threading.Thread(target=self._do_waitpid, - name=f"waitpid-{next(self._pid_counter)}", + name=f"asyncio-waitpid-{next(self._pid_counter)}", args=(loop, pid, callback, args), daemon=True) self._threads[pid] = thread @@ -1447,8 +1489,6 @@ def _init_watcher(self): self._watcher = PidfdChildWatcher() else: self._watcher = ThreadedChildWatcher() - if threading.current_thread() is threading.main_thread(): - self._watcher.attach_loop(self._local._loop) def set_event_loop(self, loop): """Set the event loop. @@ -1493,3 +1533,4 @@ def set_child_watcher(self, watcher): SelectorEventLoop = _UnixSelectorEventLoop DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy +EventLoop = SelectorEventLoop diff --git a/Lib/asyncio/windows_events.py b/Lib/asyncio/windows_events.py index acc97daafecc0b3..b62ea75fee3858c 100644 --- a/Lib/asyncio/windows_events.py +++ b/Lib/asyncio/windows_events.py @@ -29,7 +29,7 @@ __all__ = ( 'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor', 'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy', - 'WindowsProactorEventLoopPolicy', + 'WindowsProactorEventLoopPolicy', 'EventLoop', ) @@ -314,24 +314,25 @@ def __init__(self, proactor=None): proactor = IocpProactor() super().__init__(proactor) - def run_forever(self): - try: - assert self._self_reading_future is None - self.call_soon(self._loop_self_reading) - super().run_forever() - finally: - if self._self_reading_future is not None: - ov = self._self_reading_future._ov - self._self_reading_future.cancel() - # self_reading_future was just cancelled so if it hasn't been - # finished yet, it never will be (it's possible that it has - # already finished and its callback is waiting in the queue, - # where it could still happen if the event loop is restarted). - # Unregister it otherwise IocpProactor.close will wait for it - # forever - if ov is not None: - self._proactor._unregister(ov) - self._self_reading_future = None + def _run_forever_setup(self): + assert self._self_reading_future is None + self.call_soon(self._loop_self_reading) + super()._run_forever_setup() + + def _run_forever_cleanup(self): + super()._run_forever_cleanup() + if self._self_reading_future is not None: + ov = self._self_reading_future._ov + self._self_reading_future.cancel() + # self_reading_future was just cancelled so if it hasn't been + # finished yet, it never will be (it's possible that it has + # already finished and its callback is waiting in the queue, + # where it could still happen if the event loop is restarted). 
+ # Unregister it otherwise IocpProactor.close will wait for it + # forever + if ov is not None: + self._proactor._unregister(ov) + self._self_reading_future = None async def create_pipe_connection(self, protocol_factory, address): f = self._proactor.connect_pipe(address) @@ -366,6 +367,10 @@ def loop_accept_pipe(f=None): return f = self._proactor.accept_pipe(pipe) + except BrokenPipeError: + if pipe and pipe.fileno() != -1: + pipe.close() + self.call_soon(loop_accept_pipe) except OSError as exc: if pipe and pipe.fileno() != -1: self.call_exception_handler({ @@ -377,6 +382,7 @@ def loop_accept_pipe(f=None): elif self._debug: logger.warning("Accept pipe failed on pipe %r", pipe, exc_info=True) + self.call_soon(loop_accept_pipe) except exceptions.CancelledError: if pipe: pipe.close() @@ -439,7 +445,11 @@ def select(self, timeout=None): self._poll(timeout) tmp = self._results self._results = [] - return tmp + try: + return tmp + finally: + # Needed to break cycles when an exception occurs. + tmp = None def _result(self, value): fut = self._loop.create_future() @@ -793,6 +803,8 @@ def _poll(self, timeout=None): else: f.set_result(value) self._results.append(f) + finally: + f = None # Remove unregistered futures for ov in self._unregistered: @@ -883,3 +895,4 @@ class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy): DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy +EventLoop = ProactorEventLoop diff --git a/Lib/base64.py b/Lib/base64.py index 30796a6fd6d05ac..e3e983b3064fe7b 100755 --- a/Lib/base64.py +++ b/Lib/base64.py @@ -164,7 +164,6 @@ def urlsafe_b64decode(s): _b32rev = {} def _b32encode(alphabet, s): - global _b32tab2 # Delay the initialization of the table to not waste memory # if the function is never called if alphabet not in _b32tab2: @@ -200,7 +199,6 @@ def _b32encode(alphabet, s): return bytes(encoded) def _b32decode(alphabet, s, casefold=False, map01=None): - global _b32rev # Delay the initialization of the table to not waste memory # if the function is never called if alphabet not in _b32rev: @@ -508,14 +506,8 @@ def b85decode(b): def encode(input, output): """Encode a file; input and output are binary files.""" - while True: - s = input.read(MAXBINSIZE) - if not s: - break - while len(s) < MAXBINSIZE: - ns = input.read(MAXBINSIZE-len(s)) - if not ns: - break + while s := input.read(MAXBINSIZE): + while len(s) < MAXBINSIZE and (ns := input.read(MAXBINSIZE-len(s))): s += ns line = binascii.b2a_base64(s) output.write(line) @@ -523,10 +515,7 @@ def encode(input, output): def decode(input, output): """Decode a file; input and output are binary files.""" - while True: - line = input.readline() - if not line: - break + while line := input.readline(): s = binascii.a2b_base64(line) output.write(s) @@ -567,12 +556,12 @@ def decodebytes(s): def main(): """Small main program""" import sys, getopt - usage = f"""usage: {sys.argv[0]} [-h|-d|-e|-u|-t] [file|-] + usage = f"""usage: {sys.argv[0]} [-h|-d|-e|-u] [file|-] -h: print this help message and exit -d, -u: decode -e: encode (default)""" try: - opts, args = getopt.getopt(sys.argv[1:], 'hdeut') + opts, args = getopt.getopt(sys.argv[1:], 'hdeu') except getopt.error as msg: sys.stdout = sys.stderr print(msg) diff --git a/Lib/bdb.py b/Lib/bdb.py index 81fbb8514acb6f4..1acf7957f0d6693 100644 --- a/Lib/bdb.py +++ b/Lib/bdb.py @@ -32,6 +32,7 @@ def __init__(self, skip=None): self.skip = set(skip) if skip else None self.breaks = {} self.fncache = {} + self.frame_trace_lines = {} self.frame_returning = None 
self._load_breaks() @@ -331,6 +332,9 @@ def set_trace(self, frame=None): while frame: frame.f_trace = self.trace_dispatch self.botframe = frame + # We need f_trace_liens == True for the debugger to work + self.frame_trace_lines[frame] = frame.f_trace_lines + frame.f_trace_lines = True frame = frame.f_back self.set_step() sys.settrace(self.trace_dispatch) @@ -349,6 +353,9 @@ def set_continue(self): while frame and frame is not self.botframe: del frame.f_trace frame = frame.f_back + for frame, prev_trace_lines in self.frame_trace_lines.items(): + frame.f_trace_lines = prev_trace_lines + self.frame_trace_lines = {} def set_quit(self): """Set quitting attribute to True. @@ -570,9 +577,12 @@ def format_stack_entry(self, frame_lineno, lprefix=': '): rv = frame.f_locals['__return__'] s += '->' s += reprlib.repr(rv) - line = linecache.getline(filename, lineno, frame.f_globals) - if line: - s += lprefix + line.strip() + if lineno is not None: + line = linecache.getline(filename, lineno, frame.f_globals) + if line: + s += lprefix + line.strip() + else: + s += f'{lprefix}Warning: lineno is None' return s # The following methods can be called by clients to use diff --git a/Lib/bisect.py b/Lib/bisect.py index d37da74f7b4055c..ca6ca7240840bbe 100644 --- a/Lib/bisect.py +++ b/Lib/bisect.py @@ -8,6 +8,8 @@ def insort_right(a, x, lo=0, hi=None, *, key=None): Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. + + A custom key function can be supplied to customize the sort order. """ if key is None: lo = bisect_right(a, x, lo, hi) @@ -25,6 +27,8 @@ def bisect_right(a, x, lo=0, hi=None, *, key=None): Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. + + A custom key function can be supplied to customize the sort order. """ if lo < 0: @@ -57,6 +61,8 @@ def insort_left(a, x, lo=0, hi=None, *, key=None): Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. + + A custom key function can be supplied to customize the sort order. """ if key is None: @@ -74,6 +80,8 @@ def bisect_left(a, x, lo=0, hi=None, *, key=None): Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. + + A custom key function can be supplied to customize the sort order. 
""" if lo < 0: diff --git a/Lib/cProfile.py b/Lib/cProfile.py index f7000a8bfa0ddb3..135a12c3965c00e 100755 --- a/Lib/cProfile.py +++ b/Lib/cProfile.py @@ -8,6 +8,7 @@ import _lsprof import importlib.machinery +import io import profile as _pyprofile # ____________________________________________________________ @@ -168,7 +169,7 @@ def main(): else: progname = args[0] sys.path.insert(0, os.path.dirname(progname)) - with open(progname, 'rb') as fp: + with io.open_code(progname) as fp: code = compile(fp.read(), progname, 'exec') spec = importlib.machinery.ModuleSpec(name='__main__', loader=None, origin=progname) diff --git a/Lib/calendar.py b/Lib/calendar.py index 657396439c91fcb..03469d8ac96bcd9 100644 --- a/Lib/calendar.py +++ b/Lib/calendar.py @@ -7,6 +7,7 @@ import sys import datetime +from enum import IntEnum, global_enum import locale as _locale from itertools import repeat @@ -16,6 +17,9 @@ "timegm", "month_name", "month_abbr", "day_name", "day_abbr", "Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar", "LocaleHTMLCalendar", "weekheader", + "Day", "Month", "JANUARY", "FEBRUARY", "MARCH", + "APRIL", "MAY", "JUNE", "JULY", + "AUGUST", "SEPTEMBER", "OCTOBER", "NOVEMBER", "DECEMBER", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] @@ -37,9 +41,47 @@ def __str__(self): return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday -# Constants for months referenced later -January = 1 -February = 2 +def __getattr__(name): + if name in ('January', 'February'): + import warnings + warnings.warn(f"The '{name}' attribute is deprecated, use '{name.upper()}' instead", + DeprecationWarning, stacklevel=2) + if name == 'January': + return 1 + else: + return 2 + + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") + + +# Constants for months +@global_enum +class Month(IntEnum): + JANUARY = 1 + FEBRUARY = 2 + MARCH = 3 + APRIL = 4 + MAY = 5 + JUNE = 6 + JULY = 7 + AUGUST = 8 + SEPTEMBER = 9 + OCTOBER = 10 + NOVEMBER = 11 + DECEMBER = 12 + + +# Constants for days +@global_enum +class Day(IntEnum): + MONDAY = 0 + TUESDAY = 1 + WEDNESDAY = 2 + THURSDAY = 3 + FRIDAY = 4 + SATURDAY = 5 + SUNDAY = 6 + # Number of days per month (except for February in leap years) mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] @@ -95,9 +137,6 @@ def __len__(self): month_name = _localized_month('%B') month_abbr = _localized_month('%b') -# Constants for weekdays -(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7) - def isleap(year): """Return True for leap years, False for non-leap years.""" @@ -116,7 +155,7 @@ def weekday(year, month, day): """Return weekday (0-6 ~ Mon-Sun) for year, month (1-12), day (1-31).""" if not datetime.MINYEAR <= year <= datetime.MAXYEAR: year = 2000 + year % 400 - return datetime.date(year, month, day).weekday() + return Day(datetime.date(year, month, day).weekday()) def monthrange(year, month): @@ -125,12 +164,12 @@ def monthrange(year, month): if not 1 <= month <= 12: raise IllegalMonthError(month) day1 = weekday(year, month, 1) - ndays = mdays[month] + (month == February and isleap(year)) + ndays = mdays[month] + (month == FEBRUARY and isleap(year)) return day1, ndays def _monthlen(year, month): - return mdays[month] + (month == February and isleap(year)) + return mdays[month] + (month == FEBRUARY and isleap(year)) def _prevmonth(year, month): @@ -260,10 +299,7 @@ def yeardatescalendar(self, year, width=3): Each month contains between 4 and 6 weeks and each week contains 1-7 days. 
Days are datetime.date objects. """ - months = [ - self.monthdatescalendar(year, i) - for i in range(January, January+12) - ] + months = [self.monthdatescalendar(year, m) for m in Month] return [months[i:i+width] for i in range(0, len(months), width) ] def yeardays2calendar(self, year, width=3): @@ -273,10 +309,7 @@ def yeardays2calendar(self, year, width=3): (day number, weekday number) tuples. Day numbers outside this month are zero. """ - months = [ - self.monthdays2calendar(year, i) - for i in range(January, January+12) - ] + months = [self.monthdays2calendar(year, m) for m in Month] return [months[i:i+width] for i in range(0, len(months), width) ] def yeardayscalendar(self, year, width=3): @@ -285,10 +318,7 @@ def yeardayscalendar(self, year, width=3): yeardatescalendar()). Entries in the week lists are day numbers. Day numbers outside this month are zero. """ - months = [ - self.monthdayscalendar(year, i) - for i in range(January, January+12) - ] + months = [self.monthdayscalendar(year, m) for m in Month] return [months[i:i+width] for i in range(0, len(months), width) ] @@ -509,7 +539,7 @@ def formatyear(self, theyear, width=3): a('\n') a('%s' % ( width, self.cssclass_year_head, theyear)) - for i in range(January, January+12, width): + for i in range(JANUARY, JANUARY+12, width): # months in this row months = range(i, min(i+width, 13)) a('') @@ -555,8 +585,6 @@ def __enter__(self): _locale.setlocale(_locale.LC_TIME, self.locale) def __exit__(self, *args): - if self.oldlocale is None: - return _locale.setlocale(_locale.LC_TIME, self.oldlocale) @@ -660,7 +688,7 @@ def timegm(tuple): return seconds -def main(args): +def main(args=None): import argparse parser = argparse.ArgumentParser() textgroup = parser.add_argument_group('text only arguments') @@ -693,7 +721,7 @@ def main(args): parser.add_argument( "-L", "--locale", default=None, - help="locale to be used from month and weekday names" + help="locale to use for month and weekday names" ) parser.add_argument( "-e", "--encoding", @@ -717,7 +745,7 @@ def main(args): help="month number (1-12, text only)" ) - options = parser.parse_args(args[1:]) + options = parser.parse_args(args) if options.locale and not options.encoding: parser.error("if --locale is specified --encoding is required") @@ -726,6 +754,9 @@ def main(args): locale = options.locale, options.encoding if options.type == "html": + if options.month: + parser.error("incorrect number of arguments") + sys.exit(1) if options.locale: cal = LocaleHTMLCalendar(locale=locale) else: @@ -737,11 +768,8 @@ def main(args): write = sys.stdout.buffer.write if options.year is None: write(cal.formatyearpage(datetime.date.today().year, **optdict)) - elif options.month is None: - write(cal.formatyearpage(options.year, **optdict)) else: - parser.error("incorrect number of arguments") - sys.exit(1) + write(cal.formatyearpage(options.year, **optdict)) else: if options.locale: cal = LocaleTextCalendar(locale=locale) @@ -765,4 +793,4 @@ def main(args): if __name__ == "__main__": - main(sys.argv) + main() diff --git a/Lib/cgi.py b/Lib/cgi.py deleted file mode 100755 index 8787567be7c081c..000000000000000 --- a/Lib/cgi.py +++ /dev/null @@ -1,1012 +0,0 @@ -#! /usr/local/bin/python - -# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is -# intentionally NOT "/usr/bin/env python". On many systems -# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI -# scripts, and /usr/local/bin is the default directory where Python is -# installed, so /usr/bin/env would be unable to find python. 
Granted, -# binary installations by Linux vendors often install Python in -# /usr/bin. So let those vendors patch cgi.py to match their choice -# of installation. - -"""Support module for CGI (Common Gateway Interface) scripts. - -This module defines a number of utilities for use by CGI scripts -written in Python. - -The global variable maxlen can be set to an integer indicating the maximum size -of a POST request. POST requests larger than this size will result in a -ValueError being raised during parsing. The default value of this variable is 0, -meaning the request size is unlimited. -""" - -# History -# ------- -# -# Michael McLay started this module. Steve Majewski changed the -# interface to SvFormContentDict and FormContentDict. The multipart -# parsing was inspired by code submitted by Andreas Paepcke. Guido van -# Rossum rewrote, reformatted and documented the module and is currently -# responsible for its maintenance. -# - -__version__ = "2.6" - - -# Imports -# ======= - -from io import StringIO, BytesIO, TextIOWrapper -from collections.abc import Mapping -import sys -import os -import urllib.parse -from email.parser import FeedParser -from email.message import Message -import html -import locale -import tempfile -import warnings - -__all__ = ["MiniFieldStorage", "FieldStorage", "parse", "parse_multipart", - "parse_header", "test", "print_exception", "print_environ", - "print_form", "print_directory", "print_arguments", - "print_environ_usage"] - - -warnings._deprecated(__name__, remove=(3,13)) - -# Logging support -# =============== - -logfile = "" # Filename to log to, if not empty -logfp = None # File object to log to, if not None - -def initlog(*allargs): - """Write a log message, if there is a log file. - - Even though this function is called initlog(), you should always - use log(); log is a variable that is set either to initlog - (initially), to dolog (once the log file has been opened), or to - nolog (when logging is disabled). - - The first argument is a format string; the remaining arguments (if - any) are arguments to the % operator, so e.g. - log("%s: %s", "a", "b") - will write "a: b" to the log file, followed by a newline. - - If the global logfp is not None, it should be a file object to - which log data is written. - - If the global logfp is None, the global logfile may be a string - giving a filename to open, in append mode. This file should be - world writable!!! If the file can't be opened, logging is - silently disabled (since there is no safe place where we could - send an error message). - - """ - global log, logfile, logfp - warnings.warn("cgi.log() is deprecated as of 3.10. Use logging instead", - DeprecationWarning, stacklevel=2) - if logfile and not logfp: - try: - logfp = open(logfile, "a", encoding="locale") - except OSError: - pass - if not logfp: - log = nolog - else: - log = dolog - log(*allargs) - -def dolog(fmt, *args): - """Write a log message to the log file. 
See initlog() for docs.""" - logfp.write(fmt%args + "\n") - -def nolog(*allargs): - """Dummy function, assigned to log when logging is disabled.""" - pass - -def closelog(): - """Close the log file.""" - global log, logfile, logfp - logfile = '' - if logfp: - logfp.close() - logfp = None - log = initlog - -log = initlog # The current logging function - - -# Parsing functions -# ================= - -# Maximum input we will accept when REQUEST_METHOD is POST -# 0 ==> unlimited input -maxlen = 0 - -def parse(fp=None, environ=os.environ, keep_blank_values=0, - strict_parsing=0, separator='&'): - """Parse a query in the environment or from a file (default stdin) - - Arguments, all optional: - - fp : file pointer; default: sys.stdin.buffer - - environ : environment dictionary; default: os.environ - - keep_blank_values: flag indicating whether blank values in - percent-encoded forms should be treated as blank strings. - A true value indicates that blanks should be retained as - blank strings. The default false value indicates that - blank values are to be ignored and treated as if they were - not included. - - strict_parsing: flag indicating what to do with parsing errors. - If false (the default), errors are silently ignored. - If true, errors raise a ValueError exception. - - separator: str. The symbol to use for separating the query arguments. - Defaults to &. - """ - if fp is None: - fp = sys.stdin - - # field keys and values (except for files) are returned as strings - # an encoding is required to decode the bytes read from self.fp - if hasattr(fp,'encoding'): - encoding = fp.encoding - else: - encoding = 'latin-1' - - # fp.read() must return bytes - if isinstance(fp, TextIOWrapper): - fp = fp.buffer - - if not 'REQUEST_METHOD' in environ: - environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone - if environ['REQUEST_METHOD'] == 'POST': - ctype, pdict = parse_header(environ['CONTENT_TYPE']) - if ctype == 'multipart/form-data': - return parse_multipart(fp, pdict, separator=separator) - elif ctype == 'application/x-www-form-urlencoded': - clength = int(environ['CONTENT_LENGTH']) - if maxlen and clength > maxlen: - raise ValueError('Maximum content length exceeded') - qs = fp.read(clength).decode(encoding) - else: - qs = '' # Unknown content-type - if 'QUERY_STRING' in environ: - if qs: qs = qs + '&' - qs = qs + environ['QUERY_STRING'] - elif sys.argv[1:]: - if qs: qs = qs + '&' - qs = qs + sys.argv[1] - environ['QUERY_STRING'] = qs # XXX Shouldn't, really - elif 'QUERY_STRING' in environ: - qs = environ['QUERY_STRING'] - else: - if sys.argv[1:]: - qs = sys.argv[1] - else: - qs = "" - environ['QUERY_STRING'] = qs # XXX Shouldn't, really - return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing, - encoding=encoding, separator=separator) - - -def parse_multipart(fp, pdict, encoding="utf-8", errors="replace", separator='&'): - """Parse multipart input. - - Arguments: - fp : input file - pdict: dictionary containing other parameters of content-type header - encoding, errors: request encoding and error handler, passed to - FieldStorage - - Returns a dictionary just like parse_qs(): keys are the field names, each - value is a list of values for that field. For non-file fields, the value - is a list of strings. - """ - # RFC 2046, Section 5.1 : The "multipart" boundary delimiters are always - # represented as 7bit US-ASCII. 
- boundary = pdict['boundary'].decode('ascii') - ctype = "multipart/form-data; boundary={}".format(boundary) - headers = Message() - headers.set_type(ctype) - try: - headers['Content-Length'] = pdict['CONTENT-LENGTH'] - except KeyError: - pass - fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors, - environ={'REQUEST_METHOD': 'POST'}, separator=separator) - return {k: fs.getlist(k) for k in fs} - -def _parseparam(s): - while s[:1] == ';': - s = s[1:] - end = s.find(';') - while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: - end = s.find(';', end + 1) - if end < 0: - end = len(s) - f = s[:end] - yield f.strip() - s = s[end:] - -def parse_header(line): - """Parse a Content-type like header. - - Return the main content-type and a dictionary of options. - - """ - parts = _parseparam(';' + line) - key = parts.__next__() - pdict = {} - for p in parts: - i = p.find('=') - if i >= 0: - name = p[:i].strip().lower() - value = p[i+1:].strip() - if len(value) >= 2 and value[0] == value[-1] == '"': - value = value[1:-1] - value = value.replace('\\\\', '\\').replace('\\"', '"') - pdict[name] = value - return key, pdict - - -# Classes for field storage -# ========================= - -class MiniFieldStorage: - - """Like FieldStorage, for use when no file uploads are possible.""" - - # Dummy attributes - filename = None - list = None - type = None - file = None - type_options = {} - disposition = None - disposition_options = {} - headers = {} - - def __init__(self, name, value): - """Constructor from field name and value.""" - self.name = name - self.value = value - # self.file = StringIO(value) - - def __repr__(self): - """Return printable representation.""" - return "MiniFieldStorage(%r, %r)" % (self.name, self.value) - - -class FieldStorage: - - """Store a sequence of fields, reading multipart/form-data. - - This class provides naming, typing, files stored on disk, and - more. At the top level, it is accessible like a dictionary, whose - keys are the field names. (Note: None can occur as a field name.) - The items are either a Python list (if there's multiple values) or - another FieldStorage or MiniFieldStorage object. If it's a single - object, it has the following attributes: - - name: the field name, if specified; otherwise None - - filename: the filename, if specified; otherwise None; this is the - client side filename, *not* the file name on which it is - stored (that's a temporary file you don't deal with) - - value: the value as a *string*; for file uploads, this - transparently reads the file every time you request the value - and returns *bytes* - - file: the file(-like) object from which you can read the data *as - bytes* ; None if the data is stored a simple string - - type: the content-type, or None if not specified - - type_options: dictionary of options specified on the content-type - line - - disposition: content-disposition, or None if not specified - - disposition_options: dictionary of corresponding options - - headers: a dictionary(-like) object (sometimes email.message.Message or a - subclass thereof) containing *all* headers - - The class is subclassable, mostly for the purpose of overriding - the make_file() method, which is called internally to come up with - a file open for reading and writing. This makes it possible to - override the default choice of storing all files in a temporary - directory and unlinking them as soon as they have been opened. 
- - """ - def __init__(self, fp=None, headers=None, outerboundary=b'', - environ=os.environ, keep_blank_values=0, strict_parsing=0, - limit=None, encoding='utf-8', errors='replace', - max_num_fields=None, separator='&'): - """Constructor. Read multipart/* until last part. - - Arguments, all optional: - - fp : file pointer; default: sys.stdin.buffer - (not used when the request method is GET) - Can be : - 1. a TextIOWrapper object - 2. an object whose read() and readline() methods return bytes - - headers : header dictionary-like object; default: - taken from environ as per CGI spec - - outerboundary : terminating multipart boundary - (for internal use only) - - environ : environment dictionary; default: os.environ - - keep_blank_values: flag indicating whether blank values in - percent-encoded forms should be treated as blank strings. - A true value indicates that blanks should be retained as - blank strings. The default false value indicates that - blank values are to be ignored and treated as if they were - not included. - - strict_parsing: flag indicating what to do with parsing errors. - If false (the default), errors are silently ignored. - If true, errors raise a ValueError exception. - - limit : used internally to read parts of multipart/form-data forms, - to exit from the reading loop when reached. It is the difference - between the form content-length and the number of bytes already - read - - encoding, errors : the encoding and error handler used to decode the - binary stream to strings. Must be the same as the charset defined - for the page sending the form (content-type : meta http-equiv or - header) - - max_num_fields: int. If set, then __init__ throws a ValueError - if there are more than n fields read by parse_qsl(). - - """ - method = 'GET' - self.keep_blank_values = keep_blank_values - self.strict_parsing = strict_parsing - self.max_num_fields = max_num_fields - self.separator = separator - if 'REQUEST_METHOD' in environ: - method = environ['REQUEST_METHOD'].upper() - self.qs_on_post = None - if method == 'GET' or method == 'HEAD': - if 'QUERY_STRING' in environ: - qs = environ['QUERY_STRING'] - elif sys.argv[1:]: - qs = sys.argv[1] - else: - qs = "" - qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape') - fp = BytesIO(qs) - if headers is None: - headers = {'content-type': - "application/x-www-form-urlencoded"} - if headers is None: - headers = {} - if method == 'POST': - # Set default content-type for POST to what's traditional - headers['content-type'] = "application/x-www-form-urlencoded" - if 'CONTENT_TYPE' in environ: - headers['content-type'] = environ['CONTENT_TYPE'] - if 'QUERY_STRING' in environ: - self.qs_on_post = environ['QUERY_STRING'] - if 'CONTENT_LENGTH' in environ: - headers['content-length'] = environ['CONTENT_LENGTH'] - else: - if not (isinstance(headers, (Mapping, Message))): - raise TypeError("headers must be mapping or an instance of " - "email.message.Message") - self.headers = headers - if fp is None: - self.fp = sys.stdin.buffer - # self.fp.read() must return bytes - elif isinstance(fp, TextIOWrapper): - self.fp = fp.buffer - else: - if not (hasattr(fp, 'read') and hasattr(fp, 'readline')): - raise TypeError("fp must be file pointer") - self.fp = fp - - self.encoding = encoding - self.errors = errors - - if not isinstance(outerboundary, bytes): - raise TypeError('outerboundary must be bytes, not %s' - % type(outerboundary).__name__) - self.outerboundary = outerboundary - - self.bytes_read = 0 - self.limit = limit - - # Process 
content-disposition header - cdisp, pdict = "", {} - if 'content-disposition' in self.headers: - cdisp, pdict = parse_header(self.headers['content-disposition']) - self.disposition = cdisp - self.disposition_options = pdict - self.name = None - if 'name' in pdict: - self.name = pdict['name'] - self.filename = None - if 'filename' in pdict: - self.filename = pdict['filename'] - self._binary_file = self.filename is not None - - # Process content-type header - # - # Honor any existing content-type header. But if there is no - # content-type header, use some sensible defaults. Assume - # outerboundary is "" at the outer level, but something non-false - # inside a multi-part. The default for an inner part is text/plain, - # but for an outer part it should be urlencoded. This should catch - # bogus clients which erroneously forget to include a content-type - # header. - # - # See below for what we do if there does exist a content-type header, - # but it happens to be something we don't understand. - if 'content-type' in self.headers: - ctype, pdict = parse_header(self.headers['content-type']) - elif self.outerboundary or method != 'POST': - ctype, pdict = "text/plain", {} - else: - ctype, pdict = 'application/x-www-form-urlencoded', {} - self.type = ctype - self.type_options = pdict - if 'boundary' in pdict: - self.innerboundary = pdict['boundary'].encode(self.encoding, - self.errors) - else: - self.innerboundary = b"" - - clen = -1 - if 'content-length' in self.headers: - try: - clen = int(self.headers['content-length']) - except ValueError: - pass - if maxlen and clen > maxlen: - raise ValueError('Maximum content length exceeded') - self.length = clen - if self.limit is None and clen >= 0: - self.limit = clen - - self.list = self.file = None - self.done = 0 - if ctype == 'application/x-www-form-urlencoded': - self.read_urlencoded() - elif ctype[:10] == 'multipart/': - self.read_multi(environ, keep_blank_values, strict_parsing) - else: - self.read_single() - - def __del__(self): - try: - self.file.close() - except AttributeError: - pass - - def __enter__(self): - return self - - def __exit__(self, *args): - self.file.close() - - def __repr__(self): - """Return a printable representation.""" - return "FieldStorage(%r, %r, %r)" % ( - self.name, self.filename, self.value) - - def __iter__(self): - return iter(self.keys()) - - def __getattr__(self, name): - if name != 'value': - raise AttributeError(name) - if self.file: - self.file.seek(0) - value = self.file.read() - self.file.seek(0) - elif self.list is not None: - value = self.list - else: - value = None - return value - - def __getitem__(self, key): - """Dictionary style indexing.""" - if self.list is None: - raise TypeError("not indexable") - found = [] - for item in self.list: - if item.name == key: found.append(item) - if not found: - raise KeyError(key) - if len(found) == 1: - return found[0] - else: - return found - - def getvalue(self, key, default=None): - """Dictionary style get() method, including 'value' lookup.""" - if key in self: - value = self[key] - if isinstance(value, list): - return [x.value for x in value] - else: - return value.value - else: - return default - - def getfirst(self, key, default=None): - """ Return the first value received.""" - if key in self: - value = self[key] - if isinstance(value, list): - return value[0].value - else: - return value.value - else: - return default - - def getlist(self, key): - """ Return list of received values.""" - if key in self: - value = self[key] - if isinstance(value, list): - 
return [x.value for x in value] - else: - return [value.value] - else: - return [] - - def keys(self): - """Dictionary style keys() method.""" - if self.list is None: - raise TypeError("not indexable") - return list(set(item.name for item in self.list)) - - def __contains__(self, key): - """Dictionary style __contains__ method.""" - if self.list is None: - raise TypeError("not indexable") - return any(item.name == key for item in self.list) - - def __len__(self): - """Dictionary style len(x) support.""" - return len(self.keys()) - - def __bool__(self): - if self.list is None: - raise TypeError("Cannot be converted to bool.") - return bool(self.list) - - def read_urlencoded(self): - """Internal: read data in query string format.""" - qs = self.fp.read(self.length) - if not isinstance(qs, bytes): - raise ValueError("%s should return bytes, got %s" \ - % (self.fp, type(qs).__name__)) - qs = qs.decode(self.encoding, self.errors) - if self.qs_on_post: - qs += '&' + self.qs_on_post - query = urllib.parse.parse_qsl( - qs, self.keep_blank_values, self.strict_parsing, - encoding=self.encoding, errors=self.errors, - max_num_fields=self.max_num_fields, separator=self.separator) - self.list = [MiniFieldStorage(key, value) for key, value in query] - self.skip_lines() - - FieldStorageClass = None - - def read_multi(self, environ, keep_blank_values, strict_parsing): - """Internal: read a part that is itself multipart.""" - ib = self.innerboundary - if not valid_boundary(ib): - raise ValueError('Invalid boundary in multipart form: %r' % (ib,)) - self.list = [] - if self.qs_on_post: - query = urllib.parse.parse_qsl( - self.qs_on_post, self.keep_blank_values, self.strict_parsing, - encoding=self.encoding, errors=self.errors, - max_num_fields=self.max_num_fields, separator=self.separator) - self.list.extend(MiniFieldStorage(key, value) for key, value in query) - - klass = self.FieldStorageClass or self.__class__ - first_line = self.fp.readline() # bytes - if not isinstance(first_line, bytes): - raise ValueError("%s should return bytes, got %s" \ - % (self.fp, type(first_line).__name__)) - self.bytes_read += len(first_line) - - # Ensure that we consume the file until we've hit our inner boundary - while (first_line.strip() != (b"--" + self.innerboundary) and - first_line): - first_line = self.fp.readline() - self.bytes_read += len(first_line) - - # Propagate max_num_fields into the sub class appropriately - max_num_fields = self.max_num_fields - if max_num_fields is not None: - max_num_fields -= len(self.list) - - while True: - parser = FeedParser() - hdr_text = b"" - while True: - data = self.fp.readline() - hdr_text += data - if not data.strip(): - break - if not hdr_text: - break - # parser takes strings, not bytes - self.bytes_read += len(hdr_text) - parser.feed(hdr_text.decode(self.encoding, self.errors)) - headers = parser.close() - - # Some clients add Content-Length for part headers, ignore them - if 'content-length' in headers: - del headers['content-length'] - - limit = None if self.limit is None \ - else self.limit - self.bytes_read - part = klass(self.fp, headers, ib, environ, keep_blank_values, - strict_parsing, limit, - self.encoding, self.errors, max_num_fields, self.separator) - - if max_num_fields is not None: - max_num_fields -= 1 - if part.list: - max_num_fields -= len(part.list) - if max_num_fields < 0: - raise ValueError('Max number of fields exceeded') - - self.bytes_read += part.bytes_read - self.list.append(part) - if part.done or self.bytes_read >= self.length > 0: - break - 
self.skip_lines() - - def read_single(self): - """Internal: read an atomic part.""" - if self.length >= 0: - self.read_binary() - self.skip_lines() - else: - self.read_lines() - self.file.seek(0) - - bufsize = 8*1024 # I/O buffering size for copy to file - - def read_binary(self): - """Internal: read binary data.""" - self.file = self.make_file() - todo = self.length - if todo >= 0: - while todo > 0: - data = self.fp.read(min(todo, self.bufsize)) # bytes - if not isinstance(data, bytes): - raise ValueError("%s should return bytes, got %s" - % (self.fp, type(data).__name__)) - self.bytes_read += len(data) - if not data: - self.done = -1 - break - self.file.write(data) - todo = todo - len(data) - - def read_lines(self): - """Internal: read lines until EOF or outerboundary.""" - if self._binary_file: - self.file = self.__file = BytesIO() # store data as bytes for files - else: - self.file = self.__file = StringIO() # as strings for other fields - if self.outerboundary: - self.read_lines_to_outerboundary() - else: - self.read_lines_to_eof() - - def __write(self, line): - """line is always bytes, not string""" - if self.__file is not None: - if self.__file.tell() + len(line) > 1000: - self.file = self.make_file() - data = self.__file.getvalue() - self.file.write(data) - self.__file = None - if self._binary_file: - # keep bytes - self.file.write(line) - else: - # decode to string - self.file.write(line.decode(self.encoding, self.errors)) - - def read_lines_to_eof(self): - """Internal: read lines until EOF.""" - while 1: - line = self.fp.readline(1<<16) # bytes - self.bytes_read += len(line) - if not line: - self.done = -1 - break - self.__write(line) - - def read_lines_to_outerboundary(self): - """Internal: read lines until outerboundary. - Data is read as bytes: boundaries and line ends must be converted - to bytes for comparisons. 
- """ - next_boundary = b"--" + self.outerboundary - last_boundary = next_boundary + b"--" - delim = b"" - last_line_lfend = True - _read = 0 - while 1: - - if self.limit is not None and 0 <= self.limit <= _read: - break - line = self.fp.readline(1<<16) # bytes - self.bytes_read += len(line) - _read += len(line) - if not line: - self.done = -1 - break - if delim == b"\r": - line = delim + line - delim = b"" - if line.startswith(b"--") and last_line_lfend: - strippedline = line.rstrip() - if strippedline == next_boundary: - break - if strippedline == last_boundary: - self.done = 1 - break - odelim = delim - if line.endswith(b"\r\n"): - delim = b"\r\n" - line = line[:-2] - last_line_lfend = True - elif line.endswith(b"\n"): - delim = b"\n" - line = line[:-1] - last_line_lfend = True - elif line.endswith(b"\r"): - # We may interrupt \r\n sequences if they span the 2**16 - # byte boundary - delim = b"\r" - line = line[:-1] - last_line_lfend = False - else: - delim = b"" - last_line_lfend = False - self.__write(odelim + line) - - def skip_lines(self): - """Internal: skip lines until outer boundary if defined.""" - if not self.outerboundary or self.done: - return - next_boundary = b"--" + self.outerboundary - last_boundary = next_boundary + b"--" - last_line_lfend = True - while True: - line = self.fp.readline(1<<16) - self.bytes_read += len(line) - if not line: - self.done = -1 - break - if line.endswith(b"--") and last_line_lfend: - strippedline = line.strip() - if strippedline == next_boundary: - break - if strippedline == last_boundary: - self.done = 1 - break - last_line_lfend = line.endswith(b'\n') - - def make_file(self): - """Overridable: return a readable & writable file. - - The file will be used as follows: - - data is written to it - - seek(0) - - data is read from it - - The file is opened in binary mode for files, in text mode - for other fields - - This version opens a temporary file for reading and writing, - and immediately deletes (unlinks) it. The trick (on Unix!) is - that the file can still be used, but it can't be opened by - another process, and it will automatically be deleted when it - is closed or when the current process terminates. - - If you want a more permanent file, you derive a class which - overrides this method. If you want a visible temporary file - that is nevertheless automatically deleted when the script - terminates, try defining a __del__ method in a derived class - which unlinks the temporary files you have created. - - """ - if self._binary_file: - return tempfile.TemporaryFile("wb+") - else: - return tempfile.TemporaryFile("w+", - encoding=self.encoding, newline = '\n') - - -# Test/debug code -# =============== - -def test(environ=os.environ): - """Robust test CGI script, usable as main program. - - Write minimal HTTP headers and dump all information provided to - the script in HTML form. - - """ - print("Content-type: text/html") - print() - sys.stderr = sys.stdout - try: - form = FieldStorage() # Replace with other classes to test those - print_directory() - print_arguments() - print_form(form) - print_environ(environ) - print_environ_usage() - def f(): - exec("testing print_exception() -- italics?") - def g(f=f): - f() - print("

<H3>What follows is a test, not an actual exception:</H3>") - g() - except: - print_exception() - - print("<H1>Second try with a small maxlen...</H1>
") - - global maxlen - maxlen = 50 - try: - form = FieldStorage() # Replace with other classes to test those - print_directory() - print_arguments() - print_form(form) - print_environ(environ) - except: - print_exception() - -def print_exception(type=None, value=None, tb=None, limit=None): - if type is None: - type, value, tb = sys.exc_info() - import traceback - print() - print("

<H3>Traceback (most recent call last):</H3>") - list = traceback.format_tb(tb, limit) + \ - traceback.format_exception_only(type, value) - print("<PRE>%s<B>%s</B></PRE>
" % ( - html.escape("".join(list[:-1])), - html.escape(list[-1]), - )) - del tb - -def print_environ(environ=os.environ): - """Dump the shell environment as HTML.""" - keys = sorted(environ.keys()) - print() - print("

<H3>Shell Environment:</H3>") - print("<DL>") - for key in keys: - print("<DT>", html.escape(key), "<DD>", html.escape(environ[key])) - print("</DL>
") - print() - -def print_form(form): - """Dump the contents of a form as HTML.""" - keys = sorted(form.keys()) - print() - print("

<H3>Form Contents:</H3>") - if not keys: - print("<P>No form fields.") - print("<DL>") - for key in keys: - print("<DT>" + html.escape(key) + ":", end=' ') - value = form[key] - print("<i>" + html.escape(repr(type(value))) + "</i>") - print("<DD>" + html.escape(repr(value))) - print("</DL>
") - print() - -def print_directory(): - """Dump the current directory as HTML.""" - print() - print("

<H3>Current Working Directory:</H3>") - try: - pwd = os.getcwd() - except OSError as msg: - print("OSError:", html.escape(str(msg))) - else: - print(html.escape(pwd)) - print() - -def print_arguments(): - print() - print("<H3>Command Line Arguments:</H3>
") - print() - print(sys.argv) - print() - -def print_environ_usage(): - """Dump a list of environment variables used by CGI as HTML.""" - print(""" -

<H3>These environment variables could have been set:</H3>
-<UL>
-<LI>AUTH_TYPE
-<LI>CONTENT_LENGTH
-<LI>CONTENT_TYPE
-<LI>DATE_GMT
-<LI>DATE_LOCAL
-<LI>DOCUMENT_NAME
-<LI>DOCUMENT_ROOT
-<LI>DOCUMENT_URI
-<LI>GATEWAY_INTERFACE
-<LI>LAST_MODIFIED
-<LI>PATH
-<LI>PATH_INFO
-<LI>PATH_TRANSLATED
-<LI>QUERY_STRING
-<LI>REMOTE_ADDR
-<LI>REMOTE_HOST
-<LI>REMOTE_IDENT
-<LI>REMOTE_USER
-<LI>REQUEST_METHOD
-<LI>SCRIPT_NAME
-<LI>SERVER_NAME
-<LI>SERVER_PORT
-<LI>SERVER_PROTOCOL
-<LI>SERVER_ROOT
-<LI>SERVER_SOFTWARE
-</UL>
-In addition, HTTP headers sent by the server may be passed in the
-environment as well.  Here are some common variable names:
-<UL>
-<LI>HTTP_ACCEPT
-<LI>HTTP_CONNECTION
-<LI>HTTP_HOST
-<LI>HTTP_PRAGMA
-<LI>HTTP_REFERER
-<LI>HTTP_USER_AGENT
-</UL>
-""") - - -# Utilities -# ========= - -def valid_boundary(s): - import re - if isinstance(s, bytes): - _vb_pattern = b"^[ -~]{0,200}[!-~]$" - else: - _vb_pattern = "^[ -~]{0,200}[!-~]$" - return re.match(_vb_pattern, s) - -# Invoke mainline -# =============== - -# Call test() when this file is run as a script (not imported as a module) -if __name__ == '__main__': - test() diff --git a/Lib/cgitb.py b/Lib/cgitb.py deleted file mode 100644 index f6b97f25c59de98..000000000000000 --- a/Lib/cgitb.py +++ /dev/null @@ -1,332 +0,0 @@ -"""More comprehensive traceback formatting for Python scripts. - -To enable this module, do: - - import cgitb; cgitb.enable() - -at the top of your script. The optional arguments to enable() are: - - display - if true, tracebacks are displayed in the web browser - logdir - if set, tracebacks are written to files in this directory - context - number of lines of source code to show for each stack frame - format - 'text' or 'html' controls the output format - -By default, tracebacks are displayed but not saved, the context is 5 lines -and the output format is 'html' (for backwards compatibility with the -original use of this module) - -Alternatively, if you have caught an exception and want cgitb to display it -for you, call cgitb.handler(). The optional argument to handler() is a -3-item tuple (etype, evalue, etb) just like the value of sys.exc_info(). -The default handler displays output as HTML. - -""" -import inspect -import keyword -import linecache -import os -import pydoc -import sys -import tempfile -import time -import tokenize -import traceback -import warnings -from html import escape as html_escape - -warnings._deprecated(__name__, remove=(3, 13)) - - -def reset(): - """Return a string that resets the CGI and browser to a known state.""" - return ''' - --> --> - - ''' - -__UNDEF__ = [] # a special sentinel object -def small(text): - if text: - return '' + text + '' - else: - return '' - -def strong(text): - if text: - return '' + text + '' - else: - return '' - -def grey(text): - if text: - return '' + text + '' - else: - return '' - -def lookup(name, frame, locals): - """Find the value for a given name in the given environment.""" - if name in locals: - return 'local', locals[name] - if name in frame.f_globals: - return 'global', frame.f_globals[name] - if '__builtins__' in frame.f_globals: - builtins = frame.f_globals['__builtins__'] - if isinstance(builtins, dict): - if name in builtins: - return 'builtin', builtins[name] - else: - if hasattr(builtins, name): - return 'builtin', getattr(builtins, name) - return None, __UNDEF__ - -def scanvars(reader, frame, locals): - """Scan one logical line of Python and look up values of variables used.""" - vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__ - for ttype, token, start, end, line in tokenize.generate_tokens(reader): - if ttype == tokenize.NEWLINE: break - if ttype == tokenize.NAME and token not in keyword.kwlist: - if lasttoken == '.': - if parent is not __UNDEF__: - value = getattr(parent, token, __UNDEF__) - vars.append((prefix + token, prefix, value)) - else: - where, value = lookup(token, frame, locals) - vars.append((token, where, value)) - elif token == '.': - prefix += lasttoken + '.' 
- parent = value - else: - parent, prefix = None, '' - lasttoken = token - return vars - -def html(einfo, context=5): - """Return a nice HTML document describing a given traceback.""" - etype, evalue, etb = einfo - if isinstance(etype, type): - etype = etype.__name__ - pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable - date = time.ctime(time.time()) - head = f''' - - - - - -
 
- 
-{html_escape(str(etype))}
-{pyver}
{date}
-

A problem occurred in a Python script. Here is the sequence of -function calls leading up to the error, in the order they occurred.

''' - - indent = '' + small(' ' * 5) + ' ' - frames = [] - records = inspect.getinnerframes(etb, context) - for frame, file, lnum, func, lines, index in records: - if file: - file = os.path.abspath(file) - link = '%s' % (file, pydoc.html.escape(file)) - else: - file = link = '?' - args, varargs, varkw, locals = inspect.getargvalues(frame) - call = '' - if func != '?': - call = 'in ' + strong(pydoc.html.escape(func)) - if func != "": - call += inspect.formatargvalues(args, varargs, varkw, locals, - formatvalue=lambda value: '=' + pydoc.html.repr(value)) - - highlight = {} - def reader(lnum=[lnum]): - highlight[lnum[0]] = 1 - try: return linecache.getline(file, lnum[0]) - finally: lnum[0] += 1 - vars = scanvars(reader, frame, locals) - - rows = ['%s%s %s' % - (' ', link, call)] - if index is not None: - i = lnum - index - for line in lines: - num = small(' ' * (5-len(str(i))) + str(i)) + ' ' - if i in highlight: - line = '=>%s%s' % (num, pydoc.html.preformat(line)) - rows.append('%s' % line) - else: - line = '  %s%s' % (num, pydoc.html.preformat(line)) - rows.append('%s' % grey(line)) - i += 1 - - done, dump = {}, [] - for name, where, value in vars: - if name in done: continue - done[name] = 1 - if value is not __UNDEF__: - if where in ('global', 'builtin'): - name = ('%s ' % where) + strong(name) - elif where == 'local': - name = strong(name) - else: - name = where + strong(name.split('.')[-1]) - dump.append('%s = %s' % (name, pydoc.html.repr(value))) - else: - dump.append(name + ' undefined') - - rows.append('%s' % small(grey(', '.join(dump)))) - frames.append(''' - -%s
''' % '\n'.join(rows)) - - exception = ['<p>
%s: %s' % (strong(pydoc.html.escape(str(etype))), - pydoc.html.escape(str(evalue)))] - for name in dir(evalue): - if name[:1] == '_': continue - value = pydoc.html.repr(getattr(evalue, name)) - exception.append('\n
%s%s =\n%s' % (indent, name, value)) - - return head + ''.join(frames) + ''.join(exception) + ''' - - - -''' % pydoc.html.escape( - ''.join(traceback.format_exception(etype, evalue, etb))) - -def text(einfo, context=5): - """Return a plain text document describing a given traceback.""" - etype, evalue, etb = einfo - if isinstance(etype, type): - etype = etype.__name__ - pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable - date = time.ctime(time.time()) - head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + ''' -A problem occurred in a Python script. Here is the sequence of -function calls leading up to the error, in the order they occurred. -''' - - frames = [] - records = inspect.getinnerframes(etb, context) - for frame, file, lnum, func, lines, index in records: - file = file and os.path.abspath(file) or '?' - args, varargs, varkw, locals = inspect.getargvalues(frame) - call = '' - if func != '?': - call = 'in ' + func - if func != "": - call += inspect.formatargvalues(args, varargs, varkw, locals, - formatvalue=lambda value: '=' + pydoc.text.repr(value)) - - highlight = {} - def reader(lnum=[lnum]): - highlight[lnum[0]] = 1 - try: return linecache.getline(file, lnum[0]) - finally: lnum[0] += 1 - vars = scanvars(reader, frame, locals) - - rows = [' %s %s' % (file, call)] - if index is not None: - i = lnum - index - for line in lines: - num = '%5d ' % i - rows.append(num+line.rstrip()) - i += 1 - - done, dump = {}, [] - for name, where, value in vars: - if name in done: continue - done[name] = 1 - if value is not __UNDEF__: - if where == 'global': name = 'global ' + name - elif where != 'local': name = where + name.split('.')[-1] - dump.append('%s = %s' % (name, pydoc.text.repr(value))) - else: - dump.append(name + ' undefined') - - rows.append('\n'.join(dump)) - frames.append('\n%s\n' % '\n'.join(rows)) - - exception = ['%s: %s' % (str(etype), str(evalue))] - for name in dir(evalue): - value = pydoc.text.repr(getattr(evalue, name)) - exception.append('\n%s%s = %s' % (" "*4, name, value)) - - return head + ''.join(frames) + ''.join(exception) + ''' - -The above is a description of an error in a Python program. Here is -the original traceback: - -%s -''' % ''.join(traceback.format_exception(etype, evalue, etb)) - -class Hook: - """A hook to replace sys.excepthook that shows tracebacks in HTML.""" - - def __init__(self, display=1, logdir=None, context=5, file=None, - format="html"): - self.display = display # send tracebacks to browser if true - self.logdir = logdir # log tracebacks to files if not None - self.context = context # number of source code lines per frame - self.file = file or sys.stdout # place to send the output - self.format = format - - def __call__(self, etype, evalue, etb): - self.handle((etype, evalue, etb)) - - def handle(self, info=None): - info = info or sys.exc_info() - if self.format == "html": - self.file.write(reset()) - - formatter = (self.format=="html") and html or text - plain = False - try: - doc = formatter(info, self.context) - except: # just in case something goes wrong - doc = ''.join(traceback.format_exception(*info)) - plain = True - - if self.display: - if plain: - doc = pydoc.html.escape(doc) - self.file.write('

<pre>' + doc + '</pre>
\n') - else: - self.file.write(doc + '\n') - else: - self.file.write('<p>
A problem occurred in a Python script.\n') - - if self.logdir is not None: - suffix = ['.txt', '.html'][self.format=="html"] - (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) - - try: - with os.fdopen(fd, 'w') as file: - file.write(doc) - msg = '%s contains the description of this error.' % path - except: - msg = 'Tried to save traceback to %s, but failed.' % path - - if self.format == 'html': - self.file.write('

<p>%s</p>
\n' % msg) - else: - self.file.write(msg + '\n') - try: - self.file.flush() - except: pass - -handler = Hook().handle -def enable(display=1, logdir=None, context=5, format="html"): - """Install an exception handler that formats tracebacks as HTML. - - The optional argument 'display' can be set to 0 to suppress sending the - traceback to the browser, and 'logdir' can be set to a directory to cause - tracebacks to be written to files there.""" - sys.excepthook = Hook(display=display, logdir=logdir, - context=context, format=format) diff --git a/Lib/chunk.py b/Lib/chunk.py deleted file mode 100644 index 618781efd11e715..000000000000000 --- a/Lib/chunk.py +++ /dev/null @@ -1,173 +0,0 @@ -"""Simple class to read IFF chunks. - -An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File -Format)) has the following structure: - -+----------------+ -| ID (4 bytes) | -+----------------+ -| size (4 bytes) | -+----------------+ -| data | -| ... | -+----------------+ - -The ID is a 4-byte string which identifies the type of chunk. - -The size field (a 32-bit value, encoded using big-endian byte order) -gives the size of the whole chunk, including the 8-byte header. - -Usually an IFF-type file consists of one or more chunks. The proposed -usage of the Chunk class defined here is to instantiate an instance at -the start of each chunk and read from the instance until it reaches -the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with an EOFError -exception. - -Usage: -while True: - try: - chunk = Chunk(file) - except EOFError: - break - chunktype = chunk.getname() - while True: - data = chunk.read(nbytes) - if not data: - pass - # do something with data - -The interface is file-like. The implemented methods are: -read, close, seek, tell, isatty. -Extra methods are: skip() (called by close, skips to the end of the chunk), -getname() (returns the name (ID) of the chunk) - -The __init__ method has one required argument, a file-like object -(including a chunk instance), and one optional argument, a flag which -specifies whether or not chunks are aligned on 2-byte boundaries. The -default is 1, i.e. aligned. -""" - -import warnings - -warnings._deprecated(__name__, remove=(3, 13)) - -class Chunk: - def __init__(self, file, align=True, bigendian=True, inclheader=False): - import struct - self.closed = False - self.align = align # whether to align to word (2-byte) boundaries - if bigendian: - strflag = '>' - else: - strflag = '<' - self.file = file - self.chunkname = file.read(4) - if len(self.chunkname) < 4: - raise EOFError - try: - self.chunksize = struct.unpack_from(strflag+'L', file.read(4))[0] - except struct.error: - raise EOFError from None - if inclheader: - self.chunksize = self.chunksize - 8 # subtract header - self.size_read = 0 - try: - self.offset = self.file.tell() - except (AttributeError, OSError): - self.seekable = False - else: - self.seekable = True - - def getname(self): - """Return the name (ID) of the current chunk.""" - return self.chunkname - - def getsize(self): - """Return the size of the current chunk.""" - return self.chunksize - - def close(self): - if not self.closed: - try: - self.skip() - finally: - self.closed = True - - def isatty(self): - if self.closed: - raise ValueError("I/O operation on closed file") - return False - - def seek(self, pos, whence=0): - """Seek to specified position into the chunk. - Default position is 0 (start of chunk). - If the file is not seekable, this will result in an error. 
- """ - - if self.closed: - raise ValueError("I/O operation on closed file") - if not self.seekable: - raise OSError("cannot seek") - if whence == 1: - pos = pos + self.size_read - elif whence == 2: - pos = pos + self.chunksize - if pos < 0 or pos > self.chunksize: - raise RuntimeError - self.file.seek(self.offset + pos, 0) - self.size_read = pos - - def tell(self): - if self.closed: - raise ValueError("I/O operation on closed file") - return self.size_read - - def read(self, size=-1): - """Read at most size bytes from the chunk. - If size is omitted or negative, read until the end - of the chunk. - """ - - if self.closed: - raise ValueError("I/O operation on closed file") - if self.size_read >= self.chunksize: - return b'' - if size < 0: - size = self.chunksize - self.size_read - if size > self.chunksize - self.size_read: - size = self.chunksize - self.size_read - data = self.file.read(size) - self.size_read = self.size_read + len(data) - if self.size_read == self.chunksize and \ - self.align and \ - (self.chunksize & 1): - dummy = self.file.read(1) - self.size_read = self.size_read + len(dummy) - return data - - def skip(self): - """Skip the rest of the chunk. - If you are not interested in the contents of the chunk, - this method should be called so that the file points to - the start of the next chunk. - """ - - if self.closed: - raise ValueError("I/O operation on closed file") - if self.seekable: - try: - n = self.chunksize - self.size_read - # maybe fix alignment - if self.align and (self.chunksize & 1): - n = n + 1 - self.file.seek(n, 1) - self.size_read = self.size_read + n - return - except OSError: - pass - while self.size_read < self.chunksize: - n = min(8192, self.chunksize - self.size_read) - dummy = self.read(n) - if not dummy: - raise EOFError diff --git a/Lib/cmd.py b/Lib/cmd.py index 88ee7d3ddc46944..e933b8dbc1470a8 100644 --- a/Lib/cmd.py +++ b/Lib/cmd.py @@ -210,9 +210,8 @@ def onecmd(self, line): if cmd == '': return self.default(line) else: - try: - func = getattr(self, 'do_' + cmd) - except AttributeError: + func = getattr(self, 'do_' + cmd, None) + if func is None: return self.default(line) return func(arg) diff --git a/Lib/code.py b/Lib/code.py index 76000f8c8b2c1e1..f4aecddeca7813b 100644 --- a/Lib/code.py +++ b/Lib/code.py @@ -5,6 +5,7 @@ # Inspired by similar code by Jeff Epler and Fredrik Lundh. +import builtins import sys import traceback from codeop import CommandCompiler, compile_command @@ -106,6 +107,7 @@ def showsyntaxerror(self, filename=None): """ type, value, tb = sys.exc_info() + sys.last_exc = value sys.last_type = type sys.last_value = value sys.last_traceback = tb @@ -119,7 +121,7 @@ def showsyntaxerror(self, filename=None): else: # Stuff in the right filename value = SyntaxError(msg, (filename, lineno, offset, line)) - sys.last_value = value + sys.last_exc = sys.last_value = value if sys.excepthook is sys.__excepthook__: lines = traceback.format_exception_only(type, value) self.write(''.join(lines)) @@ -138,6 +140,7 @@ def showtraceback(self): """ sys.last_type, sys.last_value, last_tb = ei = sys.exc_info() sys.last_traceback = last_tb + sys.last_exc = ei[1] try: lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next) if sys.excepthook is sys.__excepthook__: @@ -167,7 +170,7 @@ class InteractiveConsole(InteractiveInterpreter): """ - def __init__(self, locals=None, filename=""): + def __init__(self, locals=None, filename="", local_exit=False): """Constructor. 
The optional locals argument will be passed to the @@ -179,6 +182,7 @@ def __init__(self, locals=None, filename=""): """ InteractiveInterpreter.__init__(self, locals) self.filename = filename + self.local_exit = local_exit self.resetbuffer() def resetbuffer(self): @@ -217,27 +221,64 @@ def interact(self, banner=None, exitmsg=None): elif banner: self.write("%s\n" % str(banner)) more = 0 - while 1: - try: - if more: - prompt = sys.ps2 - else: - prompt = sys.ps1 + + # When the user uses exit() or quit() in their interactive shell + # they probably just want to exit the created shell, not the whole + # process. exit and quit in builtins closes sys.stdin which makes + # it super difficult to restore + # + # When self.local_exit is True, we overwrite the builtins so + # exit() and quit() only raises SystemExit and we can catch that + # to only exit the interactive shell + + _exit = None + _quit = None + + if self.local_exit: + if hasattr(builtins, "exit"): + _exit = builtins.exit + builtins.exit = Quitter("exit") + + if hasattr(builtins, "quit"): + _quit = builtins.quit + builtins.quit = Quitter("quit") + + try: + while True: try: - line = self.raw_input(prompt) - except EOFError: - self.write("\n") - break - else: - more = self.push(line) - except KeyboardInterrupt: - self.write("\nKeyboardInterrupt\n") - self.resetbuffer() - more = 0 - if exitmsg is None: - self.write('now exiting %s...\n' % self.__class__.__name__) - elif exitmsg != '': - self.write('%s\n' % exitmsg) + if more: + prompt = sys.ps2 + else: + prompt = sys.ps1 + try: + line = self.raw_input(prompt) + except EOFError: + self.write("\n") + break + else: + more = self.push(line) + except KeyboardInterrupt: + self.write("\nKeyboardInterrupt\n") + self.resetbuffer() + more = 0 + except SystemExit as e: + if self.local_exit: + self.write("\n") + break + else: + raise e + finally: + # restore exit and quit in builtins if they were modified + if _exit is not None: + builtins.exit = _exit + + if _quit is not None: + builtins.quit = _quit + + if exitmsg is None: + self.write('now exiting %s...\n' % self.__class__.__name__) + elif exitmsg != '': + self.write('%s\n' % exitmsg) def push(self, line): """Push a line to the interpreter. @@ -274,8 +315,22 @@ def raw_input(self, prompt=""): return input(prompt) +class Quitter: + def __init__(self, name): + self.name = name + if sys.platform == "win32": + self.eof = 'Ctrl-Z plus Return' + else: + self.eof = 'Ctrl-D (i.e. EOF)' + + def __repr__(self): + return f'Use {self.name} or {self.eof} to exit' + + def __call__(self, code=None): + raise SystemExit(code) + -def interact(banner=None, readfunc=None, local=None, exitmsg=None): +def interact(banner=None, readfunc=None, local=None, exitmsg=None, local_exit=False): """Closely emulate the interactive Python interpreter. 
This is a backwards compatible interface to the InteractiveConsole @@ -288,9 +343,10 @@ def interact(banner=None, readfunc=None, local=None, exitmsg=None): readfunc -- if not None, replaces InteractiveConsole.raw_input() local -- passed to InteractiveInterpreter.__init__() exitmsg -- passed to InteractiveConsole.interact() + local_exit -- passed to InteractiveConsole.__init__() """ - console = InteractiveConsole(local) + console = InteractiveConsole(local, local_exit=local_exit) if readfunc is not None: console.raw_input = readfunc else: diff --git a/Lib/codecs.py b/Lib/codecs.py index c1c55d8afef389a..9b35b6127dd01c7 100644 --- a/Lib/codecs.py +++ b/Lib/codecs.py @@ -111,6 +111,9 @@ def __repr__(self): (self.__class__.__module__, self.__class__.__qualname__, self.name, id(self)) + def __getnewargs__(self): + return tuple(self) + class Codec: """ Defines the interface for stateless encoders/decoders. @@ -414,6 +417,9 @@ def __enter__(self): def __exit__(self, type, value, tb): self.stream.close() + def __reduce_ex__(self, proto): + raise TypeError("can't serialize %s" % self.__class__.__name__) + ### class StreamReader(Codec): @@ -663,6 +669,9 @@ def __enter__(self): def __exit__(self, type, value, tb): self.stream.close() + def __reduce_ex__(self, proto): + raise TypeError("can't serialize %s" % self.__class__.__name__) + ### class StreamReaderWriter: @@ -750,6 +759,9 @@ def __enter__(self): def __exit__(self, type, value, tb): self.stream.close() + def __reduce_ex__(self, proto): + raise TypeError("can't serialize %s" % self.__class__.__name__) + ### class StreamRecoder: @@ -866,6 +878,9 @@ def __enter__(self): def __exit__(self, type, value, tb): self.stream.close() + def __reduce_ex__(self, proto): + raise TypeError("can't serialize %s" % self.__class__.__name__) + ### Shortcuts def open(filename, mode='r', encoding=None, errors='strict', buffering=-1): diff --git a/Lib/codeop.py b/Lib/codeop.py index 2213b69f231f92f..91146be2c438e2c 100644 --- a/Lib/codeop.py +++ b/Lib/codeop.py @@ -70,20 +70,14 @@ def _maybe_compile(compiler, source, filename, symbol): return None # fallthrough - return compiler(source, filename, symbol) + return compiler(source, filename, symbol, incomplete_input=False) - -def _is_syntax_error(err1, err2): - rep1 = repr(err1) - rep2 = repr(err2) - if "was never closed" in rep1 and "was never closed" in rep2: - return False - if rep1 == rep2: - return True - return False - -def _compile(source, filename, symbol): - return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT) +def _compile(source, filename, symbol, incomplete_input=True): + flags = 0 + if incomplete_input: + flags |= PyCF_ALLOW_INCOMPLETE_INPUT + flags |= PyCF_DONT_IMPLY_DEDENT + return compile(source, filename, symbol, flags) def compile_command(source, filename="", symbol="single"): r"""Compile a command and determine whether it is incomplete. 
@@ -114,8 +108,12 @@ class Compile: def __init__(self): self.flags = PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT - def __call__(self, source, filename, symbol): - codeob = compile(source, filename, symbol, self.flags, True) + def __call__(self, source, filename, symbol, **kwargs): + flags = self.flags + if kwargs.get('incomplete_input', True) is False: + flags &= ~PyCF_DONT_IMPLY_DEDENT + flags &= ~PyCF_ALLOW_INCOMPLETE_INPUT + codeob = compile(source, filename, symbol, flags, True) for feature in _features: if codeob.co_flags & feature.compiler_flag: self.flags |= feature.compiler_flag diff --git a/Lib/collections/__init__.py b/Lib/collections/__init__.py index f07ee143a5aff11..a461550ea40da74 100644 --- a/Lib/collections/__init__.py +++ b/Lib/collections/__init__.py @@ -45,6 +45,11 @@ else: _collections_abc.MutableSequence.register(deque) +try: + from _collections import _deque_iterator +except ImportError: + pass + try: from _collections import defaultdict except ImportError: @@ -90,17 +95,19 @@ class OrderedDict(dict): # Individual links are kept alive by the hard reference in self.__map. # Those hard references disappear when a key is deleted from an OrderedDict. + def __new__(cls, /, *args, **kwds): + "Create the ordered dict object and set up the underlying structures." + self = dict.__new__(cls) + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + return self + def __init__(self, other=(), /, **kwds): '''Initialize an ordered dictionary. The signature is the same as regular dictionaries. Keyword argument order is preserved. ''' - try: - self.__root - except AttributeError: - self.__hardroot = _Link() - self.__root = root = _proxy(self.__hardroot) - root.prev = root.next = root - self.__map = {} self.__update(other, **kwds) def __setitem__(self, key, value, @@ -267,7 +274,7 @@ def __repr__(self): 'od.__repr__() <==> repr(od)' if not self: return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, list(self.items())) + return '%s(%r)' % (self.__class__.__name__, dict(self.items())) def __reduce__(self): 'Return state information for pickling' @@ -488,6 +495,7 @@ def __getnewargs__(self): '_field_defaults': field_defaults, '__new__': __new__, '_make': _make, + '__replace__': _replace, '_replace': _replace, '__repr__': __repr__, '_asdict': _asdict, @@ -507,9 +515,12 @@ def __getnewargs__(self): # specified a particular module. if module is None: try: - module = _sys._getframe(1).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): - pass + module = _sys._getframemodulename(1) or '__main__' + except AttributeError: + try: + module = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass if module is not None: result.__module__ = module diff --git a/Lib/colorsys.py b/Lib/colorsys.py index 9bdc83e37726033..bc897bd0f992982 100644 --- a/Lib/colorsys.py +++ b/Lib/colorsys.py @@ -83,7 +83,7 @@ def rgb_to_hls(r, g, b): if l <= 0.5: s = rangec / sumc else: - s = rangec / (2.0-sumc) + s = rangec / (2.0-maxc-minc) # Not always 2.0-sumc: gh-106498. 
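# A hedged round-trip sketch of the colorsys pair touched above; the sample
# colour is arbitrary and only illustrates normal rgb_to_hls()/hls_to_rgb() use.
import colorsys

r, g, b = 0.2, 0.6, 0.4                 # arbitrary RGB triple in [0.0, 1.0]
h, l, s = colorsys.rgb_to_hls(r, g, b)  # hue, lightness, saturation
print(h, l, s)
print(colorsys.hls_to_rgb(h, l, s))     # round-trips to approximately (0.2, 0.6, 0.4)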
rc = (maxc-r) / rangec gc = (maxc-g) / rangec bc = (maxc-b) / rangec diff --git a/Lib/compileall.py b/Lib/compileall.py index 330a90786efc5f1..9b53086bf41380e 100644 --- a/Lib/compileall.py +++ b/Lib/compileall.py @@ -97,9 +97,15 @@ def compile_dir(dir, maxlevels=None, ddir=None, force=False, files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels) success = True if workers != 1 and ProcessPoolExecutor is not None: + import multiprocessing + if multiprocessing.get_start_method() == 'fork': + mp_context = multiprocessing.get_context('forkserver') + else: + mp_context = None # If workers == 0, let ProcessPoolExecutor choose workers = workers or None - with ProcessPoolExecutor(max_workers=workers) as executor: + with ProcessPoolExecutor(max_workers=workers, + mp_context=mp_context) as executor: results = executor.map(partial(compile_file, ddir=ddir, force=force, rx=rx, quiet=quiet, @@ -154,8 +160,8 @@ def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, "in combination with stripdir or prependdir")) success = True - if quiet < 2 and isinstance(fullname, os.PathLike): - fullname = os.fspath(fullname) + fullname = os.fspath(fullname) + stripdir = os.fspath(stripdir) if stripdir is not None else None name = os.path.basename(fullname) dfile = None @@ -166,13 +172,13 @@ def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, if stripdir is not None: fullname_parts = fullname.split(os.path.sep) stripdir_parts = stripdir.split(os.path.sep) - ddir_parts = list(fullname_parts) - - for spart, opart in zip(stripdir_parts, fullname_parts): - if spart == opart: - ddir_parts.remove(spart) - dfile = os.path.join(*ddir_parts) + if stripdir_parts != fullname_parts[:len(stripdir_parts)]: + if quiet < 2: + print("The stripdir path {!r} is not a valid prefix for " + "source path {!r}; ignoring".format(stripdir, fullname)) + else: + dfile = os.path.join(*fullname_parts[len(stripdir_parts):]) if prependdir is not None: if dfile is None: diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py index 7e2f5fa30e82641..ffaffdb8b3d0aad 100644 --- a/Lib/concurrent/futures/process.py +++ b/Lib/concurrent/futures/process.py @@ -49,6 +49,8 @@ from concurrent.futures import _base import queue import multiprocessing as mp +# This import is required to load the multiprocessing.connection submodule +# so that it can be accessed later as `mp.connection` import multiprocessing.connection from multiprocessing.queues import Queue import threading @@ -69,6 +71,11 @@ def __init__(self): self._reader, self._writer = mp.Pipe(duplex=False) def close(self): + # Please note that we do not take the shutdown lock when + # calling clear() (to avoid deadlocking) so this method can + # only be called safely from the same thread as all calls to + # clear() even if you hold the shutdown lock. Otherwise we + # might try to read from the closed pipe. if not self._closed: self._closed = True self._writer.close() @@ -334,7 +341,14 @@ def run(self): # Main loop for the executor manager thread. while True: - self.add_call_item_to_queue() + # gh-109047: During Python finalization, self.call_queue.put() + # creation of a thread can fail with RuntimeError. 
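# Hedged sketch of the mp_context pattern used in the compileall hunk above:
# pick an explicit multiprocessing start method and pass it to the executor.
# Names and numbers here are illustrative only.
import multiprocessing
from concurrent.futures import ProcessPoolExecutor

def square(n):
    return n * n

if __name__ == "__main__":
    ctx = multiprocessing.get_context("spawn")  # or "forkserver" on POSIX
    with ProcessPoolExecutor(max_workers=2, mp_context=ctx) as executor:
        print(list(executor.map(square, range(5))))  # [0, 1, 4, 9, 16]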
+ try: + self.add_call_item_to_queue() + except BaseException as exc: + cause = format_exception(exc) + self.terminate_broken(cause) + return result_item, is_broken, cause = self.wait_result_broken_or_wakeup() @@ -364,6 +378,11 @@ def run(self): if self.is_shutting_down(): self.flag_executor_shutting_down() + # When only canceled futures remain in pending_work_items, our + # next call to wait_result_broken_or_wakeup would hang forever. + # This makes sure we have some running futures or none at all. + self.add_call_item_to_queue() + # Since no new work items can be added, it is safe to shutdown # this thread if there are no pending work items. if not self.pending_work_items: @@ -413,14 +432,18 @@ def wait_result_broken_or_wakeup(self): try: result_item = result_reader.recv() is_broken = False - except BaseException as e: - cause = format_exception(type(e), e, e.__traceback__) + except BaseException as exc: + cause = format_exception(exc) elif wakeup_reader in ready: is_broken = False - with self.shutdown_lock: - self.thread_wakeup.clear() + # No need to hold the _shutdown_lock here because: + # 1. we're the only thread to use the wakeup reader + # 2. we're also the only thread to call thread_wakeup.close() + # 3. we want to avoid a possible deadlock when both reader and writer + # would block (gh-105829) + self.thread_wakeup.clear() return result_item, is_broken, cause @@ -428,24 +451,14 @@ def process_result_item(self, result_item): # Process the received a result_item. This can be either the PID of a # worker that exited gracefully or a _ResultItem - if isinstance(result_item, int): - # Clean shutdown of a worker using its PID - # (avoids marking the executor broken) - assert self.is_shutting_down() - p = self.processes.pop(result_item) - p.join() - if not self.processes: - self.join_executor_internals() - return - else: - # Received a _ResultItem so mark the future as completed. - work_item = self.pending_work_items.pop(result_item.work_id, None) - # work_item can be None if another process terminated (see above) - if work_item is not None: - if result_item.exception: - work_item.future.set_exception(result_item.exception) - else: - work_item.future.set_result(result_item.result) + # Received a _ResultItem so mark the future as completed. + work_item = self.pending_work_items.pop(result_item.work_id, None) + # work_item can be None if another process terminated (see above) + if work_item is not None: + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) def is_shutting_down(self): # Check whether we should start shutting down the executor. @@ -457,7 +470,7 @@ def is_shutting_down(self): return (_global_shutdown or executor is None or executor._shutdown_thread) - def terminate_broken(self, cause): + def _terminate_broken(self, cause): # Terminate the executor because it is in a broken state. The cause # argument can be used to display more information on the error that # lead the executor into becoming broken. @@ -482,7 +495,14 @@ def terminate_broken(self, cause): # Mark pending tasks as failed. for work_id, work_item in self.pending_work_items.items(): - work_item.future.set_exception(bpe) + try: + work_item.future.set_exception(bpe) + except _base.InvalidStateError: + # set_exception() fails if the future is cancelled: ignore it. 
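# The try/except added above guards against this case: set_exception() on a
# future that was already cancelled raises InvalidStateError. A stand-alone
# illustration, independent of the executor internals:
from concurrent.futures import Future, InvalidStateError

f = Future()
f.cancel()                                # pending future, so cancel() succeeds
try:
    f.set_exception(RuntimeError("boom"))
except InvalidStateError:
    print("future already cancelled; exception ignored")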
+ # Trying to check if the future is cancelled before calling + # set_exception() would leave a race condition if the future is + # cancelled between the check and set_exception(). + pass # Delete references to object. See issue16284 del work_item self.pending_work_items.clear() @@ -492,8 +512,14 @@ def terminate_broken(self, cause): for p in self.processes.values(): p.terminate() + self.call_queue._terminate_broken() + # clean up resources - self.join_executor_internals() + self._join_executor_internals(broken=True) + + def terminate_broken(self, cause): + with self.shutdown_lock: + self._terminate_broken(cause) def flag_executor_shutting_down(self): # Flag the executor as shutting down and cancel remaining tasks if @@ -536,15 +562,24 @@ def shutdown_workers(self): break def join_executor_internals(self): - self.shutdown_workers() + with self.shutdown_lock: + self._join_executor_internals() + + def _join_executor_internals(self, broken=False): + # If broken, call_queue was closed and so can no longer be used. + if not broken: + self.shutdown_workers() + # Release the queue's resources as soon as possible. self.call_queue.close() self.call_queue.join_thread() - with self.shutdown_lock: - self.thread_wakeup.close() + self.thread_wakeup.close() + # If .join() is not called on the created processes then # some ctx.Queue methods may deadlock on Mac OS X. for p in self.processes.values(): + if broken: + p.terminate() p.join() def get_n_children_alive(self): @@ -616,9 +651,9 @@ def __init__(self, max_workers=None, mp_context=None, max_workers: The maximum number of processes that can be used to execute the given calls. If None or not given then as many worker processes will be created as the machine has processors. - mp_context: A multiprocessing context to launch the workers. This - object should provide SimpleQueue, Queue and Process. Useful - to allow specific multiprocessing start methods. + mp_context: A multiprocessing context to launch the workers created + using the multiprocessing.get_context('start method') API. This + object should provide SimpleQueue, Queue and Process. initializer: A callable used to initialize worker processes. initargs: A tuple of arguments to pass to the initializer. max_tasks_per_child: The maximum number of tasks a worker process @@ -631,7 +666,7 @@ def __init__(self, max_workers=None, mp_context=None, _check_system_limits() if max_workers is None: - self._max_workers = os.cpu_count() or 1 + self._max_workers = os.process_cpu_count() or 1 if sys.platform == 'win32': self._max_workers = min(_MAX_WINDOWS_WORKERS, self._max_workers) @@ -695,7 +730,10 @@ def __init__(self, max_workers=None, mp_context=None, # as it could result in a deadlock if a worker process dies with the # _result_queue write lock still acquired. # - # _shutdown_lock must be locked to access _ThreadWakeup. + # _shutdown_lock must be locked to access _ThreadWakeup.close() and + # .wakeup(). 
Care must also be taken to not call clear or close from + # more than one thread since _ThreadWakeup.clear() is not protected by + # the _shutdown_lock self._executor_manager_thread_wakeup = _ThreadWakeup() # Create communication channels for the executor diff --git a/Lib/concurrent/futures/thread.py b/Lib/concurrent/futures/thread.py index 51c942f51abd371..a024033f35fb548 100644 --- a/Lib/concurrent/futures/thread.py +++ b/Lib/concurrent/futures/thread.py @@ -43,7 +43,7 @@ def _python_exit(): after_in_parent=_global_shutdown_lock.release) -class _WorkItem(object): +class _WorkItem: def __init__(self, future, fn, args, kwargs): self.future = future self.fn = fn @@ -78,17 +78,20 @@ def _worker(executor_reference, work_queue, initializer, initargs): return try: while True: - work_item = work_queue.get(block=True) - if work_item is not None: - work_item.run() - # Delete references to object. See issue16284 - del work_item - - # attempt to increment idle count + try: + work_item = work_queue.get_nowait() + except queue.Empty: + # attempt to increment idle count if queue is empty executor = executor_reference() if executor is not None: executor._idle_semaphore.release() del executor + work_item = work_queue.get(block=True) + + if work_item is not None: + work_item.run() + # Delete references to object. See GH-60488 + del work_item continue executor = executor_reference() @@ -136,10 +139,10 @@ def __init__(self, max_workers=None, thread_name_prefix='', # * CPU bound task which releases GIL # * I/O bound task (which releases GIL, of course) # - # We use cpu_count + 4 for both types of tasks. + # We use process_cpu_count + 4 for both types of tasks. # But we limit it to 32 to avoid consuming surprisingly large resource # on many core machine. - max_workers = min(32, (os.cpu_count() or 1) + 4) + max_workers = min(32, (os.process_cpu_count() or 1) + 4) if max_workers <= 0: raise ValueError("max_workers must be greater than 0") diff --git a/Lib/configparser.py b/Lib/configparser.py index f98e6fb01f97cc4..71362d23ec37576 100644 --- a/Lib/configparser.py +++ b/Lib/configparser.py @@ -19,36 +19,37 @@ inline_comment_prefixes=None, strict=True, empty_lines_in_values=True, default_section='DEFAULT', interpolation=, converters=): - Create the parser. When `defaults' is given, it is initialized into the + + Create the parser. When `defaults` is given, it is initialized into the dictionary or intrinsic defaults. The keys must be strings, the values must be appropriate for %()s string interpolation. - When `dict_type' is given, it will be used to create the dictionary + When `dict_type` is given, it will be used to create the dictionary objects for the list of sections, for the options within a section, and for the default values. - When `delimiters' is given, it will be used as the set of substrings + When `delimiters` is given, it will be used as the set of substrings that divide keys from values. - When `comment_prefixes' is given, it will be used as the set of + When `comment_prefixes` is given, it will be used as the set of substrings that prefix comments in empty lines. Comments can be indented. - When `inline_comment_prefixes' is given, it will be used as the set of + When `inline_comment_prefixes` is given, it will be used as the set of substrings that prefix comments in non-empty lines. When `strict` is True, the parser won't allow for any section or option duplicates while reading from a single source (file, string or dictionary). Default is True. 
- When `empty_lines_in_values' is False (default: True), each empty line + When `empty_lines_in_values` is False (default: True), each empty line marks the end of an option. Otherwise, internal empty lines of a multiline option are kept as part of the value. - When `allow_no_value' is True (default: False), options without + When `allow_no_value` is True (default: False), options without values are accepted; the value presented for these is None. - When `default_section' is given, the name of the special section is + When `default_section` is given, the name of the special section is named accordingly. By default it is called ``"DEFAULT"`` but this can be customized to point to any other valid section name. Its current value can be retrieved using the ``parser_instance.default_section`` @@ -58,7 +59,7 @@ instance. It will be used as the handler for option value pre-processing when using getters. RawConfigParser objects don't do any sort of interpolation, whereas ConfigParser uses an instance of - BasicInterpolation. The library also provides a ``zc.buildbot`` + BasicInterpolation. The library also provides a ``zc.buildout`` inspired ExtendedInterpolation implementation. When `converters` is given, it should be a dictionary where each key @@ -87,7 +88,7 @@ read_file(f, filename=None) Read and parse one configuration file, given as a file object. The filename defaults to f.name; it is only used in error - messages (if f has no `name' attribute, the string `' is used). + messages (if f has no `name` attribute, the string `` is used). read_string(string) Read configuration from a given string. @@ -103,9 +104,9 @@ Return a string value for the named option. All % interpolations are expanded in the return values, based on the defaults passed into the constructor and the DEFAULT section. Additional substitutions may be - provided using the `vars' argument, which must be a dictionary whose - contents override any pre-existing defaults. If `option' is a key in - `vars', the value from `vars' is used. + provided using the `vars` argument, which must be a dictionary whose + contents override any pre-existing defaults. If `option` is a key in + `vars`, the value from `vars` is used. getint(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to an integer. @@ -134,7 +135,7 @@ write(fp, space_around_delimiters=True) Write the configuration state in .ini format. If - `space_around_delimiters' is True (the default), delimiters + `space_around_delimiters` is True (the default), delimiters between keys and values are surrounded by spaces. """ @@ -146,7 +147,6 @@ import os import re import sys -import warnings __all__ = ("NoSectionError", "DuplicateOptionError", "DuplicateSectionError", "NoOptionError", "InterpolationError", "InterpolationDepthError", @@ -154,7 +154,7 @@ "ParsingError", "MissingSectionHeaderError", "ConfigParser", "RawConfigParser", "Interpolation", "BasicInterpolation", "ExtendedInterpolation", - "LegacyInterpolation", "SectionProxy", "ConverterMapping", + "SectionProxy", "ConverterMapping", "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH") _default_dict = dict @@ -323,7 +323,7 @@ def __init__(self, filename, lineno, line): # Used in parser getters to indicate the default behaviour when a specific -# option is not found it to raise an exception. Created to enable `None' as +# option is not found it to raise an exception. Created to enable `None` as # a valid fallback value. 
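# A short usage sketch of the fallback/vars behaviour described in the
# docstrings above; the configuration contents are made up for illustration.
from configparser import ConfigParser

parser = ConfigParser()
parser.read_string("[server]\nhost = example.org\n")

print(parser.get("server", "host"))                       # example.org
print(parser.get("server", "port", fallback="8080"))      # 8080: option missing
print(parser.get("server", "host", vars={"host": "app"})) # app: vars wins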
_UNSET = object() @@ -357,7 +357,7 @@ class BasicInterpolation(Interpolation): would resolve the "%(dir)s" to the value of dir. All reference expansions are done late, on demand. If a user needs to use a bare % in a configuration file, she can escape it by writing %%. Other % usage - is considered a user error and raises `InterpolationSyntaxError'.""" + is considered a user error and raises `InterpolationSyntaxError`.""" _KEYCRE = re.compile(r"%\(([^)]+)\)s") @@ -418,7 +418,7 @@ def _interpolate_some(self, parser, option, accum, rest, section, map, class ExtendedInterpolation(Interpolation): """Advanced variant of interpolation, supports the syntax used by - `zc.buildout'. Enables interpolation between sections.""" + `zc.buildout`. Enables interpolation between sections.""" _KEYCRE = re.compile(r"\$\{([^}]+)\}") @@ -490,53 +490,6 @@ def _interpolate_some(self, parser, option, accum, rest, section, map, "found: %r" % (rest,)) -class LegacyInterpolation(Interpolation): - """Deprecated interpolation used in old versions of ConfigParser. - Use BasicInterpolation or ExtendedInterpolation instead.""" - - _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - warnings.warn( - "LegacyInterpolation has been deprecated since Python 3.2 " - "and will be removed from the configparser module in Python 3.13. " - "Use BasicInterpolation or ExtendedInterpolation instead.", - DeprecationWarning, stacklevel=2 - ) - - def before_get(self, parser, section, option, value, vars): - rawval = value - depth = MAX_INTERPOLATION_DEPTH - while depth: # Loop through this until it's done - depth -= 1 - if value and "%(" in value: - replace = functools.partial(self._interpolation_replace, - parser=parser) - value = self._KEYCRE.sub(replace, value) - try: - value = value % vars - except KeyError as e: - raise InterpolationMissingOptionError( - option, section, rawval, e.args[0]) from None - else: - break - if value and "%(" in value: - raise InterpolationDepthError(option, section, rawval) - return value - - def before_set(self, parser, section, option, value): - return value - - @staticmethod - def _interpolation_replace(match, parser): - s = match.group(1) - if s is None: - return match.group() - else: - return "%%(%s)s" % parser.optionxform(s) - - class RawConfigParser(MutableMapping): """ConfigParser that does not do interpolation.""" @@ -691,10 +644,10 @@ def read(self, filenames, encoding=None): def read_file(self, f, source=None): """Like read() but the argument must be a file-like object. - The `f' argument must be iterable, returning one line at a time. - Optional second argument is the `source' specifying the name of the - file being read. If not given, it is taken from f.name. If `f' has no - `name' attribute, `' is used. + The `f` argument must be iterable, returning one line at a time. + Optional second argument is the `source` specifying the name of the + file being read. If not given, it is taken from f.name. If `f` has no + `name` attribute, `` is used. """ if source is None: try: @@ -718,7 +671,7 @@ def read_dict(self, dictionary, source=''): All types held in the dictionary are converted to strings during reading, including section names, option names and keys. - Optional second argument is the `source' specifying the name of the + Optional second argument is the `source` specifying the name of the dictionary being read. 
""" elements_added = set() @@ -742,15 +695,15 @@ def read_dict(self, dictionary, source=''): def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET): """Get an option value for a given section. - If `vars' is provided, it must be a dictionary. The option is looked up - in `vars' (if provided), `section', and in `DEFAULTSECT' in that order. - If the key is not found and `fallback' is provided, it is used as - a fallback value. `None' can be provided as a `fallback' value. + If `vars` is provided, it must be a dictionary. The option is looked up + in `vars` (if provided), `section`, and in `DEFAULTSECT` in that order. + If the key is not found and `fallback` is provided, it is used as + a fallback value. `None` can be provided as a `fallback` value. - If interpolation is enabled and the optional argument `raw' is False, + If interpolation is enabled and the optional argument `raw` is False, all interpolations are expanded in the return values. - Arguments `raw', `vars', and `fallback' are keyword only. + Arguments `raw`, `vars`, and `fallback` are keyword only. The section DEFAULT is special. """ @@ -810,8 +763,8 @@ def items(self, section=_UNSET, raw=False, vars=None): All % interpolations are expanded in the return values, based on the defaults passed into the constructor, unless the optional argument - `raw' is true. Additional substitutions may be provided using the - `vars' argument, which must be a dictionary whose contents overrides + `raw` is true. Additional substitutions may be provided using the + `vars` argument, which must be a dictionary whose contents overrides any pre-existing defaults. The section DEFAULT is special. @@ -853,8 +806,8 @@ def optionxform(self, optionstr): def has_option(self, section, option): """Check for the existence of a given option in a given section. - If the specified `section' is None or an empty string, DEFAULT is - assumed. If the specified `section' does not exist, returns False.""" + If the specified `section` is None or an empty string, DEFAULT is + assumed. If the specified `section` does not exist, returns False.""" if not section or section == self.default_section: option = self.optionxform(option) return option in self._defaults @@ -882,7 +835,7 @@ def set(self, section, option, value=None): def write(self, fp, space_around_delimiters=True): """Write an .ini-format representation of the configuration state. - If `space_around_delimiters' is True (the default), delimiters + If `space_around_delimiters` is True (the default), delimiters between keys and values are surrounded by spaces. Please note that comments in the original configuration file are not @@ -900,7 +853,7 @@ def write(self, fp, space_around_delimiters=True): self._sections[section].items(), d) def _write_section(self, fp, section_name, section_items, delimiter): - """Write a single section to the specified `fp'.""" + """Write a single section to the specified `fp`.""" fp.write("[{}]\n".format(section_name)) for key, value in section_items: value = self._interpolation.before_write(self, section_name, key, @@ -974,8 +927,8 @@ def _read(self, fp, fpname): """Parse a sectioned configuration file. Each section in a configuration file contains a header, indicated by - a name in square brackets (`[]'), plus key/value options, indicated by - `name' and `value' delimited with a specific substring (`=' or `:' by + a name in square brackets (`[]`), plus key/value options, indicated by + `name` and `value` delimited with a specific substring (`=` or `:` by default). 
Values can span multiple lines, as long as they are indented deeper @@ -983,7 +936,7 @@ def _read(self, fp, fpname): lines may be treated as parts of multiline values or ignored. Configuration files may include comments, prefixed by specific - characters (`#' and `;' by default). Comments may appear on their own + characters (`#` and `;` by default). Comments may appear on their own in an otherwise empty line or may be entered in lines holding values or section names. Please note that comments get stripped off when reading configuration files. """ diff --git a/Lib/contextlib.py b/Lib/contextlib.py index d5822219b3e25b3..5b646fabca0225b 100644 --- a/Lib/contextlib.py +++ b/Lib/contextlib.py @@ -20,6 +20,8 @@ class AbstractContextManager(abc.ABC): __class_getitem__ = classmethod(GenericAlias) + __slots__ = () + def __enter__(self): """Return `self` upon entering the runtime context.""" return self @@ -42,6 +44,8 @@ class AbstractAsyncContextManager(abc.ABC): __class_getitem__ = classmethod(GenericAlias) + __slots__ = () + async def __aenter__(self): """Return `self` upon entering the runtime context.""" return self @@ -145,7 +149,10 @@ def __exit__(self, typ, value, traceback): except StopIteration: return False else: - raise RuntimeError("generator didn't stop") + try: + raise RuntimeError("generator didn't stop") + finally: + self.gen.close() else: if value is None: # Need to force instantiation so we can reliably @@ -173,7 +180,7 @@ def __exit__(self, typ, value, traceback): isinstance(value, StopIteration) and exc.__cause__ is value ): - exc.__traceback__ = traceback + value.__traceback__ = traceback return False raise except BaseException as exc: @@ -187,7 +194,10 @@ def __exit__(self, typ, value, traceback): raise exc.__traceback__ = traceback return False - raise RuntimeError("generator didn't stop after throw()") + try: + raise RuntimeError("generator didn't stop after throw()") + finally: + self.gen.close() class _AsyncGeneratorContextManager( _GeneratorContextManagerBase, @@ -212,7 +222,10 @@ async def __aexit__(self, typ, value, traceback): except StopAsyncIteration: return False else: - raise RuntimeError("generator didn't stop") + try: + raise RuntimeError("generator didn't stop") + finally: + await self.gen.aclose() else: if value is None: # Need to force instantiation so we can reliably @@ -228,6 +241,7 @@ async def __aexit__(self, typ, value, traceback): except RuntimeError as exc: # Don't re-raise the passed in exception. (issue27122) if exc is value: + exc.__traceback__ = traceback return False # Avoid suppressing if a Stop(Async)Iteration exception # was passed to athrow() and later wrapped into a RuntimeError @@ -239,6 +253,7 @@ async def __aexit__(self, typ, value, traceback): isinstance(value, (StopIteration, StopAsyncIteration)) and exc.__cause__ is value ): + value.__traceback__ = traceback return False raise except BaseException as exc: @@ -250,8 +265,12 @@ async def __aexit__(self, typ, value, traceback): # and the __exit__() protocol. if exc is not value: raise + exc.__traceback__ = traceback return False - raise RuntimeError("generator didn't stop after athrow()") + try: + raise RuntimeError("generator didn't stop after athrow()") + finally: + await self.gen.aclose() def contextmanager(func): @@ -438,7 +457,16 @@ def __exit__(self, exctype, excinst, exctb): # exactly reproduce the limitations of the CPython interpreter. 
# # See http://bugs.python.org/issue12029 for more details - return exctype is not None and issubclass(exctype, self._exceptions) + if exctype is None: + return + if issubclass(exctype, self._exceptions): + return True + if issubclass(exctype, BaseExceptionGroup): + match, rest = excinst.split(self._exceptions) + if rest is None: + return True + raise rest + return False class _BaseExitStack: @@ -541,11 +569,12 @@ def __enter__(self): return self def __exit__(self, *exc_details): - received_exc = exc_details[0] is not None + exc = exc_details[1] + received_exc = exc is not None # We manipulate the exception state so it behaves as though # we were actually nesting multiple with statements - frame_exc = sys.exc_info()[1] + frame_exc = sys.exception() def _fix_exception_context(new_exc, old_exc): # Context may not be correct, so find the end of the chain while 1: @@ -568,24 +597,28 @@ def _fix_exception_context(new_exc, old_exc): is_sync, cb = self._exit_callbacks.pop() assert is_sync try: + if exc is None: + exc_details = None, None, None + else: + exc_details = type(exc), exc, exc.__traceback__ if cb(*exc_details): suppressed_exc = True pending_raise = False - exc_details = (None, None, None) - except: - new_exc_details = sys.exc_info() + exc = None + except BaseException as new_exc: # simulate the stack of exceptions by setting the context - _fix_exception_context(new_exc_details[1], exc_details[1]) + _fix_exception_context(new_exc, exc) pending_raise = True - exc_details = new_exc_details + exc = new_exc + if pending_raise: try: - # bare "raise exc_details[1]" replaces our carefully + # bare "raise exc" replaces our carefully # set-up context - fixed_ctx = exc_details[1].__context__ - raise exc_details[1] + fixed_ctx = exc.__context__ + raise exc except BaseException: - exc_details[1].__context__ = fixed_ctx + exc.__context__ = fixed_ctx raise return received_exc and suppressed_exc @@ -681,11 +714,12 @@ async def __aenter__(self): return self async def __aexit__(self, *exc_details): - received_exc = exc_details[0] is not None + exc = exc_details[1] + received_exc = exc is not None # We manipulate the exception state so it behaves as though # we were actually nesting multiple with statements - frame_exc = sys.exc_info()[1] + frame_exc = sys.exception() def _fix_exception_context(new_exc, old_exc): # Context may not be correct, so find the end of the chain while 1: @@ -707,6 +741,10 @@ def _fix_exception_context(new_exc, old_exc): while self._exit_callbacks: is_sync, cb = self._exit_callbacks.pop() try: + if exc is None: + exc_details = None, None, None + else: + exc_details = type(exc), exc, exc.__traceback__ if is_sync: cb_suppress = cb(*exc_details) else: @@ -715,21 +753,21 @@ def _fix_exception_context(new_exc, old_exc): if cb_suppress: suppressed_exc = True pending_raise = False - exc_details = (None, None, None) - except: - new_exc_details = sys.exc_info() + exc = None + except BaseException as new_exc: # simulate the stack of exceptions by setting the context - _fix_exception_context(new_exc_details[1], exc_details[1]) + _fix_exception_context(new_exc, exc) pending_raise = True - exc_details = new_exc_details + exc = new_exc + if pending_raise: try: - # bare "raise exc_details[1]" replaces our carefully + # bare "raise exc" replaces our carefully # set-up context - fixed_ctx = exc_details[1].__context__ - raise exc_details[1] + fixed_ctx = exc.__context__ + raise exc except BaseException: - exc_details[1].__context__ = fixed_ctx + exc.__context__ = fixed_ctx raise return received_exc 
and suppressed_exc diff --git a/Lib/copy.py b/Lib/copy.py index 1b276afe08121ec..a69bc4e78c20b30 100644 --- a/Lib/copy.py +++ b/Lib/copy.py @@ -56,11 +56,6 @@ class Error(Exception): pass error = Error # backward compatibility -try: - from org.python.core import PyStringMap -except ImportError: - PyStringMap = None - __all__ = ["Error", "copy", "deepcopy"] def copy(x): @@ -106,13 +101,11 @@ def copy(x): def _copy_immutable(x): return x -for t in (type(None), int, float, bool, complex, str, tuple, +for t in (types.NoneType, int, float, bool, complex, str, tuple, bytes, frozenset, type, range, slice, property, - types.BuiltinFunctionType, type(Ellipsis), type(NotImplemented), - types.FunctionType, weakref.ref): - d[t] = _copy_immutable -t = getattr(types, "CodeType", None) -if t is not None: + types.BuiltinFunctionType, types.EllipsisType, + types.NotImplementedType, types.FunctionType, types.CodeType, + weakref.ref): d[t] = _copy_immutable d[list] = list.copy @@ -120,9 +113,6 @@ def _copy_immutable(x): d[set] = set.copy d[bytearray] = bytearray.copy -if PyStringMap is not None: - d[PyStringMap] = PyStringMap.copy - del d, t def deepcopy(x, memo=None, _nil=[]): @@ -131,13 +121,13 @@ def deepcopy(x, memo=None, _nil=[]): See the module's __doc__ string for more info. """ + d = id(x) if memo is None: memo = {} - - d = id(x) - y = memo.get(d, _nil) - if y is not _nil: - return y + else: + y = memo.get(d, _nil) + if y is not _nil: + return y cls = type(x) @@ -181,9 +171,9 @@ def deepcopy(x, memo=None, _nil=[]): def _deepcopy_atomic(x, memo): return x -d[type(None)] = _deepcopy_atomic -d[type(Ellipsis)] = _deepcopy_atomic -d[type(NotImplemented)] = _deepcopy_atomic +d[types.NoneType] = _deepcopy_atomic +d[types.EllipsisType] = _deepcopy_atomic +d[types.NotImplementedType] = _deepcopy_atomic d[int] = _deepcopy_atomic d[float] = _deepcopy_atomic d[bool] = _deepcopy_atomic @@ -231,8 +221,6 @@ def _deepcopy_dict(x, memo, deepcopy=deepcopy): y[deepcopy(key, memo)] = deepcopy(value, memo) return y d[dict] = _deepcopy_dict -if PyStringMap is not None: - d[PyStringMap] = _deepcopy_dict def _deepcopy_method(x, memo): # Copy instance methods return type(x)(x.__func__, deepcopy(x.__self__, memo)) @@ -301,4 +289,17 @@ def _reconstruct(x, memo, func, args, y[key] = value return y -del types, weakref, PyStringMap +del types, weakref + + +def replace(obj, /, **changes): + """Return a new object replacing specified fields with new values. + + This is especially useful for immutable objects, like named tuples or + frozen dataclasses. + """ + cls = obj.__class__ + func = getattr(cls, '__replace__', None) + if func is None: + raise TypeError(f"replace() does not support {cls.__name__} objects") + return func(obj, **changes) diff --git a/Lib/copyreg.py b/Lib/copyreg.py index c8a52a2dc63a27e..578392409b403c0 100644 --- a/Lib/copyreg.py +++ b/Lib/copyreg.py @@ -25,16 +25,10 @@ def constructor(object): # Example: provide pickling support for complex numbers. 
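# Hedged sketch of the copy.replace() helper added just above; it delegates to
# an object's __replace__() hook, which namedtuple (and dataclasses, below)
# now provide.
import copy
from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])
p = Point(1, 2)
print(copy.replace(p, y=5))   # Point(x=1, y=5); p itself is unchanged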
-try: - complex -except NameError: - pass -else: +def pickle_complex(c): + return complex, (c.real, c.imag) - def pickle_complex(c): - return complex, (c.real, c.imag) - - pickle(complex, pickle_complex, complex) +pickle(complex, pickle_complex, complex) def pickle_union(obj): import functools, operator diff --git a/Lib/crypt.py b/Lib/crypt.py deleted file mode 100644 index de4a14a38847629..000000000000000 --- a/Lib/crypt.py +++ /dev/null @@ -1,124 +0,0 @@ -"""Wrapper to the POSIX crypt library call and associated functionality.""" - -import sys as _sys - -try: - import _crypt -except ModuleNotFoundError: - if _sys.platform == 'win32': - raise ImportError("The crypt module is not supported on Windows") - else: - raise ImportError("The required _crypt module was not built as part of CPython") - -import errno -import string as _string -import warnings -from random import SystemRandom as _SystemRandom -from collections import namedtuple as _namedtuple - - -warnings._deprecated(__name__, remove=(3, 13)) - - -_saltchars = _string.ascii_letters + _string.digits + './' -_sr = _SystemRandom() - - -class _Method(_namedtuple('_Method', 'name ident salt_chars total_size')): - - """Class representing a salt method per the Modular Crypt Format or the - legacy 2-character crypt method.""" - - def __repr__(self): - return ''.format(self.name) - - -def mksalt(method=None, *, rounds=None): - """Generate a salt for the specified method. - - If not specified, the strongest available method will be used. - - """ - if method is None: - method = methods[0] - if rounds is not None and not isinstance(rounds, int): - raise TypeError(f'{rounds.__class__.__name__} object cannot be ' - f'interpreted as an integer') - if not method.ident: # traditional - s = '' - else: # modular - s = f'${method.ident}$' - - if method.ident and method.ident[0] == '2': # Blowfish variants - if rounds is None: - log_rounds = 12 - else: - log_rounds = int.bit_length(rounds-1) - if rounds != 1 << log_rounds: - raise ValueError('rounds must be a power of 2') - if not 4 <= log_rounds <= 31: - raise ValueError('rounds out of the range 2**4 to 2**31') - s += f'{log_rounds:02d}$' - elif method.ident in ('5', '6'): # SHA-2 - if rounds is not None: - if not 1000 <= rounds <= 999_999_999: - raise ValueError('rounds out of the range 1000 to 999_999_999') - s += f'rounds={rounds}$' - elif rounds is not None: - raise ValueError(f"{method} doesn't support the rounds argument") - - s += ''.join(_sr.choice(_saltchars) for char in range(method.salt_chars)) - return s - - -def crypt(word, salt=None): - """Return a string representing the one-way hash of a password, with a salt - prepended. - - If ``salt`` is not specified or is ``None``, the strongest - available method will be selected and a salt generated. Otherwise, - ``salt`` may be one of the ``crypt.METHOD_*`` values, or a string as - returned by ``crypt.mksalt()``. - - """ - if salt is None or isinstance(salt, _Method): - salt = mksalt(salt) - return _crypt.crypt(word, salt) - - -# available salting/crypto methods -methods = [] - -def _add_method(name, *args, rounds=None): - method = _Method(name, *args) - globals()['METHOD_' + name] = method - salt = mksalt(method, rounds=rounds) - result = None - try: - result = crypt('', salt) - except OSError as e: - # Not all libc libraries support all encryption methods. 
- if e.errno in {errno.EINVAL, errno.EPERM, errno.ENOSYS}: - return False - raise - if result and len(result) == method.total_size: - methods.append(method) - return True - return False - -_add_method('SHA512', '6', 16, 106) -_add_method('SHA256', '5', 16, 63) - -# Choose the strongest supported version of Blowfish hashing. -# Early versions have flaws. Version 'a' fixes flaws of -# the initial implementation, 'b' fixes flaws of 'a'. -# 'y' is the same as 'b', for compatibility -# with openwall crypt_blowfish. -for _v in 'b', 'y', 'a', '': - if _add_method('BLOWFISH', '2' + _v, 22, 59 + len(_v), rounds=1<<4): - break - -_add_method('MD5', '1', 8, 34) -_add_method('CRYPT', None, 2, 13) - -del _v, _add_method diff --git a/Lib/csv.py b/Lib/csv.py index 0de5656a4eed7ba..77f30c8d2b1f61d 100644 --- a/Lib/csv.py +++ b/Lib/csv.py @@ -9,12 +9,14 @@ unregister_dialect, get_dialect, list_dialects, \ field_size_limit, \ QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \ + QUOTE_STRINGS, QUOTE_NOTNULL, \ __doc__ from _csv import Dialect as _Dialect from io import StringIO __all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE", + "QUOTE_STRINGS", "QUOTE_NOTNULL", "Error", "Dialect", "__doc__", "excel", "excel_tab", "field_size_limit", "reader", "writer", "register_dialect", "get_dialect", "list_dialects", "Sniffer", @@ -139,7 +141,8 @@ def __init__(self, f, fieldnames, restval="", extrasaction="raise", fieldnames = list(fieldnames) self.fieldnames = fieldnames # list of keys for the dict self.restval = restval # for writing short dicts - if extrasaction.lower() not in ("raise", "ignore"): + extrasaction = extrasaction.lower() + if extrasaction not in ("raise", "ignore"): raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'" % extrasaction) self.extrasaction = extrasaction @@ -165,11 +168,6 @@ def writerows(self, rowdicts): __class_getitem__ = classmethod(types.GenericAlias) -# Guard Sniffer's type checking against builds that exclude complex() -try: - complex -except NameError: - complex = float class Sniffer: ''' diff --git a/Lib/ctypes/__init__.py b/Lib/ctypes/__init__.py index b94b337a3157b7c..141142a57dcb3e2 100644 --- a/Lib/ctypes/__init__.py +++ b/Lib/ctypes/__init__.py @@ -302,8 +302,9 @@ def create_unicode_buffer(init, size=None): raise TypeError(init) -# XXX Deprecated def SetPointerType(pointer, cls): + import warnings + warnings._deprecated("ctypes.SetPointerType", remove=(3, 15)) if _pointer_type_cache.get(cls, None) is not None: raise RuntimeError("This type already exists in the cache") if id(pointer) not in _pointer_type_cache: @@ -312,8 +313,9 @@ def SetPointerType(pointer, cls): _pointer_type_cache[cls] = pointer del _pointer_type_cache[id(pointer)] -# XXX Deprecated def ARRAY(typ, len): + import warnings + warnings._deprecated("ctypes.ARRAY", remove=(3, 15)) return typ * len ################################################################ @@ -344,6 +346,8 @@ def __init__(self, name, mode=DEFAULT_MODE, handle=None, use_errno=False, use_last_error=False, winmode=None): + if name: + name = _os.fspath(name) self._name = name flags = self._func_flags_ if use_errno: @@ -444,7 +448,10 @@ def __init__(self, dlltype): def __getattr__(self, name): if name[0] == '_': raise AttributeError(name) - dll = self._dlltype(name) + try: + dll = self._dlltype(name) + except OSError: + raise AttributeError(name) setattr(self, name, dll) return dll diff --git a/Lib/ctypes/_aix.py b/Lib/ctypes/_aix.py index fc3e95cbcc88a53..ee790f713a9ee3a 100644 --- a/Lib/ctypes/_aix.py 
+++ b/Lib/ctypes/_aix.py @@ -108,12 +108,8 @@ def get_ld_headers(file): p = Popen(["/usr/bin/dump", f"-X{AIX_ABI}", "-H", file], universal_newlines=True, stdout=PIPE, stderr=DEVNULL) # be sure to read to the end-of-file - getting all entries - while True: - ld_header = get_ld_header(p) - if ld_header: - ldr_headers.append((ld_header, get_ld_header_info(p))) - else: - break + while ld_header := get_ld_header(p): + ldr_headers.append((ld_header, get_ld_header_info(p))) p.stdout.close() p.wait() return ldr_headers diff --git a/Lib/ctypes/_endian.py b/Lib/ctypes/_endian.py index 34dee64b1a65a69..3febb3118b82307 100644 --- a/Lib/ctypes/_endian.py +++ b/Lib/ctypes/_endian.py @@ -1,5 +1,5 @@ import sys -from ctypes import * +from ctypes import Array, Structure, Union _array_type = type(Array) @@ -37,7 +37,7 @@ class _swapped_union_meta(_swapped_meta, type(Union)): pass ################################################################ # Note: The Structure metaclass checks for the *presence* (not the -# value!) of a _swapped_bytes_ attribute to determine the bit order in +# value!) of a _swappedbytes_ attribute to determine the bit order in # structures containing bit fields. if sys.byteorder == "little": diff --git a/Lib/ctypes/wintypes.py b/Lib/ctypes/wintypes.py index c619d27596d40b6..9c4e721438aad5d 100644 --- a/Lib/ctypes/wintypes.py +++ b/Lib/ctypes/wintypes.py @@ -1,7 +1,7 @@ # The most useful windows datatypes import ctypes -BYTE = ctypes.c_byte +BYTE = ctypes.c_ubyte WORD = ctypes.c_ushort DWORD = ctypes.c_ulong diff --git a/Lib/curses/textpad.py b/Lib/curses/textpad.py index 2079953a06614ba..aa87061b8d749e9 100644 --- a/Lib/curses/textpad.py +++ b/Lib/curses/textpad.py @@ -102,7 +102,10 @@ def do_command(self, ch): self._insert_printable_char(ch) elif ch == curses.ascii.SOH: # ^a self.win.move(y, 0) - elif ch in (curses.ascii.STX,curses.KEY_LEFT, curses.ascii.BS,curses.KEY_BACKSPACE): + elif ch in (curses.ascii.STX,curses.KEY_LEFT, + curses.ascii.BS, + curses.KEY_BACKSPACE, + curses.ascii.DEL): if x > 0: self.win.move(y, x-1) elif y == 0: @@ -111,7 +114,7 @@ def do_command(self, ch): self.win.move(y-1, self._end_of_line(y-1)) else: self.win.move(y-1, self.maxx) - if ch in (curses.ascii.BS, curses.KEY_BACKSPACE): + if ch in (curses.ascii.BS, curses.KEY_BACKSPACE, curses.ascii.DEL): self.win.delch() elif ch == curses.ascii.EOT: # ^d self.win.delch() diff --git a/Lib/dataclasses.py b/Lib/dataclasses.py index b54e16984cb15c1..2fba32b5ffbc1e2 100644 --- a/Lib/dataclasses.py +++ b/Lib/dataclasses.py @@ -4,7 +4,6 @@ import types import inspect import keyword -import builtins import functools import itertools import abc @@ -223,6 +222,49 @@ def __repr__(self): # https://bugs.python.org/issue33453 for details. _MODULE_IDENTIFIER_RE = re.compile(r'^(?:\s*(\w+)\s*\.)?\s*(\w+)') +# Atomic immutable types which don't require any recursive handling and for which deepcopy +# returns the same object. We can provide a fast-path for these types in asdict and astuple. +_ATOMIC_TYPES = frozenset({ + # Common JSON Serializable types + types.NoneType, + bool, + int, + float, + str, + # Other common types + complex, + bytes, + # Other types that are also unaffected by deepcopy + types.EllipsisType, + types.NotImplementedType, + types.CodeType, + types.BuiltinFunctionType, + types.FunctionType, + type, + range, + property, +}) + +# This function's logic is copied from "recursive_repr" function in +# reprlib module to avoid dependency. 
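# The moved helper above mirrors reprlib.recursive_repr(); a hedged sketch of
# the behaviour that guard provides when an object ends up containing itself.
import reprlib

class Node:
    def __init__(self):
        self.child = None

    @reprlib.recursive_repr()
    def __repr__(self):
        return f"Node(child={self.child!r})"

n = Node()
n.child = n        # self-referential structure
print(repr(n))     # Node(child=...) rather than infinite recursion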
+def _recursive_repr(user_function): + # Decorator to make a repr function return "..." for a recursive + # call. + repr_running = set() + + @functools.wraps(user_function) + def wrapper(self): + key = id(self), _thread.get_ident() + if key in repr_running: + return '...' + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + return wrapper + class InitVar: __slots__ = ('type', ) @@ -280,6 +322,7 @@ def __init__(self, default, default_factory, init, repr, hash, compare, self.kw_only = kw_only self._field_type = None + @_recursive_repr def __repr__(self): return ('Field(' f'name={self.name!r},' @@ -403,27 +446,6 @@ def _tuple_str(obj_name, fields): return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)' -# This function's logic is copied from "recursive_repr" function in -# reprlib module to avoid dependency. -def _recursive_repr(user_function): - # Decorator to make a repr function return "..." for a recursive - # call. - repr_running = set() - - @functools.wraps(user_function) - def wrapper(self): - key = id(self), _thread.get_ident() - if key in repr_running: - return '...' - repr_running.add(key) - try: - result = user_function(self) - finally: - repr_running.discard(key) - return result - return wrapper - - def _create_fn(name, args, body, *, globals=None, locals=None, return_type=MISSING): # Note that we may mutate locals. Callers beware! @@ -433,8 +455,8 @@ def _create_fn(name, args, body, *, globals=None, locals=None, locals = {} return_annotation = '' if return_type is not MISSING: - locals['_return_type'] = return_type - return_annotation = '->_return_type' + locals['__dataclass_return_type__'] = return_type + return_annotation = '->__dataclass_return_type__' args = ','.join(args) body = '\n'.join(f' {b}' for b in body) @@ -468,14 +490,14 @@ def _field_init(f, frozen, globals, self_name, slots): # Return the text of the line in the body of __init__ that will # initialize this field. - default_name = f'_dflt_{f.name}' + default_name = f'__dataclass_dflt_{f.name}__' if f.default_factory is not MISSING: if f.init: # This field has a default factory. If a parameter is # given, use it. If not, call the factory. globals[default_name] = f.default_factory value = (f'{default_name}() ' - f'if {f.name} is _HAS_DEFAULT_FACTORY ' + f'if {f.name} is __dataclass_HAS_DEFAULT_FACTORY__ ' f'else {f.name}') else: # This is a field that's not in the __init__ params, but @@ -536,11 +558,11 @@ def _init_param(f): elif f.default is not MISSING: # There's a default, this will be the name that's used to look # it up. - default = f'=_dflt_{f.name}' + default = f'=__dataclass_dflt_{f.name}__' elif f.default_factory is not MISSING: # There's a factory function. Set a marker. - default = '=_HAS_DEFAULT_FACTORY' - return f'{f.name}:_type_{f.name}{default}' + default = '=__dataclass_HAS_DEFAULT_FACTORY__' + return f'{f.name}:__dataclass_type_{f.name}__{default}' def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init, @@ -553,20 +575,19 @@ def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init, # message, and future-proofs us in case we build up the function # using ast. - seen_default = False + seen_default = None for f in std_fields: # Only consider the non-kw-only fields in the __init__ call. 
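# Sketch of why only the non-keyword-only fields are checked here: a
# keyword-only field without a default may follow a field that has one.
from dataclasses import dataclass, field

@dataclass
class Config:
    retries: int = 3                       # positional field with a default
    timeout: float = field(kw_only=True)   # no default, but keyword-only: allowed

print(Config(timeout=1.5))                 # Config(retries=3, timeout=1.5)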
if f.init: if not (f.default is MISSING and f.default_factory is MISSING): - seen_default = True + seen_default = f elif seen_default: raise TypeError(f'non-default argument {f.name!r} ' - 'follows default argument') + f'follows default argument {seen_default.name!r}') - locals = {f'_type_{f.name}': f.type for f in fields} + locals = {f'__dataclass_type_{f.name}__': f.type for f in fields} locals.update({ - 'MISSING': MISSING, - '_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY, + '__dataclass_HAS_DEFAULT_FACTORY__': _HAS_DEFAULT_FACTORY, '__dataclass_builtins_object__': object, }) @@ -606,7 +627,7 @@ def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init, def _repr_fn(fields, globals): fn = _create_fn('__repr__', ('self',), - ['return self.__class__.__qualname__ + f"(' + + ['return f"{self.__class__.__qualname__}(' + ', '.join([f"{f.name}={{self.{f.name}!r}}" for f in fields]) + ')"'], @@ -617,21 +638,19 @@ def _repr_fn(fields, globals): def _frozen_get_del_attr(cls, fields, globals): locals = {'cls': cls, 'FrozenInstanceError': FrozenInstanceError} + condition = 'type(self) is cls' if fields: - fields_str = '(' + ','.join(repr(f.name) for f in fields) + ',)' - else: - # Special case for the zero-length tuple. - fields_str = '()' + condition += ' or name in {' + ', '.join(repr(f.name) for f in fields) + '}' return (_create_fn('__setattr__', ('self', 'name', 'value'), - (f'if type(self) is cls or name in {fields_str}:', + (f'if {condition}:', ' raise FrozenInstanceError(f"cannot assign to field {name!r}")', f'super(cls, self).__setattr__(name, value)'), locals=locals, globals=globals), _create_fn('__delattr__', ('self', 'name'), - (f'if type(self) is cls or name in {fields_str}:', + (f'if {condition}:', ' raise FrozenInstanceError(f"cannot delete field {name!r}")', f'super(cls, self).__delattr__(name)'), locals=locals, @@ -925,8 +944,11 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, # Find our base classes in reverse MRO order, and exclude # ourselves. In reversed order so that more derived classes # override earlier field definitions in base classes. As long as - # we're iterating over them, see if any are frozen. + # we're iterating over them, see if all or any of them are frozen. any_frozen_base = False + # By default `all_frozen_bases` is `None` to represent a case, + # where some dataclasses does not have any bases with `_FIELDS` + all_frozen_bases = None has_dataclass_bases = False for b in cls.__mro__[-1:0:-1]: # Only process classes that have been processed by our @@ -936,8 +958,11 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, has_dataclass_bases = True for f in base_fields.values(): fields[f.name] = f - if getattr(b, _PARAMS).frozen: - any_frozen_base = True + if all_frozen_bases is None: + all_frozen_bases = True + current_frozen = getattr(b, _PARAMS).frozen + all_frozen_bases = all_frozen_bases and current_frozen + any_frozen_base = any_frozen_base or current_frozen # Annotations defined specifically in this class (not in base classes). # @@ -1006,7 +1031,7 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, 'frozen one') # Raise an exception if we're frozen, but none of our bases are. - if not any_frozen_base and frozen: + if all_frozen_bases is False and frozen: raise TypeError('cannot inherit frozen dataclass from a ' 'non-frozen one') @@ -1017,7 +1042,7 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, # Was this class defined with an explicit __hash__? 
Note that if # __eq__ is defined in this class, then python will automatically # set __hash__ to None. This is a heuristic, as it's possible - # that such a __hash__ == None was not auto-generated, but it + # that such a __hash__ == None was not auto-generated, but it's # close enough. class_hash = cls.__dict__.get('__hash__', MISSING) has_explicit_hash = not (class_hash is MISSING or @@ -1054,6 +1079,7 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, globals, slots, )) + _set_new_attribute(cls, '__replace__', _replace) # Get the fields as a list, and include only real fields. This is # used in all of the following methods. @@ -1066,13 +1092,17 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, if eq: # Create __eq__ method. There's no need for a __ne__ method, # since python will call __eq__ and negate it. - flds = [f for f in field_list if f.compare] - self_tuple = _tuple_str('self', flds) - other_tuple = _tuple_str('other', flds) - _set_new_attribute(cls, '__eq__', - _cmp_fn('__eq__', '==', - self_tuple, other_tuple, - globals=globals)) + cmp_fields = (field for field in field_list if field.compare) + terms = [f'self.{field.name}==other.{field.name}' for field in cmp_fields] + field_comparisons = ' and '.join(terms) or 'True' + body = [f'if other.__class__ is self.__class__:', + f' return {field_comparisons}', + f'return NotImplemented'] + func = _create_fn('__eq__', + ('self', 'other'), + body, + globals=globals) + _set_new_attribute(cls, '__eq__', func) if order: # Create and set the ordering methods. @@ -1109,8 +1139,13 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, if not getattr(cls, '__doc__'): # Create a class doc-string. - cls.__doc__ = (cls.__name__ + - str(inspect.signature(cls)).replace(' -> None', '')) + try: + # In some cases fetching a signature is not possible. + # But, we surely should not fail in this case. + text_sig = str(inspect.signature(cls)).replace(' -> None', '') + except (TypeError, ValueError): + text_sig = '' + cls.__doc__ = (cls.__name__ + text_sig) if match_args: # I could probably compute this once @@ -1192,6 +1227,9 @@ def _add_slots(cls, is_frozen, weakref_slot): # Remove __dict__ itself. cls_dict.pop('__dict__', None) + # Clear existing `__weakref__` descriptor, it belongs to a previous type: + cls_dict.pop('__weakref__', None) # gh-102069 + # And finally create the class. qualname = getattr(cls, '__qualname__', None) cls = type(cls)(cls.__name__, cls.__bases__, cls_dict) @@ -1200,8 +1238,10 @@ def _add_slots(cls, is_frozen, weakref_slot): if is_frozen: # Need this for pickling frozen classes with slots. - cls.__getstate__ = _dataclass_getstate - cls.__setstate__ = _dataclass_setstate + if '__getstate__' not in cls_dict: + cls.__getstate__ = _dataclass_getstate + if '__setstate__' not in cls_dict: + cls.__setstate__ = _dataclass_setstate return cls @@ -1248,7 +1288,7 @@ def fields(class_or_instance): try: fields = getattr(class_or_instance, _FIELDS) except AttributeError: - raise TypeError('must be called with a dataclass type or instance') + raise TypeError('must be called with a dataclass type or instance') from None # Exclude pseudo-fields. Note that fields is sorted by insertion # order, so the order of the tuple is as the fields were defined. @@ -1284,7 +1324,7 @@ class C: If given, 'dict_factory' will be used instead of built-in dict. The function applies recursively to field values that are dataclass instances. 
This will also look into built-in containers: - tuples, lists, and dicts. + tuples, lists, and dicts. Other objects are copied with 'copy.deepcopy()'. """ if not _is_dataclass_instance(obj): raise TypeError("asdict() should be called on dataclass instances") @@ -1292,12 +1332,21 @@ class C: def _asdict_inner(obj, dict_factory): - if _is_dataclass_instance(obj): - result = [] - for f in fields(obj): - value = _asdict_inner(getattr(obj, f.name), dict_factory) - result.append((f.name, value)) - return dict_factory(result) + if type(obj) in _ATOMIC_TYPES: + return obj + elif _is_dataclass_instance(obj): + # fast path for the common case + if dict_factory is dict: + return { + f.name: _asdict_inner(getattr(obj, f.name), dict) + for f in fields(obj) + } + else: + result = [] + for f in fields(obj): + value = _asdict_inner(getattr(obj, f.name), dict_factory) + result.append((f.name, value)) + return dict_factory(result) elif isinstance(obj, tuple) and hasattr(obj, '_fields'): # obj is a namedtuple. Recurse into it, but the returned # object is another namedtuple of the same type. This is @@ -1324,15 +1373,14 @@ def _asdict_inner(obj, dict_factory): # generator (which is not true for namedtuples, handled # above). return type(obj)(_asdict_inner(v, dict_factory) for v in obj) - elif isinstance(obj, dict) and hasattr(type(obj), 'default_factory'): - # obj is a defaultdict, which has a different constructor from - # dict as it requires the default_factory as its first arg. - # https://bugs.python.org/issue35540 - result = type(obj)(getattr(obj, 'default_factory')) - for k, v in obj.items(): - result[_asdict_inner(k, dict_factory)] = _asdict_inner(v, dict_factory) - return result elif isinstance(obj, dict): + if hasattr(type(obj), 'default_factory'): + # obj is a defaultdict, which has a different constructor from + # dict as it requires the default_factory as its first arg. + result = type(obj)(getattr(obj, 'default_factory')) + for k, v in obj.items(): + result[_asdict_inner(k, dict_factory)] = _asdict_inner(v, dict_factory) + return result return type(obj)((_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory)) for k, v in obj.items()) @@ -1356,7 +1404,7 @@ class C: If given, 'tuple_factory' will be used instead of built-in tuple. The function applies recursively to field values that are dataclass instances. This will also look into built-in containers: - tuples, lists, and dicts. + tuples, lists, and dicts. Other objects are copied with 'copy.deepcopy()'. """ if not _is_dataclass_instance(obj): @@ -1365,7 +1413,9 @@ class C: def _astuple_inner(obj, tuple_factory): - if _is_dataclass_instance(obj): + if type(obj) in _ATOMIC_TYPES: + return obj + elif _is_dataclass_instance(obj): result = [] for f in fields(obj): value = _astuple_inner(getattr(obj, f.name), tuple_factory) @@ -1385,7 +1435,15 @@ def _astuple_inner(obj, tuple_factory): # above). return type(obj)(_astuple_inner(v, tuple_factory) for v in obj) elif isinstance(obj, dict): - return type(obj)((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory)) + obj_type = type(obj) + if hasattr(obj_type, 'default_factory'): + # obj is a defaultdict, which has a different constructor from + # dict as it requires the default_factory as its first arg. 
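The defaultdict handling that the hunks above fold into the generic dict branches of _asdict_inner and _astuple_inner keeps the behaviour from bpo-35540: the rebuilt mapping is constructed via type(obj)(default_factory), so it is still a defaultdict with its factory intact. A small usage sketch through the public asdict() API:

    from collections import defaultdict
    from dataclasses import asdict, dataclass, field

    @dataclass
    class Tally:
        counts: defaultdict = field(default_factory=lambda: defaultdict(int))

    t = Tally()
    t.counts["spam"] += 1
    d = asdict(t)
    print(d["counts"])                   # defaultdict(<class 'int'>, {'spam': 1})
    print(d["counts"].default_factory)   # <class 'int'> -- the factory survives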
+ result = obj_type(getattr(obj, 'default_factory')) + for k, v in obj.items(): + result[_astuple_inner(k, tuple_factory)] = _astuple_inner(v, tuple_factory) + return result + return obj_type((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory)) for k, v in obj.items()) else: return copy.deepcopy(obj) @@ -1394,7 +1452,7 @@ def _astuple_inner(obj, tuple_factory): def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, match_args=True, kw_only=False, slots=False, - weakref_slot=False): + weakref_slot=False, module=None): """Return a new dynamically created dataclass. The dataclass name will be 'cls_name'. 'fields' is an iterable @@ -1414,8 +1472,11 @@ class C(Base): For the bases and namespace parameters, see the builtin type() function. - The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to - dataclass(). + The parameters init, repr, eq, order, unsafe_hash, frozen, match_args, kw_only, + slots, and weakref_slot are passed to dataclass(). + + If module parameter is defined, the '__module__' attribute of the dataclass is + set to that value. """ if namespace is None: @@ -1458,6 +1519,19 @@ def exec_body_callback(ns): # of generic dataclasses. cls = types.new_class(cls_name, bases, {}, exec_body_callback) + # For pickling to work, the __module__ variable needs to be set to the frame + # where the dataclass is created. + if module is None: + try: + module = sys._getframemodulename(1) or '__main__' + except AttributeError: + try: + module = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + if module is not None: + cls.__module__ = module + # Apply the normal decorator. return dataclass(cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen, @@ -1479,13 +1553,15 @@ class C: c1 = replace(c, x=3) assert c1.x == 3 and c1.y == 2 """ + if not _is_dataclass_instance(obj): + raise TypeError("replace() should be called on dataclass instances") + return _replace(obj, **changes) + +def _replace(obj, /, **changes): # We're going to mutate 'changes', but that's okay because it's a # new dict, even if called with 'replace(obj, **my_changes)'. - if not _is_dataclass_instance(obj): - raise TypeError("replace() should be called on dataclass instances") - # It's an error to have init=False fields in 'changes'. # If a field is not in 'changes', read its value from the provided obj. @@ -1497,15 +1573,15 @@ class C: if not f.init: # Error if this field is specified in changes. if f.name in changes: - raise ValueError(f'field {f.name} is declared with ' - 'init=False, it cannot be specified with ' - 'replace()') + raise TypeError(f'field {f.name} is declared with ' + f'init=False, it cannot be specified with ' + f'replace()') continue if f.name not in changes: if f._field_type is _FIELD_INITVAR and f.default is MISSING: - raise ValueError(f"InitVar {f.name!r} " - 'must be specified with replace()') + raise TypeError(f"InitVar {f.name!r} " + f'must be specified with replace()') changes[f.name] = getattr(obj, f.name) # Create the new object, which calls __init__() and diff --git a/Lib/datetime.py b/Lib/datetime.py index 01742680a95bb66..a33d2d724cb33d2 100644 --- a/Lib/datetime.py +++ b/Lib/datetime.py @@ -1,2642 +1,9 @@ -"""Concrete date/time and related types. - -See http://www.iana.org/time-zones/repository/tz-link.html for -time zone and DST data sources. 
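make_dataclass() gains a module parameter in the hunk above (with a sys._getframemodulename fallback), so that dynamically created classes carry a real __module__ and can be pickled by reference. A sketch of the intended usage, assuming a 3.12+ interpreter; the Point class and its fields are illustrative:

    import pickle
    from dataclasses import make_dataclass

    # Passing module explicitly (or relying on the frame lookup) lets pickle
    # find the class again by __module__ and __qualname__.
    Point = make_dataclass("Point", [("x", int), ("y", int)], module=__name__)

    p = Point(1, 2)
    print(pickle.loads(pickle.dumps(p)))   # Point(x=1, y=2)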
-""" - -__all__ = ("date", "datetime", "time", "timedelta", "timezone", "tzinfo", - "MINYEAR", "MAXYEAR", "UTC") - - -import time as _time -import math as _math -import sys -from operator import index as _index - -def _cmp(x, y): - return 0 if x == y else 1 if x > y else -1 - -MINYEAR = 1 -MAXYEAR = 9999 -_MAXORDINAL = 3652059 # date.max.toordinal() - -# Utility functions, adapted from Python's Demo/classes/Dates.py, which -# also assumes the current Gregorian calendar indefinitely extended in -# both directions. Difference: Dates.py calls January 1 of year 0 day -# number 1. The code here calls January 1 of year 1 day number 1. This is -# to match the definition of the "proleptic Gregorian" calendar in Dershowitz -# and Reingold's "Calendrical Calculations", where it's the base calendar -# for all computations. See the book for algorithms for converting between -# proleptic Gregorian ordinals and many other calendar systems. - -# -1 is a placeholder for indexing purposes. -_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] - -_DAYS_BEFORE_MONTH = [-1] # -1 is a placeholder for indexing purposes. -dbm = 0 -for dim in _DAYS_IN_MONTH[1:]: - _DAYS_BEFORE_MONTH.append(dbm) - dbm += dim -del dbm, dim - -def _is_leap(year): - "year -> 1 if leap year, else 0." - return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) - -def _days_before_year(year): - "year -> number of days before January 1st of year." - y = year - 1 - return y*365 + y//4 - y//100 + y//400 - -def _days_in_month(year, month): - "year, month -> number of days in that month in that year." - assert 1 <= month <= 12, month - if month == 2 and _is_leap(year): - return 29 - return _DAYS_IN_MONTH[month] - -def _days_before_month(year, month): - "year, month -> number of days in year preceding first day of month." - assert 1 <= month <= 12, 'month must be in 1..12' - return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year)) - -def _ymd2ord(year, month, day): - "year, month, day -> ordinal, considering 01-Jan-0001 as day 1." - assert 1 <= month <= 12, 'month must be in 1..12' - dim = _days_in_month(year, month) - assert 1 <= day <= dim, ('day must be in 1..%d' % dim) - return (_days_before_year(year) + - _days_before_month(year, month) + - day) - -_DI400Y = _days_before_year(401) # number of days in 400 years -_DI100Y = _days_before_year(101) # " " " " 100 " -_DI4Y = _days_before_year(5) # " " " " 4 " - -# A 4-year cycle has an extra leap day over what we'd get from pasting -# together 4 single years. -assert _DI4Y == 4 * 365 + 1 - -# Similarly, a 400-year cycle has an extra leap day over what we'd get from -# pasting together 4 100-year cycles. -assert _DI400Y == 4 * _DI100Y + 1 - -# OTOH, a 100-year cycle has one fewer leap day than we'd get from -# pasting together 25 4-year cycles. -assert _DI100Y == 25 * _DI4Y - 1 - -def _ord2ymd(n): - "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." - - # n is a 1-based index, starting at 1-Jan-1. The pattern of leap years - # repeats exactly every 400 years. The basic strategy is to find the - # closest 400-year boundary at or before n, then work with the offset - # from that boundary to n. Life is much clearer if we subtract 1 from - # n first -- then the values of n at 400-year boundaries are exactly - # those divisible by _DI400Y: - # - # D M Y n n-1 - # -- --- ---- ---------- ---------------- - # 31 Dec -400 -_DI400Y -_DI400Y -1 - # 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary - # ... 
- # 30 Dec 000 -1 -2 - # 31 Dec 000 0 -1 - # 1 Jan 001 1 0 400-year boundary - # 2 Jan 001 2 1 - # 3 Jan 001 3 2 - # ... - # 31 Dec 400 _DI400Y _DI400Y -1 - # 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary - n -= 1 - n400, n = divmod(n, _DI400Y) - year = n400 * 400 + 1 # ..., -399, 1, 401, ... - - # Now n is the (non-negative) offset, in days, from January 1 of year, to - # the desired date. Now compute how many 100-year cycles precede n. - # Note that it's possible for n100 to equal 4! In that case 4 full - # 100-year cycles precede the desired day, which implies the desired - # day is December 31 at the end of a 400-year cycle. - n100, n = divmod(n, _DI100Y) - - # Now compute how many 4-year cycles precede it. - n4, n = divmod(n, _DI4Y) - - # And now how many single years. Again n1 can be 4, and again meaning - # that the desired day is December 31 at the end of the 4-year cycle. - n1, n = divmod(n, 365) - - year += n100 * 100 + n4 * 4 + n1 - if n1 == 4 or n100 == 4: - assert n == 0 - return year-1, 12, 31 - - # Now the year is correct, and n is the offset from January 1. We find - # the month via an estimate that's either exact or one too large. - leapyear = n1 == 3 and (n4 != 24 or n100 == 3) - assert leapyear == _is_leap(year) - month = (n + 50) >> 5 - preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear) - if preceding > n: # estimate is too large - month -= 1 - preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear) - n -= preceding - assert 0 <= n < _days_in_month(year, month) - - # Now the year and month are correct, and n is the offset from the - # start of that month: we're done! - return year, month, n+1 - -# Month and day names. For localized versions, see the calendar module. -_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", - "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] -_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] - - -def _build_struct_time(y, m, d, hh, mm, ss, dstflag): - wday = (_ymd2ord(y, m, d) + 6) % 7 - dnum = _days_before_month(y, m) + d - return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag)) - -def _format_time(hh, mm, ss, us, timespec='auto'): - specs = { - 'hours': '{:02d}', - 'minutes': '{:02d}:{:02d}', - 'seconds': '{:02d}:{:02d}:{:02d}', - 'milliseconds': '{:02d}:{:02d}:{:02d}.{:03d}', - 'microseconds': '{:02d}:{:02d}:{:02d}.{:06d}' - } - - if timespec == 'auto': - # Skip trailing microseconds when us==0. - timespec = 'microseconds' if us else 'seconds' - elif timespec == 'milliseconds': - us //= 1000 - try: - fmt = specs[timespec] - except KeyError: - raise ValueError('Unknown timespec value') - else: - return fmt.format(hh, mm, ss, us) - -def _format_offset(off, sep=':'): - s = '' - if off is not None: - if off.days < 0: - sign = "-" - off = -off - else: - sign = "+" - hh, mm = divmod(off, timedelta(hours=1)) - mm, ss = divmod(mm, timedelta(minutes=1)) - s += "%s%02d%s%02d" % (sign, hh, sep, mm) - if ss or ss.microseconds: - s += "%s%02d" % (sep, ss.seconds) - - if ss.microseconds: - s += '.%06d' % ss.microseconds - return s - -# Correctly substitute for %z and %Z escapes in strftime formats. -def _wrap_strftime(object, format, timetuple): - # Don't call utcoffset() or tzname() unless actually needed. - freplace = None # the string to use for %f - zreplace = None # the string to use for %z - colonzreplace = None # the string to use for %:z - Zreplace = None # the string to use for %Z - - # Scan format for %z, %:z and %Z escapes, replacing as needed. 
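%z, %:z and %Z are substituted by _wrap_strftime/_format_offset before the format string ever reaches time.strftime(), which does not handle them portably. Exercising the same behaviour through the public API (the offset chosen is arbitrary):

    from datetime import datetime, timedelta, timezone

    dt = datetime(2023, 6, 1, 9, 30,
                  tzinfo=timezone(timedelta(hours=5, minutes=30)))
    print(dt.strftime("%H:%M %z"))   # 09:30 +0530
    print(dt.strftime("%H:%M %Z"))   # 09:30 UTC+05:30
    # The ':' branch shown above handles "%:z" the same way, yielding "+05:30".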
- newformat = [] - push = newformat.append - i, n = 0, len(format) - while i < n: - ch = format[i] - i += 1 - if ch == '%': - if i < n: - ch = format[i] - i += 1 - if ch == 'f': - if freplace is None: - freplace = '%06d' % getattr(object, - 'microsecond', 0) - newformat.append(freplace) - elif ch == 'z': - if zreplace is None: - if hasattr(object, "utcoffset"): - zreplace = _format_offset(object.utcoffset(), sep="") - else: - zreplace = "" - assert '%' not in zreplace - newformat.append(zreplace) - elif ch == ':': - if i < n: - ch2 = format[i] - i += 1 - if ch2 == 'z': - if colonzreplace is None: - if hasattr(object, "utcoffset"): - colonzreplace = _format_offset(object.utcoffset(), sep=":") - else: - colonzreplace = "" - assert '%' not in colonzreplace - newformat.append(colonzreplace) - else: - push('%') - push(ch) - push(ch2) - elif ch == 'Z': - if Zreplace is None: - Zreplace = "" - if hasattr(object, "tzname"): - s = object.tzname() - if s is not None: - # strftime is going to have at this: escape % - Zreplace = s.replace('%', '%%') - newformat.append(Zreplace) - else: - push('%') - push(ch) - else: - push('%') - else: - push(ch) - newformat = "".join(newformat) - return _time.strftime(newformat, timetuple) - -# Helpers for parsing the result of isoformat() -def _is_ascii_digit(c): - return c in "0123456789" - -def _find_isoformat_datetime_separator(dtstr): - # See the comment in _datetimemodule.c:_find_isoformat_datetime_separator - len_dtstr = len(dtstr) - if len_dtstr == 7: - return 7 - - assert len_dtstr > 7 - date_separator = "-" - week_indicator = "W" - - if dtstr[4] == date_separator: - if dtstr[5] == week_indicator: - if len_dtstr < 8: - raise ValueError("Invalid ISO string") - if len_dtstr > 8 and dtstr[8] == date_separator: - if len_dtstr == 9: - raise ValueError("Invalid ISO string") - if len_dtstr > 10 and _is_ascii_digit(dtstr[10]): - # This is as far as we need to resolve the ambiguity for - # the moment - if we have YYYY-Www-##, the separator is - # either a hyphen at 8 or a number at 10. - # - # We'll assume it's a hyphen at 8 because it's way more - # likely that someone will use a hyphen as a separator than - # a number, but at this point it's really best effort - # because this is an extension of the spec anyway. - # TODO(pganssle): Document this - return 8 - return 10 - else: - # YYYY-Www (8) - return 8 - else: - # YYYY-MM-DD (10) - return 10 - else: - if dtstr[4] == week_indicator: - # YYYYWww (7) or YYYYWwwd (8) - idx = 7 - while idx < len_dtstr: - if not _is_ascii_digit(dtstr[idx]): - break - idx += 1 - - if idx < 9: - return idx - - if idx % 2 == 0: - # If the index of the last number is even, it's YYYYWwwd - return 7 - else: - return 8 - else: - # YYYYMMDD (8) - return 8 - - -def _parse_isoformat_date(dtstr): - # It is assumed that this is an ASCII-only string of lengths 7, 8 or 10, - # see the comment on Modules/_datetimemodule.c:_find_isoformat_datetime_separator - assert len(dtstr) in (7, 8, 10) - year = int(dtstr[0:4]) - has_sep = dtstr[4] == '-' - - pos = 4 + has_sep - if dtstr[pos:pos + 1] == "W": - # YYYY-?Www-?D? 
- pos += 1 - weekno = int(dtstr[pos:pos + 2]) - pos += 2 - - dayno = 1 - if len(dtstr) > pos: - if (dtstr[pos:pos + 1] == '-') != has_sep: - raise ValueError("Inconsistent use of dash separator") - - pos += has_sep - - dayno = int(dtstr[pos:pos + 1]) - - return list(_isoweek_to_gregorian(year, weekno, dayno)) - else: - month = int(dtstr[pos:pos + 2]) - pos += 2 - if (dtstr[pos:pos + 1] == "-") != has_sep: - raise ValueError("Inconsistent use of dash separator") - - pos += has_sep - day = int(dtstr[pos:pos + 2]) - - return [year, month, day] - - -_FRACTION_CORRECTION = [100000, 10000, 1000, 100, 10] - - -def _parse_hh_mm_ss_ff(tstr): - # Parses things of the form HH[:?MM[:?SS[{.,}fff[fff]]]] - len_str = len(tstr) - - time_comps = [0, 0, 0, 0] - pos = 0 - for comp in range(0, 3): - if (len_str - pos) < 2: - raise ValueError("Incomplete time component") - - time_comps[comp] = int(tstr[pos:pos+2]) - - pos += 2 - next_char = tstr[pos:pos+1] - - if comp == 0: - has_sep = next_char == ':' - - if not next_char or comp >= 2: - break - - if has_sep and next_char != ':': - raise ValueError("Invalid time separator: %c" % next_char) - - pos += has_sep - - if pos < len_str: - if tstr[pos] not in '.,': - raise ValueError("Invalid microsecond component") - else: - pos += 1 - - len_remainder = len_str - pos - - if len_remainder >= 6: - to_parse = 6 - else: - to_parse = len_remainder - - time_comps[3] = int(tstr[pos:(pos+to_parse)]) - if to_parse < 6: - time_comps[3] *= _FRACTION_CORRECTION[to_parse-1] - if (len_remainder > to_parse - and not all(map(_is_ascii_digit, tstr[(pos+to_parse):]))): - raise ValueError("Non-digit values in unparsed fraction") - - return time_comps - -def _parse_isoformat_time(tstr): - # Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]] - len_str = len(tstr) - if len_str < 2: - raise ValueError("Isoformat time too short") - - # This is equivalent to re.search('[+-Z]', tstr), but faster - tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1 or tstr.find('Z') + 1) - timestr = tstr[:tz_pos-1] if tz_pos > 0 else tstr - - time_comps = _parse_hh_mm_ss_ff(timestr) - - tzi = None - if tz_pos == len_str and tstr[-1] == 'Z': - tzi = timezone.utc - elif tz_pos > 0: - tzstr = tstr[tz_pos:] - - # Valid time zone strings are: - # HH len: 2 - # HHMM len: 4 - # HH:MM len: 5 - # HHMMSS len: 6 - # HHMMSS.f+ len: 7+ - # HH:MM:SS len: 8 - # HH:MM:SS.f+ len: 10+ - - if len(tzstr) in (0, 1, 3): - raise ValueError("Malformed time zone string") - - tz_comps = _parse_hh_mm_ss_ff(tzstr) - - if all(x == 0 for x in tz_comps): - tzi = timezone.utc - else: - tzsign = -1 if tstr[tz_pos - 1] == '-' else 1 - - td = timedelta(hours=tz_comps[0], minutes=tz_comps[1], - seconds=tz_comps[2], microseconds=tz_comps[3]) - - tzi = timezone(tzsign * td) - - time_comps.append(tzi) - - return time_comps - -# tuple[int, int, int] -> tuple[int, int, int] version of date.fromisocalendar -def _isoweek_to_gregorian(year, week, day): - # Year is bounded this way because 9999-12-31 is (9999, 52, 5) - if not MINYEAR <= year <= MAXYEAR: - raise ValueError(f"Year is out of range: {year}") - - if not 0 < week < 53: - out_of_range = True - - if week == 53: - # ISO years have 53 weeks in them on years starting with a - # Thursday and leap years starting on a Wednesday - first_weekday = _ymd2ord(year, 1, 1) % 7 - if (first_weekday == 4 or (first_weekday == 3 and - _is_leap(year))): - out_of_range = False - - if out_of_range: - raise ValueError(f"Invalid week: {week}") - - if not 0 < day < 8: - raise ValueError(f"Invalid 
weekday: {day} (range is [1, 7])") - - # Now compute the offset from (Y, 1, 1) in days: - day_offset = (week - 1) * 7 + (day - 1) - - # Calculate the ordinal day for monday, week 1 - day_1 = _isoweek1monday(year) - ord_day = day_1 + day_offset - - return _ord2ymd(ord_day) - - -# Just raise TypeError if the arg isn't None or a string. -def _check_tzname(name): - if name is not None and not isinstance(name, str): - raise TypeError("tzinfo.tzname() must return None or string, " - "not '%s'" % type(name)) - -# name is the offset-producing method, "utcoffset" or "dst". -# offset is what it returned. -# If offset isn't None or timedelta, raises TypeError. -# If offset is None, returns None. -# Else offset is checked for being in range. -# If it is, its integer value is returned. Else ValueError is raised. -def _check_utc_offset(name, offset): - assert name in ("utcoffset", "dst") - if offset is None: - return - if not isinstance(offset, timedelta): - raise TypeError("tzinfo.%s() must return None " - "or timedelta, not '%s'" % (name, type(offset))) - if not -timedelta(1) < offset < timedelta(1): - raise ValueError("%s()=%s, must be strictly between " - "-timedelta(hours=24) and timedelta(hours=24)" % - (name, offset)) - -def _check_date_fields(year, month, day): - year = _index(year) - month = _index(month) - day = _index(day) - if not MINYEAR <= year <= MAXYEAR: - raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year) - if not 1 <= month <= 12: - raise ValueError('month must be in 1..12', month) - dim = _days_in_month(year, month) - if not 1 <= day <= dim: - raise ValueError('day must be in 1..%d' % dim, day) - return year, month, day - -def _check_time_fields(hour, minute, second, microsecond, fold): - hour = _index(hour) - minute = _index(minute) - second = _index(second) - microsecond = _index(microsecond) - if not 0 <= hour <= 23: - raise ValueError('hour must be in 0..23', hour) - if not 0 <= minute <= 59: - raise ValueError('minute must be in 0..59', minute) - if not 0 <= second <= 59: - raise ValueError('second must be in 0..59', second) - if not 0 <= microsecond <= 999999: - raise ValueError('microsecond must be in 0..999999', microsecond) - if fold not in (0, 1): - raise ValueError('fold must be either 0 or 1', fold) - return hour, minute, second, microsecond, fold - -def _check_tzinfo_arg(tz): - if tz is not None and not isinstance(tz, tzinfo): - raise TypeError("tzinfo argument must be None or of a tzinfo subclass") - -def _cmperror(x, y): - raise TypeError("can't compare '%s' to '%s'" % ( - type(x).__name__, type(y).__name__)) - -def _divide_and_round(a, b): - """divide a by b and round result to the nearest integer - - When the ratio is exactly half-way between two integers, - the even integer is returned. - """ - # Based on the reference implementation for divmod_near - # in Objects/longobject.c. - q, r = divmod(a, b) - # round up if either r / b > 0.5, or r / b == 0.5 and q is odd. - # The expression r / b > 0.5 is equivalent to 2 * r > b if b is - # positive, 2 * r < b if b negative. - r *= 2 - greater_than_half = r > b if b > 0 else r < b - if greater_than_half or r == b and q % 2 == 1: - q += 1 - - return q - - -class timedelta: - """Represent the difference between two datetime objects. 
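_isoweek_to_gregorian above only admits week 53 for ISO years that start on a Thursday (or on a Wednesday in a leap year); it is the helper behind date.fromisocalendar(). A round-trip sketch for one such year:

    from datetime import date

    d = date.fromisocalendar(2004, 53, 5)   # 2004 starts on a Thursday, so week 53 exists
    print(d)                                # 2004-12-31
    print(tuple(d.isocalendar()))           # (2004, 53, 5)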
- - Supported operators: - - - add, subtract timedelta - - unary plus, minus, abs - - compare to timedelta - - multiply, divide by int - - In addition, datetime supports subtraction of two datetime objects - returning a timedelta, and addition or subtraction of a datetime - and a timedelta giving a datetime. - - Representation: (days, seconds, microseconds). Why? Because I - felt like it. - """ - __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' - - def __new__(cls, days=0, seconds=0, microseconds=0, - milliseconds=0, minutes=0, hours=0, weeks=0): - # Doing this efficiently and accurately in C is going to be difficult - # and error-prone, due to ubiquitous overflow possibilities, and that - # C double doesn't have enough bits of precision to represent - # microseconds over 10K years faithfully. The code here tries to make - # explicit where go-fast assumptions can be relied on, in order to - # guide the C implementation; it's way more convoluted than speed- - # ignoring auto-overflow-to-long idiomatic Python could be. - - # XXX Check that all inputs are ints or floats. - - # Final values, all integer. - # s and us fit in 32-bit signed ints; d isn't bounded. - d = s = us = 0 - - # Normalize everything to days, seconds, microseconds. - days += weeks*7 - seconds += minutes*60 + hours*3600 - microseconds += milliseconds*1000 - - # Get rid of all fractions, and normalize s and us. - # Take a deep breath . - if isinstance(days, float): - dayfrac, days = _math.modf(days) - daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.)) - assert daysecondswhole == int(daysecondswhole) # can't overflow - s = int(daysecondswhole) - assert days == int(days) - d = int(days) - else: - daysecondsfrac = 0.0 - d = days - assert isinstance(daysecondsfrac, float) - assert abs(daysecondsfrac) <= 1.0 - assert isinstance(d, int) - assert abs(s) <= 24 * 3600 - # days isn't referenced again before redefinition - - if isinstance(seconds, float): - secondsfrac, seconds = _math.modf(seconds) - assert seconds == int(seconds) - seconds = int(seconds) - secondsfrac += daysecondsfrac - assert abs(secondsfrac) <= 2.0 - else: - secondsfrac = daysecondsfrac - # daysecondsfrac isn't referenced again - assert isinstance(secondsfrac, float) - assert abs(secondsfrac) <= 2.0 - - assert isinstance(seconds, int) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) # can't overflow - assert isinstance(s, int) - assert abs(s) <= 2 * 24 * 3600 - # seconds isn't referenced again before redefinition - - usdouble = secondsfrac * 1e6 - assert abs(usdouble) < 2.1e6 # exact value not critical - # secondsfrac isn't referenced again - - if isinstance(microseconds, float): - microseconds = round(microseconds + usdouble) - seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += seconds - else: - microseconds = int(microseconds) - seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += seconds - microseconds = round(microseconds + usdouble) - assert isinstance(s, int) - assert isinstance(microseconds, int) - assert abs(s) <= 3 * 24 * 3600 - assert abs(microseconds) < 3.1e6 - - # Just a little bit of carrying possible for microseconds and seconds. 
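timedelta.__new__ normalizes every accepted unit (weeks, hours, minutes, milliseconds, floats included) down to the single canonical (days, seconds, microseconds) triple, with 0 <= seconds < 86400 and 0 <= microseconds < 1000000. For example:

    from datetime import timedelta

    td = timedelta(hours=36, minutes=90, microseconds=1_500_000)
    print(td.days, td.seconds, td.microseconds)   # 1 48601 500000
    print(td)                                     # 1 day, 13:30:01.500000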
- seconds, us = divmod(microseconds, 1000000) - s += seconds - days, s = divmod(s, 24*3600) - d += days - - assert isinstance(d, int) - assert isinstance(s, int) and 0 <= s < 24*3600 - assert isinstance(us, int) and 0 <= us < 1000000 - - if abs(d) > 999999999: - raise OverflowError("timedelta # of days is too large: %d" % d) - - self = object.__new__(cls) - self._days = d - self._seconds = s - self._microseconds = us - self._hashcode = -1 - return self - - def __repr__(self): - args = [] - if self._days: - args.append("days=%d" % self._days) - if self._seconds: - args.append("seconds=%d" % self._seconds) - if self._microseconds: - args.append("microseconds=%d" % self._microseconds) - if not args: - args.append('0') - return "%s.%s(%s)" % (self.__class__.__module__, - self.__class__.__qualname__, - ', '.join(args)) - - def __str__(self): - mm, ss = divmod(self._seconds, 60) - hh, mm = divmod(mm, 60) - s = "%d:%02d:%02d" % (hh, mm, ss) - if self._days: - def plural(n): - return n, abs(n) != 1 and "s" or "" - s = ("%d day%s, " % plural(self._days)) + s - if self._microseconds: - s = s + ".%06d" % self._microseconds - return s - - def total_seconds(self): - """Total seconds in the duration.""" - return ((self.days * 86400 + self.seconds) * 10**6 + - self.microseconds) / 10**6 - - # Read-only field accessors - @property - def days(self): - """days""" - return self._days - - @property - def seconds(self): - """seconds""" - return self._seconds - - @property - def microseconds(self): - """microseconds""" - return self._microseconds - - def __add__(self, other): - if isinstance(other, timedelta): - # for CPython compatibility, we cannot use - # our __class__ here, but need a real timedelta - return timedelta(self._days + other._days, - self._seconds + other._seconds, - self._microseconds + other._microseconds) - return NotImplemented - - __radd__ = __add__ - - def __sub__(self, other): - if isinstance(other, timedelta): - # for CPython compatibility, we cannot use - # our __class__ here, but need a real timedelta - return timedelta(self._days - other._days, - self._seconds - other._seconds, - self._microseconds - other._microseconds) - return NotImplemented - - def __rsub__(self, other): - if isinstance(other, timedelta): - return -self + other - return NotImplemented - - def __neg__(self): - # for CPython compatibility, we cannot use - # our __class__ here, but need a real timedelta - return timedelta(-self._days, - -self._seconds, - -self._microseconds) - - def __pos__(self): - return self - - def __abs__(self): - if self._days < 0: - return -self - else: - return self - - def __mul__(self, other): - if isinstance(other, int): - # for CPython compatibility, we cannot use - # our __class__ here, but need a real timedelta - return timedelta(self._days * other, - self._seconds * other, - self._microseconds * other) - if isinstance(other, float): - usec = self._to_microseconds() - a, b = other.as_integer_ratio() - return timedelta(0, 0, _divide_and_round(usec * a, b)) - return NotImplemented - - __rmul__ = __mul__ - - def _to_microseconds(self): - return ((self._days * (24*3600) + self._seconds) * 1000000 + - self._microseconds) - - def __floordiv__(self, other): - if not isinstance(other, (int, timedelta)): - return NotImplemented - usec = self._to_microseconds() - if isinstance(other, timedelta): - return usec // other._to_microseconds() - if isinstance(other, int): - return timedelta(0, 0, usec // other) - - def __truediv__(self, other): - if not isinstance(other, (int, float, timedelta)): - 
return NotImplemented - usec = self._to_microseconds() - if isinstance(other, timedelta): - return usec / other._to_microseconds() - if isinstance(other, int): - return timedelta(0, 0, _divide_and_round(usec, other)) - if isinstance(other, float): - a, b = other.as_integer_ratio() - return timedelta(0, 0, _divide_and_round(b * usec, a)) - - def __mod__(self, other): - if isinstance(other, timedelta): - r = self._to_microseconds() % other._to_microseconds() - return timedelta(0, 0, r) - return NotImplemented - - def __divmod__(self, other): - if isinstance(other, timedelta): - q, r = divmod(self._to_microseconds(), - other._to_microseconds()) - return q, timedelta(0, 0, r) - return NotImplemented - - # Comparisons of timedelta objects with other. - - def __eq__(self, other): - if isinstance(other, timedelta): - return self._cmp(other) == 0 - else: - return NotImplemented - - def __le__(self, other): - if isinstance(other, timedelta): - return self._cmp(other) <= 0 - else: - return NotImplemented - - def __lt__(self, other): - if isinstance(other, timedelta): - return self._cmp(other) < 0 - else: - return NotImplemented - - def __ge__(self, other): - if isinstance(other, timedelta): - return self._cmp(other) >= 0 - else: - return NotImplemented - - def __gt__(self, other): - if isinstance(other, timedelta): - return self._cmp(other) > 0 - else: - return NotImplemented - - def _cmp(self, other): - assert isinstance(other, timedelta) - return _cmp(self._getstate(), other._getstate()) - - def __hash__(self): - if self._hashcode == -1: - self._hashcode = hash(self._getstate()) - return self._hashcode - - def __bool__(self): - return (self._days != 0 or - self._seconds != 0 or - self._microseconds != 0) - - # Pickle support. - - def _getstate(self): - return (self._days, self._seconds, self._microseconds) - - def __reduce__(self): - return (self.__class__, self._getstate()) - -timedelta.min = timedelta(-999999999) -timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, - microseconds=999999) -timedelta.resolution = timedelta(microseconds=1) - -class date: - """Concrete date type. - - Constructors: - - __new__() - fromtimestamp() - today() - fromordinal() - - Operators: - - __repr__, __str__ - __eq__, __le__, __lt__, __ge__, __gt__, __hash__ - __add__, __radd__, __sub__ (add/radd only with timedelta arg) - - Methods: - - timetuple() - toordinal() - weekday() - isoweekday(), isocalendar(), isoformat() - ctime() - strftime() - - Properties (readonly): - year, month, day - """ - __slots__ = '_year', '_month', '_day', '_hashcode' - - def __new__(cls, year, month=None, day=None): - """Constructor. - - Arguments: - - year, month, day (required, base 1) - """ - if (month is None and - isinstance(year, (bytes, str)) and len(year) == 4 and - 1 <= ord(year[2:3]) <= 12): - # Pickle support - if isinstance(year, str): - try: - year = year.encode('latin1') - except UnicodeEncodeError: - # More informative error message. - raise ValueError( - "Failed to encode latin1 string when unpickling " - "a date object. " - "pickle.load(data, encoding='latin1') is assumed.") - self = object.__new__(cls) - self.__setstate(year) - self._hashcode = -1 - return self - year, month, day = _check_date_fields(year, month, day) - self = object.__new__(cls) - self._year = year - self._month = month - self._day = day - self._hashcode = -1 - return self - - # Additional constructors - - @classmethod - def fromtimestamp(cls, t): - "Construct a date from a POSIX timestamp (like time.time())." 
- y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t) - return cls(y, m, d) - - @classmethod - def today(cls): - "Construct a date from time.time()." - t = _time.time() - return cls.fromtimestamp(t) - - @classmethod - def fromordinal(cls, n): - """Construct a date from a proleptic Gregorian ordinal. - - January 1 of year 1 is day 1. Only the year, month and day are - non-zero in the result. - """ - y, m, d = _ord2ymd(n) - return cls(y, m, d) - - @classmethod - def fromisoformat(cls, date_string): - """Construct a date from a string in ISO 8601 format.""" - if not isinstance(date_string, str): - raise TypeError('fromisoformat: argument must be str') - - if len(date_string) not in (7, 8, 10): - raise ValueError(f'Invalid isoformat string: {date_string!r}') - - try: - return cls(*_parse_isoformat_date(date_string)) - except Exception: - raise ValueError(f'Invalid isoformat string: {date_string!r}') - - @classmethod - def fromisocalendar(cls, year, week, day): - """Construct a date from the ISO year, week number and weekday. - - This is the inverse of the date.isocalendar() function""" - return cls(*_isoweek_to_gregorian(year, week, day)) - - # Conversions to string - - def __repr__(self): - """Convert to formal string, for repr(). - - >>> dt = datetime(2010, 1, 1) - >>> repr(dt) - 'datetime.datetime(2010, 1, 1, 0, 0)' - - >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc) - >>> repr(dt) - 'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)' - """ - return "%s.%s(%d, %d, %d)" % (self.__class__.__module__, - self.__class__.__qualname__, - self._year, - self._month, - self._day) - # XXX These shouldn't depend on time.localtime(), because that - # clips the usable dates to [1970 .. 2038). At least ctime() is - # easily done without using strftime() -- that's better too because - # strftime("%c", ...) is locale specific. - - - def ctime(self): - "Return ctime() style string." - weekday = self.toordinal() % 7 or 7 - return "%s %s %2d 00:00:00 %04d" % ( - _DAYNAMES[weekday], - _MONTHNAMES[self._month], - self._day, self._year) - - def strftime(self, fmt): - """ - Format using strftime(). - - Example: "%d/%m/%Y, %H:%M:%S" - """ - return _wrap_strftime(self, fmt, self.timetuple()) - - def __format__(self, fmt): - if not isinstance(fmt, str): - raise TypeError("must be str, not %s" % type(fmt).__name__) - if len(fmt) != 0: - return self.strftime(fmt) - return str(self) - - def isoformat(self): - """Return the date formatted according to ISO. - - This is 'YYYY-MM-DD'. - - References: - - http://www.w3.org/TR/NOTE-datetime - - http://www.cl.cam.ac.uk/~mgk25/iso-time.html - """ - return "%04d-%02d-%02d" % (self._year, self._month, self._day) - - __str__ = isoformat - - # Read-only field accessors - @property - def year(self): - """year (1-9999)""" - return self._year - - @property - def month(self): - """month (1-12)""" - return self._month - - @property - def day(self): - """day (1-31)""" - return self._day - - # Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__, - # __hash__ (and helpers) - - def timetuple(self): - "Return local time tuple compatible with time.localtime()." - return _build_struct_time(self._year, self._month, self._day, - 0, 0, 0, -1) - - def toordinal(self): - """Return proleptic Gregorian ordinal for the year, month and day. - - January 1 of year 1 is day 1. Only the year, month and day values - contribute to the result. 
- """ - return _ymd2ord(self._year, self._month, self._day) - - def replace(self, year=None, month=None, day=None): - """Return a new date with new values for the specified fields.""" - if year is None: - year = self._year - if month is None: - month = self._month - if day is None: - day = self._day - return type(self)(year, month, day) - - # Comparisons of date objects with other. - - def __eq__(self, other): - if isinstance(other, date): - return self._cmp(other) == 0 - return NotImplemented - - def __le__(self, other): - if isinstance(other, date): - return self._cmp(other) <= 0 - return NotImplemented - - def __lt__(self, other): - if isinstance(other, date): - return self._cmp(other) < 0 - return NotImplemented - - def __ge__(self, other): - if isinstance(other, date): - return self._cmp(other) >= 0 - return NotImplemented - - def __gt__(self, other): - if isinstance(other, date): - return self._cmp(other) > 0 - return NotImplemented - - def _cmp(self, other): - assert isinstance(other, date) - y, m, d = self._year, self._month, self._day - y2, m2, d2 = other._year, other._month, other._day - return _cmp((y, m, d), (y2, m2, d2)) - - def __hash__(self): - "Hash." - if self._hashcode == -1: - self._hashcode = hash(self._getstate()) - return self._hashcode - - # Computations - - def __add__(self, other): - "Add a date to a timedelta." - if isinstance(other, timedelta): - o = self.toordinal() + other.days - if 0 < o <= _MAXORDINAL: - return type(self).fromordinal(o) - raise OverflowError("result out of range") - return NotImplemented - - __radd__ = __add__ - - def __sub__(self, other): - """Subtract two dates, or a date and a timedelta.""" - if isinstance(other, timedelta): - return self + timedelta(-other.days) - if isinstance(other, date): - days1 = self.toordinal() - days2 = other.toordinal() - return timedelta(days1 - days2) - return NotImplemented - - def weekday(self): - "Return day of the week, where Monday == 0 ... Sunday == 6." - return (self.toordinal() + 6) % 7 - - # Day-of-the-week and week-of-the-year, according to ISO - - def isoweekday(self): - "Return day of the week, where Monday == 1 ... Sunday == 7." - # 1-Jan-0001 is a Monday - return self.toordinal() % 7 or 7 - - def isocalendar(self): - """Return a named tuple containing ISO year, week number, and weekday. - - The first ISO week of the year is the (Mon-Sun) week - containing the year's first Thursday; everything else derives - from that. - - The first week is 1; Monday is 1 ... Sunday is 7. - - ISO calendar algorithm taken from - http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm - (used with permission) - """ - year = self._year - week1monday = _isoweek1monday(year) - today = _ymd2ord(self._year, self._month, self._day) - # Internally, week and day have origin 0 - week, day = divmod(today - week1monday, 7) - if week < 0: - year -= 1 - week1monday = _isoweek1monday(year) - week, day = divmod(today - week1monday, 7) - elif week >= 52: - if today >= _isoweek1monday(year+1): - year += 1 - week = 0 - return _IsoCalendarDate(year, week+1, day+1) - - # Pickle support. 
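The pickle-support methods that follow pack a date into a four-byte state (two bytes of year, one each for month and day) and rebuild it through __reduce__. Round-tripping through the public pickle API shows the effect:

    import pickle
    from datetime import date

    d = date(2024, 2, 29)
    print(pickle.loads(pickle.dumps(d)) == d)   # True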
- - def _getstate(self): - yhi, ylo = divmod(self._year, 256) - return bytes([yhi, ylo, self._month, self._day]), - - def __setstate(self, string): - yhi, ylo, self._month, self._day = string - self._year = yhi * 256 + ylo - - def __reduce__(self): - return (self.__class__, self._getstate()) - -_date_class = date # so functions w/ args named "date" can get at the class - -date.min = date(1, 1, 1) -date.max = date(9999, 12, 31) -date.resolution = timedelta(days=1) - - -class tzinfo: - """Abstract base class for time zone info classes. - - Subclasses must override the name(), utcoffset() and dst() methods. - """ - __slots__ = () - - def tzname(self, dt): - "datetime -> string name of time zone." - raise NotImplementedError("tzinfo subclass must override tzname()") - - def utcoffset(self, dt): - "datetime -> timedelta, positive for east of UTC, negative for west of UTC" - raise NotImplementedError("tzinfo subclass must override utcoffset()") - - def dst(self, dt): - """datetime -> DST offset as timedelta, positive for east of UTC. - - Return 0 if DST not in effect. utcoffset() must include the DST - offset. - """ - raise NotImplementedError("tzinfo subclass must override dst()") - - def fromutc(self, dt): - "datetime in UTC -> datetime in local time." - - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - dtoff = dt.utcoffset() - if dtoff is None: - raise ValueError("fromutc() requires a non-None utcoffset() " - "result") - - # See the long comment block at the end of this file for an - # explanation of this algorithm. - dtdst = dt.dst() - if dtdst is None: - raise ValueError("fromutc() requires a non-None dst() result") - delta = dtoff - dtdst - if delta: - dt += delta - dtdst = dt.dst() - if dtdst is None: - raise ValueError("fromutc(): dt.dst gave inconsistent " - "results; cannot convert") - return dt + dtdst - - # Pickle support. - - def __reduce__(self): - getinitargs = getattr(self, "__getinitargs__", None) - if getinitargs: - args = getinitargs() - else: - args = () - return (self.__class__, args, self.__getstate__()) - - -class IsoCalendarDate(tuple): - - def __new__(cls, year, week, weekday, /): - return super().__new__(cls, (year, week, weekday)) - - @property - def year(self): - return self[0] - - @property - def week(self): - return self[1] - - @property - def weekday(self): - return self[2] - - def __reduce__(self): - # This code is intended to pickle the object without making the - # class public. See https://bugs.python.org/msg352381 - return (tuple, (tuple(self),)) - - def __repr__(self): - return (f'{self.__class__.__name__}' - f'(year={self[0]}, week={self[1]}, weekday={self[2]})') - - -_IsoCalendarDate = IsoCalendarDate -del IsoCalendarDate -_tzinfo_class = tzinfo - -class time: - """Time with time zone. - - Constructors: - - __new__() - - Operators: - - __repr__, __str__ - __eq__, __le__, __lt__, __ge__, __gt__, __hash__ - - Methods: - - strftime() - isoformat() - utcoffset() - tzname() - dst() - - Properties (readonly): - hour, minute, second, microsecond, tzinfo, fold - """ - __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold' - - def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0): - """Constructor. 
- - Arguments: - - hour, minute (required) - second, microsecond (default to zero) - tzinfo (default to None) - fold (keyword only, default to zero) - """ - if (isinstance(hour, (bytes, str)) and len(hour) == 6 and - ord(hour[0:1])&0x7F < 24): - # Pickle support - if isinstance(hour, str): - try: - hour = hour.encode('latin1') - except UnicodeEncodeError: - # More informative error message. - raise ValueError( - "Failed to encode latin1 string when unpickling " - "a time object. " - "pickle.load(data, encoding='latin1') is assumed.") - self = object.__new__(cls) - self.__setstate(hour, minute or None) - self._hashcode = -1 - return self - hour, minute, second, microsecond, fold = _check_time_fields( - hour, minute, second, microsecond, fold) - _check_tzinfo_arg(tzinfo) - self = object.__new__(cls) - self._hour = hour - self._minute = minute - self._second = second - self._microsecond = microsecond - self._tzinfo = tzinfo - self._hashcode = -1 - self._fold = fold - return self - - # Read-only field accessors - @property - def hour(self): - """hour (0-23)""" - return self._hour - - @property - def minute(self): - """minute (0-59)""" - return self._minute - - @property - def second(self): - """second (0-59)""" - return self._second - - @property - def microsecond(self): - """microsecond (0-999999)""" - return self._microsecond - - @property - def tzinfo(self): - """timezone info object""" - return self._tzinfo - - @property - def fold(self): - return self._fold - - # Standard conversions, __hash__ (and helpers) - - # Comparisons of time objects with other. - - def __eq__(self, other): - if isinstance(other, time): - return self._cmp(other, allow_mixed=True) == 0 - else: - return NotImplemented - - def __le__(self, other): - if isinstance(other, time): - return self._cmp(other) <= 0 - else: - return NotImplemented - - def __lt__(self, other): - if isinstance(other, time): - return self._cmp(other) < 0 - else: - return NotImplemented - - def __ge__(self, other): - if isinstance(other, time): - return self._cmp(other) >= 0 - else: - return NotImplemented - - def __gt__(self, other): - if isinstance(other, time): - return self._cmp(other) > 0 - else: - return NotImplemented - - def _cmp(self, other, allow_mixed=False): - assert isinstance(other, time) - mytz = self._tzinfo - ottz = other._tzinfo - myoff = otoff = None - - if mytz is ottz: - base_compare = True - else: - myoff = self.utcoffset() - otoff = other.utcoffset() - base_compare = myoff == otoff - - if base_compare: - return _cmp((self._hour, self._minute, self._second, - self._microsecond), - (other._hour, other._minute, other._second, - other._microsecond)) - if myoff is None or otoff is None: - if allow_mixed: - return 2 # arbitrary non-zero value - else: - raise TypeError("cannot compare naive and aware times") - myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1) - othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1) - return _cmp((myhhmm, self._second, self._microsecond), - (othhmm, other._second, other._microsecond)) - - def __hash__(self): - """Hash.""" - if self._hashcode == -1: - if self.fold: - t = self.replace(fold=0) - else: - t = self - tzoff = t.utcoffset() - if not tzoff: # zero or None - self._hashcode = hash(t._getstate()[0]) - else: - h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, - timedelta(hours=1)) - assert not m % timedelta(minutes=1), "whole minute" - m //= timedelta(minutes=1) - if 0 <= h < 24: - self._hashcode = hash(time(h, m, self.second, 
self.microsecond)) - else: - self._hashcode = hash((h, m, self.second, self.microsecond)) - return self._hashcode - - # Conversion to string - - def _tzstr(self): - """Return formatted timezone offset (+xx:xx) or an empty string.""" - off = self.utcoffset() - return _format_offset(off) - - def __repr__(self): - """Convert to formal string, for repr().""" - if self._microsecond != 0: - s = ", %d, %d" % (self._second, self._microsecond) - elif self._second != 0: - s = ", %d" % self._second - else: - s = "" - s= "%s.%s(%d, %d%s)" % (self.__class__.__module__, - self.__class__.__qualname__, - self._hour, self._minute, s) - if self._tzinfo is not None: - assert s[-1:] == ")" - s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" - if self._fold: - assert s[-1:] == ")" - s = s[:-1] + ", fold=1)" - return s - - def isoformat(self, timespec='auto'): - """Return the time formatted according to ISO. - - The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional - part is omitted if self.microsecond == 0. - - The optional argument timespec specifies the number of additional - terms of the time to include. Valid options are 'auto', 'hours', - 'minutes', 'seconds', 'milliseconds' and 'microseconds'. - """ - s = _format_time(self._hour, self._minute, self._second, - self._microsecond, timespec) - tz = self._tzstr() - if tz: - s += tz - return s - - __str__ = isoformat - - @classmethod - def fromisoformat(cls, time_string): - """Construct a time from a string in one of the ISO 8601 formats.""" - if not isinstance(time_string, str): - raise TypeError('fromisoformat: argument must be str') - - # The spec actually requires that time-only ISO 8601 strings start with - # T, but the extended format allows this to be omitted as long as there - # is no ambiguity with date strings. - time_string = time_string.removeprefix('T') - - try: - return cls(*_parse_isoformat_time(time_string)) - except Exception: - raise ValueError(f'Invalid isoformat string: {time_string!r}') - - - def strftime(self, fmt): - """Format using strftime(). The date part of the timestamp passed - to underlying strftime should not be used. - """ - # The year must be >= 1000 else Python's strftime implementation - # can raise a bogus exception. - timetuple = (1900, 1, 1, - self._hour, self._minute, self._second, - 0, 1, -1) - return _wrap_strftime(self, fmt, timetuple) - - def __format__(self, fmt): - if not isinstance(fmt, str): - raise TypeError("must be str, not %s" % type(fmt).__name__) - if len(fmt) != 0: - return self.strftime(fmt) - return str(self) - - # Timezone functions - - def utcoffset(self): - """Return the timezone offset as timedelta, positive east of UTC - (negative west of UTC).""" - if self._tzinfo is None: - return None - offset = self._tzinfo.utcoffset(None) - _check_utc_offset("utcoffset", offset) - return offset - - def tzname(self): - """Return the timezone name. - - Note that the name is 100% informational -- there's no requirement that - it mean anything in particular. For example, "GMT", "UTC", "-500", - "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. - """ - if self._tzinfo is None: - return None - name = self._tzinfo.tzname(None) - _check_tzname(name) - return name - - def dst(self): - """Return 0 if DST is not in effect, or the DST offset (as timedelta - positive eastward) if DST is in effect. 
- - This is purely informational; the DST offset has already been added to - the UTC offset returned by utcoffset() if applicable, so there's no - need to consult dst() unless you're interested in displaying the DST - info. - """ - if self._tzinfo is None: - return None - offset = self._tzinfo.dst(None) - _check_utc_offset("dst", offset) - return offset - - def replace(self, hour=None, minute=None, second=None, microsecond=None, - tzinfo=True, *, fold=None): - """Return a new time with new values for the specified fields.""" - if hour is None: - hour = self.hour - if minute is None: - minute = self.minute - if second is None: - second = self.second - if microsecond is None: - microsecond = self.microsecond - if tzinfo is True: - tzinfo = self.tzinfo - if fold is None: - fold = self._fold - return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold) - - # Pickle support. - - def _getstate(self, protocol=3): - us2, us3 = divmod(self._microsecond, 256) - us1, us2 = divmod(us2, 256) - h = self._hour - if self._fold and protocol > 3: - h += 128 - basestate = bytes([h, self._minute, self._second, - us1, us2, us3]) - if self._tzinfo is None: - return (basestate,) - else: - return (basestate, self._tzinfo) - - def __setstate(self, string, tzinfo): - if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): - raise TypeError("bad tzinfo state arg") - h, self._minute, self._second, us1, us2, us3 = string - if h > 127: - self._fold = 1 - self._hour = h - 128 - else: - self._fold = 0 - self._hour = h - self._microsecond = (((us1 << 8) | us2) << 8) | us3 - self._tzinfo = tzinfo - - def __reduce_ex__(self, protocol): - return (self.__class__, self._getstate(protocol)) - - def __reduce__(self): - return self.__reduce_ex__(2) - -_time_class = time # so functions w/ args named "time" can get at the class - -time.min = time(0, 0, 0) -time.max = time(23, 59, 59, 999999) -time.resolution = timedelta(microseconds=1) - - -class datetime(date): - """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]]) - - The year, month and day arguments are required. tzinfo may be None, or an - instance of a tzinfo subclass. The remaining arguments may be ints. - """ - __slots__ = date.__slots__ + time.__slots__ - - def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0, - microsecond=0, tzinfo=None, *, fold=0): - if (isinstance(year, (bytes, str)) and len(year) == 10 and - 1 <= ord(year[2:3])&0x7F <= 12): - # Pickle support - if isinstance(year, str): - try: - year = bytes(year, 'latin1') - except UnicodeEncodeError: - # More informative error message. - raise ValueError( - "Failed to encode latin1 string when unpickling " - "a datetime object. 
" - "pickle.load(data, encoding='latin1') is assumed.") - self = object.__new__(cls) - self.__setstate(year, month) - self._hashcode = -1 - return self - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond, fold = _check_time_fields( - hour, minute, second, microsecond, fold) - _check_tzinfo_arg(tzinfo) - self = object.__new__(cls) - self._year = year - self._month = month - self._day = day - self._hour = hour - self._minute = minute - self._second = second - self._microsecond = microsecond - self._tzinfo = tzinfo - self._hashcode = -1 - self._fold = fold - return self - - # Read-only field accessors - @property - def hour(self): - """hour (0-23)""" - return self._hour - - @property - def minute(self): - """minute (0-59)""" - return self._minute - - @property - def second(self): - """second (0-59)""" - return self._second - - @property - def microsecond(self): - """microsecond (0-999999)""" - return self._microsecond - - @property - def tzinfo(self): - """timezone info object""" - return self._tzinfo - - @property - def fold(self): - return self._fold - - @classmethod - def _fromtimestamp(cls, t, utc, tz): - """Construct a datetime from a POSIX timestamp (like time.time()). - - A timezone info object may be passed in as well. - """ - frac, t = _math.modf(t) - us = round(frac * 1e6) - if us >= 1000000: - t += 1 - us -= 1000000 - elif us < 0: - t -= 1 - us += 1000000 - - converter = _time.gmtime if utc else _time.localtime - y, m, d, hh, mm, ss, weekday, jday, dst = converter(t) - ss = min(ss, 59) # clamp out leap seconds if the platform has them - result = cls(y, m, d, hh, mm, ss, us, tz) - if tz is None and not utc: - # As of version 2015f max fold in IANA database is - # 23 hours at 1969-09-30 13:00:00 in Kwajalein. - # Let's probe 24 hours in the past to detect a transition: - max_fold_seconds = 24 * 3600 - - # On Windows localtime_s throws an OSError for negative values, - # thus we can't perform fold detection for values of time less - # than the max time fold. See comments in _datetimemodule's - # version of this method for more details. - if t < max_fold_seconds and sys.platform.startswith("win"): - return result - - y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6] - probe1 = cls(y, m, d, hh, mm, ss, us, tz) - trans = result - probe1 - timedelta(0, max_fold_seconds) - if trans.days < 0: - y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6] - probe2 = cls(y, m, d, hh, mm, ss, us, tz) - if probe2 == result: - result._fold = 1 - elif tz is not None: - result = tz.fromutc(result) - return result - - @classmethod - def fromtimestamp(cls, t, tz=None): - """Construct a datetime from a POSIX timestamp (like time.time()). - - A timezone info object may be passed in as well. - """ - _check_tzinfo_arg(tz) - - return cls._fromtimestamp(t, tz is not None, tz) - - @classmethod - def utcfromtimestamp(cls, t): - """Construct a naive UTC datetime from a POSIX timestamp.""" - return cls._fromtimestamp(t, True, None) - - @classmethod - def now(cls, tz=None): - "Construct a datetime from time.time() and optional time zone info." - t = _time.time() - return cls.fromtimestamp(t, tz) - - @classmethod - def utcnow(cls): - "Construct a UTC datetime from time.time()." - t = _time.time() - return cls.utcfromtimestamp(t) - - @classmethod - def combine(cls, date, time, tzinfo=True): - "Construct a datetime from a given date and a given time." 
- if not isinstance(date, _date_class): - raise TypeError("date argument must be a date instance") - if not isinstance(time, _time_class): - raise TypeError("time argument must be a time instance") - if tzinfo is True: - tzinfo = time.tzinfo - return cls(date.year, date.month, date.day, - time.hour, time.minute, time.second, time.microsecond, - tzinfo, fold=time.fold) - - @classmethod - def fromisoformat(cls, date_string): - """Construct a datetime from a string in one of the ISO 8601 formats.""" - if not isinstance(date_string, str): - raise TypeError('fromisoformat: argument must be str') - - if len(date_string) < 7: - raise ValueError(f'Invalid isoformat string: {date_string!r}') - - # Split this at the separator - try: - separator_location = _find_isoformat_datetime_separator(date_string) - dstr = date_string[0:separator_location] - tstr = date_string[(separator_location+1):] - - date_components = _parse_isoformat_date(dstr) - except ValueError: - raise ValueError( - f'Invalid isoformat string: {date_string!r}') from None - - if tstr: - try: - time_components = _parse_isoformat_time(tstr) - except ValueError: - raise ValueError( - f'Invalid isoformat string: {date_string!r}') from None - else: - time_components = [0, 0, 0, 0, None] - - return cls(*(date_components + time_components)) - - def timetuple(self): - "Return local time tuple compatible with time.localtime()." - dst = self.dst() - if dst is None: - dst = -1 - elif dst: - dst = 1 - else: - dst = 0 - return _build_struct_time(self.year, self.month, self.day, - self.hour, self.minute, self.second, - dst) - - def _mktime(self): - """Return integer POSIX timestamp.""" - epoch = datetime(1970, 1, 1) - max_fold_seconds = 24 * 3600 - t = (self - epoch) // timedelta(0, 1) - def local(u): - y, m, d, hh, mm, ss = _time.localtime(u)[:6] - return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1) - - # Our goal is to solve t = local(u) for u. - a = local(t) - t - u1 = t - a - t1 = local(u1) - if t1 == t: - # We found one solution, but it may not be the one we need. - # Look for an earlier solution (if `fold` is 0), or a - # later one (if `fold` is 1). - u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold] - b = local(u2) - u2 - if a == b: - return u1 - else: - b = t1 - u1 - assert a != b - u2 = t - b - t2 = local(u2) - if t2 == t: - return u2 - if t1 == t: - return u1 - # We have found both offsets a and b, but neither t - a nor t - b is - # a solution. This means t is in the gap. - return (max, min)[self.fold](u1, u2) - - - def timestamp(self): - "Return POSIX timestamp as float" - if self._tzinfo is None: - s = self._mktime() - return s + self.microsecond / 1e6 - else: - return (self - _EPOCH).total_seconds() - - def utctimetuple(self): - "Return UTC time tuple compatible with time.gmtime()." - offset = self.utcoffset() - if offset: - self -= offset - y, m, d = self.year, self.month, self.day - hh, mm, ss = self.hour, self.minute, self.second - return _build_struct_time(y, m, d, hh, mm, ss, 0) - - def date(self): - "Return the date part." - return date(self._year, self._month, self._day) - - def time(self): - "Return the time part, with tzinfo None." - return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold) - - def timetz(self): - "Return the time part, with same tzinfo." 
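# --- Editor's sketch, not part of the original file (run standalone with the
# public datetime module): .time() drops the tzinfo, .timetz() preserves it.
from datetime import datetime, timezone
aware = datetime(2024, 1, 1, 8, 0, tzinfo=timezone.utc)
assert aware.time().tzinfo is None                 # naive wall-clock time
assert aware.timetz().tzinfo is timezone.utc       # same tzinfo as the datetime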
- return time(self.hour, self.minute, self.second, self.microsecond, - self._tzinfo, fold=self.fold) - - def replace(self, year=None, month=None, day=None, hour=None, - minute=None, second=None, microsecond=None, tzinfo=True, - *, fold=None): - """Return a new datetime with new values for the specified fields.""" - if year is None: - year = self.year - if month is None: - month = self.month - if day is None: - day = self.day - if hour is None: - hour = self.hour - if minute is None: - minute = self.minute - if second is None: - second = self.second - if microsecond is None: - microsecond = self.microsecond - if tzinfo is True: - tzinfo = self.tzinfo - if fold is None: - fold = self.fold - return type(self)(year, month, day, hour, minute, second, - microsecond, tzinfo, fold=fold) - - def _local_timezone(self): - if self.tzinfo is None: - ts = self._mktime() - else: - ts = (self - _EPOCH) // timedelta(seconds=1) - localtm = _time.localtime(ts) - local = datetime(*localtm[:6]) - # Extract TZ data - gmtoff = localtm.tm_gmtoff - zone = localtm.tm_zone - return timezone(timedelta(seconds=gmtoff), zone) - - def astimezone(self, tz=None): - if tz is None: - tz = self._local_timezone() - elif not isinstance(tz, tzinfo): - raise TypeError("tz argument must be an instance of tzinfo") - - mytz = self.tzinfo - if mytz is None: - mytz = self._local_timezone() - myoffset = mytz.utcoffset(self) - else: - myoffset = mytz.utcoffset(self) - if myoffset is None: - mytz = self.replace(tzinfo=None)._local_timezone() - myoffset = mytz.utcoffset(self) - - if tz is mytz: - return self - - # Convert self to UTC, and attach the new time zone object. - utc = (self - myoffset).replace(tzinfo=tz) - - # Convert from UTC to tz's local time. - return tz.fromutc(utc) - - # Ways to produce a string. - - def ctime(self): - "Return ctime() style string." - weekday = self.toordinal() % 7 or 7 - return "%s %s %2d %02d:%02d:%02d %04d" % ( - _DAYNAMES[weekday], - _MONTHNAMES[self._month], - self._day, - self._hour, self._minute, self._second, - self._year) - - def isoformat(self, sep='T', timespec='auto'): - """Return the time formatted according to ISO. - - The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'. - By default, the fractional part is omitted if self.microsecond == 0. - - If self.tzinfo is not None, the UTC offset is also attached, giving - giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'. - - Optional argument sep specifies the separator between date and - time, default 'T'. - - The optional argument timespec specifies the number of additional - terms of the time to include. Valid options are 'auto', 'hours', - 'minutes', 'seconds', 'milliseconds' and 'microseconds'. - """ - s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) + - _format_time(self._hour, self._minute, self._second, - self._microsecond, timespec)) - - off = self.utcoffset() - tz = _format_offset(off) - if tz: - s += tz - - return s - - def __repr__(self): - """Convert to formal string, for repr().""" - L = [self._year, self._month, self._day, # These are never zero - self._hour, self._minute, self._second, self._microsecond] - if L[-1] == 0: - del L[-1] - if L[-1] == 0: - del L[-1] - s = "%s.%s(%s)" % (self.__class__.__module__, - self.__class__.__qualname__, - ", ".join(map(str, L))) - if self._tzinfo is not None: - assert s[-1:] == ")" - s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" - if self._fold: - assert s[-1:] == ")" - s = s[:-1] + ", fold=1)" - return s - - def __str__(self): - "Convert to string, for str()." 
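# --- Editor's sketch, not part of the original file (run standalone with the
# public datetime module): str() is simply isoformat() with a space separator.
from datetime import datetime
dt = datetime(2021, 7, 4, 12, 0, 30, 500)
assert str(dt) == dt.isoformat(sep=' ') == '2021-07-04 12:00:30.000500'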
- return self.isoformat(sep=' ') - - @classmethod - def strptime(cls, date_string, format): - 'string, format -> new datetime parsed from a string (like time.strptime()).' - import _strptime - return _strptime._strptime_datetime(cls, date_string, format) - - def utcoffset(self): - """Return the timezone offset as timedelta positive east of UTC (negative west of - UTC).""" - if self._tzinfo is None: - return None - offset = self._tzinfo.utcoffset(self) - _check_utc_offset("utcoffset", offset) - return offset - - def tzname(self): - """Return the timezone name. - - Note that the name is 100% informational -- there's no requirement that - it mean anything in particular. For example, "GMT", "UTC", "-500", - "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. - """ - if self._tzinfo is None: - return None - name = self._tzinfo.tzname(self) - _check_tzname(name) - return name - - def dst(self): - """Return 0 if DST is not in effect, or the DST offset (as timedelta - positive eastward) if DST is in effect. - - This is purely informational; the DST offset has already been added to - the UTC offset returned by utcoffset() if applicable, so there's no - need to consult dst() unless you're interested in displaying the DST - info. - """ - if self._tzinfo is None: - return None - offset = self._tzinfo.dst(self) - _check_utc_offset("dst", offset) - return offset - - # Comparisons of datetime objects with other. - - def __eq__(self, other): - if isinstance(other, datetime): - return self._cmp(other, allow_mixed=True) == 0 - elif not isinstance(other, date): - return NotImplemented - else: - return False - - def __le__(self, other): - if isinstance(other, datetime): - return self._cmp(other) <= 0 - elif not isinstance(other, date): - return NotImplemented - else: - _cmperror(self, other) - - def __lt__(self, other): - if isinstance(other, datetime): - return self._cmp(other) < 0 - elif not isinstance(other, date): - return NotImplemented - else: - _cmperror(self, other) - - def __ge__(self, other): - if isinstance(other, datetime): - return self._cmp(other) >= 0 - elif not isinstance(other, date): - return NotImplemented - else: - _cmperror(self, other) - - def __gt__(self, other): - if isinstance(other, datetime): - return self._cmp(other) > 0 - elif not isinstance(other, date): - return NotImplemented - else: - _cmperror(self, other) - - def _cmp(self, other, allow_mixed=False): - assert isinstance(other, datetime) - mytz = self._tzinfo - ottz = other._tzinfo - myoff = otoff = None - - if mytz is ottz: - base_compare = True - else: - myoff = self.utcoffset() - otoff = other.utcoffset() - # Assume that allow_mixed means that we are called from __eq__ - if allow_mixed: - if myoff != self.replace(fold=not self.fold).utcoffset(): - return 2 - if otoff != other.replace(fold=not other.fold).utcoffset(): - return 2 - base_compare = myoff == otoff - - if base_compare: - return _cmp((self._year, self._month, self._day, - self._hour, self._minute, self._second, - self._microsecond), - (other._year, other._month, other._day, - other._hour, other._minute, other._second, - other._microsecond)) - if myoff is None or otoff is None: - if allow_mixed: - return 2 # arbitrary non-zero value - else: - raise TypeError("cannot compare naive and aware datetimes") - # XXX What follows could be done more efficiently... - diff = self - other # this will take offsets into account - if diff.days < 0: - return -1 - return diff and 1 or 0 - - def __add__(self, other): - "Add a datetime and a timedelta." 
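# --- Editor's sketch, not part of the original file (run standalone with the
# public datetime module): adding or subtracting a timedelta renormalises the
# calendar fields, carrying across day, month and year boundaries.
from datetime import datetime, timedelta
dt = datetime(2023, 12, 31, 23, 30)
assert dt + timedelta(hours=1) == datetime(2024, 1, 1, 0, 30)
assert dt - timedelta(minutes=90) == datetime(2023, 12, 31, 22, 0)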
- if not isinstance(other, timedelta): - return NotImplemented - delta = timedelta(self.toordinal(), - hours=self._hour, - minutes=self._minute, - seconds=self._second, - microseconds=self._microsecond) - delta += other - hour, rem = divmod(delta.seconds, 3600) - minute, second = divmod(rem, 60) - if 0 < delta.days <= _MAXORDINAL: - return type(self).combine(date.fromordinal(delta.days), - time(hour, minute, second, - delta.microseconds, - tzinfo=self._tzinfo)) - raise OverflowError("result out of range") - - __radd__ = __add__ - - def __sub__(self, other): - "Subtract two datetimes, or a datetime and a timedelta." - if not isinstance(other, datetime): - if isinstance(other, timedelta): - return self + -other - return NotImplemented - - days1 = self.toordinal() - days2 = other.toordinal() - secs1 = self._second + self._minute * 60 + self._hour * 3600 - secs2 = other._second + other._minute * 60 + other._hour * 3600 - base = timedelta(days1 - days2, - secs1 - secs2, - self._microsecond - other._microsecond) - if self._tzinfo is other._tzinfo: - return base - myoff = self.utcoffset() - otoff = other.utcoffset() - if myoff == otoff: - return base - if myoff is None or otoff is None: - raise TypeError("cannot mix naive and timezone-aware time") - return base + otoff - myoff - - def __hash__(self): - if self._hashcode == -1: - if self.fold: - t = self.replace(fold=0) - else: - t = self - tzoff = t.utcoffset() - if tzoff is None: - self._hashcode = hash(t._getstate()[0]) - else: - days = _ymd2ord(self.year, self.month, self.day) - seconds = self.hour * 3600 + self.minute * 60 + self.second - self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff) - return self._hashcode - - # Pickle support. - - def _getstate(self, protocol=3): - yhi, ylo = divmod(self._year, 256) - us2, us3 = divmod(self._microsecond, 256) - us1, us2 = divmod(us2, 256) - m = self._month - if self._fold and protocol > 3: - m += 128 - basestate = bytes([yhi, ylo, m, self._day, - self._hour, self._minute, self._second, - us1, us2, us3]) - if self._tzinfo is None: - return (basestate,) - else: - return (basestate, self._tzinfo) - - def __setstate(self, string, tzinfo): - if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): - raise TypeError("bad tzinfo state arg") - (yhi, ylo, m, self._day, self._hour, - self._minute, self._second, us1, us2, us3) = string - if m > 127: - self._fold = 1 - self._month = m - 128 - else: - self._fold = 0 - self._month = m - self._year = yhi * 256 + ylo - self._microsecond = (((us1 << 8) | us2) << 8) | us3 - self._tzinfo = tzinfo - - def __reduce_ex__(self, protocol): - return (self.__class__, self._getstate(protocol)) - - def __reduce__(self): - return self.__reduce_ex__(2) - - -datetime.min = datetime(1, 1, 1) -datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999) -datetime.resolution = timedelta(microseconds=1) - - -def _isoweek1monday(year): - # Helper to calculate the day number of the Monday starting week 1 - # XXX This could be done more efficiently - THURSDAY = 3 - firstday = _ymd2ord(year, 1, 1) - firstweekday = (firstday + 6) % 7 # See weekday() above - week1monday = firstday - firstweekday - if firstweekday > THURSDAY: - week1monday += 7 - return week1monday - - -class timezone(tzinfo): - __slots__ = '_offset', '_name' - - # Sentinel value to disallow None - _Omitted = object() - def __new__(cls, offset, name=_Omitted): - if not isinstance(offset, timedelta): - raise TypeError("offset must be a timedelta") - if name is cls._Omitted: - if not offset: - return 
cls.utc - name = None - elif not isinstance(name, str): - raise TypeError("name must be a string") - if not cls._minoffset <= offset <= cls._maxoffset: - raise ValueError("offset must be a timedelta " - "strictly between -timedelta(hours=24) and " - "timedelta(hours=24).") - return cls._create(offset, name) - - @classmethod - def _create(cls, offset, name=None): - self = tzinfo.__new__(cls) - self._offset = offset - self._name = name - return self - - def __getinitargs__(self): - """pickle support""" - if self._name is None: - return (self._offset,) - return (self._offset, self._name) - - def __eq__(self, other): - if isinstance(other, timezone): - return self._offset == other._offset - return NotImplemented - - def __hash__(self): - return hash(self._offset) - - def __repr__(self): - """Convert to formal string, for repr(). - - >>> tz = timezone.utc - >>> repr(tz) - 'datetime.timezone.utc' - >>> tz = timezone(timedelta(hours=-5), 'EST') - >>> repr(tz) - "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')" - """ - if self is self.utc: - return 'datetime.timezone.utc' - if self._name is None: - return "%s.%s(%r)" % (self.__class__.__module__, - self.__class__.__qualname__, - self._offset) - return "%s.%s(%r, %r)" % (self.__class__.__module__, - self.__class__.__qualname__, - self._offset, self._name) - - def __str__(self): - return self.tzname(None) - - def utcoffset(self, dt): - if isinstance(dt, datetime) or dt is None: - return self._offset - raise TypeError("utcoffset() argument must be a datetime instance" - " or None") - - def tzname(self, dt): - if isinstance(dt, datetime) or dt is None: - if self._name is None: - return self._name_from_offset(self._offset) - return self._name - raise TypeError("tzname() argument must be a datetime instance" - " or None") - - def dst(self, dt): - if isinstance(dt, datetime) or dt is None: - return None - raise TypeError("dst() argument must be a datetime instance" - " or None") - - def fromutc(self, dt): - if isinstance(dt, datetime): - if dt.tzinfo is not self: - raise ValueError("fromutc: dt.tzinfo " - "is not self") - return dt + self._offset - raise TypeError("fromutc() argument must be a datetime instance" - " or None") - - _maxoffset = timedelta(hours=24, microseconds=-1) - _minoffset = -_maxoffset - - @staticmethod - def _name_from_offset(delta): - if not delta: - return 'UTC' - if delta < timedelta(0): - sign = '-' - delta = -delta - else: - sign = '+' - hours, rest = divmod(delta, timedelta(hours=1)) - minutes, rest = divmod(rest, timedelta(minutes=1)) - seconds = rest.seconds - microseconds = rest.microseconds - if microseconds: - return (f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}' - f'.{microseconds:06d}') - if seconds: - return f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}' - return f'UTC{sign}{hours:02d}:{minutes:02d}' - -UTC = timezone.utc = timezone._create(timedelta(0)) - -# bpo-37642: These attributes are rounded to the nearest minute for backwards -# compatibility, even though the constructor will accept a wider range of -# values. This may change in the future. -timezone.min = timezone._create(-timedelta(hours=23, minutes=59)) -timezone.max = timezone._create(timedelta(hours=23, minutes=59)) -_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc) - -# Some time zone algebra. For a datetime x, let -# x.n = x stripped of its timezone -- its naive time. 
-# x.o = x.utcoffset(), and assuming that doesn't raise an exception or -# return None -# x.d = x.dst(), and assuming that doesn't raise an exception or -# return None -# x.s = x's standard offset, x.o - x.d -# -# Now some derived rules, where k is a duration (timedelta). -# -# 1. x.o = x.s + x.d -# This follows from the definition of x.s. -# -# 2. If x and y have the same tzinfo member, x.s = y.s. -# This is actually a requirement, an assumption we need to make about -# sane tzinfo classes. -# -# 3. The naive UTC time corresponding to x is x.n - x.o. -# This is again a requirement for a sane tzinfo class. -# -# 4. (x+k).s = x.s -# This follows from #2, and that datetime.timetz+timedelta preserves tzinfo. -# -# 5. (x+k).n = x.n + k -# Again follows from how arithmetic is defined. -# -# Now we can explain tz.fromutc(x). Let's assume it's an interesting case -# (meaning that the various tzinfo methods exist, and don't blow up or return -# None when called). -# -# The function wants to return a datetime y with timezone tz, equivalent to x. -# x is already in UTC. -# -# By #3, we want -# -# y.n - y.o = x.n [1] -# -# The algorithm starts by attaching tz to x.n, and calling that y. So -# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1] -# becomes true; in effect, we want to solve [2] for k: -# -# (y+k).n - (y+k).o = x.n [2] -# -# By #1, this is the same as -# -# (y+k).n - ((y+k).s + (y+k).d) = x.n [3] -# -# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start. -# Substituting that into [3], -# -# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving -# k - (y+k).s - (y+k).d = 0; rearranging, -# k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so -# k = y.s - (y+k).d -# -# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we -# approximate k by ignoring the (y+k).d term at first. Note that k can't be -# very large, since all offset-returning methods return a duration of magnitude -# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must -# be 0, so ignoring it has no consequence then. -# -# In any case, the new value is -# -# z = y + y.s [4] -# -# It's helpful to step back at look at [4] from a higher level: it's simply -# mapping from UTC to tz's standard time. -# -# At this point, if -# -# z.n - z.o = x.n [5] -# -# we have an equivalent time, and are almost done. The insecurity here is -# at the start of daylight time. Picture US Eastern for concreteness. The wall -# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good -# sense then. The docs ask that an Eastern tzinfo class consider such a time to -# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST -# on the day DST starts. We want to return the 1:MM EST spelling because that's -# the only spelling that makes sense on the local wall clock. -# -# In fact, if [5] holds at this point, we do have the standard-time spelling, -# but that takes a bit of proof. We first prove a stronger result. What's the -# difference between the LHS and RHS of [5]? Let -# -# diff = x.n - (z.n - z.o) [6] -# -# Now -# z.n = by [4] -# (y + y.s).n = by #5 -# y.n + y.s = since y.n = x.n -# x.n + y.s = since z and y are have the same tzinfo member, -# y.s = z.s by #2 -# x.n + z.s -# -# Plugging that back into [6] gives -# -# diff = -# x.n - ((x.n + z.s) - z.o) = expanding -# x.n - x.n - z.s + z.o = cancelling -# - z.s + z.o = by #2 -# z.d -# -# So diff = z.d. 
-# -# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time -# spelling we wanted in the endcase described above. We're done. Contrarily, -# if z.d = 0, then we have a UTC equivalent, and are also done. -# -# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to -# add to z (in effect, z is in tz's standard time, and we need to shift the -# local clock into tz's daylight time). -# -# Let -# -# z' = z + z.d = z + diff [7] -# -# and we can again ask whether -# -# z'.n - z'.o = x.n [8] -# -# If so, we're done. If not, the tzinfo class is insane, according to the -# assumptions we've made. This also requires a bit of proof. As before, let's -# compute the difference between the LHS and RHS of [8] (and skipping some of -# the justifications for the kinds of substitutions we've done several times -# already): -# -# diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7] -# x.n - (z.n + diff - z'.o) = replacing diff via [6] -# x.n - (z.n + x.n - (z.n - z.o) - z'.o) = -# x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n -# - z.n + z.n - z.o + z'.o = cancel z.n -# - z.o + z'.o = #1 twice -# -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo -# z'.d - z.d -# -# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal, -# we've found the UTC-equivalent so are done. In fact, we stop with [7] and -# return z', not bothering to compute z'.d. -# -# How could z.d and z'd differ? z' = z + z.d [7], so merely moving z' by -# a dst() offset, and starting *from* a time already in DST (we know z.d != 0), -# would have to change the result dst() returns: we start in DST, and moving -# a little further into it takes us out of DST. -# -# There isn't a sane case where this can happen. The closest it gets is at -# the end of DST, where there's an hour in UTC with no spelling in a hybrid -# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During -# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM -# UTC) because the docs insist on that, but 0:MM is taken as being in daylight -# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local -# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in -# standard time. Since that's what the local clock *does*, we want to map both -# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous -# in local time, but so it goes -- it's the way the local clock works. -# -# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0, -# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going. -# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8] -# (correctly) concludes that z' is not UTC-equivalent to x. -# -# Because we know z.d said z was in daylight time (else [5] would have held and -# we would have stopped then), and we know z.d != z'.d (else [8] would have held -# and we have stopped then), and there are only 2 possible values dst() can -# return in Eastern, it follows that z'.d must be 0 (which it is in the example, -# but the reasoning doesn't depend on the example -- it depends on there being -# two possible dst() outcomes, one zero and the other non-zero). Therefore -# z' must be in standard time, and is the spelling we want in this case. 
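# --- Editor's sketch, not part of the original file: the algebra above is the
# correctness argument for the default tzinfo.fromutc() strategy. A simplified
# stand-in (name hypothetical), assuming the tzinfo methods return real offsets
# rather than None:
def _fromutc_sketch(dt):
    # dt plays the role of y: the UTC wall time x.n with tz already attached.
    std = dt.utcoffset() - dt.dst()    # y.s, the standard offset
    z = dt + std                       # step [4]: shift into tz's standard time
    dst = z.dst()                      # z.d
    return z + dst if dst else z       # step [7] only when z lands in DST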
-# -# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is -# concerned (because it takes z' as being in standard time rather than the -# daylight time we intend here), but returning it gives the real-life "local -# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into -# tz. -# -# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with -# the 1:MM standard time spelling we want. -# -# So how can this break? One of the assumptions must be violated. Two -# possibilities: -# -# 1) [2] effectively says that y.s is invariant across all y belong to a given -# time zone. This isn't true if, for political reasons or continental drift, -# a region decides to change its base offset from UTC. -# -# 2) There may be versions of "double daylight" time where the tail end of -# the analysis gives up a step too early. I haven't thought about that -# enough to say. -# -# In any case, it's clear that the default fromutc() is strong enough to handle -# "almost all" time zones: so long as the standard offset is invariant, it -# doesn't matter if daylight time transition points change from year to year, or -# if daylight time is skipped in some years; it doesn't matter how large or -# small dst() may get within its bounds; and it doesn't even matter if some -# perverse time zone returns a negative dst()). So a breaking case must be -# pretty bizarre, and a tzinfo subclass can override fromutc() if it is. - try: from _datetime import * -except ImportError: - pass -else: - # Clean up unused names - del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y, - _DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time, - _check_date_fields, _check_time_fields, - _check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror, - _date_class, _days_before_month, _days_before_year, _days_in_month, - _format_time, _format_offset, _index, _is_leap, _isoweek1monday, _math, - _ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord, - _divide_and_round, _parse_isoformat_date, _parse_isoformat_time, - _parse_hh_mm_ss_ff, _IsoCalendarDate, _isoweek_to_gregorian, - _find_isoformat_datetime_separator, _FRACTION_CORRECTION, - _is_ascii_digit) - # XXX Since import * above excludes names that start with _, - # docstring does not get overwritten. In the future, it may be - # appropriate to maintain a single module level docstring and - # remove the following line. 
from _datetime import __doc__ +except ImportError: + from _pydatetime import * + from _pydatetime import __doc__ + +__all__ = ("date", "datetime", "time", "timedelta", "timezone", "tzinfo", + "MINYEAR", "MAXYEAR", "UTC") diff --git a/Lib/dis.py b/Lib/dis.py index a045d18241b4652..e08e9a940576893 100644 --- a/Lib/dis.py +++ b/Lib/dis.py @@ -11,8 +11,10 @@ _cache_format, _inline_cache_entries, _nb_ops, + _intrinsic_1_descs, + _intrinsic_2_descs, _specializations, - _specialized_instructions, + _specialized_opmap, ) __all__ = ["code_info", "dis", "disassemble", "distb", "disco", @@ -23,32 +25,36 @@ _have_code = (types.MethodType, types.FunctionType, types.CodeType, classmethod, staticmethod, type) -FORMAT_VALUE = opmap['FORMAT_VALUE'] -FORMAT_VALUE_CONVERTERS = ( - (None, ''), - (str, 'str'), - (repr, 'repr'), - (ascii, 'ascii'), -) -MAKE_FUNCTION = opmap['MAKE_FUNCTION'] -MAKE_FUNCTION_FLAGS = ('defaults', 'kwdefaults', 'annotations', 'closure') +CONVERT_VALUE = opmap['CONVERT_VALUE'] + +SET_FUNCTION_ATTRIBUTE = opmap['SET_FUNCTION_ATTRIBUTE'] +FUNCTION_ATTR_FLAGS = ('defaults', 'kwdefaults', 'annotations', 'closure') +ENTER_EXECUTOR = opmap['ENTER_EXECUTOR'] LOAD_CONST = opmap['LOAD_CONST'] +RETURN_CONST = opmap['RETURN_CONST'] LOAD_GLOBAL = opmap['LOAD_GLOBAL'] BINARY_OP = opmap['BINARY_OP'] JUMP_BACKWARD = opmap['JUMP_BACKWARD'] FOR_ITER = opmap['FOR_ITER'] +SEND = opmap['SEND'] LOAD_ATTR = opmap['LOAD_ATTR'] +LOAD_SUPER_ATTR = opmap['LOAD_SUPER_ATTR'] +CALL_INTRINSIC_1 = opmap['CALL_INTRINSIC_1'] +CALL_INTRINSIC_2 = opmap['CALL_INTRINSIC_2'] +LOAD_FAST_LOAD_FAST = opmap['LOAD_FAST_LOAD_FAST'] +STORE_FAST_LOAD_FAST = opmap['STORE_FAST_LOAD_FAST'] +STORE_FAST_STORE_FAST = opmap['STORE_FAST_STORE_FAST'] CACHE = opmap["CACHE"] _all_opname = list(opname) _all_opmap = dict(opmap) -_empty_slot = [slot for slot, name in enumerate(_all_opname) if name.startswith("<")] -for spec_op, specialized in zip(_empty_slot, _specialized_instructions): +for name, op in _specialized_opmap.items(): # fill opname and opmap - _all_opname[spec_op] = specialized - _all_opmap[specialized] = spec_op + assert op < len(_all_opname) + _all_opname[op] = name + _all_opmap[name] = op deoptmap = { specialized: base for base, family in _specializations.items() for specialized in family @@ -62,12 +68,13 @@ def _try_compile(source, name): expect code objects """ try: - c = compile(source, name, 'eval') + return compile(source, name, 'eval') except SyntaxError: - c = compile(source, name, 'exec') - return c + pass + return compile(source, name, 'exec') -def dis(x=None, *, file=None, depth=None, show_caches=False, adaptive=False): +def dis(x=None, *, file=None, depth=None, show_caches=False, adaptive=False, + show_offsets=False): """Disassemble classes, methods, functions, and other compiled objects. With no argument, disassemble the last traceback. @@ -77,7 +84,8 @@ def dis(x=None, *, file=None, depth=None, show_caches=False, adaptive=False): in a special attribute. """ if x is None: - distb(file=file, show_caches=show_caches, adaptive=adaptive) + distb(file=file, show_caches=show_caches, adaptive=adaptive, + show_offsets=show_offsets) return # Extract functions from methods. 
if hasattr(x, '__func__'): @@ -98,29 +106,32 @@ def dis(x=None, *, file=None, depth=None, show_caches=False, adaptive=False): if isinstance(x1, _have_code): print("Disassembly of %s:" % name, file=file) try: - dis(x1, file=file, depth=depth, show_caches=show_caches, adaptive=adaptive) + dis(x1, file=file, depth=depth, show_caches=show_caches, adaptive=adaptive, show_offsets=show_offsets) except TypeError as msg: print("Sorry:", msg, file=file) print(file=file) elif hasattr(x, 'co_code'): # Code object - _disassemble_recursive(x, file=file, depth=depth, show_caches=show_caches, adaptive=adaptive) + _disassemble_recursive(x, file=file, depth=depth, show_caches=show_caches, adaptive=adaptive, show_offsets=show_offsets) elif isinstance(x, (bytes, bytearray)): # Raw bytecode - _disassemble_bytes(x, file=file, show_caches=show_caches) + _disassemble_bytes(x, file=file, show_caches=show_caches, show_offsets=show_offsets) elif isinstance(x, str): # Source code - _disassemble_str(x, file=file, depth=depth, show_caches=show_caches, adaptive=adaptive) + _disassemble_str(x, file=file, depth=depth, show_caches=show_caches, adaptive=adaptive, show_offsets=show_offsets) else: raise TypeError("don't know how to disassemble %s objects" % type(x).__name__) -def distb(tb=None, *, file=None, show_caches=False, adaptive=False): +def distb(tb=None, *, file=None, show_caches=False, adaptive=False, show_offsets=False): """Disassemble a traceback (default: last traceback).""" if tb is None: try: - tb = sys.last_traceback + if hasattr(sys, 'last_exc'): + tb = sys.last_exc.__traceback__ + else: + tb = sys.last_traceback except AttributeError: raise RuntimeError("no last traceback to disassemble") from None while tb.tb_next: tb = tb.tb_next - disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file, show_caches=show_caches, adaptive=adaptive) + disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file, show_caches=show_caches, adaptive=adaptive, show_offsets=show_offsets) # The inspect module interrogates this dictionary to build its # list of CO_* constants. 
It is also used by pretty_flags to @@ -252,11 +263,13 @@ def show_code(co, *, file=None): 'argval', 'argrepr', 'offset', + 'start_offset', 'starts_line', - 'is_jump_target', + 'line_number', + 'label', 'positions' ], - defaults=[None] + defaults=[None, None] ) _Instruction.opname.__doc__ = "Human readable name for operation" @@ -265,18 +278,47 @@ def show_code(co, *, file=None): _Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg" _Instruction.argrepr.__doc__ = "Human readable description of operation argument" _Instruction.offset.__doc__ = "Start index of operation within bytecode sequence" -_Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None" -_Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False" +_Instruction.start_offset.__doc__ = ( + "Start index of operation within bytecode sequence, including extended args if present; " + "otherwise equal to Instruction.offset" +) +_Instruction.starts_line.__doc__ = "True if this opcode starts a source line, otherwise False" +_Instruction.line_number.__doc__ = "source line number associated with this opcode (if any), otherwise None" +_Instruction.label.__doc__ = "A label (int > 0) if this instruction is a jump target, otherwise None" _Instruction.positions.__doc__ = "dis.Positions object holding the span of source code covered by this instruction" -_ExceptionTableEntry = collections.namedtuple("_ExceptionTableEntry", +_ExceptionTableEntryBase = collections.namedtuple("_ExceptionTableEntryBase", "start end target depth lasti") +class _ExceptionTableEntry(_ExceptionTableEntryBase): + pass + _OPNAME_WIDTH = 20 _OPARG_WIDTH = 5 +def _get_cache_size(opname): + return _inline_cache_entries.get(opname, 0) + +def _get_jump_target(op, arg, offset): + """Gets the bytecode offset of the jump target if this is a jump instruction. + + Otherwise return None. + """ + deop = _deoptop(op) + caches = _get_cache_size(_all_opname[deop]) + if deop in hasjrel: + if _is_backward_jump(deop): + arg = -arg + target = offset + 2 + arg*2 + target += 2 * caches + elif deop in hasjabs: + target = arg*2 + else: + target = None + return target + class Instruction(_Instruction): - """Details for a bytecode operation + """Details for a bytecode operation. 
Defined fields: opname - human readable name for operation @@ -285,14 +327,149 @@ class Instruction(_Instruction): argval - resolved arg value (if known), otherwise same as arg argrepr - human readable description of operation argument offset - start index of operation within bytecode sequence - starts_line - line started by this opcode (if any), otherwise None - is_jump_target - True if other code jumps to here, otherwise False + start_offset - start index of operation within bytecode sequence including extended args if present; + otherwise equal to Instruction.offset + starts_line - True if this opcode starts a source line, otherwise False + line_number - source line number associated with this opcode (if any), otherwise None + label - A label if this instruction is a jump target, otherwise None positions - Optional dis.Positions object holding the span of source code covered by this instruction """ - def _disassemble(self, lineno_width=3, mark_as_current=False, offset_width=4): - """Format instruction details for inclusion in disassembly output + @staticmethod + def _get_argval_argrepr(op, arg, offset, co_consts, names, varname_from_oparg, + labels_map): + get_name = None if names is None else names.__getitem__ + argval = None + argrepr = '' + deop = _deoptop(op) + if arg is not None: + # Set argval to the dereferenced value of the argument when + # available, and argrepr to the string representation of argval. + # _disassemble_bytes needs the string repr of the + # raw name index for LOAD_GLOBAL, LOAD_CONST, etc. + argval = arg + if deop in hasconst: + argval, argrepr = _get_const_info(deop, arg, co_consts) + elif deop in hasname: + if deop == LOAD_GLOBAL: + argval, argrepr = _get_name_info(arg//2, get_name) + if (arg & 1) and argrepr: + argrepr = f"{argrepr} + NULL" + elif deop == LOAD_ATTR: + argval, argrepr = _get_name_info(arg//2, get_name) + if (arg & 1) and argrepr: + argrepr = f"{argrepr} + NULL|self" + elif deop == LOAD_SUPER_ATTR: + argval, argrepr = _get_name_info(arg//4, get_name) + if (arg & 1) and argrepr: + argrepr = f"{argrepr} + NULL|self" + else: + argval, argrepr = _get_name_info(arg, get_name) + elif deop in hasjabs: + argval = arg*2 + argrepr = f"to L{labels_map[argval]}" + elif deop in hasjrel: + signed_arg = -arg if _is_backward_jump(deop) else arg + argval = offset + 2 + signed_arg*2 + caches = _get_cache_size(_all_opname[deop]) + argval += 2 * caches + if deop == ENTER_EXECUTOR: + argval += 2 + argrepr = f"to L{labels_map[argval]}" + elif deop in (LOAD_FAST_LOAD_FAST, STORE_FAST_LOAD_FAST, STORE_FAST_STORE_FAST): + arg1 = arg >> 4 + arg2 = arg & 15 + val1, argrepr1 = _get_name_info(arg1, varname_from_oparg) + val2, argrepr2 = _get_name_info(arg2, varname_from_oparg) + argrepr = argrepr1 + ", " + argrepr2 + argval = val1, val2 + elif deop in haslocal or deop in hasfree: + argval, argrepr = _get_name_info(arg, varname_from_oparg) + elif deop in hascompare: + argval = cmp_op[arg >> 5] + argrepr = argval + if arg & 16: + argrepr = f"bool({argrepr})" + elif deop == CONVERT_VALUE: + argval = (None, str, repr, ascii)[arg] + argrepr = ('', 'str', 'repr', 'ascii')[arg] + elif deop == SET_FUNCTION_ATTRIBUTE: + argrepr = ', '.join(s for i, s in enumerate(FUNCTION_ATTR_FLAGS) + if arg & (1<' marker arrow as part of the line @@ -301,33 +478,44 @@ def _disassemble(self, lineno_width=3, mark_as_current=False, offset_width=4): fields = [] # Column: Source code line number if lineno_width: - if self.starts_line is not None: - lineno_fmt = "%%%dd" % lineno_width - 
fields.append(lineno_fmt % self.starts_line) + if self.starts_line: + lineno_fmt = "%%%dd" if self.line_number is not None else "%%%ds" + lineno_fmt = lineno_fmt % lineno_width + lineno = self.line_number if self.line_number is not None else '--' + fields.append(lineno_fmt % lineno) else: fields.append(' ' * lineno_width) + # Column: Label + if self.label is not None: + lbl = f"L{self.label}:" + fields.append(f"{lbl:>{self.label_width}}") + else: + fields.append(' ' * self.label_width) + # Column: Instruction offset from start of code sequence + if offset_width > 0: + fields.append(f"{repr(self.offset):>{offset_width}} ") # Column: Current instruction indicator if mark_as_current: fields.append('-->') else: fields.append(' ') - # Column: Jump target marker - if self.is_jump_target: - fields.append('>>') - else: - fields.append(' ') - # Column: Instruction offset from start of code sequence - fields.append(repr(self.offset).rjust(offset_width)) # Column: Opcode name fields.append(self.opname.ljust(_OPNAME_WIDTH)) # Column: Opcode argument if self.arg is not None: - fields.append(repr(self.arg).rjust(_OPARG_WIDTH)) + arg = repr(self.arg) + # If opname is longer than _OPNAME_WIDTH, we allow it to overflow into + # the space reserved for oparg. This results in fewer misaligned opargs + # in the disassembly output. + opname_excess = max(0, len(self.opname) - _OPNAME_WIDTH) + fields.append(repr(self.arg).rjust(_OPARG_WIDTH - opname_excess)) # Column: Opcode argument details if self.argrepr: fields.append('(' + self.argrepr + ')') return ' '.join(fields).rstrip() + def __str__(self): + return self._disassemble() def get_instructions(x, *, first_line=None, show_caches=False, adaptive=False): """Iterator for the opcodes in methods, functions or code @@ -351,7 +539,8 @@ def get_instructions(x, *, first_line=None, show_caches=False, adaptive=False): co.co_names, co.co_consts, linestarts, line_offset, co_positions=co.co_positions(), - show_caches=show_caches) + show_caches=show_caches, + original_code=co.co_code) def _get_const_value(op, arg, co_consts): """Helper to get the value of the const in a hasconst op. @@ -363,9 +552,8 @@ def _get_const_value(op, arg, co_consts): assert op in hasconst argval = UNKNOWN - if op == LOAD_CONST: - if co_consts is not None: - argval = co_consts[arg] + if co_consts is not None: + argval = co_consts[arg] return argval def _get_const_info(op, arg, co_consts): @@ -420,13 +608,15 @@ def _parse_exception_table(code): return entries def _is_backward_jump(op): - return 'JUMP_BACKWARD' in opname[op] + return opname[op] in ('JUMP_BACKWARD', + 'JUMP_BACKWARD_NO_INTERRUPT', + 'ENTER_EXECUTOR') def _get_instructions_bytes(code, varname_from_oparg=None, names=None, co_consts=None, linestarts=None, line_offset=0, exception_entries=(), co_positions=None, - show_caches=False): + show_caches=False, original_code=None): """Iterate over the instructions in a bytecode string. Generates a sequence of Instruction namedtuples giving the details of each @@ -435,72 +625,57 @@ def _get_instructions_bytes(code, varname_from_oparg=None, arguments. 
""" + # Use the basic, unadaptive code for finding labels and actually walking the + # bytecode, since replacements like ENTER_EXECUTOR and INSTRUMENTED_* can + # mess that logic up pretty badly: + original_code = original_code or code co_positions = co_positions or iter(()) get_name = None if names is None else names.__getitem__ - labels = set(findlabels(code)) - for start, end, target, _, _ in exception_entries: - for i in range(start, end): + + def make_labels_map(original_code, exception_entries): + jump_targets = set(findlabels(original_code)) + labels = set(jump_targets) + for start, end, target, _, _ in exception_entries: + labels.add(start) + labels.add(end) labels.add(target) - starts_line = None - for offset, op, arg in _unpack_opargs(code): + labels = sorted(labels) + labels_map = {offset: i+1 for (i, offset) in enumerate(sorted(labels))} + for e in exception_entries: + e.start_label = labels_map[e.start] + e.end_label = labels_map[e.end] + e.target_label = labels_map[e.target] + return labels_map + + labels_map = make_labels_map(original_code, exception_entries) + + exceptions_map = {} + for start, end, target, _, _ in exception_entries: + exceptions_map[start] = labels_map[target] + exceptions_map[end] = -1 + + starts_line = False + local_line_number = None + line_number = None + for offset, start_offset, op, arg in _unpack_opargs(original_code): if linestarts is not None: - starts_line = linestarts.get(offset, None) - if starts_line is not None: - starts_line += line_offset - is_jump_target = offset in labels - argval = None - argrepr = '' + starts_line = offset in linestarts + if starts_line: + local_line_number = linestarts[offset] + if local_line_number is not None: + line_number = local_line_number + line_offset + else: + line_number = None positions = Positions(*next(co_positions, ())) deop = _deoptop(op) - if arg is not None: - # Set argval to the dereferenced value of the argument when - # available, and argrepr to the string representation of argval. - # _disassemble_bytes needs the string repr of the - # raw name index for LOAD_GLOBAL, LOAD_CONST, etc. 
- argval = arg - if deop in hasconst: - argval, argrepr = _get_const_info(deop, arg, co_consts) - elif deop in hasname: - if deop == LOAD_GLOBAL: - argval, argrepr = _get_name_info(arg//2, get_name) - if (arg & 1) and argrepr: - argrepr = "NULL + " + argrepr - elif deop == LOAD_ATTR: - argval, argrepr = _get_name_info(arg//2, get_name) - if (arg & 1) and argrepr: - argrepr = "NULL|self + " + argrepr - else: - argval, argrepr = _get_name_info(arg, get_name) - elif deop in hasjabs: - argval = arg*2 - argrepr = "to " + repr(argval) - elif deop in hasjrel: - signed_arg = -arg if _is_backward_jump(deop) else arg - argval = offset + 2 + signed_arg*2 - if deop == FOR_ITER: - argval += 2 - argrepr = "to " + repr(argval) - elif deop in haslocal or deop in hasfree: - argval, argrepr = _get_name_info(arg, varname_from_oparg) - elif deop in hascompare: - argval = cmp_op[arg] - argrepr = argval - elif deop == FORMAT_VALUE: - argval, argrepr = FORMAT_VALUE_CONVERTERS[arg & 0x3] - argval = (argval, bool(arg & 0x4)) - if argval[1]: - if argrepr: - argrepr += ', ' - argrepr += 'with format' - elif deop == MAKE_FUNCTION: - argrepr = ', '.join(s for i, s in enumerate(MAKE_FUNCTION_FLAGS) - if arg & (1< 0: if depth is not None: depth = depth - 1 @@ -544,48 +720,70 @@ def _disassemble_recursive(co, *, file=None, depth=None, show_caches=False, adap print(file=file) print("Disassembly of %r:" % (x,), file=file) _disassemble_recursive( - x, file=file, depth=depth, show_caches=show_caches, adaptive=adaptive + x, file=file, depth=depth, show_caches=show_caches, + adaptive=adaptive, show_offsets=show_offsets ) def _disassemble_bytes(code, lasti=-1, varname_from_oparg=None, names=None, co_consts=None, linestarts=None, *, file=None, line_offset=0, exception_entries=(), - co_positions=None, show_caches=False): + co_positions=None, show_caches=False, original_code=None, + show_offsets=False): # Omit the line number column entirely if we have no line number info - show_lineno = bool(linestarts) + if bool(linestarts): + linestarts_ints = [line for line in linestarts.values() if line is not None] + show_lineno = len(linestarts_ints) > 0 + else: + show_lineno = False + if show_lineno: - maxlineno = max(linestarts.values()) + line_offset + maxlineno = max(linestarts_ints) + line_offset if maxlineno >= 1000: lineno_width = len(str(maxlineno)) else: lineno_width = 3 + + if lineno_width < len(str(None)) and None in linestarts.values(): + lineno_width = len(str(None)) else: lineno_width = 0 - maxoffset = len(code) - 2 - if maxoffset >= 10000: - offset_width = len(str(maxoffset)) + if show_offsets: + maxoffset = len(code) - 2 + if maxoffset >= 10000: + offset_width = len(str(maxoffset)) + else: + offset_width = 4 else: - offset_width = 4 + offset_width = 0 + for instr in _get_instructions_bytes(code, varname_from_oparg, names, co_consts, linestarts, line_offset=line_offset, exception_entries=exception_entries, co_positions=co_positions, - show_caches=show_caches): + show_caches=show_caches, + original_code=original_code): new_source_line = (show_lineno and - instr.starts_line is not None and + instr.starts_line and instr.offset > 0) if new_source_line: print(file=file) - is_current_instr = instr.offset == lasti + if show_caches: + is_current_instr = instr.offset == lasti + else: + # Each CACHE takes 2 bytes + is_current_instr = instr.offset <= lasti \ + <= instr.offset + 2 * _get_cache_size(_all_opname[_deoptop(instr.opcode)]) print(instr._disassemble(lineno_width, is_current_instr, offset_width), file=file) if exception_entries: 
print("ExceptionTable:", file=file) for entry in exception_entries: lasti = " lasti" if entry.lasti else "" - end = entry.end-2 - print(f" {entry.start} to {end} -> {entry.target} [{entry.depth}]{lasti}", file=file) + start = entry.start_label + end = entry.end_label + target = entry.target_label + print(f" L{start} to L{end} -> L{target} [{entry.depth}]{lasti}", file=file) def _disassemble_str(source, **kwargs): """Compile the source string, then disassemble the code object.""" @@ -601,6 +799,7 @@ def _disassemble_str(source, **kwargs): def _unpack_opargs(code): extended_arg = 0 + extended_args_offset = 0 # Number of EXTENDED_ARG instructions preceding the current instruction caches = 0 for i in range(0, len(code), 2): # Skip inline CACHE entries: @@ -609,7 +808,7 @@ def _unpack_opargs(code): continue op = code[i] deop = _deoptop(op) - caches = _inline_cache_entries[deop] + caches = _get_cache_size(_all_opname[deop]) if deop in hasarg: arg = code[i+1] | extended_arg extended_arg = (arg << 8) if deop == EXTENDED_ARG else 0 @@ -621,7 +820,13 @@ def _unpack_opargs(code): else: arg = None extended_arg = 0 - yield (i, op, arg) + if deop == EXTENDED_ARG: + extended_args_offset += 1 + yield (i, i, op, arg) + else: + start_offset = i - extended_args_offset*2 + yield (i, start_offset, op, arg) + extended_args_offset = 0 def findlabels(code): """Detect all offsets in a byte code which are jump targets. @@ -630,18 +835,10 @@ def findlabels(code): """ labels = [] - for offset, op, arg in _unpack_opargs(code): + for offset, _, op, arg in _unpack_opargs(code): if arg is not None: - deop = _deoptop(op) - if deop in hasjrel: - if _is_backward_jump(deop): - arg = -arg - label = offset + 2 + arg*2 - if deop == FOR_ITER: - label += 2 - elif deop in hasjabs: - label = arg*2 - else: + label = _get_jump_target(op, arg, offset) + if label is None: continue if label not in labels: labels.append(label) @@ -651,10 +848,12 @@ def findlinestarts(code): """Find the offsets in a byte code which are start of lines in the source. Generate pairs (offset, lineno) + lineno will be an integer or None the offset does not have a source line. """ - lastline = None + + lastline = False # None is a valid line number for start, end, line in code.co_lines(): - if line is not None and line != lastline: + if line is not lastline: lastline = line yield start, line return @@ -667,11 +866,10 @@ def _find_imports(co): the corresponding args to __import__. """ IMPORT_NAME = opmap['IMPORT_NAME'] - LOAD_CONST = opmap['LOAD_CONST'] consts = co.co_consts names = co.co_names - opargs = [(op, arg) for _, op, arg in _unpack_opargs(co.co_code) + opargs = [(op, arg) for _, _, op, arg in _unpack_opargs(co.co_code) if op != EXTENDED_ARG] for i, (op, oparg) in enumerate(opargs): if op == IMPORT_NAME and i >= 2: @@ -693,7 +891,7 @@ def _find_store_names(co): } names = co.co_names - for _, op, arg in _unpack_opargs(co.co_code): + for _, _, op, arg in _unpack_opargs(co.co_code): if op in STORE_OPS: yield names[arg] @@ -706,7 +904,7 @@ class Bytecode: Iterating over this yields the bytecode operations as Instruction instances. 
""" - def __init__(self, x, *, first_line=None, current_offset=None, show_caches=False, adaptive=False): + def __init__(self, x, *, first_line=None, current_offset=None, show_caches=False, adaptive=False, show_offsets=False): self.codeobj = co = _get_code_object(x) if first_line is None: self.first_line = co.co_firstlineno @@ -720,6 +918,7 @@ def __init__(self, x, *, first_line=None, current_offset=None, show_caches=False self.exception_entries = _parse_exception_table(co) self.show_caches = show_caches self.adaptive = adaptive + self.show_offsets = show_offsets def __iter__(self): co = self.codeobj @@ -730,7 +929,8 @@ def __iter__(self): line_offset=self._line_offset, exception_entries=self.exception_entries, co_positions=co.co_positions(), - show_caches=self.show_caches) + show_caches=self.show_caches, + original_code=co.co_code) def __repr__(self): return "{}({!r})".format(self.__class__.__name__, @@ -766,21 +966,26 @@ def dis(self): lasti=offset, exception_entries=self.exception_entries, co_positions=co.co_positions(), - show_caches=self.show_caches) + show_caches=self.show_caches, + original_code=co.co_code, + show_offsets=self.show_offsets) return output.getvalue() -def _test(): - """Simple test program to disassemble a file.""" +def main(): import argparse parser = argparse.ArgumentParser() + parser.add_argument('-C', '--show-caches', action='store_true', + help='show inline caches') + parser.add_argument('-O', '--show-offsets', action='store_true', + help='show instruction offsets') parser.add_argument('infile', type=argparse.FileType('rb'), nargs='?', default='-') args = parser.parse_args() with args.infile as infile: source = infile.read() code = compile(source, args.infile.name, "exec") - dis(code) + dis(code, show_caches=args.show_caches, show_offsets=args.show_offsets) if __name__ == "__main__": - _test() + main() diff --git a/Lib/distutils/README b/Lib/distutils/README deleted file mode 100644 index 73bd25187c02709..000000000000000 --- a/Lib/distutils/README +++ /dev/null @@ -1,11 +0,0 @@ -This directory contains the Distutils package. - -There's a full documentation available at: - - https://docs.python.org/distutils/ - -The Distutils-SIG web page is also a good starting point: - - https://www.python.org/sigs/distutils-sig/ - -$Id$ diff --git a/Lib/distutils/__init__.py b/Lib/distutils/__init__.py deleted file mode 100644 index fdad6f65a785627..000000000000000 --- a/Lib/distutils/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -"""distutils - -The main package for the Python Module Distribution Utilities. Normally -used from a setup script as - - from distutils.core import setup - - setup (...) -""" - -import sys -import warnings - -__version__ = sys.version[:sys.version.index(' ')] - -_DEPRECATION_MESSAGE = ("The distutils package is deprecated and slated for " - "removal in Python 3.12. Use setuptools or check " - "PEP 632 for potential alternatives") -warnings.warn(_DEPRECATION_MESSAGE, - DeprecationWarning, 2) diff --git a/Lib/distutils/_msvccompiler.py b/Lib/distutils/_msvccompiler.py deleted file mode 100644 index af8099a4078192e..000000000000000 --- a/Lib/distutils/_msvccompiler.py +++ /dev/null @@ -1,539 +0,0 @@ -"""distutils._msvccompiler - -Contains MSVCCompiler, an implementation of the abstract CCompiler class -for Microsoft Visual Studio 2015. - -The module is compatible with VS 2015 and later. You can find legacy support -for older versions in distutils.msvc9compiler and distutils.msvccompiler. 
-""" - -# Written by Perry Stoll -# hacked by Robin Becker and Thomas Heller to do a better job of -# finding DevStudio (through the registry) -# ported to VS 2005 and VS 2008 by Christian Heimes -# ported to VS 2015 by Steve Dower - -import os -import subprocess -import winreg - -from distutils.errors import DistutilsExecError, DistutilsPlatformError, \ - CompileError, LibError, LinkError -from distutils.ccompiler import CCompiler, gen_lib_options -from distutils import log -from distutils.util import get_platform - -from itertools import count - -def _find_vc2015(): - try: - key = winreg.OpenKeyEx( - winreg.HKEY_LOCAL_MACHINE, - r"Software\Microsoft\VisualStudio\SxS\VC7", - access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY - ) - except OSError: - log.debug("Visual C++ is not registered") - return None, None - - best_version = 0 - best_dir = None - with key: - for i in count(): - try: - v, vc_dir, vt = winreg.EnumValue(key, i) - except OSError: - break - if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir): - try: - version = int(float(v)) - except (ValueError, TypeError): - continue - if version >= 14 and version > best_version: - best_version, best_dir = version, vc_dir - return best_version, best_dir - -def _find_vc2017(): - """Returns "15, path" based on the result of invoking vswhere.exe - If no install is found, returns "None, None" - - The version is returned to avoid unnecessarily changing the function - result. It may be ignored when the path is not None. - - If vswhere.exe is not available, by definition, VS 2017 is not - installed. - """ - root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") - if not root: - return None, None - - try: - path = subprocess.check_output([ - os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), - "-latest", - "-prerelease", - "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", - "-property", "installationPath", - "-products", "*", - ], encoding="mbcs", errors="strict").strip() - except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): - return None, None - - path = os.path.join(path, "VC", "Auxiliary", "Build") - if os.path.isdir(path): - return 15, path - - return None, None - -PLAT_SPEC_TO_RUNTIME = { - 'x86' : 'x86', - 'x86_amd64' : 'x64', - 'x86_arm' : 'arm', - 'x86_arm64' : 'arm64' -} - -def _find_vcvarsall(plat_spec): - # bpo-38597: Removed vcruntime return value - _, best_dir = _find_vc2017() - - if not best_dir: - best_version, best_dir = _find_vc2015() - - if not best_dir: - log.debug("No suitable Visual C++ version found") - return None, None - - vcvarsall = os.path.join(best_dir, "vcvarsall.bat") - if not os.path.isfile(vcvarsall): - log.debug("%s cannot be found", vcvarsall) - return None, None - - return vcvarsall, None - -def _get_vc_env(plat_spec): - if os.getenv("DISTUTILS_USE_SDK"): - return { - key.lower(): value - for key, value in os.environ.items() - } - - vcvarsall, _ = _find_vcvarsall(plat_spec) - if not vcvarsall: - raise DistutilsPlatformError("Unable to find vcvarsall.bat") - - try: - out = subprocess.check_output( - 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec), - stderr=subprocess.STDOUT, - ).decode('utf-16le', errors='replace') - except subprocess.CalledProcessError as exc: - log.error(exc.output) - raise DistutilsPlatformError("Error executing {}" - .format(exc.cmd)) - - env = { - key.lower(): value - for key, _, value in - (line.partition('=') for line in out.splitlines()) - if key and value - } - - return env - -def _find_exe(exe, 
paths=None): - """Return path to an MSVC executable program. - - Tries to find the program in several places: first, one of the - MSVC program search paths from the registry; next, the directories - in the PATH environment variable. If any of those work, return an - absolute path that is known to exist. If none of them work, just - return the original program name, 'exe'. - """ - if not paths: - paths = os.getenv('path').split(os.pathsep) - for p in paths: - fn = os.path.join(os.path.abspath(p), exe) - if os.path.isfile(fn): - return fn - return exe - -# A map keyed by get_platform() return values to values accepted by -# 'vcvarsall.bat'. Always cross-compile from x86 to work with the -# lighter-weight MSVC installs that do not include native 64-bit tools. -PLAT_TO_VCVARS = { - 'win32' : 'x86', - 'win-amd64' : 'x86_amd64', - 'win-arm32' : 'x86_arm', - 'win-arm64' : 'x86_arm64' -} - -class MSVCCompiler(CCompiler) : - """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" - - compiler_type = 'msvc' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. - executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - _rc_extensions = ['.rc'] - _mc_extensions = ['.mc'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. - src_extensions = (_c_extensions + _cpp_extensions + - _rc_extensions + _mc_extensions) - res_extension = '.res' - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - - def __init__(self, verbose=0, dry_run=0, force=0): - CCompiler.__init__ (self, verbose, dry_run, force) - # target platform (.plat_name is consistent with 'bdist') - self.plat_name = None - self.initialized = False - - def initialize(self, plat_name=None): - # multi-init means we would need to check platform same each time... - assert not self.initialized, "don't init multiple times" - if plat_name is None: - plat_name = get_platform() - # sanity check for platforms to prevent obscure errors later. - if plat_name not in PLAT_TO_VCVARS: - raise DistutilsPlatformError("--plat-name must be one of {}" - .format(tuple(PLAT_TO_VCVARS))) - - # Get the vcvarsall.bat spec for the requested platform. 
- plat_spec = PLAT_TO_VCVARS[plat_name] - - vc_env = _get_vc_env(plat_spec) - if not vc_env: - raise DistutilsPlatformError("Unable to find a compatible " - "Visual Studio installation.") - - self._paths = vc_env.get('path', '') - paths = self._paths.split(os.pathsep) - self.cc = _find_exe("cl.exe", paths) - self.linker = _find_exe("link.exe", paths) - self.lib = _find_exe("lib.exe", paths) - self.rc = _find_exe("rc.exe", paths) # resource compiler - self.mc = _find_exe("mc.exe", paths) # message compiler - self.mt = _find_exe("mt.exe", paths) # message compiler - - for dir in vc_env.get('include', '').split(os.pathsep): - if dir: - self.add_include_dir(dir.rstrip(os.sep)) - - for dir in vc_env.get('lib', '').split(os.pathsep): - if dir: - self.add_library_dir(dir.rstrip(os.sep)) - - self.preprocess_options = None - # bpo-38597: Always compile with dynamic linking - # Future releases of Python 3.x will include all past - # versions of vcruntime*.dll for compatibility. - self.compile_options = [ - '/nologo', '/Ox', '/W3', '/GL', '/DNDEBUG', '/MD' - ] - - self.compile_options_debug = [ - '/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG' - ] - - ldflags = [ - '/nologo', '/INCREMENTAL:NO', '/LTCG' - ] - - ldflags_debug = [ - '/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL' - ] - - self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1'] - self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1'] - self.ldflags_shared = [*ldflags, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO'] - self.ldflags_shared_debug = [*ldflags_debug, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO'] - self.ldflags_static = [*ldflags] - self.ldflags_static_debug = [*ldflags_debug] - - self._ldflags = { - (CCompiler.EXECUTABLE, None): self.ldflags_exe, - (CCompiler.EXECUTABLE, False): self.ldflags_exe, - (CCompiler.EXECUTABLE, True): self.ldflags_exe_debug, - (CCompiler.SHARED_OBJECT, None): self.ldflags_shared, - (CCompiler.SHARED_OBJECT, False): self.ldflags_shared, - (CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug, - (CCompiler.SHARED_LIBRARY, None): self.ldflags_static, - (CCompiler.SHARED_LIBRARY, False): self.ldflags_static, - (CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug, - } - - self.initialized = True - - # -- Worker methods ------------------------------------------------ - - def object_filenames(self, - source_filenames, - strip_dir=0, - output_dir=''): - ext_map = { - **{ext: self.obj_extension for ext in self.src_extensions}, - **{ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions}, - } - - output_dir = output_dir or '' - - def make_out_path(p): - base, ext = os.path.splitext(p) - if strip_dir: - base = os.path.basename(base) - else: - _, base = os.path.splitdrive(base) - if base.startswith((os.path.sep, os.path.altsep)): - base = base[1:] - try: - # XXX: This may produce absurdly long paths. We should check - # the length of the result and trim base until we fit within - # 260 characters. 
- return os.path.join(output_dir, base + ext_map[ext]) - except LookupError: - # Better to raise an exception instead of silently continuing - # and later complain about sources and targets having - # different lengths - raise CompileError("Don't know how to compile {}".format(p)) - - return list(map(make_out_path, source_filenames)) - - - def compile(self, sources, - output_dir=None, macros=None, include_dirs=None, debug=0, - extra_preargs=None, extra_postargs=None, depends=None): - - if not self.initialized: - self.initialize() - compile_info = self._setup_compile(output_dir, macros, include_dirs, - sources, depends, extra_postargs) - macros, objects, extra_postargs, pp_opts, build = compile_info - - compile_opts = extra_preargs or [] - compile_opts.append('/c') - if debug: - compile_opts.extend(self.compile_options_debug) - else: - compile_opts.extend(self.compile_options) - - - add_cpp_opts = False - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - if debug: - # pass the full pathname to MSVC in debug mode, - # this allows the debugger to find the source file - # without asking the user to browse for it - src = os.path.abspath(src) - - if ext in self._c_extensions: - input_opt = "/Tc" + src - elif ext in self._cpp_extensions: - input_opt = "/Tp" + src - add_cpp_opts = True - elif ext in self._rc_extensions: - # compile .RC to .RES file - input_opt = src - output_opt = "/fo" + obj - try: - self.spawn([self.rc] + pp_opts + [output_opt, input_opt]) - except DistutilsExecError as msg: - raise CompileError(msg) - continue - elif ext in self._mc_extensions: - # Compile .MC to .RC file to .RES file. - # * '-h dir' specifies the directory for the - # generated include file - # * '-r dir' specifies the target directory of the - # generated RC file and the binary message resource - # it includes - # - # For now (since there are no options to change this), - # we use the source-directory for the include file and - # the build directory for the RC file and message - # resources. This works at least for win32all. - h_dir = os.path.dirname(src) - rc_dir = os.path.dirname(obj) - try: - # first compile .MC to .RC and .H file - self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src]) - base, _ = os.path.splitext(os.path.basename (src)) - rc_file = os.path.join(rc_dir, base + '.rc') - # then compile .RC to .RES file - self.spawn([self.rc, "/fo" + obj, rc_file]) - - except DistutilsExecError as msg: - raise CompileError(msg) - continue - else: - # how to handle this file? - raise CompileError("Don't know how to compile {} to {}" - .format(src, obj)) - - args = [self.cc] + compile_opts + pp_opts - if add_cpp_opts: - args.append('/EHsc') - args.append(input_opt) - args.append("/Fo" + obj) - args.extend(extra_postargs) - - try: - self.spawn(args) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - - def create_static_lib(self, - objects, - output_libname, - output_dir=None, - debug=0, - target_lang=None): - - if not self.initialized: - self.initialize() - objects, output_dir = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, - output_dir=output_dir) - - if self._need_link(objects, output_filename): - lib_args = objects + ['/OUT:' + output_filename] - if debug: - pass # XXX what goes here? 
- try: - log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args)) - self.spawn([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - - def link(self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - - if not self.initialized: - self.initialize() - objects, output_dir = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, - runtime_library_dirs) - libraries, library_dirs, runtime_library_dirs = fixed_args - - if runtime_library_dirs: - self.warn("I don't know what to do with 'runtime_library_dirs': " - + str(runtime_library_dirs)) - - lib_opts = gen_lib_options(self, - library_dirs, runtime_library_dirs, - libraries) - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - ldflags = self._ldflags[target_desc, debug] - - export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])] - - ld_args = (ldflags + lib_opts + export_opts + - objects + ['/OUT:' + output_filename]) - - # The MSVC linker generates .lib and .exp files, which cannot be - # suppressed by any linker switches. The .lib files may even be - # needed! Make sure they are generated in the temporary build - # directory. Since they have different names for debug and release - # builds, they can go into the same directory. - build_temp = os.path.dirname(objects[0]) - if export_symbols is not None: - (dll_name, dll_ext) = os.path.splitext( - os.path.basename(output_filename)) - implib_file = os.path.join( - build_temp, - self.library_filename(dll_name)) - ld_args.append ('/IMPLIB:' + implib_file) - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - output_dir = os.path.dirname(os.path.abspath(output_filename)) - self.mkpath(output_dir) - try: - log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args)) - self.spawn([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def spawn(self, cmd): - old_path = os.getenv('path') - try: - os.environ['path'] = self._paths - return super().spawn(cmd) - finally: - os.environ['path'] = old_path - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function, in - # ccompiler.py. - - def library_dir_option(self, dir): - return "/LIBPATH:" + dir - - def runtime_library_dir_option(self, dir): - raise DistutilsPlatformError( - "don't know how to set runtime library search path for MSVC") - - def library_option(self, lib): - return self.library_filename(lib) - - def find_library_file(self, dirs, lib, debug=0): - # Prefer a debugging library if found (and requested), but deal - # with it if we don't have one. 
- if debug: - try_names = [lib + "_d", lib] - else: - try_names = [lib] - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename(name)) - if os.path.isfile(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None diff --git a/Lib/distutils/archive_util.py b/Lib/distutils/archive_util.py deleted file mode 100644 index 565a3117b4b5e1a..000000000000000 --- a/Lib/distutils/archive_util.py +++ /dev/null @@ -1,256 +0,0 @@ -"""distutils.archive_util - -Utility functions for creating archive files (tarballs, zip files, -that sort of thing).""" - -import os -from warnings import warn -import sys - -try: - import zipfile -except ImportError: - zipfile = None - - -from distutils.errors import DistutilsExecError -from distutils.spawn import spawn -from distutils.dir_util import mkpath -from distutils import log - -try: - from pwd import getpwnam -except ImportError: - getpwnam = None - -try: - from grp import getgrnam -except ImportError: - getgrnam = None - -def _get_gid(name): - """Returns a gid, given a group name.""" - if getgrnam is None or name is None: - return None - try: - result = getgrnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _get_uid(name): - """Returns an uid, given a user name.""" - if getpwnam is None or name is None: - return None - try: - result = getpwnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, - owner=None, group=None): - """Create a (possibly compressed) tar file from all the files under - 'base_dir'. - - 'compress' must be "gzip" (the default), "bzip2", "xz", "compress", or - None. ("compress" will be deprecated in Python 3.2) - - 'owner' and 'group' can be used to define an owner and a group for the - archive that is being built. If not provided, the current owner and group - will be used. - - The output tar file will be named 'base_dir' + ".tar", possibly plus - the appropriate compression extension (".gz", ".bz2", ".xz" or ".Z"). - - Returns the output filename. 
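A usage sketch of the make_tarball() helper whose docstring appears above (the project directory name is hypothetical, and the call only works on a Python that still ships distutils; shutil.make_archive is the maintained equivalent):

    from distutils.archive_util import make_tarball

    # Assuming a ./myproj-1.0 directory exists, this writes
    # release/myproj-1.0.tar.gz and returns that filename.
    archive = make_tarball('release/myproj-1.0', 'myproj-1.0', compress='gzip')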
- """ - tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', 'xz': 'xz', None: '', - 'compress': ''} - compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'xz': '.xz', - 'compress': '.Z'} - - # flags for compression program, each element of list will be an argument - if compress is not None and compress not in compress_ext.keys(): - raise ValueError( - "bad value for 'compress': must be None, 'gzip', 'bzip2', " - "'xz' or 'compress'") - - archive_name = base_name + '.tar' - if compress != 'compress': - archive_name += compress_ext.get(compress, '') - - mkpath(os.path.dirname(archive_name), dry_run=dry_run) - - # creating the tarball - import tarfile # late import so Python build itself doesn't break - - log.info('Creating tar archive') - - uid = _get_uid(owner) - gid = _get_gid(group) - - def _set_uid_gid(tarinfo): - if gid is not None: - tarinfo.gid = gid - tarinfo.gname = group - if uid is not None: - tarinfo.uid = uid - tarinfo.uname = owner - return tarinfo - - if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) - try: - tar.add(base_dir, filter=_set_uid_gid) - finally: - tar.close() - - # compression using `compress` - if compress == 'compress': - warn("'compress' will be deprecated.", PendingDeprecationWarning) - # the option varies depending on the platform - compressed_name = archive_name + compress_ext[compress] - if sys.platform == 'win32': - cmd = [compress, archive_name, compressed_name] - else: - cmd = [compress, '-f', archive_name] - spawn(cmd, dry_run=dry_run) - return compressed_name - - return archive_name - -def make_zipfile(base_name, base_dir, verbose=0, dry_run=0): - """Create a zip file from all the files under 'base_dir'. - - The output zip file will be named 'base_name' + ".zip". Uses either the - "zipfile" Python module (if available) or the InfoZIP "zip" utility - (if installed and found on the default search path). If neither tool is - available, raises DistutilsExecError. Returns the name of the output zip - file. - """ - zip_filename = base_name + ".zip" - mkpath(os.path.dirname(zip_filename), dry_run=dry_run) - - # If zipfile module is not available, try spawning an external - # 'zip' command. - if zipfile is None: - if verbose: - zipoptions = "-r" - else: - zipoptions = "-rq" - - try: - spawn(["zip", zipoptions, zip_filename, base_dir], - dry_run=dry_run) - except DistutilsExecError: - # XXX really should distinguish between "couldn't find - # external 'zip' command" and "zip failed". 
- raise DistutilsExecError(("unable to create zip file '%s': " - "could neither import the 'zipfile' module nor " - "find a standalone zip utility") % zip_filename) - - else: - log.info("creating '%s' and adding '%s' to it", - zip_filename, base_dir) - - if not dry_run: - try: - zip = zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_DEFLATED) - except RuntimeError: - zip = zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_STORED) - - with zip: - if base_dir != os.curdir: - path = os.path.normpath(os.path.join(base_dir, '')) - zip.write(path, path) - log.info("adding '%s'", path) - for dirpath, dirnames, filenames in os.walk(base_dir): - for name in dirnames: - path = os.path.normpath(os.path.join(dirpath, name, '')) - zip.write(path, path) - log.info("adding '%s'", path) - for name in filenames: - path = os.path.normpath(os.path.join(dirpath, name)) - if os.path.isfile(path): - zip.write(path, path) - log.info("adding '%s'", path) - - return zip_filename - -ARCHIVE_FORMATS = { - 'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), - 'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), - 'xztar': (make_tarball, [('compress', 'xz')], "xz'ed tar-file"), - 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"), - 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"), - 'zip': (make_zipfile, [],"ZIP file") - } - -def check_archive_formats(formats): - """Returns the first format from the 'format' list that is unknown. - - If all formats are known, returns None - """ - for format in formats: - if format not in ARCHIVE_FORMATS: - return format - return None - -def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, - dry_run=0, owner=None, group=None): - """Create an archive file (eg. zip or tar). - - 'base_name' is the name of the file to create, minus any format-specific - extension; 'format' is the archive format: one of "zip", "tar", "gztar", - "bztar", "xztar", or "ztar". - - 'root_dir' is a directory that will be the root directory of the - archive; ie. we typically chdir into 'root_dir' before creating the - archive. 'base_dir' is the directory where we start archiving from; - ie. 'base_dir' will be the common prefix of all files and - directories in the archive. 'root_dir' and 'base_dir' both default - to the current directory. Returns the name of the archive file. - - 'owner' and 'group' are used when creating a tar archive. By default, - uses the current owner and group. 
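A sketch of make_archive() as described above; the format names are the keys of the ARCHIVE_FORMATS table, and the directory layout is hypothetical:

    from distutils.archive_util import make_archive

    # Change into ./dist, archive its myproj-1.0 subdirectory as a gzip'ed tar
    # file, and return the resulting filename.
    filename = make_archive('myproj-1.0', 'gztar',
                            root_dir='dist', base_dir='myproj-1.0')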
- """ - save_cwd = os.getcwd() - if root_dir is not None: - log.debug("changing into '%s'", root_dir) - base_name = os.path.abspath(base_name) - if not dry_run: - os.chdir(root_dir) - - if base_dir is None: - base_dir = os.curdir - - kwargs = {'dry_run': dry_run} - - try: - format_info = ARCHIVE_FORMATS[format] - except KeyError: - raise ValueError("unknown archive format '%s'" % format) - - func = format_info[0] - for arg, val in format_info[1]: - kwargs[arg] = val - - if format != 'zip': - kwargs['owner'] = owner - kwargs['group'] = group - - try: - filename = func(base_name, base_dir, **kwargs) - finally: - if root_dir is not None: - log.debug("changing back to '%s'", save_cwd) - os.chdir(save_cwd) - - return filename diff --git a/Lib/distutils/bcppcompiler.py b/Lib/distutils/bcppcompiler.py deleted file mode 100644 index 071fea5d038cb04..000000000000000 --- a/Lib/distutils/bcppcompiler.py +++ /dev/null @@ -1,393 +0,0 @@ -"""distutils.bcppcompiler - -Contains BorlandCCompiler, an implementation of the abstract CCompiler class -for the Borland C++ compiler. -""" - -# This implementation by Lyle Johnson, based on the original msvccompiler.py -# module and using the directions originally published by Gordon Williams. - -# XXX looks like there's a LOT of overlap between these two classes: -# someone should sit down and factor out the common code as -# WindowsCCompiler! --GPW - - -import os -from distutils.errors import \ - DistutilsExecError, \ - CompileError, LibError, LinkError, UnknownFileError -from distutils.ccompiler import \ - CCompiler, gen_preprocess_options -from distutils.file_util import write_file -from distutils.dep_util import newer -from distutils import log - -class BCPPCompiler(CCompiler) : - """Concrete class that implements an interface to the Borland C/C++ - compiler, as defined by the CCompiler abstract class. - """ - - compiler_type = 'bcpp' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. - executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. - src_extensions = _c_extensions + _cpp_extensions - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - CCompiler.__init__ (self, verbose, dry_run, force) - - # These executables are assumed to all be in the path. - # Borland doesn't seem to use any special registry settings to - # indicate their installation locations. 
- - self.cc = "bcc32.exe" - self.linker = "ilink32.exe" - self.lib = "tlib.exe" - - self.preprocess_options = None - self.compile_options = ['/tWM', '/O2', '/q', '/g0'] - self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0'] - - self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x'] - self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x'] - self.ldflags_static = [] - self.ldflags_exe = ['/Gn', '/q', '/x'] - self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r'] - - - # -- Worker methods ------------------------------------------------ - - def compile(self, sources, - output_dir=None, macros=None, include_dirs=None, debug=0, - extra_preargs=None, extra_postargs=None, depends=None): - - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - compile_opts = extra_preargs or [] - compile_opts.append ('-c') - if debug: - compile_opts.extend (self.compile_options_debug) - else: - compile_opts.extend (self.compile_options) - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - # XXX why do the normpath here? - src = os.path.normpath(src) - obj = os.path.normpath(obj) - # XXX _setup_compile() did a mkpath() too but before the normpath. - # Is it possible to skip the normpath? - self.mkpath(os.path.dirname(obj)) - - if ext == '.res': - # This is already a binary file -- skip it. - continue # the 'for' loop - if ext == '.rc': - # This needs to be compiled to a .res file -- do it now. - try: - self.spawn (["brcc32", "-fo", obj, src]) - except DistutilsExecError as msg: - raise CompileError(msg) - continue # the 'for' loop - - # The next two are both for the real compiler. - if ext in self._c_extensions: - input_opt = "" - elif ext in self._cpp_extensions: - input_opt = "-P" - else: - # Unknown file type -- no extra options. The compiler - # will probably fail, but let it just in case this is a - # file the compiler recognizes even if we don't. - input_opt = "" - - output_opt = "-o" + obj - - # Compiler command line syntax is: "bcc32 [options] file(s)". - # Note that the source file names must appear at the end of - # the command line. - try: - self.spawn ([self.cc] + compile_opts + pp_opts + - [input_opt, output_opt] + - extra_postargs + [src]) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - # compile () - - - def create_static_lib (self, - objects, - output_libname, - output_dir=None, - debug=0, - target_lang=None): - - (objects, output_dir) = self._fix_object_args (objects, output_dir) - output_filename = \ - self.library_filename (output_libname, output_dir=output_dir) - - if self._need_link (objects, output_filename): - lib_args = [output_filename, '/u'] + objects - if debug: - pass # XXX what goes here? - try: - self.spawn ([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - # create_static_lib () - - - def link (self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - - # XXX this ignores 'build_temp'! 
should follow the lead of - # msvccompiler.py - - (objects, output_dir) = self._fix_object_args (objects, output_dir) - (libraries, library_dirs, runtime_library_dirs) = \ - self._fix_lib_args (libraries, library_dirs, runtime_library_dirs) - - if runtime_library_dirs: - log.warn("I don't know what to do with 'runtime_library_dirs': %s", - str(runtime_library_dirs)) - - if output_dir is not None: - output_filename = os.path.join (output_dir, output_filename) - - if self._need_link (objects, output_filename): - - # Figure out linker args based on type of target. - if target_desc == CCompiler.EXECUTABLE: - startup_obj = 'c0w32' - if debug: - ld_args = self.ldflags_exe_debug[:] - else: - ld_args = self.ldflags_exe[:] - else: - startup_obj = 'c0d32' - if debug: - ld_args = self.ldflags_shared_debug[:] - else: - ld_args = self.ldflags_shared[:] - - - # Create a temporary exports file for use by the linker - if export_symbols is None: - def_file = '' - else: - head, tail = os.path.split (output_filename) - modname, ext = os.path.splitext (tail) - temp_dir = os.path.dirname(objects[0]) # preserve tree structure - def_file = os.path.join (temp_dir, '%s.def' % modname) - contents = ['EXPORTS'] - for sym in (export_symbols or []): - contents.append(' %s=_%s' % (sym, sym)) - self.execute(write_file, (def_file, contents), - "writing %s" % def_file) - - # Borland C++ has problems with '/' in paths - objects2 = map(os.path.normpath, objects) - # split objects in .obj and .res files - # Borland C++ needs them at different positions in the command line - objects = [startup_obj] - resources = [] - for file in objects2: - (base, ext) = os.path.splitext(os.path.normcase(file)) - if ext == '.res': - resources.append(file) - else: - objects.append(file) - - - for l in library_dirs: - ld_args.append("/L%s" % os.path.normpath(l)) - ld_args.append("/L.") # we sometimes use relative paths - - # list of object files - ld_args.extend(objects) - - # XXX the command-line syntax for Borland C++ is a bit wonky; - # certain filenames are jammed together in one big string, but - # comma-delimited. This doesn't mesh too well with the - # Unix-centric attitude (with a DOS/Windows quoting hack) of - # 'spawn()', so constructing the argument list is a bit - # awkward. Note that doing the obvious thing and jamming all - # the filenames and commas into one argument would be wrong, - # because 'spawn()' would quote any filenames with spaces in - # them. Arghghh!. Apparently it works fine as coded... 
- - # name of dll/exe file - ld_args.extend([',',output_filename]) - # no map file and start libraries - ld_args.append(',,') - - for lib in libraries: - # see if we find it and if there is a bcpp specific lib - # (xxx_bcpp.lib) - libfile = self.find_library_file(library_dirs, lib, debug) - if libfile is None: - ld_args.append(lib) - # probably a BCPP internal library -- don't warn - else: - # full name which prefers bcpp_xxx.lib over xxx.lib - ld_args.append(libfile) - - # some default libraries - ld_args.append ('import32') - ld_args.append ('cw32mt') - - # def file for export symbols - ld_args.extend([',',def_file]) - # add resource files - ld_args.append(',') - ld_args.extend(resources) - - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - self.mkpath (os.path.dirname (output_filename)) - try: - self.spawn ([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - - else: - log.debug("skipping %s (up-to-date)", output_filename) - - # link () - - # -- Miscellaneous methods ----------------------------------------- - - - def find_library_file (self, dirs, lib, debug=0): - # List of effective library names to try, in order of preference: - # xxx_bcpp.lib is better than xxx.lib - # and xxx_d.lib is better than xxx.lib if debug is set - # - # The "_bcpp" suffix is to handle a Python installation for people - # with multiple compilers (primarily Distutils hackers, I suspect - # ;-). The idea is they'd have one static library for each - # compiler they care about, since (almost?) every Windows compiler - # seems to have a different format for static libraries. - if debug: - dlib = (lib + "_d") - try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib) - else: - try_names = (lib + "_bcpp", lib) - - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename(name)) - if os.path.exists(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None - - # overwrite the one from CCompiler to support rc and res-files - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - if ext not in (self.src_extensions + ['.rc','.res']): - raise UnknownFileError("unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res': - # these can go unchanged - obj_names.append (os.path.join (output_dir, base + ext)) - elif ext == '.rc': - # these need to be compiled to .res-files - obj_names.append (os.path.join (output_dir, base + '.res')) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - def preprocess (self, - source, - output_file=None, - macros=None, - include_dirs=None, - extra_preargs=None, - extra_postargs=None): - - (_, macros, include_dirs) = \ - self._fix_compile_args(None, macros, include_dirs) - pp_opts = gen_preprocess_options(macros, include_dirs) - pp_args = ['cpp32.exe'] + pp_opts - if output_file is not None: - pp_args.append('-o' + output_file) - if extra_preargs: - pp_args[:0] = extra_preargs - if extra_postargs: - pp_args.extend(extra_postargs) - pp_args.append(source) - - # We need to preprocess: either we're being forced to, or the - # source file is newer 
than the target (or the target doesn't - # exist). - if self.force or output_file is None or newer(source, output_file): - if output_file: - self.mkpath(os.path.dirname(output_file)) - try: - self.spawn(pp_args) - except DistutilsExecError as msg: - print(msg) - raise CompileError(msg) - - # preprocess() diff --git a/Lib/distutils/ccompiler.py b/Lib/distutils/ccompiler.py deleted file mode 100644 index 4c47f2ed245d4f5..000000000000000 --- a/Lib/distutils/ccompiler.py +++ /dev/null @@ -1,1116 +0,0 @@ -"""distutils.ccompiler - -Contains CCompiler, an abstract base class that defines the interface -for the Distutils compiler abstraction model.""" - -import sys, os, re -from distutils.errors import * -from distutils.spawn import spawn -from distutils.file_util import move_file -from distutils.dir_util import mkpath -from distutils.dep_util import newer_group -from distutils.util import split_quoted, execute -from distutils import log - -class CCompiler: - """Abstract base class to define the interface that must be implemented - by real compiler classes. Also has some utility methods used by - several compiler classes. - - The basic idea behind a compiler abstraction class is that each - instance can be used for all the compile/link steps in building a - single project. Thus, attributes common to all of those compile and - link steps -- include directories, macros to define, libraries to link - against, etc. -- are attributes of the compiler instance. To allow for - variability in how individual files are treated, most of those - attributes may be varied on a per-compilation or per-link basis. - """ - - # 'compiler_type' is a class attribute that identifies this class. It - # keeps code that wants to know what kind of compiler it's dealing with - # from having to import all possible compiler classes just to do an - # 'isinstance'. In concrete CCompiler subclasses, 'compiler_type' - # should really, really be one of the keys of the 'compiler_class' - # dictionary (see below -- used by the 'new_compiler()' factory - # function) -- authors of new compiler interface classes are - # responsible for updating 'compiler_class'! - compiler_type = None - - # XXX things not handled by this compiler abstraction model: - # * client can't provide additional options for a compiler, - # e.g. warning, optimization, debugging flags. Perhaps this - # should be the domain of concrete compiler abstraction classes - # (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base - # class should have methods for the common ones. - # * can't completely override the include or library searchg - # path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2". - # I'm not sure how widely supported this is even by Unix - # compilers, much less on other platforms. And I'm even less - # sure how useful it is; maybe for cross-compiling, but - # support for that is a ways off. (And anyways, cross - # compilers probably have a dedicated binary with the - # right paths compiled in. I hope.) - # * can't do really freaky things with the library list/library - # dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against - # different versions of libfoo.a in different locations. I - # think this is useless without the ability to null out the - # library search path anyways. - - - # Subclasses that rely on the standard filename generation methods - # implemented below should override these; see the comment near - # those methods ('object_filenames()' et. al.) 
for details: - src_extensions = None # list of strings - obj_extension = None # string - static_lib_extension = None - shared_lib_extension = None # string - static_lib_format = None # format string - shared_lib_format = None # prob. same as static_lib_format - exe_extension = None # string - - # Default language settings. language_map is used to detect a source - # file or Extension target language, checking source filenames. - # language_order is used to detect the language precedence, when deciding - # what language to use when mixing source types. For example, if some - # extension has two files with ".c" extension, and one with ".cpp", it - # is still linked as c++. - language_map = {".c" : "c", - ".cc" : "c++", - ".cpp" : "c++", - ".cxx" : "c++", - ".m" : "objc", - } - language_order = ["c++", "objc", "c"] - - def __init__(self, verbose=0, dry_run=0, force=0): - self.dry_run = dry_run - self.force = force - self.verbose = verbose - - # 'output_dir': a common output directory for object, library, - # shared object, and shared library files - self.output_dir = None - - # 'macros': a list of macro definitions (or undefinitions). A - # macro definition is a 2-tuple (name, value), where the value is - # either a string or None (no explicit value). A macro - # undefinition is a 1-tuple (name,). - self.macros = [] - - # 'include_dirs': a list of directories to search for include files - self.include_dirs = [] - - # 'libraries': a list of libraries to include in any link - # (library names, not filenames: eg. "foo" not "libfoo.a") - self.libraries = [] - - # 'library_dirs': a list of directories to search for libraries - self.library_dirs = [] - - # 'runtime_library_dirs': a list of directories to search for - # shared libraries/objects at runtime - self.runtime_library_dirs = [] - - # 'objects': a list of object files (or similar, such as explicitly - # named library files) to include on any link - self.objects = [] - - for key in self.executables.keys(): - self.set_executable(key, self.executables[key]) - - def set_executables(self, **kwargs): - """Define the executables (and options for them) that will be run - to perform the various stages of compilation. The exact set of - executables that may be specified here depends on the compiler - class (via the 'executables' class attribute), but most will have: - compiler the C/C++ compiler - linker_so linker used to create shared objects and libraries - linker_exe linker used to create binary executables - archiver static library creator - - On platforms with a command-line (Unix, DOS/Windows), each of these - is a string that will be split into executable name and (optional) - list of arguments. (Splitting the string is done similarly to how - Unix shells operate: words are delimited by spaces, but quotes and - backslashes can override this. See - 'distutils.util.split_quoted()'.) - """ - - # Note that some CCompiler implementation classes will define class - # attributes 'cpp', 'cc', etc. with hard-coded executable names; - # this is appropriate when a compiler class is for exactly one - # compiler/OS combination (eg. MSVCCompiler). Other compiler - # classes (UnixCCompiler, in particular) are driven by information - # discovered at run-time, since there are many different ways to do - # basically the same things with Unix C compilers. 
- - for key in kwargs: - if key not in self.executables: - raise ValueError("unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - self.set_executable(key, kwargs[key]) - - def set_executable(self, key, value): - if isinstance(value, str): - setattr(self, key, split_quoted(value)) - else: - setattr(self, key, value) - - def _find_macro(self, name): - i = 0 - for defn in self.macros: - if defn[0] == name: - return i - i += 1 - return None - - def _check_macro_definitions(self, definitions): - """Ensures that every element of 'definitions' is a valid macro - definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do - nothing if all definitions are OK, raise TypeError otherwise. - """ - for defn in definitions: - if not (isinstance(defn, tuple) and - (len(defn) in (1, 2) and - (isinstance (defn[1], str) or defn[1] is None)) and - isinstance (defn[0], str)): - raise TypeError(("invalid macro definition '%s': " % defn) + \ - "must be tuple (string,), (string, string), or " + \ - "(string, None)") - - - # -- Bookkeeping methods ------------------------------------------- - - def define_macro(self, name, value=None): - """Define a preprocessor macro for all compilations driven by this - compiler object. The optional parameter 'value' should be a - string; if it is not supplied, then the macro will be defined - without an explicit value and the exact outcome depends on the - compiler used (XXX true? does ANSI say anything about this?) - """ - # Delete from the list of macro definitions/undefinitions if - # already there (so that this one will take precedence). - i = self._find_macro (name) - if i is not None: - del self.macros[i] - - self.macros.append((name, value)) - - def undefine_macro(self, name): - """Undefine a preprocessor macro for all compilations driven by - this compiler object. If the same macro is defined by - 'define_macro()' and undefined by 'undefine_macro()' the last call - takes precedence (including multiple redefinitions or - undefinitions). If the macro is redefined/undefined on a - per-compilation basis (ie. in the call to 'compile()'), then that - takes precedence. - """ - # Delete from the list of macro definitions/undefinitions if - # already there (so that this one will take precedence). - i = self._find_macro (name) - if i is not None: - del self.macros[i] - - undefn = (name,) - self.macros.append(undefn) - - def add_include_dir(self, dir): - """Add 'dir' to the list of directories that will be searched for - header files. The compiler is instructed to search directories in - the order in which they are supplied by successive calls to - 'add_include_dir()'. - """ - self.include_dirs.append(dir) - - def set_include_dirs(self, dirs): - """Set the list of directories that will be searched to 'dirs' (a - list of strings). Overrides any preceding calls to - 'add_include_dir()'; subsequence calls to 'add_include_dir()' add - to the list passed to 'set_include_dirs()'. This does not affect - any list of standard include directories that the compiler may - search by default. - """ - self.include_dirs = dirs[:] - - def add_library(self, libname): - """Add 'libname' to the list of libraries that will be included in - all links driven by this compiler object. Note that 'libname' - should *not* be the name of a file containing a library, but the - name of the library itself: the actual filename will be inferred by - the linker, the compiler, or the compiler class (depending on the - platform). 
- - The linker will be instructed to link against libraries in the - order they were supplied to 'add_library()' and/or - 'set_libraries()'. It is perfectly valid to duplicate library - names; the linker will be instructed to link against libraries as - many times as they are mentioned. - """ - self.libraries.append(libname) - - def set_libraries(self, libnames): - """Set the list of libraries to be included in all links driven by - this compiler object to 'libnames' (a list of strings). This does - not affect any standard system libraries that the linker may - include by default. - """ - self.libraries = libnames[:] - - def add_library_dir(self, dir): - """Add 'dir' to the list of directories that will be searched for - libraries specified to 'add_library()' and 'set_libraries()'. The - linker will be instructed to search for libraries in the order they - are supplied to 'add_library_dir()' and/or 'set_library_dirs()'. - """ - self.library_dirs.append(dir) - - def set_library_dirs(self, dirs): - """Set the list of library search directories to 'dirs' (a list of - strings). This does not affect any standard library search path - that the linker may search by default. - """ - self.library_dirs = dirs[:] - - def add_runtime_library_dir(self, dir): - """Add 'dir' to the list of directories that will be searched for - shared libraries at runtime. - """ - self.runtime_library_dirs.append(dir) - - def set_runtime_library_dirs(self, dirs): - """Set the list of directories to search for shared libraries at - runtime to 'dirs' (a list of strings). This does not affect any - standard search path that the runtime linker may search by - default. - """ - self.runtime_library_dirs = dirs[:] - - def add_link_object(self, object): - """Add 'object' to the list of object files (or analogues, such as - explicitly named library files or the output of "resource - compilers") to be included in every link driven by this compiler - object. - """ - self.objects.append(object) - - def set_link_objects(self, objects): - """Set the list of object files (or analogues) to be included in - every link to 'objects'. This does not affect any standard object - files that the linker may include by default (such as system - libraries). 
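A sketch of the per-instance bookkeeping methods described above; the paths and library names are hypothetical:

    from distutils.ccompiler import new_compiler

    cc = new_compiler()
    cc.define_macro('NDEBUG')                    # applies to every compile
    cc.add_include_dir('/opt/myproj/include')
    cc.add_library('ssl')                        # bare name, not a filename
    cc.add_library_dir('/opt/myproj/lib')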
- """ - self.objects = objects[:] - - - # -- Private utility methods -------------------------------------- - # (here for the convenience of subclasses) - - # Helper method to prep compiler in subclass compile() methods - - def _setup_compile(self, outdir, macros, incdirs, sources, depends, - extra): - """Process arguments and decide which source files to compile.""" - if outdir is None: - outdir = self.output_dir - elif not isinstance(outdir, str): - raise TypeError("'output_dir' must be a string or None") - - if macros is None: - macros = self.macros - elif isinstance(macros, list): - macros = macros + (self.macros or []) - else: - raise TypeError("'macros' (if supplied) must be a list of tuples") - - if incdirs is None: - incdirs = self.include_dirs - elif isinstance(incdirs, (list, tuple)): - incdirs = list(incdirs) + (self.include_dirs or []) - else: - raise TypeError( - "'include_dirs' (if supplied) must be a list of strings") - - if extra is None: - extra = [] - - # Get the list of expected output (object) files - objects = self.object_filenames(sources, strip_dir=0, - output_dir=outdir) - assert len(objects) == len(sources) - - pp_opts = gen_preprocess_options(macros, incdirs) - - build = {} - for i in range(len(sources)): - src = sources[i] - obj = objects[i] - ext = os.path.splitext(src)[1] - self.mkpath(os.path.dirname(obj)) - build[obj] = (src, ext) - - return macros, objects, extra, pp_opts, build - - def _get_cc_args(self, pp_opts, debug, before): - # works for unixccompiler, cygwinccompiler - cc_args = pp_opts + ['-c'] - if debug: - cc_args[:0] = ['-g'] - if before: - cc_args[:0] = before - return cc_args - - def _fix_compile_args(self, output_dir, macros, include_dirs): - """Typecheck and fix-up some of the arguments to the 'compile()' - method, and return fixed-up values. Specifically: if 'output_dir' - is None, replaces it with 'self.output_dir'; ensures that 'macros' - is a list, and augments it with 'self.macros'; ensures that - 'include_dirs' is a list, and augments it with 'self.include_dirs'. - Guarantees that the returned values are of the correct type, - i.e. for 'output_dir' either string or None, and for 'macros' and - 'include_dirs' either list or None. - """ - if output_dir is None: - output_dir = self.output_dir - elif not isinstance(output_dir, str): - raise TypeError("'output_dir' must be a string or None") - - if macros is None: - macros = self.macros - elif isinstance(macros, list): - macros = macros + (self.macros or []) - else: - raise TypeError("'macros' (if supplied) must be a list of tuples") - - if include_dirs is None: - include_dirs = self.include_dirs - elif isinstance(include_dirs, (list, tuple)): - include_dirs = list(include_dirs) + (self.include_dirs or []) - else: - raise TypeError( - "'include_dirs' (if supplied) must be a list of strings") - - return output_dir, macros, include_dirs - - def _prep_compile(self, sources, output_dir, depends=None): - """Decide which source files must be recompiled. - - Determine the list of object files corresponding to 'sources', - and figure out which ones really need to be recompiled. - Return a list of all object files and a dictionary telling - which source files can be skipped. - """ - # Get the list of expected output (object) files - objects = self.object_filenames(sources, output_dir=output_dir) - assert len(objects) == len(sources) - - # Return an empty dict for the "which source files can be skipped" - # return value to preserve API compatibility. 
- return objects, {} - - def _fix_object_args(self, objects, output_dir): - """Typecheck and fix up some arguments supplied to various methods. - Specifically: ensure that 'objects' is a list; if output_dir is - None, replace with self.output_dir. Return fixed versions of - 'objects' and 'output_dir'. - """ - if not isinstance(objects, (list, tuple)): - raise TypeError("'objects' must be a list or tuple of strings") - objects = list(objects) - - if output_dir is None: - output_dir = self.output_dir - elif not isinstance(output_dir, str): - raise TypeError("'output_dir' must be a string or None") - - return (objects, output_dir) - - def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs): - """Typecheck and fix up some of the arguments supplied to the - 'link_*' methods. Specifically: ensure that all arguments are - lists, and augment them with their permanent versions - (eg. 'self.libraries' augments 'libraries'). Return a tuple with - fixed versions of all arguments. - """ - if libraries is None: - libraries = self.libraries - elif isinstance(libraries, (list, tuple)): - libraries = list (libraries) + (self.libraries or []) - else: - raise TypeError( - "'libraries' (if supplied) must be a list of strings") - - if library_dirs is None: - library_dirs = self.library_dirs - elif isinstance(library_dirs, (list, tuple)): - library_dirs = list (library_dirs) + (self.library_dirs or []) - else: - raise TypeError( - "'library_dirs' (if supplied) must be a list of strings") - - if runtime_library_dirs is None: - runtime_library_dirs = self.runtime_library_dirs - elif isinstance(runtime_library_dirs, (list, tuple)): - runtime_library_dirs = (list(runtime_library_dirs) + - (self.runtime_library_dirs or [])) - else: - raise TypeError("'runtime_library_dirs' (if supplied) " - "must be a list of strings") - - return (libraries, library_dirs, runtime_library_dirs) - - def _need_link(self, objects, output_file): - """Return true if we need to relink the files listed in 'objects' - to recreate 'output_file'. - """ - if self.force: - return True - else: - if self.dry_run: - newer = newer_group (objects, output_file, missing='newer') - else: - newer = newer_group (objects, output_file) - return newer - - def detect_language(self, sources): - """Detect the language of a given file, or list of files. Uses - language_map, and language_order to do the job. - """ - if not isinstance(sources, list): - sources = [sources] - lang = None - index = len(self.language_order) - for source in sources: - base, ext = os.path.splitext(source) - extlang = self.language_map.get(ext) - try: - extindex = self.language_order.index(extlang) - if extindex < index: - lang = extlang - index = extindex - except ValueError: - pass - return lang - - - # -- Worker methods ------------------------------------------------ - # (must be implemented by subclasses) - - def preprocess(self, source, output_file=None, macros=None, - include_dirs=None, extra_preargs=None, extra_postargs=None): - """Preprocess a single C/C++ source file, named in 'source'. - Output will be written to file named 'output_file', or stdout if - 'output_file' not supplied. 'macros' is a list of macro - definitions as for 'compile()', which will augment the macros set - with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a - list of directory names that will be added to the default list. - - Raises PreprocessError on failure. 
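A sketch of preprocess() as a concrete subclass implements it (the base-class method above is a no-op); the input header and macro are hypothetical:

    from distutils.ccompiler import new_compiler

    cc = new_compiler()          # a concrete compiler, e.g. UnixCCompiler
    cc.preprocess('config.h.in', output_file='config.h',
                  macros=[('WITH_SSL', '1')], include_dirs=['include'])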
- """ - pass - - def compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """Compile one or more source files. - - 'sources' must be a list of filenames, most likely C/C++ - files, but in reality anything that can be handled by a - particular compiler and compiler class (eg. MSVCCompiler can - handle resource files in 'sources'). Return a list of object - filenames, one per source filename in 'sources'. Depending on - the implementation, not all source files will necessarily be - compiled, but all corresponding object filenames will be - returned. - - If 'output_dir' is given, object files will be put under it, while - retaining their original path component. That is, "foo/bar.c" - normally compiles to "foo/bar.o" (for a Unix implementation); if - 'output_dir' is "build", then it would compile to - "build/foo/bar.o". - - 'macros', if given, must be a list of macro definitions. A macro - definition is either a (name, value) 2-tuple or a (name,) 1-tuple. - The former defines a macro; if the value is None, the macro is - defined without an explicit value. The 1-tuple case undefines a - macro. Later definitions/redefinitions/ undefinitions take - precedence. - - 'include_dirs', if given, must be a list of strings, the - directories to add to the default include file search path for this - compilation only. - - 'debug' is a boolean; if true, the compiler will be instructed to - output debug symbols in (or alongside) the object file(s). - - 'extra_preargs' and 'extra_postargs' are implementation- dependent. - On platforms that have the notion of a command-line (e.g. Unix, - DOS/Windows), they are most likely lists of strings: extra - command-line arguments to prepend/append to the compiler command - line. On other platforms, consult the implementation class - documentation. In any event, they are intended as an escape hatch - for those occasions when the abstract compiler framework doesn't - cut the mustard. - - 'depends', if given, is a list of filenames that all targets - depend on. If a source file is older than any file in - depends, then the source file will be recompiled. This - supports dependency tracking, but only at a coarse - granularity. - - Raises CompileError on failure. - """ - # A concrete compiler class can either override this method - # entirely or implement _compile(). - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - - # Return *all* object filenames, not just the ones we just built. - return objects - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - # A concrete compiler class that does not override compile() - # should implement _compile(). - pass - - def create_static_lib(self, objects, output_libname, output_dir=None, - debug=0, target_lang=None): - """Link a bunch of stuff together to create a static library file. - The "bunch of stuff" consists of the list of object files supplied - as 'objects', the extra object files supplied to - 'add_link_object()' and/or 'set_link_objects()', the libraries - supplied to 'add_library()' and/or 'set_libraries()', and the - libraries supplied as 'libraries' (if any). 
- - 'output_libname' should be a library name, not a filename; the - filename will be inferred from the library name. 'output_dir' is - the directory where the library file will be put. - - 'debug' is a boolean; if true, debugging information will be - included in the library (note that on most platforms, it is the - compile step where this matters: the 'debug' flag is included here - just for consistency). - - 'target_lang' is the target language for which the given objects - are being compiled. This allows specific linkage time treatment of - certain languages. - - Raises LibError on failure. - """ - pass - - - # values for target_desc parameter in link() - SHARED_OBJECT = "shared_object" - SHARED_LIBRARY = "shared_library" - EXECUTABLE = "executable" - - def link(self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - """Link a bunch of stuff together to create an executable or - shared library file. - - The "bunch of stuff" consists of the list of object files supplied - as 'objects'. 'output_filename' should be a filename. If - 'output_dir' is supplied, 'output_filename' is relative to it - (i.e. 'output_filename' can provide directory components if - needed). - - 'libraries' is a list of libraries to link against. These are - library names, not filenames, since they're translated into - filenames in a platform-specific way (eg. "foo" becomes "libfoo.a" - on Unix and "foo.lib" on DOS/Windows). However, they can include a - directory component, which means the linker will look in that - specific directory rather than searching all the normal locations. - - 'library_dirs', if supplied, should be a list of directories to - search for libraries that were specified as bare library names - (ie. no directory component). These are on top of the system - default and those supplied to 'add_library_dir()' and/or - 'set_library_dirs()'. 'runtime_library_dirs' is a list of - directories that will be embedded into the shared library and used - to search for other shared libraries that *it* depends on at - run-time. (This may only be relevant on Unix.) - - 'export_symbols' is a list of symbols that the shared library will - export. (This appears to be relevant only on Windows.) - - 'debug' is as for 'compile()' and 'create_static_lib()', with the - slight distinction that it actually matters on most platforms (as - opposed to 'create_static_lib()', which includes a 'debug' flag - mostly for form's sake). - - 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except - of course that they supply command-line arguments for the - particular linker being used). - - 'target_lang' is the target language for which the given objects - are being compiled. This allows specific linkage time treatment of - certain languages. - - Raises LinkError on failure. - """ - raise NotImplementedError - - - # Old 'link_*()' methods, rewritten to use the new 'link()' method. 
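# --- Editor's illustrative sketch (not part of the removed file) ---
# The wrappers below simply forward to link() with the matching target_desc
# constant. Assuming a concrete compiler and an existing C source 'foo.c',
# a typical end-to-end use of the API documented above is roughly:

from distutils.ccompiler import new_compiler, CCompiler

cc = new_compiler()                               # platform default compiler
objs = cc.compile(['foo.c'], output_dir='build')  # e.g. ['build/foo.o'] on Unix
cc.link(CCompiler.SHARED_OBJECT, objs, 'foo.so',  # equivalent to link_shared_object()
        output_dir='build', libraries=['m'])
# --- end sketch ---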
- - def link_shared_lib(self, - objects, - output_libname, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - self.link(CCompiler.SHARED_LIBRARY, objects, - self.library_filename(output_libname, lib_type='shared'), - output_dir, - libraries, library_dirs, runtime_library_dirs, - export_symbols, debug, - extra_preargs, extra_postargs, build_temp, target_lang) - - - def link_shared_object(self, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - self.link(CCompiler.SHARED_OBJECT, objects, - output_filename, output_dir, - libraries, library_dirs, runtime_library_dirs, - export_symbols, debug, - extra_preargs, extra_postargs, build_temp, target_lang) - - - def link_executable(self, - objects, - output_progname, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - target_lang=None): - self.link(CCompiler.EXECUTABLE, objects, - self.executable_filename(output_progname), output_dir, - libraries, library_dirs, runtime_library_dirs, None, - debug, extra_preargs, extra_postargs, None, target_lang) - - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function; there is - # no appropriate default implementation so subclasses should - # implement all of these. - - def library_dir_option(self, dir): - """Return the compiler option to add 'dir' to the list of - directories searched for libraries. - """ - raise NotImplementedError - - def runtime_library_dir_option(self, dir): - """Return the compiler option to add 'dir' to the list of - directories searched for runtime libraries. - """ - raise NotImplementedError - - def library_option(self, lib): - """Return the compiler option to add 'lib' to the list of libraries - linked into the shared library or executable. - """ - raise NotImplementedError - - def has_function(self, funcname, includes=None, include_dirs=None, - libraries=None, library_dirs=None): - """Return a boolean indicating whether funcname is supported on - the current platform. The optional arguments can be used to - augment the compilation environment. - """ - # this can't be included at module scope because it tries to - # import math which might not be available at that point - maybe - # the necessary logic should just be inlined? 
- import tempfile - if includes is None: - includes = [] - if include_dirs is None: - include_dirs = [] - if libraries is None: - libraries = [] - if library_dirs is None: - library_dirs = [] - fd, fname = tempfile.mkstemp(".c", funcname, text=True) - f = os.fdopen(fd, "w") - try: - for incl in includes: - f.write("""#include "%s"\n""" % incl) - f.write("""\ -int main (int argc, char **argv) { - %s(); - return 0; -} -""" % funcname) - finally: - f.close() - try: - objects = self.compile([fname], include_dirs=include_dirs) - except CompileError: - return False - - try: - self.link_executable(objects, "a.out", - libraries=libraries, - library_dirs=library_dirs) - except (LinkError, TypeError): - return False - return True - - def find_library_file (self, dirs, lib, debug=0): - """Search the specified list of directories for a static or shared - library file 'lib' and return the full path to that file. If - 'debug' true, look for a debugging version (if that makes sense on - the current platform). Return None if 'lib' wasn't found in any of - the specified directories. - """ - raise NotImplementedError - - # -- Filename generation methods ----------------------------------- - - # The default implementation of the filename generating methods are - # prejudiced towards the Unix/DOS/Windows view of the world: - # * object files are named by replacing the source file extension - # (eg. .c/.cpp -> .o/.obj) - # * library files (shared or static) are named by plugging the - # library name and extension into a format string, eg. - # "lib%s.%s" % (lib_name, ".a") for Unix static libraries - # * executables are named by appending an extension (possibly - # empty) to the program name: eg. progname + ".exe" for - # Windows - # - # To reduce redundant code, these methods expect to find - # several attributes in the current object (presumably defined - # as class attributes): - # * src_extensions - - # list of C/C++ source file extensions, eg. ['.c', '.cpp'] - # * obj_extension - - # object file extension, eg. '.o' or '.obj' - # * static_lib_extension - - # extension for static library files, eg. '.a' or '.lib' - # * shared_lib_extension - - # extension for shared library/object files, eg. '.so', '.dll' - # * static_lib_format - - # format string for generating static library filenames, - # eg. 'lib%s.%s' or '%s.%s' - # * shared_lib_format - # format string for generating shared library filenames - # (probably same as static_lib_format, since the extension - # is one of the intended parameters to the format string) - # * exe_extension - - # extension for executable files, eg. 
'' or '.exe' - - def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(src_name) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if ext not in self.src_extensions: - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_names.append(os.path.join(output_dir, - base + self.obj_extension)) - return obj_names - - def shared_object_filename(self, basename, strip_dir=0, output_dir=''): - assert output_dir is not None - if strip_dir: - basename = os.path.basename(basename) - return os.path.join(output_dir, basename + self.shared_lib_extension) - - def executable_filename(self, basename, strip_dir=0, output_dir=''): - assert output_dir is not None - if strip_dir: - basename = os.path.basename(basename) - return os.path.join(output_dir, basename + (self.exe_extension or '')) - - def library_filename(self, libname, lib_type='static', # or 'shared' - strip_dir=0, output_dir=''): - assert output_dir is not None - if lib_type not in ("static", "shared", "dylib", "xcode_stub"): - raise ValueError( - "'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"") - fmt = getattr(self, lib_type + "_lib_format") - ext = getattr(self, lib_type + "_lib_extension") - - dir, base = os.path.split(libname) - filename = fmt % (base, ext) - if strip_dir: - dir = '' - - return os.path.join(output_dir, dir, filename) - - - # -- Utility methods ----------------------------------------------- - - def announce(self, msg, level=1): - log.debug(msg) - - def debug_print(self, msg): - from distutils.debug import DEBUG - if DEBUG: - print(msg) - - def warn(self, msg): - sys.stderr.write("warning: %s\n" % msg) - - def execute(self, func, args, msg=None, level=1): - execute(func, args, msg, self.dry_run) - - def spawn(self, cmd): - spawn(cmd, dry_run=self.dry_run) - - def move_file(self, src, dst): - return move_file(src, dst, dry_run=self.dry_run) - - def mkpath (self, name, mode=0o777): - mkpath(name, mode, dry_run=self.dry_run) - - -# Map a sys.platform/os.name ('posix', 'nt') to the default compiler -# type for that platform. Keys are interpreted as re match -# patterns. Order is important; platform mappings are preferred over -# OS names. -_default_compilers = ( - - # Platform string mappings - - # on a cygwin built python we can use gcc like an ordinary UNIXish - # compiler - ('cygwin.*', 'unix'), - - # OS name mappings - ('posix', 'unix'), - ('nt', 'msvc'), - - ) - -def get_default_compiler(osname=None, platform=None): - """Determine the default compiler to use for the given platform. - - osname should be one of the standard Python OS names (i.e. the - ones returned by os.name) and platform the common value - returned by sys.platform for the platform in question. - - The default values are os.name and sys.platform in case the - parameters are not given. - """ - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - for pattern, compiler in _default_compilers: - if re.match(pattern, platform) is not None or \ - re.match(pattern, osname) is not None: - return compiler - # Default to Unix compiler - return 'unix' - -# Map compiler types to (module_name, class_name) pairs -- ie. where to -# find the code that implements an interface to this compiler. 
(The module -# is assumed to be in the 'distutils' package.) -compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler', - "standard UNIX-style compiler"), - 'msvc': ('_msvccompiler', 'MSVCCompiler', - "Microsoft Visual C++"), - 'cygwin': ('cygwinccompiler', 'CygwinCCompiler', - "Cygwin port of GNU C Compiler for Win32"), - 'mingw32': ('cygwinccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"), - 'bcpp': ('bcppcompiler', 'BCPPCompiler', - "Borland C++ Compiler"), - } - -def show_compilers(): - """Print list of available compilers (used by the "--help-compiler" - options to "build", "build_ext", "build_clib"). - """ - # XXX this "knows" that the compiler option it's describing is - # "--compiler", which just happens to be the case for the three - # commands that use it. - from distutils.fancy_getopt import FancyGetopt - compilers = [] - for compiler in compiler_class.keys(): - compilers.append(("compiler="+compiler, None, - compiler_class[compiler][2])) - compilers.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("List of available compilers:") - - -def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0): - """Generate an instance of some CCompiler subclass for the supplied - platform/compiler combination. 'plat' defaults to 'os.name' - (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler - for that platform. Currently only 'posix' and 'nt' are supported, and - the default compilers are "traditional Unix interface" (UnixCCompiler - class) and Visual C++ (MSVCCompiler class). Note that it's perfectly - possible to ask for a Unix compiler object under Windows, and a - Microsoft compiler object under Unix -- if you supply a value for - 'compiler', 'plat' is ignored. - """ - if plat is None: - plat = os.name - - try: - if compiler is None: - compiler = get_default_compiler(plat) - - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - - try: - module_name = "distutils." + module_name - __import__ (module_name) - module = sys.modules[module_name] - klass = vars(module)[class_name] - except ImportError: - raise DistutilsModuleError( - "can't compile C/C++ code: unable to load module '%s'" % \ - module_name) - except KeyError: - raise DistutilsModuleError( - "can't compile C/C++ code: unable to find class '%s' " - "in module '%s'" % (class_name, module_name)) - - # XXX The None is necessary to preserve backwards compatibility - # with classes that expect verbose to be the first positional - # argument. - return klass(None, dry_run, force) - - -def gen_preprocess_options(macros, include_dirs): - """Generate C pre-processor options (-D, -U, -I) as used by at least - two types of compilers: the typical Unix compiler and Visual C++. - 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,) - means undefine (-U) macro 'name', and (name,value) means define (-D) - macro 'name' to 'value'. 'include_dirs' is just a list of directory - names to be added to the header file search path (-I). Returns a list - of command-line options suitable for either Unix compilers or Visual - C++. - """ - # XXX it would be nice (mainly aesthetic, and so we don't generate - # stupid-looking command lines) to go over 'macros' and eliminate - # redundant definitions/undefinitions (ie. 
ensure that only the - # latest mention of a particular macro winds up on the command - # line). I don't think it's essential, though, since most (all?) - # Unix C compilers only pay attention to the latest -D or -U - # mention of a macro on their command line. Similar situation for - # 'include_dirs'. I'm punting on both for now. Anyways, weeding out - # redundancies like this should probably be the province of - # CCompiler, since the data structures used are inherited from it - # and therefore common to all CCompiler classes. - pp_opts = [] - for macro in macros: - if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2): - raise TypeError( - "bad macro definition '%s': " - "each element of 'macros' list must be a 1- or 2-tuple" - % macro) - - if len(macro) == 1: # undefine this macro - pp_opts.append("-U%s" % macro[0]) - elif len(macro) == 2: - if macro[1] is None: # define with no explicit value - pp_opts.append("-D%s" % macro[0]) - else: - # XXX *don't* need to be clever about quoting the - # macro value here, because we're going to avoid the - # shell at all costs when we spawn the command! - pp_opts.append("-D%s=%s" % macro) - - for dir in include_dirs: - pp_opts.append("-I%s" % dir) - return pp_opts - - -def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries): - """Generate linker options for searching library directories and - linking with specific libraries. 'libraries' and 'library_dirs' are, - respectively, lists of library names (not filenames!) and search - directories. Returns a list of command-line options suitable for use - with some compiler (depending on the two format strings passed in). - """ - lib_opts = [] - - for dir in library_dirs: - lib_opts.append(compiler.library_dir_option(dir)) - - for dir in runtime_library_dirs: - opt = compiler.runtime_library_dir_option(dir) - if isinstance(opt, list): - lib_opts = lib_opts + opt - else: - lib_opts.append(opt) - - # XXX it's important that we *not* remove redundant library mentions! - # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to - # resolve all symbols. I just hope we never have to say "-lfoo obj.o - # -lbar" to get things to work -- that's certainly a possibility, but a - # pretty nasty way to arrange your C code. - - for lib in libraries: - (lib_dir, lib_name) = os.path.split(lib) - if lib_dir: - lib_file = compiler.find_library_file([lib_dir], lib_name) - if lib_file: - lib_opts.append(lib_file) - else: - compiler.warn("no library file corresponding to " - "'%s' found (skipping)" % lib) - else: - lib_opts.append(compiler.library_option (lib)) - return lib_opts diff --git a/Lib/distutils/cmd.py b/Lib/distutils/cmd.py deleted file mode 100644 index dba3191e58474c9..000000000000000 --- a/Lib/distutils/cmd.py +++ /dev/null @@ -1,403 +0,0 @@ -"""distutils.cmd - -Provides the Command class, the base class for the command classes -in the distutils.command package. -""" - -import sys, os, re -from distutils.errors import DistutilsOptionError -from distutils import util, dir_util, file_util, archive_util, dep_util -from distutils import log - -class Command: - """Abstract base class for defining command classes, the "worker bees" - of the Distutils. A useful analogy for command classes is to think of - them as subroutines with local variables called "options". The options - are "declared" in 'initialize_options()' and "defined" (given their - final values, aka "finalized") in 'finalize_options()', both of which - must be defined by every command class. 
The distinction between the - two is necessary because option values might come from the outside - world (command line, config file, ...), and any options dependent on - other options must be computed *after* these outside influences have - been processed -- hence 'finalize_options()'. The "body" of the - subroutine, where it does all its work based on the values of its - options, is the 'run()' method, which must also be implemented by every - command class. - """ - - # 'sub_commands' formalizes the notion of a "family" of commands, - # eg. "install" as the parent with sub-commands "install_lib", - # "install_headers", etc. The parent of a family of commands - # defines 'sub_commands' as a class attribute; it's a list of - # (command_name : string, predicate : unbound_method | string | None) - # tuples, where 'predicate' is a method of the parent command that - # determines whether the corresponding command is applicable in the - # current situation. (Eg. we "install_headers" is only applicable if - # we have any C header files to install.) If 'predicate' is None, - # that command is always applicable. - # - # 'sub_commands' is usually defined at the *end* of a class, because - # predicates can be unbound methods, so they must already have been - # defined. The canonical example is the "install" command. - sub_commands = [] - - - # -- Creation/initialization methods ------------------------------- - - def __init__(self, dist): - """Create and initialize a new Command object. Most importantly, - invokes the 'initialize_options()' method, which is the real - initializer and depends on the actual command being - instantiated. - """ - # late import because of mutual dependence between these classes - from distutils.dist import Distribution - - if not isinstance(dist, Distribution): - raise TypeError("dist must be a Distribution instance") - if self.__class__ is Command: - raise RuntimeError("Command is an abstract class") - - self.distribution = dist - self.initialize_options() - - # Per-command versions of the global flags, so that the user can - # customize Distutils' behaviour command-by-command and let some - # commands fall back on the Distribution's behaviour. None means - # "not defined, check self.distribution's copy", while 0 or 1 mean - # false and true (duh). Note that this means figuring out the real - # value of each flag is a touch complicated -- hence "self._dry_run" - # will be handled by __getattr__, below. - # XXX This needs to be fixed. - self._dry_run = None - - # verbose is largely ignored, but needs to be set for - # backwards compatibility (I think)? - self.verbose = dist.verbose - - # Some commands define a 'self.force' option to ignore file - # timestamps, but methods defined *here* assume that - # 'self.force' exists for all commands. So define it here - # just to be safe. - self.force = None - - # The 'help' flag is just used for command-line parsing, so - # none of that complicated bureaucracy is needed. - self.help = 0 - - # 'finalized' records whether or not 'finalize_options()' has been - # called. 'finalize_options()' itself should not pay attention to - # this flag: it is the business of 'ensure_finalized()', which - # always calls 'finalize_options()', to respect/update it. - self.finalized = 0 - - # XXX A more explicit way to customize dry_run would be better. 
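# --- Editor's illustrative sketch (not part of the removed file) ---
# A minimal command class following the contract described in the docstring
# above: "declare" options in initialize_options(), "finalize" them in
# finalize_options(), and do the work in run(). The command name 'print_name'
# and its option are invented for this example.

from distutils.core import Command

class print_name(Command):
    description = "print the distribution name (example only)"
    user_options = [('uppercase', 'u', "print the name in upper case")]
    boolean_options = ['uppercase']

    def initialize_options(self):
        self.uppercase = None        # declared, not yet finalized

    def finalize_options(self):
        if self.uppercase is None:   # resolve defaults as late as possible
            self.uppercase = 0

    def run(self):
        name = self.distribution.get_name()
        print(name.upper() if self.uppercase else name)
# --- end sketch ---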
- def __getattr__(self, attr): - if attr == 'dry_run': - myval = getattr(self, "_" + attr) - if myval is None: - return getattr(self.distribution, attr) - else: - return myval - else: - raise AttributeError(attr) - - def ensure_finalized(self): - if not self.finalized: - self.finalize_options() - self.finalized = 1 - - # Subclasses must define: - # initialize_options() - # provide default values for all options; may be customized by - # setup script, by options from config file(s), or by command-line - # options - # finalize_options() - # decide on the final values for all options; this is called - # after all possible intervention from the outside world - # (command-line, option file, etc.) has been processed - # run() - # run the command: do whatever it is we're here to do, - # controlled by the command's various option values - - def initialize_options(self): - """Set default values for all the options that this command - supports. Note that these defaults may be overridden by other - commands, by the setup script, by config files, or by the - command-line. Thus, this is not the place to code dependencies - between options; generally, 'initialize_options()' implementations - are just a bunch of "self.foo = None" assignments. - - This method must be implemented by all command classes. - """ - raise RuntimeError("abstract method -- subclass %s must override" - % self.__class__) - - def finalize_options(self): - """Set final values for all the options that this command supports. - This is always called as late as possible, ie. after any option - assignments from the command-line or from other commands have been - done. Thus, this is the place to code option dependencies: if - 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as - long as 'foo' still has the same value it was assigned in - 'initialize_options()'. - - This method must be implemented by all command classes. - """ - raise RuntimeError("abstract method -- subclass %s must override" - % self.__class__) - - - def dump_options(self, header=None, indent=""): - from distutils.fancy_getopt import longopt_xlate - if header is None: - header = "command options for '%s':" % self.get_command_name() - self.announce(indent + header, level=log.INFO) - indent = indent + " " - for (option, _, _) in self.user_options: - option = option.translate(longopt_xlate) - if option[-1] == "=": - option = option[:-1] - value = getattr(self, option) - self.announce(indent + "%s = %s" % (option, value), - level=log.INFO) - - def run(self): - """A command's raison d'etre: carry out the action it exists to - perform, controlled by the options initialized in - 'initialize_options()', customized by other commands, the setup - script, the command-line, and config files, and finalized in - 'finalize_options()'. All terminal output and filesystem - interaction should be done by 'run()'. - - This method must be implemented by all command classes. - """ - raise RuntimeError("abstract method -- subclass %s must override" - % self.__class__) - - def announce(self, msg, level=1): - """If the current verbosity level is of greater than or equal to - 'level' print 'msg' to stdout. - """ - log.log(level, msg) - - def debug_print(self, msg): - """Print 'msg' to stdout if the global DEBUG (taken from the - DISTUTILS_DEBUG environment variable) flag is true. 
- """ - from distutils.debug import DEBUG - if DEBUG: - print(msg) - sys.stdout.flush() - - - # -- Option validation methods ------------------------------------- - # (these are very handy in writing the 'finalize_options()' method) - # - # NB. the general philosophy here is to ensure that a particular option - # value meets certain type and value constraints. If not, we try to - # force it into conformance (eg. if we expect a list but have a string, - # split the string on comma and/or whitespace). If we can't force the - # option into conformance, raise DistutilsOptionError. Thus, command - # classes need do nothing more than (eg.) - # self.ensure_string_list('foo') - # and they can be guaranteed that thereafter, self.foo will be - # a list of strings. - - def _ensure_stringlike(self, option, what, default=None): - val = getattr(self, option) - if val is None: - setattr(self, option, default) - return default - elif not isinstance(val, str): - raise DistutilsOptionError("'%s' must be a %s (got `%s`)" - % (option, what, val)) - return val - - def ensure_string(self, option, default=None): - """Ensure that 'option' is a string; if not defined, set it to - 'default'. - """ - self._ensure_stringlike(option, "string", default) - - def ensure_string_list(self, option): - r"""Ensure that 'option' is a list of strings. If 'option' is - currently a string, we split it either on /,\s*/ or /\s+/, so - "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become - ["foo", "bar", "baz"]. - """ - val = getattr(self, option) - if val is None: - return - elif isinstance(val, str): - setattr(self, option, re.split(r',\s*|\s+', val)) - else: - if isinstance(val, list): - ok = all(isinstance(v, str) for v in val) - else: - ok = False - if not ok: - raise DistutilsOptionError( - "'%s' must be a list of strings (got %r)" - % (option, val)) - - def _ensure_tested_string(self, option, tester, what, error_fmt, - default=None): - val = self._ensure_stringlike(option, what, default) - if val is not None and not tester(val): - raise DistutilsOptionError(("error in '%s' option: " + error_fmt) - % (option, val)) - - def ensure_filename(self, option): - """Ensure that 'option' is the name of an existing file.""" - self._ensure_tested_string(option, os.path.isfile, - "filename", - "'%s' does not exist or is not a file") - - def ensure_dirname(self, option): - self._ensure_tested_string(option, os.path.isdir, - "directory name", - "'%s' does not exist or is not a directory") - - - # -- Convenience methods for commands ------------------------------ - - def get_command_name(self): - if hasattr(self, 'command_name'): - return self.command_name - else: - return self.__class__.__name__ - - def set_undefined_options(self, src_cmd, *option_pairs): - """Set the values of any "undefined" options from corresponding - option values in some other command object. "Undefined" here means - "is None", which is the convention used to indicate that an option - has not been changed between 'initialize_options()' and - 'finalize_options()'. Usually called from 'finalize_options()' for - options that depend on some other command rather than another - option of the same command. 'src_cmd' is the other command from - which option values will be taken (a command object will be created - for it if necessary); the remaining arguments are - '(src_option,dst_option)' tuples which mean "take the value of - 'src_option' in the 'src_cmd' command object, and copy it to - 'dst_option' in the current command object". 
- """ - # Option_pairs: list of (src_option, dst_option) tuples - src_cmd_obj = self.distribution.get_command_obj(src_cmd) - src_cmd_obj.ensure_finalized() - for (src_option, dst_option) in option_pairs: - if getattr(self, dst_option) is None: - setattr(self, dst_option, getattr(src_cmd_obj, src_option)) - - def get_finalized_command(self, command, create=1): - """Wrapper around Distribution's 'get_command_obj()' method: find - (create if necessary and 'create' is true) the command object for - 'command', call its 'ensure_finalized()' method, and return the - finalized command object. - """ - cmd_obj = self.distribution.get_command_obj(command, create) - cmd_obj.ensure_finalized() - return cmd_obj - - # XXX rename to 'get_reinitialized_command()'? (should do the - # same in dist.py, if so) - def reinitialize_command(self, command, reinit_subcommands=0): - return self.distribution.reinitialize_command(command, - reinit_subcommands) - - def run_command(self, command): - """Run some other command: uses the 'run_command()' method of - Distribution, which creates and finalizes the command object if - necessary and then invokes its 'run()' method. - """ - self.distribution.run_command(command) - - def get_sub_commands(self): - """Determine the sub-commands that are relevant in the current - distribution (ie., that need to be run). This is based on the - 'sub_commands' class attribute: each tuple in that list may include - a method that we call to determine if the subcommand needs to be - run for the current distribution. Return a list of command names. - """ - commands = [] - for (cmd_name, method) in self.sub_commands: - if method is None or method(self): - commands.append(cmd_name) - return commands - - - # -- External world manipulation ----------------------------------- - - def warn(self, msg): - log.warn("warning: %s: %s\n", self.get_command_name(), msg) - - def execute(self, func, args, msg=None, level=1): - util.execute(func, args, msg, dry_run=self.dry_run) - - def mkpath(self, name, mode=0o777): - dir_util.mkpath(name, mode, dry_run=self.dry_run) - - def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1, - link=None, level=1): - """Copy a file respecting verbose, dry-run and force flags. (The - former two default to whatever is in the Distribution object, and - the latter defaults to false for commands that don't define it.)""" - return file_util.copy_file(infile, outfile, preserve_mode, - preserve_times, not self.force, link, - dry_run=self.dry_run) - - def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1, - preserve_symlinks=0, level=1): - """Copy an entire directory tree respecting verbose, dry-run, - and force flags. 
- """ - return dir_util.copy_tree(infile, outfile, preserve_mode, - preserve_times, preserve_symlinks, - not self.force, dry_run=self.dry_run) - - def move_file (self, src, dst, level=1): - """Move a file respecting dry-run flag.""" - return file_util.move_file(src, dst, dry_run=self.dry_run) - - def spawn(self, cmd, search_path=1, level=1): - """Spawn an external command respecting dry-run flag.""" - from distutils.spawn import spawn - spawn(cmd, search_path, dry_run=self.dry_run) - - def make_archive(self, base_name, format, root_dir=None, base_dir=None, - owner=None, group=None): - return archive_util.make_archive(base_name, format, root_dir, base_dir, - dry_run=self.dry_run, - owner=owner, group=group) - - def make_file(self, infiles, outfile, func, args, - exec_msg=None, skip_msg=None, level=1): - """Special case of 'execute()' for operations that process one or - more input files and generate one output file. Works just like - 'execute()', except the operation is skipped and a different - message printed if 'outfile' already exists and is newer than all - files listed in 'infiles'. If the command defined 'self.force', - and it is true, then the command is unconditionally run -- does no - timestamp checks. - """ - if skip_msg is None: - skip_msg = "skipping %s (inputs unchanged)" % outfile - - # Allow 'infiles' to be a single string - if isinstance(infiles, str): - infiles = (infiles,) - elif not isinstance(infiles, (list, tuple)): - raise TypeError( - "'infiles' must be a string, or a list or tuple of strings") - - if exec_msg is None: - exec_msg = "generating %s from %s" % (outfile, ', '.join(infiles)) - - # If 'outfile' must be regenerated (either because it doesn't - # exist, is out-of-date, or the 'force' flag is true) then - # perform the action that presumably regenerates it - if self.force or dep_util.newer_group(infiles, outfile): - self.execute(func, args, exec_msg, level) - # Otherwise, print the "skip" message - else: - log.debug(skip_msg) diff --git a/Lib/distutils/command/__init__.py b/Lib/distutils/command/__init__.py deleted file mode 100644 index fd0bfae7ade6ff0..000000000000000 --- a/Lib/distutils/command/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands.""" - -__all__ = ['build', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'clean', - 'install', - 'install_lib', - 'install_headers', - 'install_scripts', - 'install_data', - 'sdist', - 'register', - 'bdist', - 'bdist_dumb', - 'bdist_rpm', - 'check', - 'upload', - # These two are reserved for future use: - #'bdist_sdux', - #'bdist_pkgtool', - # Note: - # bdist_packager is not included because it only provides - # an abstract base class - ] diff --git a/Lib/distutils/command/bdist.py b/Lib/distutils/command/bdist.py deleted file mode 100644 index 60309e1ff2fce3a..000000000000000 --- a/Lib/distutils/command/bdist.py +++ /dev/null @@ -1,138 +0,0 @@ -"""distutils.command.bdist - -Implements the Distutils 'bdist' command (create a built [binary] -distribution).""" - -import os -from distutils.core import Command -from distutils.errors import * -from distutils.util import get_platform - - -def show_formats(): - """Print list of available formats (arguments to "--format" option). 
- """ - from distutils.fancy_getopt import FancyGetopt - formats = [] - for format in bdist.format_commands: - formats.append(("formats=" + format, None, - bdist.format_command[format][1])) - pretty_printer = FancyGetopt(formats) - pretty_printer.print_help("List of available distribution formats:") - - -class bdist(Command): - - description = "create a built (binary) distribution" - - user_options = [('bdist-base=', 'b', - "temporary directory for creating built distributions"), - ('plat-name=', 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_platform()), - ('formats=', None, - "formats for distribution (comma-separated list)"), - ('dist-dir=', 'd', - "directory to put final built distributions in " - "[default: dist]"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ('owner=', 'u', - "Owner name used when creating a tar file" - " [default: current user]"), - ('group=', 'g', - "Group name used when creating a tar file" - " [default: current group]"), - ] - - boolean_options = ['skip-build'] - - help_options = [ - ('help-formats', None, - "lists available distribution formats", show_formats), - ] - - # The following commands do not take a format option from bdist - no_format_option = ('bdist_rpm',) - - # This won't do in reality: will need to distinguish RPM-ish Linux, - # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS. - default_format = {'posix': 'gztar', - 'nt': 'zip'} - - # Establish the preferred order (for the --help-formats option). - format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar', 'zip'] - - # And the real information. - format_command = {'rpm': ('bdist_rpm', "RPM distribution"), - 'gztar': ('bdist_dumb', "gzip'ed tar file"), - 'bztar': ('bdist_dumb', "bzip2'ed tar file"), - 'xztar': ('bdist_dumb', "xz'ed tar file"), - 'ztar': ('bdist_dumb', "compressed tar file"), - 'tar': ('bdist_dumb', "tar file"), - 'zip': ('bdist_dumb', "ZIP file"), - } - - def initialize_options(self): - self.bdist_base = None - self.plat_name = None - self.formats = None - self.dist_dir = None - self.skip_build = 0 - self.group = None - self.owner = None - - def finalize_options(self): - # have to finalize 'plat_name' before 'bdist_base' - if self.plat_name is None: - if self.skip_build: - self.plat_name = get_platform() - else: - self.plat_name = self.get_finalized_command('build').plat_name - - # 'bdist_base' -- parent of per-built-distribution-format - # temporary directories (eg. we'll probably have - # "build/bdist./dumb", "build/bdist./rpm", etc.) - if self.bdist_base is None: - build_base = self.get_finalized_command('build').build_base - self.bdist_base = os.path.join(build_base, - 'bdist.' + self.plat_name) - - self.ensure_string_list('formats') - if self.formats is None: - try: - self.formats = [self.default_format[os.name]] - except KeyError: - raise DistutilsPlatformError( - "don't know how to create built distributions " - "on platform %s" % os.name) - - if self.dist_dir is None: - self.dist_dir = "dist" - - def run(self): - # Figure out which sub-commands we need to run. - commands = [] - for format in self.formats: - try: - commands.append(self.format_command[format][0]) - except KeyError: - raise DistutilsOptionError("invalid format '%s'" % format) - - # Reinitialize and run each command. 
- for i in range(len(self.formats)): - cmd_name = commands[i] - sub_cmd = self.reinitialize_command(cmd_name) - if cmd_name not in self.no_format_option: - sub_cmd.format = self.formats[i] - - # passing the owner and group names for tar archiving - if cmd_name == 'bdist_dumb': - sub_cmd.owner = self.owner - sub_cmd.group = self.group - - # If we're going to need to run this command again, tell it to - # keep its temporary files around so subsequent runs go faster. - if cmd_name in commands[i+1:]: - sub_cmd.keep_temp = 1 - self.run_command(cmd_name) diff --git a/Lib/distutils/command/bdist_dumb.py b/Lib/distutils/command/bdist_dumb.py deleted file mode 100644 index f0d6b5b8cd8ab3c..000000000000000 --- a/Lib/distutils/command/bdist_dumb.py +++ /dev/null @@ -1,123 +0,0 @@ -"""distutils.command.bdist_dumb - -Implements the Distutils 'bdist_dumb' command (create a "dumb" built -distribution -- i.e., just an archive to be unpacked under $prefix or -$exec_prefix).""" - -import os -from distutils.core import Command -from distutils.util import get_platform -from distutils.dir_util import remove_tree, ensure_relative -from distutils.errors import * -from distutils.sysconfig import get_python_version -from distutils import log - -class bdist_dumb(Command): - - description = "create a \"dumb\" built distribution" - - user_options = [('bdist-dir=', 'd', - "temporary directory for creating the distribution"), - ('plat-name=', 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_platform()), - ('format=', 'f', - "archive format to create (tar, gztar, bztar, xztar, " - "ztar, zip)"), - ('keep-temp', 'k', - "keep the pseudo-installation tree around after " + - "creating the distribution archive"), - ('dist-dir=', 'd', - "directory to put final built distributions in"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ('relative', None, - "build the archive using relative paths " - "(default: false)"), - ('owner=', 'u', - "Owner name used when creating a tar file" - " [default: current user]"), - ('group=', 'g', - "Group name used when creating a tar file" - " [default: current group]"), - ] - - boolean_options = ['keep-temp', 'skip-build', 'relative'] - - default_format = { 'posix': 'gztar', - 'nt': 'zip' } - - def initialize_options(self): - self.bdist_dir = None - self.plat_name = None - self.format = None - self.keep_temp = 0 - self.dist_dir = None - self.skip_build = None - self.relative = 0 - self.owner = None - self.group = None - - def finalize_options(self): - if self.bdist_dir is None: - bdist_base = self.get_finalized_command('bdist').bdist_base - self.bdist_dir = os.path.join(bdist_base, 'dumb') - - if self.format is None: - try: - self.format = self.default_format[os.name] - except KeyError: - raise DistutilsPlatformError( - "don't know how to create dumb built distributions " - "on platform %s" % os.name) - - self.set_undefined_options('bdist', - ('dist_dir', 'dist_dir'), - ('plat_name', 'plat_name'), - ('skip_build', 'skip_build')) - - def run(self): - if not self.skip_build: - self.run_command('build') - - install = self.reinitialize_command('install', reinit_subcommands=1) - install.root = self.bdist_dir - install.skip_build = self.skip_build - install.warn_dir = 0 - - log.info("installing to %s", self.bdist_dir) - self.run_command('install') - - # And make an archive relative to the root of the - # pseudo-installation tree. 
- archive_basename = "%s.%s" % (self.distribution.get_fullname(), - self.plat_name) - - pseudoinstall_root = os.path.join(self.dist_dir, archive_basename) - if not self.relative: - archive_root = self.bdist_dir - else: - if (self.distribution.has_ext_modules() and - (install.install_base != install.install_platbase)): - raise DistutilsPlatformError( - "can't make a dumb built distribution where " - "base and platbase are different (%s, %s)" - % (repr(install.install_base), - repr(install.install_platbase))) - else: - archive_root = os.path.join(self.bdist_dir, - ensure_relative(install.install_base)) - - # Make the archive - filename = self.make_archive(pseudoinstall_root, - self.format, root_dir=archive_root, - owner=self.owner, group=self.group) - if self.distribution.has_ext_modules(): - pyversion = get_python_version() - else: - pyversion = 'any' - self.distribution.dist_files.append(('bdist_dumb', pyversion, - filename)) - - if not self.keep_temp: - remove_tree(self.bdist_dir, dry_run=self.dry_run) diff --git a/Lib/distutils/command/bdist_rpm.py b/Lib/distutils/command/bdist_rpm.py deleted file mode 100644 index 550cbfa1e28a237..000000000000000 --- a/Lib/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,579 +0,0 @@ -"""distutils.command.bdist_rpm - -Implements the Distutils 'bdist_rpm' command (create RPM source and binary -distributions).""" - -import subprocess, sys, os -from distutils.core import Command -from distutils.debug import DEBUG -from distutils.file_util import write_file -from distutils.errors import * -from distutils.sysconfig import get_python_version -from distutils import log - -class bdist_rpm(Command): - - description = "create an RPM distribution" - - user_options = [ - ('bdist-base=', None, - "base directory for creating built distributions"), - ('rpm-base=', None, - "base directory for creating RPMs (defaults to \"rpm\" under " - "--bdist-base; must be specified for RPM 2)"), - ('dist-dir=', 'd', - "directory to put final RPM files in " - "(and .spec files if --spec-only)"), - ('python=', None, - "path to Python interpreter to hard-code in the .spec file " - "(default: \"python\")"), - ('fix-python', None, - "hard-code the exact path to the current Python interpreter in " - "the .spec file"), - ('spec-only', None, - "only regenerate spec file"), - ('source-only', None, - "only generate source RPM"), - ('binary-only', None, - "only generate binary RPM"), - ('use-bzip2', None, - "use bzip2 instead of gzip to create source distribution"), - - # More meta-data: too RPM-specific to put in the setup script, - # but needs to go in the .spec file -- so we make these options - # to "bdist_rpm". The idea is that packagers would put this - # info in setup.cfg, although they are of course free to - # supply it on the command line. - ('distribution-name=', None, - "name of the (Linux) distribution to which this " - "RPM applies (*not* the name of the module distribution!)"), - ('group=', None, - "package classification [default: \"Development/Libraries\"]"), - ('release=', None, - "RPM release number"), - ('serial=', None, - "RPM serial number"), - ('vendor=', None, - "RPM \"vendor\" (eg. \"Joe Blow \") " - "[default: maintainer or author from setup script]"), - ('packager=', None, - "RPM packager (eg. 
\"Jane Doe \") " - "[default: vendor]"), - ('doc-files=', None, - "list of documentation files (space or comma-separated)"), - ('changelog=', None, - "RPM changelog"), - ('icon=', None, - "name of icon file"), - ('provides=', None, - "capabilities provided by this package"), - ('requires=', None, - "capabilities required by this package"), - ('conflicts=', None, - "capabilities which conflict with this package"), - ('build-requires=', None, - "capabilities required to build this package"), - ('obsoletes=', None, - "capabilities made obsolete by this package"), - ('no-autoreq', None, - "do not automatically calculate dependencies"), - - # Actions to take when building RPM - ('keep-temp', 'k', - "don't clean up RPM build directory"), - ('no-keep-temp', None, - "clean up RPM build directory [default]"), - ('use-rpm-opt-flags', None, - "compile with RPM_OPT_FLAGS when building from source RPM"), - ('no-rpm-opt-flags', None, - "do not pass any RPM CFLAGS to compiler"), - ('rpm3-mode', None, - "RPM 3 compatibility mode (default)"), - ('rpm2-mode', None, - "RPM 2 compatibility mode"), - - # Add the hooks necessary for specifying custom scripts - ('prep-script=', None, - "Specify a script for the PREP phase of RPM building"), - ('build-script=', None, - "Specify a script for the BUILD phase of RPM building"), - - ('pre-install=', None, - "Specify a script for the pre-INSTALL phase of RPM building"), - ('install-script=', None, - "Specify a script for the INSTALL phase of RPM building"), - ('post-install=', None, - "Specify a script for the post-INSTALL phase of RPM building"), - - ('pre-uninstall=', None, - "Specify a script for the pre-UNINSTALL phase of RPM building"), - ('post-uninstall=', None, - "Specify a script for the post-UNINSTALL phase of RPM building"), - - ('clean-script=', None, - "Specify a script for the CLEAN phase of RPM building"), - - ('verify-script=', None, - "Specify a script for the VERIFY phase of the RPM build"), - - # Allow a packager to explicitly force an architecture - ('force-arch=', None, - "Force an architecture onto the RPM build process"), - - ('quiet', 'q', - "Run the INSTALL phase of RPM building in quiet mode"), - ] - - boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode', - 'no-autoreq', 'quiet'] - - negative_opt = {'no-keep-temp': 'keep-temp', - 'no-rpm-opt-flags': 'use-rpm-opt-flags', - 'rpm2-mode': 'rpm3-mode'} - - - def initialize_options(self): - self.bdist_base = None - self.rpm_base = None - self.dist_dir = None - self.python = None - self.fix_python = None - self.spec_only = None - self.binary_only = None - self.source_only = None - self.use_bzip2 = None - - self.distribution_name = None - self.group = None - self.release = None - self.serial = None - self.vendor = None - self.packager = None - self.doc_files = None - self.changelog = None - self.icon = None - - self.prep_script = None - self.build_script = None - self.install_script = None - self.clean_script = None - self.verify_script = None - self.pre_install = None - self.post_install = None - self.pre_uninstall = None - self.post_uninstall = None - self.prep = None - self.provides = None - self.requires = None - self.conflicts = None - self.build_requires = None - self.obsoletes = None - - self.keep_temp = 0 - self.use_rpm_opt_flags = 1 - self.rpm3_mode = 1 - self.no_autoreq = 0 - - self.force_arch = None - self.quiet = 0 - - def finalize_options(self): - self.set_undefined_options('bdist', ('bdist_base', 'bdist_base')) - if self.rpm_base is None: - if not self.rpm3_mode: - raise 
DistutilsOptionError( - "you must specify --rpm-base in RPM 2 mode") - self.rpm_base = os.path.join(self.bdist_base, "rpm") - - if self.python is None: - if self.fix_python: - self.python = sys.executable - else: - self.python = "python3" - elif self.fix_python: - raise DistutilsOptionError( - "--python and --fix-python are mutually exclusive options") - - if os.name != 'posix': - raise DistutilsPlatformError("don't know how to create RPM " - "distributions on platform %s" % os.name) - if self.binary_only and self.source_only: - raise DistutilsOptionError( - "cannot supply both '--source-only' and '--binary-only'") - - # don't pass CFLAGS to pure python distributions - if not self.distribution.has_ext_modules(): - self.use_rpm_opt_flags = 0 - - self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) - self.finalize_package_data() - - def finalize_package_data(self): - self.ensure_string('group', "Development/Libraries") - self.ensure_string('vendor', - "%s <%s>" % (self.distribution.get_contact(), - self.distribution.get_contact_email())) - self.ensure_string('packager') - self.ensure_string_list('doc_files') - if isinstance(self.doc_files, list): - for readme in ('README', 'README.txt'): - if os.path.exists(readme) and readme not in self.doc_files: - self.doc_files.append(readme) - - self.ensure_string('release', "1") - self.ensure_string('serial') # should it be an int? - - self.ensure_string('distribution_name') - - self.ensure_string('changelog') - # Format changelog correctly - self.changelog = self._format_changelog(self.changelog) - - self.ensure_filename('icon') - - self.ensure_filename('prep_script') - self.ensure_filename('build_script') - self.ensure_filename('install_script') - self.ensure_filename('clean_script') - self.ensure_filename('verify_script') - self.ensure_filename('pre_install') - self.ensure_filename('post_install') - self.ensure_filename('pre_uninstall') - self.ensure_filename('post_uninstall') - - # XXX don't forget we punted on summaries and descriptions -- they - # should be handled here eventually! - - # Now *this* is some meta-data that belongs in the setup script... - self.ensure_string_list('provides') - self.ensure_string_list('requires') - self.ensure_string_list('conflicts') - self.ensure_string_list('build_requires') - self.ensure_string_list('obsoletes') - - self.ensure_string('force_arch') - - def run(self): - if DEBUG: - print("before _get_package_data():") - print("vendor =", self.vendor) - print("packager =", self.packager) - print("doc_files =", self.doc_files) - print("changelog =", self.changelog) - - # make directories - if self.spec_only: - spec_dir = self.dist_dir - self.mkpath(spec_dir) - else: - rpm_dir = {} - for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'): - rpm_dir[d] = os.path.join(self.rpm_base, d) - self.mkpath(rpm_dir[d]) - spec_dir = rpm_dir['SPECS'] - - # Spec file goes into 'dist_dir' if '--spec-only specified', - # build/rpm. otherwise. - spec_path = os.path.join(spec_dir, - "%s.spec" % self.distribution.get_name()) - self.execute(write_file, - (spec_path, - self._make_spec_file()), - "writing '%s'" % spec_path) - - if self.spec_only: # stop if requested - return - - # Make a source distribution and copy to SOURCES directory with - # optional icon. 
- saved_dist_files = self.distribution.dist_files[:] - sdist = self.reinitialize_command('sdist') - if self.use_bzip2: - sdist.formats = ['bztar'] - else: - sdist.formats = ['gztar'] - self.run_command('sdist') - self.distribution.dist_files = saved_dist_files - - source = sdist.get_archive_files()[0] - source_dir = rpm_dir['SOURCES'] - self.copy_file(source, source_dir) - - if self.icon: - if os.path.exists(self.icon): - self.copy_file(self.icon, source_dir) - else: - raise DistutilsFileError( - "icon file '%s' does not exist" % self.icon) - - # build package - log.info("building RPMs") - rpm_cmd = ['rpmbuild'] - - if self.source_only: # what kind of RPMs? - rpm_cmd.append('-bs') - elif self.binary_only: - rpm_cmd.append('-bb') - else: - rpm_cmd.append('-ba') - rpm_cmd.extend(['--define', '__python %s' % self.python]) - if self.rpm3_mode: - rpm_cmd.extend(['--define', - '_topdir %s' % os.path.abspath(self.rpm_base)]) - if not self.keep_temp: - rpm_cmd.append('--clean') - - if self.quiet: - rpm_cmd.append('--quiet') - - rpm_cmd.append(spec_path) - # Determine the binary rpm names that should be built out of this spec - # file - # Note that some of these may not be really built (if the file - # list is empty) - nvr_string = "%{name}-%{version}-%{release}" - src_rpm = nvr_string + ".src.rpm" - non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm" - q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % ( - src_rpm, non_src_rpm, spec_path) - - out = os.popen(q_cmd) - try: - binary_rpms = [] - source_rpm = None - while True: - line = out.readline() - if not line: - break - l = line.strip().split() - assert(len(l) == 2) - binary_rpms.append(l[1]) - # The source rpm is named after the first entry in the spec file - if source_rpm is None: - source_rpm = l[0] - - status = out.close() - if status: - raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd)) - - finally: - out.close() - - self.spawn(rpm_cmd) - - if not self.dry_run: - if self.distribution.has_ext_modules(): - pyversion = get_python_version() - else: - pyversion = 'any' - - if not self.binary_only: - srpm = os.path.join(rpm_dir['SRPMS'], source_rpm) - assert(os.path.exists(srpm)) - self.move_file(srpm, self.dist_dir) - filename = os.path.join(self.dist_dir, source_rpm) - self.distribution.dist_files.append( - ('bdist_rpm', pyversion, filename)) - - if not self.source_only: - for rpm in binary_rpms: - rpm = os.path.join(rpm_dir['RPMS'], rpm) - if os.path.exists(rpm): - self.move_file(rpm, self.dist_dir) - filename = os.path.join(self.dist_dir, - os.path.basename(rpm)) - self.distribution.dist_files.append( - ('bdist_rpm', pyversion, filename)) - - def _dist_path(self, path): - return os.path.join(self.dist_dir, os.path.basename(path)) - - def _make_spec_file(self): - """Generate the text of an RPM spec file and return it as a - list of strings (one per line). 
- """ - # definitions and headers - spec_file = [ - '%define name ' + self.distribution.get_name(), - '%define version ' + self.distribution.get_version().replace('-','_'), - '%define unmangled_version ' + self.distribution.get_version(), - '%define release ' + self.release.replace('-','_'), - '', - 'Summary: ' + self.distribution.get_description(), - ] - - # Workaround for #14443 which affects some RPM based systems such as - # RHEL6 (and probably derivatives) - vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}') - # Generate a potential replacement value for __os_install_post (whilst - # normalizing the whitespace to simplify the test for whether the - # invocation of brp-python-bytecompile passes in __python): - vendor_hook = '\n'.join([' %s \\' % line.strip() - for line in vendor_hook.splitlines()]) - problem = "brp-python-bytecompile \\\n" - fixed = "brp-python-bytecompile %{__python} \\\n" - fixed_hook = vendor_hook.replace(problem, fixed) - if fixed_hook != vendor_hook: - spec_file.append('# Workaround for http://bugs.python.org/issue14443') - spec_file.append('%define __os_install_post ' + fixed_hook + '\n') - - # put locale summaries into spec file - # XXX not supported for now (hard to put a dictionary - # in a config file -- arg!) - #for locale in self.summaries.keys(): - # spec_file.append('Summary(%s): %s' % (locale, - # self.summaries[locale])) - - spec_file.extend([ - 'Name: %{name}', - 'Version: %{version}', - 'Release: %{release}',]) - - # XXX yuck! this filename is available from the "sdist" command, - # but only after it has run: and we create the spec file before - # running "sdist", in case of --spec-only. - if self.use_bzip2: - spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2') - else: - spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz') - - spec_file.extend([ - 'License: ' + self.distribution.get_license(), - 'Group: ' + self.group, - 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot', - 'Prefix: %{_prefix}', ]) - - if not self.force_arch: - # noarch if no extension modules - if not self.distribution.has_ext_modules(): - spec_file.append('BuildArch: noarch') - else: - spec_file.append( 'BuildArch: %s' % self.force_arch ) - - for field in ('Vendor', - 'Packager', - 'Provides', - 'Requires', - 'Conflicts', - 'Obsoletes', - ): - val = getattr(self, field.lower()) - if isinstance(val, list): - spec_file.append('%s: %s' % (field, ' '.join(val))) - elif val is not None: - spec_file.append('%s: %s' % (field, val)) - - - if self.distribution.get_url() != 'UNKNOWN': - spec_file.append('Url: ' + self.distribution.get_url()) - - if self.distribution_name: - spec_file.append('Distribution: ' + self.distribution_name) - - if self.build_requires: - spec_file.append('BuildRequires: ' + - ' '.join(self.build_requires)) - - if self.icon: - spec_file.append('Icon: ' + os.path.basename(self.icon)) - - if self.no_autoreq: - spec_file.append('AutoReq: 0') - - spec_file.extend([ - '', - '%description', - self.distribution.get_long_description() - ]) - - # put locale descriptions into spec file - # XXX again, suppressed because config file syntax doesn't - # easily support this ;-( - #for locale in self.descriptions.keys(): - # spec_file.extend([ - # '', - # '%description -l ' + locale, - # self.descriptions[locale], - # ]) - - # rpm scripts - # figure out default build script - def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0])) - def_build = "%s build" % def_setup_call - if self.use_rpm_opt_flags: - def_build 
= 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build - - # insert contents of files - - # XXX this is kind of misleading: user-supplied options are files - # that we open and interpolate into the spec file, but the defaults - # are just text that we drop in as-is. Hmmm. - - install_cmd = ('%s install -O1 --root=$RPM_BUILD_ROOT ' - '--record=INSTALLED_FILES') % def_setup_call - - script_options = [ - ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"), - ('build', 'build_script', def_build), - ('install', 'install_script', install_cmd), - ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"), - ('verifyscript', 'verify_script', None), - ('pre', 'pre_install', None), - ('post', 'post_install', None), - ('preun', 'pre_uninstall', None), - ('postun', 'post_uninstall', None), - ] - - for (rpm_opt, attr, default) in script_options: - # Insert contents of file referred to, if no file is referred to - # use 'default' as contents of script - val = getattr(self, attr) - if val or default: - spec_file.extend([ - '', - '%' + rpm_opt,]) - if val: - with open(val) as f: - spec_file.extend(f.read().split('\n')) - else: - spec_file.append(default) - - - # files section - spec_file.extend([ - '', - '%files -f INSTALLED_FILES', - '%defattr(-,root,root)', - ]) - - if self.doc_files: - spec_file.append('%doc ' + ' '.join(self.doc_files)) - - if self.changelog: - spec_file.extend([ - '', - '%changelog',]) - spec_file.extend(self.changelog) - - return spec_file - - def _format_changelog(self, changelog): - """Format the changelog correctly and convert it to a list of strings - """ - if not changelog: - return changelog - new_changelog = [] - for line in changelog.strip().split('\n'): - line = line.strip() - if line[0] == '*': - new_changelog.extend(['', line]) - elif line[0] == '-': - new_changelog.append(line) - else: - new_changelog.append(' ' + line) - - # strip trailing newline inserted by first changelog entry - if not new_changelog[0]: - del new_changelog[0] - - return new_changelog diff --git a/Lib/distutils/command/build.py b/Lib/distutils/command/build.py deleted file mode 100644 index a86df0bc7f92188..000000000000000 --- a/Lib/distutils/command/build.py +++ /dev/null @@ -1,157 +0,0 @@ -"""distutils.command.build - -Implements the Distutils 'build' command.""" - -import sys, os -from distutils.core import Command -from distutils.errors import DistutilsOptionError -from distutils.util import get_platform - - -def show_compilers(): - from distutils.ccompiler import show_compilers - show_compilers() - - -class build(Command): - - description = "build everything needed to install" - - user_options = [ - ('build-base=', 'b', - "base directory for build library"), - ('build-purelib=', None, - "build directory for platform-neutral distributions"), - ('build-platlib=', None, - "build directory for platform-specific distributions"), - ('build-lib=', None, - "build directory for all distribution (defaults to either " + - "build-purelib or build-platlib"), - ('build-scripts=', None, - "build directory for scripts"), - ('build-temp=', 't', - "temporary build directory"), - ('plat-name=', 'p', - "platform name to build for, if supported " - "(default: %s)" % get_platform()), - ('compiler=', 'c', - "specify the compiler type"), - ('parallel=', 'j', - "number of parallel build jobs"), - ('debug', 'g', - "compile extensions and libraries with debugging information"), - ('force', 'f', - "forcibly build everything (ignore file timestamps)"), - ('executable=', 'e', - "specify final destination interpreter path 
(build.py)"), - ] - - boolean_options = ['debug', 'force'] - - help_options = [ - ('help-compiler', None, - "list available compilers", show_compilers), - ] - - def initialize_options(self): - self.build_base = 'build' - # these are decided only after 'build_base' has its final value - # (unless overridden by the user or client) - self.build_purelib = None - self.build_platlib = None - self.build_lib = None - self.build_temp = None - self.build_scripts = None - self.compiler = None - self.plat_name = None - self.debug = None - self.force = 0 - self.executable = None - self.parallel = None - - def finalize_options(self): - if self.plat_name is None: - self.plat_name = get_platform() - else: - # plat-name only supported for windows (other platforms are - # supported via ./configure flags, if at all). Avoid misleading - # other platforms. - if os.name != 'nt': - raise DistutilsOptionError( - "--plat-name only supported on Windows (try " - "using './configure --help' on your platform)") - - plat_specifier = ".%s-%d.%d" % (self.plat_name, *sys.version_info[:2]) - - # Make it so Python 2.x and Python 2.x with --with-pydebug don't - # share the same build directories. Doing so confuses the build - # process for C modules - if hasattr(sys, 'gettotalrefcount'): - plat_specifier += '-pydebug' - - # 'build_purelib' and 'build_platlib' just default to 'lib' and - # 'lib.' under the base build directory. We only use one of - # them for a given distribution, though -- - if self.build_purelib is None: - self.build_purelib = os.path.join(self.build_base, 'lib') - if self.build_platlib is None: - self.build_platlib = os.path.join(self.build_base, - 'lib' + plat_specifier) - - # 'build_lib' is the actual directory that we will use for this - # particular module distribution -- if user didn't supply it, pick - # one of 'build_purelib' or 'build_platlib'. - if self.build_lib is None: - if self.distribution.ext_modules: - self.build_lib = self.build_platlib - else: - self.build_lib = self.build_purelib - - # 'build_temp' -- temporary directory for compiler turds, - # "build/temp." - if self.build_temp is None: - self.build_temp = os.path.join(self.build_base, - 'temp' + plat_specifier) - if self.build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts-%d.%d' % sys.version_info[:2]) - - if self.executable is None and sys.executable: - self.executable = os.path.normpath(sys.executable) - - if isinstance(self.parallel, str): - try: - self.parallel = int(self.parallel) - except ValueError: - raise DistutilsOptionError("parallel should be an integer") - - def run(self): - # Run all relevant sub-commands. 
This will be some subset of: - # - build_py - pure Python modules - # - build_clib - standalone C libraries - # - build_ext - Python extensions - # - build_scripts - (Python) scripts - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - - # -- Predicates for the sub-command list --------------------------- - - def has_pure_modules(self): - return self.distribution.has_pure_modules() - - def has_c_libraries(self): - return self.distribution.has_c_libraries() - - def has_ext_modules(self): - return self.distribution.has_ext_modules() - - def has_scripts(self): - return self.distribution.has_scripts() - - - sub_commands = [('build_py', has_pure_modules), - ('build_clib', has_c_libraries), - ('build_ext', has_ext_modules), - ('build_scripts', has_scripts), - ] diff --git a/Lib/distutils/command/build_clib.py b/Lib/distutils/command/build_clib.py deleted file mode 100644 index 3e20ef23cd81e0d..000000000000000 --- a/Lib/distutils/command/build_clib.py +++ /dev/null @@ -1,209 +0,0 @@ -"""distutils.command.build_clib - -Implements the Distutils 'build_clib' command, to build a C/C++ library -that is included in the module distribution and needed by an extension -module.""" - - -# XXX this module has *lots* of code ripped-off quite transparently from -# build_ext.py -- not surprisingly really, as the work required to build -# a static library from a collection of C source files is not really all -# that different from what's required to build a shared object file from -# a collection of C source files. Nevertheless, I haven't done the -# necessary refactoring to account for the overlap in code between the -# two modules, mainly because a number of subtle details changed in the -# cut 'n paste. Sigh. - -import os -from distutils.core import Command -from distutils.errors import * -from distutils.sysconfig import customize_compiler -from distutils import log - -def show_compilers(): - from distutils.ccompiler import show_compilers - show_compilers() - - -class build_clib(Command): - - description = "build C/C++ libraries used by Python extensions" - - user_options = [ - ('build-clib=', 'b', - "directory to build C/C++ libraries to"), - ('build-temp=', 't', - "directory to put temporary build by-products"), - ('debug', 'g', - "compile with debugging information"), - ('force', 'f', - "forcibly build everything (ignore file timestamps)"), - ('compiler=', 'c', - "specify the compiler type"), - ] - - boolean_options = ['debug', 'force'] - - help_options = [ - ('help-compiler', None, - "list available compilers", show_compilers), - ] - - def initialize_options(self): - self.build_clib = None - self.build_temp = None - - # List of libraries to build - self.libraries = None - - # Compilation options for all libraries - self.include_dirs = None - self.define = None - self.undef = None - self.debug = None - self.force = 0 - self.compiler = None - - - def finalize_options(self): - # This might be confusing: both build-clib and build-temp default - # to build-temp as defined by the "build" command. This is because - # I think that C libraries are really just temporary build - # by-products, at least from the point of view of building Python - # extensions -- but I want to keep my options open. 
- self.set_undefined_options('build', - ('build_temp', 'build_clib'), - ('build_temp', 'build_temp'), - ('compiler', 'compiler'), - ('debug', 'debug'), - ('force', 'force')) - - self.libraries = self.distribution.libraries - if self.libraries: - self.check_library_list(self.libraries) - - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - # XXX same as for build_ext -- what about 'self.define' and - # 'self.undef' ? - - - def run(self): - if not self.libraries: - return - - # Yech -- this is cut 'n pasted from build_ext.py! - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - customize_compiler(self.compiler) - - if self.include_dirs is not None: - self.compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for (name,value) in self.define: - self.compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - self.compiler.undefine_macro(macro) - - self.build_libraries(self.libraries) - - - def check_library_list(self, libraries): - """Ensure that the list of libraries is valid. - - `library` is presumably provided as a command option 'libraries'. - This method checks that it is a list of 2-tuples, where the tuples - are (library_name, build_info_dict). - - Raise DistutilsSetupError if the structure is invalid anywhere; - just returns otherwise. - """ - if not isinstance(libraries, list): - raise DistutilsSetupError( - "'libraries' option must be a list of tuples") - - for lib in libraries: - if not isinstance(lib, tuple) and len(lib) != 2: - raise DistutilsSetupError( - "each element of 'libraries' must a 2-tuple") - - name, build_info = lib - - if not isinstance(name, str): - raise DistutilsSetupError( - "first element of each tuple in 'libraries' " - "must be a string (the library name)") - - if '/' in name or (os.sep != '/' and os.sep in name): - raise DistutilsSetupError("bad library name '%s': " - "may not contain directory separators" % lib[0]) - - if not isinstance(build_info, dict): - raise DistutilsSetupError( - "second element of each tuple in 'libraries' " - "must be a dictionary (build info)") - - - def get_library_names(self): - # Assume the library list is valid -- 'check_library_list()' is - # called from 'finalize_options()', so it should be! 
- if not self.libraries: - return None - - lib_names = [] - for (lib_name, build_info) in self.libraries: - lib_names.append(lib_name) - return lib_names - - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for (lib_name, build_info) in self.libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name) - - filenames.extend(sources) - return filenames - - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name) - sources = list(sources) - - log.info("building '%s' library", lib_name) - - # First, compile the source code to object files in the library - # directory. (This should probably change to putting object - # files in a temporary build directory.) - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - objects = self.compiler.compile(sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug) - - # Now "link" the object files together into a static library. - # (On Unix at least, this isn't really linking -- it just - # builds an archive. Whatever.) - self.compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) diff --git a/Lib/distutils/command/build_ext.py b/Lib/distutils/command/build_ext.py deleted file mode 100644 index f287b349984ff0c..000000000000000 --- a/Lib/distutils/command/build_ext.py +++ /dev/null @@ -1,754 +0,0 @@ -"""distutils.command.build_ext - -Implements the Distutils 'build_ext' command, for building extension -modules (currently limited to C extensions, should accommodate C++ -extensions ASAP).""" - -import contextlib -import os -import re -import sys -from distutils.core import Command -from distutils.errors import * -from distutils.sysconfig import customize_compiler, get_python_version -from distutils.sysconfig import get_config_h_filename -from distutils.dep_util import newer_group -from distutils.extension import Extension -from distutils.util import get_platform -from distutils import log - -from site import USER_BASE - -# An extension name is just a dot-separated list of Python NAMEs (ie. -# the same as a fully-qualified module name). -extension_name_re = re.compile \ - (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$') - - -def show_compilers (): - from distutils.ccompiler import show_compilers - show_compilers() - - -class build_ext(Command): - - description = "build C/C++ extensions (compile/link to build directory)" - - # XXX thoughts on how to deal with complex command-line options like - # these, i.e. how to make it so fancy_getopt can suck them off the - # command line and make it look like setup.py defined the appropriate - # lists of tuples of what-have-you. 
- # - each command needs a callback to process its command-line options - # - Command.__init__() needs access to its share of the whole - # command line (must ultimately come from - # Distribution.parse_command_line()) - # - it then calls the current command class' option-parsing - # callback to deal with weird options like -D, which have to - # parse the option text and churn out some custom data - # structure - # - that data structure (in this case, a list of 2-tuples) - # will then be present in the command object by the time - # we get to finalize_options() (i.e. the constructor - # takes care of both command-line and client options - # in between initialize_options() and finalize_options()) - - sep_by = " (separated by '%s')" % os.pathsep - user_options = [ - ('build-lib=', 'b', - "directory for compiled extension modules"), - ('build-temp=', 't', - "directory for temporary files (build by-products)"), - ('plat-name=', 'p', - "platform name to cross-compile for, if supported " - "(default: %s)" % get_platform()), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " + - "directory alongside your pure Python modules"), - ('include-dirs=', 'I', - "list of directories to search for header files" + sep_by), - ('define=', 'D', - "C preprocessor macros to define"), - ('undef=', 'U', - "C preprocessor macros to undefine"), - ('libraries=', 'l', - "external C libraries to link with"), - ('library-dirs=', 'L', - "directories to search for external C libraries" + sep_by), - ('rpath=', 'R', - "directories to search for shared C libraries at runtime"), - ('link-objects=', 'O', - "extra explicit link objects to include in the link"), - ('debug', 'g', - "compile/link with debugging information"), - ('force', 'f', - "forcibly build everything (ignore file timestamps)"), - ('compiler=', 'c', - "specify the compiler type"), - ('parallel=', 'j', - "number of parallel build jobs"), - ('swig-cpp', None, - "make SWIG create C++ files (default is C)"), - ('swig-opts=', None, - "list of SWIG command line options"), - ('swig=', None, - "path to the SWIG executable"), - ('user', None, - "add user include, library and rpath") - ] - - boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user'] - - help_options = [ - ('help-compiler', None, - "list available compilers", show_compilers), - ] - - def initialize_options(self): - self.extensions = None - self.build_lib = None - self.plat_name = None - self.build_temp = None - self.inplace = 0 - self.package = None - - self.include_dirs = None - self.define = None - self.undef = None - self.libraries = None - self.library_dirs = None - self.rpath = None - self.link_objects = None - self.debug = None - self.force = None - self.compiler = None - self.swig = None - self.swig_cpp = None - self.swig_opts = None - self.user = None - self.parallel = None - - def finalize_options(self): - from distutils import sysconfig - - self.set_undefined_options('build', - ('build_lib', 'build_lib'), - ('build_temp', 'build_temp'), - ('compiler', 'compiler'), - ('debug', 'debug'), - ('force', 'force'), - ('parallel', 'parallel'), - ('plat_name', 'plat_name'), - ) - - if self.package is None: - self.package = self.distribution.ext_package - - self.extensions = self.distribution.ext_modules - - # Make sure Python's include directories (for Python.h, pyconfig.h, - # etc.) are in the include search path. 
- py_include = sysconfig.get_python_inc() - plat_py_include = sysconfig.get_python_inc(plat_specific=1) - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - # If in a virtualenv, add its include directory - # Issue 16116 - if sys.exec_prefix != sys.base_exec_prefix: - self.include_dirs.append(os.path.join(sys.exec_prefix, 'include')) - - # Put the Python "system" include dir at the end, so that - # any local include dirs take precedence. - self.include_dirs.extend(py_include.split(os.path.pathsep)) - if plat_py_include != py_include: - self.include_dirs.extend( - plat_py_include.split(os.path.pathsep)) - - self.ensure_string_list('libraries') - self.ensure_string_list('link_objects') - - # Life is easier if we're not forever checking for None, so - # simplify these options to empty lists if unset - if self.libraries is None: - self.libraries = [] - if self.library_dirs is None: - self.library_dirs = [] - elif isinstance(self.library_dirs, str): - self.library_dirs = self.library_dirs.split(os.pathsep) - - if self.rpath is None: - self.rpath = [] - elif isinstance(self.rpath, str): - self.rpath = self.rpath.split(os.pathsep) - - # for extensions under windows use different directories - # for Release and Debug builds. - # also Python's library directory must be appended to library_dirs - if os.name == 'nt': - # the 'libs' directory is for binary installs - we assume that - # must be the *native* platform. But we don't really support - # cross-compiling via a binary install anyway, so we let it go. - self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) - if sys.base_exec_prefix != sys.prefix: # Issue 16116 - self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs')) - if self.debug: - self.build_temp = os.path.join(self.build_temp, "Debug") - else: - self.build_temp = os.path.join(self.build_temp, "Release") - - # Append the source distribution include and library directories, - # this allows distutils on windows to work in the source tree - self.include_dirs.append(os.path.dirname(get_config_h_filename())) - _sys_home = getattr(sys, '_home', None) - if _sys_home: - self.library_dirs.append(_sys_home) - - # Use the .lib files for the correct architecture - if self.plat_name == 'win32': - suffix = 'win32' - else: - # win-amd64 - suffix = self.plat_name[4:] - new_lib = os.path.join(sys.exec_prefix, 'PCbuild') - if suffix: - new_lib = os.path.join(new_lib, suffix) - self.library_dirs.append(new_lib) - - # For extensions under Cygwin, Python's library directory must be - # appended to library_dirs - if sys.platform[:6] == 'cygwin': - if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): - # building third party extensions - self.library_dirs.append(os.path.join(sys.prefix, "lib", - "python" + get_python_version(), - "config")) - else: - # building python standard extensions - self.library_dirs.append('.') - - # For building extensions with a shared Python library, - # Python's library directory must be appended to library_dirs - # See Issues: #1600860, #4366 - if (sysconfig.get_config_var('Py_ENABLE_SHARED')): - if not sysconfig.python_build: - # building third party extensions - self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) - else: - # building python standard extensions - self.library_dirs.append('.') - - # The argument parsing will result in self.define being a string, but - # it has to be a list of 
2-tuples. All the preprocessor symbols - # specified by the 'define' option will be set to '1'. Multiple - # symbols can be separated with commas. - - if self.define: - defines = self.define.split(',') - self.define = [(symbol, '1') for symbol in defines] - - # The option for macros to undefine is also a string from the - # option parsing, but has to be a list. Multiple symbols can also - # be separated with commas here. - if self.undef: - self.undef = self.undef.split(',') - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = self.swig_opts.split(' ') - - # Finally add the user include and library directories if requested - if self.user: - user_include = os.path.join(USER_BASE, "include") - user_lib = os.path.join(USER_BASE, "lib") - if os.path.isdir(user_include): - self.include_dirs.append(user_include) - if os.path.isdir(user_lib): - self.library_dirs.append(user_lib) - self.rpath.append(user_lib) - - if isinstance(self.parallel, str): - try: - self.parallel = int(self.parallel) - except ValueError: - raise DistutilsOptionError("parallel should be an integer") - - def run(self): - from distutils.ccompiler import new_compiler - - # 'self.extensions', as supplied by setup.py, is a list of - # Extension instances. See the documentation for Extension (in - # distutils.extension) for details. - # - # For backwards compatibility with Distutils 0.8.2 and earlier, we - # also allow the 'extensions' list to be a list of tuples: - # (ext_name, build_info) - # where build_info is a dictionary containing everything that - # Extension instances do except the name, with a few things being - # differently named. We convert these 2-tuples to Extension - # instances as needed. - - if not self.extensions: - return - - # If we were asked to build any C/C++ libraries, make sure that the - # directory where we put them is in the library search path for - # linking extensions. - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.libraries.extend(build_clib.get_library_names() or []) - self.library_dirs.append(build_clib.build_clib) - - # Setup the CCompiler object that we'll use to do all the - # compiling and linking - self.compiler = new_compiler(compiler=self.compiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - customize_compiler(self.compiler) - # If we are cross-compiling, init the compiler now (if we are not - # cross-compiling, init would not hurt, but people may rely on - # late initialization of compiler even if they shouldn't...) - if os.name == 'nt' and self.plat_name != get_platform(): - self.compiler.initialize(self.plat_name) - - # And make sure that any compile/link-related options (which might - # come from the command-line or from the setup script) are set in - # that CCompiler object -- that way, they automatically apply to - # all compiling and linking done here. 
- if self.include_dirs is not None: - self.compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for (name, value) in self.define: - self.compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - self.compiler.undefine_macro(macro) - if self.libraries is not None: - self.compiler.set_libraries(self.libraries) - if self.library_dirs is not None: - self.compiler.set_library_dirs(self.library_dirs) - if self.rpath is not None: - self.compiler.set_runtime_library_dirs(self.rpath) - if self.link_objects is not None: - self.compiler.set_link_objects(self.link_objects) - - # Now actually compile and link everything. - self.build_extensions() - - def check_extensions_list(self, extensions): - """Ensure that the list of extensions (presumably provided as a - command option 'extensions') is valid, i.e. it is a list of - Extension objects. We also support the old-style list of 2-tuples, - where the tuples are (ext_name, build_info), which are converted to - Extension instances here. - - Raise DistutilsSetupError if the structure is invalid anywhere; - just returns otherwise. - """ - if not isinstance(extensions, list): - raise DistutilsSetupError( - "'ext_modules' option must be a list of Extension instances") - - for i, ext in enumerate(extensions): - if isinstance(ext, Extension): - continue # OK! (assume type-checking done - # by Extension constructor) - - if not isinstance(ext, tuple) or len(ext) != 2: - raise DistutilsSetupError( - "each element of 'ext_modules' option must be an " - "Extension instance or 2-tuple") - - ext_name, build_info = ext - - log.warn("old-style (ext_name, build_info) tuple found in " - "ext_modules for extension '%s' " - "-- please convert to Extension instance", ext_name) - - if not (isinstance(ext_name, str) and - extension_name_re.match(ext_name)): - raise DistutilsSetupError( - "first element of each tuple in 'ext_modules' " - "must be the extension name (a string)") - - if not isinstance(build_info, dict): - raise DistutilsSetupError( - "second element of each tuple in 'ext_modules' " - "must be a dictionary (build info)") - - # OK, the (ext_name, build_info) dict is type-safe: convert it - # to an Extension instance. - ext = Extension(ext_name, build_info['sources']) - - # Easy stuff: one-to-one mapping from dict elements to - # instance attributes. - for key in ('include_dirs', 'library_dirs', 'libraries', - 'extra_objects', 'extra_compile_args', - 'extra_link_args'): - val = build_info.get(key) - if val is not None: - setattr(ext, key, val) - - # Medium-easy stuff: same syntax/semantics, different names. - ext.runtime_library_dirs = build_info.get('rpath') - if 'def_file' in build_info: - log.warn("'def_file' element of build info dict " - "no longer supported") - - # Non-trivial stuff: 'macros' split into 'define_macros' - # and 'undef_macros'. - macros = build_info.get('macros') - if macros: - ext.define_macros = [] - ext.undef_macros = [] - for macro in macros: - if not (isinstance(macro, tuple) and len(macro) in (1, 2)): - raise DistutilsSetupError( - "'macros' element of build info dict " - "must be 1- or 2-tuple") - if len(macro) == 1: - ext.undef_macros.append(macro[0]) - elif len(macro) == 2: - ext.define_macros.append(macro) - - extensions[i] = ext - - def get_source_files(self): - self.check_extensions_list(self.extensions) - filenames = [] - - # Wouldn't it be neat if we knew the names of header files too... 
- for ext in self.extensions: - filenames.extend(ext.sources) - return filenames - - def get_outputs(self): - # Sanity check the 'extensions' list -- can't assume this is being - # done in the same run as a 'build_extensions()' call (in fact, we - # can probably assume that it *isn't*!). - self.check_extensions_list(self.extensions) - - # And build the list of output (built) filenames. Note that this - # ignores the 'inplace' flag, and assumes everything goes in the - # "build" tree. - outputs = [] - for ext in self.extensions: - outputs.append(self.get_ext_fullpath(ext.name)) - return outputs - - def build_extensions(self): - # First, sanity-check the 'extensions' list - self.check_extensions_list(self.extensions) - if self.parallel: - self._build_extensions_parallel() - else: - self._build_extensions_serial() - - def _build_extensions_parallel(self): - workers = self.parallel - if self.parallel is True: - workers = os.cpu_count() # may return None - try: - from concurrent.futures import ThreadPoolExecutor - except ImportError: - workers = None - - if workers is None: - self._build_extensions_serial() - return - - with ThreadPoolExecutor(max_workers=workers) as executor: - futures = [executor.submit(self.build_extension, ext) - for ext in self.extensions] - for ext, fut in zip(self.extensions, futures): - with self._filter_build_errors(ext): - fut.result() - - def _build_extensions_serial(self): - for ext in self.extensions: - with self._filter_build_errors(ext): - self.build_extension(ext) - - @contextlib.contextmanager - def _filter_build_errors(self, ext): - try: - yield - except (CCompilerError, DistutilsError, CompileError) as e: - if not ext.optional: - raise - self.warn('building extension "%s" failed: %s' % - (ext.name, e)) - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'ext_modules' option (extension '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % ext.name) - # sort to make the resulting .so file build reproducible - sources = sorted(sources) - - ext_path = self.get_ext_fullpath(ext.name) - depends = sources + ext.depends - if not (self.force or newer_group(depends, ext_path, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - # First, scan the sources for SWIG definition files (.i), run - # SWIG on 'em to create .c files, and modify the sources list - # accordingly. - sources = self.swig_sources(sources, ext) - - # Next, compile the source code to object files. - - # XXX not honouring 'define_macros' or 'undef_macros' -- the - # CCompiler API needs to change to accommodate this, and I - # want to do one thing at a time! - - # Two possible sources for extra compiler arguments: - # - 'extra_compile_args' in Extension object - # - CFLAGS environment variable (not particularly - # elegant, but people seem to expect it and I - # guess it's useful) - # The environment variable should take precedence, and - # any sensible compiler will give precedence to later - # command line args. 
Hence we combine them in order: - extra_args = ext.extra_compile_args or [] - - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - objects = self.compiler.compile(sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=ext.include_dirs, - debug=self.debug, - extra_postargs=extra_args, - depends=ext.depends) - - # XXX outdated variable, kept here in case third-part code - # needs it. - self._built_objects = objects[:] - - # Now link the object files together into a "shared object" -- - # of course, first we have to figure out all the other things - # that go into the mix. - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - - # Detect target language, if not provided - language = ext.language or self.compiler.detect_language(sources) - - self.compiler.link_shared_object( - objects, ext_path, - libraries=self.get_libraries(ext), - library_dirs=ext.library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp, - target_lang=language) - - def swig_sources(self, sources, extension): - """Walk the list of source files in 'sources', looking for SWIG - interface (.i) files. Run SWIG on all that are found, and - return a modified 'sources' list with SWIG source files replaced - by the generated C (or C++) files. - """ - new_sources = [] - swig_sources = [] - swig_targets = {} - - # XXX this drops generated C/C++ files into the source tree, which - # is fine for developers who want to distribute the generated - # source -- but there should be an option to put SWIG output in - # the temp dir. - - if self.swig_cpp: - log.warn("--swig-cpp is deprecated - use --swig-opts=-c++") - - if self.swig_cpp or ('-c++' in self.swig_opts) or \ - ('-c++' in extension.swig_opts): - target_ext = '.cpp' - else: - target_ext = '.c' - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == ".i": # SWIG interface file - new_sources.append(base + '_wrap' + target_ext) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] - swig_cmd.extend(self.swig_opts) - if self.swig_cpp: - swig_cmd.append("-c++") - - # Do not override commandline arguments - if not self.swig_opts: - for o in extension.swig_opts: - swig_cmd.append(o) - - for source in swig_sources: - target = swig_targets[source] - log.info("swigging %s to %s", source, target) - self.spawn(swig_cmd + ["-o", target, source]) - - return new_sources - - def find_swig(self): - """Return the name of the SWIG executable. On Unix, this is - just "swig" -- it should be in the PATH. Tries a bit harder on - Windows. - """ - if os.name == "posix": - return "swig" - elif os.name == "nt": - # Look for SWIG in its standard installation directory on - # Windows (or so I presume!). If we find it there, great; - # if not, act like Unix and assume it's in the PATH. 
- for vers in ("1.3", "1.2", "1.1"): - fn = os.path.join("c:\\swig%s" % vers, "swig.exe") - if os.path.isfile(fn): - return fn - else: - return "swig.exe" - else: - raise DistutilsPlatformError( - "I don't know how to find (much less run) SWIG " - "on platform '%s'" % os.name) - - # -- Name generators ----------------------------------------------- - # (extension names, filenames, whatever) - def get_ext_fullpath(self, ext_name): - """Returns the path of the filename for a given extension. - - The file is located in `build_lib` or directly in the package - (inplace option). - """ - fullname = self.get_ext_fullname(ext_name) - modpath = fullname.split('.') - filename = self.get_ext_filename(modpath[-1]) - - if not self.inplace: - # no further work needed - # returning : - # build_dir/package/path/filename - filename = os.path.join(*modpath[:-1]+[filename]) - return os.path.join(self.build_lib, filename) - - # the inplace option requires to find the package directory - # using the build_py command for that - package = '.'.join(modpath[0:-1]) - build_py = self.get_finalized_command('build_py') - package_dir = os.path.abspath(build_py.get_package_dir(package)) - - # returning - # package_dir/filename - return os.path.join(package_dir, filename) - - def get_ext_fullname(self, ext_name): - """Returns the fullname of a given extension name. - - Adds the `package.` prefix""" - if self.package is None: - return ext_name - else: - return self.package + '.' + ext_name - - def get_ext_filename(self, ext_name): - r"""Convert the name of an extension (eg. "foo.bar") into the name - of the file from which it will be loaded (eg. "foo/bar.so", or - "foo\bar.pyd"). - """ - from distutils.sysconfig import get_config_var - ext_path = ext_name.split('.') - ext_suffix = get_config_var('EXT_SUFFIX') - return os.path.join(*ext_path) + ext_suffix - - def get_export_symbols(self, ext): - """Return the list of symbols that a shared extension has to - export. This either uses 'ext.export_symbols' or, if it's not - provided, "PyInit_" + module_name. Only relevant on Windows, where - the .pyd file (DLL) must export the module "PyInit_" function. - """ - suffix = '_' + ext.name.split('.')[-1] - try: - # Unicode module name support as defined in PEP-489 - # https://peps.python.org/pep-0489/#export-hook-name - suffix.encode('ascii') - except UnicodeEncodeError: - suffix = 'U' + suffix.encode('punycode').replace(b'-', b'_').decode('ascii') - - initfunc_name = "PyInit" + suffix - if initfunc_name not in ext.export_symbols: - ext.export_symbols.append(initfunc_name) - return ext.export_symbols - - def get_libraries(self, ext): - """Return the list of libraries to link against when building a - shared extension. On most platforms, this is just 'ext.libraries'; - on Windows, we add the Python library (eg. python20.dll). - """ - # The python library is always needed on Windows. For MSVC, this - # is redundant, since the library is mentioned in a pragma in - # pyconfig.h that MSVC groks. The other Windows compilers all seem - # to need it mentioned explicitly, though, so that's what we do. - # Append '_d' to the python import library on debug builds. 
- if sys.platform == "win32": - from distutils._msvccompiler import MSVCCompiler - if not isinstance(self.compiler, MSVCCompiler): - template = "python%d%d" - if self.debug: - template = template + '_d' - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) - # don't extend ext.libraries, it may be shared with other - # extensions, it is a reference to the original list - return ext.libraries + [pythonlib] - else: - # On Android only the main executable and LD_PRELOADs are considered - # to be RTLD_GLOBAL, all the dependencies of the main executable - # remain RTLD_LOCAL and so the shared libraries must be linked with - # libpython when python is built with a shared python library (issue - # bpo-21536). - # On Cygwin (and if required, other POSIX-like platforms based on - # Windows like MinGW) it is simply necessary that all symbols in - # shared libraries are resolved at link time. - from distutils.sysconfig import get_config_var - link_libpython = False - if get_config_var('Py_ENABLE_SHARED'): - # A native build on an Android device or on Cygwin - if hasattr(sys, 'getandroidapilevel'): - link_libpython = True - elif sys.platform == 'cygwin': - link_libpython = True - elif '_PYTHON_HOST_PLATFORM' in os.environ: - # We are cross-compiling for one of the relevant platforms - if get_config_var('ANDROID_API_LEVEL') != 0: - link_libpython = True - elif get_config_var('MACHDEP') == 'cygwin': - link_libpython = True - - if link_libpython: - ldversion = get_config_var('LDVERSION') - return ext.libraries + ['python' + ldversion] - - return ext.libraries diff --git a/Lib/distutils/command/build_py.py b/Lib/distutils/command/build_py.py deleted file mode 100644 index edc2171cd122dda..000000000000000 --- a/Lib/distutils/command/build_py.py +++ /dev/null @@ -1,416 +0,0 @@ -"""distutils.command.build_py - -Implements the Distutils 'build_py' command.""" - -import os -import importlib.util -import sys -import glob - -from distutils.core import Command -from distutils.errors import * -from distutils.util import convert_path, Mixin2to3 -from distutils import log - -class build_py (Command): - - description = "\"build\" pure Python modules (copy to build directory)" - - user_options = [ - ('build-lib=', 'd', "directory to \"build\" (copy) to"), - ('compile', 'c', "compile .py to .pyc"), - ('no-compile', None, "don't compile .py files [default]"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ] - - boolean_options = ['compile', 'force'] - negative_opt = {'no-compile' : 'compile'} - - def initialize_options(self): - self.build_lib = None - self.py_modules = None - self.package = None - self.package_data = None - self.package_dir = None - self.compile = 0 - self.optimize = 0 - self.force = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_lib', 'build_lib'), - ('force', 'force')) - - # Get the distribution options that are aliases for build_py - # options -- list of packages and list of modules. 
- self.packages = self.distribution.packages - self.py_modules = self.distribution.py_modules - self.package_data = self.distribution.package_data - self.package_dir = {} - if self.distribution.package_dir: - for name, path in self.distribution.package_dir.items(): - self.package_dir[name] = convert_path(path) - self.data_files = self.get_data_files() - - # Ick, copied straight from install_lib.py (fancy_getopt needs a - # type system! Hell, *everything* needs a type system!!!) - if not isinstance(self.optimize, int): - try: - self.optimize = int(self.optimize) - assert 0 <= self.optimize <= 2 - except (ValueError, AssertionError): - raise DistutilsOptionError("optimize must be 0, 1, or 2") - - def run(self): - # XXX copy_file by default preserves atime and mtime. IMHO this is - # the right thing to do, but perhaps it should be an option -- in - # particular, a site administrator might want installed files to - # reflect the time of installation rather than the last - # modification time before the installed release. - - # XXX copy_file by default preserves mode, which appears to be the - # wrong thing to do: if a file is read-only in the working - # directory, we want it to be installed read/write so that the next - # installation of the same module distribution can overwrite it - # without problems. (This might be a Unix-specific issue.) Thus - # we turn off 'preserve_mode' when copying to the build directory, - # since the build directory is supposed to be exactly what the - # installation will look like (ie. we preserve mode when - # installing). - - # Two options control which modules will be installed: 'packages' - # and 'py_modules'. The former lets us work with whole packages, not - # specifying individual modules at all; the latter is for - # specifying modules one-at-a-time. 
- - if self.py_modules: - self.build_modules() - if self.packages: - self.build_packages() - self.build_package_data() - - self.byte_compile(self.get_outputs(include_bytecode=0)) - - def get_data_files(self): - """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" - data = [] - if not self.packages: - return data - for package in self.packages: - # Locate package source directory - src_dir = self.get_package_dir(package) - - # Compute package build directory - build_dir = os.path.join(*([self.build_lib] + package.split('.'))) - - # Length of path to strip from found files - plen = 0 - if src_dir: - plen = len(src_dir)+1 - - # Strip directory from globbed filenames - filenames = [ - file[plen:] for file in self.find_data_files(package, src_dir) - ] - data.append((package, src_dir, build_dir, filenames)) - return data - - def find_data_files(self, package, src_dir): - """Return filenames for package's data files in 'src_dir'""" - globs = (self.package_data.get('', []) - + self.package_data.get(package, [])) - files = [] - for pattern in globs: - # Each pattern has to be converted to a platform-specific path - filelist = glob.glob(os.path.join(glob.escape(src_dir), convert_path(pattern))) - # Files that match more than one pattern are only added once - files.extend([fn for fn in filelist if fn not in files - and os.path.isfile(fn)]) - return files - - def build_package_data(self): - """Copy data files into build directory""" - lastdir = None - for package, src_dir, build_dir, filenames in self.data_files: - for filename in filenames: - target = os.path.join(build_dir, filename) - self.mkpath(os.path.dirname(target)) - self.copy_file(os.path.join(src_dir, filename), target, - preserve_mode=False) - - def get_package_dir(self, package): - """Return the directory, relative to the top of the source - distribution, where package 'package' should be found - (at least according to the 'package_dir' option, if any).""" - path = package.split('.') - - if not self.package_dir: - if path: - return os.path.join(*path) - else: - return '' - else: - tail = [] - while path: - try: - pdir = self.package_dir['.'.join(path)] - except KeyError: - tail.insert(0, path[-1]) - del path[-1] - else: - tail.insert(0, pdir) - return os.path.join(*tail) - else: - # Oops, got all the way through 'path' without finding a - # match in package_dir. If package_dir defines a directory - # for the root (nameless) package, then fallback on it; - # otherwise, we might as well have not consulted - # package_dir at all, as we just use the directory implied - # by 'tail' (which should be the same as the original value - # of 'path' at this point). - pdir = self.package_dir.get('') - if pdir is not None: - tail.insert(0, pdir) - - if tail: - return os.path.join(*tail) - else: - return '' - - def check_package(self, package, package_dir): - # Empty dir name means current directory, which we can probably - # assume exists. Also, os.path.exists and isdir don't know about - # my "empty string means current dir" convention, so we have to - # circumvent them. 
- if package_dir != "": - if not os.path.exists(package_dir): - raise DistutilsFileError( - "package directory '%s' does not exist" % package_dir) - if not os.path.isdir(package_dir): - raise DistutilsFileError( - "supposed package directory '%s' exists, " - "but is not a directory" % package_dir) - - # Require __init__.py for all but the "root package" - if package: - init_py = os.path.join(package_dir, "__init__.py") - if os.path.isfile(init_py): - return init_py - else: - log.warn(("package init file '%s' not found " + - "(or not a regular file)"), init_py) - - # Either not in a package at all (__init__.py not expected), or - # __init__.py doesn't exist -- so don't return the filename. - return None - - def check_module(self, module, module_file): - if not os.path.isfile(module_file): - log.warn("file %s (for module %s) not found", module_file, module) - return False - else: - return True - - def find_package_modules(self, package, package_dir): - self.check_package(package, package_dir) - module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py")) - modules = [] - setup_script = os.path.abspath(self.distribution.script_name) - - for f in module_files: - abs_f = os.path.abspath(f) - if abs_f != setup_script: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - else: - self.debug_print("excluding %s" % setup_script) - return modules - - def find_modules(self): - """Finds individually-specified Python modules, ie. those listed by - module name in 'self.py_modules'. Returns a list of tuples (package, - module_base, filename): 'package' is a tuple of the path through - package-space to the module; 'module_base' is the bare (no - packages, no dots) module name, and 'filename' is the path to the - ".py" file (relative to the distribution root) that implements the - module. - """ - # Map package names to tuples of useful info about the package: - # (package_dir, checked) - # package_dir - the directory where we'll find source files for - # this package - # checked - true if we have checked that the package directory - # is valid (exists, contains __init__.py, ... ?) - packages = {} - - # List of (package, module, filename) tuples to return - modules = [] - - # We treat modules-in-packages almost the same as toplevel modules, - # just the "package" for a toplevel is empty (either an empty - # string or empty list, depending on context). Differences: - # - don't check for __init__.py in directory for empty package - for module in self.py_modules: - path = module.split('.') - package = '.'.join(path[0:-1]) - module_base = path[-1] - - try: - (package_dir, checked) = packages[package] - except KeyError: - package_dir = self.get_package_dir(package) - checked = 0 - - if not checked: - init_py = self.check_package(package, package_dir) - packages[package] = (package_dir, 1) - if init_py: - modules.append((package, "__init__", init_py)) - - # XXX perhaps we should also check for just .pyc files - # (so greedy closed-source bastards can distribute Python - # modules too) - module_file = os.path.join(package_dir, module_base + ".py") - if not self.check_module(module, module_file): - continue - - modules.append((package, module_base, module_file)) - - return modules - - def find_all_modules(self): - """Compute the list of all modules that will be built, whether - they are specified one-module-at-a-time ('self.py_modules') or - by whole packages ('self.packages'). 
Return a list of tuples - (package, module, module_file), just like 'find_modules()' and - 'find_package_modules()' do.""" - modules = [] - if self.py_modules: - modules.extend(self.find_modules()) - if self.packages: - for package in self.packages: - package_dir = self.get_package_dir(package) - m = self.find_package_modules(package, package_dir) - modules.extend(m) - return modules - - def get_source_files(self): - return [module[-1] for module in self.find_all_modules()] - - def get_module_outfile(self, build_dir, package, module): - outfile_path = [build_dir] + list(package) + [module + ".py"] - return os.path.join(*outfile_path) - - def get_outputs(self, include_bytecode=1): - modules = self.find_all_modules() - outputs = [] - for (package, module, module_file) in modules: - package = package.split('.') - filename = self.get_module_outfile(self.build_lib, package, module) - outputs.append(filename) - if include_bytecode: - if self.compile: - outputs.append(importlib.util.cache_from_source( - filename, optimization='')) - if self.optimize > 0: - outputs.append(importlib.util.cache_from_source( - filename, optimization=self.optimize)) - - outputs += [ - os.path.join(build_dir, filename) - for package, src_dir, build_dir, filenames in self.data_files - for filename in filenames - ] - - return outputs - - def build_module(self, module, module_file, package): - if isinstance(package, str): - package = package.split('.') - elif not isinstance(package, (list, tuple)): - raise TypeError( - "'package' must be a string (dot-separated), list, or tuple") - - # Now put the module source file into the "build" area -- this is - # easy, we just copy it somewhere under self.build_lib (the build - # directory for Python source). - outfile = self.get_module_outfile(self.build_lib, package, module) - dir = os.path.dirname(outfile) - self.mkpath(dir) - return self.copy_file(module_file, outfile, preserve_mode=0) - - def build_modules(self): - modules = self.find_modules() - for (package, module, module_file) in modules: - # Now "build" the module -- ie. copy the source file to - # self.build_lib (the build directory for Python source). - # (Actually, it gets copied to the directory for this package - # under self.build_lib.) - self.build_module(module, module_file, package) - - def build_packages(self): - for package in self.packages: - # Get list of (package, module, module_file) tuples based on - # scanning the package directory. 'package' is only included - # in the tuple so that 'find_modules()' and - # 'find_package_tuples()' have a consistent interface; it's - # ignored here (apart from a sanity check). Also, 'module' is - # the *unqualified* module name (ie. no dots, no package -- we - # already know its package!), and 'module_file' is the path to - # the .py file, relative to the current directory - # (ie. including 'package_dir'). - package_dir = self.get_package_dir(package) - modules = self.find_package_modules(package, package_dir) - - # Now loop over the modules we found, "building" each one (just - # copy it to self.build_lib). 
- for (package_, module, module_file) in modules: - assert package == package_ - self.build_module(module, module_file, package) - - def byte_compile(self, files): - if sys.dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') - return - - from distutils.util import byte_compile - prefix = self.build_lib - if prefix[-1] != os.sep: - prefix = prefix + os.sep - - # XXX this code is essentially the same as the 'byte_compile() - # method of the "install_lib" command, except for the determination - # of the 'prefix' string. Hmmm. - if self.compile: - byte_compile(files, optimize=0, - force=self.force, prefix=prefix, dry_run=self.dry_run) - if self.optimize > 0: - byte_compile(files, optimize=self.optimize, - force=self.force, prefix=prefix, dry_run=self.dry_run) - -class build_py_2to3(build_py, Mixin2to3): - def run(self): - self.updated_files = [] - - # Base class code - if self.py_modules: - self.build_modules() - if self.packages: - self.build_packages() - self.build_package_data() - - # 2to3 - self.run_2to3(self.updated_files) - - # Remaining base class code - self.byte_compile(self.get_outputs(include_bytecode=0)) - - def build_module(self, module, module_file, package): - res = build_py.build_module(self, module, module_file, package) - if res[1]: - # file was copied - self.updated_files.append(res[0]) - return res diff --git a/Lib/distutils/command/build_scripts.py b/Lib/distutils/command/build_scripts.py deleted file mode 100644 index ccc70e6465016ef..000000000000000 --- a/Lib/distutils/command/build_scripts.py +++ /dev/null @@ -1,160 +0,0 @@ -"""distutils.command.build_scripts - -Implements the Distutils 'build_scripts' command.""" - -import os, re -from stat import ST_MODE -from distutils import sysconfig -from distutils.core import Command -from distutils.dep_util import newer -from distutils.util import convert_path, Mixin2to3 -from distutils import log -import tokenize - -# check if Python is called on the first line with this expression -first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$') - -class build_scripts(Command): - - description = "\"build\" scripts (copy and fixup #! line)" - - user_options = [ - ('build-dir=', 'd', "directory to \"build\" (copy) to"), - ('force', 'f', "forcibly build everything (ignore file timestamps"), - ('executable=', 'e', "specify final destination interpreter path"), - ] - - boolean_options = ['force'] - - - def initialize_options(self): - self.build_dir = None - self.scripts = None - self.force = None - self.executable = None - self.outfiles = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_scripts', 'build_dir'), - ('force', 'force'), - ('executable', 'executable')) - self.scripts = self.distribution.scripts - - def get_source_files(self): - return self.scripts - - def run(self): - if not self.scripts: - return - self.copy_scripts() - - - def copy_scripts(self): - r"""Copy each script listed in 'self.scripts'; if it's marked as a - Python script in the Unix way (first line matches 'first_line_re', - ie. starts with "\#!" and contains "python"), then adjust the first - line to refer to the current Python interpreter as we copy. 
- """ - self.mkpath(self.build_dir) - outfiles = [] - updated_files = [] - for script in self.scripts: - adjust = False - script = convert_path(script) - outfile = os.path.join(self.build_dir, os.path.basename(script)) - outfiles.append(outfile) - - if not self.force and not newer(script, outfile): - log.debug("not copying %s (up-to-date)", script) - continue - - # Always open the file, but ignore failures in dry-run mode -- - # that way, we'll get accurate feedback if we can read the - # script. - try: - f = open(script, "rb") - except OSError: - if not self.dry_run: - raise - f = None - else: - encoding, lines = tokenize.detect_encoding(f.readline) - f.seek(0) - first_line = f.readline() - if not first_line: - self.warn("%s is an empty file (skipping)" % script) - continue - - match = first_line_re.match(first_line) - if match: - adjust = True - post_interp = match.group(1) or b'' - - if adjust: - log.info("copying and adjusting %s -> %s", script, - self.build_dir) - updated_files.append(outfile) - if not self.dry_run: - if not sysconfig.python_build: - executable = self.executable - else: - executable = os.path.join( - sysconfig.get_config_var("BINDIR"), - "python%s%s" % (sysconfig.get_config_var("VERSION"), - sysconfig.get_config_var("EXE"))) - executable = os.fsencode(executable) - shebang = b"#!" + executable + post_interp + b"\n" - # Python parser starts to read a script using UTF-8 until - # it gets a #coding:xxx cookie. The shebang has to be the - # first line of a file, the #coding:xxx cookie cannot be - # written before. So the shebang has to be decodable from - # UTF-8. - try: - shebang.decode('utf-8') - except UnicodeDecodeError: - raise ValueError( - "The shebang ({!r}) is not decodable " - "from utf-8".format(shebang)) - # If the script is encoded to a custom encoding (use a - # #coding:xxx cookie), the shebang has to be decodable from - # the script encoding too. - try: - shebang.decode(encoding) - except UnicodeDecodeError: - raise ValueError( - "The shebang ({!r}) is not decodable " - "from the script encoding ({})" - .format(shebang, encoding)) - with open(outfile, "wb") as outf: - outf.write(shebang) - outf.writelines(f.readlines()) - if f: - f.close() - else: - if f: - f.close() - updated_files.append(outfile) - self.copy_file(script, outfile) - - if os.name == 'posix': - for file in outfiles: - if self.dry_run: - log.info("changing mode of %s", file) - else: - oldmode = os.stat(file)[ST_MODE] & 0o7777 - newmode = (oldmode | 0o555) & 0o7777 - if newmode != oldmode: - log.info("changing mode of %s from %o to %o", - file, oldmode, newmode) - os.chmod(file, newmode) - # XXX should we modify self.outfiles? - return outfiles, updated_files - -class build_scripts_2to3(build_scripts, Mixin2to3): - - def copy_scripts(self): - outfiles, updated_files = build_scripts.copy_scripts(self) - if not self.dry_run: - self.run_2to3(updated_files) - return outfiles, updated_files diff --git a/Lib/distutils/command/check.py b/Lib/distutils/command/check.py deleted file mode 100644 index 73a30f3afd84a34..000000000000000 --- a/Lib/distutils/command/check.py +++ /dev/null @@ -1,148 +0,0 @@ -"""distutils.command.check - -Implements the Distutils 'check' command. 
-""" -from distutils.core import Command -from distutils.errors import DistutilsSetupError - -try: - # docutils is installed - from docutils.utils import Reporter - from docutils.parsers.rst import Parser - from docutils import frontend - from docutils import nodes - - class SilentReporter(Reporter): - - def __init__(self, source, report_level, halt_level, stream=None, - debug=0, encoding='ascii', error_handler='replace'): - self.messages = [] - Reporter.__init__(self, source, report_level, halt_level, stream, - debug, encoding, error_handler) - - def system_message(self, level, message, *children, **kwargs): - self.messages.append((level, message, children, kwargs)) - return nodes.system_message(message, level=level, - type=self.levels[level], - *children, **kwargs) - - HAS_DOCUTILS = True -except Exception: - # Catch all exceptions because exceptions besides ImportError probably - # indicate that docutils is not ported to Py3k. - HAS_DOCUTILS = False - -class check(Command): - """This command checks the meta-data of the package. - """ - description = ("perform some checks on the package") - user_options = [('metadata', 'm', 'Verify meta-data'), - ('restructuredtext', 'r', - ('Checks if long string meta-data syntax ' - 'are reStructuredText-compliant')), - ('strict', 's', - 'Will exit with an error if a check fails')] - - boolean_options = ['metadata', 'restructuredtext', 'strict'] - - def initialize_options(self): - """Sets default values for options.""" - self.restructuredtext = 0 - self.metadata = 1 - self.strict = 0 - self._warnings = 0 - - def finalize_options(self): - pass - - def warn(self, msg): - """Counts the number of warnings that occurs.""" - self._warnings += 1 - return Command.warn(self, msg) - - def run(self): - """Runs the command.""" - # perform the various tests - if self.metadata: - self.check_metadata() - if self.restructuredtext: - if HAS_DOCUTILS: - self.check_restructuredtext() - elif self.strict: - raise DistutilsSetupError('The docutils package is needed.') - - # let's raise an error in strict mode, if we have at least - # one warning - if self.strict and self._warnings > 0: - raise DistutilsSetupError('Please correct your package.') - - def check_metadata(self): - """Ensures that all required elements of meta-data are supplied. - - Required fields: - name, version, URL - - Recommended fields: - (author and author_email) or (maintainer and maintainer_email) - - Warns if any are missing. 
- """ - metadata = self.distribution.metadata - - missing = [] - for attr in ('name', 'version', 'url'): - if not (hasattr(metadata, attr) and getattr(metadata, attr)): - missing.append(attr) - - if missing: - self.warn("missing required meta-data: %s" % ', '.join(missing)) - if metadata.author: - if not metadata.author_email: - self.warn("missing meta-data: if 'author' supplied, " + - "'author_email' should be supplied too") - elif metadata.maintainer: - if not metadata.maintainer_email: - self.warn("missing meta-data: if 'maintainer' supplied, " + - "'maintainer_email' should be supplied too") - else: - self.warn("missing meta-data: either (author and author_email) " + - "or (maintainer and maintainer_email) " + - "should be supplied") - - def check_restructuredtext(self): - """Checks if the long string fields are reST-compliant.""" - data = self.distribution.get_long_description() - for warning in self._check_rst_data(data): - line = warning[-1].get('line') - if line is None: - warning = warning[1] - else: - warning = '%s (line %s)' % (warning[1], line) - self.warn(warning) - - def _check_rst_data(self, data): - """Returns warnings when the provided data doesn't compile.""" - # the include and csv_table directives need this to be a path - source_path = self.distribution.script_name or 'setup.py' - parser = Parser() - settings = frontend.OptionParser(components=(Parser,)).get_default_values() - settings.tab_width = 4 - settings.pep_references = None - settings.rfc_references = None - reporter = SilentReporter(source_path, - settings.report_level, - settings.halt_level, - stream=settings.warning_stream, - debug=settings.debug, - encoding=settings.error_encoding, - error_handler=settings.error_encoding_error_handler) - - document = nodes.document(settings, reporter, source=source_path) - document.note_source(source_path, -1) - try: - parser.parse(data, document) - except AttributeError as e: - reporter.messages.append( - (-1, 'Could not finish the parsing: %s.' 
% e, '', {})) - - return reporter.messages diff --git a/Lib/distutils/command/clean.py b/Lib/distutils/command/clean.py deleted file mode 100644 index 0cb270166211fe2..000000000000000 --- a/Lib/distutils/command/clean.py +++ /dev/null @@ -1,76 +0,0 @@ -"""distutils.command.clean - -Implements the Distutils 'clean' command.""" - -# contributed by Bastian Kleineidam , added 2000-03-18 - -import os -from distutils.core import Command -from distutils.dir_util import remove_tree -from distutils import log - -class clean(Command): - - description = "clean up temporary files from 'build' command" - user_options = [ - ('build-base=', 'b', - "base build directory (default: 'build.build-base')"), - ('build-lib=', None, - "build directory for all modules (default: 'build.build-lib')"), - ('build-temp=', 't', - "temporary build directory (default: 'build.build-temp')"), - ('build-scripts=', None, - "build directory for scripts (default: 'build.build-scripts')"), - ('bdist-base=', None, - "temporary directory for built distributions"), - ('all', 'a', - "remove all build output, not just temporary by-products") - ] - - boolean_options = ['all'] - - def initialize_options(self): - self.build_base = None - self.build_lib = None - self.build_temp = None - self.build_scripts = None - self.bdist_base = None - self.all = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('build_scripts', 'build_scripts'), - ('build_temp', 'build_temp')) - self.set_undefined_options('bdist', - ('bdist_base', 'bdist_base')) - - def run(self): - # remove the build/temp. directory (unless it's already - # gone) - if os.path.exists(self.build_temp): - remove_tree(self.build_temp, dry_run=self.dry_run) - else: - log.debug("'%s' does not exist -- can't clean it", - self.build_temp) - - if self.all: - # remove build directories - for directory in (self.build_lib, - self.bdist_base, - self.build_scripts): - if os.path.exists(directory): - remove_tree(directory, dry_run=self.dry_run) - else: - log.warn("'%s' does not exist -- can't clean it", - directory) - - # just for the heck of it, try to remove the base build directory: - # we might have emptied it right now, but if not we don't care - if not self.dry_run: - try: - os.rmdir(self.build_base) - log.info("removing '%s'", self.build_base) - except OSError: - pass diff --git a/Lib/distutils/command/command_template b/Lib/distutils/command/command_template deleted file mode 100644 index 6106819db843b5d..000000000000000 --- a/Lib/distutils/command/command_template +++ /dev/null @@ -1,33 +0,0 @@ -"""distutils.command.x - -Implements the Distutils 'x' command. -""" - -# created 2000/mm/dd, John Doe - -__revision__ = "$Id$" - -from distutils.core import Command - - -class x(Command): - - # Brief (40-50 characters) description of the command - description = "" - - # List of option tuples: long name, short name (None if no short - # name), and help string. - user_options = [('', '', - ""), - ] - - def initialize_options(self): - self. = None - self. = None - self. 
= None - - def finalize_options(self): - if self.x is None: - self.x = - - def run(self): diff --git a/Lib/distutils/command/config.py b/Lib/distutils/command/config.py deleted file mode 100644 index aeda408e731979b..000000000000000 --- a/Lib/distutils/command/config.py +++ /dev/null @@ -1,344 +0,0 @@ -"""distutils.command.config - -Implements the Distutils 'config' command, a (mostly) empty command class -that exists mainly to be sub-classed by specific module distributions and -applications. The idea is that while every "config" command is different, -at least they're all named the same, and users always see "config" in the -list of standard commands. Also, this is a good place to put common -configure-like tasks: "try to compile this C code", or "figure out where -this header file lives". -""" - -import os, re - -from distutils.core import Command -from distutils.errors import DistutilsExecError -from distutils.sysconfig import customize_compiler -from distutils import log - -LANG_EXT = {"c": ".c", "c++": ".cxx"} - -class config(Command): - - description = "prepare to build" - - user_options = [ - ('compiler=', None, - "specify the compiler type"), - ('cc=', None, - "specify the compiler executable"), - ('include-dirs=', 'I', - "list of directories to search for header files"), - ('define=', 'D', - "C preprocessor macros to define"), - ('undef=', 'U', - "C preprocessor macros to undefine"), - ('libraries=', 'l', - "external C libraries to link with"), - ('library-dirs=', 'L', - "directories to search for external C libraries"), - - ('noisy', None, - "show every action (compile, link, run, ...) taken"), - ('dump-source', None, - "dump generated source files before attempting to compile them"), - ] - - - # The three standard command methods: since the "config" command - # does nothing by default, these are empty. - - def initialize_options(self): - self.compiler = None - self.cc = None - self.include_dirs = None - self.libraries = None - self.library_dirs = None - - # maximal output for now - self.noisy = 1 - self.dump_source = 1 - - # list of temporary files generated along-the-way that we have - # to clean at some point - self.temp_files = [] - - def finalize_options(self): - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - elif isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - if self.libraries is None: - self.libraries = [] - elif isinstance(self.libraries, str): - self.libraries = [self.libraries] - - if self.library_dirs is None: - self.library_dirs = [] - elif isinstance(self.library_dirs, str): - self.library_dirs = self.library_dirs.split(os.pathsep) - - def run(self): - pass - - # Utility methods for actual "config" commands. The interfaces are - # loosely based on Autoconf macros of similar names. Sub-classes - # may use these freely. - - def _check_compiler(self): - """Check that 'self.compiler' really is a CCompiler object; - if not, make it one. - """ - # We do this late, and only on-demand, because this is an expensive - # import. 
- from distutils.ccompiler import CCompiler, new_compiler - if not isinstance(self.compiler, CCompiler): - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, force=1) - customize_compiler(self.compiler) - if self.include_dirs: - self.compiler.set_include_dirs(self.include_dirs) - if self.libraries: - self.compiler.set_libraries(self.libraries) - if self.library_dirs: - self.compiler.set_library_dirs(self.library_dirs) - - def _gen_temp_sourcefile(self, body, headers, lang): - filename = "_configtest" + LANG_EXT[lang] - with open(filename, "w") as file: - if headers: - for header in headers: - file.write("#include <%s>\n" % header) - file.write("\n") - file.write(body) - if body[-1] != "\n": - file.write("\n") - return filename - - def _preprocess(self, body, headers, include_dirs, lang): - src = self._gen_temp_sourcefile(body, headers, lang) - out = "_configtest.i" - self.temp_files.extend([src, out]) - self.compiler.preprocess(src, out, include_dirs=include_dirs) - return (src, out) - - def _compile(self, body, headers, include_dirs, lang): - src = self._gen_temp_sourcefile(body, headers, lang) - if self.dump_source: - dump_file(src, "compiling '%s':" % src) - (obj,) = self.compiler.object_filenames([src]) - self.temp_files.extend([src, obj]) - self.compiler.compile([src], include_dirs=include_dirs) - return (src, obj) - - def _link(self, body, headers, include_dirs, libraries, library_dirs, - lang): - (src, obj) = self._compile(body, headers, include_dirs, lang) - prog = os.path.splitext(os.path.basename(src))[0] - self.compiler.link_executable([obj], prog, - libraries=libraries, - library_dirs=library_dirs, - target_lang=lang) - - if self.compiler.exe_extension is not None: - prog = prog + self.compiler.exe_extension - self.temp_files.append(prog) - - return (src, obj, prog) - - def _clean(self, *filenames): - if not filenames: - filenames = self.temp_files - self.temp_files = [] - log.info("removing: %s", ' '.join(filenames)) - for filename in filenames: - try: - os.remove(filename) - except OSError: - pass - - - # XXX these ignore the dry-run flag: what to do, what to do? even if - # you want a dry-run build, you still need some sort of configuration - # info. My inclination is to make it up to the real config command to - # consult 'dry_run', and assume a default (minimal) configuration if - # true. The problem with trying to do it here is that you'd have to - # return either true or false from all the 'try' methods, neither of - # which is correct. - - # XXX need access to the header search path and maybe default macros. - - def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"): - """Construct a source file from 'body' (a string containing lines - of C/C++ code) and 'headers' (a list of header files to include) - and run it through the preprocessor. Return true if the - preprocessor succeeded, false if there were any errors. - ('body' probably isn't of much use, but what the heck.) - """ - from distutils.ccompiler import CompileError - self._check_compiler() - ok = True - try: - self._preprocess(body, headers, include_dirs, lang) - except CompileError: - ok = False - - self._clean() - return ok - - def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, - lang="c"): - """Construct a source file (just like 'try_cpp()'), run it through - the preprocessor, and return true if any line of the output matches - 'pattern'. 'pattern' should either be a compiled regex object or a - string containing a regex. 
If both 'body' and 'headers' are None, - preprocesses an empty file -- which can be useful to determine the - symbols the preprocessor and compiler set by default. - """ - self._check_compiler() - src, out = self._preprocess(body, headers, include_dirs, lang) - - if isinstance(pattern, str): - pattern = re.compile(pattern) - - with open(out) as file: - match = False - while True: - line = file.readline() - if line == '': - break - if pattern.search(line): - match = True - break - - self._clean() - return match - - def try_compile(self, body, headers=None, include_dirs=None, lang="c"): - """Try to compile a source file built from 'body' and 'headers'. - Return true on success, false otherwise. - """ - from distutils.ccompiler import CompileError - self._check_compiler() - try: - self._compile(body, headers, include_dirs, lang) - ok = True - except CompileError: - ok = False - - log.info(ok and "success!" or "failure.") - self._clean() - return ok - - def try_link(self, body, headers=None, include_dirs=None, libraries=None, - library_dirs=None, lang="c"): - """Try to compile and link a source file, built from 'body' and - 'headers', to executable form. Return true on success, false - otherwise. - """ - from distutils.ccompiler import CompileError, LinkError - self._check_compiler() - try: - self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - ok = True - except (CompileError, LinkError): - ok = False - - log.info(ok and "success!" or "failure.") - self._clean() - return ok - - def try_run(self, body, headers=None, include_dirs=None, libraries=None, - library_dirs=None, lang="c"): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Return true on success, false - otherwise. - """ - from distutils.ccompiler import CompileError, LinkError - self._check_compiler() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - self.spawn([exe]) - ok = True - except (CompileError, LinkError, DistutilsExecError): - ok = False - - log.info(ok and "success!" or "failure.") - self._clean() - return ok - - - # -- High-level methods -------------------------------------------- - # (these are the ones that are actually likely to be useful - # when implementing a real-world config command!) - - def check_func(self, func, headers=None, include_dirs=None, - libraries=None, library_dirs=None, decl=0, call=0): - """Determine if function 'func' is available by constructing a - source file that refers to 'func', and compiles and links it. - If everything succeeds, returns true; otherwise returns false. - - The constructed source file starts out by including the header - files listed in 'headers'. If 'decl' is true, it then declares - 'func' (as "int func()"); you probably shouldn't supply 'headers' - and set 'decl' true in the same call, or you might get errors about - a conflicting declarations for 'func'. Finally, the constructed - 'main()' function either references 'func' or (if 'call' is true) - calls it. 'libraries' and 'library_dirs' are used when - linking. 
- """ - self._check_compiler() - body = [] - if decl: - body.append("int %s ();" % func) - body.append("int main () {") - if call: - body.append(" %s();" % func) - else: - body.append(" %s;" % func) - body.append("}") - body = "\n".join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_lib(self, library, library_dirs=None, headers=None, - include_dirs=None, other_libraries=[]): - """Determine if 'library' is available to be linked against, - without actually checking that any particular symbols are provided - by it. 'headers' will be used in constructing the source file to - be compiled, but the only effect of this is to check if all the - header files listed are available. Any libraries listed in - 'other_libraries' will be included in the link, in case 'library' - has symbols that depend on other libraries. - """ - self._check_compiler() - return self.try_link("int main (void) { }", headers, include_dirs, - [library] + other_libraries, library_dirs) - - def check_header(self, header, include_dirs=None, library_dirs=None, - lang="c"): - """Determine if the system header file named by 'header_file' - exists and can be found by the preprocessor; return true if so, - false otherwise. - """ - return self.try_cpp(body="/* No body */", headers=[header], - include_dirs=include_dirs) - -def dump_file(filename, head=None): - """Dumps a file content into log.info. - - If head is not None, will be dumped before the file content. - """ - if head is None: - log.info('%s', filename) - else: - log.info(head) - file = open(filename) - try: - log.info(file.read()) - finally: - file.close() diff --git a/Lib/distutils/command/install.py b/Lib/distutils/command/install.py deleted file mode 100644 index a22a5d094d76786..000000000000000 --- a/Lib/distutils/command/install.py +++ /dev/null @@ -1,679 +0,0 @@ -"""distutils.command.install - -Implements the Distutils 'install' command.""" - -import sys -import sysconfig -import os -import re - -from distutils import log -from distutils.core import Command -from distutils.debug import DEBUG -from distutils.sysconfig import get_config_vars -from distutils.errors import DistutilsPlatformError -from distutils.file_util import write_file -from distutils.util import convert_path, subst_vars, change_root -from distutils.util import get_platform -from distutils.errors import DistutilsOptionError - -from site import USER_BASE -from site import USER_SITE - -HAS_USER_SITE = (USER_SITE is not None) - -# The keys to an installation scheme; if any new types of files are to be -# installed, be sure to add an entry to every scheme in -# sysconfig._INSTALL_SCHEMES, and to SCHEME_KEYS here. -SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data') - -# The following code provides backward-compatible INSTALL_SCHEMES -# while making the sysconfig module the single point of truth. -# This makes it easier for OS distributions where they need to -# alter locations for packages installations in a single place. -# Note that this module is deprecated (PEP 632); all consumers -# of this information should switch to using sysconfig directly. 
-INSTALL_SCHEMES = {"unix_prefix": {}, "unix_home": {}, "nt": {}} - -# Copy from sysconfig._INSTALL_SCHEMES -for key in SCHEME_KEYS: - for distutils_scheme_name, sys_scheme_name in ( - ("unix_prefix", "posix_prefix"), ("unix_home", "posix_home"), - ("nt", "nt")): - sys_key = key - sys_scheme = sysconfig._INSTALL_SCHEMES[sys_scheme_name] - if key == "headers" and key not in sys_scheme: - # On POSIX-y platforms, Python will: - # - Build from .h files in 'headers' (only there when - # building CPython) - # - Install .h files to 'include' - # When 'headers' is missing, fall back to 'include' - sys_key = 'include' - INSTALL_SCHEMES[distutils_scheme_name][key] = sys_scheme[sys_key] - -# Transformation to different template format -for main_key in INSTALL_SCHEMES: - for key, value in INSTALL_SCHEMES[main_key].items(): - # Change all ocurences of {variable} to $variable - value = re.sub(r"\{(.+?)\}", r"$\g<1>", value) - value = value.replace("$installed_base", "$base") - value = value.replace("$py_version_nodot_plat", "$py_version_nodot") - if key == "headers": - value += "/$dist_name" - if sys.version_info >= (3, 9) and key == "platlib": - # platlibdir is available since 3.9: bpo-1294959 - value = value.replace("/lib/", "/$platlibdir/") - INSTALL_SCHEMES[main_key][key] = value - -# The following part of INSTALL_SCHEMES has a different definition -# than the one in sysconfig, but because both depend on the site module, -# the outcomes should be the same. -if HAS_USER_SITE: - INSTALL_SCHEMES['nt_user'] = { - 'purelib': '$usersite', - 'platlib': '$usersite', - 'headers': '$userbase/Python$py_version_nodot/Include/$dist_name', - 'scripts': '$userbase/Python$py_version_nodot/Scripts', - 'data' : '$userbase', - } - - INSTALL_SCHEMES['unix_user'] = { - 'purelib': '$usersite', - 'platlib': '$usersite', - 'headers': - '$userbase/include/python$py_version_short$abiflags/$dist_name', - 'scripts': '$userbase/bin', - 'data' : '$userbase', - } - - -class install(Command): - - description = "install everything from build directory" - - user_options = [ - # Select installation scheme and set base director(y|ies) - ('prefix=', None, - "installation prefix"), - ('exec-prefix=', None, - "(Unix only) prefix for platform-specific files"), - ('home=', None, - "(Unix only) home directory to install under"), - - # Or, just set the base director(y|ies) - ('install-base=', None, - "base installation directory (instead of --prefix or --home)"), - ('install-platbase=', None, - "base installation directory for platform-specific files " + - "(instead of --exec-prefix or --home)"), - ('root=', None, - "install everything relative to this alternate root directory"), - - # Or, explicitly set the installation scheme - ('install-purelib=', None, - "installation directory for pure Python module distributions"), - ('install-platlib=', None, - "installation directory for non-pure module distributions"), - ('install-lib=', None, - "installation directory for all module distributions " + - "(overrides --install-purelib and --install-platlib)"), - - ('install-headers=', None, - "installation directory for C/C++ headers"), - ('install-scripts=', None, - "installation directory for Python scripts"), - ('install-data=', None, - "installation directory for data files"), - - # Byte-compilation options -- see install_lib.py for details, as - # these are duplicated from there (but only install_lib does - # anything with them). 
- ('compile', 'c', "compile .py to .pyc [default]"), - ('no-compile', None, "don't compile .py files"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - - # Miscellaneous control options - ('force', 'f', - "force installation (overwrite any existing files)"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - - # Where to install documentation (eventually!) - #('doc-format=', None, "format of documentation to generate"), - #('install-man=', None, "directory for Unix man pages"), - #('install-html=', None, "directory for HTML documentation"), - #('install-info=', None, "directory for GNU info files"), - - ('record=', None, - "filename in which to record list of installed files"), - ] - - boolean_options = ['compile', 'force', 'skip-build'] - - if HAS_USER_SITE: - user_options.append(('user', None, - "install in user site-package '%s'" % USER_SITE)) - boolean_options.append('user') - - negative_opt = {'no-compile' : 'compile'} - - - def initialize_options(self): - """Initializes options.""" - # High-level options: these select both an installation base - # and scheme. - self.prefix = None - self.exec_prefix = None - self.home = None - self.user = 0 - - # These select only the installation base; it's up to the user to - # specify the installation scheme (currently, that means supplying - # the --install-{platlib,purelib,scripts,data} options). - self.install_base = None - self.install_platbase = None - self.root = None - - # These options are the actual installation directories; if not - # supplied by the user, they are filled in using the installation - # scheme implied by prefix/exec-prefix/home and the contents of - # that installation scheme. - self.install_purelib = None # for pure module distributions - self.install_platlib = None # non-pure (dists w/ extensions) - self.install_headers = None # for C/C++ headers - self.install_lib = None # set to either purelib or platlib - self.install_scripts = None - self.install_data = None - if HAS_USER_SITE: - self.install_userbase = USER_BASE - self.install_usersite = USER_SITE - - self.compile = None - self.optimize = None - - # Deprecated - # These two are for putting non-packagized distributions into their - # own directory and creating a .pth file if it makes sense. - # 'extra_path' comes from the setup file; 'install_path_file' can - # be turned off if it makes no sense to install a .pth file. (But - # better to install it uselessly than to guess wrong and not - # install it when it's necessary and would be used!) Currently, - # 'install_path_file' is always true unless some outsider meddles - # with it. - self.extra_path = None - self.install_path_file = 1 - - # 'force' forces installation, even if target files are not - # out-of-date. 'skip_build' skips running the "build" command, - # handy if you know it's not necessary. 'warn_dir' (which is *not* - # a user option, it's just there so the bdist_* commands can turn - # it off) determines whether we warn about installing to a - # directory not in sys.path. - self.force = 0 - self.skip_build = 0 - self.warn_dir = 1 - - # These are only here as a conduit from the 'build' command to the - # 'install_*' commands that do the real work. ('build_base' isn't - # actually used anywhere, but it might be useful in future.) They - # are not user options, because if the user told the install - # command where the build directory is, that wouldn't affect the - # build command. 
- self.build_base = None - self.build_lib = None - - # Not defined yet because we don't know anything about - # documentation yet. - #self.install_man = None - #self.install_html = None - #self.install_info = None - - self.record = None - - - # -- Option finalizing methods ------------------------------------- - # (This is rather more involved than for most commands, - # because this is where the policy for installing third- - # party Python modules on various platforms given a wide - # array of user input is decided. Yes, it's quite complex!) - - def finalize_options(self): - """Finalizes options.""" - # This method (and its helpers, like 'finalize_unix()', - # 'finalize_other()', and 'select_scheme()') is where the default - # installation directories for modules, extension modules, and - # anything else we care to install from a Python module - # distribution. Thus, this code makes a pretty important policy - # statement about how third-party stuff is added to a Python - # installation! Note that the actual work of installation is done - # by the relatively simple 'install_*' commands; they just take - # their orders from the installation directory options determined - # here. - - # Check for errors/inconsistencies in the options; first, stuff - # that's wrong on any platform. - - if ((self.prefix or self.exec_prefix or self.home) and - (self.install_base or self.install_platbase)): - raise DistutilsOptionError( - "must supply either prefix/exec-prefix/home or " + - "install-base/install-platbase -- not both") - - if self.home and (self.prefix or self.exec_prefix): - raise DistutilsOptionError( - "must supply either home or prefix/exec-prefix -- not both") - - if self.user and (self.prefix or self.exec_prefix or self.home or - self.install_base or self.install_platbase): - raise DistutilsOptionError("can't combine user with prefix, " - "exec_prefix/home, or install_(plat)base") - - # Next, stuff that's wrong (or dubious) only on certain platforms. - if os.name != "posix": - if self.exec_prefix: - self.warn("exec-prefix option ignored on this platform") - self.exec_prefix = None - - # Now the interesting logic -- so interesting that we farm it out - # to other methods. The goal of these methods is to set the final - # values for the install_{lib,scripts,data,...} options, using as - # input a heady brew of prefix, exec_prefix, home, install_base, - # install_platbase, user-supplied versions of - # install_{purelib,platlib,lib,scripts,data,...}, and the - # INSTALL_SCHEME dictionary above. Phew! - - self.dump_dirs("pre-finalize_{unix,other}") - - if os.name == 'posix': - self.finalize_unix() - else: - self.finalize_other() - - self.dump_dirs("post-finalize_{unix,other}()") - - # Expand configuration variables, tilde, etc. in self.install_base - # and self.install_platbase -- that way, we can use $base or - # $platbase in the other installation directories and not worry - # about needing recursive variable expansion (shudder). - - py_version = sys.version.split()[0] - (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix') - try: - abiflags = sys.abiflags - except AttributeError: - # sys.abiflags may not be defined on all platforms. 
- abiflags = '' - self.config_vars = {'dist_name': self.distribution.get_name(), - 'dist_version': self.distribution.get_version(), - 'dist_fullname': self.distribution.get_fullname(), - 'py_version': py_version, - 'py_version_short': '%d.%d' % sys.version_info[:2], - 'py_version_nodot': '%d%d' % sys.version_info[:2], - 'sys_prefix': prefix, - 'prefix': prefix, - 'sys_exec_prefix': exec_prefix, - 'exec_prefix': exec_prefix, - 'abiflags': abiflags, - 'platlibdir': sys.platlibdir, - } - - if HAS_USER_SITE: - self.config_vars['userbase'] = self.install_userbase - self.config_vars['usersite'] = self.install_usersite - - if sysconfig.is_python_build(): - self.config_vars['srcdir'] = sysconfig.get_config_var('srcdir') - - self.expand_basedirs() - - self.dump_dirs("post-expand_basedirs()") - - # Now define config vars for the base directories so we can expand - # everything else. - self.config_vars['base'] = self.install_base - self.config_vars['platbase'] = self.install_platbase - - if DEBUG: - from pprint import pprint - print("config vars:") - pprint(self.config_vars) - - # Expand "~" and configuration variables in the installation - # directories. - self.expand_dirs() - - self.dump_dirs("post-expand_dirs()") - - # Create directories in the home dir: - if self.user: - self.create_home_path() - - # Pick the actual directory to install all modules to: either - # install_purelib or install_platlib, depending on whether this - # module distribution is pure or not. Of course, if the user - # already specified install_lib, use their selection. - if self.install_lib is None: - if self.distribution.ext_modules: # has extensions: non-pure - self.install_lib = self.install_platlib - else: - self.install_lib = self.install_purelib - - - # Convert directories from Unix /-separated syntax to the local - # convention. - self.convert_paths('lib', 'purelib', 'platlib', - 'scripts', 'data', 'headers') - if HAS_USER_SITE: - self.convert_paths('userbase', 'usersite') - - # Deprecated - # Well, we're not actually fully completely finalized yet: we still - # have to deal with 'extra_path', which is the hack for allowing - # non-packagized module distributions (hello, Numerical Python!) to - # get their own directories. - self.handle_extra_path() - self.install_libbase = self.install_lib # needed for .pth file - self.install_lib = os.path.join(self.install_lib, self.extra_dirs) - - # If a new root directory was supplied, make all the installation - # dirs relative to it. - if self.root is not None: - self.change_roots('libbase', 'lib', 'purelib', 'platlib', - 'scripts', 'data', 'headers') - - self.dump_dirs("after prepending root") - - # Find out the build directories, ie. where to install from. - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib')) - - # Punt on doc directories for now -- after all, we're punting on - # documentation completely! 
- - def dump_dirs(self, msg): - """Dumps the list of user options.""" - if not DEBUG: - return - from distutils.fancy_getopt import longopt_xlate - log.debug(msg + ":") - for opt in self.user_options: - opt_name = opt[0] - if opt_name[-1] == "=": - opt_name = opt_name[0:-1] - if opt_name in self.negative_opt: - opt_name = self.negative_opt[opt_name] - opt_name = opt_name.translate(longopt_xlate) - val = not getattr(self, opt_name) - else: - opt_name = opt_name.translate(longopt_xlate) - val = getattr(self, opt_name) - log.debug(" %s: %s", opt_name, val) - - def finalize_unix(self): - """Finalizes options for posix platforms.""" - if self.install_base is not None or self.install_platbase is not None: - if ((self.install_lib is None and - self.install_purelib is None and - self.install_platlib is None) or - self.install_headers is None or - self.install_scripts is None or - self.install_data is None): - raise DistutilsOptionError( - "install-base or install-platbase supplied, but " - "installation scheme is incomplete") - return - - if self.user: - if self.install_userbase is None: - raise DistutilsPlatformError( - "User base directory is not specified") - self.install_base = self.install_platbase = self.install_userbase - self.select_scheme("unix_user") - elif self.home is not None: - self.install_base = self.install_platbase = self.home - self.select_scheme("unix_home") - else: - if self.prefix is None: - if self.exec_prefix is not None: - raise DistutilsOptionError( - "must not supply exec-prefix without prefix") - - self.prefix = os.path.normpath(sys.prefix) - self.exec_prefix = os.path.normpath(sys.exec_prefix) - - else: - if self.exec_prefix is None: - self.exec_prefix = self.prefix - - self.install_base = self.prefix - self.install_platbase = self.exec_prefix - self.select_scheme("unix_prefix") - - def finalize_other(self): - """Finalizes options for non-posix platforms""" - if self.user: - if self.install_userbase is None: - raise DistutilsPlatformError( - "User base directory is not specified") - self.install_base = self.install_platbase = self.install_userbase - self.select_scheme(os.name + "_user") - elif self.home is not None: - self.install_base = self.install_platbase = self.home - self.select_scheme("unix_home") - else: - if self.prefix is None: - self.prefix = os.path.normpath(sys.prefix) - - self.install_base = self.install_platbase = self.prefix - try: - self.select_scheme(os.name) - except KeyError: - raise DistutilsPlatformError( - "I don't know how to install stuff on '%s'" % os.name) - - def select_scheme(self, name): - """Sets the install directories by applying the install schemes.""" - # it's the caller's problem if they supply a bad name! 
- scheme = INSTALL_SCHEMES[name] - for key in SCHEME_KEYS: - attrname = 'install_' + key - if getattr(self, attrname) is None: - setattr(self, attrname, scheme[key]) - - def _expand_attrs(self, attrs): - for attr in attrs: - val = getattr(self, attr) - if val is not None: - if os.name == 'posix' or os.name == 'nt': - val = os.path.expanduser(val) - val = subst_vars(val, self.config_vars) - setattr(self, attr, val) - - def expand_basedirs(self): - """Calls `os.path.expanduser` on install_base, install_platbase and - root.""" - self._expand_attrs(['install_base', 'install_platbase', 'root']) - - def expand_dirs(self): - """Calls `os.path.expanduser` on install dirs.""" - self._expand_attrs(['install_purelib', 'install_platlib', - 'install_lib', 'install_headers', - 'install_scripts', 'install_data',]) - - def convert_paths(self, *names): - """Call `convert_path` over `names`.""" - for name in names: - attr = "install_" + name - setattr(self, attr, convert_path(getattr(self, attr))) - - def handle_extra_path(self): - """Set `path_file` and `extra_dirs` using `extra_path`.""" - if self.extra_path is None: - self.extra_path = self.distribution.extra_path - - if self.extra_path is not None: - log.warn( - "Distribution option extra_path is deprecated. " - "See issue27919 for details." - ) - if isinstance(self.extra_path, str): - self.extra_path = self.extra_path.split(',') - - if len(self.extra_path) == 1: - path_file = extra_dirs = self.extra_path[0] - elif len(self.extra_path) == 2: - path_file, extra_dirs = self.extra_path - else: - raise DistutilsOptionError( - "'extra_path' option must be a list, tuple, or " - "comma-separated string with 1 or 2 elements") - - # convert to local form in case Unix notation used (as it - # should be in setup scripts) - extra_dirs = convert_path(extra_dirs) - else: - path_file = None - extra_dirs = '' - - # XXX should we warn if path_file and not extra_dirs? (in which - # case the path file would be harmless but pointless) - self.path_file = path_file - self.extra_dirs = extra_dirs - - def change_roots(self, *names): - """Change the install directories pointed by name using root.""" - for name in names: - attr = "install_" + name - setattr(self, attr, change_root(self.root, getattr(self, attr))) - - def create_home_path(self): - """Create directories under ~.""" - if not self.user: - return - home = convert_path(os.path.expanduser("~")) - for name, path in self.config_vars.items(): - if path.startswith(home) and not os.path.isdir(path): - self.debug_print("os.makedirs('%s', 0o700)" % path) - os.makedirs(path, 0o700) - - # -- Command execution methods ------------------------------------- - - def run(self): - """Runs the command.""" - # Obviously have to build before we can install - if not self.skip_build: - self.run_command('build') - # If we built for any other platform, we can't install. - build_plat = self.distribution.get_command_obj('build').plat_name - # check warn_dir - it is a clue that the 'install' is happening - # internally, and not to sys.path, so we don't check the platform - # matches what we are running. - if self.warn_dir and build_plat != get_platform(): - raise DistutilsPlatformError("Can't install when " - "cross-compiling") - - # Run all sub-commands (at least those that need to be run) - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - if self.path_file: - self.create_path_file() - - # write list of installed files, if requested. 
- if self.record: - outputs = self.get_outputs() - if self.root: # strip any package prefix - root_len = len(self.root) - for counter in range(len(outputs)): - outputs[counter] = outputs[counter][root_len:] - self.execute(write_file, - (self.record, outputs), - "writing list of installed files to '%s'" % - self.record) - - sys_path = map(os.path.normpath, sys.path) - sys_path = map(os.path.normcase, sys_path) - install_lib = os.path.normcase(os.path.normpath(self.install_lib)) - if (self.warn_dir and - not (self.path_file and self.install_path_file) and - install_lib not in sys_path): - log.debug(("modules installed to '%s', which is not in " - "Python's module search path (sys.path) -- " - "you'll have to change the search path yourself"), - self.install_lib) - - def create_path_file(self): - """Creates the .pth file""" - filename = os.path.join(self.install_libbase, - self.path_file + ".pth") - if self.install_path_file: - self.execute(write_file, - (filename, [self.extra_dirs]), - "creating %s" % filename) - else: - self.warn("path file '%s' not created" % filename) - - - # -- Reporting methods --------------------------------------------- - - def get_outputs(self): - """Assembles the outputs of all the sub-commands.""" - outputs = [] - for cmd_name in self.get_sub_commands(): - cmd = self.get_finalized_command(cmd_name) - # Add the contents of cmd.get_outputs(), ensuring - # that outputs doesn't contain duplicate entries - for filename in cmd.get_outputs(): - if filename not in outputs: - outputs.append(filename) - - if self.path_file and self.install_path_file: - outputs.append(os.path.join(self.install_libbase, - self.path_file + ".pth")) - - return outputs - - def get_inputs(self): - """Returns the inputs of all the sub-commands""" - # XXX gee, this looks familiar ;-( - inputs = [] - for cmd_name in self.get_sub_commands(): - cmd = self.get_finalized_command(cmd_name) - inputs.extend(cmd.get_inputs()) - - return inputs - - # -- Predicates for sub-command list ------------------------------- - - def has_lib(self): - """Returns true if the current distribution has any Python - modules to install.""" - return (self.distribution.has_pure_modules() or - self.distribution.has_ext_modules()) - - def has_headers(self): - """Returns true if the current distribution has any headers to - install.""" - return self.distribution.has_headers() - - def has_scripts(self): - """Returns true if the current distribution has any scripts to. - install.""" - return self.distribution.has_scripts() - - def has_data(self): - """Returns true if the current distribution has any data to. - install.""" - return self.distribution.has_data_files() - - # 'sub_commands': a list of commands this command might have to run to - # get its work done. See cmd.py for more info. 
- sub_commands = [('install_lib', has_lib), - ('install_headers', has_headers), - ('install_scripts', has_scripts), - ('install_data', has_data), - ('install_egg_info', lambda self:True), - ] diff --git a/Lib/distutils/command/install_data.py b/Lib/distutils/command/install_data.py deleted file mode 100644 index 947cd76a99e5fdd..000000000000000 --- a/Lib/distutils/command/install_data.py +++ /dev/null @@ -1,79 +0,0 @@ -"""distutils.command.install_data - -Implements the Distutils 'install_data' command, for installing -platform-independent data files.""" - -# contributed by Bastian Kleineidam - -import os -from distutils.core import Command -from distutils.util import change_root, convert_path - -class install_data(Command): - - description = "install data files" - - user_options = [ - ('install-dir=', 'd', - "base directory for installing data files " - "(default: installation base dir)"), - ('root=', None, - "install everything relative to this alternate root directory"), - ('force', 'f', "force installation (overwrite existing files)"), - ] - - boolean_options = ['force'] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - self.root = None - self.force = 0 - self.data_files = self.distribution.data_files - self.warn_dir = 1 - - def finalize_options(self): - self.set_undefined_options('install', - ('install_data', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) - - def run(self): - self.mkpath(self.install_dir) - for f in self.data_files: - if isinstance(f, str): - # it's a simple file, so copy it - f = convert_path(f) - if self.warn_dir: - self.warn("setup script did not provide a directory for " - "'%s' -- installing right in '%s'" % - (f, self.install_dir)) - (out, _) = self.copy_file(f, self.install_dir) - self.outfiles.append(out) - else: - # it's a tuple with path to install to and a list of files - dir = convert_path(f[0]) - if not os.path.isabs(dir): - dir = os.path.join(self.install_dir, dir) - elif self.root: - dir = change_root(self.root, dir) - self.mkpath(dir) - - if f[1] == []: - # If there are no files listed, the user must be - # trying to create an empty directory, so add the - # directory to the list of output files. - self.outfiles.append(dir) - else: - # Copy files, adding them to the list of output files. 
- for data in f[1]: - data = convert_path(data) - (out, _) = self.copy_file(data, dir) - self.outfiles.append(out) - - def get_inputs(self): - return self.data_files or [] - - def get_outputs(self): - return self.outfiles diff --git a/Lib/distutils/command/install_egg_info.py b/Lib/distutils/command/install_egg_info.py deleted file mode 100644 index 0ddc7367cc608da..000000000000000 --- a/Lib/distutils/command/install_egg_info.py +++ /dev/null @@ -1,77 +0,0 @@ -"""distutils.command.install_egg_info - -Implements the Distutils 'install_egg_info' command, for installing -a package's PKG-INFO metadata.""" - - -from distutils.cmd import Command -from distutils import log, dir_util -import os, sys, re - -class install_egg_info(Command): - """Install an .egg-info file for the package""" - - description = "Install package's PKG-INFO metadata as an .egg-info file" - user_options = [ - ('install-dir=', 'd', "directory to install to"), - ] - - def initialize_options(self): - self.install_dir = None - - def finalize_options(self): - self.set_undefined_options('install_lib',('install_dir','install_dir')) - basename = "%s-%s-py%d.%d.egg-info" % ( - to_filename(safe_name(self.distribution.get_name())), - to_filename(safe_version(self.distribution.get_version())), - *sys.version_info[:2] - ) - self.target = os.path.join(self.install_dir, basename) - self.outputs = [self.target] - - def run(self): - target = self.target - if os.path.isdir(target) and not os.path.islink(target): - dir_util.remove_tree(target, dry_run=self.dry_run) - elif os.path.exists(target): - self.execute(os.unlink,(self.target,),"Removing "+target) - elif not os.path.isdir(self.install_dir): - self.execute(os.makedirs, (self.install_dir,), - "Creating "+self.install_dir) - log.info("Writing %s", target) - if not self.dry_run: - with open(target, 'w', encoding='UTF-8') as f: - self.distribution.metadata.write_pkg_file(f) - - def get_outputs(self): - return self.outputs - - -# The following routines are taken from setuptools' pkg_resources module and -# can be replaced by importing them from pkg_resources once it is included -# in the stdlib. - -def safe_name(name): - """Convert an arbitrary string to a standard distribution name - - Any runs of non-alphanumeric/. characters are replaced with a single '-'. - """ - return re.sub('[^A-Za-z0-9.]+', '-', name) - - -def safe_version(version): - """Convert an arbitrary string to a standard version string - - Spaces become dots, and all other non-alphanumeric characters become - dashes, with runs of multiple dashes condensed to a single dash. - """ - version = version.replace(' ','.') - return re.sub('[^A-Za-z0-9.]+', '-', version) - - -def to_filename(name): - """Convert a project or version name to its filename-escaped form - - Any '-' characters are currently replaced with '_'. 
- """ - return name.replace('-','_') diff --git a/Lib/distutils/command/install_headers.py b/Lib/distutils/command/install_headers.py deleted file mode 100644 index 9bb0b18dc0d809d..000000000000000 --- a/Lib/distutils/command/install_headers.py +++ /dev/null @@ -1,47 +0,0 @@ -"""distutils.command.install_headers - -Implements the Distutils 'install_headers' command, to install C/C++ header -files to the Python include directory.""" - -from distutils.core import Command - - -# XXX force is never used -class install_headers(Command): - - description = "install C/C++ header files" - - user_options = [('install-dir=', 'd', - "directory to install header files to"), - ('force', 'f', - "force installation (overwrite existing files)"), - ] - - boolean_options = ['force'] - - def initialize_options(self): - self.install_dir = None - self.force = 0 - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', - ('install_headers', 'install_dir'), - ('force', 'force')) - - - def run(self): - headers = self.distribution.headers - if not headers: - return - - self.mkpath(self.install_dir) - for header in headers: - (out, _) = self.copy_file(header, self.install_dir) - self.outfiles.append(out) - - def get_inputs(self): - return self.distribution.headers or [] - - def get_outputs(self): - return self.outfiles diff --git a/Lib/distutils/command/install_lib.py b/Lib/distutils/command/install_lib.py deleted file mode 100644 index 6154cf09431f722..000000000000000 --- a/Lib/distutils/command/install_lib.py +++ /dev/null @@ -1,217 +0,0 @@ -"""distutils.command.install_lib - -Implements the Distutils 'install_lib' command -(install all Python modules).""" - -import os -import importlib.util -import sys - -from distutils.core import Command -from distutils.errors import DistutilsOptionError - - -# Extension for Python source files. -PYTHON_SOURCE_EXTENSION = ".py" - -class install_lib(Command): - - description = "install all Python modules (extensions and pure Python)" - - # The byte-compilation options are a tad confusing. Here are the - # possible scenarios: - # 1) no compilation at all (--no-compile --no-optimize) - # 2) compile .pyc only (--compile --no-optimize; default) - # 3) compile .pyc and "opt-1" .pyc (--compile --optimize) - # 4) compile "opt-1" .pyc only (--no-compile --optimize) - # 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more) - # 6) compile "opt-2" .pyc only (--no-compile --optimize-more) - # - # The UI for this is two options, 'compile' and 'optimize'. - # 'compile' is strictly boolean, and only decides whether to - # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and - # decides both whether to generate .pyc files and what level of - # optimization to use. 
- - user_options = [ - ('install-dir=', 'd', "directory to install to"), - ('build-dir=','b', "build directory (where to install from)"), - ('force', 'f', "force installation (overwrite existing files)"), - ('compile', 'c', "compile .py to .pyc [default]"), - ('no-compile', None, "don't compile .py files"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - ('skip-build', None, "skip the build steps"), - ] - - boolean_options = ['force', 'compile', 'skip-build'] - negative_opt = {'no-compile' : 'compile'} - - def initialize_options(self): - # let the 'install' command dictate our installation directory - self.install_dir = None - self.build_dir = None - self.force = 0 - self.compile = None - self.optimize = None - self.skip_build = None - - def finalize_options(self): - # Get all the information we need to install pure Python modules - # from the umbrella 'install' command -- build (source) directory, - # install (target) directory, and whether to compile .py files. - self.set_undefined_options('install', - ('build_lib', 'build_dir'), - ('install_lib', 'install_dir'), - ('force', 'force'), - ('compile', 'compile'), - ('optimize', 'optimize'), - ('skip_build', 'skip_build'), - ) - - if self.compile is None: - self.compile = True - if self.optimize is None: - self.optimize = False - - if not isinstance(self.optimize, int): - try: - self.optimize = int(self.optimize) - if self.optimize not in (0, 1, 2): - raise AssertionError - except (ValueError, AssertionError): - raise DistutilsOptionError("optimize must be 0, 1, or 2") - - def run(self): - # Make sure we have built everything we need first - self.build() - - # Install everything: simply dump the entire contents of the build - # directory to the installation directory (that's the beauty of - # having a build directory!) - outfiles = self.install() - - # (Optionally) compile .py to .pyc - if outfiles is not None and self.distribution.has_pure_modules(): - self.byte_compile(outfiles) - - # -- Top-level worker functions ------------------------------------ - # (called from 'run()') - - def build(self): - if not self.skip_build: - if self.distribution.has_pure_modules(): - self.run_command('build_py') - if self.distribution.has_ext_modules(): - self.run_command('build_ext') - - def install(self): - if os.path.isdir(self.build_dir): - outfiles = self.copy_tree(self.build_dir, self.install_dir) - else: - self.warn("'%s' does not exist -- no Python modules to install" % - self.build_dir) - return - return outfiles - - def byte_compile(self, files): - if sys.dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') - return - - from distutils.util import byte_compile - - # Get the "--root" directory supplied to the "install" command, - # and use it as a prefix to strip off the purported filename - # encoded in bytecode files. This is far from complete, but it - # should at least generate usable bytecode in RPM distributions. 
- install_root = self.get_finalized_command('install').root - - if self.compile: - byte_compile(files, optimize=0, - force=self.force, prefix=install_root, - dry_run=self.dry_run) - if self.optimize > 0: - byte_compile(files, optimize=self.optimize, - force=self.force, prefix=install_root, - verbose=self.verbose, dry_run=self.dry_run) - - - # -- Utility methods ----------------------------------------------- - - def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir): - if not has_any: - return [] - - build_cmd = self.get_finalized_command(build_cmd) - build_files = build_cmd.get_outputs() - build_dir = getattr(build_cmd, cmd_option) - - prefix_len = len(build_dir) + len(os.sep) - outputs = [] - for file in build_files: - outputs.append(os.path.join(output_dir, file[prefix_len:])) - - return outputs - - def _bytecode_filenames(self, py_filenames): - bytecode_files = [] - for py_file in py_filenames: - # Since build_py handles package data installation, the - # list of outputs can contain more than just .py files. - # Make sure we only report bytecode for the .py files. - ext = os.path.splitext(os.path.normcase(py_file))[1] - if ext != PYTHON_SOURCE_EXTENSION: - continue - if self.compile: - bytecode_files.append(importlib.util.cache_from_source( - py_file, optimization='')) - if self.optimize > 0: - bytecode_files.append(importlib.util.cache_from_source( - py_file, optimization=self.optimize)) - - return bytecode_files - - - # -- External interface -------------------------------------------- - # (called by outsiders) - - def get_outputs(self): - """Return the list of files that would be installed if this command - were actually run. Not affected by the "dry-run" flag or whether - modules have actually been built yet. - """ - pure_outputs = \ - self._mutate_outputs(self.distribution.has_pure_modules(), - 'build_py', 'build_lib', - self.install_dir) - if self.compile: - bytecode_outputs = self._bytecode_filenames(pure_outputs) - else: - bytecode_outputs = [] - - ext_outputs = \ - self._mutate_outputs(self.distribution.has_ext_modules(), - 'build_ext', 'build_lib', - self.install_dir) - - return pure_outputs + bytecode_outputs + ext_outputs - - def get_inputs(self): - """Get the list of files that are input to this command, ie. the - files that get installed as they are named in the build tree. - The files in this list correspond one-to-one to the output - filenames returned by 'get_outputs()'. 
- """ - inputs = [] - - if self.distribution.has_pure_modules(): - build_py = self.get_finalized_command('build_py') - inputs.extend(build_py.get_outputs()) - - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - inputs.extend(build_ext.get_outputs()) - - return inputs diff --git a/Lib/distutils/command/install_scripts.py b/Lib/distutils/command/install_scripts.py deleted file mode 100644 index 31a1130ee549371..000000000000000 --- a/Lib/distutils/command/install_scripts.py +++ /dev/null @@ -1,60 +0,0 @@ -"""distutils.command.install_scripts - -Implements the Distutils 'install_scripts' command, for installing -Python scripts.""" - -# contributed by Bastian Kleineidam - -import os -from distutils.core import Command -from distutils import log -from stat import ST_MODE - - -class install_scripts(Command): - - description = "install scripts (Python or otherwise)" - - user_options = [ - ('install-dir=', 'd', "directory to install scripts to"), - ('build-dir=','b', "build directory (where to install from)"), - ('force', 'f', "force installation (overwrite existing files)"), - ('skip-build', None, "skip the build steps"), - ] - - boolean_options = ['force', 'skip-build'] - - def initialize_options(self): - self.install_dir = None - self.force = 0 - self.build_dir = None - self.skip_build = None - - def finalize_options(self): - self.set_undefined_options('build', ('build_scripts', 'build_dir')) - self.set_undefined_options('install', - ('install_scripts', 'install_dir'), - ('force', 'force'), - ('skip_build', 'skip_build'), - ) - - def run(self): - if not self.skip_build: - self.run_command('build_scripts') - self.outfiles = self.copy_tree(self.build_dir, self.install_dir) - if os.name == 'posix': - # Set the executable bits (owner, group, and world) on - # all the scripts we just installed. - for file in self.get_outputs(): - if self.dry_run: - log.info("changing mode of %s", file) - else: - mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777 - log.info("changing mode of %s to %o", file, mode) - os.chmod(file, mode) - - def get_inputs(self): - return self.distribution.scripts or [] - - def get_outputs(self): - return self.outfiles or [] diff --git a/Lib/distutils/command/register.py b/Lib/distutils/command/register.py deleted file mode 100644 index 170f5497141c9dc..000000000000000 --- a/Lib/distutils/command/register.py +++ /dev/null @@ -1,304 +0,0 @@ -"""distutils.command.register - -Implements the Distutils 'register' command (register with the repository). 
-""" - -# created 2002/10/21, Richard Jones - -import getpass -import io -import urllib.parse, urllib.request -from warnings import warn - -from distutils.core import PyPIRCCommand -from distutils.errors import * -from distutils import log - -class register(PyPIRCCommand): - - description = ("register the distribution with the Python package index") - user_options = PyPIRCCommand.user_options + [ - ('list-classifiers', None, - 'list the valid Trove classifiers'), - ('strict', None , - 'Will stop the registering if the meta-data are not fully compliant') - ] - boolean_options = PyPIRCCommand.boolean_options + [ - 'verify', 'list-classifiers', 'strict'] - - sub_commands = [('check', lambda self: True)] - - def initialize_options(self): - PyPIRCCommand.initialize_options(self) - self.list_classifiers = 0 - self.strict = 0 - - def finalize_options(self): - PyPIRCCommand.finalize_options(self) - # setting options for the `check` subcommand - check_options = {'strict': ('register', self.strict), - 'restructuredtext': ('register', 1)} - self.distribution.command_options['check'] = check_options - - def run(self): - self.finalize_options() - self._set_config() - - # Run sub commands - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - if self.dry_run: - self.verify_metadata() - elif self.list_classifiers: - self.classifiers() - else: - self.send_metadata() - - def check_metadata(self): - """Deprecated API.""" - warn("distutils.command.register.check_metadata is deprecated, \ - use the check command instead", PendingDeprecationWarning) - check = self.distribution.get_command_obj('check') - check.ensure_finalized() - check.strict = self.strict - check.restructuredtext = 1 - check.run() - - def _set_config(self): - ''' Reads the configuration file and set attributes. - ''' - config = self._read_pypirc() - if config != {}: - self.username = config['username'] - self.password = config['password'] - self.repository = config['repository'] - self.realm = config['realm'] - self.has_config = True - else: - if self.repository not in ('pypi', self.DEFAULT_REPOSITORY): - raise ValueError('%s not found in .pypirc' % self.repository) - if self.repository == 'pypi': - self.repository = self.DEFAULT_REPOSITORY - self.has_config = False - - def classifiers(self): - ''' Fetch the list of classifiers from the server. - ''' - url = self.repository+'?:action=list_classifiers' - response = urllib.request.urlopen(url) - log.info(self._read_pypi_response(response)) - - def verify_metadata(self): - ''' Send the metadata to the package index server to be checked. - ''' - # send the info to the server and report the result - (code, result) = self.post_to_server(self.build_post_data('verify')) - log.info('Server response (%s): %s', code, result) - - def send_metadata(self): - ''' Send the metadata to the package index server. - - Well, do the following: - 1. figure who the user is, and then - 2. send the data as a Basic auth'ed POST. - - First we try to read the username/password from $HOME/.pypirc, - which is a ConfigParser-formatted file with a section - [distutils] containing username and password entries (both - in clear text). Eg: - - [distutils] - index-servers = - pypi - - [pypi] - username: fred - password: sekrit - - Otherwise, to figure who the user is, we offer the user three - choices: - - 1. use existing login, - 2. register as a new user, or - 3. set the password to a random string and email the user. 
- - ''' - # see if we can short-cut and get the username/password from the - # config - if self.has_config: - choice = '1' - username = self.username - password = self.password - else: - choice = 'x' - username = password = '' - - # get the user's login info - choices = '1 2 3 4'.split() - while choice not in choices: - self.announce('''\ -We need to know who you are, so please choose either: - 1. use your existing login, - 2. register as a new user, - 3. have the server generate a new password for you (and email it to you), or - 4. quit -Your selection [default 1]: ''', log.INFO) - choice = input() - if not choice: - choice = '1' - elif choice not in choices: - print('Please choose one of the four options!') - - if choice == '1': - # get the username and password - while not username: - username = input('Username: ') - while not password: - password = getpass.getpass('Password: ') - - # set up the authentication - auth = urllib.request.HTTPPasswordMgr() - host = urllib.parse.urlparse(self.repository)[1] - auth.add_password(self.realm, host, username, password) - # send the info to the server and report the result - code, result = self.post_to_server(self.build_post_data('submit'), - auth) - self.announce('Server response (%s): %s' % (code, result), - log.INFO) - - # possibly save the login - if code == 200: - if self.has_config: - # sharing the password in the distribution instance - # so the upload command can reuse it - self.distribution.password = password - else: - self.announce(('I can store your PyPI login so future ' - 'submissions will be faster.'), log.INFO) - self.announce('(the login will be stored in %s)' % \ - self._get_rc_file(), log.INFO) - choice = 'X' - while choice.lower() not in 'yn': - choice = input('Save your login (y/N)?') - if not choice: - choice = 'n' - if choice.lower() == 'y': - self._store_pypirc(username, password) - - elif choice == '2': - data = {':action': 'user'} - data['name'] = data['password'] = data['email'] = '' - data['confirm'] = None - while not data['name']: - data['name'] = input('Username: ') - while data['password'] != data['confirm']: - while not data['password']: - data['password'] = getpass.getpass('Password: ') - while not data['confirm']: - data['confirm'] = getpass.getpass(' Confirm: ') - if data['password'] != data['confirm']: - data['password'] = '' - data['confirm'] = None - print("Password and confirm don't match!") - while not data['email']: - data['email'] = input(' EMail: ') - code, result = self.post_to_server(data) - if code != 200: - log.info('Server response (%s): %s', code, result) - else: - log.info('You will receive an email shortly.') - log.info(('Follow the instructions in it to ' - 'complete registration.')) - elif choice == '3': - data = {':action': 'password_reset'} - data['email'] = '' - while not data['email']: - data['email'] = input('Your email address: ') - code, result = self.post_to_server(data) - log.info('Server response (%s): %s', code, result) - - def build_post_data(self, action): - # figure the data to send - the metadata plus some additional - # information used by the package server - meta = self.distribution.metadata - data = { - ':action': action, - 'metadata_version' : '1.0', - 'name': meta.get_name(), - 'version': meta.get_version(), - 'summary': meta.get_description(), - 'home_page': meta.get_url(), - 'author': meta.get_contact(), - 'author_email': meta.get_contact_email(), - 'license': meta.get_licence(), - 'description': meta.get_long_description(), - 'keywords': meta.get_keywords(), - 'platform': 
meta.get_platforms(), - 'classifiers': meta.get_classifiers(), - 'download_url': meta.get_download_url(), - # PEP 314 - 'provides': meta.get_provides(), - 'requires': meta.get_requires(), - 'obsoletes': meta.get_obsoletes(), - } - if data['provides'] or data['requires'] or data['obsoletes']: - data['metadata_version'] = '1.1' - return data - - def post_to_server(self, data, auth=None): - ''' Post a query to the server, and return a string response. - ''' - if 'name' in data: - self.announce('Registering %s to %s' % (data['name'], - self.repository), - log.INFO) - # Build up the MIME payload for the urllib2 POST data - boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = '\n--' + boundary - end_boundary = sep_boundary + '--' - body = io.StringIO() - for key, value in data.items(): - # handle multiple entries for the same name - if not isinstance(value, (list, tuple)): - value = [value] - for value in value: - value = str(value) - body.write(sep_boundary) - body.write('\nContent-Disposition: form-data; name="%s"'%key) - body.write("\n\n") - body.write(value) - if value and value[-1] == '\r': - body.write('\n') # write an extra newline (lurve Macs) - body.write(end_boundary) - body.write("\n") - body = body.getvalue().encode("utf-8") - - # build the Request - headers = { - 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary, - 'Content-length': str(len(body)) - } - req = urllib.request.Request(self.repository, body, headers) - - # handle HTTP and include the Basic Auth handler - opener = urllib.request.build_opener( - urllib.request.HTTPBasicAuthHandler(password_mgr=auth) - ) - data = '' - try: - result = opener.open(req) - except urllib.error.HTTPError as e: - if self.show_response: - data = e.fp.read() - result = e.code, e.msg - except urllib.error.URLError as e: - result = 500, str(e) - else: - if self.show_response: - data = self._read_pypi_response(result) - result = 200, 'OK' - if self.show_response: - msg = '\n'.join(('-' * 75, data, '-' * 75)) - self.announce(msg, log.INFO) - return result diff --git a/Lib/distutils/command/sdist.py b/Lib/distutils/command/sdist.py deleted file mode 100644 index b4996fcb1d276c4..000000000000000 --- a/Lib/distutils/command/sdist.py +++ /dev/null @@ -1,494 +0,0 @@ -"""distutils.command.sdist - -Implements the Distutils 'sdist' command (create a source distribution).""" - -import os -import sys -from glob import glob -from warnings import warn - -from distutils.core import Command -from distutils import dir_util -from distutils import file_util -from distutils import archive_util -from distutils.text_file import TextFile -from distutils.filelist import FileList -from distutils import log -from distutils.util import convert_path -from distutils.errors import DistutilsTemplateError, DistutilsOptionError - - -def show_formats(): - """Print all possible values for the 'formats' option (used by - the "--help-formats" command-line option). - """ - from distutils.fancy_getopt import FancyGetopt - from distutils.archive_util import ARCHIVE_FORMATS - formats = [] - for format in ARCHIVE_FORMATS.keys(): - formats.append(("formats=" + format, None, - ARCHIVE_FORMATS[format][2])) - formats.sort() - FancyGetopt(formats).print_help( - "List of available source distribution formats:") - - -class sdist(Command): - - description = "create a source distribution (tarball, zip file, etc.)" - - def checking_metadata(self): - """Callable used for the check sub-command. 
- - Placed here so user_options can view it""" - return self.metadata_check - - user_options = [ - ('template=', 't', - "name of manifest template file [default: MANIFEST.in]"), - ('manifest=', 'm', - "name of manifest file [default: MANIFEST]"), - ('use-defaults', None, - "include the default file set in the manifest " - "[default; disable with --no-defaults]"), - ('no-defaults', None, - "don't include the default file set"), - ('prune', None, - "specifically exclude files/directories that should not be " - "distributed (build tree, RCS/CVS dirs, etc.) " - "[default; disable with --no-prune]"), - ('no-prune', None, - "don't automatically exclude anything"), - ('manifest-only', 'o', - "just regenerate the manifest and then stop " - "(implies --force-manifest)"), - ('force-manifest', 'f', - "forcibly regenerate the manifest and carry on as usual. " - "Deprecated: now the manifest is always regenerated."), - ('formats=', None, - "formats for source distribution (comma-separated list)"), - ('keep-temp', 'k', - "keep the distribution tree around after creating " + - "archive file(s)"), - ('dist-dir=', 'd', - "directory to put the source distribution archive(s) in " - "[default: dist]"), - ('metadata-check', None, - "Ensure that all required elements of meta-data " - "are supplied. Warn if any missing. [default]"), - ('owner=', 'u', - "Owner name used when creating a tar file [default: current user]"), - ('group=', 'g', - "Group name used when creating a tar file [default: current group]"), - ] - - boolean_options = ['use-defaults', 'prune', - 'manifest-only', 'force-manifest', - 'keep-temp', 'metadata-check'] - - help_options = [ - ('help-formats', None, - "list available distribution formats", show_formats), - ] - - negative_opt = {'no-defaults': 'use-defaults', - 'no-prune': 'prune' } - - sub_commands = [('check', checking_metadata)] - - READMES = ('README', 'README.txt', 'README.rst') - - def initialize_options(self): - # 'template' and 'manifest' are, respectively, the names of - # the manifest template and manifest file. - self.template = None - self.manifest = None - - # 'use_defaults': if true, we will include the default file set - # in the manifest - self.use_defaults = 1 - self.prune = 1 - - self.manifest_only = 0 - self.force_manifest = 0 - - self.formats = ['gztar'] - self.keep_temp = 0 - self.dist_dir = None - - self.archive_files = None - self.metadata_check = 1 - self.owner = None - self.group = None - - def finalize_options(self): - if self.manifest is None: - self.manifest = "MANIFEST" - if self.template is None: - self.template = "MANIFEST.in" - - self.ensure_string_list('formats') - - bad_format = archive_util.check_archive_formats(self.formats) - if bad_format: - raise DistutilsOptionError( - "unknown archive format '%s'" % bad_format) - - if self.dist_dir is None: - self.dist_dir = "dist" - - def run(self): - # 'filelist' contains the list of files that will make up the - # manifest - self.filelist = FileList() - - # Run sub commands - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - # Do whatever it takes to get the list of files to process - # (process the manifest template, read an existing manifest, - # whatever). File list is accumulated in 'self.filelist'. - self.get_file_list() - - # If user just wanted us to regenerate the manifest, stop now. - if self.manifest_only: - return - - # Otherwise, go ahead and create the source distribution tarball, - # or zipfile, or whatever. 
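For orientation only (not part of the original sdist.py), the formats validation done in finalize_options() above can be exercised directly on Pythons that still ship distutils; 'rar' stands in for an unsupported format:

    from distutils.archive_util import check_archive_formats

    check_archive_formats(['gztar', 'zip'])   # -> None: every requested format is known
    check_archive_formats(['gztar', 'rar'])   # -> 'rar': the first unknown format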
- self.make_distribution() - - def check_metadata(self): - """Deprecated API.""" - warn("distutils.command.sdist.check_metadata is deprecated, \ - use the check command instead", PendingDeprecationWarning) - check = self.distribution.get_command_obj('check') - check.ensure_finalized() - check.run() - - def get_file_list(self): - """Figure out the list of files to include in the source - distribution, and put it in 'self.filelist'. This might involve - reading the manifest template (and writing the manifest), or just - reading the manifest, or just using the default file set -- it all - depends on the user's options. - """ - # new behavior when using a template: - # the file list is recalculated every time because - # even if MANIFEST.in or setup.py are not changed - # the user might have added some files in the tree that - # need to be included. - # - # This makes --force the default and only behavior with templates. - template_exists = os.path.isfile(self.template) - if not template_exists and self._manifest_is_not_generated(): - self.read_manifest() - self.filelist.sort() - self.filelist.remove_duplicates() - return - - if not template_exists: - self.warn(("manifest template '%s' does not exist " + - "(using default file list)") % - self.template) - self.filelist.findall() - - if self.use_defaults: - self.add_defaults() - - if template_exists: - self.read_template() - - if self.prune: - self.prune_file_list() - - self.filelist.sort() - self.filelist.remove_duplicates() - self.write_manifest() - - def add_defaults(self): - """Add all the default files to self.filelist: - - README or README.txt - - setup.py - - test/test*.py - - all pure Python modules mentioned in setup script - - all files pointed by package_data (build_py) - - all files defined in data_files. - - all files defined as scripts. - - all C sources listed as part of extensions or C libraries - in the setup script (doesn't catch C headers!) - Warns if (README or README.txt) or setup.py are missing; everything - else is optional. 
- """ - self._add_defaults_standards() - self._add_defaults_optional() - self._add_defaults_python() - self._add_defaults_data_files() - self._add_defaults_ext() - self._add_defaults_c_libs() - self._add_defaults_scripts() - - @staticmethod - def _cs_path_exists(fspath): - """ - Case-sensitive path existence check - - >>> sdist._cs_path_exists(__file__) - True - >>> sdist._cs_path_exists(__file__.upper()) - False - """ - if not os.path.exists(fspath): - return False - # make absolute so we always have a directory - abspath = os.path.abspath(fspath) - directory, filename = os.path.split(abspath) - return filename in os.listdir(directory) - - def _add_defaults_standards(self): - standards = [self.READMES, self.distribution.script_name] - for fn in standards: - if isinstance(fn, tuple): - alts = fn - got_it = False - for fn in alts: - if self._cs_path_exists(fn): - got_it = True - self.filelist.append(fn) - break - - if not got_it: - self.warn("standard file not found: should have one of " + - ', '.join(alts)) - else: - if self._cs_path_exists(fn): - self.filelist.append(fn) - else: - self.warn("standard file '%s' not found" % fn) - - def _add_defaults_optional(self): - optional = ['test/test*.py', 'setup.cfg'] - for pattern in optional: - files = filter(os.path.isfile, glob(pattern)) - self.filelist.extend(files) - - def _add_defaults_python(self): - # build_py is used to get: - # - python modules - # - files defined in package_data - build_py = self.get_finalized_command('build_py') - - # getting python files - if self.distribution.has_pure_modules(): - self.filelist.extend(build_py.get_source_files()) - - # getting package_data files - # (computed in build_py.data_files by build_py.finalize_options) - for pkg, src_dir, build_dir, filenames in build_py.data_files: - for filename in filenames: - self.filelist.append(os.path.join(src_dir, filename)) - - def _add_defaults_data_files(self): - # getting distribution.data_files - if self.distribution.has_data_files(): - for item in self.distribution.data_files: - if isinstance(item, str): - # plain file - item = convert_path(item) - if os.path.isfile(item): - self.filelist.append(item) - else: - # a (dirname, filenames) tuple - dirname, filenames = item - for f in filenames: - f = convert_path(f) - if os.path.isfile(f): - self.filelist.append(f) - - def _add_defaults_ext(self): - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - self.filelist.extend(build_ext.get_source_files()) - - def _add_defaults_c_libs(self): - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.filelist.extend(build_clib.get_source_files()) - - def _add_defaults_scripts(self): - if self.distribution.has_scripts(): - build_scripts = self.get_finalized_command('build_scripts') - self.filelist.extend(build_scripts.get_source_files()) - - def read_template(self): - """Read and parse manifest template file named by self.template. - - (usually "MANIFEST.in") The parsing and processing is done by - 'self.filelist', which updates itself accordingly. 
- """ - log.info("reading manifest template '%s'", self.template) - template = TextFile(self.template, strip_comments=1, skip_blanks=1, - join_lines=1, lstrip_ws=1, rstrip_ws=1, - collapse_join=1) - - try: - while True: - line = template.readline() - if line is None: # end of file - break - - try: - self.filelist.process_template_line(line) - # the call above can raise a DistutilsTemplateError for - # malformed lines, or a ValueError from the lower-level - # convert_path function - except (DistutilsTemplateError, ValueError) as msg: - self.warn("%s, line %d: %s" % (template.filename, - template.current_line, - msg)) - finally: - template.close() - - def prune_file_list(self): - """Prune off branches that might slip into the file list as created - by 'read_template()', but really don't belong there: - * the build tree (typically "build") - * the release tree itself (only an issue if we ran "sdist" - previously with --keep-temp, or it aborted) - * any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories - """ - build = self.get_finalized_command('build') - base_dir = self.distribution.get_fullname() - - self.filelist.exclude_pattern(None, prefix=build.build_base) - self.filelist.exclude_pattern(None, prefix=base_dir) - - if sys.platform == 'win32': - seps = r'/|\\' - else: - seps = '/' - - vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', - '_darcs'] - vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps) - self.filelist.exclude_pattern(vcs_ptrn, is_regex=1) - - def write_manifest(self): - """Write the file list in 'self.filelist' (presumably as filled in - by 'add_defaults()' and 'read_template()') to the manifest file - named by 'self.manifest'. - """ - if self._manifest_is_not_generated(): - log.info("not writing to manually maintained " - "manifest file '%s'" % self.manifest) - return - - content = self.filelist.files[:] - content.insert(0, '# file GENERATED by distutils, do NOT edit') - self.execute(file_util.write_file, (self.manifest, content), - "writing manifest file '%s'" % self.manifest) - - def _manifest_is_not_generated(self): - # check for special comment used in 3.1.3 and higher - if not os.path.isfile(self.manifest): - return False - - fp = open(self.manifest) - try: - first_line = fp.readline() - finally: - fp.close() - return first_line != '# file GENERATED by distutils, do NOT edit\n' - - def read_manifest(self): - """Read the manifest file (named by 'self.manifest') and use it to - fill in 'self.filelist', the list of files to include in the source - distribution. - """ - log.info("reading manifest file '%s'", self.manifest) - with open(self.manifest) as manifest: - for line in manifest: - # ignore comments and blank lines - line = line.strip() - if line.startswith('#') or not line: - continue - self.filelist.append(line) - - def make_release_tree(self, base_dir, files): - """Create the directory tree that will become the source - distribution archive. All directories implied by the filenames in - 'files' are created under 'base_dir', and then we hard link or copy - (if hard linking is unavailable) those files into place. - Essentially, this duplicates the developer's source tree, but in a - directory named after the distribution, containing only the files - to be distributed. - """ - # Create all the directories under 'base_dir' necessary to - # put 'files' there; the 'mkpath()' is just so we don't die - # if the manifest happens to be empty. 
- self.mkpath(base_dir) - dir_util.create_tree(base_dir, files, dry_run=self.dry_run) - - # And walk over the list of files, either making a hard link (if - # os.link exists) to each one that doesn't already exist in its - # corresponding location under 'base_dir', or copying each file - # that's out-of-date in 'base_dir'. (Usually, all files will be - # out-of-date, because by default we blow away 'base_dir' when - # we're done making the distribution archives.) - - if hasattr(os, 'link'): # can make hard links on this system - link = 'hard' - msg = "making hard links in %s..." % base_dir - else: # nope, have to copy - link = None - msg = "copying files to %s..." % base_dir - - if not files: - log.warn("no files to distribute -- empty manifest?") - else: - log.info(msg) - for file in files: - if not os.path.isfile(file): - log.warn("'%s' not a regular file -- skipping", file) - else: - dest = os.path.join(base_dir, file) - self.copy_file(file, dest, link=link) - - self.distribution.metadata.write_pkg_info(base_dir) - - def make_distribution(self): - """Create the source distribution(s). First, we create the release - tree with 'make_release_tree()'; then, we create all required - archive files (according to 'self.formats') from the release tree. - Finally, we clean up by blowing away the release tree (unless - 'self.keep_temp' is true). The list of archive files created is - stored so it can be retrieved later by 'get_archive_files()'. - """ - # Don't warn about missing meta-data here -- should be (and is!) - # done elsewhere. - base_dir = self.distribution.get_fullname() - base_name = os.path.join(self.dist_dir, base_dir) - - self.make_release_tree(base_dir, self.filelist.files) - archive_files = [] # remember names of files we create - # tar archive must be created last to avoid overwrite and remove - if 'tar' in self.formats: - self.formats.append(self.formats.pop(self.formats.index('tar'))) - - for fmt in self.formats: - file = self.make_archive(base_name, fmt, base_dir=base_dir, - owner=self.owner, group=self.group) - archive_files.append(file) - self.distribution.dist_files.append(('sdist', '', file)) - - self.archive_files = archive_files - - if not self.keep_temp: - dir_util.remove_tree(base_dir, dry_run=self.dry_run) - - def get_archive_files(self): - """Return the list of archive files created when the command - was run, or None if the command hasn't run yet. - """ - return self.archive_files diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py deleted file mode 100644 index e0ecb655b93fafa..000000000000000 --- a/Lib/distutils/command/upload.py +++ /dev/null @@ -1,215 +0,0 @@ -""" -distutils.command.upload - -Implements the Distutils 'upload' subcommand (upload package to a package -index). 
-""" - -import os -import io -import hashlib -from base64 import standard_b64encode -from urllib.error import HTTPError -from urllib.request import urlopen, Request -from urllib.parse import urlparse -from distutils.errors import DistutilsError, DistutilsOptionError -from distutils.core import PyPIRCCommand -from distutils.spawn import spawn -from distutils import log - - -# PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256) -# https://bugs.python.org/issue40698 -_FILE_CONTENT_DIGESTS = { - "md5_digest": getattr(hashlib, "md5", None), - "sha256_digest": getattr(hashlib, "sha256", None), - "blake2_256_digest": getattr(hashlib, "blake2b", None), -} - - -class upload(PyPIRCCommand): - - description = "upload binary package to PyPI" - - user_options = PyPIRCCommand.user_options + [ - ('sign', 's', - 'sign files to upload using gpg'), - ('identity=', 'i', 'GPG identity used to sign files'), - ] - - boolean_options = PyPIRCCommand.boolean_options + ['sign'] - - def initialize_options(self): - PyPIRCCommand.initialize_options(self) - self.username = '' - self.password = '' - self.show_response = 0 - self.sign = False - self.identity = None - - def finalize_options(self): - PyPIRCCommand.finalize_options(self) - if self.identity and not self.sign: - raise DistutilsOptionError( - "Must use --sign for --identity to have meaning" - ) - config = self._read_pypirc() - if config != {}: - self.username = config['username'] - self.password = config['password'] - self.repository = config['repository'] - self.realm = config['realm'] - - # getting the password from the distribution - # if previously set by the register command - if not self.password and self.distribution.password: - self.password = self.distribution.password - - def run(self): - if not self.distribution.dist_files: - msg = ("Must create and upload files in one command " - "(e.g. 
setup.py sdist upload)") - raise DistutilsOptionError(msg) - for command, pyversion, filename in self.distribution.dist_files: - self.upload_file(command, pyversion, filename) - - def upload_file(self, command, pyversion, filename): - # Makes sure the repository URL is compliant - schema, netloc, url, params, query, fragments = \ - urlparse(self.repository) - if params or query or fragments: - raise AssertionError("Incompatible url %s" % self.repository) - - if schema not in ('http', 'https'): - raise AssertionError("unsupported schema " + schema) - - # Sign if requested - if self.sign: - gpg_args = ["gpg", "--detach-sign", "-a", filename] - if self.identity: - gpg_args[2:2] = ["--local-user", self.identity] - spawn(gpg_args, - dry_run=self.dry_run) - - # Fill in the data - send all the meta-data in case we need to - # register a new release - f = open(filename,'rb') - try: - content = f.read() - finally: - f.close() - - meta = self.distribution.metadata - data = { - # action - ':action': 'file_upload', - 'protocol_version': '1', - - # identify release - 'name': meta.get_name(), - 'version': meta.get_version(), - - # file content - 'content': (os.path.basename(filename),content), - 'filetype': command, - 'pyversion': pyversion, - - # additional meta-data - 'metadata_version': '1.0', - 'summary': meta.get_description(), - 'home_page': meta.get_url(), - 'author': meta.get_contact(), - 'author_email': meta.get_contact_email(), - 'license': meta.get_licence(), - 'description': meta.get_long_description(), - 'keywords': meta.get_keywords(), - 'platform': meta.get_platforms(), - 'classifiers': meta.get_classifiers(), - 'download_url': meta.get_download_url(), - # PEP 314 - 'provides': meta.get_provides(), - 'requires': meta.get_requires(), - 'obsoletes': meta.get_obsoletes(), - } - - data['comment'] = '' - - # file content digests - for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items(): - if digest_cons is None: - continue - try: - data[digest_name] = digest_cons(content).hexdigest() - except ValueError: - # hash digest not available or blocked by security policy - pass - - if self.sign: - with open(filename + ".asc", "rb") as f: - data['gpg_signature'] = (os.path.basename(filename) + ".asc", - f.read()) - - # set up the authentication - user_pass = (self.username + ":" + self.password).encode('ascii') - # The exact encoding of the authentication string is debated. - # Anyway PyPI only accepts ascii for both username or password. 
- auth = "Basic " + standard_b64encode(user_pass).decode('ascii') - - # Build up the MIME payload for the POST data - boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = b'\r\n--' + boundary.encode('ascii') - end_boundary = sep_boundary + b'--\r\n' - body = io.BytesIO() - for key, value in data.items(): - title = '\r\nContent-Disposition: form-data; name="%s"' % key - # handle multiple entries for the same name - if not isinstance(value, list): - value = [value] - for value in value: - if type(value) is tuple: - title += '; filename="%s"' % value[0] - value = value[1] - else: - value = str(value).encode('utf-8') - body.write(sep_boundary) - body.write(title.encode('utf-8')) - body.write(b"\r\n\r\n") - body.write(value) - body.write(end_boundary) - body = body.getvalue() - - msg = "Submitting %s to %s" % (filename, self.repository) - self.announce(msg, log.INFO) - - # build the Request - headers = { - 'Content-type': 'multipart/form-data; boundary=%s' % boundary, - 'Content-length': str(len(body)), - 'Authorization': auth, - } - - request = Request(self.repository, data=body, - headers=headers) - # send the data - try: - result = urlopen(request) - status = result.getcode() - reason = result.msg - except HTTPError as e: - status = e.code - reason = e.msg - except OSError as e: - self.announce(str(e), log.ERROR) - raise - - if status == 200: - self.announce('Server response (%s): %s' % (status, reason), - log.INFO) - if self.show_response: - text = self._read_pypi_response(result) - msg = '\n'.join(('-' * 75, text, '-' * 75)) - self.announce(msg, log.INFO) - else: - msg = 'Upload failed (%s): %s' % (status, reason) - self.announce(msg, log.ERROR) - raise DistutilsError(msg) diff --git a/Lib/distutils/config.py b/Lib/distutils/config.py deleted file mode 100644 index a201c86a176844a..000000000000000 --- a/Lib/distutils/config.py +++ /dev/null @@ -1,133 +0,0 @@ -"""distutils.pypirc - -Provides the PyPIRCCommand class, the base class for the command classes -that uses .pypirc in the distutils.command package. 
-""" -import os -from configparser import RawConfigParser -import warnings - -from distutils.cmd import Command - -DEFAULT_PYPIRC = """\ -[distutils] -index-servers = - pypi - -[pypi] -username:%s -password:%s -""" - -class PyPIRCCommand(Command): - """Base command that knows how to handle the .pypirc file - """ - DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' - DEFAULT_REALM = 'pypi' - repository = None - realm = None - - user_options = [ - ('repository=', 'r', - "url of repository [default: %s]" % \ - DEFAULT_REPOSITORY), - ('show-response', None, - 'display full response text from server')] - - boolean_options = ['show-response'] - - def _get_rc_file(self): - """Returns rc file path.""" - return os.path.join(os.path.expanduser('~'), '.pypirc') - - def _store_pypirc(self, username, password): - """Creates a default .pypirc file.""" - rc = self._get_rc_file() - with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: - f.write(DEFAULT_PYPIRC % (username, password)) - - def _read_pypirc(self): - """Reads the .pypirc file.""" - rc = self._get_rc_file() - if os.path.exists(rc): - self.announce('Using PyPI login from %s' % rc) - repository = self.repository or self.DEFAULT_REPOSITORY - - config = RawConfigParser() - config.read(rc) - sections = config.sections() - if 'distutils' in sections: - # let's get the list of servers - index_servers = config.get('distutils', 'index-servers') - _servers = [server.strip() for server in - index_servers.split('\n') - if server.strip() != ''] - if _servers == []: - # nothing set, let's try to get the default pypi - if 'pypi' in sections: - _servers = ['pypi'] - else: - # the file is not properly defined, returning - # an empty dict - return {} - for server in _servers: - current = {'server': server} - current['username'] = config.get(server, 'username') - - # optional params - for key, default in (('repository', - self.DEFAULT_REPOSITORY), - ('realm', self.DEFAULT_REALM), - ('password', None)): - if config.has_option(server, key): - current[key] = config.get(server, key) - else: - current[key] = default - - # work around people having "repository" for the "pypi" - # section of their config set to the HTTP (rather than - # HTTPS) URL - if (server == 'pypi' and - repository in (self.DEFAULT_REPOSITORY, 'pypi')): - current['repository'] = self.DEFAULT_REPOSITORY - return current - - if (current['server'] == repository or - current['repository'] == repository): - return current - elif 'server-login' in sections: - # old format - server = 'server-login' - if config.has_option(server, 'repository'): - repository = config.get(server, 'repository') - else: - repository = self.DEFAULT_REPOSITORY - return {'username': config.get(server, 'username'), - 'password': config.get(server, 'password'), - 'repository': repository, - 'server': server, - 'realm': self.DEFAULT_REALM} - - return {} - - def _read_pypi_response(self, response): - """Read and decode a PyPI HTTP response.""" - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - import cgi - content_type = response.getheader('content-type', 'text/plain') - encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii') - return response.read().decode(encoding) - - def initialize_options(self): - """Initialize options.""" - self.repository = None - self.realm = None - self.show_response = 0 - - def finalize_options(self): - """Finalizes options.""" - if self.repository is None: - self.repository = self.DEFAULT_REPOSITORY - if self.realm is None: - 
self.realm = self.DEFAULT_REALM diff --git a/Lib/distutils/core.py b/Lib/distutils/core.py deleted file mode 100644 index d603d4a45a73ee4..000000000000000 --- a/Lib/distutils/core.py +++ /dev/null @@ -1,234 +0,0 @@ -"""distutils.core - -The only module that needs to be imported to use the Distutils; provides -the 'setup' function (which is to be called from the setup script). Also -indirectly provides the Distribution and Command classes, although they are -really defined in distutils.dist and distutils.cmd. -""" - -import os -import sys - -from distutils.debug import DEBUG -from distutils.errors import * - -# Mainly import these so setup scripts can "from distutils.core import" them. -from distutils.dist import Distribution -from distutils.cmd import Command -from distutils.config import PyPIRCCommand -from distutils.extension import Extension - -# This is a barebones help message generated displayed when the user -# runs the setup script with no arguments at all. More useful help -# is generated with various --help options: global help, list commands, -# and per-command help. -USAGE = """\ -usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...] - or: %(script)s --help [cmd1 cmd2 ...] - or: %(script)s --help-commands - or: %(script)s cmd --help -""" - -def gen_usage (script_name): - script = os.path.basename(script_name) - return USAGE % vars() - - -# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'. -_setup_stop_after = None -_setup_distribution = None - -# Legal keyword arguments for the setup() function -setup_keywords = ('distclass', 'script_name', 'script_args', 'options', - 'name', 'version', 'author', 'author_email', - 'maintainer', 'maintainer_email', 'url', 'license', - 'description', 'long_description', 'keywords', - 'platforms', 'classifiers', 'download_url', - 'requires', 'provides', 'obsoletes', - ) - -# Legal keyword arguments for the Extension constructor -extension_keywords = ('name', 'sources', 'include_dirs', - 'define_macros', 'undef_macros', - 'library_dirs', 'libraries', 'runtime_library_dirs', - 'extra_objects', 'extra_compile_args', 'extra_link_args', - 'swig_opts', 'export_symbols', 'depends', 'language') - -def setup (**attrs): - """The gateway to the Distutils: do everything your setup script needs - to do, in a highly flexible and user-driven way. Briefly: create a - Distribution instance; find and parse config files; parse the command - line; run each Distutils command found there, customized by the options - supplied to 'setup()' (as keyword arguments), in config files, and on - the command line. - - The Distribution instance might be an instance of a class supplied via - the 'distclass' keyword argument to 'setup'; if no such class is - supplied, then the Distribution class (in dist.py) is instantiated. - All other arguments to 'setup' (except for 'cmdclass') are used to set - attributes of the Distribution instance. - - The 'cmdclass' argument, if supplied, is a dictionary mapping command - names to command classes. Each command encountered on the command line - will be turned into a command class, which is in turn instantiated; any - class found in 'cmdclass' is used in place of the default, which is - (for command 'foo_bar') class 'foo_bar' in module - 'distutils.command.foo_bar'. The command class must provide a - 'user_options' attribute which is a list of option specifiers for - 'distutils.fancy_getopt'. 
Any command-line options between the current - and the next command are used to set attributes of the current command - object. - - When the entire command-line has been successfully parsed, calls the - 'run()' method on each command object in turn. This method will be - driven entirely by the Distribution object (which each command object - has a reference to, thanks to its constructor), and the - command-specific options that became attributes of each command - object. - """ - - global _setup_stop_after, _setup_distribution - - # Determine the distribution class -- either caller-supplied or - # our Distribution (see below). - klass = attrs.get('distclass') - if klass: - del attrs['distclass'] - else: - klass = Distribution - - if 'script_name' not in attrs: - attrs['script_name'] = os.path.basename(sys.argv[0]) - if 'script_args' not in attrs: - attrs['script_args'] = sys.argv[1:] - - # Create the Distribution instance, using the remaining arguments - # (ie. everything except distclass) to initialize it - try: - _setup_distribution = dist = klass(attrs) - except DistutilsSetupError as msg: - if 'name' not in attrs: - raise SystemExit("error in setup command: %s" % msg) - else: - raise SystemExit("error in %s setup command: %s" % \ - (attrs['name'], msg)) - - if _setup_stop_after == "init": - return dist - - # Find and parse the config file(s): they will override options from - # the setup script, but be overridden by the command line. - dist.parse_config_files() - - if DEBUG: - print("options (after parsing config files):") - dist.dump_option_dicts() - - if _setup_stop_after == "config": - return dist - - # Parse the command line and override config files; any - # command-line errors are the end user's fault, so turn them into - # SystemExit to suppress tracebacks. - try: - ok = dist.parse_command_line() - except DistutilsArgError as msg: - raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg) - - if DEBUG: - print("options (after parsing command line):") - dist.dump_option_dicts() - - if _setup_stop_after == "commandline": - return dist - - # And finally, run all the commands found on the command line. - if ok: - try: - dist.run_commands() - except KeyboardInterrupt: - raise SystemExit("interrupted") - except OSError as exc: - if DEBUG: - sys.stderr.write("error: %s\n" % (exc,)) - raise - else: - raise SystemExit("error: %s" % (exc,)) - - except (DistutilsError, - CCompilerError) as msg: - if DEBUG: - raise - else: - raise SystemExit("error: " + str(msg)) - - return dist - -# setup () - - -def run_setup (script_name, script_args=None, stop_after="run"): - """Run a setup script in a somewhat controlled environment, and - return the Distribution instance that drives things. This is useful - if you need to find out the distribution meta-data (passed as - keyword args from 'script' to 'setup()', or the contents of the - config files or command-line. - - 'script_name' is a file that will be read and run with 'exec()'; - 'sys.argv[0]' will be replaced with 'script' for the duration of the - call. 'script_args' is a list of strings; if supplied, - 'sys.argv[1:]' will be replaced by 'script_args' for the duration of - the call. 
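For context, a minimal sketch (not taken from the deleted sources) of the kind of setup script that setup() above drives, on Pythons that still ship distutils; the project name, version and module are hypothetical:

    # setup.py (hypothetical project)
    from distutils.core import setup

    setup(
        name='example-pkg',
        version='0.1',
        description='Illustrative package',
        py_modules=['example'],
    )

Running such a script with "python setup.py sdist" is what exercises the sdist command removed earlier in this diff.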
- - 'stop_after' tells 'setup()' when to stop processing; possible - values: - init - stop after the Distribution instance has been created and - populated with the keyword arguments to 'setup()' - config - stop after config files have been parsed (and their data - stored in the Distribution instance) - commandline - stop after the command-line ('sys.argv[1:]' or 'script_args') - have been parsed (and the data stored in the Distribution) - run [default] - stop after all commands have been run (the same as if 'setup()' - had been called in the usual way - - Returns the Distribution instance, which provides all information - used to drive the Distutils. - """ - if stop_after not in ('init', 'config', 'commandline', 'run'): - raise ValueError("invalid value for 'stop_after': %r" % (stop_after,)) - - global _setup_stop_after, _setup_distribution - _setup_stop_after = stop_after - - save_argv = sys.argv.copy() - g = {'__file__': script_name} - try: - try: - sys.argv[0] = script_name - if script_args is not None: - sys.argv[1:] = script_args - with open(script_name, 'rb') as f: - exec(f.read(), g) - finally: - sys.argv = save_argv - _setup_stop_after = None - except SystemExit: - # Hmm, should we do something if exiting with a non-zero code - # (ie. error)? - pass - - if _setup_distribution is None: - raise RuntimeError(("'distutils.core.setup()' was never called -- " - "perhaps '%s' is not a Distutils setup script?") % \ - script_name) - - # I wonder if the setup script's namespace -- g and l -- would be of - # any interest to callers? - #print "_setup_distribution:", _setup_distribution - return _setup_distribution - -# run_setup () diff --git a/Lib/distutils/cygwinccompiler.py b/Lib/distutils/cygwinccompiler.py deleted file mode 100644 index 66c12dd35830b29..000000000000000 --- a/Lib/distutils/cygwinccompiler.py +++ /dev/null @@ -1,403 +0,0 @@ -"""distutils.cygwinccompiler - -Provides the CygwinCCompiler class, a subclass of UnixCCompiler that -handles the Cygwin port of the GNU C compiler to Windows. It also contains -the Mingw32CCompiler class which handles the mingw32 port of GCC (same as -cygwin in no-cygwin mode). -""" - -# problems: -# -# * if you use a msvc compiled python version (1.5.2) -# 1. you have to insert a __GNUC__ section in its config.h -# 2. you have to generate an import library for its dll -# - create a def-file for python??.dll -# - create an import library using -# dlltool --dllname python15.dll --def python15.def \ -# --output-lib libpython15.a -# -# see also http://starship.python.net/crew/kernr/mingw32/Notes.html -# -# * We put export_symbols in a def-file, and don't use -# --export-all-symbols because it doesn't worked reliable in some -# tested configurations. And because other windows compilers also -# need their symbols specified this no serious problem. -# -# tested configurations: -# -# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works -# (after patching python's config.h and for C++ some other include files) -# see also http://starship.python.net/crew/kernr/mingw32/Notes.html -# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works -# (ld doesn't support -shared, so we use dllwrap) -# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now -# - its dllwrap doesn't work, there is a bug in binutils 2.10.90 -# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html -# - using gcc -mdll instead dllwrap doesn't work without -static because -# it tries to link against dlls instead their import libraries. (If -# it finds the dll first.) 
-# By specifying -static we force ld to link against the import libraries, -# this is windows standard and there are normally not the necessary symbols -# in the dlls. -# *** only the version of June 2000 shows these problems -# * cygwin gcc 3.2/ld 2.13.90 works -# (ld supports -shared) -# * mingw gcc 3.2/ld 2.13 works -# (ld supports -shared) - -import os -import sys -import copy -from subprocess import Popen, PIPE, check_output -import re - -from distutils.unixccompiler import UnixCCompiler -from distutils.file_util import write_file -from distutils.errors import (DistutilsExecError, CCompilerError, - CompileError, UnknownFileError) -from distutils.version import LooseVersion -from distutils.spawn import find_executable - -def get_msvcr(): - """Include the appropriate MSVC runtime library if Python was built - with MSVC 7.0 or later. - """ - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = sys.version[msc_pos+6:msc_pos+10] - if msc_ver == '1300': - # MSVC 7.0 - return ['msvcr70'] - elif msc_ver == '1310': - # MSVC 7.1 - return ['msvcr71'] - elif msc_ver == '1400': - # VS2005 / MSVC 8.0 - return ['msvcr80'] - elif msc_ver == '1500': - # VS2008 / MSVC 9.0 - return ['msvcr90'] - elif msc_ver == '1600': - # VS2010 / MSVC 10.0 - return ['msvcr100'] - else: - raise ValueError("Unknown MS Compiler version %s " % msc_ver) - - -class CygwinCCompiler(UnixCCompiler): - """ Handles the Cygwin port of the GNU C compiler to Windows. - """ - compiler_type = 'cygwin' - obj_extension = ".o" - static_lib_extension = ".a" - shared_lib_extension = ".dll" - static_lib_format = "lib%s%s" - shared_lib_format = "%s%s" - exe_extension = ".exe" - - def __init__(self, verbose=0, dry_run=0, force=0): - - UnixCCompiler.__init__(self, verbose, dry_run, force) - - status, details = check_config_h() - self.debug_print("Python's GCC status: %s (details: %s)" % - (status, details)) - if status is not CONFIG_H_OK: - self.warn( - "Python's pyconfig.h doesn't seem to support your compiler. " - "Reason: %s. " - "Compiling may fail because of undefined preprocessor macros." - % details) - - self.gcc_version, self.ld_version, self.dllwrap_version = \ - get_versions() - self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" % - (self.gcc_version, - self.ld_version, - self.dllwrap_version) ) - - # ld_version >= "2.10.90" and < "2.13" should also be able to use - # gcc -mdll instead of dllwrap - # Older dllwraps had own version numbers, newer ones use the - # same as the rest of binutils ( also ld ) - # dllwrap 2.10.90 is buggy - if self.ld_version >= "2.10.90": - self.linker_dll = "gcc" - else: - self.linker_dll = "dllwrap" - - # ld_version >= "2.13" support -shared so use it instead of - # -mdll -static - if self.ld_version >= "2.13": - shared_option = "-shared" - else: - shared_option = "-mdll -static" - - # Hard-code GCC because that's what this is all about. - # XXX optimization, warnings etc. should be customizable. 
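As a brief aside (not part of the original cygwinccompiler.py), the sys.version probing that get_msvcr() above relies on works like this; the banner string is a made-up example of what an MSVC-built interpreter reports:

    version = '3.4.0 (default, Mar 16 2014) [MSC v.1600 64 bit (AMD64)]'  # hypothetical banner
    msc_pos = version.find('MSC v.')
    if msc_pos != -1:
        msc_ver = version[msc_pos + 6:msc_pos + 10]   # -> '1600', so get_msvcr() would return ['msvcr100']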
- self.set_executables(compiler='gcc -mcygwin -O -Wall', - compiler_so='gcc -mcygwin -mdll -O -Wall', - compiler_cxx='g++ -mcygwin -O -Wall', - linker_exe='gcc -mcygwin', - linker_so=('%s -mcygwin %s' % - (self.linker_dll, shared_option))) - - # cygwin and mingw32 need different sets of libraries - if self.gcc_version == "2.91.57": - # cygwin shouldn't need msvcrt, but without the dlls will crash - # (gcc version 2.91.57) -- perhaps something about initialization - self.dll_libraries=["msvcrt"] - self.warn( - "Consider upgrading to a newer version of gcc") - else: - # Include the appropriate MSVC runtime library if Python was built - # with MSVC 7.0 or later. - self.dll_libraries = get_msvcr() - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compiles the source by spawning GCC and windres if needed.""" - if ext == '.rc' or ext == '.res': - # gcc needs '.res' and '.rc' compiled to object files !!! - try: - self.spawn(["windres", "-i", src, "-o", obj]) - except DistutilsExecError as msg: - raise CompileError(msg) - else: # for other files use the C-compiler - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + - extra_postargs) - except DistutilsExecError as msg: - raise CompileError(msg) - - def link(self, target_desc, objects, output_filename, output_dir=None, - libraries=None, library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - """Link the objects.""" - # use separate copies, so we can modify the lists - extra_preargs = copy.copy(extra_preargs or []) - libraries = copy.copy(libraries or []) - objects = copy.copy(objects or []) - - # Additional libraries - libraries.extend(self.dll_libraries) - - # handle export symbols by creating a def-file - # with executables this only works with gcc/ld as linker - if ((export_symbols is not None) and - (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")): - # (The linker doesn't do anything if output is up-to-date. - # So it would probably better to check if we really need this, - # but for this we had to insert some unchanged parts of - # UnixCCompiler, and this is not what we want.) 
- - # we want to put some files in the same directory as the - # object files are, build_temp doesn't help much - # where are the object files - temp_dir = os.path.dirname(objects[0]) - # name of dll to give the helper files the same base name - (dll_name, dll_extension) = os.path.splitext( - os.path.basename(output_filename)) - - # generate the filenames for these files - def_file = os.path.join(temp_dir, dll_name + ".def") - lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a") - - # Generate .def file - contents = [ - "LIBRARY %s" % os.path.basename(output_filename), - "EXPORTS"] - for sym in export_symbols: - contents.append(sym) - self.execute(write_file, (def_file, contents), - "writing %s" % def_file) - - # next add options for def-file and to creating import libraries - - # dllwrap uses different options than gcc/ld - if self.linker_dll == "dllwrap": - extra_preargs.extend(["--output-lib", lib_file]) - # for dllwrap we have to use a special option - extra_preargs.extend(["--def", def_file]) - # we use gcc/ld here and can be sure ld is >= 2.9.10 - else: - # doesn't work: bfd_close build\...\libfoo.a: Invalid operation - #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file]) - # for gcc/ld the def-file is specified as any object files - objects.append(def_file) - - #end: if ((export_symbols is not None) and - # (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")): - - # who wants symbols and a many times larger output file - # should explicitly switch the debug mode on - # otherwise we let dllwrap/ld strip the output file - # (On my machine: 10KiB < stripped_file < ??100KiB - # unstripped_file = stripped_file + XXX KiB - # ( XXX=254 for a typical python extension)) - if not debug: - extra_preargs.append("-s") - - UnixCCompiler.link(self, target_desc, objects, output_filename, - output_dir, libraries, library_dirs, - runtime_library_dirs, - None, # export_symbols, we do this in our def-file - debug, extra_preargs, extra_postargs, build_temp, - target_lang) - - # -- Miscellaneous methods ----------------------------------------- - - def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """Adds supports for rc and res files.""" - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - base, ext = os.path.splitext(os.path.normcase(src_name)) - if ext not in (self.src_extensions + ['.rc','.res']): - raise UnknownFileError("unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext in ('.res', '.rc'): - # these need to be compiled to object files - obj_names.append (os.path.join(output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join(output_dir, - base + self.obj_extension)) - return obj_names - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(CygwinCCompiler): - """ Handles the Mingw32 port of the GNU C compiler to Windows. - """ - compiler_type = 'mingw32' - - def __init__(self, verbose=0, dry_run=0, force=0): - - CygwinCCompiler.__init__ (self, verbose, dry_run, force) - - # ld_version >= "2.13" support -shared so use it instead of - # -mdll -static - if self.ld_version >= "2.13": - shared_option = "-shared" - else: - shared_option = "-mdll -static" - - # A real mingw32 doesn't need to specify a different entry point, - # but cygwin 2.91.57 in no-cygwin-mode needs it. 
- if self.gcc_version <= "2.91.57": - entry_point = '--entry _DllMain@12' - else: - entry_point = '' - - if is_cygwingcc(): - raise CCompilerError( - 'Cygwin gcc cannot be used with --compiler=mingw32') - - self.set_executables(compiler='gcc -O -Wall', - compiler_so='gcc -mdll -O -Wall', - compiler_cxx='g++ -O -Wall', - linker_exe='gcc', - linker_so='%s %s %s' - % (self.linker_dll, shared_option, - entry_point)) - # Maybe we should also append -mthreads, but then the finished - # dlls need another dll (mingwm10.dll see Mingw32 docs) - # (-mthreads: Support thread-safe exception handling on `Mingw32') - - # no additional libraries needed - self.dll_libraries=[] - - # Include the appropriate MSVC runtime library if Python was built - # with MSVC 7.0 or later. - self.dll_libraries = get_msvcr() - -# Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using an unmodified -# version. - -CONFIG_H_OK = "ok" -CONFIG_H_NOTOK = "not ok" -CONFIG_H_UNCERTAIN = "uncertain" - -def check_config_h(): - """Check if the current Python installation appears amenable to building - extensions with GCC. - - Returns a tuple (status, details), where 'status' is one of the following - constants: - - - CONFIG_H_OK: all is well, go ahead and compile - - CONFIG_H_NOTOK: doesn't look good - - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h - - 'details' is a human-readable string explaining the situation. - - Note there are two ways to conclude "OK": either 'sys.version' contains - the string "GCC" (implying that this Python was built with GCC), or the - installed "pyconfig.h" contains the string "__GNUC__". - """ - - # XXX since this function also checks sys.version, it's not strictly a - # "pyconfig.h" check -- should probably be renamed... - - from distutils import sysconfig - - # if sys.version contains GCC then python was compiled with GCC, and the - # pyconfig.h file should be OK - if "GCC" in sys.version: - return CONFIG_H_OK, "sys.version mentions 'GCC'" - - # let's see if __GNUC__ is mentioned in python.h - fn = sysconfig.get_config_h_filename() - try: - config_h = open(fn) - try: - if "__GNUC__" in config_h.read(): - return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn - else: - return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn - finally: - config_h.close() - except OSError as exc: - return (CONFIG_H_UNCERTAIN, - "couldn't read '%s': %s" % (fn, exc.strerror)) - -RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)') - -def _find_exe_version(cmd): - """Find the version of an executable by running `cmd` in the shell. - - If the command is not found, or the output does not match - `RE_VERSION`, returns None. - """ - executable = cmd.split()[0] - if find_executable(executable) is None: - return None - out = Popen(cmd, shell=True, stdout=PIPE).stdout - try: - out_string = out.read() - finally: - out.close() - result = RE_VERSION.search(out_string) - if result is None: - return None - # LooseVersion works with strings - # so we need to decode our bytes - return LooseVersion(result.group(1).decode()) - -def get_versions(): - """ Try to find out the versions of gcc, ld and dllwrap. - - If not possible it returns None for it. 
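A small sketch of what _find_exe_version() above extracts, assuming a hypothetical "ld -v" banner rather than output captured from a real system:

    import re
    from distutils.version import LooseVersion   # available where distutils still ships

    RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')
    out = b'GNU ld (GNU Binutils) 2.38\n'
    ld_version = LooseVersion(RE_VERSION.search(out).group(1).decode())
    ld_version >= '2.13'   # -> True, so the -shared linker option would be chosen above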
- """ - commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version'] - return tuple([_find_exe_version(cmd) for cmd in commands]) - -def is_cygwingcc(): - '''Try to determine if the gcc that would be used is from cygwin.''' - out_string = check_output(['gcc', '-dumpmachine']) - return out_string.strip().endswith(b'cygwin') diff --git a/Lib/distutils/dep_util.py b/Lib/distutils/dep_util.py deleted file mode 100644 index d74f5e4e92f3ede..000000000000000 --- a/Lib/distutils/dep_util.py +++ /dev/null @@ -1,92 +0,0 @@ -"""distutils.dep_util - -Utility functions for simple, timestamp-based dependency of files -and groups of files; also, function based entirely on such -timestamp dependency analysis.""" - -import os -from distutils.errors import DistutilsFileError - - -def newer (source, target): - """Return true if 'source' exists and is more recently modified than - 'target', or if 'source' exists and 'target' doesn't. Return false if - both exist and 'target' is the same age or younger than 'source'. - Raise DistutilsFileError if 'source' does not exist. - """ - if not os.path.exists(source): - raise DistutilsFileError("file '%s' does not exist" % - os.path.abspath(source)) - if not os.path.exists(target): - return 1 - - from stat import ST_MTIME - mtime1 = os.stat(source)[ST_MTIME] - mtime2 = os.stat(target)[ST_MTIME] - - return mtime1 > mtime2 - -# newer () - - -def newer_pairwise (sources, targets): - """Walk two filename lists in parallel, testing if each source is newer - than its corresponding target. Return a pair of lists (sources, - targets) where source is newer than target, according to the semantics - of 'newer()'. - """ - if len(sources) != len(targets): - raise ValueError("'sources' and 'targets' must be same length") - - # build a pair of lists (sources, targets) where source is newer - n_sources = [] - n_targets = [] - for i in range(len(sources)): - if newer(sources[i], targets[i]): - n_sources.append(sources[i]) - n_targets.append(targets[i]) - - return (n_sources, n_targets) - -# newer_pairwise () - - -def newer_group (sources, target, missing='error'): - """Return true if 'target' is out-of-date with respect to any file - listed in 'sources'. In other words, if 'target' exists and is newer - than every file in 'sources', return false; otherwise return true. - 'missing' controls what we do when a source file is missing; the - default ("error") is to blow up with an OSError from inside 'stat()'; - if it is "ignore", we silently drop any missing source files; if it is - "newer", any missing source files make us assume that 'target' is - out-of-date (this is handy in "dry-run" mode: it'll make you pretend to - carry out commands that wouldn't work because inputs are missing, but - that doesn't matter because you're not actually going to run the - commands). - """ - # If the target doesn't even exist, then it's definitely out-of-date. - if not os.path.exists(target): - return 1 - - # Otherwise we have to find out the hard way: if *any* source file - # is more recent than 'target', then 'target' is out-of-date and - # we can immediately return true. If we fall through to the end - # of the loop, then 'target' is up-to-date and we return false. 
- from stat import ST_MTIME - target_mtime = os.stat(target)[ST_MTIME] - for source in sources: - if not os.path.exists(source): - if missing == 'error': # blow up when we stat() the file - pass - elif missing == 'ignore': # missing source dropped from - continue # target's dependency list - elif missing == 'newer': # missing source means target is - return 1 # out-of-date - - source_mtime = os.stat(source)[ST_MTIME] - if source_mtime > target_mtime: - return 1 - else: - return 0 - -# newer_group () diff --git a/Lib/distutils/dir_util.py b/Lib/distutils/dir_util.py deleted file mode 100644 index d5cd8e3e24f46a8..000000000000000 --- a/Lib/distutils/dir_util.py +++ /dev/null @@ -1,210 +0,0 @@ -"""distutils.dir_util - -Utility functions for manipulating directories and directory trees.""" - -import os -import errno -from distutils.errors import DistutilsFileError, DistutilsInternalError -from distutils import log - -# cache for by mkpath() -- in addition to cheapening redundant calls, -# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode -_path_created = {} - -# I don't use os.makedirs because a) it's new to Python 1.5.2, and -# b) it blows up if the directory already exists (I want to silently -# succeed in that case). -def mkpath(name, mode=0o777, verbose=1, dry_run=0): - """Create a directory and any missing ancestor directories. - - If the directory already exists (or if 'name' is the empty string, which - means the current directory, which of course exists), then do nothing. - Raise DistutilsFileError if unable to create some directory along the way - (eg. some sub-path exists, but is a file rather than a directory). - If 'verbose' is true, print a one-line summary of each mkdir to stdout. - Return the list of directories actually created. - """ - - global _path_created - - # Detect a common bug -- name is None - if not isinstance(name, str): - raise DistutilsInternalError( - "mkpath: 'name' must be a string (got %r)" % (name,)) - - # XXX what's the better way to handle verbosity? print as we create - # each directory in the path (the current behaviour), or only announce - # the creation of the whole path? (quite easy to do the latter since - # we're not using a recursive algorithm) - - name = os.path.normpath(name) - created_dirs = [] - if os.path.isdir(name) or name == '': - return created_dirs - if _path_created.get(os.path.abspath(name)): - return created_dirs - - (head, tail) = os.path.split(name) - tails = [tail] # stack of lone dirs to create - - while head and tail and not os.path.isdir(head): - (head, tail) = os.path.split(head) - tails.insert(0, tail) # push next higher dir onto stack - - # now 'head' contains the deepest directory that already exists - # (that is, the child of 'head' in 'name' is the highest directory - # that does *not* exist) - for d in tails: - #print "head = %s, d = %s: " % (head, d), - head = os.path.join(head, d) - abs_head = os.path.abspath(head) - - if _path_created.get(abs_head): - continue - - if verbose >= 1: - log.info("creating %s", head) - - if not dry_run: - try: - os.mkdir(head, mode) - except OSError as exc: - if not (exc.errno == errno.EEXIST and os.path.isdir(head)): - raise DistutilsFileError( - "could not create '%s': %s" % (head, exc.args[-1])) - created_dirs.append(head) - - _path_created[abs_head] = 1 - return created_dirs - -def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0): - """Create all the empty directories under 'base_dir' needed to put 'files' - there. 
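For orientation, mkpath() above largely predates os.makedirs(); a rough modern approximation of its observable behaviour (minus the logging, the dry-run bookkeeping and the list of created directories) could look like this sketch.

import os

def mkpath_modern(name, mode=0o777, dry_run=False):
    # Silently succeed when the directory (or '') already exists; otherwise
    # create every missing ancestor, like mkpath() does.
    if name and not dry_run:
        os.makedirs(name, mode, exist_ok=True)

# e.g. mkpath_modern("build/lib/pkg/sub")  # creates every missing level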
- - 'base_dir' is just the name of a directory which doesn't necessarily - exist yet; 'files' is a list of filenames to be interpreted relative to - 'base_dir'. 'base_dir' + the directory portion of every file in 'files' - will be created if it doesn't already exist. 'mode', 'verbose' and - 'dry_run' flags are as for 'mkpath()'. - """ - # First get the list of directories to create - need_dir = set() - for file in files: - need_dir.add(os.path.join(base_dir, os.path.dirname(file))) - - # Now create them - for dir in sorted(need_dir): - mkpath(dir, mode, verbose=verbose, dry_run=dry_run) - -def copy_tree(src, dst, preserve_mode=1, preserve_times=1, - preserve_symlinks=0, update=0, verbose=1, dry_run=0): - """Copy an entire directory tree 'src' to a new location 'dst'. - - Both 'src' and 'dst' must be directory names. If 'src' is not a - directory, raise DistutilsFileError. If 'dst' does not exist, it is - created with 'mkpath()'. The end result of the copy is that every - file in 'src' is copied to 'dst', and directories under 'src' are - recursively copied to 'dst'. Return the list of files that were - copied or might have been copied, using their output name. The - return value is unaffected by 'update' or 'dry_run': it is simply - the list of all files under 'src', with the names changed to be - under 'dst'. - - 'preserve_mode' and 'preserve_times' are the same as for - 'copy_file'; note that they only apply to regular files, not to - directories. If 'preserve_symlinks' is true, symlinks will be - copied as symlinks (on platforms that support them!); otherwise - (the default), the destination of the symlink will be copied. - 'update' and 'verbose' are the same as for 'copy_file'. - """ - from distutils.file_util import copy_file - - if not dry_run and not os.path.isdir(src): - raise DistutilsFileError( - "cannot copy tree '%s': not a directory" % src) - try: - names = os.listdir(src) - except OSError as e: - if dry_run: - names = [] - else: - raise DistutilsFileError( - "error listing files in '%s': %s" % (src, e.strerror)) - - if not dry_run: - mkpath(dst, verbose=verbose) - - outputs = [] - - for n in names: - src_name = os.path.join(src, n) - dst_name = os.path.join(dst, n) - - if n.startswith('.nfs'): - # skip NFS rename files - continue - - if preserve_symlinks and os.path.islink(src_name): - link_dest = os.readlink(src_name) - if verbose >= 1: - log.info("linking %s -> %s", dst_name, link_dest) - if not dry_run: - os.symlink(link_dest, dst_name) - outputs.append(dst_name) - - elif os.path.isdir(src_name): - outputs.extend( - copy_tree(src_name, dst_name, preserve_mode, - preserve_times, preserve_symlinks, update, - verbose=verbose, dry_run=dry_run)) - else: - copy_file(src_name, dst_name, preserve_mode, - preserve_times, update, verbose=verbose, - dry_run=dry_run) - outputs.append(dst_name) - - return outputs - -def _build_cmdtuple(path, cmdtuples): - """Helper for remove_tree().""" - for f in os.listdir(path): - real_f = os.path.join(path,f) - if os.path.isdir(real_f) and not os.path.islink(real_f): - _build_cmdtuple(real_f, cmdtuples) - else: - cmdtuples.append((os.remove, real_f)) - cmdtuples.append((os.rmdir, path)) - -def remove_tree(directory, verbose=1, dry_run=0): - """Recursively remove an entire directory tree. - - Any errors are ignored (apart from being reported to stdout if 'verbose' - is true). 
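copy_tree() above behaves much like shutil.copytree() with dirs_exist_ok=True (Python 3.8+), except that it also returns the destination file names and skips '.nfs*' files; a hypothetical usage sketch, assuming the source paths exist:

import shutil

def copy_data_tree(src, dst):
    # Roughly what copy_tree(src, dst, preserve_symlinks=1) does, without
    # the returned list of output names or the NFS special-casing.
    shutil.copytree(src, dst, symlinks=True, dirs_exist_ok=True)

# e.g. copy_data_tree("pkg/data", "build/lib/pkg/data")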
- """ - global _path_created - - if verbose >= 1: - log.info("removing '%s' (and everything under it)", directory) - if dry_run: - return - cmdtuples = [] - _build_cmdtuple(directory, cmdtuples) - for cmd in cmdtuples: - try: - cmd[0](cmd[1]) - # remove dir from cache if it's already there - abspath = os.path.abspath(cmd[1]) - if abspath in _path_created: - del _path_created[abspath] - except OSError as exc: - log.warn("error removing %s: %s", directory, exc) - -def ensure_relative(path): - """Take the full path 'path', and make it a relative path. - - This is useful to make 'path' the second argument to os.path.join(). - """ - drive, path = os.path.splitdrive(path) - if path[0:1] == os.sep: - path = drive + path[1:] - return path diff --git a/Lib/distutils/dist.py b/Lib/distutils/dist.py deleted file mode 100644 index 6cf0a0d6632dc75..000000000000000 --- a/Lib/distutils/dist.py +++ /dev/null @@ -1,1256 +0,0 @@ -"""distutils.dist - -Provides the Distribution class, which represents the module distribution -being built/installed/distributed. -""" - -import sys -import os -import re -from email import message_from_file - -try: - import warnings -except ImportError: - warnings = None - -from distutils.errors import * -from distutils.fancy_getopt import FancyGetopt, translate_longopt -from distutils.util import check_environ, strtobool, rfc822_escape -from distutils import log -from distutils.debug import DEBUG - -# Regex to define acceptable Distutils command names. This is not *quite* -# the same as a Python NAME -- I don't allow leading underscores. The fact -# that they're very similar is no coincidence; the default naming scheme is -# to look for a Python module named after the command. -command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$') - - -def _ensure_list(value, fieldname): - if isinstance(value, str): - # a string containing comma separated values is okay. It will - # be converted to a list by Distribution.finalize_options(). - pass - elif not isinstance(value, list): - # passing a tuple or an iterator perhaps, warn and convert - typename = type(value).__name__ - msg = f"Warning: '{fieldname}' should be a list, got type '{typename}'" - log.log(log.WARN, msg) - value = list(value) - return value - - -class Distribution: - """The core of the Distutils. Most of the work hiding behind 'setup' - is really done within a Distribution instance, which farms the work out - to the Distutils commands specified on the command line. - - Setup scripts will almost never instantiate Distribution directly, - unless the 'setup()' function is totally inadequate to their needs. - However, it is conceivable that a setup script might wish to subclass - Distribution for some specialized purpose, and then pass the subclass - to 'setup()' as the 'distclass' keyword argument. If so, it is - necessary to respect the expectations that 'setup' has of Distribution. - See the code for 'setup()', in core.py, for details. - """ - - # 'global_options' describes the command-line options that may be - # supplied to the setup script prior to any actual commands. - # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of - # these global options. This list should be kept to a bare minimum, - # since every global option is also valid as a command option -- and we - # don't want to pollute the commands with too many options that they - # have minimal control over. - # The fourth entry for verbose means that it can be repeated. 
- global_options = [ - ('verbose', 'v', "run verbosely (default)", 1), - ('quiet', 'q', "run quietly (turns verbosity off)"), - ('dry-run', 'n', "don't actually do anything"), - ('help', 'h', "show detailed help message"), - ('no-user-cfg', None, - 'ignore pydistutils.cfg in your home directory'), - ] - - # 'common_usage' is a short (2-3 line) string describing the common - # usage of the setup script. - common_usage = """\ -Common commands: (see '--help-commands' for more) - - setup.py build will build the package underneath 'build/' - setup.py install will install the package -""" - - # options that are not propagated to the commands - display_options = [ - ('help-commands', None, - "list all available commands"), - ('name', None, - "print package name"), - ('version', 'V', - "print package version"), - ('fullname', None, - "print -"), - ('author', None, - "print the author's name"), - ('author-email', None, - "print the author's email address"), - ('maintainer', None, - "print the maintainer's name"), - ('maintainer-email', None, - "print the maintainer's email address"), - ('contact', None, - "print the maintainer's name if known, else the author's"), - ('contact-email', None, - "print the maintainer's email address if known, else the author's"), - ('url', None, - "print the URL for this package"), - ('license', None, - "print the license of the package"), - ('licence', None, - "alias for --license"), - ('description', None, - "print the package description"), - ('long-description', None, - "print the long package description"), - ('platforms', None, - "print the list of platforms"), - ('classifiers', None, - "print the list of classifiers"), - ('keywords', None, - "print the list of keywords"), - ('provides', None, - "print the list of packages/modules provided"), - ('requires', None, - "print the list of packages/modules required"), - ('obsoletes', None, - "print the list of packages/modules made obsolete") - ] - display_option_names = [translate_longopt(x[0]) for x in display_options] - - # negative options are options that exclude other options - negative_opt = {'quiet': 'verbose'} - - # -- Creation/initialization methods ------------------------------- - - def __init__(self, attrs=None): - """Construct a new Distribution instance: initialize all the - attributes of a Distribution, and then use 'attrs' (a dictionary - mapping attribute names to values) to assign some of those - attributes their "real" values. (Any attributes not mentioned in - 'attrs' will be assigned to some null value: 0, None, an empty list - or dictionary, etc.) Most importantly, initialize the - 'command_obj' attribute to the empty dictionary; this will be - filled in with real command objects by 'parse_command_line()'. - """ - - # Default values for our command-line options - self.verbose = 1 - self.dry_run = 0 - self.help = 0 - for attr in self.display_option_names: - setattr(self, attr, 0) - - # Store the distribution meta-data (name, version, author, and so - # forth) in a separate object -- we're getting to have enough - # information here (and enough command-line options) that it's - # worth it. Also delegate 'get_XXX()' methods to the 'metadata' - # object in a sneaky and underhanded (but efficient!) way. 
- self.metadata = DistributionMetadata() - for basename in self.metadata._METHOD_BASENAMES: - method_name = "get_" + basename - setattr(self, method_name, getattr(self.metadata, method_name)) - - # 'cmdclass' maps command names to class objects, so we - # can 1) quickly figure out which class to instantiate when - # we need to create a new command object, and 2) have a way - # for the setup script to override command classes - self.cmdclass = {} - - # 'command_packages' is a list of packages in which commands - # are searched for. The factory for command 'foo' is expected - # to be named 'foo' in the module 'foo' in one of the packages - # named here. This list is searched from the left; an error - # is raised if no named package provides the command being - # searched for. (Always access using get_command_packages().) - self.command_packages = None - - # 'script_name' and 'script_args' are usually set to sys.argv[0] - # and sys.argv[1:], but they can be overridden when the caller is - # not necessarily a setup script run from the command-line. - self.script_name = None - self.script_args = None - - # 'command_options' is where we store command options between - # parsing them (from config files, the command-line, etc.) and when - # they are actually needed -- ie. when the command in question is - # instantiated. It is a dictionary of dictionaries of 2-tuples: - # command_options = { command_name : { option : (source, value) } } - self.command_options = {} - - # 'dist_files' is the list of (command, pyversion, file) that - # have been created by any dist commands run so far. This is - # filled regardless of whether the run is dry or not. pyversion - # gives sysconfig.get_python_version() if the dist file is - # specific to a Python version, 'any' if it is good for all - # Python versions on the target platform, and '' for a source - # file. pyversion should not be used to specify minimum or - # maximum required Python versions; use the metainfo for that - # instead. - self.dist_files = [] - - # These options are really the business of various commands, rather - # than of the Distribution itself. We provide aliases for them in - # Distribution as a convenience to the developer. - self.packages = None - self.package_data = {} - self.package_dir = None - self.py_modules = None - self.libraries = None - self.headers = None - self.ext_modules = None - self.ext_package = None - self.include_dirs = None - self.extra_path = None - self.scripts = None - self.data_files = None - self.password = '' - - # And now initialize bookkeeping stuff that can't be supplied by - # the caller at all. 'command_obj' maps command names to - # Command instances -- that's how we enforce that every command - # class is a singleton. - self.command_obj = {} - - # 'have_run' maps command names to boolean values; it keeps track - # of whether we have actually run a particular command, to make it - # cheap to "run" a command whenever we think we might need to -- if - # it's already been done, no need for expensive filesystem - # operations, we just check the 'have_run' dictionary and carry on. - # It's only safe to query 'have_run' for a command class that has - # been instantiated -- a false value will be inserted when the - # command object is created, and replaced with a true value when - # the command is successfully run. Thus it's probably best to use - # '.get()' rather than a straight lookup. 
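To make the nested command_options layout described above concrete, a hypothetical snapshot after reading a config file and the command line might look like this (command, option and path names are invented for illustration).

# { command_name : { option : (source, value) } }
command_options = {
    "build_ext": {
        "inplace": ("command line", 1),
        "include_dirs": ("setup.cfg", "/opt/include"),
    },
}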
- self.have_run = {} - - # Now we'll use the attrs dictionary (ultimately, keyword args from - # the setup script) to possibly override any or all of these - # distribution options. - - if attrs: - # Pull out the set of command options and work on them - # specifically. Note that this order guarantees that aliased - # command options will override any supplied redundantly - # through the general options dictionary. - options = attrs.get('options') - if options is not None: - del attrs['options'] - for (command, cmd_options) in options.items(): - opt_dict = self.get_option_dict(command) - for (opt, val) in cmd_options.items(): - opt_dict[opt] = ("setup script", val) - - if 'licence' in attrs: - attrs['license'] = attrs['licence'] - del attrs['licence'] - msg = "'licence' distribution option is deprecated; use 'license'" - if warnings is not None: - warnings.warn(msg) - else: - sys.stderr.write(msg + "\n") - - # Now work on the rest of the attributes. Any attribute that's - # not already defined is invalid! - for (key, val) in attrs.items(): - if hasattr(self.metadata, "set_" + key): - getattr(self.metadata, "set_" + key)(val) - elif hasattr(self.metadata, key): - setattr(self.metadata, key, val) - elif hasattr(self, key): - setattr(self, key, val) - else: - msg = "Unknown distribution option: %s" % repr(key) - warnings.warn(msg) - - # no-user-cfg is handled before other command line args - # because other args override the config files, and this - # one is needed before we can load the config files. - # If attrs['script_args'] wasn't passed, assume false. - # - # This also make sure we just look at the global options - self.want_user_cfg = True - - if self.script_args is not None: - for arg in self.script_args: - if not arg.startswith('-'): - break - if arg == '--no-user-cfg': - self.want_user_cfg = False - break - - self.finalize_options() - - def get_option_dict(self, command): - """Get the option dictionary for a given command. If that - command's option dictionary hasn't been created yet, then create it - and return the new dictionary; otherwise, return the existing - option dictionary. - """ - dict = self.command_options.get(command) - if dict is None: - dict = self.command_options[command] = {} - return dict - - def dump_option_dicts(self, header=None, commands=None, indent=""): - from pprint import pformat - - if commands is None: # dump all command option dicts - commands = sorted(self.command_options.keys()) - - if header is not None: - self.announce(indent + header) - indent = indent + " " - - if not commands: - self.announce(indent + "no commands known yet") - return - - for cmd_name in commands: - opt_dict = self.command_options.get(cmd_name) - if opt_dict is None: - self.announce(indent + - "no option dict for '%s' command" % cmd_name) - else: - self.announce(indent + - "option dict for '%s' command:" % cmd_name) - out = pformat(opt_dict) - for line in out.split('\n'): - self.announce(indent + " " + line) - - # -- Config file finding/parsing methods --------------------------- - - def find_config_files(self): - """Find as many configuration files as should be processed for this - platform, and return a list of filenames in the order in which they - should be parsed. The filenames returned are guaranteed to exist - (modulo nasty race conditions). - - There are three possible config files: distutils.cfg in the - Distutils installation directory (ie. 
where the top-level - Distutils __inst__.py file lives), a file in the user's home - directory named .pydistutils.cfg on Unix and pydistutils.cfg - on Windows/Mac; and setup.cfg in the current directory. - - The file in the user's home directory can be disabled with the - --no-user-cfg option. - """ - files = [] - check_environ() - - # Where to look for the system-wide Distutils config file - sys_dir = os.path.dirname(sys.modules['distutils'].__file__) - - # Look for the system config file - sys_file = os.path.join(sys_dir, "distutils.cfg") - if os.path.isfile(sys_file): - files.append(sys_file) - - # What to call the per-user config file - if os.name == 'posix': - user_filename = ".pydistutils.cfg" - else: - user_filename = "pydistutils.cfg" - - # And look for the user config file - if self.want_user_cfg: - user_file = os.path.join(os.path.expanduser('~'), user_filename) - if os.path.isfile(user_file): - files.append(user_file) - - # All platforms support local setup.cfg - local_file = "setup.cfg" - if os.path.isfile(local_file): - files.append(local_file) - - if DEBUG: - self.announce("using config files: %s" % ', '.join(files)) - - return files - - def parse_config_files(self, filenames=None): - from configparser import ConfigParser - - # Ignore install directory options if we have a venv - if sys.prefix != sys.base_prefix: - ignore_options = [ - 'install-base', 'install-platbase', 'install-lib', - 'install-platlib', 'install-purelib', 'install-headers', - 'install-scripts', 'install-data', 'prefix', 'exec-prefix', - 'home', 'user', 'root'] - else: - ignore_options = [] - - ignore_options = frozenset(ignore_options) - - if filenames is None: - filenames = self.find_config_files() - - if DEBUG: - self.announce("Distribution.parse_config_files():") - - parser = ConfigParser() - for filename in filenames: - if DEBUG: - self.announce(" reading %s" % filename) - parser.read(filename) - for section in parser.sections(): - options = parser.options(section) - opt_dict = self.get_option_dict(section) - - for opt in options: - if opt != '__name__' and opt not in ignore_options: - val = parser.get(section,opt) - opt = opt.replace('-', '_') - opt_dict[opt] = (filename, val) - - # Make the ConfigParser forget everything (so we retain - # the original filenames that options come from) - parser.__init__() - - # If there was a "global" section in the config file, use it - # to set Distribution options. - - if 'global' in self.command_options: - for (opt, (src, val)) in self.command_options['global'].items(): - alias = self.negative_opt.get(opt) - try: - if alias: - setattr(self, alias, not strtobool(val)) - elif opt in ('verbose', 'dry_run'): # ugh! - setattr(self, opt, strtobool(val)) - else: - setattr(self, opt, val) - except ValueError as msg: - raise DistutilsOptionError(msg) - - # -- Command-line parsing methods ---------------------------------- - - def parse_command_line(self): - """Parse the setup script's command line, taken from the - 'script_args' instance attribute (which defaults to 'sys.argv[1:]' - -- see 'setup()' in core.py). This list is first processed for - "global options" -- options that set attributes of the Distribution - instance. Then, it is alternately scanned for Distutils commands - and options for that command. Each new command terminates the - options for the previous command. The allowed options for a - command are determined by the 'user_options' attribute of the - command class -- thus, we have to be able to load command classes - in order to parse the command line. 
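As an illustration of parse_config_files() above (file contents hypothetical): options from a [build_ext] section land in that command's option dictionary with the filename recorded as the source, and hyphens become underscores.

from configparser import ConfigParser

cfg_text = """
[build_ext]
include-dirs = /opt/include
inplace = 1
"""

parser = ConfigParser()
parser.read_string(cfg_text)
opt_dict = {}
for opt in parser.options("build_ext"):
    if opt != '__name__':
        # mirrors the loop above: remember where each value came from
        opt_dict[opt.replace('-', '_')] = ("setup.cfg", parser.get("build_ext", opt))

# opt_dict == {'include_dirs': ('setup.cfg', '/opt/include'),
#              'inplace': ('setup.cfg', '1')}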
Any error in that 'options' - attribute raises DistutilsGetoptError; any error on the - command-line raises DistutilsArgError. If no Distutils commands - were found on the command line, raises DistutilsArgError. Return - true if command-line was successfully parsed and we should carry - on with executing commands; false if no errors but we shouldn't - execute commands (currently, this only happens if user asks for - help). - """ - # - # We now have enough information to show the Macintosh dialog - # that allows the user to interactively specify the "command line". - # - toplevel_options = self._get_toplevel_options() - - # We have to parse the command line a bit at a time -- global - # options, then the first command, then its options, and so on -- - # because each command will be handled by a different class, and - # the options that are valid for a particular class aren't known - # until we have loaded the command class, which doesn't happen - # until we know what the command is. - - self.commands = [] - parser = FancyGetopt(toplevel_options + self.display_options) - parser.set_negative_aliases(self.negative_opt) - parser.set_aliases({'licence': 'license'}) - args = parser.getopt(args=self.script_args, object=self) - option_order = parser.get_option_order() - log.set_verbosity(self.verbose) - - # for display options we return immediately - if self.handle_display_options(option_order): - return - while args: - args = self._parse_command_opts(parser, args) - if args is None: # user asked for help (and got it) - return - - # Handle the cases of --help as a "global" option, ie. - # "setup.py --help" and "setup.py --help command ...". For the - # former, we show global options (--verbose, --dry-run, etc.) - # and display-only options (--name, --version, etc.); for the - # latter, we omit the display-only options and show help for - # each command listed on the command line. - if self.help: - self._show_help(parser, - display_options=len(self.commands) == 0, - commands=self.commands) - return - - # Oops, no commands found -- an end-user error - if not self.commands: - raise DistutilsArgError("no commands supplied") - - # All is well: return true - return True - - def _get_toplevel_options(self): - """Return the non-display options recognized at the top level. - - This includes options that are recognized *only* at the top - level as well as options recognized for commands. - """ - return self.global_options + [ - ("command-packages=", None, - "list of packages that provide distutils commands"), - ] - - def _parse_command_opts(self, parser, args): - """Parse the command-line options for a single command. - 'parser' must be a FancyGetopt instance; 'args' must be the list - of arguments, starting with the current command (whose options - we are about to parse). Returns a new version of 'args' with - the next command at the front of the list; will be the empty - list if there are no more commands on the command line. Returns - None if the user asked for help on this command. - """ - # late import because of mutual dependence between these modules - from distutils.cmd import Command - - # Pull the current command from the head of the command line - command = args[0] - if not command_re.match(command): - raise SystemExit("invalid command name '%s'" % command) - self.commands.append(command) - - # Dig up the command class that implements this command, so we - # 1) know that it's a valid command, and 2) know which options - # it takes. 
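A quick illustration of the command-name check above: command_re accepts letters, digits and underscores, but the name must start with a letter.

import re

command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$')

print(bool(command_re.match("build_ext")))    # True
print(bool(command_re.match("_private")))     # False: leading underscore
print(bool(command_re.match("bdist-wheel")))  # False: '-' is not allowed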
- try: - cmd_class = self.get_command_class(command) - except DistutilsModuleError as msg: - raise DistutilsArgError(msg) - - # Require that the command class be derived from Command -- want - # to be sure that the basic "command" interface is implemented. - if not issubclass(cmd_class, Command): - raise DistutilsClassError( - "command class %s must subclass Command" % cmd_class) - - # Also make sure that the command object provides a list of its - # known options. - if not (hasattr(cmd_class, 'user_options') and - isinstance(cmd_class.user_options, list)): - msg = ("command class %s must provide " - "'user_options' attribute (a list of tuples)") - raise DistutilsClassError(msg % cmd_class) - - # If the command class has a list of negative alias options, - # merge it in with the global negative aliases. - negative_opt = self.negative_opt - if hasattr(cmd_class, 'negative_opt'): - negative_opt = negative_opt.copy() - negative_opt.update(cmd_class.negative_opt) - - # Check for help_options in command class. They have a different - # format (tuple of four) so we need to preprocess them here. - if (hasattr(cmd_class, 'help_options') and - isinstance(cmd_class.help_options, list)): - help_options = fix_help_options(cmd_class.help_options) - else: - help_options = [] - - # All commands support the global options too, just by adding - # in 'global_options'. - parser.set_option_table(self.global_options + - cmd_class.user_options + - help_options) - parser.set_negative_aliases(negative_opt) - (args, opts) = parser.getopt(args[1:]) - if hasattr(opts, 'help') and opts.help: - self._show_help(parser, display_options=0, commands=[cmd_class]) - return - - if (hasattr(cmd_class, 'help_options') and - isinstance(cmd_class.help_options, list)): - help_option_found=0 - for (help_option, short, desc, func) in cmd_class.help_options: - if hasattr(opts, parser.get_attr_name(help_option)): - help_option_found=1 - if callable(func): - func() - else: - raise DistutilsClassError( - "invalid help function %r for help option '%s': " - "must be a callable object (function, etc.)" - % (func, help_option)) - - if help_option_found: - return - - # Put the options from the command-line into their official - # holding pen, the 'command_options' dictionary. - opt_dict = self.get_option_dict(command) - for (name, value) in vars(opts).items(): - opt_dict[name] = ("command line", value) - - return args - - def finalize_options(self): - """Set final values for all the options on the Distribution - instance, analogous to the .finalize_options() method of Command - objects. - """ - for attr in ('keywords', 'platforms'): - value = getattr(self.metadata, attr) - if value is None: - continue - if isinstance(value, str): - value = [elm.strip() for elm in value.split(',')] - setattr(self.metadata, attr, value) - - def _show_help(self, parser, global_options=1, display_options=1, - commands=[]): - """Show help for the setup script command-line in the form of - several lists of command-line options. 'parser' should be a - FancyGetopt instance; do not expect it to be returned in the - same state, as its option table will be reset to make it - generate the correct help text. - - If 'global_options' is true, lists the global options: - --verbose, --dry-run, etc. If 'display_options' is true, lists - the "display-only" options: --name, --version, etc. Finally, - lists per-command help for every command name or command class - in 'commands'. 
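The normalisation done by finalize_options() above is small but easy to miss: a comma-separated keywords or platforms string becomes a stripped list, as in this sketch.

value = "packaging, distutils , setup"
keywords = [elm.strip() for elm in value.split(',')]
# -> ['packaging', 'distutils', 'setup']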
- """ - # late import because of mutual dependence between these modules - from distutils.core import gen_usage - from distutils.cmd import Command - - if global_options: - if display_options: - options = self._get_toplevel_options() - else: - options = self.global_options - parser.set_option_table(options) - parser.print_help(self.common_usage + "\nGlobal options:") - print('') - - if display_options: - parser.set_option_table(self.display_options) - parser.print_help( - "Information display options (just display " + - "information, ignore any commands)") - print('') - - for command in self.commands: - if isinstance(command, type) and issubclass(command, Command): - klass = command - else: - klass = self.get_command_class(command) - if (hasattr(klass, 'help_options') and - isinstance(klass.help_options, list)): - parser.set_option_table(klass.user_options + - fix_help_options(klass.help_options)) - else: - parser.set_option_table(klass.user_options) - parser.print_help("Options for '%s' command:" % klass.__name__) - print('') - - print(gen_usage(self.script_name)) - - def handle_display_options(self, option_order): - """If there were any non-global "display-only" options - (--help-commands or the metadata display options) on the command - line, display the requested info and return true; else return - false. - """ - from distutils.core import gen_usage - - # User just wants a list of commands -- we'll print it out and stop - # processing now (ie. if they ran "setup --help-commands foo bar", - # we ignore "foo bar"). - if self.help_commands: - self.print_commands() - print('') - print(gen_usage(self.script_name)) - return 1 - - # If user supplied any of the "display metadata" options, then - # display that metadata in the order in which the user supplied the - # metadata options. - any_display_options = 0 - is_display_option = {} - for option in self.display_options: - is_display_option[option[0]] = 1 - - for (opt, val) in option_order: - if val and is_display_option.get(opt): - opt = translate_longopt(opt) - value = getattr(self.metadata, "get_"+opt)() - if opt in ['keywords', 'platforms']: - print(','.join(value)) - elif opt in ('classifiers', 'provides', 'requires', - 'obsoletes'): - print('\n'.join(value)) - else: - print(value) - any_display_options = 1 - - return any_display_options - - def print_command_list(self, commands, header, max_length): - """Print a subset of the list of all commands -- used by - 'print_commands()'. - """ - print(header + ":") - - for cmd in commands: - klass = self.cmdclass.get(cmd) - if not klass: - klass = self.get_command_class(cmd) - try: - description = klass.description - except AttributeError: - description = "(no description available)" - - print(" %-*s %s" % (max_length, cmd, description)) - - def print_commands(self): - """Print out a help message listing all available commands with a - description of each. The list is divided into "standard commands" - (listed in distutils.command.__all__) and "extra commands" - (mentioned in self.cmdclass, but not a standard command). The - descriptions come from the command class attribute - 'description'. 
- """ - import distutils.command - std_commands = distutils.command.__all__ - is_std = {} - for cmd in std_commands: - is_std[cmd] = 1 - - extra_commands = [] - for cmd in self.cmdclass.keys(): - if not is_std.get(cmd): - extra_commands.append(cmd) - - max_length = 0 - for cmd in (std_commands + extra_commands): - if len(cmd) > max_length: - max_length = len(cmd) - - self.print_command_list(std_commands, - "Standard commands", - max_length) - if extra_commands: - print() - self.print_command_list(extra_commands, - "Extra commands", - max_length) - - def get_command_list(self): - """Get a list of (command, description) tuples. - The list is divided into "standard commands" (listed in - distutils.command.__all__) and "extra commands" (mentioned in - self.cmdclass, but not a standard command). The descriptions come - from the command class attribute 'description'. - """ - # Currently this is only used on Mac OS, for the Mac-only GUI - # Distutils interface (by Jack Jansen) - import distutils.command - std_commands = distutils.command.__all__ - is_std = {} - for cmd in std_commands: - is_std[cmd] = 1 - - extra_commands = [] - for cmd in self.cmdclass.keys(): - if not is_std.get(cmd): - extra_commands.append(cmd) - - rv = [] - for cmd in (std_commands + extra_commands): - klass = self.cmdclass.get(cmd) - if not klass: - klass = self.get_command_class(cmd) - try: - description = klass.description - except AttributeError: - description = "(no description available)" - rv.append((cmd, description)) - return rv - - # -- Command class/object methods ---------------------------------- - - def get_command_packages(self): - """Return a list of packages from which commands are loaded.""" - pkgs = self.command_packages - if not isinstance(pkgs, list): - if pkgs is None: - pkgs = '' - pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != ''] - if "distutils.command" not in pkgs: - pkgs.insert(0, "distutils.command") - self.command_packages = pkgs - return pkgs - - def get_command_class(self, command): - """Return the class that implements the Distutils command named by - 'command'. First we check the 'cmdclass' dictionary; if the - command is mentioned there, we fetch the class object from the - dictionary and return it. Otherwise we load the command module - ("distutils.command." + command) and fetch the command class from - the module. The loaded class is also stored in 'cmdclass' - to speed future calls to 'get_command_class()'. - - Raises DistutilsModuleError if the expected module could not be - found, or if that module does not define the expected class. - """ - klass = self.cmdclass.get(command) - if klass: - return klass - - for pkgname in self.get_command_packages(): - module_name = "%s.%s" % (pkgname, command) - klass_name = command - - try: - __import__(module_name) - module = sys.modules[module_name] - except ImportError: - continue - - try: - klass = getattr(module, klass_name) - except AttributeError: - raise DistutilsModuleError( - "invalid command '%s' (no class '%s' in module '%s')" - % (command, klass_name, module_name)) - - self.cmdclass[command] = klass - return klass - - raise DistutilsModuleError("invalid command '%s'" % command) - - def get_command_obj(self, command, create=1): - """Return the command object for 'command'. Normally this object - is cached on a previous call to 'get_command_obj()'; if no command - object for 'command' is in the cache, then we either create and - return it (if 'create' is true) or return None. 
- """ - cmd_obj = self.command_obj.get(command) - if not cmd_obj and create: - if DEBUG: - self.announce("Distribution.get_command_obj(): " - "creating '%s' command object" % command) - - klass = self.get_command_class(command) - cmd_obj = self.command_obj[command] = klass(self) - self.have_run[command] = 0 - - # Set any options that were supplied in config files - # or on the command line. (NB. support for error - # reporting is lame here: any errors aren't reported - # until 'finalize_options()' is called, which means - # we won't report the source of the error.) - options = self.command_options.get(command) - if options: - self._set_command_options(cmd_obj, options) - - return cmd_obj - - def _set_command_options(self, command_obj, option_dict=None): - """Set the options for 'command_obj' from 'option_dict'. Basically - this means copying elements of a dictionary ('option_dict') to - attributes of an instance ('command'). - - 'command_obj' must be a Command instance. If 'option_dict' is not - supplied, uses the standard option dictionary for this command - (from 'self.command_options'). - """ - command_name = command_obj.get_command_name() - if option_dict is None: - option_dict = self.get_option_dict(command_name) - - if DEBUG: - self.announce(" setting options for '%s' command:" % command_name) - for (option, (source, value)) in option_dict.items(): - if DEBUG: - self.announce(" %s = %s (from %s)" % (option, value, - source)) - try: - bool_opts = [translate_longopt(o) - for o in command_obj.boolean_options] - except AttributeError: - bool_opts = [] - try: - neg_opt = command_obj.negative_opt - except AttributeError: - neg_opt = {} - - try: - is_string = isinstance(value, str) - if option in neg_opt and is_string: - setattr(command_obj, neg_opt[option], not strtobool(value)) - elif option in bool_opts and is_string: - setattr(command_obj, option, strtobool(value)) - elif hasattr(command_obj, option): - setattr(command_obj, option, value) - else: - raise DistutilsOptionError( - "error in %s: command '%s' has no such option '%s'" - % (source, command_name, option)) - except ValueError as msg: - raise DistutilsOptionError(msg) - - def reinitialize_command(self, command, reinit_subcommands=0): - """Reinitializes a command to the state it was in when first - returned by 'get_command_obj()': ie., initialized but not yet - finalized. This provides the opportunity to sneak option - values in programmatically, overriding or supplementing - user-supplied values from the config files and command line. - You'll have to re-finalize the command object (by calling - 'finalize_options()' or 'ensure_finalized()') before using it for - real. - - 'command' should be a command name (string) or command object. If - 'reinit_subcommands' is true, also reinitializes the command's - sub-commands, as declared by the 'sub_commands' class attribute (if - it has one). See the "install" command for an example. Only - reinitializes the sub-commands that actually matter, ie. those - whose test predicates return true. - - Returns the reinitialized command object. 
- """ - from distutils.cmd import Command - if not isinstance(command, Command): - command_name = command - command = self.get_command_obj(command_name) - else: - command_name = command.get_command_name() - - if not command.finalized: - return command - command.initialize_options() - command.finalized = 0 - self.have_run[command_name] = 0 - self._set_command_options(command) - - if reinit_subcommands: - for sub in command.get_sub_commands(): - self.reinitialize_command(sub, reinit_subcommands) - - return command - - # -- Methods that operate on the Distribution ---------------------- - - def announce(self, msg, level=log.INFO): - log.log(level, msg) - - def run_commands(self): - """Run each command that was seen on the setup script command line. - Uses the list of commands found and cache of command objects - created by 'get_command_obj()'. - """ - for cmd in self.commands: - self.run_command(cmd) - - # -- Methods that operate on its Commands -------------------------- - - def run_command(self, command): - """Do whatever it takes to run a command (including nothing at all, - if the command has already been run). Specifically: if we have - already created and run the command named by 'command', return - silently without doing anything. If the command named by 'command' - doesn't even have a command object yet, create one. Then invoke - 'run()' on that command object (or an existing one). - """ - # Already been here, done that? then return silently. - if self.have_run.get(command): - return - - log.info("running %s", command) - cmd_obj = self.get_command_obj(command) - cmd_obj.ensure_finalized() - cmd_obj.run() - self.have_run[command] = 1 - - # -- Distribution query methods ------------------------------------ - - def has_pure_modules(self): - return len(self.packages or self.py_modules or []) > 0 - - def has_ext_modules(self): - return self.ext_modules and len(self.ext_modules) > 0 - - def has_c_libraries(self): - return self.libraries and len(self.libraries) > 0 - - def has_modules(self): - return self.has_pure_modules() or self.has_ext_modules() - - def has_headers(self): - return self.headers and len(self.headers) > 0 - - def has_scripts(self): - return self.scripts and len(self.scripts) > 0 - - def has_data_files(self): - return self.data_files and len(self.data_files) > 0 - - def is_pure(self): - return (self.has_pure_modules() and - not self.has_ext_modules() and - not self.has_c_libraries()) - - # -- Metadata query methods ---------------------------------------- - - # If you're looking for 'get_name()', 'get_version()', and so forth, - # they are defined in a sneaky way: the constructor binds self.get_XXX - # to self.metadata.get_XXX. The actual code is in the - # DistributionMetadata class, below. - -class DistributionMetadata: - """Dummy class to hold the distribution meta-data: name, version, - author, and so forth. 
- """ - - _METHOD_BASENAMES = ("name", "version", "author", "author_email", - "maintainer", "maintainer_email", "url", - "license", "description", "long_description", - "keywords", "platforms", "fullname", "contact", - "contact_email", "classifiers", "download_url", - # PEP 314 - "provides", "requires", "obsoletes", - ) - - def __init__(self, path=None): - if path is not None: - self.read_pkg_file(open(path)) - else: - self.name = None - self.version = None - self.author = None - self.author_email = None - self.maintainer = None - self.maintainer_email = None - self.url = None - self.license = None - self.description = None - self.long_description = None - self.keywords = None - self.platforms = None - self.classifiers = None - self.download_url = None - # PEP 314 - self.provides = None - self.requires = None - self.obsoletes = None - - def read_pkg_file(self, file): - """Reads the metadata values from a file object.""" - msg = message_from_file(file) - - def _read_field(name): - value = msg[name] - if value == 'UNKNOWN': - return None - return value - - def _read_list(name): - values = msg.get_all(name, None) - if values == []: - return None - return values - - metadata_version = msg['metadata-version'] - self.name = _read_field('name') - self.version = _read_field('version') - self.description = _read_field('summary') - # we are filling author only. - self.author = _read_field('author') - self.maintainer = None - self.author_email = _read_field('author-email') - self.maintainer_email = None - self.url = _read_field('home-page') - self.license = _read_field('license') - - if 'download-url' in msg: - self.download_url = _read_field('download-url') - else: - self.download_url = None - - self.long_description = _read_field('description') - self.description = _read_field('summary') - - if 'keywords' in msg: - self.keywords = _read_field('keywords').split(',') - - self.platforms = _read_list('platform') - self.classifiers = _read_list('classifier') - - # PEP 314 - these fields only exist in 1.1 - if metadata_version == '1.1': - self.requires = _read_list('requires') - self.provides = _read_list('provides') - self.obsoletes = _read_list('obsoletes') - else: - self.requires = None - self.provides = None - self.obsoletes = None - - def write_pkg_info(self, base_dir): - """Write the PKG-INFO file into the release tree. - """ - with open(os.path.join(base_dir, 'PKG-INFO'), 'w', - encoding='UTF-8') as pkg_info: - self.write_pkg_file(pkg_info) - - def write_pkg_file(self, file): - """Write the PKG-INFO format data to a file object. 
- """ - version = '1.0' - if (self.provides or self.requires or self.obsoletes or - self.classifiers or self.download_url): - version = '1.1' - - file.write('Metadata-Version: %s\n' % version) - file.write('Name: %s\n' % self.get_name()) - file.write('Version: %s\n' % self.get_version()) - file.write('Summary: %s\n' % self.get_description()) - file.write('Home-page: %s\n' % self.get_url()) - file.write('Author: %s\n' % self.get_contact()) - file.write('Author-email: %s\n' % self.get_contact_email()) - file.write('License: %s\n' % self.get_license()) - if self.download_url: - file.write('Download-URL: %s\n' % self.download_url) - - long_desc = rfc822_escape(self.get_long_description()) - file.write('Description: %s\n' % long_desc) - - keywords = ','.join(self.get_keywords()) - if keywords: - file.write('Keywords: %s\n' % keywords) - - self._write_list(file, 'Platform', self.get_platforms()) - self._write_list(file, 'Classifier', self.get_classifiers()) - - # PEP 314 - self._write_list(file, 'Requires', self.get_requires()) - self._write_list(file, 'Provides', self.get_provides()) - self._write_list(file, 'Obsoletes', self.get_obsoletes()) - - def _write_list(self, file, name, values): - for value in values: - file.write('%s: %s\n' % (name, value)) - - # -- Metadata query methods ---------------------------------------- - - def get_name(self): - return self.name or "UNKNOWN" - - def get_version(self): - return self.version or "0.0.0" - - def get_fullname(self): - return "%s-%s" % (self.get_name(), self.get_version()) - - def get_author(self): - return self.author or "UNKNOWN" - - def get_author_email(self): - return self.author_email or "UNKNOWN" - - def get_maintainer(self): - return self.maintainer or "UNKNOWN" - - def get_maintainer_email(self): - return self.maintainer_email or "UNKNOWN" - - def get_contact(self): - return self.maintainer or self.author or "UNKNOWN" - - def get_contact_email(self): - return self.maintainer_email or self.author_email or "UNKNOWN" - - def get_url(self): - return self.url or "UNKNOWN" - - def get_license(self): - return self.license or "UNKNOWN" - get_licence = get_license - - def get_description(self): - return self.description or "UNKNOWN" - - def get_long_description(self): - return self.long_description or "UNKNOWN" - - def get_keywords(self): - return self.keywords or [] - - def set_keywords(self, value): - self.keywords = _ensure_list(value, 'keywords') - - def get_platforms(self): - return self.platforms or ["UNKNOWN"] - - def set_platforms(self, value): - self.platforms = _ensure_list(value, 'platforms') - - def get_classifiers(self): - return self.classifiers or [] - - def set_classifiers(self, value): - self.classifiers = _ensure_list(value, 'classifiers') - - def get_download_url(self): - return self.download_url or "UNKNOWN" - - # PEP 314 - def get_requires(self): - return self.requires or [] - - def set_requires(self, value): - import distutils.versionpredicate - for v in value: - distutils.versionpredicate.VersionPredicate(v) - self.requires = list(value) - - def get_provides(self): - return self.provides or [] - - def set_provides(self, value): - value = [v.strip() for v in value] - for v in value: - import distutils.versionpredicate - distutils.versionpredicate.split_provision(v) - self.provides = value - - def get_obsoletes(self): - return self.obsoletes or [] - - def set_obsoletes(self, value): - import distutils.versionpredicate - for v in value: - distutils.versionpredicate.VersionPredicate(v) - self.obsoletes = list(value) - -def 
fix_help_options(options): - """Convert a 4-tuple 'help_options' list as found in various command - classes to the 3-tuple form required by FancyGetopt. - """ - new_options = [] - for help_tuple in options: - new_options.append(help_tuple[0:3]) - return new_options diff --git a/Lib/distutils/errors.py b/Lib/distutils/errors.py deleted file mode 100644 index 8b93059e19faa9f..000000000000000 --- a/Lib/distutils/errors.py +++ /dev/null @@ -1,97 +0,0 @@ -"""distutils.errors - -Provides exceptions used by the Distutils modules. Note that Distutils -modules may raise standard exceptions; in particular, SystemExit is -usually raised for errors that are obviously the end-user's fault -(eg. bad command-line arguments). - -This module is safe to use in "from ... import *" mode; it only exports -symbols whose names start with "Distutils" and end with "Error".""" - -class DistutilsError (Exception): - """The root of all Distutils evil.""" - pass - -class DistutilsModuleError (DistutilsError): - """Unable to load an expected module, or to find an expected class - within some module (in particular, command modules and classes).""" - pass - -class DistutilsClassError (DistutilsError): - """Some command class (or possibly distribution class, if anyone - feels a need to subclass Distribution) is found not to be holding - up its end of the bargain, ie. implementing some part of the - "command "interface.""" - pass - -class DistutilsGetoptError (DistutilsError): - """The option table provided to 'fancy_getopt()' is bogus.""" - pass - -class DistutilsArgError (DistutilsError): - """Raised by fancy_getopt in response to getopt.error -- ie. an - error in the command line usage.""" - pass - -class DistutilsFileError (DistutilsError): - """Any problems in the filesystem: expected file not found, etc. - Typically this is for problems that we detect before OSError - could be raised.""" - pass - -class DistutilsOptionError (DistutilsError): - """Syntactic/semantic errors in command options, such as use of - mutually conflicting options, or inconsistent options, - badly-spelled values, etc. No distinction is made between option - values originating in the setup script, the command line, config - files, or what-have-you -- but if we *know* something originated in - the setup script, we'll raise DistutilsSetupError instead.""" - pass - -class DistutilsSetupError (DistutilsError): - """For errors that can be definitely blamed on the setup script, - such as invalid keyword arguments to 'setup()'.""" - pass - -class DistutilsPlatformError (DistutilsError): - """We don't know how to do something on the current platform (but - we do know how to do it on some platform) -- eg. 
trying to compile - C files on a platform not supported by a CCompiler subclass.""" - pass - -class DistutilsExecError (DistutilsError): - """Any problems executing an external program (such as the C - compiler, when compiling C files).""" - pass - -class DistutilsInternalError (DistutilsError): - """Internal inconsistencies or impossibilities (obviously, this - should never be seen if the code is working!).""" - pass - -class DistutilsTemplateError (DistutilsError): - """Syntax error in a file list template.""" - -class DistutilsByteCompileError(DistutilsError): - """Byte compile error.""" - -# Exception classes used by the CCompiler implementation classes -class CCompilerError (Exception): - """Some compile/link operation failed.""" - -class PreprocessError (CCompilerError): - """Failure to preprocess one or more C/C++ files.""" - -class CompileError (CCompilerError): - """Failure to compile one or more C/C++ source files.""" - -class LibError (CCompilerError): - """Failure to create a static library from one or more C/C++ object - files.""" - -class LinkError (CCompilerError): - """Failure to link one or more C/C++ object files into an executable - or shared library file.""" - -class UnknownFileError (CCompilerError): - """Attempt to process an unknown file type.""" diff --git a/Lib/distutils/extension.py b/Lib/distutils/extension.py deleted file mode 100644 index e85032ece8916f3..000000000000000 --- a/Lib/distutils/extension.py +++ /dev/null @@ -1,241 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts.""" - -import os -import re -import warnings - -# This class is really only used by the "build_ext" command, so it might -# make sense to put it in distutils.command.build_ext. However, that -# module is already big enough, and I want to make this class a bit more -# complex to simplify some common cases ("foo" module in "foo.c") and do -# better error-checking ("foo.c" actually exists). -# -# Also, putting this in build_ext.py means every setup script would have to -# import that large-ish module (indirectly, through distutils.core) in -# order to do anything. - -class Extension: - """Just a collection of attributes that describes an extension - module and everything needed to build it (hopefully in a portable - way, but there are hooks that let you be as unportable as you need). - - Instance attributes: - name : string - the full name of the extension, including any packages -- ie. - *not* a filename or pathname, but Python dotted name - sources : [string] - list of source filenames, relative to the distribution root - (where the setup script lives), in Unix form (slash-separated) - for portability. Source files may be C, C++, SWIG (.i), - platform-specific resource files, or whatever else is recognized - by the "build_ext" command as source for a Python extension. 
- include_dirs : [string] - list of directories to search for C/C++ header files (in Unix - form for portability) - define_macros : [(name : string, value : string|None)] - list of macros to define; each macro is defined using a 2-tuple, - where 'value' is either the string to define it to or None to - define it without a particular value (equivalent of "#define - FOO" in source or -DFOO on Unix C compiler command line) - undef_macros : [string] - list of macros to undefine explicitly - library_dirs : [string] - list of directories to search for C/C++ libraries at link time - libraries : [string] - list of library names (not filenames or paths) to link against - runtime_library_dirs : [string] - list of directories to search for C/C++ libraries at run time - (for shared extensions, this is when the extension is loaded) - extra_objects : [string] - list of extra files to link with (eg. object files not implied - by 'sources', static library that must be explicitly specified, - binary resource files, etc.) - extra_compile_args : [string] - any extra platform- and compiler-specific information to use - when compiling the source files in 'sources'. For platforms and - compilers where "command line" makes sense, this is typically a - list of command-line arguments, but for other platforms it could - be anything. - extra_link_args : [string] - any extra platform- and compiler-specific information to use - when linking object files together to create the extension (or - to create a new static Python interpreter). Similar - interpretation as for 'extra_compile_args'. - export_symbols : [string] - list of symbols to be exported from a shared extension. Not - used on all platforms, and not generally necessary for Python - extensions, which typically export exactly one symbol: "init" + - extension_name. - swig_opts : [string] - any extra options to pass to SWIG if a source file has the .i - extension. - depends : [string] - list of files that the extension depends on - language : string - extension language (i.e. "c", "c++", "objc"). Will be detected - from the source extensions if not provided. - optional : boolean - specifies that a build failure in the extension should not abort the - build process, but simply not install the failing extension. - """ - - # When adding arguments to this constructor, be sure to update - # setup_keywords in core.py. 
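For context, a minimal sketch of how the Extension class described above was typically used in a setup script; the module and file names are hypothetical, and on Python 3.12+ the equivalent class is provided by setuptools rather than by this removed module.

    # setup.py -- illustrative only; run as `python setup.py build_ext --inplace`
    # on an interpreter that can still import distutils (or with setuptools installed).
    from distutils.core import setup, Extension

    demo = Extension(
        "demo",                              # importable dotted name, not a filename
        sources=["demo.c"],
        include_dirs=["include"],
        define_macros=[("NDEBUG", None)],    # equivalent of -DNDEBUG on the compiler line
        libraries=["m"],                     # link against libm on POSIX
    )

    setup(name="demo", version="1.0", ext_modules=[demo])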
- def __init__(self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts = None, - depends=None, - language=None, - optional=None, - **kw # To catch unknown keywords - ): - if not isinstance(name, str): - raise AssertionError("'name' must be a string") - if not (isinstance(sources, list) and - all(isinstance(v, str) for v in sources)): - raise AssertionError("'sources' must be a list of strings") - - self.name = name - self.sources = sources - self.include_dirs = include_dirs or [] - self.define_macros = define_macros or [] - self.undef_macros = undef_macros or [] - self.library_dirs = library_dirs or [] - self.libraries = libraries or [] - self.runtime_library_dirs = runtime_library_dirs or [] - self.extra_objects = extra_objects or [] - self.extra_compile_args = extra_compile_args or [] - self.extra_link_args = extra_link_args or [] - self.export_symbols = export_symbols or [] - self.swig_opts = swig_opts or [] - self.depends = depends or [] - self.language = language - self.optional = optional - - # If there are unknown keyword options, warn about them - if len(kw) > 0: - options = [repr(option) for option in kw] - options = ', '.join(sorted(options)) - msg = "Unknown Extension options: %s" % options - warnings.warn(msg) - - def __repr__(self): - return '<%s.%s(%r) at %#x>' % ( - self.__class__.__module__, - self.__class__.__qualname__, - self.name, - id(self)) - - -def read_setup_file(filename): - """Reads a Setup file and returns Extension instances.""" - from distutils.sysconfig import (parse_makefile, expand_makefile_vars, - _variable_rx) - - from distutils.text_file import TextFile - from distutils.util import split_quoted - - # First pass over the file to gather "VAR = VALUE" assignments. - vars = parse_makefile(filename) - - # Second pass to gobble up the real content: lines of the form - # ... [ ...] [ ...] [ ...] - file = TextFile(filename, - strip_comments=1, skip_blanks=1, join_lines=1, - lstrip_ws=1, rstrip_ws=1) - try: - extensions = [] - - while True: - line = file.readline() - if line is None: # eof - break - if re.match(_variable_rx, line): # VAR=VALUE, handled in first pass - continue - - if line[0] == line[-1] == "*": - file.warn("'%s' lines not handled yet" % line) - continue - - line = expand_makefile_vars(line, vars) - words = split_quoted(line) - - # NB. this parses a slightly different syntax than the old - # makesetup script: here, there must be exactly one extension per - # line, and it must be the first word of the line. I have no idea - # why the old syntax supported multiple extensions per line, as - # they all wind up being the same. - - module = words[0] - ext = Extension(module, []) - append_next_word = None - - for word in words[1:]: - if append_next_word is not None: - append_next_word.append(word) - append_next_word = None - continue - - suffix = os.path.splitext(word)[1] - switch = word[0:2] ; value = word[2:] - - if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"): - # hmm, should we do something about C vs. C++ sources? - # or leave it up to the CCompiler implementation to - # worry about? 
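The parsing loop above consumes Modules/Setup-style lines. A hedged sketch of that input format and of what read_setup_file() produced from it (hypothetical module and file names, assuming an interpreter where distutils is still importable):

    import os, tempfile
    from distutils.extension import read_setup_file

    # One extension per line: module name first, then sources and compiler switches.
    setup_line = "spam spammodule.c -DUSE_FEATURE -Iinclude -lm -Llib\n"
    with tempfile.NamedTemporaryFile("w", suffix=".Setup", delete=False) as f:
        f.write(setup_line)
        path = f.name
    try:
        ext = read_setup_file(path)[0]
        print(ext.name)           # spam
        print(ext.sources)        # ['spammodule.c']
        print(ext.define_macros)  # [('USE_FEATURE', None)]
        print(ext.libraries)      # ['m']
        print(ext.library_dirs)   # ['lib']
    finally:
        os.unlink(path)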
- ext.sources.append(word) - elif switch == "-I": - ext.include_dirs.append(value) - elif switch == "-D": - equals = value.find("=") - if equals == -1: # bare "-DFOO" -- no value - ext.define_macros.append((value, None)) - else: # "-DFOO=blah" - ext.define_macros.append((value[0:equals], - value[equals+2:])) - elif switch == "-U": - ext.undef_macros.append(value) - elif switch == "-C": # only here 'cause makesetup has it! - ext.extra_compile_args.append(word) - elif switch == "-l": - ext.libraries.append(value) - elif switch == "-L": - ext.library_dirs.append(value) - elif switch == "-R": - ext.runtime_library_dirs.append(value) - elif word == "-rpath": - append_next_word = ext.runtime_library_dirs - elif word == "-Xlinker": - append_next_word = ext.extra_link_args - elif word == "-Xcompiler": - append_next_word = ext.extra_compile_args - elif switch == "-u": - ext.extra_link_args.append(word) - if not value: - append_next_word = ext.extra_link_args - elif suffix in (".a", ".so", ".sl", ".o", ".dylib"): - # NB. a really faithful emulation of makesetup would - # append a .o file to extra_objects only if it - # had a slash in it; otherwise, it would s/.o/.c/ - # and append it to sources. Hmmmm. - ext.extra_objects.append(word) - else: - file.warn("unrecognized argument '%s'" % word) - - extensions.append(ext) - finally: - file.close() - - return extensions diff --git a/Lib/distutils/fancy_getopt.py b/Lib/distutils/fancy_getopt.py deleted file mode 100644 index 7d170dd27731a5c..000000000000000 --- a/Lib/distutils/fancy_getopt.py +++ /dev/null @@ -1,457 +0,0 @@ -"""distutils.fancy_getopt - -Wrapper around the standard getopt module that provides the following -additional features: - * short and long options are tied together - * options have help strings, so fancy_getopt could potentially - create a complete usage summary - * options set attributes of a passed-in object -""" - -import sys, string, re -import getopt -from distutils.errors import * - -# Much like command_re in distutils.core, this is close to but not quite -# the same as a Python NAME -- except, in the spirit of most GNU -# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!) -# The similarities to NAME are again not a coincidence... -longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)' -longopt_re = re.compile(r'^%s$' % longopt_pat) - -# For recognizing "negative alias" options, eg. "quiet=!verbose" -neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat)) - -# This is used to translate long options to legitimate Python identifiers -# (for use as attributes of some object). -longopt_xlate = str.maketrans('-', '_') - -class FancyGetopt: - """Wrapper around the standard 'getopt()' module that provides some - handy extra functionality: - * short and long options are tied together - * options have help strings, and help text can be assembled - from them - * options set attributes of a passed-in object - * boolean options can have "negative aliases" -- eg. if - --quiet is the "negative alias" of --verbose, then "--quiet" - on the command line sets 'verbose' to false - """ - - def __init__(self, option_table=None): - # The option table is (currently) a list of tuples. The - # tuples may have 3 or four values: - # (long_option, short_option, help_string [, repeatable]) - # if an option takes an argument, its long_option should have '=' - # appended; short_option should just be a single character, no ':' - # in any case. If a long_option doesn't have a corresponding - # short_option, short_option should be None. 
All option tuples - # must have long options. - self.option_table = option_table - - # 'option_index' maps long option names to entries in the option - # table (ie. those 3-tuples). - self.option_index = {} - if self.option_table: - self._build_index() - - # 'alias' records (duh) alias options; {'foo': 'bar'} means - # --foo is an alias for --bar - self.alias = {} - - # 'negative_alias' keeps track of options that are the boolean - # opposite of some other option - self.negative_alias = {} - - # These keep track of the information in the option table. We - # don't actually populate these structures until we're ready to - # parse the command-line, since the 'option_table' passed in here - # isn't necessarily the final word. - self.short_opts = [] - self.long_opts = [] - self.short2long = {} - self.attr_name = {} - self.takes_arg = {} - - # And 'option_order' is filled up in 'getopt()'; it records the - # original order of options (and their values) on the command-line, - # but expands short options, converts aliases, etc. - self.option_order = [] - - def _build_index(self): - self.option_index.clear() - for option in self.option_table: - self.option_index[option[0]] = option - - def set_option_table(self, option_table): - self.option_table = option_table - self._build_index() - - def add_option(self, long_option, short_option=None, help_string=None): - if long_option in self.option_index: - raise DistutilsGetoptError( - "option conflict: already an option '%s'" % long_option) - else: - option = (long_option, short_option, help_string) - self.option_table.append(option) - self.option_index[long_option] = option - - def has_option(self, long_option): - """Return true if the option table for this parser has an - option with long name 'long_option'.""" - return long_option in self.option_index - - def get_attr_name(self, long_option): - """Translate long option name 'long_option' to the form it - has as an attribute of some object: ie., translate hyphens - to underscores.""" - return long_option.translate(longopt_xlate) - - def _check_alias_dict(self, aliases, what): - assert isinstance(aliases, dict) - for (alias, opt) in aliases.items(): - if alias not in self.option_index: - raise DistutilsGetoptError(("invalid %s '%s': " - "option '%s' not defined") % (what, alias, alias)) - if opt not in self.option_index: - raise DistutilsGetoptError(("invalid %s '%s': " - "aliased option '%s' not defined") % (what, alias, opt)) - - def set_aliases(self, alias): - """Set the aliases for this option parser.""" - self._check_alias_dict(alias, "alias") - self.alias = alias - - def set_negative_aliases(self, negative_alias): - """Set the negative aliases for this option parser. - 'negative_alias' should be a dictionary mapping option names to - option names, both the key and value must already be defined - in the option table.""" - self._check_alias_dict(negative_alias, "negative alias") - self.negative_alias = negative_alias - - def _grok_option_table(self): - """Populate the various data structures that keep tabs on the - option table. Called by 'getopt()' before it can do anything - worthwhile. 
- """ - self.long_opts = [] - self.short_opts = [] - self.short2long.clear() - self.repeat = {} - - for option in self.option_table: - if len(option) == 3: - long, short, help = option - repeat = 0 - elif len(option) == 4: - long, short, help, repeat = option - else: - # the option table is part of the code, so simply - # assert that it is correct - raise ValueError("invalid option tuple: %r" % (option,)) - - # Type- and value-check the option names - if not isinstance(long, str) or len(long) < 2: - raise DistutilsGetoptError(("invalid long option '%s': " - "must be a string of length >= 2") % long) - - if (not ((short is None) or - (isinstance(short, str) and len(short) == 1))): - raise DistutilsGetoptError("invalid short option '%s': " - "must a single character or None" % short) - - self.repeat[long] = repeat - self.long_opts.append(long) - - if long[-1] == '=': # option takes an argument? - if short: short = short + ':' - long = long[0:-1] - self.takes_arg[long] = 1 - else: - # Is option is a "negative alias" for some other option (eg. - # "quiet" == "!verbose")? - alias_to = self.negative_alias.get(long) - if alias_to is not None: - if self.takes_arg[alias_to]: - raise DistutilsGetoptError( - "invalid negative alias '%s': " - "aliased option '%s' takes a value" - % (long, alias_to)) - - self.long_opts[-1] = long # XXX redundant?! - self.takes_arg[long] = 0 - - # If this is an alias option, make sure its "takes arg" flag is - # the same as the option it's aliased to. - alias_to = self.alias.get(long) - if alias_to is not None: - if self.takes_arg[long] != self.takes_arg[alias_to]: - raise DistutilsGetoptError( - "invalid alias '%s': inconsistent with " - "aliased option '%s' (one of them takes a value, " - "the other doesn't" - % (long, alias_to)) - - # Now enforce some bondage on the long option name, so we can - # later translate it to an attribute name on some object. Have - # to do this a bit late to make sure we've removed any trailing - # '='. - if not longopt_re.match(long): - raise DistutilsGetoptError( - "invalid long option name '%s' " - "(must be letters, numbers, hyphens only" % long) - - self.attr_name[long] = self.get_attr_name(long) - if short: - self.short_opts.append(short) - self.short2long[short[0]] = long - - def getopt(self, args=None, object=None): - """Parse command-line options in args. Store as attributes on object. - - If 'args' is None or not supplied, uses 'sys.argv[1:]'. If - 'object' is None or not supplied, creates a new OptionDummy - object, stores option values there, and returns a tuple (args, - object). If 'object' is supplied, it is modified in place and - 'getopt()' just returns 'args'; in both cases, the returned - 'args' is a modified copy of the passed-in 'args' list, which - is left untouched. - """ - if args is None: - args = sys.argv[1:] - if object is None: - object = OptionDummy() - created_object = True - else: - created_object = False - - self._grok_option_table() - - short_opts = ' '.join(self.short_opts) - try: - opts, args = getopt.getopt(args, short_opts, self.long_opts) - except getopt.error as msg: - raise DistutilsArgError(msg) - - for opt, val in opts: - if len(opt) == 2 and opt[0] == '-': # it's a short option - opt = self.short2long[opt[1]] - else: - assert len(opt) > 2 and opt[:2] == '--' - opt = opt[2:] - - alias = self.alias.get(opt) - if alias: - opt = alias - - if not self.takes_arg[opt]: # boolean option? 
- assert val == '', "boolean option can't have value" - alias = self.negative_alias.get(opt) - if alias: - opt = alias - val = 0 - else: - val = 1 - - attr = self.attr_name[opt] - # The only repeating option at the moment is 'verbose'. - # It has a negative option -q quiet, which should set verbose = 0. - if val and self.repeat.get(attr) is not None: - val = getattr(object, attr, 0) + 1 - setattr(object, attr, val) - self.option_order.append((opt, val)) - - # for opts - if created_object: - return args, object - else: - return args - - def get_option_order(self): - """Returns the list of (option, value) tuples processed by the - previous run of 'getopt()'. Raises RuntimeError if - 'getopt()' hasn't been called yet. - """ - if self.option_order is None: - raise RuntimeError("'getopt()' hasn't been called yet") - else: - return self.option_order - - def generate_help(self, header=None): - """Generate help text (a list of strings, one per suggested line of - output) from the option table for this FancyGetopt object. - """ - # Blithely assume the option table is good: probably wouldn't call - # 'generate_help()' unless you've already called 'getopt()'. - - # First pass: determine maximum length of long option names - max_opt = 0 - for option in self.option_table: - long = option[0] - short = option[1] - l = len(long) - if long[-1] == '=': - l = l - 1 - if short is not None: - l = l + 5 # " (-x)" where short == 'x' - if l > max_opt: - max_opt = l - - opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter - - # Typical help block looks like this: - # --foo controls foonabulation - # Help block for longest option looks like this: - # --flimflam set the flim-flam level - # and with wrapped text: - # --flimflam set the flim-flam level (must be between - # 0 and 100, except on Tuesdays) - # Options with short names will have the short name shown (but - # it doesn't contribute to max_opt): - # --foo (-f) controls foonabulation - # If adding the short option would make the left column too wide, - # we push the explanation off to the next line - # --flimflam (-l) - # set the flim-flam level - # Important parameters: - # - 2 spaces before option block start lines - # - 2 dashes for each long option name - # - min. 2 spaces between option and explanation (gutter) - # - 5 characters (incl. space) for short option name - - # Now generate lines of help text. (If 80 columns were good enough - # for Jesus, then 78 columns are good enough for me!) 
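A short sketch of how a command class drove FancyGetopt (hypothetical option names, assuming distutils.fancy_getopt is still importable); the fourth tuple element marks a repeatable option, and set_negative_aliases() makes --quiet reset 'verbose' to false:

    from distutils.fancy_getopt import FancyGetopt

    options = [
        ("verbose", "v", "run verbosely", 1),   # repeatable: each -v increments the count
        ("quiet", "q", "run quietly"),
        ("output=", "o", "write results to FILE"),
    ]
    parser = FancyGetopt(options)
    parser.set_negative_aliases({"quiet": "verbose"})

    args, opts = parser.getopt(["-v", "-v", "--output=out.txt", "leftover"])
    print(args)          # ['leftover']
    print(opts.verbose)  # 2
    print(opts.output)   # out.txt
    print("\n".join(parser.generate_help("Options:")))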
- line_width = 78 - text_width = line_width - opt_width - big_indent = ' ' * opt_width - if header: - lines = [header] - else: - lines = ['Option summary:'] - - for option in self.option_table: - long, short, help = option[:3] - text = wrap_text(help, text_width) - if long[-1] == '=': - long = long[0:-1] - - # Case 1: no short option at all (makes life easy) - if short is None: - if text: - lines.append(" --%-*s %s" % (max_opt, long, text[0])) - else: - lines.append(" --%-*s " % (max_opt, long)) - - # Case 2: we have a short option, so we have to include it - # just after the long option - else: - opt_names = "%s (-%s)" % (long, short) - if text: - lines.append(" --%-*s %s" % - (max_opt, opt_names, text[0])) - else: - lines.append(" --%-*s" % opt_names) - - for l in text[1:]: - lines.append(big_indent + l) - return lines - - def print_help(self, header=None, file=None): - if file is None: - file = sys.stdout - for line in self.generate_help(header): - file.write(line + "\n") - - -def fancy_getopt(options, negative_opt, object, args): - parser = FancyGetopt(options) - parser.set_negative_aliases(negative_opt) - return parser.getopt(args, object) - - -WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace} - -def wrap_text(text, width): - """wrap_text(text : string, width : int) -> [string] - - Split 'text' into multiple lines of no more than 'width' characters - each, and return the list of strings that results. - """ - if text is None: - return [] - if len(text) <= width: - return [text] - - text = text.expandtabs() - text = text.translate(WS_TRANS) - chunks = re.split(r'( +|-+)', text) - chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings - lines = [] - - while chunks: - cur_line = [] # list of chunks (to-be-joined) - cur_len = 0 # length of current line - - while chunks: - l = len(chunks[0]) - if cur_len + l <= width: # can squeeze (at least) this chunk in - cur_line.append(chunks[0]) - del chunks[0] - cur_len = cur_len + l - else: # this line is full - # drop last chunk if all space - if cur_line and cur_line[-1][0] == ' ': - del cur_line[-1] - break - - if chunks: # any chunks left to process? - # if the current line is still empty, then we had a single - # chunk that's too big too fit on a line -- so we break - # down and break it up at the line width - if cur_len == 0: - cur_line.append(chunks[0][0:width]) - chunks[0] = chunks[0][width:] - - # all-whitespace chunks at the end of a line can be discarded - # (and we know from the re.split above that if a chunk has - # *any* whitespace, it is *all* whitespace) - if chunks[0][0] == ' ': - del chunks[0] - - # and store this line in the list-of-all-lines -- as a single - # string, of course! - lines.append(''.join(cur_line)) - - return lines - - -def translate_longopt(opt): - """Convert a long option name to a valid Python identifier by - changing "-" to "_". - """ - return opt.translate(longopt_xlate) - - -class OptionDummy: - """Dummy class just used as a place to hold command-line option - values as instance attributes.""" - - def __init__(self, options=[]): - """Create a new OptionDummy instance. The attributes listed in - 'options' will be initialized to None.""" - for opt in options: - setattr(self, opt, None) - - -if __name__ == "__main__": - text = """\ -Tra-la-la, supercalifragilisticexpialidocious. -How *do* you spell that odd word, anyways? 
-(Someone ask Mary -- she'll know [or she'll -say, "How should I know?"].)""" - - for w in (10, 20, 30, 40): - print("width: %d" % w) - print("\n".join(wrap_text(text, w))) - print() diff --git a/Lib/distutils/file_util.py b/Lib/distutils/file_util.py deleted file mode 100644 index b3fee35a6cce812..000000000000000 --- a/Lib/distutils/file_util.py +++ /dev/null @@ -1,238 +0,0 @@ -"""distutils.file_util - -Utility functions for operating on single files. -""" - -import os -from distutils.errors import DistutilsFileError -from distutils import log - -# for generating verbose output in 'copy_file()' -_copy_action = { None: 'copying', - 'hard': 'hard linking', - 'sym': 'symbolically linking' } - - -def _copy_file_contents(src, dst, buffer_size=16*1024): - """Copy the file 'src' to 'dst'; both must be filenames. Any error - opening either file, reading from 'src', or writing to 'dst', raises - DistutilsFileError. Data is read/written in chunks of 'buffer_size' - bytes (default 16k). No attempt is made to handle anything apart from - regular files. - """ - # Stolen from shutil module in the standard library, but with - # custom error-handling added. - fsrc = None - fdst = None - try: - try: - fsrc = open(src, 'rb') - except OSError as e: - raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror)) - - if os.path.exists(dst): - try: - os.unlink(dst) - except OSError as e: - raise DistutilsFileError( - "could not delete '%s': %s" % (dst, e.strerror)) - - try: - fdst = open(dst, 'wb') - except OSError as e: - raise DistutilsFileError( - "could not create '%s': %s" % (dst, e.strerror)) - - while True: - try: - buf = fsrc.read(buffer_size) - except OSError as e: - raise DistutilsFileError( - "could not read from '%s': %s" % (src, e.strerror)) - - if not buf: - break - - try: - fdst.write(buf) - except OSError as e: - raise DistutilsFileError( - "could not write to '%s': %s" % (dst, e.strerror)) - finally: - if fdst: - fdst.close() - if fsrc: - fsrc.close() - -def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0, - link=None, verbose=1, dry_run=0): - """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is - copied there with the same name; otherwise, it must be a filename. (If - the file exists, it will be ruthlessly clobbered.) If 'preserve_mode' - is true (the default), the file's mode (type and permission bits, or - whatever is analogous on the current platform) is copied. If - 'preserve_times' is true (the default), the last-modified and - last-access times are copied as well. If 'update' is true, 'src' will - only be copied if 'dst' does not exist, or if 'dst' does exist but is - older than 'src'. - - 'link' allows you to make hard links (os.link) or symbolic links - (os.symlink) instead of copying: set it to "hard" or "sym"; if it is - None (the default), files are copied. Don't set 'link' on systems that - don't support it: 'copy_file()' doesn't check if hard or symbolic - linking is available. If hardlink fails, falls back to - _copy_file_contents(). - - Under Mac OS, uses the native file copy function in macostools; on - other systems, uses '_copy_file_contents()' to copy file contents. - - Return a tuple (dest_name, copied): 'dest_name' is the actual name of - the output file, and 'copied' is true if the file was copied (or would - have been copied, if 'dry_run' true). - """ - # XXX if the destination file already exists, we clobber it if - # copying, but blow up if linking. Hmmm. And I don't know what - # macostools.copyfile() does. 
Should definitely be consistent, and - # should probably blow up if destination exists and we would be - # changing it (ie. it's not already a hard/soft link to src OR - # (not update) and (src newer than dst). - - from distutils.dep_util import newer - from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE - - if not os.path.isfile(src): - raise DistutilsFileError( - "can't copy '%s': doesn't exist or not a regular file" % src) - - if os.path.isdir(dst): - dir = dst - dst = os.path.join(dst, os.path.basename(src)) - else: - dir = os.path.dirname(dst) - - if update and not newer(src, dst): - if verbose >= 1: - log.debug("not copying %s (output up-to-date)", src) - return (dst, 0) - - try: - action = _copy_action[link] - except KeyError: - raise ValueError("invalid value '%s' for 'link' argument" % link) - - if verbose >= 1: - if os.path.basename(dst) == os.path.basename(src): - log.info("%s %s -> %s", action, src, dir) - else: - log.info("%s %s -> %s", action, src, dst) - - if dry_run: - return (dst, 1) - - # If linking (hard or symbolic), use the appropriate system call - # (Unix only, of course, but that's the caller's responsibility) - elif link == 'hard': - if not (os.path.exists(dst) and os.path.samefile(src, dst)): - try: - os.link(src, dst) - return (dst, 1) - except OSError: - # If hard linking fails, fall back on copying file - # (some special filesystems don't support hard linking - # even under Unix, see issue #8876). - pass - elif link == 'sym': - if not (os.path.exists(dst) and os.path.samefile(src, dst)): - os.symlink(src, dst) - return (dst, 1) - - # Otherwise (non-Mac, not linking), copy the file contents and - # (optionally) copy the times and mode. - _copy_file_contents(src, dst) - if preserve_mode or preserve_times: - st = os.stat(src) - - # According to David Ascher , utime() should be done - # before chmod() (at least under NT). - if preserve_times: - os.utime(dst, (st[ST_ATIME], st[ST_MTIME])) - if preserve_mode: - os.chmod(dst, S_IMODE(st[ST_MODE])) - - return (dst, 1) - - -# XXX I suspect this is Unix-specific -- need porting help! -def move_file (src, dst, - verbose=1, - dry_run=0): - - """Move a file 'src' to 'dst'. If 'dst' is a directory, the file will - be moved into it with the same name; otherwise, 'src' is just renamed - to 'dst'. Return the new full name of the file. - - Handles cross-device moves on Unix using 'copy_file()'. What about - other systems??? 
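A hedged sketch of these helpers in use (hypothetical file names, assuming distutils.file_util is still importable); write_file() appends a newline to each entry, copy_file() returns the destination plus a "copied" flag, and move_file() falls back to copy-and-delete for cross-device moves:

    import os, tempfile
    from distutils.file_util import write_file, copy_file, move_file

    workdir = tempfile.mkdtemp()
    src = os.path.join(workdir, "a.txt")
    write_file(src, ["first line", "second line"])

    dest, copied = copy_file(src, os.path.join(workdir, "b.txt"), verbose=0)
    print(os.path.basename(dest), copied)        # b.txt 1

    moved = move_file(dest, os.path.join(workdir, "c.txt"), verbose=0)
    print(os.path.basename(moved))               # c.txt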
- """ - from os.path import exists, isfile, isdir, basename, dirname - import errno - - if verbose >= 1: - log.info("moving %s -> %s", src, dst) - - if dry_run: - return dst - - if not isfile(src): - raise DistutilsFileError("can't move '%s': not a regular file" % src) - - if isdir(dst): - dst = os.path.join(dst, basename(src)) - elif exists(dst): - raise DistutilsFileError( - "can't move '%s': destination '%s' already exists" % - (src, dst)) - - if not isdir(dirname(dst)): - raise DistutilsFileError( - "can't move '%s': destination '%s' not a valid path" % - (src, dst)) - - copy_it = False - try: - os.rename(src, dst) - except OSError as e: - (num, msg) = e.args - if num == errno.EXDEV: - copy_it = True - else: - raise DistutilsFileError( - "couldn't move '%s' to '%s': %s" % (src, dst, msg)) - - if copy_it: - copy_file(src, dst, verbose=verbose) - try: - os.unlink(src) - except OSError as e: - (num, msg) = e.args - try: - os.unlink(dst) - except OSError: - pass - raise DistutilsFileError( - "couldn't move '%s' to '%s' by copy/delete: " - "delete '%s' failed: %s" - % (src, dst, src, msg)) - return dst - - -def write_file (filename, contents): - """Create a file with the specified name and write 'contents' (a - sequence of strings without line terminators) to it. - """ - f = open(filename, "w") - try: - for line in contents: - f.write(line + "\n") - finally: - f.close() diff --git a/Lib/distutils/filelist.py b/Lib/distutils/filelist.py deleted file mode 100644 index c92d5fdba393bb4..000000000000000 --- a/Lib/distutils/filelist.py +++ /dev/null @@ -1,327 +0,0 @@ -"""distutils.filelist - -Provides the FileList class, used for poking about the filesystem -and building lists of files. -""" - -import os, re -import fnmatch -import functools -from distutils.util import convert_path -from distutils.errors import DistutilsTemplateError, DistutilsInternalError -from distutils import log - -class FileList: - """A list of files built by on exploring the filesystem and filtered by - applying various patterns to what we find there. - - Instance attributes: - dir - directory from which files will be taken -- only used if - 'allfiles' not supplied to constructor - files - list of filenames currently being built/filtered/manipulated - allfiles - complete list of files under consideration (ie. without any - filtering applied) - """ - - def __init__(self, warn=None, debug_print=None): - # ignore argument to FileList, but keep them for backwards - # compatibility - self.allfiles = None - self.files = [] - - def set_allfiles(self, allfiles): - self.allfiles = allfiles - - def findall(self, dir=os.curdir): - self.allfiles = findall(dir) - - def debug_print(self, msg): - """Print 'msg' to stdout if the global DEBUG (taken from the - DISTUTILS_DEBUG environment variable) flag is true. - """ - from distutils.debug import DEBUG - if DEBUG: - print(msg) - - # -- List-like methods --------------------------------------------- - - def append(self, item): - self.files.append(item) - - def extend(self, items): - self.files.extend(items) - - def sort(self): - # Not a strict lexical sort! - sortable_files = sorted(map(os.path.split, self.files)) - self.files = [] - for sort_tuple in sortable_files: - self.files.append(os.path.join(*sort_tuple)) - - - # -- Other miscellaneous utility methods --------------------------- - - def remove_duplicates(self): - # Assumes list has been sorted! 
- for i in range(len(self.files) - 1, 0, -1): - if self.files[i] == self.files[i - 1]: - del self.files[i] - - - # -- "File template" methods --------------------------------------- - - def _parse_template_line(self, line): - words = line.split() - action = words[0] - - patterns = dir = dir_pattern = None - - if action in ('include', 'exclude', - 'global-include', 'global-exclude'): - if len(words) < 2: - raise DistutilsTemplateError( - "'%s' expects ..." % action) - patterns = [convert_path(w) for w in words[1:]] - elif action in ('recursive-include', 'recursive-exclude'): - if len(words) < 3: - raise DistutilsTemplateError( - "'%s' expects ..." % action) - dir = convert_path(words[1]) - patterns = [convert_path(w) for w in words[2:]] - elif action in ('graft', 'prune'): - if len(words) != 2: - raise DistutilsTemplateError( - "'%s' expects a single " % action) - dir_pattern = convert_path(words[1]) - else: - raise DistutilsTemplateError("unknown action '%s'" % action) - - return (action, patterns, dir, dir_pattern) - - def process_template_line(self, line): - # Parse the line: split it up, make sure the right number of words - # is there, and return the relevant words. 'action' is always - # defined: it's the first word of the line. Which of the other - # three are defined depends on the action; it'll be either - # patterns, (dir and patterns), or (dir_pattern). - (action, patterns, dir, dir_pattern) = self._parse_template_line(line) - - # OK, now we know that the action is valid and we have the - # right number of words on the line for that action -- so we - # can proceed with minimal error-checking. - if action == 'include': - self.debug_print("include " + ' '.join(patterns)) - for pattern in patterns: - if not self.include_pattern(pattern, anchor=1): - log.warn("warning: no files found matching '%s'", - pattern) - - elif action == 'exclude': - self.debug_print("exclude " + ' '.join(patterns)) - for pattern in patterns: - if not self.exclude_pattern(pattern, anchor=1): - log.warn(("warning: no previously-included files " - "found matching '%s'"), pattern) - - elif action == 'global-include': - self.debug_print("global-include " + ' '.join(patterns)) - for pattern in patterns: - if not self.include_pattern(pattern, anchor=0): - log.warn(("warning: no files found matching '%s' " - "anywhere in distribution"), pattern) - - elif action == 'global-exclude': - self.debug_print("global-exclude " + ' '.join(patterns)) - for pattern in patterns: - if not self.exclude_pattern(pattern, anchor=0): - log.warn(("warning: no previously-included files matching " - "'%s' found anywhere in distribution"), - pattern) - - elif action == 'recursive-include': - self.debug_print("recursive-include %s %s" % - (dir, ' '.join(patterns))) - for pattern in patterns: - if not self.include_pattern(pattern, prefix=dir): - log.warn(("warning: no files found matching '%s' " - "under directory '%s'"), - pattern, dir) - - elif action == 'recursive-exclude': - self.debug_print("recursive-exclude %s %s" % - (dir, ' '.join(patterns))) - for pattern in patterns: - if not self.exclude_pattern(pattern, prefix=dir): - log.warn(("warning: no previously-included files matching " - "'%s' found under directory '%s'"), - pattern, dir) - - elif action == 'graft': - self.debug_print("graft " + dir_pattern) - if not self.include_pattern(None, prefix=dir_pattern): - log.warn("warning: no directories found matching '%s'", - dir_pattern) - - elif action == 'prune': - self.debug_print("prune " + dir_pattern) - if not 
self.exclude_pattern(None, prefix=dir_pattern): - log.warn(("no previously-included directories found " - "matching '%s'"), dir_pattern) - else: - raise DistutilsInternalError( - "this cannot happen: invalid action '%s'" % action) - - - # -- Filtering/selection methods ----------------------------------- - - def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): - """Select strings (presumably filenames) from 'self.files' that - match 'pattern', a Unix-style wildcard (glob) pattern. Patterns - are not quite the same as implemented by the 'fnmatch' module: '*' - and '?' match non-special characters, where "special" is platform- - dependent: slash on Unix; colon, slash, and backslash on - DOS/Windows; and colon on Mac OS. - - If 'anchor' is true (the default), then the pattern match is more - stringent: "*.py" will match "foo.py" but not "foo/bar.py". If - 'anchor' is false, both of these will match. - - If 'prefix' is supplied, then only filenames starting with 'prefix' - (itself a pattern) and ending with 'pattern', with anything in between - them, will match. 'anchor' is ignored in this case. - - If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and - 'pattern' is assumed to be either a string containing a regex or a - regex object -- no translation is done, the regex is just compiled - and used as-is. - - Selected strings will be added to self.files. - - Return True if files are found, False otherwise. - """ - # XXX docstring lying about what the special chars are? - files_found = False - pattern_re = translate_pattern(pattern, anchor, prefix, is_regex) - self.debug_print("include_pattern: applying regex r'%s'" % - pattern_re.pattern) - - # delayed loading of allfiles list - if self.allfiles is None: - self.findall() - - for name in self.allfiles: - if pattern_re.search(name): - self.debug_print(" adding " + name) - self.files.append(name) - files_found = True - return files_found - - - def exclude_pattern (self, pattern, - anchor=1, prefix=None, is_regex=0): - """Remove strings (presumably filenames) from 'files' that match - 'pattern'. Other parameters are the same as for - 'include_pattern()', above. - The list 'self.files' is modified in place. - Return True if files are found, False otherwise. - """ - files_found = False - pattern_re = translate_pattern(pattern, anchor, prefix, is_regex) - self.debug_print("exclude_pattern: applying regex r'%s'" % - pattern_re.pattern) - for i in range(len(self.files)-1, -1, -1): - if pattern_re.search(self.files[i]): - self.debug_print(" removing " + self.files[i]) - del self.files[i] - files_found = True - return files_found - - -# ---------------------------------------------------------------------- -# Utility functions - -def _find_all_simple(path): - """ - Find all files under 'path' - """ - results = ( - os.path.join(base, file) - for base, dirs, files in os.walk(path, followlinks=True) - for file in files - ) - return filter(os.path.isfile, results) - - -def findall(dir=os.curdir): - """ - Find all files under 'dir' and return the list of full filenames. - Unless dir is '.', return full filenames with dir prepended. - """ - files = _find_all_simple(dir) - if dir == os.curdir: - make_rel = functools.partial(os.path.relpath, start=dir) - files = map(make_rel, files) - return list(files) - - -def glob_to_re(pattern): - """Translate a shell-like glob pattern to a regular expression; return - a string containing the regex. 
Differs from 'fnmatch.translate()' in - that '*' does not match "special characters" (which are - platform-specific). - """ - pattern_re = fnmatch.translate(pattern) - - # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which - # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, - # and by extension they shouldn't match such "special characters" under - # any OS. So change all non-escaped dots in the RE to match any - # character except the special characters (currently: just os.sep). - sep = os.sep - if os.sep == '\\': - # we're using a regex to manipulate a regex, so we need - # to escape the backslash twice - sep = r'\\\\' - escaped = r'\1[^%s]' % sep - pattern_re = re.sub(r'((? 2**32) -if NATIVE_WIN64: - # Visual C++ is a 32-bit application, so we need to look in - # the corresponding registry branch, if we're running a - # 64-bit Python on Win64 - VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f" - WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows" - NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework" -else: - VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f" - WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows" - NET_BASE = r"Software\Microsoft\.NETFramework" - -# A map keyed by get_platform() return values to values accepted by -# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is -# the param to cross-compile on x86 targeting amd64.) -PLAT_TO_VCVARS = { - 'win32' : 'x86', - 'win-amd64' : 'amd64', -} - -class Reg: - """Helper class to read values from the registry - """ - - def get_value(cls, path, key): - for base in HKEYS: - d = cls.read_values(base, path) - if d and key in d: - return d[key] - raise KeyError(key) - get_value = classmethod(get_value) - - def read_keys(cls, base, key): - """Return list of registry keys.""" - try: - handle = RegOpenKeyEx(base, key) - except RegError: - return None - L = [] - i = 0 - while True: - try: - k = RegEnumKey(handle, i) - except RegError: - break - L.append(k) - i += 1 - return L - read_keys = classmethod(read_keys) - - def read_values(cls, base, key): - """Return dict of registry keys and values. - - All names are converted to lowercase. 
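The PLAT_TO_VCVARS table above feeds the architecture argument that MSVCCompiler.initialize() later passes to vcvarsall.bat; a pure-Python sketch of that combination logic (mirroring the code further below, not an official API):

    PLAT_TO_VCVARS = {"win32": "x86", "win-amd64": "amd64"}

    def vcvars_arch(build_platform, target_platform):
        # Native build, or cross-compiling "down" to win32: use the target directly.
        if target_platform == build_platform or target_platform == "win32":
            return PLAT_TO_VCVARS[target_platform]
        # Otherwise cross-compile, e.g. a win32 Python building win-amd64 extensions.
        return PLAT_TO_VCVARS[build_platform] + "_" + PLAT_TO_VCVARS[target_platform]

    print(vcvars_arch("win32", "win32"))      # x86
    print(vcvars_arch("win32", "win-amd64"))  # x86_amd64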
- """ - try: - handle = RegOpenKeyEx(base, key) - except RegError: - return None - d = {} - i = 0 - while True: - try: - name, value, type = RegEnumValue(handle, i) - except RegError: - break - name = name.lower() - d[cls.convert_mbcs(name)] = cls.convert_mbcs(value) - i += 1 - return d - read_values = classmethod(read_values) - - def convert_mbcs(s): - dec = getattr(s, "decode", None) - if dec is not None: - try: - s = dec("mbcs") - except UnicodeError: - pass - return s - convert_mbcs = staticmethod(convert_mbcs) - -class MacroExpander: - - def __init__(self, version): - self.macros = {} - self.vsbase = VS_BASE % version - self.load_macros(version) - - def set_macro(self, macro, path, key): - self.macros["$(%s)" % macro] = Reg.get_value(path, key) - - def load_macros(self, version): - self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir") - self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir") - self.set_macro("FrameworkDir", NET_BASE, "installroot") - try: - if version >= 8.0: - self.set_macro("FrameworkSDKDir", NET_BASE, - "sdkinstallrootv2.0") - else: - raise KeyError("sdkinstallrootv2.0") - except KeyError: - raise DistutilsPlatformError( - """Python was built with Visual Studio 2008; -extensions must be built with a compiler than can generate compatible binaries. -Visual Studio 2008 was not found on this system. If you have Cygwin installed, -you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") - - if version >= 9.0: - self.set_macro("FrameworkVersion", self.vsbase, "clr version") - self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder") - else: - p = r"Software\Microsoft\NET Framework Setup\Product" - for base in HKEYS: - try: - h = RegOpenKeyEx(base, p) - except RegError: - continue - key = RegEnumKey(h, 0) - d = Reg.get_value(base, r"%s\%s" % (p, key)) - self.macros["$(FrameworkVersion)"] = d["version"] - - def sub(self, s): - for k, v in self.macros.items(): - s = s.replace(k, v) - return s - -def get_build_version(): - """Return the version of MSVC that was used to build Python. - - For Python 2.3 and up, the version number is included in - sys.version. For earlier versions, assume the compiler is MSVC 6. - """ - prefix = "MSC v." - i = sys.version.find(prefix) - if i == -1: - return 6 - i = i + len(prefix) - s, rest = sys.version[i:].split(" ", 1) - majorVersion = int(s[:-2]) - 6 - if majorVersion >= 13: - # v13 was skipped and should be v14 - majorVersion += 1 - minorVersion = int(s[2:3]) / 10.0 - # I don't think paths are affected by minor version in version 6 - if majorVersion == 6: - minorVersion = 0 - if majorVersion >= 6: - return majorVersion + minorVersion - # else we don't know what version of the compiler this is - return None - -def normalize_and_reduce_paths(paths): - """Return a list of normalized paths with duplicates removed. - - The current order of paths is maintained. - """ - # Paths are normalized so things like: /a and /a/ aren't both preserved. - reduced_paths = [] - for p in paths: - np = os.path.normpath(p) - # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set. - if np not in reduced_paths: - reduced_paths.append(np) - return reduced_paths - -def removeDuplicates(variable): - """Remove duplicate values of an environment variable. 
- """ - oldList = variable.split(os.pathsep) - newList = [] - for i in oldList: - if i not in newList: - newList.append(i) - newVariable = os.pathsep.join(newList) - return newVariable - -def find_vcvarsall(version): - """Find the vcvarsall.bat file - - At first it tries to find the productdir of VS 2008 in the registry. If - that fails it falls back to the VS90COMNTOOLS env var. - """ - vsbase = VS_BASE % version - try: - productdir = Reg.get_value(r"%s\Setup\VC" % vsbase, - "productdir") - except KeyError: - log.debug("Unable to find productdir in registry") - productdir = None - - if not productdir or not os.path.isdir(productdir): - toolskey = "VS%0.f0COMNTOOLS" % version - toolsdir = os.environ.get(toolskey, None) - - if toolsdir and os.path.isdir(toolsdir): - productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC") - productdir = os.path.abspath(productdir) - if not os.path.isdir(productdir): - log.debug("%s is not a valid directory" % productdir) - return None - else: - log.debug("Env var %s is not set or invalid" % toolskey) - if not productdir: - log.debug("No productdir found") - return None - vcvarsall = os.path.join(productdir, "vcvarsall.bat") - if os.path.isfile(vcvarsall): - return vcvarsall - log.debug("Unable to find vcvarsall.bat") - return None - -def query_vcvarsall(version, arch="x86"): - """Launch vcvarsall.bat and read the settings from its environment - """ - vcvarsall = find_vcvarsall(version) - interesting = {"include", "lib", "libpath", "path"} - result = {} - - if vcvarsall is None: - raise DistutilsPlatformError("Unable to find vcvarsall.bat") - log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version) - popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - try: - stdout, stderr = popen.communicate() - if popen.wait() != 0: - raise DistutilsPlatformError(stderr.decode("mbcs")) - - stdout = stdout.decode("mbcs") - for line in stdout.split("\n"): - line = Reg.convert_mbcs(line) - if '=' not in line: - continue - line = line.strip() - key, value = line.split('=', 1) - key = key.lower() - if key in interesting: - if value.endswith(os.pathsep): - value = value[:-1] - result[key] = removeDuplicates(value) - - finally: - popen.stdout.close() - popen.stderr.close() - - if len(result) != len(interesting): - raise ValueError(str(list(result.keys()))) - - return result - -# More globals -VERSION = get_build_version() -if VERSION < 8.0: - raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION) -# MACROS = MacroExpander(VERSION) - -class MSVCCompiler(CCompiler) : - """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" - - compiler_type = 'msvc' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. - executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - _rc_extensions = ['.rc'] - _mc_extensions = ['.mc'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. 
- src_extensions = (_c_extensions + _cpp_extensions + - _rc_extensions + _mc_extensions) - res_extension = '.res' - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - def __init__(self, verbose=0, dry_run=0, force=0): - CCompiler.__init__ (self, verbose, dry_run, force) - self.__version = VERSION - self.__root = r"Software\Microsoft\VisualStudio" - # self.__macros = MACROS - self.__paths = [] - # target platform (.plat_name is consistent with 'bdist') - self.plat_name = None - self.__arch = None # deprecated name - self.initialized = False - - def initialize(self, plat_name=None): - # multi-init means we would need to check platform same each time... - assert not self.initialized, "don't init multiple times" - if plat_name is None: - plat_name = get_platform() - # sanity check for platforms to prevent obscure errors later. - ok_plats = 'win32', 'win-amd64' - if plat_name not in ok_plats: - raise DistutilsPlatformError("--plat-name must be one of %s" % - (ok_plats,)) - - if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): - # Assume that the SDK set up everything alright; don't try to be - # smarter - self.cc = "cl.exe" - self.linker = "link.exe" - self.lib = "lib.exe" - self.rc = "rc.exe" - self.mc = "mc.exe" - else: - # On x86, 'vcvars32.bat amd64' creates an env that doesn't work; - # to cross compile, you use 'x86_amd64'. - # On AMD64, 'vcvars32.bat amd64' is a native build env; to cross - # compile use 'x86' (ie, it runs the x86 compiler directly) - if plat_name == get_platform() or plat_name == 'win32': - # native build or cross-compile to win32 - plat_spec = PLAT_TO_VCVARS[plat_name] - else: - # cross compile from win32 -> some 64bit - plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \ - PLAT_TO_VCVARS[plat_name] - - vc_env = query_vcvarsall(VERSION, plat_spec) - - self.__paths = vc_env['path'].split(os.pathsep) - os.environ['lib'] = vc_env['lib'] - os.environ['include'] = vc_env['include'] - - if len(self.__paths) == 0: - raise DistutilsPlatformError("Python was built with %s, " - "and extensions need to be built with the same " - "version of the compiler, but it isn't installed." 
- % self.__product) - - self.cc = self.find_exe("cl.exe") - self.linker = self.find_exe("link.exe") - self.lib = self.find_exe("lib.exe") - self.rc = self.find_exe("rc.exe") # resource compiler - self.mc = self.find_exe("mc.exe") # message compiler - #self.set_path_env_var('lib') - #self.set_path_env_var('include') - - # extend the MSVC path with the current path - try: - for p in os.environ['path'].split(';'): - self.__paths.append(p) - except KeyError: - pass - self.__paths = normalize_and_reduce_paths(self.__paths) - os.environ['path'] = ";".join(self.__paths) - - self.preprocess_options = None - if self.__arch == "x86": - self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', - '/DNDEBUG'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', - '/Z7', '/D_DEBUG'] - else: - # Win64 - self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' , - '/DNDEBUG'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', - '/Z7', '/D_DEBUG'] - - self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] - if self.__version >= 7: - self.ldflags_shared_debug = [ - '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' - ] - self.ldflags_static = [ '/nologo'] - - self.initialized = True - - # -- Worker methods ------------------------------------------------ - - def object_filenames(self, - source_filenames, - strip_dir=0, - output_dir=''): - # Copied from ccompiler.py, extended to return .res as 'object'-file - # for .rc input file - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - (base, ext) = os.path.splitext (src_name) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if ext not in self.src_extensions: - # Better to raise an exception instead of silently continuing - # and later complain about sources and targets having - # different lengths - raise CompileError ("Don't know how to compile %s" % src_name) - if strip_dir: - base = os.path.basename (base) - if ext in self._rc_extensions: - obj_names.append (os.path.join (output_dir, - base + self.res_extension)) - elif ext in self._mc_extensions: - obj_names.append (os.path.join (output_dir, - base + self.res_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - - def compile(self, sources, - output_dir=None, macros=None, include_dirs=None, debug=0, - extra_preargs=None, extra_postargs=None, depends=None): - - if not self.initialized: - self.initialize() - compile_info = self._setup_compile(output_dir, macros, include_dirs, - sources, depends, extra_postargs) - macros, objects, extra_postargs, pp_opts, build = compile_info - - compile_opts = extra_preargs or [] - compile_opts.append ('/c') - if debug: - compile_opts.extend(self.compile_options_debug) - else: - compile_opts.extend(self.compile_options) - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - if debug: - # pass the full pathname to MSVC in debug mode, - # this allows the debugger to find the source file - # without asking the user to browse for it - src = os.path.abspath(src) - - if ext in self._c_extensions: - input_opt = "/Tc" + src - elif ext in self._cpp_extensions: - input_opt = "/Tp" + src - elif ext in self._rc_extensions: - # compile .RC to .RES file - input_opt = src - output_opt = "/fo" + obj - try: - self.spawn([self.rc] + pp_opts + - [output_opt] + [input_opt]) - except DistutilsExecError as msg: - raise CompileError(msg) - 
continue - elif ext in self._mc_extensions: - # Compile .MC to .RC file to .RES file. - # * '-h dir' specifies the directory for the - # generated include file - # * '-r dir' specifies the target directory of the - # generated RC file and the binary message resource - # it includes - # - # For now (since there are no options to change this), - # we use the source-directory for the include file and - # the build directory for the RC file and message - # resources. This works at least for win32all. - h_dir = os.path.dirname(src) - rc_dir = os.path.dirname(obj) - try: - # first compile .MC to .RC and .H file - self.spawn([self.mc] + - ['-h', h_dir, '-r', rc_dir] + [src]) - base, _ = os.path.splitext (os.path.basename (src)) - rc_file = os.path.join (rc_dir, base + '.rc') - # then compile .RC to .RES file - self.spawn([self.rc] + - ["/fo" + obj] + [rc_file]) - - except DistutilsExecError as msg: - raise CompileError(msg) - continue - else: - # how to handle this file? - raise CompileError("Don't know how to compile %s to %s" - % (src, obj)) - - output_opt = "/Fo" + obj - try: - self.spawn([self.cc] + compile_opts + pp_opts + - [input_opt, output_opt] + - extra_postargs) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - - def create_static_lib(self, - objects, - output_libname, - output_dir=None, - debug=0, - target_lang=None): - - if not self.initialized: - self.initialize() - (objects, output_dir) = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, - output_dir=output_dir) - - if self._need_link(objects, output_filename): - lib_args = objects + ['/OUT:' + output_filename] - if debug: - pass # XXX what goes here? - try: - self.spawn([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - - def link(self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - - if not self.initialized: - self.initialize() - (objects, output_dir) = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, - runtime_library_dirs) - (libraries, library_dirs, runtime_library_dirs) = fixed_args - - if runtime_library_dirs: - self.warn ("I don't know what to do with 'runtime_library_dirs': " - + str (runtime_library_dirs)) - - lib_opts = gen_lib_options(self, - library_dirs, runtime_library_dirs, - libraries) - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - if target_desc == CCompiler.EXECUTABLE: - if debug: - ldflags = self.ldflags_shared_debug[1:] - else: - ldflags = self.ldflags_shared[1:] - else: - if debug: - ldflags = self.ldflags_shared_debug - else: - ldflags = self.ldflags_shared - - export_opts = [] - for sym in (export_symbols or []): - export_opts.append("/EXPORT:" + sym) - - ld_args = (ldflags + lib_opts + export_opts + - objects + ['/OUT:' + output_filename]) - - # The MSVC linker generates .lib and .exp files, which cannot be - # suppressed by any linker switches. The .lib files may even be - # needed! Make sure they are generated in the temporary build - # directory. Since they have different names for debug and release - # builds, they can go into the same directory. 
- build_temp = os.path.dirname(objects[0]) - if export_symbols is not None: - (dll_name, dll_ext) = os.path.splitext( - os.path.basename(output_filename)) - implib_file = os.path.join( - build_temp, - self.library_filename(dll_name)) - ld_args.append ('/IMPLIB:' + implib_file) - - self.manifest_setup_ldargs(output_filename, build_temp, ld_args) - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - self.mkpath(os.path.dirname(output_filename)) - try: - self.spawn([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - - # embed the manifest - # XXX - this is somewhat fragile - if mt.exe fails, distutils - # will still consider the DLL up-to-date, but it will not have a - # manifest. Maybe we should link to a temp file? OTOH, that - # implies a build environment error that shouldn't go undetected. - mfinfo = self.manifest_get_embed_info(target_desc, ld_args) - if mfinfo is not None: - mffilename, mfid = mfinfo - out_arg = '-outputresource:%s;%s' % (output_filename, mfid) - try: - self.spawn(['mt.exe', '-nologo', '-manifest', - mffilename, out_arg]) - except DistutilsExecError as msg: - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): - # If we need a manifest at all, an embedded manifest is recommended. - # See MSDN article titled - # "How to: Embed a Manifest Inside a C/C++ Application" - # (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx) - # Ask the linker to generate the manifest in the temp dir, so - # we can check it, and possibly embed it, later. - temp_manifest = os.path.join( - build_temp, - os.path.basename(output_filename) + ".manifest") - ld_args.append('/MANIFESTFILE:' + temp_manifest) - - def manifest_get_embed_info(self, target_desc, ld_args): - # If a manifest should be embedded, return a tuple of - # (manifest_filename, resource_id). Returns None if no manifest - # should be embedded. See http://bugs.python.org/issue7833 for why - # we want to avoid any manifest for extension modules if we can. - for arg in ld_args: - if arg.startswith("/MANIFESTFILE:"): - temp_manifest = arg.split(":", 1)[1] - break - else: - # no /MANIFESTFILE so nothing to do. - return None - if target_desc == CCompiler.EXECUTABLE: - # by default, executables always get the manifest with the - # CRT referenced. - mfid = 1 - else: - # Extension modules try and avoid any manifest if possible. - mfid = 2 - temp_manifest = self._remove_visual_c_ref(temp_manifest) - if temp_manifest is None: - return None - return temp_manifest, mfid - - def _remove_visual_c_ref(self, manifest_file): - try: - # Remove references to the Visual C runtime, so they will - # fall through to the Visual C dependency of Python.exe. - # This way, when installed for a restricted user (e.g. - # runtimes are not in WinSxS folder, but in Python's own - # folder), the runtimes do not need to be in every folder - # with .pyd's. - # Returns either the filename of the modified manifest or - # None if no manifest should be embedded. - manifest_f = open(manifest_file) - try: - manifest_buf = manifest_f.read() - finally: - manifest_f.close() - pattern = re.compile( - r"""|)""", - re.DOTALL) - manifest_buf = re.sub(pattern, "", manifest_buf) - pattern = r"\s*" - manifest_buf = re.sub(pattern, "", manifest_buf) - # Now see if any other assemblies are referenced - if not, we - # don't want a manifest embedded. 
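The XML text inside the two re patterns above was lost when this diff was rendered; the following self-contained sketch shows the same manifest-stripping technique on a toy manifest string, with patterns reconstructed approximately rather than quoted verbatim:

    import re

    manifest_buf = """<assembly>
      <dependentAssembly>
        <assemblyIdentity type="win32" name="Microsoft.VC90.CRT" version="9.0.21022.8"/>
      </dependentAssembly>
    </assembly>"""

    # Drop any reference to the Visual C runtime assembly ...
    manifest_buf = re.sub(
        r"""<assemblyIdentity.*?name=("|')Microsoft\.VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
        "", manifest_buf, flags=re.DOTALL)
    # ... then remove the now-empty <dependentAssembly> wrapper.
    manifest_buf = re.sub(r"<dependentAssembly>\s*</dependentAssembly>", "", manifest_buf)

    # If no other <assemblyIdentity> remains, the original code skipped embedding a manifest.
    needs_manifest = re.search(r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')""",
                               manifest_buf, re.DOTALL) is not None
    print(needs_manifest)   # False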
- pattern = re.compile( - r"""|)""", re.DOTALL) - if re.search(pattern, manifest_buf) is None: - return None - - manifest_f = open(manifest_file, 'w') - try: - manifest_f.write(manifest_buf) - return manifest_file - finally: - manifest_f.close() - except OSError: - pass - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function, in - # ccompiler.py. - - def library_dir_option(self, dir): - return "/LIBPATH:" + dir - - def runtime_library_dir_option(self, dir): - raise DistutilsPlatformError( - "don't know how to set runtime library search path for MSVC++") - - def library_option(self, lib): - return self.library_filename(lib) - - - def find_library_file(self, dirs, lib, debug=0): - # Prefer a debugging library if found (and requested), but deal - # with it if we don't have one. - if debug: - try_names = [lib + "_d", lib] - else: - try_names = [lib] - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename (name)) - if os.path.exists(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None - - # Helper methods for using the MSVC registry settings - - def find_exe(self, exe): - """Return path to an MSVC executable program. - - Tries to find the program in several places: first, one of the - MSVC program search paths from the registry; next, the directories - in the PATH environment variable. If any of those work, return an - absolute path that is known to exist. If none of them work, just - return the original program name, 'exe'. - """ - for p in self.__paths: - fn = os.path.join(os.path.abspath(p), exe) - if os.path.isfile(fn): - return fn - - # didn't find it; try existing path - for p in os.environ['Path'].split(';'): - fn = os.path.join(os.path.abspath(p),exe) - if os.path.isfile(fn): - return fn - - return exe diff --git a/Lib/distutils/msvccompiler.py b/Lib/distutils/msvccompiler.py deleted file mode 100644 index f0d04fdb7f41786..000000000000000 --- a/Lib/distutils/msvccompiler.py +++ /dev/null @@ -1,642 +0,0 @@ -"""distutils.msvccompiler - -Contains MSVCCompiler, an implementation of the abstract CCompiler class -for the Microsoft Visual Studio. 
-""" - -# Written by Perry Stoll -# hacked by Robin Becker and Thomas Heller to do a better job of -# finding DevStudio (through the registry) - -import sys, os -from distutils.errors import \ - DistutilsExecError, DistutilsPlatformError, \ - CompileError, LibError, LinkError -from distutils.ccompiler import \ - CCompiler, gen_lib_options -from distutils import log - -_can_read_reg = False -try: - import winreg - - _can_read_reg = True - hkey_mod = winreg - - RegOpenKeyEx = winreg.OpenKeyEx - RegEnumKey = winreg.EnumKey - RegEnumValue = winreg.EnumValue - RegError = winreg.error - -except ImportError: - try: - import win32api - import win32con - _can_read_reg = True - hkey_mod = win32con - - RegOpenKeyEx = win32api.RegOpenKeyEx - RegEnumKey = win32api.RegEnumKey - RegEnumValue = win32api.RegEnumValue - RegError = win32api.error - except ImportError: - log.info("Warning: Can't read registry to find the " - "necessary compiler setting\n" - "Make sure that Python modules winreg, " - "win32api or win32con are installed.") - -if _can_read_reg: - HKEYS = (hkey_mod.HKEY_USERS, - hkey_mod.HKEY_CURRENT_USER, - hkey_mod.HKEY_LOCAL_MACHINE, - hkey_mod.HKEY_CLASSES_ROOT) - -def read_keys(base, key): - """Return list of registry keys.""" - try: - handle = RegOpenKeyEx(base, key) - except RegError: - return None - L = [] - i = 0 - while True: - try: - k = RegEnumKey(handle, i) - except RegError: - break - L.append(k) - i += 1 - return L - -def read_values(base, key): - """Return dict of registry keys and values. - - All names are converted to lowercase. - """ - try: - handle = RegOpenKeyEx(base, key) - except RegError: - return None - d = {} - i = 0 - while True: - try: - name, value, type = RegEnumValue(handle, i) - except RegError: - break - name = name.lower() - d[convert_mbcs(name)] = convert_mbcs(value) - i += 1 - return d - -def convert_mbcs(s): - dec = getattr(s, "decode", None) - if dec is not None: - try: - s = dec("mbcs") - except UnicodeError: - pass - return s - -class MacroExpander: - def __init__(self, version): - self.macros = {} - self.load_macros(version) - - def set_macro(self, macro, path, key): - for base in HKEYS: - d = read_values(base, path) - if d: - self.macros["$(%s)" % macro] = d[key] - break - - def load_macros(self, version): - vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version - self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir") - self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir") - net = r"Software\Microsoft\.NETFramework" - self.set_macro("FrameworkDir", net, "installroot") - try: - if version > 7.0: - self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1") - else: - self.set_macro("FrameworkSDKDir", net, "sdkinstallroot") - except KeyError as exc: # - raise DistutilsPlatformError( - """Python was built with Visual Studio 2003; -extensions must be built with a compiler than can generate compatible binaries. -Visual Studio 2003 was not found on this system. If you have Cygwin installed, -you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") - - p = r"Software\Microsoft\NET Framework Setup\Product" - for base in HKEYS: - try: - h = RegOpenKeyEx(base, p) - except RegError: - continue - key = RegEnumKey(h, 0) - d = read_values(base, r"%s\%s" % (p, key)) - self.macros["$(FrameworkVersion)"] = d["version"] - - def sub(self, s): - for k, v in self.macros.items(): - s = s.replace(k, v) - return s - -def get_build_version(): - """Return the version of MSVC that was used to build Python. 
- - For Python 2.3 and up, the version number is included in - sys.version. For earlier versions, assume the compiler is MSVC 6. - """ - prefix = "MSC v." - i = sys.version.find(prefix) - if i == -1: - return 6 - i = i + len(prefix) - s, rest = sys.version[i:].split(" ", 1) - majorVersion = int(s[:-2]) - 6 - if majorVersion >= 13: - # v13 was skipped and should be v14 - majorVersion += 1 - minorVersion = int(s[2:3]) / 10.0 - # I don't think paths are affected by minor version in version 6 - if majorVersion == 6: - minorVersion = 0 - if majorVersion >= 6: - return majorVersion + minorVersion - # else we don't know what version of the compiler this is - return None - -def get_build_architecture(): - """Return the processor architecture. - - Possible results are "Intel" or "AMD64". - """ - - prefix = " bit (" - i = sys.version.find(prefix) - if i == -1: - return "Intel" - j = sys.version.find(")", i) - return sys.version[i+len(prefix):j] - -def normalize_and_reduce_paths(paths): - """Return a list of normalized paths with duplicates removed. - - The current order of paths is maintained. - """ - # Paths are normalized so things like: /a and /a/ aren't both preserved. - reduced_paths = [] - for p in paths: - np = os.path.normpath(p) - # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set. - if np not in reduced_paths: - reduced_paths.append(np) - return reduced_paths - - -class MSVCCompiler(CCompiler) : - """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" - - compiler_type = 'msvc' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. - executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - _rc_extensions = ['.rc'] - _mc_extensions = ['.mc'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. - src_extensions = (_c_extensions + _cpp_extensions + - _rc_extensions + _mc_extensions) - res_extension = '.res' - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - def __init__(self, verbose=0, dry_run=0, force=0): - CCompiler.__init__ (self, verbose, dry_run, force) - self.__version = get_build_version() - self.__arch = get_build_architecture() - if self.__arch == "Intel": - # x86 - if self.__version >= 7: - self.__root = r"Software\Microsoft\VisualStudio" - self.__macros = MacroExpander(self.__version) - else: - self.__root = r"Software\Microsoft\Devstudio" - self.__product = "Visual Studio version %s" % self.__version - else: - # Win64. 
Assume this was built with the platform SDK - self.__product = "Microsoft SDK compiler %s" % (self.__version + 6) - - self.initialized = False - - def initialize(self): - self.__paths = [] - if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): - # Assume that the SDK set up everything alright; don't try to be - # smarter - self.cc = "cl.exe" - self.linker = "link.exe" - self.lib = "lib.exe" - self.rc = "rc.exe" - self.mc = "mc.exe" - else: - self.__paths = self.get_msvc_paths("path") - - if len(self.__paths) == 0: - raise DistutilsPlatformError("Python was built with %s, " - "and extensions need to be built with the same " - "version of the compiler, but it isn't installed." - % self.__product) - - self.cc = self.find_exe("cl.exe") - self.linker = self.find_exe("link.exe") - self.lib = self.find_exe("lib.exe") - self.rc = self.find_exe("rc.exe") # resource compiler - self.mc = self.find_exe("mc.exe") # message compiler - self.set_path_env_var('lib') - self.set_path_env_var('include') - - # extend the MSVC path with the current path - try: - for p in os.environ['path'].split(';'): - self.__paths.append(p) - except KeyError: - pass - self.__paths = normalize_and_reduce_paths(self.__paths) - os.environ['path'] = ";".join(self.__paths) - - self.preprocess_options = None - if self.__arch == "Intel": - self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' , - '/DNDEBUG'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX', - '/Z7', '/D_DEBUG'] - else: - # Win64 - self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' , - '/DNDEBUG'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', - '/Z7', '/D_DEBUG'] - - self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] - if self.__version >= 7: - self.ldflags_shared_debug = [ - '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' - ] - else: - self.ldflags_shared_debug = [ - '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG' - ] - self.ldflags_static = [ '/nologo'] - - self.initialized = True - - # -- Worker methods ------------------------------------------------ - - def object_filenames(self, - source_filenames, - strip_dir=0, - output_dir=''): - # Copied from ccompiler.py, extended to return .res as 'object'-file - # for .rc input file - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - (base, ext) = os.path.splitext (src_name) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if ext not in self.src_extensions: - # Better to raise an exception instead of silently continuing - # and later complain about sources and targets having - # different lengths - raise CompileError ("Don't know how to compile %s" % src_name) - if strip_dir: - base = os.path.basename (base) - if ext in self._rc_extensions: - obj_names.append (os.path.join (output_dir, - base + self.res_extension)) - elif ext in self._mc_extensions: - obj_names.append (os.path.join (output_dir, - base + self.res_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - - def compile(self, sources, - output_dir=None, macros=None, include_dirs=None, debug=0, - extra_preargs=None, extra_postargs=None, depends=None): - - if not self.initialized: - self.initialize() - compile_info = self._setup_compile(output_dir, macros, include_dirs, - sources, depends, extra_postargs) - macros, objects, extra_postargs, pp_opts, build 
= compile_info - - compile_opts = extra_preargs or [] - compile_opts.append ('/c') - if debug: - compile_opts.extend(self.compile_options_debug) - else: - compile_opts.extend(self.compile_options) - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - if debug: - # pass the full pathname to MSVC in debug mode, - # this allows the debugger to find the source file - # without asking the user to browse for it - src = os.path.abspath(src) - - if ext in self._c_extensions: - input_opt = "/Tc" + src - elif ext in self._cpp_extensions: - input_opt = "/Tp" + src - elif ext in self._rc_extensions: - # compile .RC to .RES file - input_opt = src - output_opt = "/fo" + obj - try: - self.spawn([self.rc] + pp_opts + - [output_opt] + [input_opt]) - except DistutilsExecError as msg: - raise CompileError(msg) - continue - elif ext in self._mc_extensions: - # Compile .MC to .RC file to .RES file. - # * '-h dir' specifies the directory for the - # generated include file - # * '-r dir' specifies the target directory of the - # generated RC file and the binary message resource - # it includes - # - # For now (since there are no options to change this), - # we use the source-directory for the include file and - # the build directory for the RC file and message - # resources. This works at least for win32all. - h_dir = os.path.dirname(src) - rc_dir = os.path.dirname(obj) - try: - # first compile .MC to .RC and .H file - self.spawn([self.mc] + - ['-h', h_dir, '-r', rc_dir] + [src]) - base, _ = os.path.splitext (os.path.basename (src)) - rc_file = os.path.join (rc_dir, base + '.rc') - # then compile .RC to .RES file - self.spawn([self.rc] + - ["/fo" + obj] + [rc_file]) - - except DistutilsExecError as msg: - raise CompileError(msg) - continue - else: - # how to handle this file? - raise CompileError("Don't know how to compile %s to %s" - % (src, obj)) - - output_opt = "/Fo" + obj - try: - self.spawn([self.cc] + compile_opts + pp_opts + - [input_opt, output_opt] + - extra_postargs) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - - def create_static_lib(self, - objects, - output_libname, - output_dir=None, - debug=0, - target_lang=None): - - if not self.initialized: - self.initialize() - (objects, output_dir) = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, - output_dir=output_dir) - - if self._need_link(objects, output_filename): - lib_args = objects + ['/OUT:' + output_filename] - if debug: - pass # XXX what goes here? 
- try: - self.spawn([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - - def link(self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - - if not self.initialized: - self.initialize() - (objects, output_dir) = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, - runtime_library_dirs) - (libraries, library_dirs, runtime_library_dirs) = fixed_args - - if runtime_library_dirs: - self.warn ("I don't know what to do with 'runtime_library_dirs': " - + str (runtime_library_dirs)) - - lib_opts = gen_lib_options(self, - library_dirs, runtime_library_dirs, - libraries) - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - if target_desc == CCompiler.EXECUTABLE: - if debug: - ldflags = self.ldflags_shared_debug[1:] - else: - ldflags = self.ldflags_shared[1:] - else: - if debug: - ldflags = self.ldflags_shared_debug - else: - ldflags = self.ldflags_shared - - export_opts = [] - for sym in (export_symbols or []): - export_opts.append("/EXPORT:" + sym) - - ld_args = (ldflags + lib_opts + export_opts + - objects + ['/OUT:' + output_filename]) - - # The MSVC linker generates .lib and .exp files, which cannot be - # suppressed by any linker switches. The .lib files may even be - # needed! Make sure they are generated in the temporary build - # directory. Since they have different names for debug and release - # builds, they can go into the same directory. - if export_symbols is not None: - (dll_name, dll_ext) = os.path.splitext( - os.path.basename(output_filename)) - implib_file = os.path.join( - os.path.dirname(objects[0]), - self.library_filename(dll_name)) - ld_args.append ('/IMPLIB:' + implib_file) - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - self.mkpath(os.path.dirname(output_filename)) - try: - self.spawn([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - - else: - log.debug("skipping %s (up-to-date)", output_filename) - - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function, in - # ccompiler.py. - - def library_dir_option(self, dir): - return "/LIBPATH:" + dir - - def runtime_library_dir_option(self, dir): - raise DistutilsPlatformError( - "don't know how to set runtime library search path for MSVC++") - - def library_option(self, lib): - return self.library_filename(lib) - - - def find_library_file(self, dirs, lib, debug=0): - # Prefer a debugging library if found (and requested), but deal - # with it if we don't have one. - if debug: - try_names = [lib + "_d", lib] - else: - try_names = [lib] - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename (name)) - if os.path.exists(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None - - # Helper methods for using the MSVC registry settings - - def find_exe(self, exe): - """Return path to an MSVC executable program. 
- - Tries to find the program in several places: first, one of the - MSVC program search paths from the registry; next, the directories - in the PATH environment variable. If any of those work, return an - absolute path that is known to exist. If none of them work, just - return the original program name, 'exe'. - """ - for p in self.__paths: - fn = os.path.join(os.path.abspath(p), exe) - if os.path.isfile(fn): - return fn - - # didn't find it; try existing path - for p in os.environ['Path'].split(';'): - fn = os.path.join(os.path.abspath(p),exe) - if os.path.isfile(fn): - return fn - - return exe - - def get_msvc_paths(self, path, platform='x86'): - """Get a list of devstudio directories (include, lib or path). - - Return a list of strings. The list will be empty if unable to - access the registry or appropriate registry keys not found. - """ - if not _can_read_reg: - return [] - - path = path + " dirs" - if self.__version >= 7: - key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories" - % (self.__root, self.__version)) - else: - key = (r"%s\6.0\Build System\Components\Platforms" - r"\Win32 (%s)\Directories" % (self.__root, platform)) - - for base in HKEYS: - d = read_values(base, key) - if d: - if self.__version >= 7: - return self.__macros.sub(d[path]).split(";") - else: - return d[path].split(";") - # MSVC 6 seems to create the registry entries we need only when - # the GUI is run. - if self.__version == 6: - for base in HKEYS: - if read_values(base, r"%s\6.0" % self.__root) is not None: - self.warn("It seems you have Visual Studio 6 installed, " - "but the expected registry settings are not present.\n" - "You must at least run the Visual Studio GUI once " - "so that these entries are created.") - break - return [] - - def set_path_env_var(self, name): - """Set environment variable 'name' to an MSVC path type value. - - This is equivalent to a SET command prior to execution of spawned - commands. - """ - - if name == "lib": - p = self.get_msvc_paths("library") - else: - p = self.get_msvc_paths(name) - if p: - os.environ[name] = ';'.join(p) - - -if get_build_version() >= 8.0: - log.debug("Importing new compiler from distutils.msvc9compiler") - OldMSVCCompiler = MSVCCompiler - from distutils.msvc9compiler import MSVCCompiler - # get_build_architecture not really relevant now we support cross-compile - from distutils.msvc9compiler import MacroExpander diff --git a/Lib/distutils/spawn.py b/Lib/distutils/spawn.py deleted file mode 100644 index 31df3f7faca5523..000000000000000 --- a/Lib/distutils/spawn.py +++ /dev/null @@ -1,129 +0,0 @@ -"""distutils.spawn - -Provides the 'spawn()' function, a front-end to various platform- -specific functions for launching another program in a sub-process. -Also provides the 'find_executable()' to search the path for a given -executable name. -""" - -import sys -import os -import subprocess - -from distutils.errors import DistutilsPlatformError, DistutilsExecError -from distutils.debug import DEBUG -from distutils import log - - -if sys.platform == 'darwin': - _cfg_target = None - _cfg_target_split = None - - -def spawn(cmd, search_path=1, verbose=0, dry_run=0): - """Run another program, specified as a command list 'cmd', in a new process. - - 'cmd' is just the argument list for the new process, ie. - cmd[0] is the program to run and cmd[1:] are the rest of its arguments. - There is no way to run a program with a name different from that of its - executable. 
- - If 'search_path' is true (the default), the system's executable - search path will be used to find the program; otherwise, cmd[0] - must be the exact path to the executable. If 'dry_run' is true, - the command will not actually be run. - - Raise DistutilsExecError if running the program fails in any way; just - return on success. - """ - # cmd is documented as a list, but just in case some code passes a tuple - # in, protect our %-formatting code against horrible death - cmd = list(cmd) - - log.info(' '.join(cmd)) - if dry_run: - return - - if search_path: - executable = find_executable(cmd[0]) - if executable is not None: - cmd[0] = executable - - env = None - if sys.platform == 'darwin': - global _cfg_target, _cfg_target_split - if _cfg_target is None: - from distutils import sysconfig - _cfg_target = sysconfig.get_config_var( - 'MACOSX_DEPLOYMENT_TARGET') or '' - if _cfg_target: - _cfg_target_split = [int(x) for x in _cfg_target.split('.')] - if _cfg_target: - # Ensure that the deployment target of the build process is not - # less than 10.3 if the interpreter was built for 10.3 or later. - # This ensures extension modules are built with correct - # compatibility values, specifically LDSHARED which can use - # '-undefined dynamic_lookup' which only works on >= 10.3. - cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target) - cur_target_split = [int(x) for x in cur_target.split('.')] - if _cfg_target_split[:2] >= [10, 3] and cur_target_split[:2] < [10, 3]: - my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: ' - 'now "%s" but "%s" during configure;' - 'must use 10.3 or later' - % (cur_target, _cfg_target)) - raise DistutilsPlatformError(my_msg) - env = dict(os.environ, - MACOSX_DEPLOYMENT_TARGET=cur_target) - - try: - proc = subprocess.Popen(cmd, env=env) - proc.wait() - exitcode = proc.returncode - except OSError as exc: - if not DEBUG: - cmd = cmd[0] - raise DistutilsExecError( - "command %r failed: %s" % (cmd, exc.args[-1])) from exc - - if exitcode: - if not DEBUG: - cmd = cmd[0] - raise DistutilsExecError( - "command %r failed with exit code %s" % (cmd, exitcode)) - - -def find_executable(executable, path=None): - """Tries to find 'executable' in the directories listed in 'path'. - - A string listing directories separated by 'os.pathsep'; defaults to - os.environ['PATH']. Returns the complete filename or None if not found. - """ - _, ext = os.path.splitext(executable) - if (sys.platform == 'win32') and (ext != '.exe'): - executable = executable + '.exe' - - if os.path.isfile(executable): - return executable - - if path is None: - path = os.environ.get('PATH', None) - if path is None: - try: - path = os.confstr("CS_PATH") - except (AttributeError, ValueError): - # os.confstr() or CS_PATH is not available - path = os.defpath - # bpo-35755: Don't use os.defpath if the PATH environment variable is - # set to an empty string - - # PATH='' doesn't match, whereas PATH=':' looks in the current directory - if not path: - return None - - paths = path.split(os.pathsep) - for p in paths: - f = os.path.join(p, executable) - if os.path.isfile(f): - # the file exists, we have a shot at spawn working - return f - return None diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py deleted file mode 100644 index 8eada987c138197..000000000000000 --- a/Lib/distutils/sysconfig.py +++ /dev/null @@ -1,345 +0,0 @@ -"""Provide access to Python's configuration information. The specific -configuration variables available depend heavily on the platform and -configuration. 
The values may be retrieved using -get_config_var(name), and the list of variables is available via -get_config_vars().keys(). Additional convenience functions are also -available. - -Written by: Fred L. Drake, Jr. -Email: -""" - -import os -import re -import sys -import warnings - -from functools import partial - -from .errors import DistutilsPlatformError - -from sysconfig import ( - _PREFIX as PREFIX, - _BASE_PREFIX as BASE_PREFIX, - _EXEC_PREFIX as EXEC_PREFIX, - _BASE_EXEC_PREFIX as BASE_EXEC_PREFIX, - _PROJECT_BASE as project_base, - _PYTHON_BUILD as python_build, - _init_posix as sysconfig_init_posix, - parse_config_h as sysconfig_parse_config_h, - - _init_non_posix, - - _variable_rx, - _findvar1_rx, - _findvar2_rx, - - expand_makefile_vars, - is_python_build, - get_config_h_filename, - get_config_var, - get_config_vars, - get_makefile_filename, - get_python_version, -) - -# This is better than -# from sysconfig import _CONFIG_VARS as _config_vars -# because it makes sure that the global dictionary is initialized -# which might not be true in the time of import. -_config_vars = get_config_vars() - -warnings.warn( - 'The distutils.sysconfig module is deprecated, use sysconfig instead', - DeprecationWarning, - stacklevel=2 -) - - -# Following functions are the same as in sysconfig but with different API -def parse_config_h(fp, g=None): - return sysconfig_parse_config_h(fp, vars=g) - - -_python_build = partial(is_python_build, check_home=True) -_init_posix = partial(sysconfig_init_posix, _config_vars) -_init_nt = partial(_init_non_posix, _config_vars) - - -# Similar function is also implemented in sysconfig as _parse_makefile -# but without the parsing capabilities of distutils.text_file.TextFile. -def parse_makefile(fn, g=None): - """Parse a Makefile-style file. - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - from distutils.text_file import TextFile - fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape") - - if g is None: - g = {} - done = {} - notdone = {} - - while True: - line = fp.readline() - if line is None: # eof - break - m = re.match(_variable_rx, line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. 
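As a usage illustration (a sketch assuming a POSIX build of Python; not part of the patch), this is roughly how the removed parse_makefile() was driven, with the Makefile path coming from the stdlib sysconfig helpers re-exported above:

# Hedged usage sketch of the removed distutils.sysconfig.parse_makefile().
from distutils import sysconfig                  # module deleted by this patch
makefile = sysconfig.get_makefile_filename()     # re-exported from stdlib sysconfig
mk_vars = sysconfig.parse_makefile(makefile)
# '$$' comes back as a literal '$', and $(VAR)/${VAR} references are expanded;
# a resolved PY_CFLAGS is also exposed as CFLAGS when CFLAGS itself is unset.
print(mk_vars.get('CC'), mk_vars.get('PY_CFLAGS'))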
- renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - # do variable interpolation here - while notdone: - for name in list(notdone): - value = notdone[name] - m = re.search(_findvar1_rx, value) or re.search(_findvar2_rx, value) - if m: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if name.startswith('PY_') and name[3:] in renamed_variables: - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - else: - done[n] = item = "" - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - del notdone[name] - - if name.startswith('PY_') \ - and name[3:] in renamed_variables: - - name = name[3:] - if name not in done: - done[name] = value - else: - # bogus variable reference; just drop it since we can't deal - del notdone[name] - - fp.close() - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - g.update(done) - return g - - -# Following functions are deprecated together with this module and they -# have no direct replacement - -# Calculate the build qualifier flags if they are defined. Adding the flags -# to the include and lib directories only makes sense for an installation, not -# an in-source build. -build_flags = '' -try: - if not python_build: - build_flags = sys.abiflags -except AttributeError: - # It's not a configure-based build, so the sys module doesn't have - # this attribute, which is fine. - pass - - -def customize_compiler(compiler): - """Do any platform-specific customization of a CCompiler instance. - - Mainly needed on Unix, so we can plug in the information that - varies across Unices and is stored in Python's Makefile. - """ - if compiler.compiler_type == "unix": - if sys.platform == "darwin": - # Perform first-time customization of compiler-related - # config vars on OS X now that we know we need a compiler. - # This is primarily to support Pythons from binary - # installers. The kind and paths to build tools on - # the user system may vary significantly from the system - # that Python itself was built on. Also the user OS - # version and build tools may not support the same set - # of CPU architectures for universal builds. 
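A hedged usage sketch of the customize_compiler() hook defined here (the CFLAGS value and printed output are hypothetical; behaviour differs by platform):

# Illustrative only: applying the removed customize_compiler() so that
# Makefile/environment settings (CC, CFLAGS, LDSHARED, ...) reach the compiler.
import os
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler

os.environ['CFLAGS'] = '-O2'        # hypothetical override picked up below
cc = new_compiler()                 # a UnixCCompiler on POSIX platforms
customize_compiler(cc)
print(cc.compiler_so)               # e.g. ['gcc', ..., '-O2', '-fPIC']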
- if not _config_vars.get('CUSTOMIZED_OSX_COMPILER'): - import _osx_support - _osx_support.customize_compiler(_config_vars) - _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' - - (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ - get_config_vars('CC', 'CXX', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') - - if 'CC' in os.environ: - newcc = os.environ['CC'] - if (sys.platform == 'darwin' - and 'LDSHARED' not in os.environ - and ldshared.startswith(cc)): - # On OS X, if CC is overridden, use that as the default - # command for LDSHARED as well - ldshared = newcc + ldshared[len(cc):] - cc = newcc - if 'CXX' in os.environ: - cxx = os.environ['CXX'] - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = cflags + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - if 'AR' in os.environ: - ar = os.environ['AR'] - if 'ARFLAGS' in os.environ: - archiver = ar + ' ' + os.environ['ARFLAGS'] - else: - archiver = ar + ' ' + ar_flags - - cc_cmd = cc + ' ' + cflags - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - compiler_cxx=cxx, - linker_so=ldshared, - linker_exe=cc, - archiver=archiver) - - compiler.shared_lib_extension = shlib_suffix - - -def get_python_inc(plat_specific=0, prefix=None): - """Return the directory containing installed Python header files. - - If 'plat_specific' is false (the default), this is the path to the - non-platform-specific header files, i.e. Python.h and so on; - otherwise, this is the path to platform-specific header files - (namely pyconfig.h). - - If 'prefix' is supplied, use it instead of sys.base_prefix or - sys.base_exec_prefix -- i.e., ignore 'plat_specific'. - """ - if prefix is None: - prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX - if os.name == "posix": - if python_build: - # Assume the executable is in the build directory. The - # pyconfig.h file should be in the same directory. Since - # the build directory may not be the source directory, we - # must use "srcdir" from the makefile to find the "Include" - # directory. - if plat_specific: - return project_base - else: - incdir = os.path.join(get_config_var('srcdir'), 'Include') - return os.path.normpath(incdir) - python_dir = 'python' + get_python_version() + build_flags - return os.path.join(prefix, "include", python_dir) - elif os.name == "nt": - if python_build: - # Include both the include and PC dir to ensure we can find - # pyconfig.h - return (os.path.join(prefix, "include") + os.path.pathsep + - os.path.join(prefix, "PC")) - return os.path.join(prefix, "include") - else: - raise DistutilsPlatformError( - "I don't know where Python installs its C header files " - "on platform '%s'" % os.name) - - -def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): - """Return the directory containing the Python library (standard or - site additions). - - If 'plat_specific' is true, return the directory containing - platform-specific modules, i.e. any module from a non-pure-Python - module distribution; otherwise, return the platform-shared library - directory. 
If 'standard_lib' is true, return the directory - containing standard Python library modules; otherwise, return the - directory for site-specific modules. - - If 'prefix' is supplied, use it instead of sys.base_prefix or - sys.base_exec_prefix -- i.e., ignore 'plat_specific'. - """ - if prefix is None: - if standard_lib: - prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX - else: - prefix = plat_specific and EXEC_PREFIX or PREFIX - - if os.name == "posix": - if plat_specific or standard_lib: - # Platform-specific modules (any module from a non-pure-Python - # module distribution) or standard Python library modules. - libdir = sys.platlibdir - else: - # Pure Python - libdir = "lib" - libpython = os.path.join(prefix, libdir, - "python" + get_python_version()) - if standard_lib: - return libpython - else: - return os.path.join(libpython, "site-packages") - elif os.name == "nt": - if standard_lib: - return os.path.join(prefix, "Lib") - else: - return os.path.join(prefix, "Lib", "site-packages") - else: - raise DistutilsPlatformError( - "I don't know where Python installs its library " - "on platform '%s'" % os.name) diff --git a/Lib/distutils/tests/Setup.sample b/Lib/distutils/tests/Setup.sample deleted file mode 100644 index 36c4290d8ff4822..000000000000000 --- a/Lib/distutils/tests/Setup.sample +++ /dev/null @@ -1,67 +0,0 @@ -# Setup file from the pygame project - -#--StartConfig -SDL = -I/usr/include/SDL -D_REENTRANT -lSDL -FONT = -lSDL_ttf -IMAGE = -lSDL_image -MIXER = -lSDL_mixer -SMPEG = -lsmpeg -PNG = -lpng -JPEG = -ljpeg -SCRAP = -lX11 -PORTMIDI = -lportmidi -PORTTIME = -lporttime -#--EndConfig - -#DEBUG = -C-W -C-Wall -DEBUG = - -#the following modules are optional. you will want to compile -#everything you can, but you can ignore ones you don't have -#dependencies for, just comment them out - -imageext src/imageext.c $(SDL) $(IMAGE) $(PNG) $(JPEG) $(DEBUG) -font src/font.c $(SDL) $(FONT) $(DEBUG) -mixer src/mixer.c $(SDL) $(MIXER) $(DEBUG) -mixer_music src/music.c $(SDL) $(MIXER) $(DEBUG) -_numericsurfarray src/_numericsurfarray.c $(SDL) $(DEBUG) -_numericsndarray src/_numericsndarray.c $(SDL) $(MIXER) $(DEBUG) -movie src/movie.c $(SDL) $(SMPEG) $(DEBUG) -scrap src/scrap.c $(SDL) $(SCRAP) $(DEBUG) -_camera src/_camera.c src/camera_v4l2.c src/camera_v4l.c $(SDL) $(DEBUG) -pypm src/pypm.c $(SDL) $(PORTMIDI) $(PORTTIME) $(DEBUG) - -GFX = src/SDL_gfx/SDL_gfxPrimitives.c -#GFX = src/SDL_gfx/SDL_gfxBlitFunc.c src/SDL_gfx/SDL_gfxPrimitives.c -gfxdraw src/gfxdraw.c $(SDL) $(GFX) $(DEBUG) - - - -#these modules are required for pygame to run. they only require -#SDL as a dependency. 
these should not be altered - -base src/base.c $(SDL) $(DEBUG) -cdrom src/cdrom.c $(SDL) $(DEBUG) -color src/color.c $(SDL) $(DEBUG) -constants src/constants.c $(SDL) $(DEBUG) -display src/display.c $(SDL) $(DEBUG) -event src/event.c $(SDL) $(DEBUG) -fastevent src/fastevent.c src/fastevents.c $(SDL) $(DEBUG) -key src/key.c $(SDL) $(DEBUG) -mouse src/mouse.c $(SDL) $(DEBUG) -rect src/rect.c $(SDL) $(DEBUG) -rwobject src/rwobject.c $(SDL) $(DEBUG) -surface src/surface.c src/alphablit.c src/surface_fill.c $(SDL) $(DEBUG) -surflock src/surflock.c $(SDL) $(DEBUG) -time src/time.c $(SDL) $(DEBUG) -joystick src/joystick.c $(SDL) $(DEBUG) -draw src/draw.c $(SDL) $(DEBUG) -image src/image.c $(SDL) $(DEBUG) -overlay src/overlay.c $(SDL) $(DEBUG) -transform src/transform.c src/rotozoom.c src/scale2x.c src/scale_mmx.c $(SDL) $(DEBUG) -mask src/mask.c src/bitmask.c $(SDL) $(DEBUG) -bufferproxy src/bufferproxy.c $(SDL) $(DEBUG) -pixelarray src/pixelarray.c $(SDL) $(DEBUG) -_arraysurfarray src/_arraysurfarray.c $(SDL) $(DEBUG) - - diff --git a/Lib/distutils/tests/__init__.py b/Lib/distutils/tests/__init__.py deleted file mode 100644 index 16d011fd9ee6e74..000000000000000 --- a/Lib/distutils/tests/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Test suite for distutils. - -This test suite consists of a collection of test modules in the -distutils.tests package. Each test module has a name starting with -'test' and contains a function test_suite(). The function is expected -to return an initialized unittest.TestSuite instance. - -Tests for the command classes in the distutils.command package are -included in distutils.tests as well, instead of using a separate -distutils.command.tests package, since command identification is done -by import rather than matching pre-defined names. - -""" - -import os -import sys -import unittest -from test.support import run_unittest -from test.support.warnings_helper import save_restore_warnings_filters - - -here = os.path.dirname(__file__) or os.curdir - - -def test_suite(): - suite = unittest.TestSuite() - for fn in os.listdir(here): - if fn.startswith("test") and fn.endswith(".py"): - modname = "distutils.tests." + fn[:-3] - # bpo-40055: Save/restore warnings filters to leave them unchanged. - # Importing tests imports docutils which imports pkg_resources - # which adds a warnings filter. - with save_restore_warnings_filters(): - __import__(modname) - module = sys.modules[modname] - suite.addTest(module.test_suite()) - return suite - - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/includetest.rst b/Lib/distutils/tests/includetest.rst deleted file mode 100644 index d7b4ae38b09d868..000000000000000 --- a/Lib/distutils/tests/includetest.rst +++ /dev/null @@ -1 +0,0 @@ -This should be included. 
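Before the next removed test file, a minimal sketch (with a hypothetical TestCase name; not part of the patch) of the per-module test_suite() hook that the distutils.tests.__init__ loader above relied on, and that each deleted test module below follows:

# Minimal sketch of the pattern every removed distutils test module used;
# 'ExampleTestCase' is a made-up name for illustration only.
import unittest
from test.support import run_unittest

class ExampleTestCase(unittest.TestCase):
    def test_nothing(self):
        self.assertTrue(True)

def test_suite():
    return unittest.TestLoader().loadTestsFromTestCase(ExampleTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())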
diff --git a/Lib/distutils/tests/support.py b/Lib/distutils/tests/support.py deleted file mode 100644 index 23b907b607efad9..000000000000000 --- a/Lib/distutils/tests/support.py +++ /dev/null @@ -1,209 +0,0 @@ -"""Support code for distutils test cases.""" -import os -import sys -import shutil -import tempfile -import unittest -import sysconfig -from copy import deepcopy -from test.support import os_helper - -from distutils import log -from distutils.log import DEBUG, INFO, WARN, ERROR, FATAL -from distutils.core import Distribution - - -class LoggingSilencer(object): - - def setUp(self): - super().setUp() - self.threshold = log.set_threshold(log.FATAL) - # catching warnings - # when log will be replaced by logging - # we won't need such monkey-patch anymore - self._old_log = log.Log._log - log.Log._log = self._log - self.logs = [] - - def tearDown(self): - log.set_threshold(self.threshold) - log.Log._log = self._old_log - super().tearDown() - - def _log(self, level, msg, args): - if level not in (DEBUG, INFO, WARN, ERROR, FATAL): - raise ValueError('%s wrong log level' % str(level)) - if not isinstance(msg, str): - raise TypeError("msg should be str, not '%.200s'" - % (type(msg).__name__)) - self.logs.append((level, msg, args)) - - def get_logs(self, *levels): - return [msg % args for level, msg, args - in self.logs if level in levels] - - def clear_logs(self): - self.logs = [] - - -class TempdirManager(object): - """Mix-in class that handles temporary directories for test cases. - - This is intended to be used with unittest.TestCase. - """ - - def setUp(self): - super().setUp() - self.old_cwd = os.getcwd() - self.tempdirs = [] - - def tearDown(self): - # Restore working dir, for Solaris and derivatives, where rmdir() - # on the current directory fails. - os.chdir(self.old_cwd) - super().tearDown() - while self.tempdirs: - tmpdir = self.tempdirs.pop() - os_helper.rmtree(tmpdir) - - def mkdtemp(self): - """Create a temporary directory that will be cleaned up. - - Returns the path of the directory. - """ - d = tempfile.mkdtemp() - self.tempdirs.append(d) - return d - - def write_file(self, path, content='xxx'): - """Writes a file in the given path. - - - path can be a string or a sequence. - """ - if isinstance(path, (list, tuple)): - path = os.path.join(*path) - f = open(path, 'w') - try: - f.write(content) - finally: - f.close() - - def create_dist(self, pkg_name='foo', **kw): - """Will generate a test environment. - - This function creates: - - a Distribution instance using keywords - - a temporary directory with a package structure - - It returns the package directory and the distribution - instance. - """ - tmp_dir = self.mkdtemp() - pkg_dir = os.path.join(tmp_dir, pkg_name) - os.mkdir(pkg_dir) - dist = Distribution(attrs=kw) - - return pkg_dir, dist - - -class DummyCommand: - """Class to store options for retrieval via set_undefined_options().""" - - def __init__(self, **kwargs): - for kw, val in kwargs.items(): - setattr(self, kw, val) - - def ensure_finalized(self): - pass - - -class EnvironGuard(object): - - def setUp(self): - super(EnvironGuard, self).setUp() - self.old_environ = deepcopy(os.environ) - - def tearDown(self): - for key, value in self.old_environ.items(): - if os.environ.get(key) != value: - os.environ[key] = value - - for key in tuple(os.environ.keys()): - if key not in self.old_environ: - del os.environ[key] - - super(EnvironGuard, self).tearDown() - - -def copy_xxmodule_c(directory): - """Helper for tests that need the xxmodule.c source file. 
- - Example use: - - def test_compile(self): - copy_xxmodule_c(self.tmpdir) - self.assertIn('xxmodule.c', os.listdir(self.tmpdir)) - - If the source file can be found, it will be copied to *directory*. If not, - the test will be skipped. Errors during copy are not caught. - """ - filename = _get_xxmodule_path() - if filename is None: - raise unittest.SkipTest('cannot find xxmodule.c (test must run in ' - 'the python build dir)') - shutil.copy(filename, directory) - - -def _get_xxmodule_path(): - srcdir = sysconfig.get_config_var('srcdir') - candidates = [ - # use installed copy if available - os.path.join(os.path.dirname(__file__), 'xxmodule.c'), - # otherwise try using copy from build directory - os.path.join(srcdir, 'Modules', 'xxmodule.c'), - # srcdir mysteriously can be $srcdir/Lib/distutils/tests when - # this file is run from its parent directory, so walk up the - # tree to find the real srcdir - os.path.join(srcdir, '..', '..', '..', 'Modules', 'xxmodule.c'), - ] - for path in candidates: - if os.path.exists(path): - return path - - -def fixup_build_ext(cmd): - """Function needed to make build_ext tests pass. - - When Python was built with --enable-shared on Unix, -L. is not enough to - find libpython.so, because regrtest runs in a tempdir, not in the - source directory where the .so lives. - - When Python was built with in debug mode on Windows, build_ext commands - need their debug attribute set, and it is not done automatically for - some reason. - - This function handles both of these things. Example use: - - cmd = build_ext(dist) - support.fixup_build_ext(cmd) - cmd.ensure_finalized() - - Unlike most other Unix platforms, Mac OS X embeds absolute paths - to shared libraries into executables, so the fixup is not needed there. - """ - if os.name == 'nt': - cmd.debug = sys.executable.endswith('_d.exe') - elif sysconfig.get_config_var('Py_ENABLE_SHARED'): - # To further add to the shared builds fun on Unix, we can't just add - # library_dirs to the Extension() instance because that doesn't get - # plumbed through to the final compiler command. 
- runshared = sysconfig.get_config_var('RUNSHARED') - if runshared is None: - cmd.library_dirs = ['.'] - else: - if sys.platform == 'darwin': - cmd.library_dirs = [] - else: - name, equals, value = runshared.partition('=') - cmd.library_dirs = [d for d in value.split(os.pathsep) if d] diff --git a/Lib/distutils/tests/test_archive_util.py b/Lib/distutils/tests/test_archive_util.py deleted file mode 100644 index 8aec84078ed48f9..000000000000000 --- a/Lib/distutils/tests/test_archive_util.py +++ /dev/null @@ -1,396 +0,0 @@ -# -*- coding: utf-8 -*- -"""Tests for distutils.archive_util.""" -import unittest -import os -import sys -import tarfile -from os.path import splitdrive -import warnings - -from distutils import archive_util -from distutils.archive_util import (check_archive_formats, make_tarball, - make_zipfile, make_archive, - ARCHIVE_FORMATS) -from distutils.spawn import find_executable, spawn -from distutils.tests import support -from test.support import run_unittest, patch -from test.support.os_helper import change_cwd -from test.support.warnings_helper import check_warnings - -try: - import grp - import pwd - UID_GID_SUPPORT = True -except ImportError: - UID_GID_SUPPORT = False - -try: - import zipfile - ZIP_SUPPORT = True -except ImportError: - ZIP_SUPPORT = find_executable('zip') - -try: - import zlib - ZLIB_SUPPORT = True -except ImportError: - ZLIB_SUPPORT = False - -try: - import bz2 -except ImportError: - bz2 = None - -try: - import lzma -except ImportError: - lzma = None - -def can_fs_encode(filename): - """ - Return True if the filename can be saved in the file system. - """ - if os.path.supports_unicode_filenames: - return True - try: - filename.encode(sys.getfilesystemencoding()) - except UnicodeEncodeError: - return False - return True - - -class ArchiveUtilTestCase(support.TempdirManager, - support.LoggingSilencer, - unittest.TestCase): - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_make_tarball(self, name='archive'): - # creating something to tar - tmpdir = self._create_files() - self._make_tarball(tmpdir, name, '.tar.gz') - # trying an uncompressed one - self._make_tarball(tmpdir, name, '.tar', compress=None) - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_make_tarball_gzip(self): - tmpdir = self._create_files() - self._make_tarball(tmpdir, 'archive', '.tar.gz', compress='gzip') - - @unittest.skipUnless(bz2, 'Need bz2 support to run') - def test_make_tarball_bzip2(self): - tmpdir = self._create_files() - self._make_tarball(tmpdir, 'archive', '.tar.bz2', compress='bzip2') - - @unittest.skipUnless(lzma, 'Need lzma support to run') - def test_make_tarball_xz(self): - tmpdir = self._create_files() - self._make_tarball(tmpdir, 'archive', '.tar.xz', compress='xz') - - @unittest.skipUnless(can_fs_encode('Ã¥rchiv'), - 'File system cannot handle this filename') - def test_make_tarball_latin1(self): - """ - Mirror test_make_tarball, except filename contains latin characters. - """ - self.test_make_tarball('Ã¥rchiv') # note this isn't a real word - - @unittest.skipUnless(can_fs_encode('ã®ã‚¢ãƒ¼ã‚«ã‚¤ãƒ–'), - 'File system cannot handle this filename') - def test_make_tarball_extended(self): - """ - Mirror test_make_tarball, except filename contains extended - characters outside the latin charset. 
- """ - self.test_make_tarball('ã®ã‚¢ãƒ¼ã‚«ã‚¤ãƒ–') # japanese for archive - - def _make_tarball(self, tmpdir, target_name, suffix, **kwargs): - tmpdir2 = self.mkdtemp() - unittest.skipUnless(splitdrive(tmpdir)[0] == splitdrive(tmpdir2)[0], - "source and target should be on same drive") - - base_name = os.path.join(tmpdir2, target_name) - - # working with relative paths to avoid tar warnings - with change_cwd(tmpdir): - make_tarball(splitdrive(base_name)[1], 'dist', **kwargs) - - # check if the compressed tarball was created - tarball = base_name + suffix - self.assertTrue(os.path.exists(tarball)) - self.assertEqual(self._tarinfo(tarball), self._created_files) - - def _tarinfo(self, path): - tar = tarfile.open(path) - try: - names = tar.getnames() - names.sort() - return names - finally: - tar.close() - - _zip_created_files = ['dist/', 'dist/file1', 'dist/file2', - 'dist/sub/', 'dist/sub/file3', 'dist/sub2/'] - _created_files = [p.rstrip('/') for p in _zip_created_files] - - def _create_files(self): - # creating something to tar - tmpdir = self.mkdtemp() - dist = os.path.join(tmpdir, 'dist') - os.mkdir(dist) - self.write_file([dist, 'file1'], 'xxx') - self.write_file([dist, 'file2'], 'xxx') - os.mkdir(os.path.join(dist, 'sub')) - self.write_file([dist, 'sub', 'file3'], 'xxx') - os.mkdir(os.path.join(dist, 'sub2')) - return tmpdir - - @unittest.skipUnless(find_executable('tar') and find_executable('gzip') - and ZLIB_SUPPORT, - 'Need the tar, gzip and zlib command to run') - def test_tarfile_vs_tar(self): - tmpdir = self._create_files() - tmpdir2 = self.mkdtemp() - base_name = os.path.join(tmpdir2, 'archive') - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - make_tarball(base_name, 'dist') - finally: - os.chdir(old_dir) - - # check if the compressed tarball was created - tarball = base_name + '.tar.gz' - self.assertTrue(os.path.exists(tarball)) - - # now create another tarball using `tar` - tarball2 = os.path.join(tmpdir, 'archive2.tar.gz') - tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist'] - gzip_cmd = ['gzip', '-f', '-9', 'archive2.tar'] - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - spawn(tar_cmd) - spawn(gzip_cmd) - finally: - os.chdir(old_dir) - - self.assertTrue(os.path.exists(tarball2)) - # let's compare both tarballs - self.assertEqual(self._tarinfo(tarball), self._created_files) - self.assertEqual(self._tarinfo(tarball2), self._created_files) - - # trying an uncompressed one - base_name = os.path.join(tmpdir2, 'archive') - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - make_tarball(base_name, 'dist', compress=None) - finally: - os.chdir(old_dir) - tarball = base_name + '.tar' - self.assertTrue(os.path.exists(tarball)) - - # now for a dry_run - base_name = os.path.join(tmpdir2, 'archive') - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - make_tarball(base_name, 'dist', compress=None, dry_run=True) - finally: - os.chdir(old_dir) - tarball = base_name + '.tar' - self.assertTrue(os.path.exists(tarball)) - - @unittest.skipUnless(find_executable('compress'), - 'The compress program is required') - def test_compress_deprecated(self): - tmpdir = self._create_files() - base_name = os.path.join(self.mkdtemp(), 'archive') - - # using compress and testing the PendingDeprecationWarning - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - with check_warnings() as w: - warnings.simplefilter("always") - make_tarball(base_name, 'dist', compress='compress') - finally: - os.chdir(old_dir) - tarball = base_name + '.tar.Z' - self.assertTrue(os.path.exists(tarball)) - 
self.assertEqual(len(w.warnings), 1) - - # same test with dry_run - os.remove(tarball) - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - with check_warnings() as w: - warnings.simplefilter("always") - make_tarball(base_name, 'dist', compress='compress', - dry_run=True) - finally: - os.chdir(old_dir) - self.assertFalse(os.path.exists(tarball)) - self.assertEqual(len(w.warnings), 1) - - @unittest.skipUnless(ZIP_SUPPORT and ZLIB_SUPPORT, - 'Need zip and zlib support to run') - def test_make_zipfile(self): - # creating something to tar - tmpdir = self._create_files() - base_name = os.path.join(self.mkdtemp(), 'archive') - with change_cwd(tmpdir): - make_zipfile(base_name, 'dist') - - # check if the compressed tarball was created - tarball = base_name + '.zip' - self.assertTrue(os.path.exists(tarball)) - with zipfile.ZipFile(tarball) as zf: - self.assertEqual(sorted(zf.namelist()), self._zip_created_files) - - @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run') - def test_make_zipfile_no_zlib(self): - patch(self, archive_util.zipfile, 'zlib', None) # force zlib ImportError - - called = [] - zipfile_class = zipfile.ZipFile - def fake_zipfile(*a, **kw): - if kw.get('compression', None) == zipfile.ZIP_STORED: - called.append((a, kw)) - return zipfile_class(*a, **kw) - - patch(self, archive_util.zipfile, 'ZipFile', fake_zipfile) - - # create something to tar and compress - tmpdir = self._create_files() - base_name = os.path.join(self.mkdtemp(), 'archive') - with change_cwd(tmpdir): - make_zipfile(base_name, 'dist') - - tarball = base_name + '.zip' - self.assertEqual(called, - [((tarball, "w"), {'compression': zipfile.ZIP_STORED})]) - self.assertTrue(os.path.exists(tarball)) - with zipfile.ZipFile(tarball) as zf: - self.assertEqual(sorted(zf.namelist()), self._zip_created_files) - - def test_check_archive_formats(self): - self.assertEqual(check_archive_formats(['gztar', 'xxx', 'zip']), - 'xxx') - self.assertIsNone(check_archive_formats(['gztar', 'bztar', 'xztar', - 'ztar', 'tar', 'zip'])) - - def test_make_archive(self): - tmpdir = self.mkdtemp() - base_name = os.path.join(tmpdir, 'archive') - self.assertRaises(ValueError, make_archive, base_name, 'xxx') - - def test_make_archive_cwd(self): - current_dir = os.getcwd() - def _breaks(*args, **kw): - raise RuntimeError() - ARCHIVE_FORMATS['xxx'] = (_breaks, [], 'xxx file') - try: - try: - make_archive('xxx', 'xxx', root_dir=self.mkdtemp()) - except: - pass - self.assertEqual(os.getcwd(), current_dir) - finally: - del ARCHIVE_FORMATS['xxx'] - - def test_make_archive_tar(self): - base_dir = self._create_files() - base_name = os.path.join(self.mkdtemp() , 'archive') - res = make_archive(base_name, 'tar', base_dir, 'dist') - self.assertTrue(os.path.exists(res)) - self.assertEqual(os.path.basename(res), 'archive.tar') - self.assertEqual(self._tarinfo(res), self._created_files) - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_make_archive_gztar(self): - base_dir = self._create_files() - base_name = os.path.join(self.mkdtemp() , 'archive') - res = make_archive(base_name, 'gztar', base_dir, 'dist') - self.assertTrue(os.path.exists(res)) - self.assertEqual(os.path.basename(res), 'archive.tar.gz') - self.assertEqual(self._tarinfo(res), self._created_files) - - @unittest.skipUnless(bz2, 'Need bz2 support to run') - def test_make_archive_bztar(self): - base_dir = self._create_files() - base_name = os.path.join(self.mkdtemp() , 'archive') - res = make_archive(base_name, 'bztar', base_dir, 'dist') - 
self.assertTrue(os.path.exists(res)) - self.assertEqual(os.path.basename(res), 'archive.tar.bz2') - self.assertEqual(self._tarinfo(res), self._created_files) - - @unittest.skipUnless(lzma, 'Need xz support to run') - def test_make_archive_xztar(self): - base_dir = self._create_files() - base_name = os.path.join(self.mkdtemp() , 'archive') - res = make_archive(base_name, 'xztar', base_dir, 'dist') - self.assertTrue(os.path.exists(res)) - self.assertEqual(os.path.basename(res), 'archive.tar.xz') - self.assertEqual(self._tarinfo(res), self._created_files) - - def test_make_archive_owner_group(self): - # testing make_archive with owner and group, with various combinations - # this works even if there's not gid/uid support - if UID_GID_SUPPORT: - group = grp.getgrgid(0)[0] - owner = pwd.getpwuid(0)[0] - else: - group = owner = 'root' - - base_dir = self._create_files() - root_dir = self.mkdtemp() - base_name = os.path.join(self.mkdtemp() , 'archive') - res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner, - group=group) - self.assertTrue(os.path.exists(res)) - - res = make_archive(base_name, 'zip', root_dir, base_dir) - self.assertTrue(os.path.exists(res)) - - res = make_archive(base_name, 'tar', root_dir, base_dir, - owner=owner, group=group) - self.assertTrue(os.path.exists(res)) - - res = make_archive(base_name, 'tar', root_dir, base_dir, - owner='kjhkjhkjg', group='oihohoh') - self.assertTrue(os.path.exists(res)) - - @unittest.skipUnless(ZLIB_SUPPORT, "Requires zlib") - @unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support") - def test_tarfile_root_owner(self): - tmpdir = self._create_files() - base_name = os.path.join(self.mkdtemp(), 'archive') - old_dir = os.getcwd() - os.chdir(tmpdir) - group = grp.getgrgid(0)[0] - owner = pwd.getpwuid(0)[0] - try: - archive_name = make_tarball(base_name, 'dist', compress=None, - owner=owner, group=group) - finally: - os.chdir(old_dir) - - # check if the compressed tarball was created - self.assertTrue(os.path.exists(archive_name)) - - # now checks the rights - archive = tarfile.open(archive_name) - try: - for member in archive.getmembers(): - self.assertEqual(member.uid, 0) - self.assertEqual(member.gid, 0) - finally: - archive.close() - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(ArchiveUtilTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_bdist.py b/Lib/distutils/tests/test_bdist.py deleted file mode 100644 index 5676f7f34d4292d..000000000000000 --- a/Lib/distutils/tests/test_bdist.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Tests for distutils.command.bdist.""" -import unittest -from test.support import run_unittest - -import warnings -with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - from distutils.command.bdist import bdist - from distutils.tests import support - - -class BuildTestCase(support.TempdirManager, - unittest.TestCase): - - def test_formats(self): - # let's create a command and make sure - # we can set the format - dist = self.create_dist()[1] - cmd = bdist(dist) - cmd.formats = ['tar'] - cmd.ensure_finalized() - self.assertEqual(cmd.formats, ['tar']) - - # what formats does bdist offer? 
- formats = ['bztar', 'gztar', 'rpm', 'tar', 'xztar', 'zip', 'ztar'] - found = sorted(cmd.format_command) - self.assertEqual(found, formats) - - def test_skip_build(self): - # bug #10946: bdist --skip-build should trickle down to subcommands - dist = self.create_dist()[1] - cmd = bdist(dist) - cmd.skip_build = 1 - cmd.ensure_finalized() - dist.command_obj['bdist'] = cmd - - for name in ['bdist_dumb']: # bdist_rpm does not support --skip-build - subcmd = cmd.get_finalized_command(name) - if getattr(subcmd, '_unsupported', False): - # command is not supported on this build - continue - self.assertTrue(subcmd.skip_build, - '%s should take --skip-build from bdist' % name) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(BuildTestCase) - - -if __name__ == '__main__': - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_bdist_dumb.py b/Lib/distutils/tests/test_bdist_dumb.py deleted file mode 100644 index bb860c8ac70345a..000000000000000 --- a/Lib/distutils/tests/test_bdist_dumb.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Tests for distutils.command.bdist_dumb.""" - -import os -import sys -import zipfile -import unittest -from test.support import run_unittest - -from distutils.core import Distribution -from distutils.command.bdist_dumb import bdist_dumb -from distutils.tests import support - -SETUP_PY = """\ -from distutils.core import setup -import foo - -setup(name='foo', version='0.1', py_modules=['foo'], - url='xxx', author='xxx', author_email='xxx') - -""" - -try: - import zlib - ZLIB_SUPPORT = True -except ImportError: - ZLIB_SUPPORT = False - - -class BuildDumbTestCase(support.TempdirManager, - support.LoggingSilencer, - support.EnvironGuard, - unittest.TestCase): - - def setUp(self): - super(BuildDumbTestCase, self).setUp() - self.old_location = os.getcwd() - self.old_sys_argv = sys.argv, sys.argv[:] - - def tearDown(self): - os.chdir(self.old_location) - sys.argv = self.old_sys_argv[0] - sys.argv[:] = self.old_sys_argv[1] - super(BuildDumbTestCase, self).tearDown() - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_simple_built(self): - - # let's create a simple package - tmp_dir = self.mkdtemp() - pkg_dir = os.path.join(tmp_dir, 'foo') - os.mkdir(pkg_dir) - self.write_file((pkg_dir, 'setup.py'), SETUP_PY) - self.write_file((pkg_dir, 'foo.py'), '#') - self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py') - self.write_file((pkg_dir, 'README'), '') - - dist = Distribution({'name': 'foo', 'version': '0.1', - 'py_modules': ['foo'], - 'url': 'xxx', 'author': 'xxx', - 'author_email': 'xxx'}) - dist.script_name = 'setup.py' - os.chdir(pkg_dir) - - sys.argv = ['setup.py'] - cmd = bdist_dumb(dist) - - # so the output is the same no matter - # what is the platform - cmd.format = 'zip' - - cmd.ensure_finalized() - cmd.run() - - # see what we have - dist_created = os.listdir(os.path.join(pkg_dir, 'dist')) - base = "%s.%s.zip" % (dist.get_fullname(), cmd.plat_name) - - self.assertEqual(dist_created, [base]) - - # now let's check what we have in the zip file - fp = zipfile.ZipFile(os.path.join('dist', base)) - try: - contents = fp.namelist() - finally: - fp.close() - - contents = sorted(filter(None, map(os.path.basename, contents))) - wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py'] - if not sys.dont_write_bytecode: - wanted.append('foo.%s.pyc' % sys.implementation.cache_tag) - self.assertEqual(contents, sorted(wanted)) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(BuildDumbTestCase) - 
-if __name__ == '__main__': - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_bdist_rpm.py b/Lib/distutils/tests/test_bdist_rpm.py deleted file mode 100644 index 7eefa7b9cad84ff..000000000000000 --- a/Lib/distutils/tests/test_bdist_rpm.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Tests for distutils.command.bdist_rpm.""" - -import unittest -import sys -import os -from test.support import run_unittest, requires_zlib - -from distutils.core import Distribution -from distutils.command.bdist_rpm import bdist_rpm -from distutils.tests import support -from distutils.spawn import find_executable - -SETUP_PY = """\ -from distutils.core import setup -import foo - -setup(name='foo', version='0.1', py_modules=['foo'], - url='xxx', author='xxx', author_email='xxx') - -""" - -class BuildRpmTestCase(support.TempdirManager, - support.EnvironGuard, - support.LoggingSilencer, - unittest.TestCase): - - def setUp(self): - try: - sys.executable.encode("UTF-8") - except UnicodeEncodeError: - raise unittest.SkipTest("sys.executable is not encodable to UTF-8") - - super(BuildRpmTestCase, self).setUp() - self.old_location = os.getcwd() - self.old_sys_argv = sys.argv, sys.argv[:] - - def tearDown(self): - os.chdir(self.old_location) - sys.argv = self.old_sys_argv[0] - sys.argv[:] = self.old_sys_argv[1] - super(BuildRpmTestCase, self).tearDown() - - # XXX I am unable yet to make this test work without - # spurious sdtout/stderr output under Mac OS X - @unittest.skipUnless(sys.platform.startswith('linux'), - 'spurious sdtout/stderr output under Mac OS X') - @requires_zlib() - @unittest.skipIf(find_executable('rpm') is None, - 'the rpm command is not found') - @unittest.skipIf(find_executable('rpmbuild') is None, - 'the rpmbuild command is not found') - # import foo fails with safe path - @unittest.skipIf(sys.flags.safe_path, - 'PYTHONSAFEPATH changes default sys.path') - def test_quiet(self): - # let's create a package - tmp_dir = self.mkdtemp() - os.environ['HOME'] = tmp_dir # to confine dir '.rpmdb' creation - pkg_dir = os.path.join(tmp_dir, 'foo') - os.mkdir(pkg_dir) - self.write_file((pkg_dir, 'setup.py'), SETUP_PY) - self.write_file((pkg_dir, 'foo.py'), '#') - self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py') - self.write_file((pkg_dir, 'README'), '') - - dist = Distribution({'name': 'foo', 'version': '0.1', - 'py_modules': ['foo'], - 'url': 'xxx', 'author': 'xxx', - 'author_email': 'xxx'}) - dist.script_name = 'setup.py' - os.chdir(pkg_dir) - - sys.argv = ['setup.py'] - cmd = bdist_rpm(dist) - cmd.fix_python = True - - # running in quiet mode - cmd.quiet = 1 - cmd.ensure_finalized() - cmd.run() - - dist_created = os.listdir(os.path.join(pkg_dir, 'dist')) - self.assertIn('foo-0.1-1.noarch.rpm', dist_created) - - # bug #2945: upload ignores bdist_rpm files - self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files) - self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files) - - # XXX I am unable yet to make this test work without - # spurious sdtout/stderr output under Mac OS X - @unittest.skipUnless(sys.platform.startswith('linux'), - 'spurious sdtout/stderr output under Mac OS X') - @requires_zlib() - # http://bugs.python.org/issue1533164 - @unittest.skipIf(find_executable('rpm') is None, - 'the rpm command is not found') - @unittest.skipIf(find_executable('rpmbuild') is None, - 'the rpmbuild command is not found') - # import foo fails with safe path - @unittest.skipIf(sys.flags.safe_path, - 'PYTHONSAFEPATH changes default sys.path') - def 
test_no_optimize_flag(self): - # let's create a package that breaks bdist_rpm - tmp_dir = self.mkdtemp() - os.environ['HOME'] = tmp_dir # to confine dir '.rpmdb' creation - pkg_dir = os.path.join(tmp_dir, 'foo') - os.mkdir(pkg_dir) - self.write_file((pkg_dir, 'setup.py'), SETUP_PY) - self.write_file((pkg_dir, 'foo.py'), '#') - self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py') - self.write_file((pkg_dir, 'README'), '') - - dist = Distribution({'name': 'foo', 'version': '0.1', - 'py_modules': ['foo'], - 'url': 'xxx', 'author': 'xxx', - 'author_email': 'xxx'}) - dist.script_name = 'setup.py' - os.chdir(pkg_dir) - - sys.argv = ['setup.py'] - cmd = bdist_rpm(dist) - cmd.fix_python = True - - cmd.quiet = 1 - cmd.ensure_finalized() - cmd.run() - - dist_created = os.listdir(os.path.join(pkg_dir, 'dist')) - self.assertIn('foo-0.1-1.noarch.rpm', dist_created) - - # bug #2945: upload ignores bdist_rpm files - self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files) - self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files) - - os.remove(os.path.join(pkg_dir, 'dist', 'foo-0.1-1.noarch.rpm')) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(BuildRpmTestCase) - -if __name__ == '__main__': - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_build.py b/Lib/distutils/tests/test_build.py deleted file mode 100644 index 71b5e164bae14aa..000000000000000 --- a/Lib/distutils/tests/test_build.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Tests for distutils.command.build.""" -import unittest -import os -import sys -from test.support import run_unittest - -from distutils.command.build import build -from distutils.tests import support -from sysconfig import get_platform - -class BuildTestCase(support.TempdirManager, - support.LoggingSilencer, - unittest.TestCase): - - @unittest.skipUnless(sys.executable, "test requires sys.executable") - def test_finalize_options(self): - pkg_dir, dist = self.create_dist() - cmd = build(dist) - cmd.finalize_options() - - # if not specified, plat_name gets the current platform - self.assertEqual(cmd.plat_name, get_platform()) - - # build_purelib is build + lib - wanted = os.path.join(cmd.build_base, 'lib') - self.assertEqual(cmd.build_purelib, wanted) - - # build_platlib is 'build/lib.platform-x.x[-pydebug]' - # examples: - # build/lib.macosx-10.3-i386-2.7 - plat_spec = '.%s-%d.%d' % (cmd.plat_name, *sys.version_info[:2]) - if hasattr(sys, 'gettotalrefcount'): - self.assertTrue(cmd.build_platlib.endswith('-pydebug')) - plat_spec += '-pydebug' - wanted = os.path.join(cmd.build_base, 'lib' + plat_spec) - self.assertEqual(cmd.build_platlib, wanted) - - # by default, build_lib = build_purelib - self.assertEqual(cmd.build_lib, cmd.build_purelib) - - # build_temp is build/temp. 
- wanted = os.path.join(cmd.build_base, 'temp' + plat_spec) - self.assertEqual(cmd.build_temp, wanted) - - # build_scripts is build/scripts-x.x - wanted = os.path.join(cmd.build_base, - 'scripts-%d.%d' % sys.version_info[:2]) - self.assertEqual(cmd.build_scripts, wanted) - - # executable is os.path.normpath(sys.executable) - self.assertEqual(cmd.executable, os.path.normpath(sys.executable)) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(BuildTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_build_clib.py b/Lib/distutils/tests/test_build_clib.py deleted file mode 100644 index 95f928288e00483..000000000000000 --- a/Lib/distutils/tests/test_build_clib.py +++ /dev/null @@ -1,147 +0,0 @@ -"""Tests for distutils.command.build_clib.""" -import unittest -import os -import sys -import sysconfig - -from test.support import ( - run_unittest, missing_compiler_executable, requires_subprocess -) - -from distutils.command.build_clib import build_clib -from distutils.errors import DistutilsSetupError -from distutils.tests import support - -class BuildCLibTestCase(support.TempdirManager, - support.LoggingSilencer, - unittest.TestCase): - - def setUp(self): - super().setUp() - self._backup_CONFIG_VARS = dict(sysconfig._CONFIG_VARS) - - def tearDown(self): - super().tearDown() - sysconfig._CONFIG_VARS.clear() - sysconfig._CONFIG_VARS.update(self._backup_CONFIG_VARS) - - def test_check_library_dist(self): - pkg_dir, dist = self.create_dist() - cmd = build_clib(dist) - - # 'libraries' option must be a list - self.assertRaises(DistutilsSetupError, cmd.check_library_list, 'foo') - - # each element of 'libraries' must a 2-tuple - self.assertRaises(DistutilsSetupError, cmd.check_library_list, - ['foo1', 'foo2']) - - # first element of each tuple in 'libraries' - # must be a string (the library name) - self.assertRaises(DistutilsSetupError, cmd.check_library_list, - [(1, 'foo1'), ('name', 'foo2')]) - - # library name may not contain directory separators - self.assertRaises(DistutilsSetupError, cmd.check_library_list, - [('name', 'foo1'), - ('another/name', 'foo2')]) - - # second element of each tuple must be a dictionary (build info) - self.assertRaises(DistutilsSetupError, cmd.check_library_list, - [('name', {}), - ('another', 'foo2')]) - - # those work - libs = [('name', {}), ('name', {'ok': 'good'})] - cmd.check_library_list(libs) - - def test_get_source_files(self): - pkg_dir, dist = self.create_dist() - cmd = build_clib(dist) - - # "in 'libraries' option 'sources' must be present and must be - # a list of source filenames - cmd.libraries = [('name', {})] - self.assertRaises(DistutilsSetupError, cmd.get_source_files) - - cmd.libraries = [('name', {'sources': 1})] - self.assertRaises(DistutilsSetupError, cmd.get_source_files) - - cmd.libraries = [('name', {'sources': ['a', 'b']})] - self.assertEqual(cmd.get_source_files(), ['a', 'b']) - - cmd.libraries = [('name', {'sources': ('a', 'b')})] - self.assertEqual(cmd.get_source_files(), ['a', 'b']) - - cmd.libraries = [('name', {'sources': ('a', 'b')}), - ('name2', {'sources': ['c', 'd']})] - self.assertEqual(cmd.get_source_files(), ['a', 'b', 'c', 'd']) - - def test_build_libraries(self): - - pkg_dir, dist = self.create_dist() - cmd = build_clib(dist) - class FakeCompiler: - def compile(*args, **kw): - pass - create_static_lib = compile - - cmd.compiler = FakeCompiler() - - # build_libraries is also doing a bit of typo checking - lib = [('name', {'sources': 'notvalid'})] - 
self.assertRaises(DistutilsSetupError, cmd.build_libraries, lib) - - lib = [('name', {'sources': list()})] - cmd.build_libraries(lib) - - lib = [('name', {'sources': tuple()})] - cmd.build_libraries(lib) - - def test_finalize_options(self): - pkg_dir, dist = self.create_dist() - cmd = build_clib(dist) - - cmd.include_dirs = 'one-dir' - cmd.finalize_options() - self.assertEqual(cmd.include_dirs, ['one-dir']) - - cmd.include_dirs = None - cmd.finalize_options() - self.assertEqual(cmd.include_dirs, []) - - cmd.distribution.libraries = 'WONTWORK' - self.assertRaises(DistutilsSetupError, cmd.finalize_options) - - @unittest.skipIf(sys.platform == 'win32', "can't test on Windows") - @requires_subprocess() - def test_run(self): - pkg_dir, dist = self.create_dist() - cmd = build_clib(dist) - - foo_c = os.path.join(pkg_dir, 'foo.c') - self.write_file(foo_c, 'int main(void) { return 1;}\n') - cmd.libraries = [('foo', {'sources': [foo_c]})] - - build_temp = os.path.join(pkg_dir, 'build') - os.mkdir(build_temp) - cmd.build_temp = build_temp - cmd.build_clib = build_temp - - # Before we run the command, we want to make sure - # all commands are present on the system. - ccmd = missing_compiler_executable() - if ccmd is not None: - self.skipTest('The %r command is not found' % ccmd) - - # this should work - cmd.run() - - # let's check the result - self.assertIn('libfoo.a', os.listdir(build_temp)) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(BuildCLibTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_build_ext.py b/Lib/distutils/tests/test_build_ext.py deleted file mode 100644 index 4ebeafecef03c34..000000000000000 --- a/Lib/distutils/tests/test_build_ext.py +++ /dev/null @@ -1,555 +0,0 @@ -import sys -import os -from io import StringIO -import textwrap - -from distutils.core import Distribution -from distutils.command.build_ext import build_ext -from distutils import sysconfig -from distutils.tests.support import (TempdirManager, LoggingSilencer, - copy_xxmodule_c, fixup_build_ext) -from distutils.extension import Extension -from distutils.errors import ( - CompileError, DistutilsPlatformError, DistutilsSetupError, - UnknownFileError) - -import unittest -from test import support -from test.support import os_helper -from test.support.script_helper import assert_python_ok -from test.support import threading_helper - -# http://bugs.python.org/issue4373 -# Don't load the xx module more than once. -ALREADY_TESTED = False - - -class BuildExtTestCase(TempdirManager, - LoggingSilencer, - unittest.TestCase): - def setUp(self): - # Create a simple test environment - super(BuildExtTestCase, self).setUp() - self.tmp_dir = self.mkdtemp() - import site - self.old_user_base = site.USER_BASE - site.USER_BASE = self.mkdtemp() - from distutils.command import build_ext - build_ext.USER_BASE = site.USER_BASE - self.old_config_vars = dict(sysconfig._config_vars) - - # bpo-30132: On Windows, a .pdb file may be created in the current - # working directory. Create a temporary working directory to cleanup - # everything at the end of the test. 
- self.enterContext(os_helper.change_cwd(self.tmp_dir)) - - def tearDown(self): - import site - site.USER_BASE = self.old_user_base - from distutils.command import build_ext - build_ext.USER_BASE = self.old_user_base - sysconfig._config_vars.clear() - sysconfig._config_vars.update(self.old_config_vars) - super(BuildExtTestCase, self).tearDown() - - def build_ext(self, *args, **kwargs): - return build_ext(*args, **kwargs) - - @support.requires_subprocess() - def test_build_ext(self): - cmd = support.missing_compiler_executable() - if cmd is not None: - self.skipTest('The %r command is not found' % cmd) - global ALREADY_TESTED - copy_xxmodule_c(self.tmp_dir) - xx_c = os.path.join(self.tmp_dir, 'xxmodule.c') - xx_ext = Extension('xx', [xx_c]) - dist = Distribution({'name': 'xx', 'ext_modules': [xx_ext]}) - dist.package_dir = self.tmp_dir - cmd = self.build_ext(dist) - fixup_build_ext(cmd) - cmd.build_lib = self.tmp_dir - cmd.build_temp = self.tmp_dir - - old_stdout = sys.stdout - if not support.verbose: - # silence compiler output - sys.stdout = StringIO() - try: - cmd.ensure_finalized() - cmd.run() - finally: - sys.stdout = old_stdout - - if ALREADY_TESTED: - self.skipTest('Already tested in %s' % ALREADY_TESTED) - else: - ALREADY_TESTED = type(self).__name__ - - code = textwrap.dedent(f""" - tmp_dir = {self.tmp_dir!r} - - import sys - import unittest - from test import support - - sys.path.insert(0, tmp_dir) - import xx - - class Tests(unittest.TestCase): - def test_xx(self): - for attr in ('error', 'foo', 'new', 'roj'): - self.assertTrue(hasattr(xx, attr)) - - self.assertEqual(xx.foo(2, 5), 7) - self.assertEqual(xx.foo(13,15), 28) - self.assertEqual(xx.new().demo(), None) - if support.HAVE_DOCSTRINGS: - doc = 'This is a template module just for instruction.' - self.assertEqual(xx.__doc__, doc) - self.assertIsInstance(xx.Null(), xx.Null) - self.assertIsInstance(xx.Str(), xx.Str) - - - unittest.main() - """) - assert_python_ok('-c', code) - - def test_solaris_enable_shared(self): - dist = Distribution({'name': 'xx'}) - cmd = self.build_ext(dist) - old = sys.platform - - sys.platform = 'sunos' # fooling finalize_options - from distutils.sysconfig import _config_vars - old_var = _config_vars.get('Py_ENABLE_SHARED') - _config_vars['Py_ENABLE_SHARED'] = 1 - try: - cmd.ensure_finalized() - finally: - sys.platform = old - if old_var is None: - del _config_vars['Py_ENABLE_SHARED'] - else: - _config_vars['Py_ENABLE_SHARED'] = old_var - - # make sure we get some library dirs under solaris - self.assertGreater(len(cmd.library_dirs), 0) - - def test_user_site(self): - import site - dist = Distribution({'name': 'xx'}) - cmd = self.build_ext(dist) - - # making sure the user option is there - options = [name for name, short, lable in - cmd.user_options] - self.assertIn('user', options) - - # setting a value - cmd.user = 1 - - # setting user based lib and include - lib = os.path.join(site.USER_BASE, 'lib') - incl = os.path.join(site.USER_BASE, 'include') - os.mkdir(lib) - os.mkdir(incl) - - # let's run finalize - cmd.ensure_finalized() - - # see if include_dirs and library_dirs - # were set - self.assertIn(lib, cmd.library_dirs) - self.assertIn(lib, cmd.rpath) - self.assertIn(incl, cmd.include_dirs) - - @threading_helper.requires_working_threading() - def test_optional_extension(self): - - # this extension will fail, but let's ignore this failure - # with the optional argument. 
- modules = [Extension('foo', ['xxx'], optional=False)] - dist = Distribution({'name': 'xx', 'ext_modules': modules}) - cmd = self.build_ext(dist) - cmd.ensure_finalized() - self.assertRaises((UnknownFileError, CompileError), - cmd.run) # should raise an error - - modules = [Extension('foo', ['xxx'], optional=True)] - dist = Distribution({'name': 'xx', 'ext_modules': modules}) - cmd = self.build_ext(dist) - cmd.ensure_finalized() - cmd.run() # should pass - - def test_finalize_options(self): - # Make sure Python's include directories (for Python.h, pyconfig.h, - # etc.) are in the include search path. - modules = [Extension('foo', ['xxx'], optional=False)] - dist = Distribution({'name': 'xx', 'ext_modules': modules}) - cmd = self.build_ext(dist) - cmd.finalize_options() - - py_include = sysconfig.get_python_inc() - for p in py_include.split(os.path.pathsep): - self.assertIn(p, cmd.include_dirs) - - plat_py_include = sysconfig.get_python_inc(plat_specific=1) - for p in plat_py_include.split(os.path.pathsep): - self.assertIn(p, cmd.include_dirs) - - # make sure cmd.libraries is turned into a list - # if it's a string - cmd = self.build_ext(dist) - cmd.libraries = 'my_lib, other_lib lastlib' - cmd.finalize_options() - self.assertEqual(cmd.libraries, ['my_lib', 'other_lib', 'lastlib']) - - # make sure cmd.library_dirs is turned into a list - # if it's a string - cmd = self.build_ext(dist) - cmd.library_dirs = 'my_lib_dir%sother_lib_dir' % os.pathsep - cmd.finalize_options() - self.assertIn('my_lib_dir', cmd.library_dirs) - self.assertIn('other_lib_dir', cmd.library_dirs) - - # make sure rpath is turned into a list - # if it's a string - cmd = self.build_ext(dist) - cmd.rpath = 'one%stwo' % os.pathsep - cmd.finalize_options() - self.assertEqual(cmd.rpath, ['one', 'two']) - - # make sure cmd.link_objects is turned into a list - # if it's a string - cmd = build_ext(dist) - cmd.link_objects = 'one two,three' - cmd.finalize_options() - self.assertEqual(cmd.link_objects, ['one', 'two', 'three']) - - # XXX more tests to perform for win32 - - # make sure define is turned into 2-tuples - # strings if they are ','-separated strings - cmd = self.build_ext(dist) - cmd.define = 'one,two' - cmd.finalize_options() - self.assertEqual(cmd.define, [('one', '1'), ('two', '1')]) - - # make sure undef is turned into a list of - # strings if they are ','-separated strings - cmd = self.build_ext(dist) - cmd.undef = 'one,two' - cmd.finalize_options() - self.assertEqual(cmd.undef, ['one', 'two']) - - # make sure swig_opts is turned into a list - cmd = self.build_ext(dist) - cmd.swig_opts = None - cmd.finalize_options() - self.assertEqual(cmd.swig_opts, []) - - cmd = self.build_ext(dist) - cmd.swig_opts = '1 2' - cmd.finalize_options() - self.assertEqual(cmd.swig_opts, ['1', '2']) - - def test_check_extensions_list(self): - dist = Distribution() - cmd = self.build_ext(dist) - cmd.finalize_options() - - #'extensions' option must be a list of Extension instances - self.assertRaises(DistutilsSetupError, - cmd.check_extensions_list, 'foo') - - # each element of 'ext_modules' option must be an - # Extension instance or 2-tuple - exts = [('bar', 'foo', 'bar'), 'foo'] - self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) - - # first element of each tuple in 'ext_modules' - # must be the extension name (a string) and match - # a python dotted-separated name - exts = [('foo-bar', '')] - self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) - - # second element of each tuple in 'ext_modules' 
- # must be a dictionary (build info) - exts = [('foo.bar', '')] - self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) - - # ok this one should pass - exts = [('foo.bar', {'sources': [''], 'libraries': 'foo', - 'some': 'bar'})] - cmd.check_extensions_list(exts) - ext = exts[0] - self.assertIsInstance(ext, Extension) - - # check_extensions_list adds in ext the values passed - # when they are in ('include_dirs', 'library_dirs', 'libraries' - # 'extra_objects', 'extra_compile_args', 'extra_link_args') - self.assertEqual(ext.libraries, 'foo') - self.assertFalse(hasattr(ext, 'some')) - - # 'macros' element of build info dict must be 1- or 2-tuple - exts = [('foo.bar', {'sources': [''], 'libraries': 'foo', - 'some': 'bar', 'macros': [('1', '2', '3'), 'foo']})] - self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) - - exts[0][1]['macros'] = [('1', '2'), ('3',)] - cmd.check_extensions_list(exts) - self.assertEqual(exts[0].undef_macros, ['3']) - self.assertEqual(exts[0].define_macros, [('1', '2')]) - - def test_get_source_files(self): - modules = [Extension('foo', ['xxx'], optional=False)] - dist = Distribution({'name': 'xx', 'ext_modules': modules}) - cmd = self.build_ext(dist) - cmd.ensure_finalized() - self.assertEqual(cmd.get_source_files(), ['xxx']) - - def test_unicode_module_names(self): - modules = [ - Extension('foo', ['aaa'], optional=False), - Extension('föö', ['uuu'], optional=False), - ] - dist = Distribution({'name': 'xx', 'ext_modules': modules}) - cmd = self.build_ext(dist) - cmd.ensure_finalized() - self.assertRegex(cmd.get_ext_filename(modules[0].name), r'foo(_d)?\..*') - self.assertRegex(cmd.get_ext_filename(modules[1].name), r'föö(_d)?\..*') - self.assertEqual(cmd.get_export_symbols(modules[0]), ['PyInit_foo']) - self.assertEqual(cmd.get_export_symbols(modules[1]), ['PyInitU_f_gkaa']) - - def test_compiler_option(self): - # cmd.compiler is an option and - # should not be overridden by a compiler instance - # when the command is run - dist = Distribution() - cmd = self.build_ext(dist) - cmd.compiler = 'unix' - cmd.ensure_finalized() - cmd.run() - self.assertEqual(cmd.compiler, 'unix') - - @support.requires_subprocess() - def test_get_outputs(self): - cmd = support.missing_compiler_executable() - if cmd is not None: - self.skipTest('The %r command is not found' % cmd) - tmp_dir = self.mkdtemp() - c_file = os.path.join(tmp_dir, 'foo.c') - self.write_file(c_file, 'void PyInit_foo(void) {}\n') - ext = Extension('foo', [c_file], optional=False) - dist = Distribution({'name': 'xx', - 'ext_modules': [ext]}) - cmd = self.build_ext(dist) - fixup_build_ext(cmd) - cmd.ensure_finalized() - self.assertEqual(len(cmd.get_outputs()), 1) - - cmd.build_lib = os.path.join(self.tmp_dir, 'build') - cmd.build_temp = os.path.join(self.tmp_dir, 'tempt') - - # issue #5977 : distutils build_ext.get_outputs - # returns wrong result with --inplace - other_tmp_dir = os.path.realpath(self.mkdtemp()) - old_wd = os.getcwd() - os.chdir(other_tmp_dir) - try: - cmd.inplace = 1 - cmd.run() - so_file = cmd.get_outputs()[0] - finally: - os.chdir(old_wd) - self.assertTrue(os.path.exists(so_file)) - ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') - self.assertTrue(so_file.endswith(ext_suffix)) - so_dir = os.path.dirname(so_file) - self.assertEqual(so_dir, other_tmp_dir) - - cmd.inplace = 0 - cmd.compiler = None - cmd.run() - so_file = cmd.get_outputs()[0] - self.assertTrue(os.path.exists(so_file)) - self.assertTrue(so_file.endswith(ext_suffix)) - so_dir = 
os.path.dirname(so_file) - self.assertEqual(so_dir, cmd.build_lib) - - # inplace = 0, cmd.package = 'bar' - build_py = cmd.get_finalized_command('build_py') - build_py.package_dir = {'': 'bar'} - path = cmd.get_ext_fullpath('foo') - # checking that the last directory is the build_dir - path = os.path.split(path)[0] - self.assertEqual(path, cmd.build_lib) - - # inplace = 1, cmd.package = 'bar' - cmd.inplace = 1 - other_tmp_dir = os.path.realpath(self.mkdtemp()) - old_wd = os.getcwd() - os.chdir(other_tmp_dir) - try: - path = cmd.get_ext_fullpath('foo') - finally: - os.chdir(old_wd) - # checking that the last directory is bar - path = os.path.split(path)[0] - lastdir = os.path.split(path)[-1] - self.assertEqual(lastdir, 'bar') - - def test_ext_fullpath(self): - ext = sysconfig.get_config_var('EXT_SUFFIX') - # building lxml.etree inplace - #etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c') - #etree_ext = Extension('lxml.etree', [etree_c]) - #dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]}) - dist = Distribution() - cmd = self.build_ext(dist) - cmd.inplace = 1 - cmd.distribution.package_dir = {'': 'src'} - cmd.distribution.packages = ['lxml', 'lxml.html'] - curdir = os.getcwd() - wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext) - path = cmd.get_ext_fullpath('lxml.etree') - self.assertEqual(wanted, path) - - # building lxml.etree not inplace - cmd.inplace = 0 - cmd.build_lib = os.path.join(curdir, 'tmpdir') - wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext) - path = cmd.get_ext_fullpath('lxml.etree') - self.assertEqual(wanted, path) - - # building twisted.runner.portmap not inplace - build_py = cmd.get_finalized_command('build_py') - build_py.package_dir = {} - cmd.distribution.packages = ['twisted', 'twisted.runner.portmap'] - path = cmd.get_ext_fullpath('twisted.runner.portmap') - wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner', - 'portmap' + ext) - self.assertEqual(wanted, path) - - # building twisted.runner.portmap inplace - cmd.inplace = 1 - path = cmd.get_ext_fullpath('twisted.runner.portmap') - wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext) - self.assertEqual(wanted, path) - - - @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX') - def test_deployment_target_default(self): - # Issue 9516: Test that, in the absence of the environment variable, - # an extension module is compiled with the same deployment target as - # the interpreter. - self._try_compile_deployment_target('==', None) - - @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX') - def test_deployment_target_too_low(self): - # Issue 9516: Test that an extension module is not allowed to be - # compiled with a deployment target less than that of the interpreter. - self.assertRaises(DistutilsPlatformError, - self._try_compile_deployment_target, '>', '10.1') - - @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX') - def test_deployment_target_higher_ok(self): - # Issue 9516: Test that an extension module can be compiled with a - # deployment target higher than that of the interpreter: the ext - # module may depend on some newer OS feature. - deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') - if deptarget: - # increment the minor version number (i.e. 
10.6 -> 10.7) - deptarget = [int(x) for x in deptarget.split('.')] - deptarget[-1] += 1 - deptarget = '.'.join(str(i) for i in deptarget) - self._try_compile_deployment_target('<', deptarget) - - def _try_compile_deployment_target(self, operator, target): - orig_environ = os.environ - os.environ = orig_environ.copy() - self.addCleanup(setattr, os, 'environ', orig_environ) - - if target is None: - if os.environ.get('MACOSX_DEPLOYMENT_TARGET'): - del os.environ['MACOSX_DEPLOYMENT_TARGET'] - else: - os.environ['MACOSX_DEPLOYMENT_TARGET'] = target - - deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c') - - with open(deptarget_c, 'w') as fp: - fp.write(textwrap.dedent('''\ - #include - - int dummy; - - #if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED - #else - #error "Unexpected target" - #endif - - ''' % operator)) - - # get the deployment target that the interpreter was built with - target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') - target = tuple(map(int, target.split('.')[0:2])) - # format the target value as defined in the Apple - # Availability Macros. We can't use the macro names since - # at least one value we test with will not exist yet. - if target[:2] < (10, 10): - # for 10.1 through 10.9.x -> "10n0" - target = '%02d%01d0' % target - else: - # for 10.10 and beyond -> "10nn00" - if len(target) >= 2: - target = '%02d%02d00' % target - else: - # 11 and later can have no minor version (11 instead of 11.0) - target = '%02d0000' % target - deptarget_ext = Extension( - 'deptarget', - [deptarget_c], - extra_compile_args=['-DTARGET=%s'%(target,)], - ) - dist = Distribution({ - 'name': 'deptarget', - 'ext_modules': [deptarget_ext] - }) - dist.package_dir = self.tmp_dir - cmd = self.build_ext(dist) - cmd.build_lib = self.tmp_dir - cmd.build_temp = self.tmp_dir - - try: - old_stdout = sys.stdout - if not support.verbose: - # silence compiler output - sys.stdout = StringIO() - try: - cmd.ensure_finalized() - cmd.run() - finally: - sys.stdout = old_stdout - - except CompileError: - self.fail("Wrong deployment target during compilation") - - -class ParallelBuildExtTestCase(BuildExtTestCase): - - def build_ext(self, *args, **kwargs): - build_ext = super().build_ext(*args, **kwargs) - build_ext.parallel = True - return build_ext - - -def test_suite(): - suite = unittest.TestSuite() - suite.addTest(unittest.TestLoader().loadTestsFromTestCase(BuildExtTestCase)) - suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ParallelBuildExtTestCase)) - return suite - -if __name__ == '__main__': - support.run_unittest(__name__) diff --git a/Lib/distutils/tests/test_build_py.py b/Lib/distutils/tests/test_build_py.py deleted file mode 100644 index 44a06cc963aa3c6..000000000000000 --- a/Lib/distutils/tests/test_build_py.py +++ /dev/null @@ -1,181 +0,0 @@ -"""Tests for distutils.command.build_py.""" - -import os -import sys -import unittest - -from distutils.command.build_py import build_py -from distutils.core import Distribution -from distutils.errors import DistutilsFileError - -from distutils.tests import support -from test.support import run_unittest, requires_subprocess - - -class BuildPyTestCase(support.TempdirManager, - support.LoggingSilencer, - unittest.TestCase): - - def test_package_data(self): - sources = self.mkdtemp() - f = open(os.path.join(sources, "__init__.py"), "w") - try: - f.write("# Pretend this is a package.") - finally: - f.close() - f = open(os.path.join(sources, "README.txt"), "w") - try: - f.write("Info about this package") - finally: - f.close() - - destination = 
self.mkdtemp() - - dist = Distribution({"packages": ["pkg"], - "package_dir": {"pkg": sources}}) - # script_name need not exist, it just need to be initialized - dist.script_name = os.path.join(sources, "setup.py") - dist.command_obj["build"] = support.DummyCommand( - force=0, - build_lib=destination) - dist.packages = ["pkg"] - dist.package_data = {"pkg": ["README.txt"]} - dist.package_dir = {"pkg": sources} - - cmd = build_py(dist) - cmd.compile = 1 - cmd.ensure_finalized() - self.assertEqual(cmd.package_data, dist.package_data) - - cmd.run() - - # This makes sure the list of outputs includes byte-compiled - # files for Python modules but not for package data files - # (there shouldn't *be* byte-code files for those!). - self.assertEqual(len(cmd.get_outputs()), 3) - pkgdest = os.path.join(destination, "pkg") - files = os.listdir(pkgdest) - pycache_dir = os.path.join(pkgdest, "__pycache__") - self.assertIn("__init__.py", files) - self.assertIn("README.txt", files) - if sys.dont_write_bytecode: - self.assertFalse(os.path.exists(pycache_dir)) - else: - pyc_files = os.listdir(pycache_dir) - self.assertIn("__init__.%s.pyc" % sys.implementation.cache_tag, - pyc_files) - - def test_empty_package_dir(self): - # See bugs #1668596/#1720897 - sources = self.mkdtemp() - open(os.path.join(sources, "__init__.py"), "w").close() - - testdir = os.path.join(sources, "doc") - os.mkdir(testdir) - open(os.path.join(testdir, "testfile"), "w").close() - - os.chdir(sources) - dist = Distribution({"packages": ["pkg"], - "package_dir": {"pkg": ""}, - "package_data": {"pkg": ["doc/*"]}}) - # script_name need not exist, it just need to be initialized - dist.script_name = os.path.join(sources, "setup.py") - dist.script_args = ["build"] - dist.parse_command_line() - - try: - dist.run_commands() - except DistutilsFileError: - self.fail("failed package_data test when package_dir is ''") - - @unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled') - @requires_subprocess() - def test_byte_compile(self): - project_dir, dist = self.create_dist(py_modules=['boiledeggs']) - os.chdir(project_dir) - self.write_file('boiledeggs.py', 'import antigravity') - cmd = build_py(dist) - cmd.compile = 1 - cmd.build_lib = 'here' - cmd.finalize_options() - cmd.run() - - found = os.listdir(cmd.build_lib) - self.assertEqual(sorted(found), ['__pycache__', 'boiledeggs.py']) - found = os.listdir(os.path.join(cmd.build_lib, '__pycache__')) - self.assertEqual(found, - ['boiledeggs.%s.pyc' % sys.implementation.cache_tag]) - - @unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled') - @requires_subprocess() - def test_byte_compile_optimized(self): - project_dir, dist = self.create_dist(py_modules=['boiledeggs']) - os.chdir(project_dir) - self.write_file('boiledeggs.py', 'import antigravity') - cmd = build_py(dist) - cmd.compile = 0 - cmd.optimize = 1 - cmd.build_lib = 'here' - cmd.finalize_options() - cmd.run() - - found = os.listdir(cmd.build_lib) - self.assertEqual(sorted(found), ['__pycache__', 'boiledeggs.py']) - found = os.listdir(os.path.join(cmd.build_lib, '__pycache__')) - expect = 'boiledeggs.{}.opt-1.pyc'.format(sys.implementation.cache_tag) - self.assertEqual(sorted(found), [expect]) - - def test_dir_in_package_data(self): - """ - A directory in package_data should not be added to the filelist. 
- """ - # See bug 19286 - sources = self.mkdtemp() - pkg_dir = os.path.join(sources, "pkg") - - os.mkdir(pkg_dir) - open(os.path.join(pkg_dir, "__init__.py"), "w").close() - - docdir = os.path.join(pkg_dir, "doc") - os.mkdir(docdir) - open(os.path.join(docdir, "testfile"), "w").close() - - # create the directory that could be incorrectly detected as a file - os.mkdir(os.path.join(docdir, 'otherdir')) - - os.chdir(sources) - dist = Distribution({"packages": ["pkg"], - "package_data": {"pkg": ["doc/*"]}}) - # script_name need not exist, it just need to be initialized - dist.script_name = os.path.join(sources, "setup.py") - dist.script_args = ["build"] - dist.parse_command_line() - - try: - dist.run_commands() - except DistutilsFileError: - self.fail("failed package_data when data dir includes a dir") - - def test_dont_write_bytecode(self): - # makes sure byte_compile is not used - dist = self.create_dist()[1] - cmd = build_py(dist) - cmd.compile = 1 - cmd.optimize = 1 - - old_dont_write_bytecode = sys.dont_write_bytecode - sys.dont_write_bytecode = True - try: - cmd.byte_compile([]) - finally: - sys.dont_write_bytecode = old_dont_write_bytecode - - self.assertIn('byte-compiling is disabled', - self.logs[0][1] % self.logs[0][2]) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(BuildPyTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_build_scripts.py b/Lib/distutils/tests/test_build_scripts.py deleted file mode 100644 index f299e51ef79fac1..000000000000000 --- a/Lib/distutils/tests/test_build_scripts.py +++ /dev/null @@ -1,112 +0,0 @@ -"""Tests for distutils.command.build_scripts.""" - -import os -import unittest - -from distutils.command.build_scripts import build_scripts -from distutils.core import Distribution -from distutils import sysconfig - -from distutils.tests import support -from test.support import run_unittest - - -class BuildScriptsTestCase(support.TempdirManager, - support.LoggingSilencer, - unittest.TestCase): - - def test_default_settings(self): - cmd = self.get_build_scripts_cmd("/foo/bar", []) - self.assertFalse(cmd.force) - self.assertIsNone(cmd.build_dir) - - cmd.finalize_options() - - self.assertTrue(cmd.force) - self.assertEqual(cmd.build_dir, "/foo/bar") - - def test_build(self): - source = self.mkdtemp() - target = self.mkdtemp() - expected = self.write_sample_scripts(source) - - cmd = self.get_build_scripts_cmd(target, - [os.path.join(source, fn) - for fn in expected]) - cmd.finalize_options() - cmd.run() - - built = os.listdir(target) - for name in expected: - self.assertIn(name, built) - - def get_build_scripts_cmd(self, target, scripts): - import sys - dist = Distribution() - dist.scripts = scripts - dist.command_obj["build"] = support.DummyCommand( - build_scripts=target, - force=1, - executable=sys.executable - ) - return build_scripts(dist) - - def write_sample_scripts(self, dir): - expected = [] - expected.append("script1.py") - self.write_script(dir, "script1.py", - ("#! 
/usr/bin/env python2.3\n" - "# bogus script w/ Python sh-bang\n" - "pass\n")) - expected.append("script2.py") - self.write_script(dir, "script2.py", - ("#!/usr/bin/python\n" - "# bogus script w/ Python sh-bang\n" - "pass\n")) - expected.append("shell.sh") - self.write_script(dir, "shell.sh", - ("#!/bin/sh\n" - "# bogus shell script w/ sh-bang\n" - "exit 0\n")) - return expected - - def write_script(self, dir, name, text): - f = open(os.path.join(dir, name), "w") - try: - f.write(text) - finally: - f.close() - - def test_version_int(self): - source = self.mkdtemp() - target = self.mkdtemp() - expected = self.write_sample_scripts(source) - - - cmd = self.get_build_scripts_cmd(target, - [os.path.join(source, fn) - for fn in expected]) - cmd.finalize_options() - - # http://bugs.python.org/issue4524 - # - # On linux-g++-32 with command line `./configure --enable-ipv6 - # --with-suffix=3`, python is compiled okay but the build scripts - # failed when writing the name of the executable - old = sysconfig.get_config_vars().get('VERSION') - sysconfig._config_vars['VERSION'] = 4 - try: - cmd.run() - finally: - if old is not None: - sysconfig._config_vars['VERSION'] = old - - built = os.listdir(target) - for name in expected: - self.assertIn(name, built) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(BuildScriptsTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_check.py b/Lib/distutils/tests/test_check.py deleted file mode 100644 index 91bcdceb43bc69f..000000000000000 --- a/Lib/distutils/tests/test_check.py +++ /dev/null @@ -1,163 +0,0 @@ -"""Tests for distutils.command.check.""" -import os -import textwrap -import unittest -from test.support import run_unittest - -from distutils.command.check import check, HAS_DOCUTILS -from distutils.tests import support -from distutils.errors import DistutilsSetupError - -try: - import pygments -except ImportError: - pygments = None - - -HERE = os.path.dirname(__file__) - - -class CheckTestCase(support.LoggingSilencer, - support.TempdirManager, - unittest.TestCase): - - def _run(self, metadata=None, cwd=None, **options): - if metadata is None: - metadata = {} - if cwd is not None: - old_dir = os.getcwd() - os.chdir(cwd) - pkg_info, dist = self.create_dist(**metadata) - cmd = check(dist) - cmd.initialize_options() - for name, value in options.items(): - setattr(cmd, name, value) - cmd.ensure_finalized() - cmd.run() - if cwd is not None: - os.chdir(old_dir) - return cmd - - def test_check_metadata(self): - # let's run the command with no metadata at all - # by default, check is checking the metadata - # should have some warnings - cmd = self._run() - self.assertEqual(cmd._warnings, 2) - - # now let's add the required fields - # and run it again, to make sure we don't get - # any warning anymore - metadata = {'url': 'xxx', 'author': 'xxx', - 'author_email': 'xxx', - 'name': 'xxx', 'version': 'xxx'} - cmd = self._run(metadata) - self.assertEqual(cmd._warnings, 0) - - # now with the strict mode, we should - # get an error if there are missing metadata - self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1}) - - # and of course, no error when all metadata are present - cmd = self._run(metadata, strict=1) - self.assertEqual(cmd._warnings, 0) - - # now a test with non-ASCII characters - metadata = {'url': 'xxx', 'author': '\u00c9ric', - 'author_email': 'xxx', 'name': 'xxx', - 'version': 'xxx', - 'description': 'Something about esszet \u00df', - 'long_description': 'More 
things about esszet \u00df'} - cmd = self._run(metadata) - self.assertEqual(cmd._warnings, 0) - - @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils") - def test_check_document(self): - pkg_info, dist = self.create_dist() - cmd = check(dist) - - # let's see if it detects broken rest - broken_rest = 'title\n===\n\ntest' - msgs = cmd._check_rst_data(broken_rest) - self.assertEqual(len(msgs), 1) - - # and non-broken rest - rest = 'title\n=====\n\ntest' - msgs = cmd._check_rst_data(rest) - self.assertEqual(len(msgs), 0) - - @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils") - def test_check_restructuredtext(self): - # let's see if it detects broken rest in long_description - broken_rest = 'title\n===\n\ntest' - pkg_info, dist = self.create_dist(long_description=broken_rest) - cmd = check(dist) - cmd.check_restructuredtext() - self.assertEqual(cmd._warnings, 1) - - # let's see if we have an error with strict=1 - metadata = {'url': 'xxx', 'author': 'xxx', - 'author_email': 'xxx', - 'name': 'xxx', 'version': 'xxx', - 'long_description': broken_rest} - self.assertRaises(DistutilsSetupError, self._run, metadata, - **{'strict': 1, 'restructuredtext': 1}) - - # and non-broken rest, including a non-ASCII character to test #12114 - metadata['long_description'] = 'title\n=====\n\ntest \u00df' - cmd = self._run(metadata, strict=1, restructuredtext=1) - self.assertEqual(cmd._warnings, 0) - - # check that includes work to test #31292 - metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst' - cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1) - self.assertEqual(cmd._warnings, 0) - - @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils") - def test_check_restructuredtext_with_syntax_highlight(self): - # Don't fail if there is a `code` or `code-block` directive - - example_rst_docs = [] - example_rst_docs.append(textwrap.dedent("""\ - Here's some code: - - .. code:: python - - def foo(): - pass - """)) - example_rst_docs.append(textwrap.dedent("""\ - Here's some code: - - .. code-block:: python - - def foo(): - pass - """)) - - for rest_with_code in example_rst_docs: - pkg_info, dist = self.create_dist(long_description=rest_with_code) - cmd = check(dist) - cmd.check_restructuredtext() - msgs = cmd._check_rst_data(rest_with_code) - if pygments is not None: - self.assertEqual(len(msgs), 0) - else: - self.assertEqual(len(msgs), 1) - self.assertEqual( - str(msgs[0][1]), - 'Cannot analyze code. Pygments package not found.' 
- ) - - def test_check_all(self): - - metadata = {'url': 'xxx', 'author': 'xxx'} - self.assertRaises(DistutilsSetupError, self._run, - {}, **{'strict': 1, - 'restructuredtext': 1}) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(CheckTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_clean.py b/Lib/distutils/tests/test_clean.py deleted file mode 100644 index 92367499cefc046..000000000000000 --- a/Lib/distutils/tests/test_clean.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Tests for distutils.command.clean.""" -import os -import unittest - -from distutils.command.clean import clean -from distutils.tests import support -from test.support import run_unittest - -class cleanTestCase(support.TempdirManager, - support.LoggingSilencer, - unittest.TestCase): - - def test_simple_run(self): - pkg_dir, dist = self.create_dist() - cmd = clean(dist) - - # let's add some elements clean should remove - dirs = [(d, os.path.join(pkg_dir, d)) - for d in ('build_temp', 'build_lib', 'bdist_base', - 'build_scripts', 'build_base')] - - for name, path in dirs: - os.mkdir(path) - setattr(cmd, name, path) - if name == 'build_base': - continue - for f in ('one', 'two', 'three'): - self.write_file(os.path.join(path, f)) - - # let's run the command - cmd.all = 1 - cmd.ensure_finalized() - cmd.run() - - # make sure the files where removed - for name, path in dirs: - self.assertFalse(os.path.exists(path), - '%s was not removed' % path) - - # let's run the command again (should spit warnings but succeed) - cmd.all = 1 - cmd.ensure_finalized() - cmd.run() - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(cleanTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_cmd.py b/Lib/distutils/tests/test_cmd.py deleted file mode 100644 index 2319214a9e332b8..000000000000000 --- a/Lib/distutils/tests/test_cmd.py +++ /dev/null @@ -1,126 +0,0 @@ -"""Tests for distutils.cmd.""" -import unittest -import os -from test.support import captured_stdout, run_unittest - -from distutils.cmd import Command -from distutils.dist import Distribution -from distutils.errors import DistutilsOptionError -from distutils import debug - -class MyCmd(Command): - def initialize_options(self): - pass - -class CommandTestCase(unittest.TestCase): - - def setUp(self): - dist = Distribution() - self.cmd = MyCmd(dist) - - def test_ensure_string_list(self): - - cmd = self.cmd - cmd.not_string_list = ['one', 2, 'three'] - cmd.yes_string_list = ['one', 'two', 'three'] - cmd.not_string_list2 = object() - cmd.yes_string_list2 = 'ok' - cmd.ensure_string_list('yes_string_list') - cmd.ensure_string_list('yes_string_list2') - - self.assertRaises(DistutilsOptionError, - cmd.ensure_string_list, 'not_string_list') - - self.assertRaises(DistutilsOptionError, - cmd.ensure_string_list, 'not_string_list2') - - cmd.option1 = 'ok,dok' - cmd.ensure_string_list('option1') - self.assertEqual(cmd.option1, ['ok', 'dok']) - - cmd.option2 = ['xxx', 'www'] - cmd.ensure_string_list('option2') - - cmd.option3 = ['ok', 2] - self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, - 'option3') - - - def test_make_file(self): - - cmd = self.cmd - - # making sure it raises when infiles is not a string or a list/tuple - self.assertRaises(TypeError, cmd.make_file, - infiles=1, outfile='', func='func', args=()) - - # making sure execute gets called properly - def _execute(func, args, exec_msg, level): - self.assertEqual(exec_msg, 'generating 
out from in') - cmd.force = True - cmd.execute = _execute - cmd.make_file(infiles='in', outfile='out', func='func', args=()) - - def test_dump_options(self): - - msgs = [] - def _announce(msg, level): - msgs.append(msg) - cmd = self.cmd - cmd.announce = _announce - cmd.option1 = 1 - cmd.option2 = 1 - cmd.user_options = [('option1', '', ''), ('option2', '', '')] - cmd.dump_options() - - wanted = ["command options for 'MyCmd':", ' option1 = 1', - ' option2 = 1'] - self.assertEqual(msgs, wanted) - - def test_ensure_string(self): - cmd = self.cmd - cmd.option1 = 'ok' - cmd.ensure_string('option1') - - cmd.option2 = None - cmd.ensure_string('option2', 'xxx') - self.assertTrue(hasattr(cmd, 'option2')) - - cmd.option3 = 1 - self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3') - - def test_ensure_filename(self): - cmd = self.cmd - cmd.option1 = __file__ - cmd.ensure_filename('option1') - cmd.option2 = 'xxx' - self.assertRaises(DistutilsOptionError, cmd.ensure_filename, 'option2') - - def test_ensure_dirname(self): - cmd = self.cmd - cmd.option1 = os.path.dirname(__file__) or os.curdir - cmd.ensure_dirname('option1') - cmd.option2 = 'xxx' - self.assertRaises(DistutilsOptionError, cmd.ensure_dirname, 'option2') - - def test_debug_print(self): - cmd = self.cmd - with captured_stdout() as stdout: - cmd.debug_print('xxx') - stdout.seek(0) - self.assertEqual(stdout.read(), '') - - debug.DEBUG = True - try: - with captured_stdout() as stdout: - cmd.debug_print('xxx') - stdout.seek(0) - self.assertEqual(stdout.read(), 'xxx\n') - finally: - debug.DEBUG = False - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(CommandTestCase) - -if __name__ == '__main__': - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_config.py b/Lib/distutils/tests/test_config.py deleted file mode 100644 index 8ab70efb161cbd8..000000000000000 --- a/Lib/distutils/tests/test_config.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Tests for distutils.pypirc.pypirc.""" -import os -import unittest - -from distutils.core import PyPIRCCommand -from distutils.core import Distribution -from distutils.log import set_threshold -from distutils.log import WARN - -from distutils.tests import support -from test.support import run_unittest - -PYPIRC = """\ -[distutils] - -index-servers = - server1 - server2 - server3 - -[server1] -username:me -password:secret - -[server2] -username:meagain -password: secret -realm:acme -repository:http://another.pypi/ - -[server3] -username:cbiggles -password:yh^%#rest-of-my-password -""" - -PYPIRC_OLD = """\ -[server-login] -username:tarek -password:secret -""" - -WANTED = """\ -[distutils] -index-servers = - pypi - -[pypi] -username:tarek -password:xxx -""" - - -class BasePyPIRCCommandTestCase(support.TempdirManager, - support.LoggingSilencer, - support.EnvironGuard, - unittest.TestCase): - - def setUp(self): - """Patches the environment.""" - super(BasePyPIRCCommandTestCase, self).setUp() - self.tmp_dir = self.mkdtemp() - os.environ['HOME'] = self.tmp_dir - os.environ['USERPROFILE'] = self.tmp_dir - self.rc = os.path.join(self.tmp_dir, '.pypirc') - self.dist = Distribution() - - class command(PyPIRCCommand): - def __init__(self, dist): - PyPIRCCommand.__init__(self, dist) - def initialize_options(self): - pass - finalize_options = initialize_options - - self._cmd = command - self.old_threshold = set_threshold(WARN) - - def tearDown(self): - """Removes the patch.""" - set_threshold(self.old_threshold) - super(BasePyPIRCCommandTestCase, self).tearDown() - - -class 
PyPIRCCommandTestCase(BasePyPIRCCommandTestCase): - - def test_server_registration(self): - # This test makes sure PyPIRCCommand knows how to: - # 1. handle several sections in .pypirc - # 2. handle the old format - - # new format - self.write_file(self.rc, PYPIRC) - cmd = self._cmd(self.dist) - config = cmd._read_pypirc() - - config = list(sorted(config.items())) - waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://upload.pypi.org/legacy/'), - ('server', 'server1'), ('username', 'me')] - self.assertEqual(config, waited) - - # old format - self.write_file(self.rc, PYPIRC_OLD) - config = cmd._read_pypirc() - config = list(sorted(config.items())) - waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://upload.pypi.org/legacy/'), - ('server', 'server-login'), ('username', 'tarek')] - self.assertEqual(config, waited) - - def test_server_empty_registration(self): - cmd = self._cmd(self.dist) - rc = cmd._get_rc_file() - self.assertFalse(os.path.exists(rc)) - cmd._store_pypirc('tarek', 'xxx') - self.assertTrue(os.path.exists(rc)) - f = open(rc) - try: - content = f.read() - self.assertEqual(content, WANTED) - finally: - f.close() - - def test_config_interpolation(self): - # using the % character in .pypirc should not raise an error (#20120) - self.write_file(self.rc, PYPIRC) - cmd = self._cmd(self.dist) - cmd.repository = 'server3' - config = cmd._read_pypirc() - - config = list(sorted(config.items())) - waited = [('password', 'yh^%#rest-of-my-password'), ('realm', 'pypi'), - ('repository', 'https://upload.pypi.org/legacy/'), - ('server', 'server3'), ('username', 'cbiggles')] - self.assertEqual(config, waited) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(PyPIRCCommandTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_config_cmd.py b/Lib/distutils/tests/test_config_cmd.py deleted file mode 100644 index c79db68aae115d1..000000000000000 --- a/Lib/distutils/tests/test_config_cmd.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Tests for distutils.command.config.""" -import unittest -import os -import sys -import sysconfig -from test.support import ( - run_unittest, missing_compiler_executable, requires_subprocess -) - -from distutils.command.config import dump_file, config -from distutils.tests import support -from distutils import log - -class ConfigTestCase(support.LoggingSilencer, - support.TempdirManager, - unittest.TestCase): - - def _info(self, msg, *args): - for line in msg.splitlines(): - self._logs.append(line) - - def setUp(self): - super(ConfigTestCase, self).setUp() - self._logs = [] - self.old_log = log.info - log.info = self._info - self.old_config_vars = dict(sysconfig._CONFIG_VARS) - - def tearDown(self): - log.info = self.old_log - sysconfig._CONFIG_VARS.clear() - sysconfig._CONFIG_VARS.update(self.old_config_vars) - super(ConfigTestCase, self).tearDown() - - def test_dump_file(self): - this_file = os.path.splitext(__file__)[0] + '.py' - f = open(this_file) - try: - numlines = len(f.readlines()) - finally: - f.close() - - dump_file(this_file, 'I am the header') - self.assertEqual(len(self._logs), numlines+1) - - @unittest.skipIf(sys.platform == 'win32', "can't test on Windows") - @requires_subprocess() - def test_search_cpp(self): - cmd = missing_compiler_executable(['preprocessor']) - if cmd is not None: - self.skipTest('The %r command is not found' % cmd) - pkg_dir, dist = self.create_dist() - cmd = config(dist) - cmd._check_compiler() - compiler = 
cmd.compiler - if sys.platform[:3] == "aix" and "xlc" in compiler.preprocessor[0].lower(): - self.skipTest('xlc: The -E option overrides the -P, -o, and -qsyntaxonly options') - - # simple pattern searches - match = cmd.search_cpp(pattern='xxx', body='/* xxx */') - self.assertEqual(match, 0) - - match = cmd.search_cpp(pattern='_configtest', body='/* xxx */') - self.assertEqual(match, 1) - - def test_finalize_options(self): - # finalize_options does a bit of transformation - # on options - pkg_dir, dist = self.create_dist() - cmd = config(dist) - cmd.include_dirs = 'one%stwo' % os.pathsep - cmd.libraries = 'one' - cmd.library_dirs = 'three%sfour' % os.pathsep - cmd.ensure_finalized() - - self.assertEqual(cmd.include_dirs, ['one', 'two']) - self.assertEqual(cmd.libraries, ['one']) - self.assertEqual(cmd.library_dirs, ['three', 'four']) - - def test_clean(self): - # _clean removes files - tmp_dir = self.mkdtemp() - f1 = os.path.join(tmp_dir, 'one') - f2 = os.path.join(tmp_dir, 'two') - - self.write_file(f1, 'xxx') - self.write_file(f2, 'xxx') - - for f in (f1, f2): - self.assertTrue(os.path.exists(f)) - - pkg_dir, dist = self.create_dist() - cmd = config(dist) - cmd._clean(f1, f2) - - for f in (f1, f2): - self.assertFalse(os.path.exists(f)) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(ConfigTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_core.py b/Lib/distutils/tests/test_core.py deleted file mode 100644 index 700a22da045d454..000000000000000 --- a/Lib/distutils/tests/test_core.py +++ /dev/null @@ -1,140 +0,0 @@ -"""Tests for distutils.core.""" - -import io -import distutils.core -import os -import shutil -import sys -from test.support import captured_stdout, run_unittest -from test.support import os_helper -import unittest -from distutils.tests import support -from distutils import log - -# setup script that uses __file__ -setup_using___file__ = """\ - -__file__ - -from distutils.core import setup -setup() -""" - -setup_prints_cwd = """\ - -import os -print(os.getcwd()) - -from distutils.core import setup -setup() -""" - -setup_does_nothing = """\ -from distutils.core import setup -setup() -""" - - -setup_defines_subclass = """\ -from distutils.core import setup -from distutils.command.install import install as _install - -class install(_install): - sub_commands = _install.sub_commands + ['cmd'] - -setup(cmdclass={'install': install}) -""" - -class CoreTestCase(support.EnvironGuard, unittest.TestCase): - - def setUp(self): - super(CoreTestCase, self).setUp() - self.old_stdout = sys.stdout - self.cleanup_testfn() - self.old_argv = sys.argv, sys.argv[:] - self.addCleanup(log.set_threshold, log._global_log.threshold) - - def tearDown(self): - sys.stdout = self.old_stdout - self.cleanup_testfn() - sys.argv = self.old_argv[0] - sys.argv[:] = self.old_argv[1] - super(CoreTestCase, self).tearDown() - - def cleanup_testfn(self): - path = os_helper.TESTFN - if os.path.isfile(path): - os.remove(path) - elif os.path.isdir(path): - shutil.rmtree(path) - - def write_setup(self, text, path=os_helper.TESTFN): - f = open(path, "w") - try: - f.write(text) - finally: - f.close() - return path - - def test_run_setup_provides_file(self): - # Make sure the script can use __file__; if that's missing, the test - # setup.py script will raise NameError. 
- distutils.core.run_setup( - self.write_setup(setup_using___file__)) - - def test_run_setup_preserves_sys_argv(self): - # Make sure run_setup does not clobber sys.argv - argv_copy = sys.argv.copy() - distutils.core.run_setup( - self.write_setup(setup_does_nothing)) - self.assertEqual(sys.argv, argv_copy) - - def test_run_setup_defines_subclass(self): - # Make sure the script can use __file__; if that's missing, the test - # setup.py script will raise NameError. - dist = distutils.core.run_setup( - self.write_setup(setup_defines_subclass)) - install = dist.get_command_obj('install') - self.assertIn('cmd', install.sub_commands) - - def test_run_setup_uses_current_dir(self): - # This tests that the setup script is run with the current directory - # as its own current directory; this was temporarily broken by a - # previous patch when TESTFN did not use the current directory. - sys.stdout = io.StringIO() - cwd = os.getcwd() - - # Create a directory and write the setup.py file there: - os.mkdir(os_helper.TESTFN) - setup_py = os.path.join(os_helper.TESTFN, "setup.py") - distutils.core.run_setup( - self.write_setup(setup_prints_cwd, path=setup_py)) - - output = sys.stdout.getvalue() - if output.endswith("\n"): - output = output[:-1] - self.assertEqual(cwd, output) - - def test_debug_mode(self): - # this covers the code called when DEBUG is set - sys.argv = ['setup.py', '--name'] - with captured_stdout() as stdout: - distutils.core.setup(name='bar') - stdout.seek(0) - self.assertEqual(stdout.read(), 'bar\n') - - distutils.core.DEBUG = True - try: - with captured_stdout() as stdout: - distutils.core.setup(name='bar') - finally: - distutils.core.DEBUG = False - stdout.seek(0) - wanted = "options (after parsing config files):\n" - self.assertEqual(stdout.readlines()[0], wanted) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(CoreTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_cygwinccompiler.py b/Lib/distutils/tests/test_cygwinccompiler.py deleted file mode 100644 index 0912ffd15c8ee96..000000000000000 --- a/Lib/distutils/tests/test_cygwinccompiler.py +++ /dev/null @@ -1,154 +0,0 @@ -"""Tests for distutils.cygwinccompiler.""" -import unittest -import sys -import os -from io import BytesIO -from test.support import run_unittest - -from distutils import cygwinccompiler -from distutils.cygwinccompiler import (check_config_h, - CONFIG_H_OK, CONFIG_H_NOTOK, - CONFIG_H_UNCERTAIN, get_versions, - get_msvcr) -from distutils.tests import support - -class FakePopen(object): - test_class = None - - def __init__(self, cmd, shell, stdout): - self.cmd = cmd.split()[0] - exes = self.test_class._exes - if self.cmd in exes: - # issue #6438 in Python 3.x, Popen returns bytes - self.stdout = BytesIO(exes[self.cmd]) - else: - self.stdout = os.popen(cmd, 'r') - - -class CygwinCCompilerTestCase(support.TempdirManager, - unittest.TestCase): - - def setUp(self): - super(CygwinCCompilerTestCase, self).setUp() - self.version = sys.version - self.python_h = os.path.join(self.mkdtemp(), 'python.h') - from distutils import sysconfig - self.old_get_config_h_filename = sysconfig.get_config_h_filename - sysconfig.get_config_h_filename = self._get_config_h_filename - self.old_find_executable = cygwinccompiler.find_executable - cygwinccompiler.find_executable = self._find_executable - self._exes = {} - self.old_popen = cygwinccompiler.Popen - FakePopen.test_class = self - cygwinccompiler.Popen = FakePopen - - def tearDown(self): - sys.version = 
self.version - from distutils import sysconfig - sysconfig.get_config_h_filename = self.old_get_config_h_filename - cygwinccompiler.find_executable = self.old_find_executable - cygwinccompiler.Popen = self.old_popen - super(CygwinCCompilerTestCase, self).tearDown() - - def _get_config_h_filename(self): - return self.python_h - - def _find_executable(self, name): - if name in self._exes: - return name - return None - - def test_check_config_h(self): - - # check_config_h looks for "GCC" in sys.version first - # returns CONFIG_H_OK if found - sys.version = ('2.6.1 (r261:67515, Dec 6 2008, 16:42:21) \n[GCC ' - '4.0.1 (Apple Computer, Inc. build 5370)]') - - self.assertEqual(check_config_h()[0], CONFIG_H_OK) - - # then it tries to see if it can find "__GNUC__" in pyconfig.h - sys.version = 'something without the *CC word' - - # if the file doesn't exist it returns CONFIG_H_UNCERTAIN - self.assertEqual(check_config_h()[0], CONFIG_H_UNCERTAIN) - - # if it exists but does not contain __GNUC__, it returns CONFIG_H_NOTOK - self.write_file(self.python_h, 'xxx') - self.assertEqual(check_config_h()[0], CONFIG_H_NOTOK) - - # and CONFIG_H_OK if __GNUC__ is found - self.write_file(self.python_h, 'xxx __GNUC__ xxx') - self.assertEqual(check_config_h()[0], CONFIG_H_OK) - - def test_get_versions(self): - - # get_versions calls distutils.spawn.find_executable on - # 'gcc', 'ld' and 'dllwrap' - self.assertEqual(get_versions(), (None, None, None)) - - # Let's fake we have 'gcc' and it returns '3.4.5' - self._exes['gcc'] = b'gcc (GCC) 3.4.5 (mingw special)\nFSF' - res = get_versions() - self.assertEqual(str(res[0]), '3.4.5') - - # and let's see what happens when the version - # doesn't match the regular expression - # (\d+\.\d+(\.\d+)*) - self._exes['gcc'] = b'very strange output' - res = get_versions() - self.assertEqual(res[0], None) - - # same thing for ld - self._exes['ld'] = b'GNU ld version 2.17.50 20060824' - res = get_versions() - self.assertEqual(str(res[1]), '2.17.50') - self._exes['ld'] = b'@(#)PROGRAM:ld PROJECT:ld64-77' - res = get_versions() - self.assertEqual(res[1], None) - - # and dllwrap - self._exes['dllwrap'] = b'GNU dllwrap 2.17.50 20060824\nFSF' - res = get_versions() - self.assertEqual(str(res[2]), '2.17.50') - self._exes['dllwrap'] = b'Cheese Wrap' - res = get_versions() - self.assertEqual(res[2], None) - - def test_get_msvcr(self): - - # none - sys.version = ('2.6.1 (r261:67515, Dec 6 2008, 16:42:21) ' - '\n[GCC 4.0.1 (Apple Computer, Inc. 
build 5370)]') - self.assertEqual(get_msvcr(), None) - - # MSVC 7.0 - sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' - '[MSC v.1300 32 bits (Intel)]') - self.assertEqual(get_msvcr(), ['msvcr70']) - - # MSVC 7.1 - sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' - '[MSC v.1310 32 bits (Intel)]') - self.assertEqual(get_msvcr(), ['msvcr71']) - - # VS2005 / MSVC 8.0 - sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' - '[MSC v.1400 32 bits (Intel)]') - self.assertEqual(get_msvcr(), ['msvcr80']) - - # VS2008 / MSVC 9.0 - sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' - '[MSC v.1500 32 bits (Intel)]') - self.assertEqual(get_msvcr(), ['msvcr90']) - - # unknown - sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' - '[MSC v.1999 32 bits (Intel)]') - self.assertRaises(ValueError, get_msvcr) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(CygwinCCompilerTestCase) - -if __name__ == '__main__': - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_dep_util.py b/Lib/distutils/tests/test_dep_util.py deleted file mode 100644 index 0d52740a9edda39..000000000000000 --- a/Lib/distutils/tests/test_dep_util.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Tests for distutils.dep_util.""" -import unittest -import os - -from distutils.dep_util import newer, newer_pairwise, newer_group -from distutils.errors import DistutilsFileError -from distutils.tests import support -from test.support import run_unittest - -class DepUtilTestCase(support.TempdirManager, unittest.TestCase): - - def test_newer(self): - - tmpdir = self.mkdtemp() - new_file = os.path.join(tmpdir, 'new') - old_file = os.path.abspath(__file__) - - # Raise DistutilsFileError if 'new_file' does not exist. - self.assertRaises(DistutilsFileError, newer, new_file, old_file) - - # Return true if 'new_file' exists and is more recently modified than - # 'old_file', or if 'new_file' exists and 'old_file' doesn't. - self.write_file(new_file) - self.assertTrue(newer(new_file, 'I_dont_exist')) - self.assertTrue(newer(new_file, old_file)) - - # Return false if both exist and 'old_file' is the same age or younger - # than 'new_file'. - self.assertFalse(newer(old_file, new_file)) - - def test_newer_pairwise(self): - tmpdir = self.mkdtemp() - sources = os.path.join(tmpdir, 'sources') - targets = os.path.join(tmpdir, 'targets') - os.mkdir(sources) - os.mkdir(targets) - one = os.path.join(sources, 'one') - two = os.path.join(sources, 'two') - three = os.path.abspath(__file__) # I am the old file - four = os.path.join(targets, 'four') - self.write_file(one) - self.write_file(two) - self.write_file(four) - - self.assertEqual(newer_pairwise([one, two], [three, four]), - ([one],[three])) - - def test_newer_group(self): - tmpdir = self.mkdtemp() - sources = os.path.join(tmpdir, 'sources') - os.mkdir(sources) - one = os.path.join(sources, 'one') - two = os.path.join(sources, 'two') - three = os.path.join(sources, 'three') - old_file = os.path.abspath(__file__) - - # return true if 'old_file' is out-of-date with respect to any file - # listed in 'sources'. 
- self.write_file(one) - self.write_file(two) - self.write_file(three) - self.assertTrue(newer_group([one, two, three], old_file)) - self.assertFalse(newer_group([one, two, old_file], three)) - - # missing handling - os.remove(one) - self.assertRaises(OSError, newer_group, [one, two, old_file], three) - - self.assertFalse(newer_group([one, two, old_file], three, - missing='ignore')) - - self.assertTrue(newer_group([one, two, old_file], three, - missing='newer')) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(DepUtilTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_dir_util.py b/Lib/distutils/tests/test_dir_util.py deleted file mode 100644 index ebd89f320dca893..000000000000000 --- a/Lib/distutils/tests/test_dir_util.py +++ /dev/null @@ -1,143 +0,0 @@ -"""Tests for distutils.dir_util.""" -import unittest -import os -import stat -import sys -from unittest.mock import patch - -from distutils import dir_util, errors -from distutils.dir_util import (mkpath, remove_tree, create_tree, copy_tree, - ensure_relative) - -from distutils import log -from distutils.tests import support -from test.support import run_unittest, is_emscripten, is_wasi - - -class DirUtilTestCase(support.TempdirManager, unittest.TestCase): - - def _log(self, msg, *args): - if len(args) > 0: - self._logs.append(msg % args) - else: - self._logs.append(msg) - - def setUp(self): - super(DirUtilTestCase, self).setUp() - self._logs = [] - tmp_dir = self.mkdtemp() - self.root_target = os.path.join(tmp_dir, 'deep') - self.target = os.path.join(self.root_target, 'here') - self.target2 = os.path.join(tmp_dir, 'deep2') - self.old_log = log.info - log.info = self._log - - def tearDown(self): - log.info = self.old_log - super(DirUtilTestCase, self).tearDown() - - def test_mkpath_remove_tree_verbosity(self): - - mkpath(self.target, verbose=0) - wanted = [] - self.assertEqual(self._logs, wanted) - remove_tree(self.root_target, verbose=0) - - mkpath(self.target, verbose=1) - wanted = ['creating %s' % self.root_target, - 'creating %s' % self.target] - self.assertEqual(self._logs, wanted) - self._logs = [] - - remove_tree(self.root_target, verbose=1) - wanted = ["removing '%s' (and everything under it)" % self.root_target] - self.assertEqual(self._logs, wanted) - - @unittest.skipIf(sys.platform.startswith('win'), - "This test is only appropriate for POSIX-like systems.") - @unittest.skipIf( - is_emscripten or is_wasi, - "Emscripten's/WASI's umask is a stub." - ) - def test_mkpath_with_custom_mode(self): - # Get and set the current umask value for testing mode bits. 
- umask = os.umask(0o002) - os.umask(umask) - mkpath(self.target, 0o700) - self.assertEqual( - stat.S_IMODE(os.stat(self.target).st_mode), 0o700 & ~umask) - mkpath(self.target2, 0o555) - self.assertEqual( - stat.S_IMODE(os.stat(self.target2).st_mode), 0o555 & ~umask) - - def test_create_tree_verbosity(self): - - create_tree(self.root_target, ['one', 'two', 'three'], verbose=0) - self.assertEqual(self._logs, []) - remove_tree(self.root_target, verbose=0) - - wanted = ['creating %s' % self.root_target] - create_tree(self.root_target, ['one', 'two', 'three'], verbose=1) - self.assertEqual(self._logs, wanted) - - remove_tree(self.root_target, verbose=0) - - def test_copy_tree_verbosity(self): - - mkpath(self.target, verbose=0) - - copy_tree(self.target, self.target2, verbose=0) - self.assertEqual(self._logs, []) - - remove_tree(self.root_target, verbose=0) - - mkpath(self.target, verbose=0) - a_file = os.path.join(self.target, 'ok.txt') - with open(a_file, 'w') as f: - f.write('some content') - - wanted = ['copying %s -> %s' % (a_file, self.target2)] - copy_tree(self.target, self.target2, verbose=1) - self.assertEqual(self._logs, wanted) - - remove_tree(self.root_target, verbose=0) - remove_tree(self.target2, verbose=0) - - def test_copy_tree_skips_nfs_temp_files(self): - mkpath(self.target, verbose=0) - - a_file = os.path.join(self.target, 'ok.txt') - nfs_file = os.path.join(self.target, '.nfs123abc') - for f in a_file, nfs_file: - with open(f, 'w') as fh: - fh.write('some content') - - copy_tree(self.target, self.target2) - self.assertEqual(os.listdir(self.target2), ['ok.txt']) - - remove_tree(self.root_target, verbose=0) - remove_tree(self.target2, verbose=0) - - def test_ensure_relative(self): - if os.sep == '/': - self.assertEqual(ensure_relative('/home/foo'), 'home/foo') - self.assertEqual(ensure_relative('some/path'), 'some/path') - else: # \\ - self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo') - self.assertEqual(ensure_relative('home\\foo'), 'home\\foo') - - def test_copy_tree_exception_in_listdir(self): - """ - An exception in listdir should raise a DistutilsFileError - """ - with patch("os.listdir", side_effect=OSError()), \ - self.assertRaises(errors.DistutilsFileError): - src = self.tempdirs[-1] - dir_util.copy_tree(src, None) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(DirUtilTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_dist.py b/Lib/distutils/tests/test_dist.py deleted file mode 100644 index 2ef70d987f36bb4..000000000000000 --- a/Lib/distutils/tests/test_dist.py +++ /dev/null @@ -1,529 +0,0 @@ -"""Tests for distutils.dist.""" -import os -import io -import sys -import unittest -import warnings -import textwrap - -from unittest import mock - -from distutils.dist import Distribution, fix_help_options -from distutils.cmd import Command - -from test.support import ( - captured_stdout, captured_stderr, run_unittest -) -from test.support.os_helper import TESTFN -from distutils.tests import support -from distutils import log - - -class test_dist(Command): - """Sample distutils extension command.""" - - user_options = [ - ("sample-option=", "S", "help text"), - ] - - def initialize_options(self): - self.sample_option = None - - -class TestDistribution(Distribution): - """Distribution subclasses that avoids the default search for - configuration files. - - The ._config_files attribute must be set before - .parse_config_files() is called. 
- """ - - def find_config_files(self): - return self._config_files - - -class DistributionTestCase(support.LoggingSilencer, - support.TempdirManager, - support.EnvironGuard, - unittest.TestCase): - - def setUp(self): - super(DistributionTestCase, self).setUp() - self.argv = sys.argv, sys.argv[:] - del sys.argv[1:] - - def tearDown(self): - sys.argv = self.argv[0] - sys.argv[:] = self.argv[1] - super(DistributionTestCase, self).tearDown() - - def create_distribution(self, configfiles=()): - d = TestDistribution() - d._config_files = configfiles - d.parse_config_files() - d.parse_command_line() - return d - - def test_command_packages_unspecified(self): - sys.argv.append("build") - d = self.create_distribution() - self.assertEqual(d.get_command_packages(), ["distutils.command"]) - - def test_command_packages_cmdline(self): - from distutils.tests.test_dist import test_dist - sys.argv.extend(["--command-packages", - "foo.bar,distutils.tests", - "test_dist", - "-Ssometext", - ]) - d = self.create_distribution() - # let's actually try to load our test command: - self.assertEqual(d.get_command_packages(), - ["distutils.command", "foo.bar", "distutils.tests"]) - cmd = d.get_command_obj("test_dist") - self.assertIsInstance(cmd, test_dist) - self.assertEqual(cmd.sample_option, "sometext") - - def test_venv_install_options(self): - sys.argv.append("install") - self.addCleanup(os.unlink, TESTFN) - - fakepath = '/somedir' - - with open(TESTFN, "w") as f: - print(("[install]\n" - "install-base = {0}\n" - "install-platbase = {0}\n" - "install-lib = {0}\n" - "install-platlib = {0}\n" - "install-purelib = {0}\n" - "install-headers = {0}\n" - "install-scripts = {0}\n" - "install-data = {0}\n" - "prefix = {0}\n" - "exec-prefix = {0}\n" - "home = {0}\n" - "user = {0}\n" - "root = {0}").format(fakepath), file=f) - - # Base case: Not in a Virtual Environment - with mock.patch.multiple(sys, prefix='/a', base_prefix='/a') as values: - d = self.create_distribution([TESTFN]) - - option_tuple = (TESTFN, fakepath) - - result_dict = { - 'install_base': option_tuple, - 'install_platbase': option_tuple, - 'install_lib': option_tuple, - 'install_platlib': option_tuple, - 'install_purelib': option_tuple, - 'install_headers': option_tuple, - 'install_scripts': option_tuple, - 'install_data': option_tuple, - 'prefix': option_tuple, - 'exec_prefix': option_tuple, - 'home': option_tuple, - 'user': option_tuple, - 'root': option_tuple, - } - - self.assertEqual( - sorted(d.command_options.get('install').keys()), - sorted(result_dict.keys())) - - for (key, value) in d.command_options.get('install').items(): - self.assertEqual(value, result_dict[key]) - - # Test case: In a Virtual Environment - with mock.patch.multiple(sys, prefix='/a', base_prefix='/b') as values: - d = self.create_distribution([TESTFN]) - - for key in result_dict.keys(): - self.assertNotIn(key, d.command_options.get('install', {})) - - def test_command_packages_configfile(self): - sys.argv.append("build") - self.addCleanup(os.unlink, TESTFN) - f = open(TESTFN, "w") - try: - print("[global]", file=f) - print("command_packages = foo.bar, splat", file=f) - finally: - f.close() - - d = self.create_distribution([TESTFN]) - self.assertEqual(d.get_command_packages(), - ["distutils.command", "foo.bar", "splat"]) - - # ensure command line overrides config: - sys.argv[1:] = ["--command-packages", "spork", "build"] - d = self.create_distribution([TESTFN]) - self.assertEqual(d.get_command_packages(), - ["distutils.command", "spork"]) - - # Setting --command-packages to '' 
should cause the default to - # be used even if a config file specified something else: - sys.argv[1:] = ["--command-packages", "", "build"] - d = self.create_distribution([TESTFN]) - self.assertEqual(d.get_command_packages(), ["distutils.command"]) - - def test_empty_options(self): - # an empty options dictionary should not stay in the - # list of attributes - - # catching warnings - warns = [] - - def _warn(msg): - warns.append(msg) - - self.addCleanup(setattr, warnings, 'warn', warnings.warn) - warnings.warn = _warn - dist = Distribution(attrs={'author': 'xxx', 'name': 'xxx', - 'version': 'xxx', 'url': 'xxxx', - 'options': {}}) - - self.assertEqual(len(warns), 0) - self.assertNotIn('options', dir(dist)) - - def test_finalize_options(self): - attrs = {'keywords': 'one,two', - 'platforms': 'one,two'} - - dist = Distribution(attrs=attrs) - dist.finalize_options() - - # finalize_option splits platforms and keywords - self.assertEqual(dist.metadata.platforms, ['one', 'two']) - self.assertEqual(dist.metadata.keywords, ['one', 'two']) - - attrs = {'keywords': 'foo bar', - 'platforms': 'foo bar'} - dist = Distribution(attrs=attrs) - dist.finalize_options() - self.assertEqual(dist.metadata.platforms, ['foo bar']) - self.assertEqual(dist.metadata.keywords, ['foo bar']) - - def test_get_command_packages(self): - dist = Distribution() - self.assertEqual(dist.command_packages, None) - cmds = dist.get_command_packages() - self.assertEqual(cmds, ['distutils.command']) - self.assertEqual(dist.command_packages, - ['distutils.command']) - - dist.command_packages = 'one,two' - cmds = dist.get_command_packages() - self.assertEqual(cmds, ['distutils.command', 'one', 'two']) - - def test_announce(self): - # make sure the level is known - dist = Distribution() - args = ('ok',) - kwargs = {'level': 'ok2'} - self.assertRaises(ValueError, dist.announce, args, kwargs) - - - def test_find_config_files_disable(self): - # Ticket #1180: Allow user to disable their home config file. 
- temp_home = self.mkdtemp() - if os.name == 'posix': - user_filename = os.path.join(temp_home, ".pydistutils.cfg") - else: - user_filename = os.path.join(temp_home, "pydistutils.cfg") - - with open(user_filename, 'w') as f: - f.write('[distutils]\n') - - def _expander(path): - return temp_home - - old_expander = os.path.expanduser - os.path.expanduser = _expander - try: - d = Distribution() - all_files = d.find_config_files() - - d = Distribution(attrs={'script_args': ['--no-user-cfg']}) - files = d.find_config_files() - finally: - os.path.expanduser = old_expander - - # make sure --no-user-cfg disables the user cfg file - self.assertEqual(len(all_files)-1, len(files)) - -class MetadataTestCase(support.TempdirManager, support.EnvironGuard, - unittest.TestCase): - - def setUp(self): - super(MetadataTestCase, self).setUp() - self.argv = sys.argv, sys.argv[:] - - def tearDown(self): - sys.argv = self.argv[0] - sys.argv[:] = self.argv[1] - super(MetadataTestCase, self).tearDown() - - def format_metadata(self, dist): - sio = io.StringIO() - dist.metadata.write_pkg_file(sio) - return sio.getvalue() - - def test_simple_metadata(self): - attrs = {"name": "package", - "version": "1.0"} - dist = Distribution(attrs) - meta = self.format_metadata(dist) - self.assertIn("Metadata-Version: 1.0", meta) - self.assertNotIn("provides:", meta.lower()) - self.assertNotIn("requires:", meta.lower()) - self.assertNotIn("obsoletes:", meta.lower()) - - def test_provides(self): - attrs = {"name": "package", - "version": "1.0", - "provides": ["package", "package.sub"]} - dist = Distribution(attrs) - self.assertEqual(dist.metadata.get_provides(), - ["package", "package.sub"]) - self.assertEqual(dist.get_provides(), - ["package", "package.sub"]) - meta = self.format_metadata(dist) - self.assertIn("Metadata-Version: 1.1", meta) - self.assertNotIn("requires:", meta.lower()) - self.assertNotIn("obsoletes:", meta.lower()) - - def test_provides_illegal(self): - self.assertRaises(ValueError, Distribution, - {"name": "package", - "version": "1.0", - "provides": ["my.pkg (splat)"]}) - - def test_requires(self): - attrs = {"name": "package", - "version": "1.0", - "requires": ["other", "another (==1.0)"]} - dist = Distribution(attrs) - self.assertEqual(dist.metadata.get_requires(), - ["other", "another (==1.0)"]) - self.assertEqual(dist.get_requires(), - ["other", "another (==1.0)"]) - meta = self.format_metadata(dist) - self.assertIn("Metadata-Version: 1.1", meta) - self.assertNotIn("provides:", meta.lower()) - self.assertIn("Requires: other", meta) - self.assertIn("Requires: another (==1.0)", meta) - self.assertNotIn("obsoletes:", meta.lower()) - - def test_requires_illegal(self): - self.assertRaises(ValueError, Distribution, - {"name": "package", - "version": "1.0", - "requires": ["my.pkg (splat)"]}) - - def test_requires_to_list(self): - attrs = {"name": "package", - "requires": iter(["other"])} - dist = Distribution(attrs) - self.assertIsInstance(dist.metadata.requires, list) - - - def test_obsoletes(self): - attrs = {"name": "package", - "version": "1.0", - "obsoletes": ["other", "another (<1.0)"]} - dist = Distribution(attrs) - self.assertEqual(dist.metadata.get_obsoletes(), - ["other", "another (<1.0)"]) - self.assertEqual(dist.get_obsoletes(), - ["other", "another (<1.0)"]) - meta = self.format_metadata(dist) - self.assertIn("Metadata-Version: 1.1", meta) - self.assertNotIn("provides:", meta.lower()) - self.assertNotIn("requires:", meta.lower()) - self.assertIn("Obsoletes: other", meta) - self.assertIn("Obsoletes: 
another (<1.0)", meta) - - def test_obsoletes_illegal(self): - self.assertRaises(ValueError, Distribution, - {"name": "package", - "version": "1.0", - "obsoletes": ["my.pkg (splat)"]}) - - def test_obsoletes_to_list(self): - attrs = {"name": "package", - "obsoletes": iter(["other"])} - dist = Distribution(attrs) - self.assertIsInstance(dist.metadata.obsoletes, list) - - def test_classifier(self): - attrs = {'name': 'Boa', 'version': '3.0', - 'classifiers': ['Programming Language :: Python :: 3']} - dist = Distribution(attrs) - self.assertEqual(dist.get_classifiers(), - ['Programming Language :: Python :: 3']) - meta = self.format_metadata(dist) - self.assertIn('Metadata-Version: 1.1', meta) - - def test_classifier_invalid_type(self): - attrs = {'name': 'Boa', 'version': '3.0', - 'classifiers': ('Programming Language :: Python :: 3',)} - with captured_stderr() as error: - d = Distribution(attrs) - # should have warning about passing a non-list - self.assertIn('should be a list', error.getvalue()) - # should be converted to a list - self.assertIsInstance(d.metadata.classifiers, list) - self.assertEqual(d.metadata.classifiers, - list(attrs['classifiers'])) - - def test_keywords(self): - attrs = {'name': 'Monty', 'version': '1.0', - 'keywords': ['spam', 'eggs', 'life of brian']} - dist = Distribution(attrs) - self.assertEqual(dist.get_keywords(), - ['spam', 'eggs', 'life of brian']) - - def test_keywords_invalid_type(self): - attrs = {'name': 'Monty', 'version': '1.0', - 'keywords': ('spam', 'eggs', 'life of brian')} - with captured_stderr() as error: - d = Distribution(attrs) - # should have warning about passing a non-list - self.assertIn('should be a list', error.getvalue()) - # should be converted to a list - self.assertIsInstance(d.metadata.keywords, list) - self.assertEqual(d.metadata.keywords, list(attrs['keywords'])) - - def test_platforms(self): - attrs = {'name': 'Monty', 'version': '1.0', - 'platforms': ['GNU/Linux', 'Some Evil Platform']} - dist = Distribution(attrs) - self.assertEqual(dist.get_platforms(), - ['GNU/Linux', 'Some Evil Platform']) - - def test_platforms_invalid_types(self): - attrs = {'name': 'Monty', 'version': '1.0', - 'platforms': ('GNU/Linux', 'Some Evil Platform')} - with captured_stderr() as error: - d = Distribution(attrs) - # should have warning about passing a non-list - self.assertIn('should be a list', error.getvalue()) - # should be converted to a list - self.assertIsInstance(d.metadata.platforms, list) - self.assertEqual(d.metadata.platforms, list(attrs['platforms'])) - - def test_download_url(self): - attrs = {'name': 'Boa', 'version': '3.0', - 'download_url': 'http://example.org/boa'} - dist = Distribution(attrs) - meta = self.format_metadata(dist) - self.assertIn('Metadata-Version: 1.1', meta) - - def test_long_description(self): - long_desc = textwrap.dedent("""\ - example:: - We start here - and continue here - and end here.""") - attrs = {"name": "package", - "version": "1.0", - "long_description": long_desc} - - dist = Distribution(attrs) - meta = self.format_metadata(dist) - meta = meta.replace('\n' + 8 * ' ', '\n') - self.assertIn(long_desc, meta) - - def test_custom_pydistutils(self): - # fixes #2166 - # make sure pydistutils.cfg is found - if os.name == 'posix': - user_filename = ".pydistutils.cfg" - else: - user_filename = "pydistutils.cfg" - - temp_dir = self.mkdtemp() - user_filename = os.path.join(temp_dir, user_filename) - f = open(user_filename, 'w') - try: - f.write('.') - finally: - f.close() - - try: - dist = Distribution() - - # 
linux-style - if sys.platform in ('linux', 'darwin'): - os.environ['HOME'] = temp_dir - files = dist.find_config_files() - self.assertIn(user_filename, files) - - # win32-style - if sys.platform == 'win32': - # home drive should be found - os.environ['USERPROFILE'] = temp_dir - files = dist.find_config_files() - self.assertIn(user_filename, files, - '%r not found in %r' % (user_filename, files)) - finally: - os.remove(user_filename) - - def test_fix_help_options(self): - help_tuples = [('a', 'b', 'c', 'd'), (1, 2, 3, 4)] - fancy_options = fix_help_options(help_tuples) - self.assertEqual(fancy_options[0], ('a', 'b', 'c')) - self.assertEqual(fancy_options[1], (1, 2, 3)) - - def test_show_help(self): - # smoke test, just makes sure some help is displayed - self.addCleanup(log.set_threshold, log._global_log.threshold) - dist = Distribution() - sys.argv = [] - dist.help = 1 - dist.script_name = 'setup.py' - with captured_stdout() as s: - dist.parse_command_line() - - output = [line for line in s.getvalue().split('\n') - if line.strip() != ''] - self.assertTrue(output) - - - def test_read_metadata(self): - attrs = {"name": "package", - "version": "1.0", - "long_description": "desc", - "description": "xxx", - "download_url": "http://example.com", - "keywords": ['one', 'two'], - "requires": ['foo']} - - dist = Distribution(attrs) - metadata = dist.metadata - - # write it then reloads it - PKG_INFO = io.StringIO() - metadata.write_pkg_file(PKG_INFO) - PKG_INFO.seek(0) - metadata.read_pkg_file(PKG_INFO) - - self.assertEqual(metadata.name, "package") - self.assertEqual(metadata.version, "1.0") - self.assertEqual(metadata.description, "xxx") - self.assertEqual(metadata.download_url, 'http://example.com') - self.assertEqual(metadata.keywords, ['one', 'two']) - self.assertEqual(metadata.platforms, ['UNKNOWN']) - self.assertEqual(metadata.obsoletes, None) - self.assertEqual(metadata.requires, ['foo']) - -def test_suite(): - suite = unittest.TestSuite() - suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DistributionTestCase)) - suite.addTest(unittest.TestLoader().loadTestsFromTestCase(MetadataTestCase)) - return suite - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_extension.py b/Lib/distutils/tests/test_extension.py deleted file mode 100644 index 2b08930eafb10a5..000000000000000 --- a/Lib/distutils/tests/test_extension.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Tests for distutils.extension.""" -import unittest -import os -import warnings - -from test.support import run_unittest -from test.support.warnings_helper import check_warnings -from distutils.extension import read_setup_file, Extension - -class ExtensionTestCase(unittest.TestCase): - - def test_read_setup_file(self): - # trying to read a Setup file - # (sample extracted from the PyGame project) - setup = os.path.join(os.path.dirname(__file__), 'Setup.sample') - - exts = read_setup_file(setup) - names = [ext.name for ext in exts] - names.sort() - - # here are the extensions read_setup_file should have created - # out of the file - wanted = ['_arraysurfarray', '_camera', '_numericsndarray', - '_numericsurfarray', 'base', 'bufferproxy', 'cdrom', - 'color', 'constants', 'display', 'draw', 'event', - 'fastevent', 'font', 'gfxdraw', 'image', 'imageext', - 'joystick', 'key', 'mask', 'mixer', 'mixer_music', - 'mouse', 'movie', 'overlay', 'pixelarray', 'pypm', - 'rect', 'rwobject', 'scrap', 'surface', 'surflock', - 'time', 'transform'] - - self.assertEqual(names, wanted) - - def 
test_extension_init(self): - # the first argument, which is the name, must be a string - self.assertRaises(AssertionError, Extension, 1, []) - ext = Extension('name', []) - self.assertEqual(ext.name, 'name') - - # the second argument, which is the list of files, must - # be a list of strings - self.assertRaises(AssertionError, Extension, 'name', 'file') - self.assertRaises(AssertionError, Extension, 'name', ['file', 1]) - ext = Extension('name', ['file1', 'file2']) - self.assertEqual(ext.sources, ['file1', 'file2']) - - # others arguments have defaults - for attr in ('include_dirs', 'define_macros', 'undef_macros', - 'library_dirs', 'libraries', 'runtime_library_dirs', - 'extra_objects', 'extra_compile_args', 'extra_link_args', - 'export_symbols', 'swig_opts', 'depends'): - self.assertEqual(getattr(ext, attr), []) - - self.assertEqual(ext.language, None) - self.assertEqual(ext.optional, None) - - # if there are unknown keyword options, warn about them - with check_warnings() as w: - warnings.simplefilter('always') - ext = Extension('name', ['file1', 'file2'], chic=True) - - self.assertEqual(len(w.warnings), 1) - self.assertEqual(str(w.warnings[0].message), - "Unknown Extension options: 'chic'") - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(ExtensionTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_file_util.py b/Lib/distutils/tests/test_file_util.py deleted file mode 100644 index 551151b0143661b..000000000000000 --- a/Lib/distutils/tests/test_file_util.py +++ /dev/null @@ -1,126 +0,0 @@ -"""Tests for distutils.file_util.""" -import unittest -import os -import errno -from unittest.mock import patch - -from distutils.file_util import move_file, copy_file -from distutils import log -from distutils.tests import support -from distutils.errors import DistutilsFileError -from test.support import run_unittest -from test.support.os_helper import unlink - - -class FileUtilTestCase(support.TempdirManager, unittest.TestCase): - - def _log(self, msg, *args): - if len(args) > 0: - self._logs.append(msg % args) - else: - self._logs.append(msg) - - def setUp(self): - super(FileUtilTestCase, self).setUp() - self._logs = [] - self.old_log = log.info - log.info = self._log - tmp_dir = self.mkdtemp() - self.source = os.path.join(tmp_dir, 'f1') - self.target = os.path.join(tmp_dir, 'f2') - self.target_dir = os.path.join(tmp_dir, 'd1') - - def tearDown(self): - log.info = self.old_log - super(FileUtilTestCase, self).tearDown() - - def test_move_file_verbosity(self): - f = open(self.source, 'w') - try: - f.write('some content') - finally: - f.close() - - move_file(self.source, self.target, verbose=0) - wanted = [] - self.assertEqual(self._logs, wanted) - - # back to original state - move_file(self.target, self.source, verbose=0) - - move_file(self.source, self.target, verbose=1) - wanted = ['moving %s -> %s' % (self.source, self.target)] - self.assertEqual(self._logs, wanted) - - # back to original state - move_file(self.target, self.source, verbose=0) - - self._logs = [] - # now the target is a dir - os.mkdir(self.target_dir) - move_file(self.source, self.target_dir, verbose=1) - wanted = ['moving %s -> %s' % (self.source, self.target_dir)] - self.assertEqual(self._logs, wanted) - - def test_move_file_exception_unpacking_rename(self): - # see issue 22182 - with patch("os.rename", side_effect=OSError("wrong", 1)), \ - self.assertRaises(DistutilsFileError): - with open(self.source, 'w') as fobj: - fobj.write('spam eggs') - 
move_file(self.source, self.target, verbose=0) - - def test_move_file_exception_unpacking_unlink(self): - # see issue 22182 - with patch("os.rename", side_effect=OSError(errno.EXDEV, "wrong")), \ - patch("os.unlink", side_effect=OSError("wrong", 1)), \ - self.assertRaises(DistutilsFileError): - with open(self.source, 'w') as fobj: - fobj.write('spam eggs') - move_file(self.source, self.target, verbose=0) - - @unittest.skipUnless(hasattr(os, 'link'), 'requires os.link') - def test_copy_file_hard_link(self): - with open(self.source, 'w') as f: - f.write('some content') - # Check first that copy_file() will not fall back on copying the file - # instead of creating the hard link. - try: - os.link(self.source, self.target) - except OSError as e: - self.skipTest('os.link: %s' % e) - else: - unlink(self.target) - st = os.stat(self.source) - copy_file(self.source, self.target, link='hard') - st2 = os.stat(self.source) - st3 = os.stat(self.target) - self.assertTrue(os.path.samestat(st, st2), (st, st2)) - self.assertTrue(os.path.samestat(st2, st3), (st2, st3)) - with open(self.source, 'r') as f: - self.assertEqual(f.read(), 'some content') - - @unittest.skipUnless(hasattr(os, 'link'), 'requires os.link') - def test_copy_file_hard_link_failure(self): - # If hard linking fails, copy_file() falls back on copying file - # (some special filesystems don't support hard linking even under - # Unix, see issue #8876). - with open(self.source, 'w') as f: - f.write('some content') - st = os.stat(self.source) - with patch("os.link", side_effect=OSError(0, "linking unsupported")): - copy_file(self.source, self.target, link='hard') - st2 = os.stat(self.source) - st3 = os.stat(self.target) - self.assertTrue(os.path.samestat(st, st2), (st, st2)) - self.assertFalse(os.path.samestat(st2, st3), (st2, st3)) - for fn in (self.source, self.target): - with open(fn, 'r') as f: - self.assertEqual(f.read(), 'some content') - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(FileUtilTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_filelist.py b/Lib/distutils/tests/test_filelist.py deleted file mode 100644 index 98c97e49f80db57..000000000000000 --- a/Lib/distutils/tests/test_filelist.py +++ /dev/null @@ -1,340 +0,0 @@ -"""Tests for distutils.filelist.""" -import os -import re -import unittest -from distutils import debug -from distutils.log import WARN -from distutils.errors import DistutilsTemplateError -from distutils.filelist import glob_to_re, translate_pattern, FileList -from distutils import filelist - -from test.support import os_helper -from test.support import captured_stdout, run_unittest -from distutils.tests import support - -MANIFEST_IN = """\ -include ok -include xo -exclude xo -include foo.tmp -include buildout.cfg -global-include *.x -global-include *.txt -global-exclude *.tmp -recursive-include f *.oo -recursive-exclude global *.x -graft dir -prune dir3 -""" - - -def make_local_path(s): - """Converts '/' in a string to os.sep""" - return s.replace('/', os.sep) - - -class FileListTestCase(support.LoggingSilencer, - unittest.TestCase): - - def assertNoWarnings(self): - self.assertEqual(self.get_logs(WARN), []) - self.clear_logs() - - def assertWarnings(self): - self.assertGreater(len(self.get_logs(WARN)), 0) - self.clear_logs() - - def test_glob_to_re(self): - sep = os.sep - if os.sep == '\\': - sep = re.escape(os.sep) - - for glob, regex in ( - # simple cases - ('foo*', r'(?s:foo[^%(sep)s]*)\Z'), - ('foo?', 
r'(?s:foo[^%(sep)s])\Z'), - ('foo??', r'(?s:foo[^%(sep)s][^%(sep)s])\Z'), - # special cases - (r'foo\\*', r'(?s:foo\\\\[^%(sep)s]*)\Z'), - (r'foo\\\*', r'(?s:foo\\\\\\[^%(sep)s]*)\Z'), - ('foo????', r'(?s:foo[^%(sep)s][^%(sep)s][^%(sep)s][^%(sep)s])\Z'), - (r'foo\\??', r'(?s:foo\\\\[^%(sep)s][^%(sep)s])\Z')): - regex = regex % {'sep': sep} - self.assertEqual(glob_to_re(glob), regex) - - def test_process_template_line(self): - # testing all MANIFEST.in template patterns - file_list = FileList() - l = make_local_path - - # simulated file list - file_list.allfiles = ['foo.tmp', 'ok', 'xo', 'four.txt', - 'buildout.cfg', - # filelist does not filter out VCS directories, - # it's sdist that does - l('.hg/last-message.txt'), - l('global/one.txt'), - l('global/two.txt'), - l('global/files.x'), - l('global/here.tmp'), - l('f/o/f.oo'), - l('dir/graft-one'), - l('dir/dir2/graft2'), - l('dir3/ok'), - l('dir3/sub/ok.txt'), - ] - - for line in MANIFEST_IN.split('\n'): - if line.strip() == '': - continue - file_list.process_template_line(line) - - wanted = ['ok', - 'buildout.cfg', - 'four.txt', - l('.hg/last-message.txt'), - l('global/one.txt'), - l('global/two.txt'), - l('f/o/f.oo'), - l('dir/graft-one'), - l('dir/dir2/graft2'), - ] - - self.assertEqual(file_list.files, wanted) - - def test_debug_print(self): - file_list = FileList() - with captured_stdout() as stdout: - file_list.debug_print('xxx') - self.assertEqual(stdout.getvalue(), '') - - debug.DEBUG = True - try: - with captured_stdout() as stdout: - file_list.debug_print('xxx') - self.assertEqual(stdout.getvalue(), 'xxx\n') - finally: - debug.DEBUG = False - - def test_set_allfiles(self): - file_list = FileList() - files = ['a', 'b', 'c'] - file_list.set_allfiles(files) - self.assertEqual(file_list.allfiles, files) - - def test_remove_duplicates(self): - file_list = FileList() - file_list.files = ['a', 'b', 'a', 'g', 'c', 'g'] - # files must be sorted beforehand (sdist does it) - file_list.sort() - file_list.remove_duplicates() - self.assertEqual(file_list.files, ['a', 'b', 'c', 'g']) - - def test_translate_pattern(self): - # not regex - self.assertTrue(hasattr( - translate_pattern('a', anchor=True, is_regex=False), - 'search')) - - # is a regex - regex = re.compile('a') - self.assertEqual( - translate_pattern(regex, anchor=True, is_regex=True), - regex) - - # plain string flagged as regex - self.assertTrue(hasattr( - translate_pattern('a', anchor=True, is_regex=True), - 'search')) - - # glob support - self.assertTrue(translate_pattern( - '*.py', anchor=True, is_regex=False).search('filelist.py')) - - def test_exclude_pattern(self): - # return False if no match - file_list = FileList() - self.assertFalse(file_list.exclude_pattern('*.py')) - - # return True if files match - file_list = FileList() - file_list.files = ['a.py', 'b.py'] - self.assertTrue(file_list.exclude_pattern('*.py')) - - # test excludes - file_list = FileList() - file_list.files = ['a.py', 'a.txt'] - file_list.exclude_pattern('*.py') - self.assertEqual(file_list.files, ['a.txt']) - - def test_include_pattern(self): - # return False if no match - file_list = FileList() - file_list.set_allfiles([]) - self.assertFalse(file_list.include_pattern('*.py')) - - # return True if files match - file_list = FileList() - file_list.set_allfiles(['a.py', 'b.txt']) - self.assertTrue(file_list.include_pattern('*.py')) - - # test * matches all files - file_list = FileList() - self.assertIsNone(file_list.allfiles) - file_list.set_allfiles(['a.py', 'b.txt']) - file_list.include_pattern('*') - 
self.assertEqual(file_list.allfiles, ['a.py', 'b.txt']) - - def test_process_template(self): - l = make_local_path - # invalid lines - file_list = FileList() - for action in ('include', 'exclude', 'global-include', - 'global-exclude', 'recursive-include', - 'recursive-exclude', 'graft', 'prune', 'blarg'): - self.assertRaises(DistutilsTemplateError, - file_list.process_template_line, action) - - # include - file_list = FileList() - file_list.set_allfiles(['a.py', 'b.txt', l('d/c.py')]) - - file_list.process_template_line('include *.py') - self.assertEqual(file_list.files, ['a.py']) - self.assertNoWarnings() - - file_list.process_template_line('include *.rb') - self.assertEqual(file_list.files, ['a.py']) - self.assertWarnings() - - # exclude - file_list = FileList() - file_list.files = ['a.py', 'b.txt', l('d/c.py')] - - file_list.process_template_line('exclude *.py') - self.assertEqual(file_list.files, ['b.txt', l('d/c.py')]) - self.assertNoWarnings() - - file_list.process_template_line('exclude *.rb') - self.assertEqual(file_list.files, ['b.txt', l('d/c.py')]) - self.assertWarnings() - - # global-include - file_list = FileList() - file_list.set_allfiles(['a.py', 'b.txt', l('d/c.py')]) - - file_list.process_template_line('global-include *.py') - self.assertEqual(file_list.files, ['a.py', l('d/c.py')]) - self.assertNoWarnings() - - file_list.process_template_line('global-include *.rb') - self.assertEqual(file_list.files, ['a.py', l('d/c.py')]) - self.assertWarnings() - - # global-exclude - file_list = FileList() - file_list.files = ['a.py', 'b.txt', l('d/c.py')] - - file_list.process_template_line('global-exclude *.py') - self.assertEqual(file_list.files, ['b.txt']) - self.assertNoWarnings() - - file_list.process_template_line('global-exclude *.rb') - self.assertEqual(file_list.files, ['b.txt']) - self.assertWarnings() - - # recursive-include - file_list = FileList() - file_list.set_allfiles(['a.py', l('d/b.py'), l('d/c.txt'), - l('d/d/e.py')]) - - file_list.process_template_line('recursive-include d *.py') - self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')]) - self.assertNoWarnings() - - file_list.process_template_line('recursive-include e *.py') - self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')]) - self.assertWarnings() - - # recursive-exclude - file_list = FileList() - file_list.files = ['a.py', l('d/b.py'), l('d/c.txt'), l('d/d/e.py')] - - file_list.process_template_line('recursive-exclude d *.py') - self.assertEqual(file_list.files, ['a.py', l('d/c.txt')]) - self.assertNoWarnings() - - file_list.process_template_line('recursive-exclude e *.py') - self.assertEqual(file_list.files, ['a.py', l('d/c.txt')]) - self.assertWarnings() - - # graft - file_list = FileList() - file_list.set_allfiles(['a.py', l('d/b.py'), l('d/d/e.py'), - l('f/f.py')]) - - file_list.process_template_line('graft d') - self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')]) - self.assertNoWarnings() - - file_list.process_template_line('graft e') - self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')]) - self.assertWarnings() - - # prune - file_list = FileList() - file_list.files = ['a.py', l('d/b.py'), l('d/d/e.py'), l('f/f.py')] - - file_list.process_template_line('prune d') - self.assertEqual(file_list.files, ['a.py', l('f/f.py')]) - self.assertNoWarnings() - - file_list.process_template_line('prune e') - self.assertEqual(file_list.files, ['a.py', l('f/f.py')]) - self.assertWarnings() - - -class FindAllTestCase(unittest.TestCase): - @os_helper.skip_unless_symlink - def 
test_missing_symlink(self): - with os_helper.temp_cwd(): - os.symlink('foo', 'bar') - self.assertEqual(filelist.findall(), []) - - def test_basic_discovery(self): - """ - When findall is called with no parameters or with - '.' as the parameter, the dot should be omitted from - the results. - """ - with os_helper.temp_cwd(): - os.mkdir('foo') - file1 = os.path.join('foo', 'file1.txt') - os_helper.create_empty_file(file1) - os.mkdir('bar') - file2 = os.path.join('bar', 'file2.txt') - os_helper.create_empty_file(file2) - expected = [file2, file1] - self.assertEqual(sorted(filelist.findall()), expected) - - def test_non_local_discovery(self): - """ - When findall is called with another path, the full - path name should be returned. - """ - with os_helper.temp_dir() as temp_dir: - file1 = os.path.join(temp_dir, 'file1.txt') - os_helper.create_empty_file(file1) - expected = [file1] - self.assertEqual(filelist.findall(temp_dir), expected) - - -def test_suite(): - return unittest.TestSuite([ - unittest.TestLoader().loadTestsFromTestCase(FileListTestCase), - unittest.TestLoader().loadTestsFromTestCase(FindAllTestCase), - ]) - - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_install.py b/Lib/distutils/tests/test_install.py deleted file mode 100644 index c38f98b8b2c2949..000000000000000 --- a/Lib/distutils/tests/test_install.py +++ /dev/null @@ -1,261 +0,0 @@ -"""Tests for distutils.command.install.""" - -import os -import sys -import unittest -import site - -from test.support import captured_stdout, run_unittest, requires_subprocess - -from distutils import sysconfig -from distutils.command.install import install, HAS_USER_SITE -from distutils.command import install as install_module -from distutils.command.build_ext import build_ext -from distutils.command.install import INSTALL_SCHEMES -from distutils.core import Distribution -from distutils.errors import DistutilsOptionError -from distutils.extension import Extension - -from distutils.tests import support -from test import support as test_support - - -def _make_ext_name(modname): - return modname + sysconfig.get_config_var('EXT_SUFFIX') - - -class InstallTestCase(support.TempdirManager, - support.EnvironGuard, - support.LoggingSilencer, - unittest.TestCase): - - def setUp(self): - super().setUp() - self._backup_config_vars = dict(sysconfig._config_vars) - - def tearDown(self): - super().tearDown() - sysconfig._config_vars.clear() - sysconfig._config_vars.update(self._backup_config_vars) - - def test_home_installation_scheme(self): - # This ensure two things: - # - that --home generates the desired set of directory names - # - test --home is supported on all platforms - builddir = self.mkdtemp() - destination = os.path.join(builddir, "installation") - - dist = Distribution({"name": "foopkg"}) - # script_name need not exist, it just need to be initialized - dist.script_name = os.path.join(builddir, "setup.py") - dist.command_obj["build"] = support.DummyCommand( - build_base=builddir, - build_lib=os.path.join(builddir, "lib"), - ) - - cmd = install(dist) - cmd.home = destination - cmd.ensure_finalized() - - self.assertEqual(cmd.install_base, destination) - self.assertEqual(cmd.install_platbase, destination) - - def check_path(got, expected): - got = os.path.normpath(got) - expected = os.path.normpath(expected) - self.assertEqual(got, expected) - - libdir = os.path.join(destination, "lib", "python") - check_path(cmd.install_lib, libdir) - platlibdir = os.path.join(destination, sys.platlibdir, "python") 
- check_path(cmd.install_platlib, platlibdir) - check_path(cmd.install_purelib, libdir) - check_path(cmd.install_headers, - os.path.join(destination, "include", "python", "foopkg")) - check_path(cmd.install_scripts, os.path.join(destination, "bin")) - check_path(cmd.install_data, destination) - - @unittest.skipUnless(HAS_USER_SITE, 'need user site') - def test_user_site(self): - # test install with --user - # preparing the environment for the test - self.old_user_base = site.USER_BASE - self.old_user_site = site.USER_SITE - self.tmpdir = self.mkdtemp() - self.user_base = os.path.join(self.tmpdir, 'B') - self.user_site = os.path.join(self.tmpdir, 'S') - site.USER_BASE = self.user_base - site.USER_SITE = self.user_site - install_module.USER_BASE = self.user_base - install_module.USER_SITE = self.user_site - - def _expanduser(path): - return self.tmpdir - self.old_expand = os.path.expanduser - os.path.expanduser = _expanduser - - def cleanup(): - site.USER_BASE = self.old_user_base - site.USER_SITE = self.old_user_site - install_module.USER_BASE = self.old_user_base - install_module.USER_SITE = self.old_user_site - os.path.expanduser = self.old_expand - - self.addCleanup(cleanup) - - if HAS_USER_SITE: - for key in ('nt_user', 'unix_user'): - self.assertIn(key, INSTALL_SCHEMES) - - dist = Distribution({'name': 'xx'}) - cmd = install(dist) - - # making sure the user option is there - options = [name for name, short, lable in - cmd.user_options] - self.assertIn('user', options) - - # setting a value - cmd.user = 1 - - # user base and site shouldn't be created yet - self.assertFalse(os.path.exists(self.user_base)) - self.assertFalse(os.path.exists(self.user_site)) - - # let's run finalize - cmd.ensure_finalized() - - # now they should - self.assertTrue(os.path.exists(self.user_base)) - self.assertTrue(os.path.exists(self.user_site)) - - self.assertIn('userbase', cmd.config_vars) - self.assertIn('usersite', cmd.config_vars) - - def test_handle_extra_path(self): - dist = Distribution({'name': 'xx', 'extra_path': 'path,dirs'}) - cmd = install(dist) - - # two elements - cmd.handle_extra_path() - self.assertEqual(cmd.extra_path, ['path', 'dirs']) - self.assertEqual(cmd.extra_dirs, 'dirs') - self.assertEqual(cmd.path_file, 'path') - - # one element - cmd.extra_path = ['path'] - cmd.handle_extra_path() - self.assertEqual(cmd.extra_path, ['path']) - self.assertEqual(cmd.extra_dirs, 'path') - self.assertEqual(cmd.path_file, 'path') - - # none - dist.extra_path = cmd.extra_path = None - cmd.handle_extra_path() - self.assertEqual(cmd.extra_path, None) - self.assertEqual(cmd.extra_dirs, '') - self.assertEqual(cmd.path_file, None) - - # three elements (no way !) 
- cmd.extra_path = 'path,dirs,again' - self.assertRaises(DistutilsOptionError, cmd.handle_extra_path) - - def test_finalize_options(self): - dist = Distribution({'name': 'xx'}) - cmd = install(dist) - - # must supply either prefix/exec-prefix/home or - # install-base/install-platbase -- not both - cmd.prefix = 'prefix' - cmd.install_base = 'base' - self.assertRaises(DistutilsOptionError, cmd.finalize_options) - - # must supply either home or prefix/exec-prefix -- not both - cmd.install_base = None - cmd.home = 'home' - self.assertRaises(DistutilsOptionError, cmd.finalize_options) - - # can't combine user with prefix/exec_prefix/home or - # install_(plat)base - cmd.prefix = None - cmd.user = 'user' - self.assertRaises(DistutilsOptionError, cmd.finalize_options) - - def test_record(self): - install_dir = self.mkdtemp() - project_dir, dist = self.create_dist(py_modules=['hello'], - scripts=['sayhi']) - os.chdir(project_dir) - self.write_file('hello.py', "def main(): print('o hai')") - self.write_file('sayhi', 'from hello import main; main()') - - cmd = install(dist) - dist.command_obj['install'] = cmd - cmd.root = install_dir - cmd.record = os.path.join(project_dir, 'filelist') - cmd.ensure_finalized() - cmd.run() - - f = open(cmd.record) - try: - content = f.read() - finally: - f.close() - - found = [os.path.basename(line) for line in content.splitlines()] - expected = ['hello.py', 'hello.%s.pyc' % sys.implementation.cache_tag, - 'sayhi', - 'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]] - self.assertEqual(found, expected) - - @requires_subprocess() - def test_record_extensions(self): - cmd = test_support.missing_compiler_executable() - if cmd is not None: - self.skipTest('The %r command is not found' % cmd) - install_dir = self.mkdtemp() - project_dir, dist = self.create_dist(ext_modules=[ - Extension('xx', ['xxmodule.c'])]) - os.chdir(project_dir) - support.copy_xxmodule_c(project_dir) - - buildextcmd = build_ext(dist) - support.fixup_build_ext(buildextcmd) - buildextcmd.ensure_finalized() - - cmd = install(dist) - dist.command_obj['install'] = cmd - dist.command_obj['build_ext'] = buildextcmd - cmd.root = install_dir - cmd.record = os.path.join(project_dir, 'filelist') - cmd.ensure_finalized() - cmd.run() - - f = open(cmd.record) - try: - content = f.read() - finally: - f.close() - - found = [os.path.basename(line) for line in content.splitlines()] - expected = [_make_ext_name('xx'), - 'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]] - self.assertEqual(found, expected) - - def test_debug_mode(self): - # this covers the code called when DEBUG is set - old_logs_len = len(self.logs) - install_module.DEBUG = True - try: - with captured_stdout(): - self.test_record() - finally: - install_module.DEBUG = False - self.assertGreater(len(self.logs), old_logs_len) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(InstallTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_install_data.py b/Lib/distutils/tests/test_install_data.py deleted file mode 100644 index 6191d2fa6eefab3..000000000000000 --- a/Lib/distutils/tests/test_install_data.py +++ /dev/null @@ -1,75 +0,0 @@ -"""Tests for distutils.command.install_data.""" -import os -import unittest - -from distutils.command.install_data import install_data -from distutils.tests import support -from test.support import run_unittest - -class InstallDataTestCase(support.TempdirManager, - support.LoggingSilencer, - support.EnvironGuard, - unittest.TestCase): - - 
def test_simple_run(self): - pkg_dir, dist = self.create_dist() - cmd = install_data(dist) - cmd.install_dir = inst = os.path.join(pkg_dir, 'inst') - - # data_files can contain - # - simple files - # - a tuple with a path, and a list of file - one = os.path.join(pkg_dir, 'one') - self.write_file(one, 'xxx') - inst2 = os.path.join(pkg_dir, 'inst2') - two = os.path.join(pkg_dir, 'two') - self.write_file(two, 'xxx') - - cmd.data_files = [one, (inst2, [two])] - self.assertEqual(cmd.get_inputs(), [one, (inst2, [two])]) - - # let's run the command - cmd.ensure_finalized() - cmd.run() - - # let's check the result - self.assertEqual(len(cmd.get_outputs()), 2) - rtwo = os.path.split(two)[-1] - self.assertTrue(os.path.exists(os.path.join(inst2, rtwo))) - rone = os.path.split(one)[-1] - self.assertTrue(os.path.exists(os.path.join(inst, rone))) - cmd.outfiles = [] - - # let's try with warn_dir one - cmd.warn_dir = 1 - cmd.ensure_finalized() - cmd.run() - - # let's check the result - self.assertEqual(len(cmd.get_outputs()), 2) - self.assertTrue(os.path.exists(os.path.join(inst2, rtwo))) - self.assertTrue(os.path.exists(os.path.join(inst, rone))) - cmd.outfiles = [] - - # now using root and empty dir - cmd.root = os.path.join(pkg_dir, 'root') - inst3 = os.path.join(cmd.install_dir, 'inst3') - inst4 = os.path.join(pkg_dir, 'inst4') - three = os.path.join(cmd.install_dir, 'three') - self.write_file(three, 'xx') - cmd.data_files = [one, (inst2, [two]), - ('inst3', [three]), - (inst4, [])] - cmd.ensure_finalized() - cmd.run() - - # let's check the result - self.assertEqual(len(cmd.get_outputs()), 4) - self.assertTrue(os.path.exists(os.path.join(inst2, rtwo))) - self.assertTrue(os.path.exists(os.path.join(inst, rone))) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(InstallDataTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_install_headers.py b/Lib/distutils/tests/test_install_headers.py deleted file mode 100644 index 1aa4d09cdef731c..000000000000000 --- a/Lib/distutils/tests/test_install_headers.py +++ /dev/null @@ -1,39 +0,0 @@ -"""Tests for distutils.command.install_headers.""" -import os -import unittest - -from distutils.command.install_headers import install_headers -from distutils.tests import support -from test.support import run_unittest - -class InstallHeadersTestCase(support.TempdirManager, - support.LoggingSilencer, - support.EnvironGuard, - unittest.TestCase): - - def test_simple_run(self): - # we have two headers - header_list = self.mkdtemp() - header1 = os.path.join(header_list, 'header1') - header2 = os.path.join(header_list, 'header2') - self.write_file(header1) - self.write_file(header2) - headers = [header1, header2] - - pkg_dir, dist = self.create_dist(headers=headers) - cmd = install_headers(dist) - self.assertEqual(cmd.get_inputs(), headers) - - # let's run the command - cmd.install_dir = os.path.join(pkg_dir, 'inst') - cmd.ensure_finalized() - cmd.run() - - # let's check the results - self.assertEqual(len(cmd.get_outputs()), 2) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(InstallHeadersTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_install_lib.py b/Lib/distutils/tests/test_install_lib.py deleted file mode 100644 index f840d1a94665ed5..000000000000000 --- a/Lib/distutils/tests/test_install_lib.py +++ /dev/null @@ -1,117 +0,0 @@ -"""Tests for distutils.command.install_data.""" -import sys -import os -import 
importlib.util -import unittest - -from distutils.command.install_lib import install_lib -from distutils.extension import Extension -from distutils.tests import support -from distutils.errors import DistutilsOptionError -from test.support import run_unittest, requires_subprocess - - -class InstallLibTestCase(support.TempdirManager, - support.LoggingSilencer, - support.EnvironGuard, - unittest.TestCase): - - def test_finalize_options(self): - dist = self.create_dist()[1] - cmd = install_lib(dist) - - cmd.finalize_options() - self.assertEqual(cmd.compile, 1) - self.assertEqual(cmd.optimize, 0) - - # optimize must be 0, 1, or 2 - cmd.optimize = 'foo' - self.assertRaises(DistutilsOptionError, cmd.finalize_options) - cmd.optimize = '4' - self.assertRaises(DistutilsOptionError, cmd.finalize_options) - - cmd.optimize = '2' - cmd.finalize_options() - self.assertEqual(cmd.optimize, 2) - - @unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled') - @requires_subprocess() - def test_byte_compile(self): - project_dir, dist = self.create_dist() - os.chdir(project_dir) - cmd = install_lib(dist) - cmd.compile = cmd.optimize = 1 - - f = os.path.join(project_dir, 'foo.py') - self.write_file(f, '# python file') - cmd.byte_compile([f]) - pyc_file = importlib.util.cache_from_source('foo.py', optimization='') - pyc_opt_file = importlib.util.cache_from_source('foo.py', - optimization=cmd.optimize) - self.assertTrue(os.path.exists(pyc_file)) - self.assertTrue(os.path.exists(pyc_opt_file)) - - def test_get_outputs(self): - project_dir, dist = self.create_dist() - os.chdir(project_dir) - os.mkdir('spam') - cmd = install_lib(dist) - - # setting up a dist environment - cmd.compile = cmd.optimize = 1 - cmd.install_dir = self.mkdtemp() - f = os.path.join(project_dir, 'spam', '__init__.py') - self.write_file(f, '# python package') - cmd.distribution.ext_modules = [Extension('foo', ['xxx'])] - cmd.distribution.packages = ['spam'] - cmd.distribution.script_name = 'setup.py' - - # get_outputs should return 4 elements: spam/__init__.py and .pyc, - # foo.import-tag-abiflags.so / foo.pyd - outputs = cmd.get_outputs() - self.assertEqual(len(outputs), 4, outputs) - - def test_get_inputs(self): - project_dir, dist = self.create_dist() - os.chdir(project_dir) - os.mkdir('spam') - cmd = install_lib(dist) - - # setting up a dist environment - cmd.compile = cmd.optimize = 1 - cmd.install_dir = self.mkdtemp() - f = os.path.join(project_dir, 'spam', '__init__.py') - self.write_file(f, '# python package') - cmd.distribution.ext_modules = [Extension('foo', ['xxx'])] - cmd.distribution.packages = ['spam'] - cmd.distribution.script_name = 'setup.py' - - # get_inputs should return 2 elements: spam/__init__.py and - # foo.import-tag-abiflags.so / foo.pyd - inputs = cmd.get_inputs() - self.assertEqual(len(inputs), 2, inputs) - - @requires_subprocess() - def test_dont_write_bytecode(self): - # makes sure byte_compile is not used - dist = self.create_dist()[1] - cmd = install_lib(dist) - cmd.compile = 1 - cmd.optimize = 1 - - old_dont_write_bytecode = sys.dont_write_bytecode - sys.dont_write_bytecode = True - try: - cmd.byte_compile([]) - finally: - sys.dont_write_bytecode = old_dont_write_bytecode - - self.assertIn('byte-compiling is disabled', - self.logs[0][1] % self.logs[0][2]) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(InstallLibTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_install_scripts.py 
b/Lib/distutils/tests/test_install_scripts.py deleted file mode 100644 index 648db3b11da7456..000000000000000 --- a/Lib/distutils/tests/test_install_scripts.py +++ /dev/null @@ -1,82 +0,0 @@ -"""Tests for distutils.command.install_scripts.""" - -import os -import unittest - -from distutils.command.install_scripts import install_scripts -from distutils.core import Distribution - -from distutils.tests import support -from test.support import run_unittest - - -class InstallScriptsTestCase(support.TempdirManager, - support.LoggingSilencer, - unittest.TestCase): - - def test_default_settings(self): - dist = Distribution() - dist.command_obj["build"] = support.DummyCommand( - build_scripts="/foo/bar") - dist.command_obj["install"] = support.DummyCommand( - install_scripts="/splat/funk", - force=1, - skip_build=1, - ) - cmd = install_scripts(dist) - self.assertFalse(cmd.force) - self.assertFalse(cmd.skip_build) - self.assertIsNone(cmd.build_dir) - self.assertIsNone(cmd.install_dir) - - cmd.finalize_options() - - self.assertTrue(cmd.force) - self.assertTrue(cmd.skip_build) - self.assertEqual(cmd.build_dir, "/foo/bar") - self.assertEqual(cmd.install_dir, "/splat/funk") - - def test_installation(self): - source = self.mkdtemp() - expected = [] - - def write_script(name, text): - expected.append(name) - f = open(os.path.join(source, name), "w") - try: - f.write(text) - finally: - f.close() - - write_script("script1.py", ("#! /usr/bin/env python2.3\n" - "# bogus script w/ Python sh-bang\n" - "pass\n")) - write_script("script2.py", ("#!/usr/bin/python\n" - "# bogus script w/ Python sh-bang\n" - "pass\n")) - write_script("shell.sh", ("#!/bin/sh\n" - "# bogus shell script w/ sh-bang\n" - "exit 0\n")) - - target = self.mkdtemp() - dist = Distribution() - dist.command_obj["build"] = support.DummyCommand(build_scripts=source) - dist.command_obj["install"] = support.DummyCommand( - install_scripts=target, - force=1, - skip_build=1, - ) - cmd = install_scripts(dist) - cmd.finalize_options() - cmd.run() - - installed = os.listdir(target) - for name in expected: - self.assertIn(name, installed) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(InstallScriptsTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_log.py b/Lib/distutils/tests/test_log.py deleted file mode 100644 index ec2ae028de87774..000000000000000 --- a/Lib/distutils/tests/test_log.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Tests for distutils.log""" - -import io -import sys -import unittest -from test.support import swap_attr, run_unittest - -from distutils import log - -class TestLog(unittest.TestCase): - def test_non_ascii(self): - # Issues #8663, #34421: test that non-encodable text is escaped with - # backslashreplace error handler and encodable non-ASCII text is - # output as is. 
- for errors in ('strict', 'backslashreplace', 'surrogateescape', - 'replace', 'ignore'): - with self.subTest(errors=errors): - stdout = io.TextIOWrapper(io.BytesIO(), - encoding='cp437', errors=errors) - stderr = io.TextIOWrapper(io.BytesIO(), - encoding='cp437', errors=errors) - old_threshold = log.set_threshold(log.DEBUG) - try: - with swap_attr(sys, 'stdout', stdout), \ - swap_attr(sys, 'stderr', stderr): - log.debug('Dεbug\tMėssãge') - log.fatal('Fαtal\tÈrrōr') - finally: - log.set_threshold(old_threshold) - - stdout.seek(0) - self.assertEqual(stdout.read().rstrip(), - 'Dεbug\tM?ss?ge' if errors == 'replace' else - 'Dεbug\tMssge' if errors == 'ignore' else - 'Dεbug\tM\\u0117ss\\xe3ge') - stderr.seek(0) - self.assertEqual(stderr.read().rstrip(), - 'Fαtal\t?rr?r' if errors == 'replace' else - 'Fαtal\trrr' if errors == 'ignore' else - 'Fαtal\t\\xc8rr\\u014dr') - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(TestLog) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_msvc9compiler.py b/Lib/distutils/tests/test_msvc9compiler.py deleted file mode 100644 index 6235405e31201b2..000000000000000 --- a/Lib/distutils/tests/test_msvc9compiler.py +++ /dev/null @@ -1,184 +0,0 @@ -"""Tests for distutils.msvc9compiler.""" -import sys -import unittest -import os - -from distutils.errors import DistutilsPlatformError -from distutils.tests import support -from test.support import run_unittest - -# A manifest with the only assembly reference being the msvcrt assembly, so -# should have the assembly completely stripped. Note that although the -# assembly has a reference the assembly is removed - that is -# currently a "feature", not a bug :) -_MANIFEST_WITH_ONLY_MSVC_REFERENCE = """\ - - - - - - - - - - - - - - - - - -""" - -# A manifest with references to assemblies other than msvcrt. When processed, -# this assembly should be returned with just the msvcrt part removed. -_MANIFEST_WITH_MULTIPLE_REFERENCES = """\ - - - - - - - - - - - - - - - - - - - - - - -""" - -_CLEANED_MANIFEST = """\ - - - - - - - - - - - - - - - - - - -""" - -if sys.platform=="win32": - from distutils.msvccompiler import get_build_version - if get_build_version()>=8.0: - SKIP_MESSAGE = None - else: - SKIP_MESSAGE = "These tests are only for MSVC8.0 or above" -else: - SKIP_MESSAGE = "These tests are only for win32" - -@unittest.skipUnless(SKIP_MESSAGE is None, SKIP_MESSAGE) -class msvc9compilerTestCase(support.TempdirManager, - unittest.TestCase): - - def test_no_compiler(self): - # makes sure query_vcvarsall raises - # a DistutilsPlatformError if the compiler - # is not found - from distutils.msvc9compiler import query_vcvarsall - def _find_vcvarsall(version): - return None - - from distutils import msvc9compiler - old_find_vcvarsall = msvc9compiler.find_vcvarsall - msvc9compiler.find_vcvarsall = _find_vcvarsall - try: - self.assertRaises(DistutilsPlatformError, query_vcvarsall, - 'wont find this version') - finally: - msvc9compiler.find_vcvarsall = old_find_vcvarsall - - def test_reg_class(self): - from distutils.msvc9compiler import Reg - self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx') - - # looking for values that should exist on all - # windows registry versions.
- path = r'Control Panel\Desktop' - v = Reg.get_value(path, 'dragfullwindows') - self.assertIn(v, ('0', '1', '2')) - - import winreg - HKCU = winreg.HKEY_CURRENT_USER - keys = Reg.read_keys(HKCU, 'xxxx') - self.assertEqual(keys, None) - - keys = Reg.read_keys(HKCU, r'Control Panel') - self.assertIn('Desktop', keys) - - def test_remove_visual_c_ref(self): - from distutils.msvc9compiler import MSVCCompiler - tempdir = self.mkdtemp() - manifest = os.path.join(tempdir, 'manifest') - f = open(manifest, 'w') - try: - f.write(_MANIFEST_WITH_MULTIPLE_REFERENCES) - finally: - f.close() - - compiler = MSVCCompiler() - compiler._remove_visual_c_ref(manifest) - - # see what we got - f = open(manifest) - try: - # removing trailing spaces - content = '\n'.join([line.rstrip() for line in f.readlines()]) - finally: - f.close() - - # makes sure the manifest was properly cleaned - self.assertEqual(content, _CLEANED_MANIFEST) - - def test_remove_entire_manifest(self): - from distutils.msvc9compiler import MSVCCompiler - tempdir = self.mkdtemp() - manifest = os.path.join(tempdir, 'manifest') - f = open(manifest, 'w') - try: - f.write(_MANIFEST_WITH_ONLY_MSVC_REFERENCE) - finally: - f.close() - - compiler = MSVCCompiler() - got = compiler._remove_visual_c_ref(manifest) - self.assertIsNone(got) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(msvc9compilerTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_msvccompiler.py b/Lib/distutils/tests/test_msvccompiler.py deleted file mode 100644 index dd67c3eb6d519d0..000000000000000 --- a/Lib/distutils/tests/test_msvccompiler.py +++ /dev/null @@ -1,81 +0,0 @@ -"""Tests for distutils._msvccompiler.""" -import sys -import unittest -import os - -from distutils.errors import DistutilsPlatformError -from distutils.tests import support -from test.support import run_unittest - - -SKIP_MESSAGE = (None if sys.platform == "win32" else - "These tests are only for win32") - -@unittest.skipUnless(SKIP_MESSAGE is None, SKIP_MESSAGE) -class msvccompilerTestCase(support.TempdirManager, - unittest.TestCase): - - def test_no_compiler(self): - import distutils._msvccompiler as _msvccompiler - # makes sure query_vcvarsall raises - # a DistutilsPlatformError if the compiler - # is not found - def _find_vcvarsall(plat_spec): - return None, None - - old_find_vcvarsall = _msvccompiler._find_vcvarsall - _msvccompiler._find_vcvarsall = _find_vcvarsall - try: - self.assertRaises(DistutilsPlatformError, - _msvccompiler._get_vc_env, - 'wont find this version') - finally: - _msvccompiler._find_vcvarsall = old_find_vcvarsall - - def test_get_vc_env_unicode(self): - import distutils._msvccompiler as _msvccompiler - - test_var = 'ṰḖṤṪ┅ṼẨṜ' - test_value = '₃⁴₅' - - # Ensure we don't early exit from _get_vc_env - old_distutils_use_sdk = os.environ.pop('DISTUTILS_USE_SDK', None) - os.environ[test_var] = test_value - try: - env = _msvccompiler._get_vc_env('x86') - self.assertIn(test_var.lower(), env) - self.assertEqual(test_value, env[test_var.lower()]) - finally: - os.environ.pop(test_var) - if old_distutils_use_sdk: - os.environ['DISTUTILS_USE_SDK'] = old_distutils_use_sdk - - def test_get_vc2017(self): - import distutils._msvccompiler as _msvccompiler - - # This function cannot be mocked, so pass it if we find VS 2017 - # and mark it skipped if we do not.
- version, path = _msvccompiler._find_vc2017() - if version: - self.assertGreaterEqual(version, 15) - self.assertTrue(os.path.isdir(path)) - else: - raise unittest.SkipTest("VS 2017 is not installed") - - def test_get_vc2015(self): - import distutils._msvccompiler as _msvccompiler - - # This function cannot be mocked, so pass it if we find VS 2015 - # and mark it skipped if we do not. - version, path = _msvccompiler._find_vc2015() - if version: - self.assertGreaterEqual(version, 14) - self.assertTrue(os.path.isdir(path)) - else: - raise unittest.SkipTest("VS 2015 is not installed") - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(msvccompilerTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_register.py b/Lib/distutils/tests/test_register.py deleted file mode 100644 index 7805c6d3c9f7b9c..000000000000000 --- a/Lib/distutils/tests/test_register.py +++ /dev/null @@ -1,324 +0,0 @@ -"""Tests for distutils.command.register.""" -import os -import unittest -import getpass -import urllib -import warnings - -from test.support import run_unittest -from test.support.warnings_helper import check_warnings - -from distutils.command import register as register_module -from distutils.command.register import register -from distutils.errors import DistutilsSetupError -from distutils.log import INFO - -from distutils.tests.test_config import BasePyPIRCCommandTestCase - -try: - import docutils -except ImportError: - docutils = None - -PYPIRC_NOPASSWORD = """\ -[distutils] - -index-servers = - server1 - -[server1] -username:me -""" - -WANTED_PYPIRC = """\ -[distutils] -index-servers = - pypi - -[pypi] -username:tarek -password:password -""" - -class Inputs(object): - """Fakes user inputs.""" - def __init__(self, *answers): - self.answers = answers - self.index = 0 - - def __call__(self, prompt=''): - try: - return self.answers[self.index] - finally: - self.index += 1 - -class FakeOpener(object): - """Fakes a PyPI server""" - def __init__(self): - self.reqs = [] - - def __call__(self, *args): - return self - - def open(self, req, data=None, timeout=None): - self.reqs.append(req) - return self - - def read(self): - return b'xxx' - - def getheader(self, name, default=None): - return { - 'content-type': 'text/plain; charset=utf-8', - }.get(name.lower(), default) - - -class RegisterTestCase(BasePyPIRCCommandTestCase): - - def setUp(self): - super(RegisterTestCase, self).setUp() - # patching the password prompt - self._old_getpass = getpass.getpass - def _getpass(prompt): - return 'password' - getpass.getpass = _getpass - urllib.request._opener = None - self.old_opener = urllib.request.build_opener - self.conn = urllib.request.build_opener = FakeOpener() - - def tearDown(self): - getpass.getpass = self._old_getpass - urllib.request._opener = None - urllib.request.build_opener = self.old_opener - super(RegisterTestCase, self).tearDown() - - def _get_cmd(self, metadata=None): - if metadata is None: - metadata = {'url': 'xxx', 'author': 'xxx', - 'author_email': 'xxx', - 'name': 'xxx', 'version': 'xxx'} - pkg_info, dist = self.create_dist(**metadata) - return register(dist) - - def test_create_pypirc(self): - # this test makes sure a .pypirc file - # is created when requested. 
- - # let's create a register instance - cmd = self._get_cmd() - - # we shouldn't have a .pypirc file yet - self.assertFalse(os.path.exists(self.rc)) - - # patching input and getpass.getpass - # so register gets happy - # - # Here's what we are faking : - # use your existing login (choice 1.) - # Username : 'tarek' - # Password : 'password' - # Save your login (y/N)? : 'y' - inputs = Inputs('1', 'tarek', 'y') - register_module.input = inputs.__call__ - # let's run the command - try: - cmd.run() - finally: - del register_module.input - - # we should have a brand new .pypirc file - self.assertTrue(os.path.exists(self.rc)) - - # with the content similar to WANTED_PYPIRC - f = open(self.rc) - try: - content = f.read() - self.assertEqual(content, WANTED_PYPIRC) - finally: - f.close() - - # now let's make sure the .pypirc file generated - # really works : we shouldn't be asked anything - # if we run the command again - def _no_way(prompt=''): - raise AssertionError(prompt) - register_module.input = _no_way - - cmd.show_response = 1 - cmd.run() - - # let's see what the server received : we should - # have 2 similar requests - self.assertEqual(len(self.conn.reqs), 2) - req1 = dict(self.conn.reqs[0].headers) - req2 = dict(self.conn.reqs[1].headers) - - self.assertEqual(req1['Content-length'], '1374') - self.assertEqual(req2['Content-length'], '1374') - self.assertIn(b'xxx', self.conn.reqs[1].data) - - def test_password_not_in_file(self): - - self.write_file(self.rc, PYPIRC_NOPASSWORD) - cmd = self._get_cmd() - cmd._set_config() - cmd.finalize_options() - cmd.send_metadata() - - # dist.password should be set - # therefore used afterwards by other commands - self.assertEqual(cmd.distribution.password, 'password') - - def test_registering(self): - # this test runs choice 2 - cmd = self._get_cmd() - inputs = Inputs('2', 'tarek', 'tarek@ziade.org') - register_module.input = inputs.__call__ - try: - # let's run the command - cmd.run() - finally: - del register_module.input - - # we should have send a request - self.assertEqual(len(self.conn.reqs), 1) - req = self.conn.reqs[0] - headers = dict(req.headers) - self.assertEqual(headers['Content-length'], '608') - self.assertIn(b'tarek', req.data) - - def test_password_reset(self): - # this test runs choice 3 - cmd = self._get_cmd() - inputs = Inputs('3', 'tarek@ziade.org') - register_module.input = inputs.__call__ - try: - # let's run the command - cmd.run() - finally: - del register_module.input - - # we should have send a request - self.assertEqual(len(self.conn.reqs), 1) - req = self.conn.reqs[0] - headers = dict(req.headers) - self.assertEqual(headers['Content-length'], '290') - self.assertIn(b'tarek', req.data) - - @unittest.skipUnless(docutils is not None, 'needs docutils') - def test_strict(self): - # testing the script option - # when on, the register command stops if - # the metadata is incomplete or if - # long_description is not reSt compliant - - # empty metadata - cmd = self._get_cmd({}) - cmd.ensure_finalized() - cmd.strict = 1 - self.assertRaises(DistutilsSetupError, cmd.run) - - # metadata are OK but long_description is broken - metadata = {'url': 'xxx', 'author': 'xxx', - 'author_email': 'éxéxé', - 'name': 'xxx', 'version': 'xxx', - 'long_description': 'title\n==\n\ntext'} - - cmd = self._get_cmd(metadata) - cmd.ensure_finalized() - cmd.strict = 1 - self.assertRaises(DistutilsSetupError, cmd.run) - - # now something that works - metadata['long_description'] = 'title\n=====\n\ntext' - cmd = self._get_cmd(metadata) - cmd.ensure_finalized() -
cmd.strict = 1 - inputs = Inputs('1', 'tarek', 'y') - register_module.input = inputs.__call__ - # let's run the command - try: - cmd.run() - finally: - del register_module.input - - # strict is not by default - cmd = self._get_cmd() - cmd.ensure_finalized() - inputs = Inputs('1', 'tarek', 'y') - register_module.input = inputs.__call__ - # let's run the command - try: - cmd.run() - finally: - del register_module.input - - # and finally a Unicode test (bug #12114) - metadata = {'url': 'xxx', 'author': '\u00c9ric', - 'author_email': 'xxx', 'name': 'xxx', - 'version': 'xxx', - 'description': 'Something about esszet \u00df', - 'long_description': 'More things about esszet \u00df'} - - cmd = self._get_cmd(metadata) - cmd.ensure_finalized() - cmd.strict = 1 - inputs = Inputs('1', 'tarek', 'y') - register_module.input = inputs.__call__ - # let's run the command - try: - cmd.run() - finally: - del register_module.input - - @unittest.skipUnless(docutils is not None, 'needs docutils') - def test_register_invalid_long_description(self): - description = ':funkie:`str`' # mimic Sphinx-specific markup - metadata = {'url': 'xxx', 'author': 'xxx', - 'author_email': 'xxx', - 'name': 'xxx', 'version': 'xxx', - 'long_description': description} - cmd = self._get_cmd(metadata) - cmd.ensure_finalized() - cmd.strict = True - inputs = Inputs('2', 'tarek', 'tarek@ziade.org') - register_module.input = inputs - self.addCleanup(delattr, register_module, 'input') - - self.assertRaises(DistutilsSetupError, cmd.run) - - def test_check_metadata_deprecated(self): - # makes sure make_metadata is deprecated - cmd = self._get_cmd() - with check_warnings() as w: - warnings.simplefilter("always") - cmd.check_metadata() - self.assertEqual(len(w.warnings), 1) - - def test_list_classifiers(self): - cmd = self._get_cmd() - cmd.list_classifiers = 1 - cmd.run() - results = self.get_logs(INFO) - self.assertEqual(results, ['running check', 'xxx']) - - def test_show_response(self): - # test that the --show-response option return a well formatted response - cmd = self._get_cmd() - inputs = Inputs('1', 'tarek', 'y') - register_module.input = inputs.__call__ - cmd.show_response = 1 - try: - cmd.run() - finally: - del register_module.input - - results = self.get_logs(INFO) - self.assertEqual(results[3], 75 * '-' + '\nxxx\n' + 75 * '-') - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(RegisterTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_sdist.py b/Lib/distutils/tests/test_sdist.py deleted file mode 100644 index 46b3a13e470c4ea..000000000000000 --- a/Lib/distutils/tests/test_sdist.py +++ /dev/null @@ -1,493 +0,0 @@ -"""Tests for distutils.command.sdist.""" -import os -import tarfile -import unittest -import warnings -import zipfile -from os.path import join -from textwrap import dedent -from test.support import captured_stdout, run_unittest -from test.support.warnings_helper import check_warnings - -try: - import zlib - ZLIB_SUPPORT = True -except ImportError: - ZLIB_SUPPORT = False - -try: - import grp - import pwd - UID_GID_SUPPORT = True -except ImportError: - UID_GID_SUPPORT = False - -from distutils.command.sdist import sdist, show_formats -from distutils.core import Distribution -from distutils.tests.test_config import BasePyPIRCCommandTestCase -from distutils.errors import DistutilsOptionError -from distutils.spawn import find_executable -from distutils.log import WARN -from distutils.filelist import FileList -from distutils.archive_util import 
ARCHIVE_FORMATS - -SETUP_PY = """ -from distutils.core import setup -import somecode - -setup(name='fake') -""" - -MANIFEST = """\ -# file GENERATED by distutils, do NOT edit -README -buildout.cfg -inroot.txt -setup.py -data%(sep)sdata.dt -scripts%(sep)sscript.py -some%(sep)sfile.txt -some%(sep)sother_file.txt -somecode%(sep)s__init__.py -somecode%(sep)sdoc.dat -somecode%(sep)sdoc.txt -""" - -class SDistTestCase(BasePyPIRCCommandTestCase): - - def setUp(self): - # PyPIRCCommandTestCase creates a temp dir already - # and put it in self.tmp_dir - super(SDistTestCase, self).setUp() - # setting up an environment - self.old_path = os.getcwd() - os.mkdir(join(self.tmp_dir, 'somecode')) - os.mkdir(join(self.tmp_dir, 'dist')) - # a package, and a README - self.write_file((self.tmp_dir, 'README'), 'xxx') - self.write_file((self.tmp_dir, 'somecode', '__init__.py'), '#') - self.write_file((self.tmp_dir, 'setup.py'), SETUP_PY) - os.chdir(self.tmp_dir) - - def tearDown(self): - # back to normal - os.chdir(self.old_path) - super(SDistTestCase, self).tearDown() - - def get_cmd(self, metadata=None): - """Returns a cmd""" - if metadata is None: - metadata = {'name': 'fake', 'version': '1.0', - 'url': 'xxx', 'author': 'xxx', - 'author_email': 'xxx'} - dist = Distribution(metadata) - dist.script_name = 'setup.py' - dist.packages = ['somecode'] - dist.include_package_data = True - cmd = sdist(dist) - cmd.dist_dir = 'dist' - return dist, cmd - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_prune_file_list(self): - # this test creates a project with some VCS dirs and an NFS rename - # file, then launches sdist to check they get pruned on all systems - - # creating VCS directories with some files in them - os.mkdir(join(self.tmp_dir, 'somecode', '.svn')) - self.write_file((self.tmp_dir, 'somecode', '.svn', 'ok.py'), 'xxx') - - os.mkdir(join(self.tmp_dir, 'somecode', '.hg')) - self.write_file((self.tmp_dir, 'somecode', '.hg', - 'ok'), 'xxx') - - os.mkdir(join(self.tmp_dir, 'somecode', '.git')) - self.write_file((self.tmp_dir, 'somecode', '.git', - 'ok'), 'xxx') - - self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx') - - # now building a sdist - dist, cmd = self.get_cmd() - - # zip is available universally - # (tar might not be installed under win32) - cmd.formats = ['zip'] - - cmd.ensure_finalized() - cmd.run() - - # now let's check what we have - dist_folder = join(self.tmp_dir, 'dist') - files = os.listdir(dist_folder) - self.assertEqual(files, ['fake-1.0.zip']) - - zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip')) - try: - content = zip_file.namelist() - finally: - zip_file.close() - - # making sure everything has been pruned correctly - expected = ['', 'PKG-INFO', 'README', 'setup.py', - 'somecode/', 'somecode/__init__.py'] - self.assertEqual(sorted(content), ['fake-1.0/' + x for x in expected]) - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - @unittest.skipIf(find_executable('tar') is None, - "The tar command is not found") - @unittest.skipIf(find_executable('gzip') is None, - "The gzip command is not found") - def test_make_distribution(self): - # now building a sdist - dist, cmd = self.get_cmd() - - # creating a gztar then a tar - cmd.formats = ['gztar', 'tar'] - cmd.ensure_finalized() - cmd.run() - - # making sure we have two files - dist_folder = join(self.tmp_dir, 'dist') - result = os.listdir(dist_folder) - result.sort() - self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz']) - - os.remove(join(dist_folder, 'fake-1.0.tar')) 
- os.remove(join(dist_folder, 'fake-1.0.tar.gz')) - - # now trying a tar then a gztar - cmd.formats = ['tar', 'gztar'] - - cmd.ensure_finalized() - cmd.run() - - result = os.listdir(dist_folder) - result.sort() - self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz']) - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_add_defaults(self): - - # http://bugs.python.org/issue2279 - - # add_default should also include - # data_files and package_data - dist, cmd = self.get_cmd() - - # filling data_files by pointing files - # in package_data - dist.package_data = {'': ['*.cfg', '*.dat'], - 'somecode': ['*.txt']} - self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#') - self.write_file((self.tmp_dir, 'somecode', 'doc.dat'), '#') - - # adding some data in data_files - data_dir = join(self.tmp_dir, 'data') - os.mkdir(data_dir) - self.write_file((data_dir, 'data.dt'), '#') - some_dir = join(self.tmp_dir, 'some') - os.mkdir(some_dir) - # make sure VCS directories are pruned (#14004) - hg_dir = join(self.tmp_dir, '.hg') - os.mkdir(hg_dir) - self.write_file((hg_dir, 'last-message.txt'), '#') - # a buggy regex used to prevent this from working on windows (#6884) - self.write_file((self.tmp_dir, 'buildout.cfg'), '#') - self.write_file((self.tmp_dir, 'inroot.txt'), '#') - self.write_file((some_dir, 'file.txt'), '#') - self.write_file((some_dir, 'other_file.txt'), '#') - - dist.data_files = [('data', ['data/data.dt', - 'buildout.cfg', - 'inroot.txt', - 'notexisting']), - 'some/file.txt', - 'some/other_file.txt'] - - # adding a script - script_dir = join(self.tmp_dir, 'scripts') - os.mkdir(script_dir) - self.write_file((script_dir, 'script.py'), '#') - dist.scripts = [join('scripts', 'script.py')] - - cmd.formats = ['zip'] - cmd.use_defaults = True - - cmd.ensure_finalized() - cmd.run() - - # now let's check what we have - dist_folder = join(self.tmp_dir, 'dist') - files = os.listdir(dist_folder) - self.assertEqual(files, ['fake-1.0.zip']) - - zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip')) - try: - content = zip_file.namelist() - finally: - zip_file.close() - - # making sure everything was added - expected = ['', 'PKG-INFO', 'README', 'buildout.cfg', - 'data/', 'data/data.dt', 'inroot.txt', - 'scripts/', 'scripts/script.py', 'setup.py', - 'some/', 'some/file.txt', 'some/other_file.txt', - 'somecode/', 'somecode/__init__.py', 'somecode/doc.dat', - 'somecode/doc.txt'] - self.assertEqual(sorted(content), ['fake-1.0/' + x for x in expected]) - - # checking the MANIFEST - f = open(join(self.tmp_dir, 'MANIFEST')) - try: - manifest = f.read() - finally: - f.close() - self.assertEqual(manifest, MANIFEST % {'sep': os.sep}) - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_metadata_check_option(self): - # testing the `medata-check` option - dist, cmd = self.get_cmd(metadata={}) - - # this should raise some warnings ! 
- # with the `check` subcommand - cmd.ensure_finalized() - cmd.run() - warnings = [msg for msg in self.get_logs(WARN) if - msg.startswith('warning: check:')] - self.assertEqual(len(warnings), 2) - - # trying with a complete set of metadata - self.clear_logs() - dist, cmd = self.get_cmd() - cmd.ensure_finalized() - cmd.metadata_check = 0 - cmd.run() - warnings = [msg for msg in self.get_logs(WARN) if - msg.startswith('warning: check:')] - self.assertEqual(len(warnings), 0) - - def test_check_metadata_deprecated(self): - # makes sure make_metadata is deprecated - dist, cmd = self.get_cmd() - with check_warnings() as w: - warnings.simplefilter("always") - cmd.check_metadata() - self.assertEqual(len(w.warnings), 1) - - def test_show_formats(self): - with captured_stdout() as stdout: - show_formats() - - # the output should be a header line + one line per format - num_formats = len(ARCHIVE_FORMATS.keys()) - output = [line for line in stdout.getvalue().split('\n') - if line.strip().startswith('--formats=')] - self.assertEqual(len(output), num_formats) - - def test_finalize_options(self): - dist, cmd = self.get_cmd() - cmd.finalize_options() - - # default options set by finalize - self.assertEqual(cmd.manifest, 'MANIFEST') - self.assertEqual(cmd.template, 'MANIFEST.in') - self.assertEqual(cmd.dist_dir, 'dist') - - # formats has to be a string splitable on (' ', ',') or - # a stringlist - cmd.formats = 1 - self.assertRaises(DistutilsOptionError, cmd.finalize_options) - cmd.formats = ['zip'] - cmd.finalize_options() - - # formats has to be known - cmd.formats = 'supazipa' - self.assertRaises(DistutilsOptionError, cmd.finalize_options) - - # the following tests make sure there is a nice error message instead - # of a traceback when parsing an invalid manifest template - - def _check_template(self, content): - dist, cmd = self.get_cmd() - os.chdir(self.tmp_dir) - self.write_file('MANIFEST.in', content) - cmd.ensure_finalized() - cmd.filelist = FileList() - cmd.read_template() - warnings = self.get_logs(WARN) - self.assertEqual(len(warnings), 1) - - def test_invalid_template_unknown_command(self): - self._check_template('taunt knights *') - - def test_invalid_template_wrong_arguments(self): - # this manifest command takes one argument - self._check_template('prune') - - @unittest.skipIf(os.name != 'nt', 'test relevant for Windows only') - def test_invalid_template_wrong_path(self): - # on Windows, trailing slashes are not allowed - # this used to crash instead of raising a warning: #8286 - self._check_template('include examples/') - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_get_file_list(self): - # make sure MANIFEST is recalculated - dist, cmd = self.get_cmd() - - # filling data_files by pointing files in package_data - dist.package_data = {'somecode': ['*.txt']} - self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#') - cmd.formats = ['gztar'] - cmd.ensure_finalized() - cmd.run() - - f = open(cmd.manifest) - try: - manifest = [line.strip() for line in f.read().split('\n') - if line.strip() != ''] - finally: - f.close() - - self.assertEqual(len(manifest), 5) - - # adding a file - self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#') - - # make sure build_py is reinitialized, like a fresh run - build_py = dist.get_command_obj('build_py') - build_py.finalized = False - build_py.ensure_finalized() - - cmd.run() - - f = open(cmd.manifest) - try: - manifest2 = [line.strip() for line in f.read().split('\n') - if line.strip() != ''] - finally: - f.close() - - # 
do we have the new file in MANIFEST ? - self.assertEqual(len(manifest2), 6) - self.assertIn('doc2.txt', manifest2[-1]) - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_manifest_marker(self): - # check that autogenerated MANIFESTs have a marker - dist, cmd = self.get_cmd() - cmd.ensure_finalized() - cmd.run() - - f = open(cmd.manifest) - try: - manifest = [line.strip() for line in f.read().split('\n') - if line.strip() != ''] - finally: - f.close() - - self.assertEqual(manifest[0], - '# file GENERATED by distutils, do NOT edit') - - @unittest.skipUnless(ZLIB_SUPPORT, "Need zlib support to run") - def test_manifest_comments(self): - # make sure comments don't cause exceptions or wrong includes - contents = dedent("""\ - # bad.py - #bad.py - good.py - """) - dist, cmd = self.get_cmd() - cmd.ensure_finalized() - self.write_file((self.tmp_dir, cmd.manifest), contents) - self.write_file((self.tmp_dir, 'good.py'), '# pick me!') - self.write_file((self.tmp_dir, 'bad.py'), "# don't pick me!") - self.write_file((self.tmp_dir, '#bad.py'), "# don't pick me!") - cmd.run() - self.assertEqual(cmd.filelist.files, ['good.py']) - - @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') - def test_manual_manifest(self): - # check that a MANIFEST without a marker is left alone - dist, cmd = self.get_cmd() - cmd.formats = ['gztar'] - cmd.ensure_finalized() - self.write_file((self.tmp_dir, cmd.manifest), 'README.manual') - self.write_file((self.tmp_dir, 'README.manual'), - 'This project maintains its MANIFEST file itself.') - cmd.run() - self.assertEqual(cmd.filelist.files, ['README.manual']) - - f = open(cmd.manifest) - try: - manifest = [line.strip() for line in f.read().split('\n') - if line.strip() != ''] - finally: - f.close() - - self.assertEqual(manifest, ['README.manual']) - - archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz') - archive = tarfile.open(archive_name) - try: - filenames = [tarinfo.name for tarinfo in archive] - finally: - archive.close() - self.assertEqual(sorted(filenames), ['fake-1.0', 'fake-1.0/PKG-INFO', - 'fake-1.0/README.manual']) - - @unittest.skipUnless(ZLIB_SUPPORT, "requires zlib") - @unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support") - @unittest.skipIf(find_executable('tar') is None, - "The tar command is not found") - @unittest.skipIf(find_executable('gzip') is None, - "The gzip command is not found") - def test_make_distribution_owner_group(self): - # now building a sdist - dist, cmd = self.get_cmd() - - # creating a gztar and specifying the owner+group - cmd.formats = ['gztar'] - cmd.owner = pwd.getpwuid(0)[0] - cmd.group = grp.getgrgid(0)[0] - cmd.ensure_finalized() - cmd.run() - - # making sure we have the good rights - archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz') - archive = tarfile.open(archive_name) - try: - for member in archive.getmembers(): - self.assertEqual(member.uid, 0) - self.assertEqual(member.gid, 0) - finally: - archive.close() - - # building a sdist again - dist, cmd = self.get_cmd() - - # creating a gztar - cmd.formats = ['gztar'] - cmd.ensure_finalized() - cmd.run() - - # making sure we have the good rights - archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz') - archive = tarfile.open(archive_name) - - # note that we are not testing the group ownership here - # because, depending on the platforms and the container - # rights (see #7408) - try: - for member in archive.getmembers(): - self.assertEqual(member.uid, os.getuid()) - finally: - archive.close() - -def 
test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(SDistTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_spawn.py b/Lib/distutils/tests/test_spawn.py deleted file mode 100644 index a0a1145da5df8e0..000000000000000 --- a/Lib/distutils/tests/test_spawn.py +++ /dev/null @@ -1,139 +0,0 @@ -"""Tests for distutils.spawn.""" -import os -import stat -import sys -import unittest.mock -from test.support import run_unittest, unix_shell, requires_subprocess -from test.support import os_helper - -from distutils.spawn import find_executable -from distutils.spawn import spawn -from distutils.errors import DistutilsExecError -from distutils.tests import support - - -@requires_subprocess() -class SpawnTestCase(support.TempdirManager, - support.LoggingSilencer, - unittest.TestCase): - - @unittest.skipUnless(os.name in ('nt', 'posix'), - 'Runs only under posix or nt') - def test_spawn(self): - tmpdir = self.mkdtemp() - - # creating something executable - # through the shell that returns 1 - if sys.platform != 'win32': - exe = os.path.join(tmpdir, 'foo.sh') - self.write_file(exe, '#!%s\nexit 1' % unix_shell) - else: - exe = os.path.join(tmpdir, 'foo.bat') - self.write_file(exe, 'exit 1') - - os.chmod(exe, 0o777) - self.assertRaises(DistutilsExecError, spawn, [exe]) - - # now something that works - if sys.platform != 'win32': - exe = os.path.join(tmpdir, 'foo.sh') - self.write_file(exe, '#!%s\nexit 0' % unix_shell) - else: - exe = os.path.join(tmpdir, 'foo.bat') - self.write_file(exe, 'exit 0') - - os.chmod(exe, 0o777) - spawn([exe]) # should work without any error - - def test_find_executable(self): - with os_helper.temp_dir() as tmp_dir: - # use TESTFN to get a pseudo-unique filename - program_noeext = os_helper.TESTFN - # Give the temporary program an ".exe" suffix for all. - # It's needed on Windows and not harmful on other platforms. 
- program = program_noeext + ".exe" - - filename = os.path.join(tmp_dir, program) - with open(filename, "wb"): - pass - os.chmod(filename, stat.S_IXUSR) - - # test path parameter - rv = find_executable(program, path=tmp_dir) - self.assertEqual(rv, filename) - - if sys.platform == 'win32': - # test without ".exe" extension - rv = find_executable(program_noeext, path=tmp_dir) - self.assertEqual(rv, filename) - - # test find in the current directory - with os_helper.change_cwd(tmp_dir): - rv = find_executable(program) - self.assertEqual(rv, program) - - # test non-existent program - dont_exist_program = "dontexist_" + program - rv = find_executable(dont_exist_program , path=tmp_dir) - self.assertIsNone(rv) - - # PATH='': no match, except in the current directory - with os_helper.EnvironmentVarGuard() as env: - env['PATH'] = '' - with unittest.mock.patch('distutils.spawn.os.confstr', - return_value=tmp_dir, create=True), \ - unittest.mock.patch('distutils.spawn.os.defpath', - tmp_dir): - rv = find_executable(program) - self.assertIsNone(rv) - - # look in current directory - with os_helper.change_cwd(tmp_dir): - rv = find_executable(program) - self.assertEqual(rv, program) - - # PATH=':': explicitly looks in the current directory - with os_helper.EnvironmentVarGuard() as env: - env['PATH'] = os.pathsep - with unittest.mock.patch('distutils.spawn.os.confstr', - return_value='', create=True), \ - unittest.mock.patch('distutils.spawn.os.defpath', ''): - rv = find_executable(program) - self.assertIsNone(rv) - - # look in current directory - with os_helper.change_cwd(tmp_dir): - rv = find_executable(program) - self.assertEqual(rv, program) - - # missing PATH: test os.confstr("CS_PATH") and os.defpath - with os_helper.EnvironmentVarGuard() as env: - env.pop('PATH', None) - - # without confstr - with unittest.mock.patch('distutils.spawn.os.confstr', - side_effect=ValueError, - create=True), \ - unittest.mock.patch('distutils.spawn.os.defpath', - tmp_dir): - rv = find_executable(program) - self.assertEqual(rv, filename) - - # with confstr - with unittest.mock.patch('distutils.spawn.os.confstr', - return_value=tmp_dir, create=True), \ - unittest.mock.patch('distutils.spawn.os.defpath', ''): - rv = find_executable(program) - self.assertEqual(rv, filename) - - def test_spawn_missing_exe(self): - with self.assertRaises(DistutilsExecError) as ctx: - spawn(['does-not-exist']) - self.assertIn("command 'does-not-exist' failed", str(ctx.exception)) - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(SpawnTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_sysconfig.py b/Lib/distutils/tests/test_sysconfig.py deleted file mode 100644 index ae0eca897bc7bdd..000000000000000 --- a/Lib/distutils/tests/test_sysconfig.py +++ /dev/null @@ -1,263 +0,0 @@ -"""Tests for distutils.sysconfig.""" -import contextlib -import os -import shutil -import subprocess -import sys -import textwrap -import unittest - -from distutils import sysconfig -from distutils.ccompiler import get_default_compiler -from distutils.tests import support -from test.support import run_unittest, swap_item, requires_subprocess, is_wasi -from test.support.os_helper import TESTFN - - -class SysconfigTestCase(support.EnvironGuard, unittest.TestCase): - def setUp(self): - super(SysconfigTestCase, self).setUp() - self.makefile = None - - def tearDown(self): - if self.makefile is not None: - os.unlink(self.makefile) - self.cleanup_testfn() - super(SysconfigTestCase, self).tearDown() 
- - def cleanup_testfn(self): - if os.path.isfile(TESTFN): - os.remove(TESTFN) - elif os.path.isdir(TESTFN): - shutil.rmtree(TESTFN) - - @unittest.skipIf(is_wasi, "Incompatible with WASI mapdir and OOT builds") - def test_get_config_h_filename(self): - config_h = sysconfig.get_config_h_filename() - self.assertTrue(os.path.isfile(config_h), config_h) - - def test_get_python_lib(self): - # XXX doesn't work on Linux when Python was never installed before - #self.assertTrue(os.path.isdir(lib_dir), lib_dir) - # test for pythonxx.lib? - self.assertNotEqual(sysconfig.get_python_lib(), - sysconfig.get_python_lib(prefix=TESTFN)) - - def test_get_config_vars(self): - cvars = sysconfig.get_config_vars() - self.assertIsInstance(cvars, dict) - self.assertTrue(cvars) - - @unittest.skipIf(is_wasi, "Incompatible with WASI mapdir and OOT builds") - def test_srcdir(self): - # See Issues #15322, #15364. - srcdir = sysconfig.get_config_var('srcdir') - - self.assertTrue(os.path.isabs(srcdir), srcdir) - self.assertTrue(os.path.isdir(srcdir), srcdir) - - if sysconfig.python_build: - # The python executable has not been installed so srcdir - # should be a full source checkout. - Python_h = os.path.join(srcdir, 'Include', 'Python.h') - self.assertTrue(os.path.exists(Python_h), Python_h) - # /PC/pyconfig.h always exists even if unused on POSIX. - pyconfig_h = os.path.join(srcdir, 'PC', 'pyconfig.h') - self.assertTrue(os.path.exists(pyconfig_h), pyconfig_h) - pyconfig_h_in = os.path.join(srcdir, 'pyconfig.h.in') - self.assertTrue(os.path.exists(pyconfig_h_in), pyconfig_h_in) - elif os.name == 'posix': - self.assertEqual( - os.path.dirname(sysconfig.get_makefile_filename()), - srcdir) - - def test_srcdir_independent_of_cwd(self): - # srcdir should be independent of the current working directory - # See Issues #15322, #15364. 
- srcdir = sysconfig.get_config_var('srcdir') - cwd = os.getcwd() - try: - os.chdir('..') - srcdir2 = sysconfig.get_config_var('srcdir') - finally: - os.chdir(cwd) - self.assertEqual(srcdir, srcdir2) - - def customize_compiler(self): - # make sure AR gets caught - class compiler: - compiler_type = 'unix' - - def set_executables(self, **kw): - self.exes = kw - - sysconfig_vars = { - 'AR': 'sc_ar', - 'CC': 'sc_cc', - 'CXX': 'sc_cxx', - 'ARFLAGS': '--sc-arflags', - 'CFLAGS': '--sc-cflags', - 'CCSHARED': '--sc-ccshared', - 'LDSHARED': 'sc_ldshared', - 'SHLIB_SUFFIX': 'sc_shutil_suffix', - - # On macOS, disable _osx_support.customize_compiler() - 'CUSTOMIZED_OSX_COMPILER': 'True', - } - - comp = compiler() - with contextlib.ExitStack() as cm: - for key, value in sysconfig_vars.items(): - cm.enter_context(swap_item(sysconfig._config_vars, key, value)) - sysconfig.customize_compiler(comp) - - return comp - - @unittest.skipUnless(get_default_compiler() == 'unix', - 'not testing if default compiler is not unix') - def test_customize_compiler(self): - # Make sure that sysconfig._config_vars is initialized - sysconfig.get_config_vars() - - os.environ['AR'] = 'env_ar' - os.environ['CC'] = 'env_cc' - os.environ['CPP'] = 'env_cpp' - os.environ['CXX'] = 'env_cxx --env-cxx-flags' - os.environ['LDSHARED'] = 'env_ldshared' - os.environ['LDFLAGS'] = '--env-ldflags' - os.environ['ARFLAGS'] = '--env-arflags' - os.environ['CFLAGS'] = '--env-cflags' - os.environ['CPPFLAGS'] = '--env-cppflags' - - comp = self.customize_compiler() - self.assertEqual(comp.exes['archiver'], - 'env_ar --env-arflags') - self.assertEqual(comp.exes['preprocessor'], - 'env_cpp --env-cppflags') - self.assertEqual(comp.exes['compiler'], - 'env_cc --sc-cflags --env-cflags --env-cppflags') - self.assertEqual(comp.exes['compiler_so'], - ('env_cc --sc-cflags ' - '--env-cflags ''--env-cppflags --sc-ccshared')) - self.assertEqual(comp.exes['compiler_cxx'], - 'env_cxx --env-cxx-flags') - self.assertEqual(comp.exes['linker_exe'], - 'env_cc') - self.assertEqual(comp.exes['linker_so'], - ('env_ldshared --env-ldflags --env-cflags' - ' --env-cppflags')) - self.assertEqual(comp.shared_lib_extension, 'sc_shutil_suffix') - - del os.environ['AR'] - del os.environ['CC'] - del os.environ['CPP'] - del os.environ['CXX'] - del os.environ['LDSHARED'] - del os.environ['LDFLAGS'] - del os.environ['ARFLAGS'] - del os.environ['CFLAGS'] - del os.environ['CPPFLAGS'] - - comp = self.customize_compiler() - self.assertEqual(comp.exes['archiver'], - 'sc_ar --sc-arflags') - self.assertEqual(comp.exes['preprocessor'], - 'sc_cc -E') - self.assertEqual(comp.exes['compiler'], - 'sc_cc --sc-cflags') - self.assertEqual(comp.exes['compiler_so'], - 'sc_cc --sc-cflags --sc-ccshared') - self.assertEqual(comp.exes['compiler_cxx'], - 'sc_cxx') - self.assertEqual(comp.exes['linker_exe'], - 'sc_cc') - self.assertEqual(comp.exes['linker_so'], - 'sc_ldshared') - self.assertEqual(comp.shared_lib_extension, 'sc_shutil_suffix') - - def test_parse_makefile_base(self): - self.makefile = TESTFN - fd = open(self.makefile, 'w') - try: - fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=LIB'" '\n') - fd.write('VAR=$OTHER\nOTHER=foo') - finally: - fd.close() - d = sysconfig.parse_makefile(self.makefile) - self.assertEqual(d, {'CONFIG_ARGS': "'--arg1=optarg1' 'ENV=LIB'", - 'OTHER': 'foo'}) - - def test_parse_makefile_literal_dollar(self): - self.makefile = TESTFN - fd = open(self.makefile, 'w') - try: - fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=\$$LIB'" '\n') - fd.write('VAR=$OTHER\nOTHER=foo') - 
finally: - fd.close() - d = sysconfig.parse_makefile(self.makefile) - self.assertEqual(d, {'CONFIG_ARGS': r"'--arg1=optarg1' 'ENV=\$LIB'", - 'OTHER': 'foo'}) - - - def test_sysconfig_module(self): - import sysconfig as global_sysconfig - self.assertEqual(global_sysconfig.get_config_var('CFLAGS'), - sysconfig.get_config_var('CFLAGS')) - self.assertEqual(global_sysconfig.get_config_var('LDFLAGS'), - sysconfig.get_config_var('LDFLAGS')) - - @unittest.skipIf(sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'), - 'compiler flags customized') - def test_sysconfig_compiler_vars(self): - # On OS X, binary installers support extension module building on - # various levels of the operating system with differing Xcode - # configurations. This requires customization of some of the - # compiler configuration directives to suit the environment on - # the installed machine. Some of these customizations may require - # running external programs and, so, are deferred until needed by - # the first extension module build. With Python 3.3, only - # the Distutils version of sysconfig is used for extension module - # builds, which happens earlier in the Distutils tests. This may - # cause the following tests to fail since no tests have caused - # the global version of sysconfig to call the customization yet. - # The solution for now is to simply skip this test in this case. - # The longer-term solution is to only have one version of sysconfig. - - import sysconfig as global_sysconfig - if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'): - self.skipTest('compiler flags customized') - self.assertEqual(global_sysconfig.get_config_var('LDSHARED'), - sysconfig.get_config_var('LDSHARED')) - self.assertEqual(global_sysconfig.get_config_var('CC'), - sysconfig.get_config_var('CC')) - - @requires_subprocess() - def test_customize_compiler_before_get_config_vars(self): - # Issue #21923: test that a Distribution compiler - # instance can be called without an explicit call to - # get_config_vars(). - with open(TESTFN, 'w') as f: - f.writelines(textwrap.dedent('''\ - from distutils.core import Distribution - config = Distribution().get_command_obj('config') - # try_compile may pass or it may fail if no compiler - # is found but it should not raise an exception. 
- rc = config.try_compile('int x;') - ''')) - p = subprocess.Popen([str(sys.executable), TESTFN], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) - outs, errs = p.communicate() - self.assertEqual(0, p.returncode, "Subprocess failed: " + outs) - - -def test_suite(): - suite = unittest.TestSuite() - suite.addTest(unittest.TestLoader().loadTestsFromTestCase(SysconfigTestCase)) - return suite - - -if __name__ == '__main__': - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_text_file.py b/Lib/distutils/tests/test_text_file.py deleted file mode 100644 index ebac3d52f9097c2..000000000000000 --- a/Lib/distutils/tests/test_text_file.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Tests for distutils.text_file.""" -import os -import unittest -from distutils.text_file import TextFile -from distutils.tests import support -from test.support import run_unittest - -TEST_DATA = """# test file - -line 3 \\ -# intervening comment - continues on next line -""" - -class TextFileTestCase(support.TempdirManager, unittest.TestCase): - - def test_class(self): - # old tests moved from text_file.__main__ - # so they are really called by the buildbots - - # result 1: no fancy options - result1 = ['# test file\n', '\n', 'line 3 \\\n', - '# intervening comment\n', - ' continues on next line\n'] - - # result 2: just strip comments - result2 = ["\n", - "line 3 \\\n", - " continues on next line\n"] - - # result 3: just strip blank lines - result3 = ["# test file\n", - "line 3 \\\n", - "# intervening comment\n", - " continues on next line\n"] - - # result 4: default, strip comments, blank lines, - # and trailing whitespace - result4 = ["line 3 \\", - " continues on next line"] - - # result 5: strip comments and blanks, plus join lines (but don't - # "collapse" joined lines - result5 = ["line 3 continues on next line"] - - # result 6: strip comments and blanks, plus join lines (and - # "collapse" joined lines - result6 = ["line 3 continues on next line"] - - def test_input(count, description, file, expected_result): - result = file.readlines() - self.assertEqual(result, expected_result) - - tmpdir = self.mkdtemp() - filename = os.path.join(tmpdir, "test.txt") - out_file = open(filename, "w") - try: - out_file.write(TEST_DATA) - finally: - out_file.close() - - in_file = TextFile(filename, strip_comments=0, skip_blanks=0, - lstrip_ws=0, rstrip_ws=0) - try: - test_input(1, "no processing", in_file, result1) - finally: - in_file.close() - - in_file = TextFile(filename, strip_comments=1, skip_blanks=0, - lstrip_ws=0, rstrip_ws=0) - try: - test_input(2, "strip comments", in_file, result2) - finally: - in_file.close() - - in_file = TextFile(filename, strip_comments=0, skip_blanks=1, - lstrip_ws=0, rstrip_ws=0) - try: - test_input(3, "strip blanks", in_file, result3) - finally: - in_file.close() - - in_file = TextFile(filename) - try: - test_input(4, "default processing", in_file, result4) - finally: - in_file.close() - - in_file = TextFile(filename, strip_comments=1, skip_blanks=1, - join_lines=1, rstrip_ws=1) - try: - test_input(5, "join lines without collapsing", in_file, result5) - finally: - in_file.close() - - in_file = TextFile(filename, strip_comments=1, skip_blanks=1, - join_lines=1, rstrip_ws=1, collapse_join=1) - try: - test_input(6, "join lines with collapsing", in_file, result6) - finally: - in_file.close() - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(TextFileTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git 
a/Lib/distutils/tests/test_unixccompiler.py b/Lib/distutils/tests/test_unixccompiler.py deleted file mode 100644 index a3484d4f94cd921..000000000000000 --- a/Lib/distutils/tests/test_unixccompiler.py +++ /dev/null @@ -1,145 +0,0 @@ -"""Tests for distutils.unixccompiler.""" -import sys -import unittest -from test.support import run_unittest -from test.support.os_helper import EnvironmentVarGuard - -from distutils import sysconfig -from distutils.unixccompiler import UnixCCompiler - -class UnixCCompilerTestCase(unittest.TestCase): - - def setUp(self): - self._backup_platform = sys.platform - self._backup_get_config_var = sysconfig.get_config_var - self._backup_config_vars = dict(sysconfig._config_vars) - class CompilerWrapper(UnixCCompiler): - def rpath_foo(self): - return self.runtime_library_dir_option('/foo') - self.cc = CompilerWrapper() - - def tearDown(self): - sys.platform = self._backup_platform - sysconfig.get_config_var = self._backup_get_config_var - sysconfig._config_vars.clear() - sysconfig._config_vars.update(self._backup_config_vars) - - @unittest.skipIf(sys.platform == 'win32', "can't test on Windows") - def test_runtime_libdir_option(self): - # Issue#5900 - # - # Ensure RUNPATH is added to extension modules with RPATH if - # GNU ld is used - - # darwin - sys.platform = 'darwin' - self.assertEqual(self.cc.rpath_foo(), '-L/foo') - - # hp-ux - sys.platform = 'hp-ux' - old_gcv = sysconfig.get_config_var - def gcv(v): - return 'xxx' - sysconfig.get_config_var = gcv - self.assertEqual(self.cc.rpath_foo(), ['+s', '-L/foo']) - - def gcv(v): - return 'gcc' - sysconfig.get_config_var = gcv - self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo']) - - def gcv(v): - return 'g++' - sysconfig.get_config_var = gcv - self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo']) - - sysconfig.get_config_var = old_gcv - - # GCC GNULD - sys.platform = 'bar' - def gcv(v): - if v == 'CC': - return 'gcc' - elif v == 'GNULD': - return 'yes' - sysconfig.get_config_var = gcv - self.assertEqual(self.cc.rpath_foo(), '-Wl,--enable-new-dtags,-R/foo') - - # GCC non-GNULD - sys.platform = 'bar' - def gcv(v): - if v == 'CC': - return 'gcc' - elif v == 'GNULD': - return 'no' - sysconfig.get_config_var = gcv - self.assertEqual(self.cc.rpath_foo(), '-Wl,-R/foo') - - # GCC GNULD with fully qualified configuration prefix - # see #7617 - sys.platform = 'bar' - def gcv(v): - if v == 'CC': - return 'x86_64-pc-linux-gnu-gcc-4.4.2' - elif v == 'GNULD': - return 'yes' - sysconfig.get_config_var = gcv - self.assertEqual(self.cc.rpath_foo(), '-Wl,--enable-new-dtags,-R/foo') - - # non-GCC GNULD - sys.platform = 'bar' - def gcv(v): - if v == 'CC': - return 'cc' - elif v == 'GNULD': - return 'yes' - sysconfig.get_config_var = gcv - self.assertEqual(self.cc.rpath_foo(), '-R/foo') - - # non-GCC non-GNULD - sys.platform = 'bar' - def gcv(v): - if v == 'CC': - return 'cc' - elif v == 'GNULD': - return 'no' - sysconfig.get_config_var = gcv - self.assertEqual(self.cc.rpath_foo(), '-R/foo') - - @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X') - def test_osx_cc_overrides_ldshared(self): - # Issue #18080: - # ensure that setting CC env variable also changes default linker - def gcv(v): - if v == 'LDSHARED': - return 'gcc-4.2 -bundle -undefined dynamic_lookup ' - return 'gcc-4.2' - sysconfig.get_config_var = gcv - with EnvironmentVarGuard() as env: - env['CC'] = 'my_cc' - del env['LDSHARED'] - sysconfig.customize_compiler(self.cc) - self.assertEqual(self.cc.linker_so[0], 'my_cc') - - 
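# Illustrative sketch of the behaviour exercised by the rpath tests above:
# querying the flag directly on a UnixCCompiler instance.  The exact value
# depends on the host platform and on the CC/GNULD settings reported by
# distutils.sysconfig, so the flags named in the comments are examples only,
# and the import assumes an interpreter that still ships distutils.
from distutils.unixccompiler import UnixCCompiler

cc = UnixCCompiler()
# Typical results: '-Wl,--enable-new-dtags,-R/opt/lib' for gcc with GNU ld,
# '-Wl,-R/opt/lib' for gcc with a non-GNU ld, '-R/opt/lib' for a non-gcc
# compiler, '-L/opt/lib' on macOS, and ['-Wl,+s', '-L/opt/lib'] on HP-UX.
print(cc.runtime_library_dir_option('/opt/lib'))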
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X') - def test_osx_explicit_ldshared(self): - # Issue #18080: - # ensure that setting CC env variable does not change - # explicit LDSHARED setting for linker - def gcv(v): - if v == 'LDSHARED': - return 'gcc-4.2 -bundle -undefined dynamic_lookup ' - return 'gcc-4.2' - sysconfig.get_config_var = gcv - with EnvironmentVarGuard() as env: - env['CC'] = 'my_cc' - env['LDSHARED'] = 'my_ld -bundle -dynamic' - sysconfig.customize_compiler(self.cc) - self.assertEqual(self.cc.linker_so[0], 'my_ld') - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(UnixCCompilerTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_upload.py b/Lib/distutils/tests/test_upload.py deleted file mode 100644 index d67974148830609..000000000000000 --- a/Lib/distutils/tests/test_upload.py +++ /dev/null @@ -1,223 +0,0 @@ -"""Tests for distutils.command.upload.""" -import os -import unittest -import unittest.mock as mock -from urllib.error import HTTPError - -from test.support import run_unittest - -from distutils.command import upload as upload_mod -from distutils.command.upload import upload -from distutils.core import Distribution -from distutils.errors import DistutilsError -from distutils.log import ERROR, INFO - -from distutils.tests.test_config import PYPIRC, BasePyPIRCCommandTestCase - -PYPIRC_LONG_PASSWORD = """\ -[distutils] - -index-servers = - server1 - server2 - -[server1] -username:me -password:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - -[server2] -username:meagain -password: secret -realm:acme -repository:http://another.pypi/ -""" - - -PYPIRC_NOPASSWORD = """\ -[distutils] - -index-servers = - server1 - -[server1] -username:me -""" - -class FakeOpen(object): - - def __init__(self, url, msg=None, code=None): - self.url = url - if not isinstance(url, str): - self.req = url - else: - self.req = None - self.msg = msg or 'OK' - self.code = code or 200 - - def getheader(self, name, default=None): - return { - 'content-type': 'text/plain; charset=utf-8', - }.get(name.lower(), default) - - def read(self): - return b'xyzzy' - - def getcode(self): - return self.code - - -class uploadTestCase(BasePyPIRCCommandTestCase): - - def setUp(self): - super(uploadTestCase, self).setUp() - self.old_open = upload_mod.urlopen - upload_mod.urlopen = self._urlopen - self.last_open = None - self.next_msg = None - self.next_code = None - - def tearDown(self): - upload_mod.urlopen = self.old_open - super(uploadTestCase, self).tearDown() - - def _urlopen(self, url): - self.last_open = FakeOpen(url, msg=self.next_msg, code=self.next_code) - return self.last_open - - def test_finalize_options(self): - - # new format - self.write_file(self.rc, PYPIRC) - dist = Distribution() - cmd = upload(dist) - cmd.finalize_options() - for attr, waited in (('username', 'me'), ('password', 'secret'), - ('realm', 'pypi'), - ('repository', 'https://upload.pypi.org/legacy/')): - self.assertEqual(getattr(cmd, attr), waited) - - def test_saved_password(self): - # file with no password - self.write_file(self.rc, PYPIRC_NOPASSWORD) - - # make sure it passes - dist = Distribution() - cmd = upload(dist) - cmd.finalize_options() - self.assertEqual(cmd.password, None) - - # make sure we get it as well, if another command - # initialized it at the dist level - dist.password = 'xxx' - cmd = upload(dist) - cmd.finalize_options() - self.assertEqual(cmd.password, 'xxx') - - def test_upload(self): 
- tmp = self.mkdtemp() - path = os.path.join(tmp, 'xxx') - self.write_file(path) - command, pyversion, filename = 'xxx', '2.6', path - dist_files = [(command, pyversion, filename)] - self.write_file(self.rc, PYPIRC_LONG_PASSWORD) - - # lets run it - pkg_dir, dist = self.create_dist(dist_files=dist_files) - cmd = upload(dist) - cmd.show_response = 1 - cmd.ensure_finalized() - cmd.run() - - # what did we send ? - headers = dict(self.last_open.req.headers) - self.assertGreaterEqual(int(headers['Content-length']), 2162) - content_type = headers['Content-type'] - self.assertTrue(content_type.startswith('multipart/form-data')) - self.assertEqual(self.last_open.req.get_method(), 'POST') - expected_url = 'https://upload.pypi.org/legacy/' - self.assertEqual(self.last_open.req.get_full_url(), expected_url) - data = self.last_open.req.data - self.assertIn(b'xxx',data) - self.assertIn(b'protocol_version', data) - self.assertIn(b'sha256_digest', data) - self.assertIn( - b'cd2eb0837c9b4c962c22d2ff8b5441b7b45805887f051d39bf133b583baf' - b'6860', - data - ) - if b'md5_digest' in data: - self.assertIn(b'f561aaf6ef0bf14d4208bb46a4ccb3ad', data) - if b'blake2_256_digest' in data: - self.assertIn( - b'b6f289a27d4fe90da63c503bfe0a9b761a8f76bb86148565065f040be' - b'6d1c3044cf7ded78ef800509bccb4b648e507d88dc6383d67642aadcc' - b'ce443f1534330a', - data - ) - - # The PyPI response body was echoed - results = self.get_logs(INFO) - self.assertEqual(results[-1], 75 * '-' + '\nxyzzy\n' + 75 * '-') - - # bpo-32304: archives whose last byte was b'\r' were corrupted due to - # normalization intended for Mac OS 9. - def test_upload_correct_cr(self): - # content that ends with \r should not be modified. - tmp = self.mkdtemp() - path = os.path.join(tmp, 'xxx') - self.write_file(path, content='yy\r') - command, pyversion, filename = 'xxx', '2.6', path - dist_files = [(command, pyversion, filename)] - self.write_file(self.rc, PYPIRC_LONG_PASSWORD) - - # other fields that ended with \r used to be modified, now are - # preserved. 
- pkg_dir, dist = self.create_dist( - dist_files=dist_files, - description='long description\r' - ) - cmd = upload(dist) - cmd.show_response = 1 - cmd.ensure_finalized() - cmd.run() - - headers = dict(self.last_open.req.headers) - self.assertGreaterEqual(int(headers['Content-length']), 2172) - self.assertIn(b'long description\r', self.last_open.req.data) - - def test_upload_fails(self): - self.next_msg = "Not Found" - self.next_code = 404 - self.assertRaises(DistutilsError, self.test_upload) - - def test_wrong_exception_order(self): - tmp = self.mkdtemp() - path = os.path.join(tmp, 'xxx') - self.write_file(path) - dist_files = [('xxx', '2.6', path)] # command, pyversion, filename - self.write_file(self.rc, PYPIRC_LONG_PASSWORD) - - pkg_dir, dist = self.create_dist(dist_files=dist_files) - tests = [ - (OSError('oserror'), 'oserror', OSError), - (HTTPError('url', 400, 'httperror', {}, None), - 'Upload failed (400): httperror', DistutilsError), - ] - for exception, expected, raised_exception in tests: - with self.subTest(exception=type(exception).__name__): - with mock.patch('distutils.command.upload.urlopen', - new=mock.Mock(side_effect=exception)): - with self.assertRaises(raised_exception): - cmd = upload(dist) - cmd.ensure_finalized() - cmd.run() - results = self.get_logs(ERROR) - self.assertIn(expected, results[-1]) - self.clear_logs() - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(uploadTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_util.py b/Lib/distutils/tests/test_util.py deleted file mode 100644 index f9c223f06ec68d8..000000000000000 --- a/Lib/distutils/tests/test_util.py +++ /dev/null @@ -1,313 +0,0 @@ -"""Tests for distutils.util.""" -import os -import sys -import unittest -from copy import copy -from test.support import run_unittest -from unittest import mock - -from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError -from distutils.util import (get_platform, convert_path, change_root, - check_environ, split_quoted, strtobool, - rfc822_escape, byte_compile, - grok_environment_error) -from distutils import util # used to patch _environ_checked -from distutils.sysconfig import get_config_vars -from distutils import sysconfig -from distutils.tests import support -import _osx_support - -class UtilTestCase(support.EnvironGuard, unittest.TestCase): - - def setUp(self): - super(UtilTestCase, self).setUp() - # saving the environment - self.name = os.name - self.platform = sys.platform - self.version = sys.version - self.sep = os.sep - self.join = os.path.join - self.isabs = os.path.isabs - self.splitdrive = os.path.splitdrive - self._config_vars = copy(sysconfig._config_vars) - - # patching os.uname - if hasattr(os, 'uname'): - self.uname = os.uname - self._uname = os.uname() - else: - self.uname = None - self._uname = None - - os.uname = self._get_uname - - def tearDown(self): - # getting back the environment - os.name = self.name - sys.platform = self.platform - sys.version = self.version - os.sep = self.sep - os.path.join = self.join - os.path.isabs = self.isabs - os.path.splitdrive = self.splitdrive - if self.uname is not None: - os.uname = self.uname - else: - del os.uname - sysconfig._config_vars.clear() - sysconfig._config_vars.update(self._config_vars) - super(UtilTestCase, self).tearDown() - - def _set_uname(self, uname): - self._uname = uname - - def _get_uname(self): - return self._uname - - def test_get_platform(self): - - # windows XP, 32bits - os.name = 'nt' - 
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' - '[MSC v.1310 32 bit (Intel)]') - sys.platform = 'win32' - self.assertEqual(get_platform(), 'win32') - - # windows XP, amd64 - os.name = 'nt' - sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' - '[MSC v.1310 32 bit (Amd64)]') - sys.platform = 'win32' - self.assertEqual(get_platform(), 'win-amd64') - - # macbook - os.name = 'posix' - sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) ' - '\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]') - sys.platform = 'darwin' - self._set_uname(('Darwin', 'macziade', '8.11.1', - ('Darwin Kernel Version 8.11.1: ' - 'Wed Oct 10 18:23:28 PDT 2007; ' - 'root:xnu-792.25.20~1/RELEASE_I386'), 'i386')) - _osx_support._remove_original_values(get_config_vars()) - get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3' - - get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g ' - '-fwrapv -O3 -Wall -Wstrict-prototypes') - - cursize = sys.maxsize - sys.maxsize = (2 ** 31)-1 - try: - self.assertEqual(get_platform(), 'macosx-10.3-i386') - finally: - sys.maxsize = cursize - - # macbook with fat binaries (fat, universal or fat64) - _osx_support._remove_original_values(get_config_vars()) - get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4' - get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot ' - '/Developer/SDKs/MacOSX10.4u.sdk ' - '-fno-strict-aliasing -fno-common ' - '-dynamic -DNDEBUG -g -O3') - - self.assertEqual(get_platform(), 'macosx-10.4-fat') - - _osx_support._remove_original_values(get_config_vars()) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.1' - self.assertEqual(get_platform(), 'macosx-10.4-fat') - - - _osx_support._remove_original_values(get_config_vars()) - get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot ' - '/Developer/SDKs/MacOSX10.4u.sdk ' - '-fno-strict-aliasing -fno-common ' - '-dynamic -DNDEBUG -g -O3') - - self.assertEqual(get_platform(), 'macosx-10.4-intel') - - _osx_support._remove_original_values(get_config_vars()) - get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot ' - '/Developer/SDKs/MacOSX10.4u.sdk ' - '-fno-strict-aliasing -fno-common ' - '-dynamic -DNDEBUG -g -O3') - self.assertEqual(get_platform(), 'macosx-10.4-fat3') - - _osx_support._remove_original_values(get_config_vars()) - get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot ' - '/Developer/SDKs/MacOSX10.4u.sdk ' - '-fno-strict-aliasing -fno-common ' - '-dynamic -DNDEBUG -g -O3') - self.assertEqual(get_platform(), 'macosx-10.4-universal') - - _osx_support._remove_original_values(get_config_vars()) - get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot ' - '/Developer/SDKs/MacOSX10.4u.sdk ' - '-fno-strict-aliasing -fno-common ' - '-dynamic -DNDEBUG -g -O3') - - self.assertEqual(get_platform(), 'macosx-10.4-fat64') - - for arch in ('ppc', 'i386', 'x86_64', 'ppc64'): - _osx_support._remove_original_values(get_config_vars()) - get_config_vars()['CFLAGS'] = ('-arch %s -isysroot ' - '/Developer/SDKs/MacOSX10.4u.sdk ' - '-fno-strict-aliasing -fno-common ' - '-dynamic -DNDEBUG -g -O3'%(arch,)) - - self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,)) - - - # linux debian sarge - os.name = 'posix' - sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) ' - '\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]') - sys.platform = 'linux2' - self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7', - '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686')) - - self.assertEqual(get_platform(), 'linux-i686') - - # XXX more 
platforms to tests here - - def test_convert_path(self): - # linux/mac - os.sep = '/' - def _join(path): - return '/'.join(path) - os.path.join = _join - - self.assertEqual(convert_path('/home/to/my/stuff'), - '/home/to/my/stuff') - - # win - os.sep = '\\' - def _join(*path): - return '\\'.join(path) - os.path.join = _join - - self.assertRaises(ValueError, convert_path, '/home/to/my/stuff') - self.assertRaises(ValueError, convert_path, 'home/to/my/stuff/') - - self.assertEqual(convert_path('home/to/my/stuff'), - 'home\\to\\my\\stuff') - self.assertEqual(convert_path('.'), - os.curdir) - - def test_change_root(self): - # linux/mac - os.name = 'posix' - def _isabs(path): - return path[0] == '/' - os.path.isabs = _isabs - def _join(*path): - return '/'.join(path) - os.path.join = _join - - self.assertEqual(change_root('/root', '/old/its/here'), - '/root/old/its/here') - self.assertEqual(change_root('/root', 'its/here'), - '/root/its/here') - - # windows - os.name = 'nt' - def _isabs(path): - return path.startswith('c:\\') - os.path.isabs = _isabs - def _splitdrive(path): - if path.startswith('c:'): - return ('', path.replace('c:', '')) - return ('', path) - os.path.splitdrive = _splitdrive - def _join(*path): - return '\\'.join(path) - os.path.join = _join - - self.assertEqual(change_root('c:\\root', 'c:\\old\\its\\here'), - 'c:\\root\\old\\its\\here') - self.assertEqual(change_root('c:\\root', 'its\\here'), - 'c:\\root\\its\\here') - - # BugsBunny os (it's a great os) - os.name = 'BugsBunny' - self.assertRaises(DistutilsPlatformError, - change_root, 'c:\\root', 'its\\here') - - # XXX platforms to be covered: mac - - def test_check_environ(self): - util._environ_checked = 0 - os.environ.pop('HOME', None) - - check_environ() - - self.assertEqual(os.environ['PLAT'], get_platform()) - self.assertEqual(util._environ_checked, 1) - - @unittest.skipUnless(os.name == 'posix', 'specific to posix') - def test_check_environ_getpwuid(self): - util._environ_checked = 0 - os.environ.pop('HOME', None) - - try: - import pwd - except ImportError: - raise unittest.SkipTest("Test requires pwd module.") - - # only set pw_dir field, other fields are not used - result = pwd.struct_passwd((None, None, None, None, None, - '/home/distutils', None)) - with mock.patch.object(pwd, 'getpwuid', return_value=result): - check_environ() - self.assertEqual(os.environ['HOME'], '/home/distutils') - - util._environ_checked = 0 - os.environ.pop('HOME', None) - - # bpo-10496: Catch pwd.getpwuid() error - with mock.patch.object(pwd, 'getpwuid', side_effect=KeyError): - check_environ() - self.assertNotIn('HOME', os.environ) - - def test_split_quoted(self): - self.assertEqual(split_quoted('""one"" "two" \'three\' \\four'), - ['one', 'two', 'three', 'four']) - - def test_strtobool(self): - yes = ('y', 'Y', 'yes', 'True', 't', 'true', 'True', 'On', 'on', '1') - no = ('n', 'no', 'f', 'false', 'off', '0', 'Off', 'No', 'N') - - for y in yes: - self.assertTrue(strtobool(y)) - - for n in no: - self.assertFalse(strtobool(n)) - - def test_rfc822_escape(self): - header = 'I am a\npoor\nlonesome\nheader\n' - res = rfc822_escape(header) - wanted = ('I am a%(8s)spoor%(8s)slonesome%(8s)s' - 'header%(8s)s') % {'8s': '\n'+8*' '} - self.assertEqual(res, wanted) - - def test_dont_write_bytecode(self): - # makes sure byte_compile raise a DistutilsError - # if sys.dont_write_bytecode is True - old_dont_write_bytecode = sys.dont_write_bytecode - sys.dont_write_bytecode = True - try: - self.assertRaises(DistutilsByteCompileError, byte_compile, []) - 
finally: - sys.dont_write_bytecode = old_dont_write_bytecode - - def test_grok_environment_error(self): - # test obsolete function to ensure backward compat (#4931) - exc = IOError("Unable to find batch file") - msg = grok_environment_error(exc) - self.assertEqual(msg, "error: Unable to find batch file") - - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(UtilTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_version.py b/Lib/distutils/tests/test_version.py deleted file mode 100644 index 1563e0227b60dde..000000000000000 --- a/Lib/distutils/tests/test_version.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Tests for distutils.version.""" -import unittest -from distutils.version import LooseVersion -from distutils.version import StrictVersion -from test.support import run_unittest - -class VersionTestCase(unittest.TestCase): - - def test_prerelease(self): - version = StrictVersion('1.2.3a1') - self.assertEqual(version.version, (1, 2, 3)) - self.assertEqual(version.prerelease, ('a', 1)) - self.assertEqual(str(version), '1.2.3a1') - - version = StrictVersion('1.2.0') - self.assertEqual(str(version), '1.2') - - def test_cmp_strict(self): - versions = (('1.5.1', '1.5.2b2', -1), - ('161', '3.10a', ValueError), - ('8.02', '8.02', 0), - ('3.4j', '1996.07.12', ValueError), - ('3.2.pl0', '3.1.1.6', ValueError), - ('2g6', '11g', ValueError), - ('0.9', '2.2', -1), - ('1.2.1', '1.2', 1), - ('1.1', '1.2.2', -1), - ('1.2', '1.1', 1), - ('1.2.1', '1.2.2', -1), - ('1.2.2', '1.2', 1), - ('1.2', '1.2.2', -1), - ('0.4.0', '0.4', 0), - ('1.13++', '5.5.kw', ValueError)) - - for v1, v2, wanted in versions: - try: - res = StrictVersion(v1)._cmp(StrictVersion(v2)) - except ValueError: - if wanted is ValueError: - continue - else: - raise AssertionError(("cmp(%s, %s) " - "shouldn't raise ValueError") - % (v1, v2)) - self.assertEqual(res, wanted, - 'cmp(%s, %s) should be %s, got %s' % - (v1, v2, wanted, res)) - res = StrictVersion(v1)._cmp(v2) - self.assertEqual(res, wanted, - 'cmp(%s, %s) should be %s, got %s' % - (v1, v2, wanted, res)) - res = StrictVersion(v1)._cmp(object()) - self.assertIs(res, NotImplemented, - 'cmp(%s, %s) should be NotImplemented, got %s' % - (v1, v2, res)) - - - def test_cmp(self): - versions = (('1.5.1', '1.5.2b2', -1), - ('161', '3.10a', 1), - ('8.02', '8.02', 0), - ('3.4j', '1996.07.12', -1), - ('3.2.pl0', '3.1.1.6', 1), - ('2g6', '11g', -1), - ('0.960923', '2.2beta29', -1), - ('1.13++', '5.5.kw', -1)) - - - for v1, v2, wanted in versions: - res = LooseVersion(v1)._cmp(LooseVersion(v2)) - self.assertEqual(res, wanted, - 'cmp(%s, %s) should be %s, got %s' % - (v1, v2, wanted, res)) - res = LooseVersion(v1)._cmp(v2) - self.assertEqual(res, wanted, - 'cmp(%s, %s) should be %s, got %s' % - (v1, v2, wanted, res)) - res = LooseVersion(v1)._cmp(object()) - self.assertIs(res, NotImplemented, - 'cmp(%s, %s) should be NotImplemented, got %s' % - (v1, v2, res)) - -def test_suite(): - return unittest.TestLoader().loadTestsFromTestCase(VersionTestCase) - -if __name__ == "__main__": - run_unittest(test_suite()) diff --git a/Lib/distutils/tests/test_versionpredicate.py b/Lib/distutils/tests/test_versionpredicate.py deleted file mode 100644 index 28ae09dc2058dab..000000000000000 --- a/Lib/distutils/tests/test_versionpredicate.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Tests harness for distutils.versionpredicate. 
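# A short usage sketch for the version classes exercised by test_version.py
# above; every comparison mirrors a row of the deleted test tables and the
# import assumes an interpreter that still provides distutils.version.
from distutils.version import LooseVersion, StrictVersion

v = StrictVersion('1.2.3a1')
print(v.version, v.prerelease)                            # (1, 2, 3) ('a', 1)
print(StrictVersion('1.5.1') < StrictVersion('1.5.2b2'))  # True
print(StrictVersion('0.4.0') == StrictVersion('0.4'))     # True
print(LooseVersion('161') > LooseVersion('3.10a'))        # True
print(LooseVersion('3.2.pl0') > LooseVersion('3.1.1.6'))  # True; StrictVersion rejects these strings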
- -""" - -import distutils.versionpredicate -import doctest -from test.support import run_unittest - -def test_suite(): - return doctest.DocTestSuite(distutils.versionpredicate) - -if __name__ == '__main__': - run_unittest(test_suite()) diff --git a/Lib/distutils/text_file.py b/Lib/distutils/text_file.py deleted file mode 100644 index 93abad38f434d7e..000000000000000 --- a/Lib/distutils/text_file.py +++ /dev/null @@ -1,286 +0,0 @@ -"""text_file - -provides the TextFile class, which gives an interface to text files -that (optionally) takes care of stripping comments, ignoring blank -lines, and joining lines with backslashes.""" - -import sys, io - - -class TextFile: - """Provides a file-like object that takes care of all the things you - commonly want to do when processing a text file that has some - line-by-line syntax: strip comments (as long as "#" is your - comment character), skip blank lines, join adjacent lines by - escaping the newline (ie. backslash at end of line), strip - leading and/or trailing whitespace. All of these are optional - and independently controllable. - - Provides a 'warn()' method so you can generate warning messages that - report physical line number, even if the logical line in question - spans multiple physical lines. Also provides 'unreadline()' for - implementing line-at-a-time lookahead. - - Constructor is called as: - - TextFile (filename=None, file=None, **options) - - It bombs (RuntimeError) if both 'filename' and 'file' are None; - 'filename' should be a string, and 'file' a file object (or - something that provides 'readline()' and 'close()' methods). It is - recommended that you supply at least 'filename', so that TextFile - can include it in warning messages. If 'file' is not supplied, - TextFile creates its own using 'io.open()'. - - The options are all boolean, and affect the value returned by - 'readline()': - strip_comments [default: true] - strip from "#" to end-of-line, as well as any whitespace - leading up to the "#" -- unless it is escaped by a backslash - lstrip_ws [default: false] - strip leading whitespace from each line before returning it - rstrip_ws [default: true] - strip trailing whitespace (including line terminator!) from - each line before returning it - skip_blanks [default: true} - skip lines that are empty *after* stripping comments and - whitespace. (If both lstrip_ws and rstrip_ws are false, - then some lines may consist of solely whitespace: these will - *not* be skipped, even if 'skip_blanks' is true.) - join_lines [default: false] - if a backslash is the last non-newline character on a line - after stripping comments and whitespace, join the following line - to it to form one "logical line"; if N consecutive lines end - with a backslash, then N+1 physical lines will be joined to - form one logical line. - collapse_join [default: false] - strip leading whitespace from lines that are joined to their - predecessor; only matters if (join_lines and not lstrip_ws) - errors [default: 'strict'] - error handler used to decode the file content - - Note that since 'rstrip_ws' can strip the trailing newline, the - semantics of 'readline()' must differ from those of the builtin file - object's 'readline()' method! 
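# Minimal usage sketch for the options listed above, using data modelled on
# the deleted test_text_file.py; the temporary path handling is illustrative
# and the import assumes an interpreter that still ships distutils.
import os
import tempfile
from distutils.text_file import TextFile

data = "# a comment\n\nline 3 \\\n# intervening comment\n  continues on next line\n"
path = os.path.join(tempfile.mkdtemp(), "example.txt")
with open(path, "w") as f:
    f.write(data)

plain = TextFile(path)          # defaults: strip comments and blanks, rstrip whitespace
print(plain.readlines())        # ['line 3 \\', '  continues on next line']
plain.close()

joined = TextFile(path, strip_comments=1, skip_blanks=1,
                  join_lines=1, rstrip_ws=1, collapse_join=1)
print(joined.readlines())       # ['line 3 continues on next line']
joined.close()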
In particular, 'readline()' returns - None for end-of-file: an empty string might just be a blank line (or - an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is - not.""" - - default_options = { 'strip_comments': 1, - 'skip_blanks': 1, - 'lstrip_ws': 0, - 'rstrip_ws': 1, - 'join_lines': 0, - 'collapse_join': 0, - 'errors': 'strict', - } - - def __init__(self, filename=None, file=None, **options): - """Construct a new TextFile object. At least one of 'filename' - (a string) and 'file' (a file-like object) must be supplied. - They keyword argument options are described above and affect - the values returned by 'readline()'.""" - if filename is None and file is None: - raise RuntimeError("you must supply either or both of 'filename' and 'file'") - - # set values for all options -- either from client option hash - # or fallback to default_options - for opt in self.default_options.keys(): - if opt in options: - setattr(self, opt, options[opt]) - else: - setattr(self, opt, self.default_options[opt]) - - # sanity check client option hash - for opt in options.keys(): - if opt not in self.default_options: - raise KeyError("invalid TextFile option '%s'" % opt) - - if file is None: - self.open(filename) - else: - self.filename = filename - self.file = file - self.current_line = 0 # assuming that file is at BOF! - - # 'linebuf' is a stack of lines that will be emptied before we - # actually read from the file; it's only populated by an - # 'unreadline()' operation - self.linebuf = [] - - def open(self, filename): - """Open a new file named 'filename'. This overrides both the - 'filename' and 'file' arguments to the constructor.""" - self.filename = filename - self.file = io.open(self.filename, 'r', errors=self.errors) - self.current_line = 0 - - def close(self): - """Close the current file and forget everything we know about it - (filename, current line number).""" - file = self.file - self.file = None - self.filename = None - self.current_line = None - file.close() - - def gen_error(self, msg, line=None): - outmsg = [] - if line is None: - line = self.current_line - outmsg.append(self.filename + ", ") - if isinstance(line, (list, tuple)): - outmsg.append("lines %d-%d: " % tuple(line)) - else: - outmsg.append("line %d: " % line) - outmsg.append(str(msg)) - return "".join(outmsg) - - def error(self, msg, line=None): - raise ValueError("error: " + self.gen_error(msg, line)) - - def warn(self, msg, line=None): - """Print (to stderr) a warning message tied to the current logical - line in the current file. If the current logical line in the - file spans multiple physical lines, the warning refers to the - whole range, eg. "lines 3-5". If 'line' supplied, it overrides - the current line number; it may be a list or tuple to indicate a - range of physical lines, or an integer for a single physical - line.""" - sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n") - - def readline(self): - """Read and return a single logical line from the current file (or - from an internal buffer if lines have previously been "unread" - with 'unreadline()'). If the 'join_lines' option is true, this - may involve reading multiple physical lines concatenated into a - single string. Updates the current line number, so calling - 'warn()' after 'readline()' emits a warning about the physical - line(s) just read. Returns None on end-of-file, since the empty - string can occur if 'rstrip_ws' is true but 'strip_blanks' is - not.""" - # If any "unread" lines waiting in 'linebuf', return the top - # one. 
(We don't actually buffer read-ahead data -- lines only - # get put in 'linebuf' if the client explicitly does an - # 'unreadline()'. - if self.linebuf: - line = self.linebuf[-1] - del self.linebuf[-1] - return line - - buildup_line = '' - - while True: - # read the line, make it None if EOF - line = self.file.readline() - if line == '': - line = None - - if self.strip_comments and line: - - # Look for the first "#" in the line. If none, never - # mind. If we find one and it's the first character, or - # is not preceded by "\", then it starts a comment -- - # strip the comment, strip whitespace before it, and - # carry on. Otherwise, it's just an escaped "#", so - # unescape it (and any other escaped "#"'s that might be - # lurking in there) and otherwise leave the line alone. - - pos = line.find("#") - if pos == -1: # no "#" -- no comments - pass - - # It's definitely a comment -- either "#" is the first - # character, or it's elsewhere and unescaped. - elif pos == 0 or line[pos-1] != "\\": - # Have to preserve the trailing newline, because it's - # the job of a later step (rstrip_ws) to remove it -- - # and if rstrip_ws is false, we'd better preserve it! - # (NB. this means that if the final line is all comment - # and has no trailing newline, we will think that it's - # EOF; I think that's OK.) - eol = (line[-1] == '\n') and '\n' or '' - line = line[0:pos] + eol - - # If all that's left is whitespace, then skip line - # *now*, before we try to join it to 'buildup_line' -- - # that way constructs like - # hello \\ - # # comment that should be ignored - # there - # result in "hello there". - if line.strip() == "": - continue - else: # it's an escaped "#" - line = line.replace("\\#", "#") - - # did previous line end with a backslash? then accumulate - if self.join_lines and buildup_line: - # oops: end of file - if line is None: - self.warn("continuation line immediately precedes " - "end-of-file") - return buildup_line - - if self.collapse_join: - line = line.lstrip() - line = buildup_line + line - - # careful: pay attention to line number when incrementing it - if isinstance(self.current_line, list): - self.current_line[1] = self.current_line[1] + 1 - else: - self.current_line = [self.current_line, - self.current_line + 1] - # just an ordinary line, read it as usual - else: - if line is None: # eof - return None - - # still have to be careful about incrementing the line number! - if isinstance(self.current_line, list): - self.current_line = self.current_line[1] + 1 - else: - self.current_line = self.current_line + 1 - - # strip whitespace however the client wants (leading and - # trailing, or one or the other, or neither) - if self.lstrip_ws and self.rstrip_ws: - line = line.strip() - elif self.lstrip_ws: - line = line.lstrip() - elif self.rstrip_ws: - line = line.rstrip() - - # blank line (whether we rstrip'ed or not)? 
skip to next line - # if appropriate - if (line == '' or line == '\n') and self.skip_blanks: - continue - - if self.join_lines: - if line[-1] == '\\': - buildup_line = line[:-1] - continue - - if line[-2:] == '\\\n': - buildup_line = line[0:-2] + '\n' - continue - - # well, I guess there's some actual content there: return it - return line - - def readlines(self): - """Read and return the list of all logical lines remaining in the - current file.""" - lines = [] - while True: - line = self.readline() - if line is None: - return lines - lines.append(line) - - def unreadline(self, line): - """Push 'line' (a string) onto an internal buffer that will be - checked by future 'readline()' calls. Handy for implementing - a parser with line-at-a-time lookahead.""" - self.linebuf.append(line) diff --git a/Lib/distutils/unixccompiler.py b/Lib/distutils/unixccompiler.py deleted file mode 100644 index d00c48981eb6d65..000000000000000 --- a/Lib/distutils/unixccompiler.py +++ /dev/null @@ -1,329 +0,0 @@ -"""distutils.unixccompiler - -Contains the UnixCCompiler class, a subclass of CCompiler that handles -the "typical" Unix-style command-line C compiler: - * macros defined with -Dname[=value] - * macros undefined with -Uname - * include search directories specified with -Idir - * libraries specified with -lllib - * library search directories specified with -Ldir - * compile handled by 'cc' (or similar) executable with -c option: - compiles .c to .o - * link static library handled by 'ar' command (possibly with 'ranlib') - * link shared library handled by 'cc -shared' -""" - -import os, sys, re - -from distutils import sysconfig -from distutils.dep_util import newer -from distutils.ccompiler import \ - CCompiler, gen_preprocess_options, gen_lib_options -from distutils.errors import \ - DistutilsExecError, CompileError, LibError, LinkError -from distutils import log - -if sys.platform == 'darwin': - import _osx_support - -# XXX Things not currently handled: -# * optimization/debug/warning flags; we just use whatever's in Python's -# Makefile and live with it. Is this adequate? If not, we might -# have to have a bunch of subclasses GNUCCompiler, SGICCompiler, -# SunCCompiler, and I suspect down that road lies madness. -# * even if we don't know a warning flag from an optimization flag, -# we need some way for outsiders to feed preprocessor/compiler/linker -# flags in to us -- eg. a sysadmin might want to mandate certain flags -# via a site config file, or a user might want to set something for -# compiling this module distribution only via the setup.py command -# line, whatever. As long as these options come from something on the -# current system, they can be as system-dependent as they like, and we -# should just happily stuff them into the preprocessor/compiler/linker -# options and carry on. - - -class UnixCCompiler(CCompiler): - - compiler_type = 'unix' - - # These are used by CCompiler in two places: the constructor sets - # instance attributes 'preprocessor', 'compiler', etc. from them, and - # 'set_executable()' allows any of these to be set. The defaults here - # are pretty generic; they will probably have to be set by an outsider - # (eg. using information discovered by the sysconfig about building - # Python extensions). 
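# Sketch of the command-line conventions listed in the module docstring above
# (-Dname[=value] / -Uname for macros, -Idir for include directories), using
# the gen_preprocess_options helper that preprocess() below relies on; the
# import assumes an interpreter that still ships distutils.
from distutils.ccompiler import gen_preprocess_options

macros = [('NDEBUG', None), ('VERSION', '"1.0"'), ('DEBUG',)]   # a 1-tuple requests -U
include_dirs = ['/usr/local/include']
print(gen_preprocess_options(macros, include_dirs))
# ['-DNDEBUG', '-DVERSION="1.0"', '-UDEBUG', '-I/usr/local/include']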
- executables = {'preprocessor' : None, - 'compiler' : ["cc"], - 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], - 'linker_so' : ["cc", "-shared"], - 'linker_exe' : ["cc"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None, - } - - if sys.platform[:6] == "darwin": - executables['ranlib'] = ["ranlib"] - - # Needed for the filename generation methods provided by the base - # class, CCompiler. NB. whoever instantiates/uses a particular - # UnixCCompiler instance should set 'shared_lib_ext' -- we set a - # reasonable common default here, but it's not necessarily used on all - # Unices! - - src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"] - obj_extension = ".o" - static_lib_extension = ".a" - shared_lib_extension = ".so" - dylib_lib_extension = ".dylib" - xcode_stub_lib_extension = ".tbd" - static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s" - xcode_stub_lib_format = dylib_lib_format - if sys.platform == "cygwin": - exe_extension = ".exe" - - def preprocess(self, source, output_file=None, macros=None, - include_dirs=None, extra_preargs=None, extra_postargs=None): - fixed_args = self._fix_compile_args(None, macros, include_dirs) - ignore, macros, include_dirs = fixed_args - pp_opts = gen_preprocess_options(macros, include_dirs) - pp_args = self.preprocessor + pp_opts - if output_file: - pp_args.extend(['-o', output_file]) - if extra_preargs: - pp_args[:0] = extra_preargs - if extra_postargs: - pp_args.extend(extra_postargs) - pp_args.append(source) - - # We need to preprocess: either we're being forced to, or we're - # generating output to stdout, or there's a target output file and - # the source file is newer than the target (or the target doesn't - # exist). - if self.force or output_file is None or newer(source, output_file): - if output_file: - self.mkpath(os.path.dirname(output_file)) - try: - self.spawn(pp_args) - except DistutilsExecError as msg: - raise CompileError(msg) - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - compiler_so = self.compiler_so - if sys.platform == 'darwin': - compiler_so = _osx_support.compiler_fixup(compiler_so, - cc_args + extra_postargs) - try: - self.spawn(compiler_so + cc_args + [src, '-o', obj] + - extra_postargs) - except DistutilsExecError as msg: - raise CompileError(msg) - - def create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - self.mkpath(os.path.dirname(output_filename)) - self.spawn(self.archiver + - [output_filename] + - objects + self.objects) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. 
- if self.ranlib: - try: - self.spawn(self.ranlib + [output_filename]) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, - runtime_library_dirs) - libraries, library_dirs, runtime_library_dirs = fixed_args - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if not isinstance(output_dir, (str, type(None))): - raise TypeError("'output_dir' must be a string or None") - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - ld_args = (objects + self.objects + - lib_opts + ['-o', output_filename]) - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - try: - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - if target_lang == "c++" and self.compiler_cxx: - # skip over environment variable settings if /usr/bin/env - # is used to set up the linker's environment. - # This is needed on OSX. Note: this assumes that the - # normal and C++ compiler have the same environment - # settings. - i = 0 - if os.path.basename(linker[0]) == "env": - i = 1 - while '=' in linker[i]: - i += 1 - - if os.path.basename(linker[i]) == 'ld_so_aix': - # AIX platforms prefix the compiler with the ld_so_aix - # script, so we need to adjust our linker index - offset = 1 - else: - offset = 0 - - linker[i+offset] = self.compiler_cxx[i] - - if sys.platform == 'darwin': - linker = _osx_support.compiler_fixup(linker, ld_args) - - self.spawn(linker + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function, in - # ccompiler.py. - - def library_dir_option(self, dir): - return "-L" + dir - - def _is_gcc(self, compiler_name): - # clang uses same syntax for rpath as gcc - return any(name in compiler_name for name in ("gcc", "g++", "clang")) - - def runtime_library_dir_option(self, dir): - # XXX Hackish, at the very least. See Python bug #445902: - # http://sourceforge.net/tracker/index.php - # ?func=detail&aid=445902&group_id=5470&atid=105470 - # Linkers on different platforms need different options to - # specify that directories need to be added to the list of - # directories searched for dependencies when a dynamic library - # is sought. GCC on GNU systems (Linux, FreeBSD, ...) has to - # be told to pass the -R option through to the linker, whereas - # other compilers and gcc on other systems just know this. - # Other compilers may need something slightly different. At - # this time, there's no way to determine this information from - # the configuration data stored in the Python installation, so - # we use this hack. 
- compiler = os.path.basename(sysconfig.get_config_var("CC")) - if sys.platform[:6] == "darwin": - # MacOSX's linker doesn't understand the -R flag at all - return "-L" + dir - elif sys.platform[:7] == "freebsd": - return "-Wl,-rpath=" + dir - elif sys.platform[:5] == "hp-ux": - if self._is_gcc(compiler): - return ["-Wl,+s", "-L" + dir] - return ["+s", "-L" + dir] - else: - if self._is_gcc(compiler): - # gcc on non-GNU systems does not need -Wl, but can - # use it anyway. Since distutils has always passed in - # -Wl whenever gcc was used in the past it is probably - # safest to keep doing so. - if sysconfig.get_config_var("GNULD") == "yes": - # GNU ld needs an extra option to get a RUNPATH - # instead of just an RPATH. - return "-Wl,--enable-new-dtags,-R" + dir - else: - return "-Wl,-R" + dir - else: - # No idea how --enable-new-dtags would be passed on to - # ld if this system was using GNU ld. Don't know if a - # system like this even exists. - return "-R" + dir - - def library_option(self, lib): - return "-l" + lib - - def find_library_file(self, dirs, lib, debug=0): - shared_f = self.library_filename(lib, lib_type='shared') - dylib_f = self.library_filename(lib, lib_type='dylib') - xcode_stub_f = self.library_filename(lib, lib_type='xcode_stub') - static_f = self.library_filename(lib, lib_type='static') - - if sys.platform == 'darwin': - # On OSX users can specify an alternate SDK using - # '-isysroot', calculate the SDK root if it is specified - # (and use it further on) - # - # Note that, as of Xcode 7, Apple SDKs may contain textual stub - # libraries with .tbd extensions rather than the normal .dylib - # shared libraries installed in /. The Apple compiler tool - # chain handles this transparently but it can cause problems - # for programs that are being built with an SDK and searching - # for specific libraries. Callers of find_library_file need to - # keep in mind that the base filename of the returned SDK library - # file might have a different extension from that of the library - # file installed on the running system, for example: - # /Applications/Xcode.app/Contents/Developer/Platforms/ - # MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/ - # usr/lib/libedit.tbd - # vs - # /usr/lib/libedit.dylib - cflags = sysconfig.get_config_var('CFLAGS') - m = re.search(r'-isysroot\s*(\S+)', cflags) - if m is None: - sysroot = _osx_support._default_sysroot(sysconfig.get_config_var('CC')) - else: - sysroot = m.group(1) - - - - for dir in dirs: - shared = os.path.join(dir, shared_f) - dylib = os.path.join(dir, dylib_f) - static = os.path.join(dir, static_f) - xcode_stub = os.path.join(dir, xcode_stub_f) - - if sys.platform == 'darwin' and ( - dir.startswith('/System/') or ( - dir.startswith('/usr/') and not dir.startswith('/usr/local/'))): - - shared = os.path.join(sysroot, dir[1:], shared_f) - dylib = os.path.join(sysroot, dir[1:], dylib_f) - static = os.path.join(sysroot, dir[1:], static_f) - xcode_stub = os.path.join(sysroot, dir[1:], xcode_stub_f) - - # We're second-guessing the linker here, with not much hard - # data to go on: GCC seems to prefer the shared library, so I'm - # assuming that *all* Unix C compilers do. And of course I'm - # ignoring even GCC's "-static" option. So sue me. 
- if os.path.exists(dylib): - return dylib - elif os.path.exists(xcode_stub): - return xcode_stub - elif os.path.exists(shared): - return shared - elif os.path.exists(static): - return static - - # Oops, didn't find it in *any* of 'dirs' - return None diff --git a/Lib/distutils/util.py b/Lib/distutils/util.py deleted file mode 100644 index 2ce5c5b64d62fa0..000000000000000 --- a/Lib/distutils/util.py +++ /dev/null @@ -1,562 +0,0 @@ -"""distutils.util - -Miscellaneous utility functions -- anything that doesn't fit into -one of the other *util.py modules. -""" - -import os -import re -import importlib.util -import string -import sys -import distutils -from distutils.errors import DistutilsPlatformError -from distutils.dep_util import newer -from distutils.spawn import spawn -from distutils import log -from distutils.errors import DistutilsByteCompileError - -def get_host_platform(): - """Return a string that identifies the current platform. This is used mainly to - distinguish platform-specific build directories and platform-specific built - distributions. Typically includes the OS name and version and the - architecture (as supplied by 'os.uname()'), although the exact information - included depends on the OS; eg. on Linux, the kernel version isn't - particularly important. - - Examples of returned values: - linux-i586 - linux-alpha (?) - solaris-2.6-sun4u - - Windows will return one of: - win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) - win32 (all others - specifically, sys.platform is returned) - - For other non-POSIX platforms, currently just returns 'sys.platform'. - - """ - if os.name == 'nt': - if 'amd64' in sys.version.lower(): - return 'win-amd64' - if '(arm)' in sys.version.lower(): - return 'win-arm32' - if '(arm64)' in sys.version.lower(): - return 'win-arm64' - return sys.platform - - # Set for cross builds explicitly - if "_PYTHON_HOST_PLATFORM" in os.environ: - return os.environ["_PYTHON_HOST_PLATFORM"] - - if os.name != "posix" or not hasattr(os, 'uname'): - # XXX what about the architecture? NT is Intel or Alpha, - # Mac OS is M68k or PPC, etc. - return sys.platform - - # Try to distinguish various flavours of Unix - - (osname, host, release, version, machine) = os.uname() - - # Convert the OS name to lowercase, remove '/' characters, and translate - # spaces (for "Power Macintosh") - osname = osname.lower().replace('/', '') - machine = machine.replace(' ', '_') - machine = machine.replace('/', '-') - - if osname[:5] == "linux": - # At least on Linux/Intel, 'machine' is the processor -- - # i386, etc. - # XXX what about Alpha, SPARC, etc? - return "%s-%s" % (osname, machine) - elif osname[:5] == "sunos": - if release[0] >= "5": # SunOS 5 == Solaris 2 - osname = "solaris" - release = "%d.%s" % (int(release[0]) - 3, release[2:]) - # We can't use "platform.architecture()[0]" because a - # bootstrap problem. We use a dict to get an error - # if some suspicious happens. 
- bitness = {2147483647:"32bit", 9223372036854775807:"64bit"} - machine += ".%s" % bitness[sys.maxsize] - # fall through to standard osname-release-machine representation - elif osname[:3] == "aix": - from _aix_support import aix_platform - return aix_platform() - elif osname[:6] == "cygwin": - osname = "cygwin" - rel_re = re.compile (r'[\d.]+', re.ASCII) - m = rel_re.match(release) - if m: - release = m.group() - elif osname[:6] == "darwin": - import _osx_support, distutils.sysconfig - osname, release, machine = _osx_support.get_platform_osx( - distutils.sysconfig.get_config_vars(), - osname, release, machine) - - return "%s-%s-%s" % (osname, release, machine) - -def get_platform(): - if os.name == 'nt': - TARGET_TO_PLAT = { - 'x86' : 'win32', - 'x64' : 'win-amd64', - 'arm' : 'win-arm32', - } - return TARGET_TO_PLAT.get(os.environ.get('VSCMD_ARG_TGT_ARCH')) or get_host_platform() - else: - return get_host_platform() - -def convert_path (pathname): - """Return 'pathname' as a name that will work on the native filesystem, - i.e. split it on '/' and put it back together again using the current - directory separator. Needed because filenames in the setup script are - always supplied in Unix style, and have to be converted to the local - convention before we can actually use them in the filesystem. Raises - ValueError on non-Unix-ish systems if 'pathname' either starts or - ends with a slash. - """ - if os.sep == '/': - return pathname - if not pathname: - return pathname - if pathname[0] == '/': - raise ValueError("path '%s' cannot be absolute" % pathname) - if pathname[-1] == '/': - raise ValueError("path '%s' cannot end with '/'" % pathname) - - paths = pathname.split('/') - while '.' in paths: - paths.remove('.') - if not paths: - return os.curdir - return os.path.join(*paths) - -# convert_path () - - -def change_root (new_root, pathname): - """Return 'pathname' with 'new_root' prepended. If 'pathname' is - relative, this is equivalent to "os.path.join(new_root,pathname)". - Otherwise, it requires making 'pathname' relative and then joining the - two, which is tricky on DOS/Windows and Mac OS. - """ - if os.name == 'posix': - if not os.path.isabs(pathname): - return os.path.join(new_root, pathname) - else: - return os.path.join(new_root, pathname[1:]) - - elif os.name == 'nt': - (drive, path) = os.path.splitdrive(pathname) - if path[0] == '\\': - path = path[1:] - return os.path.join(new_root, path) - - else: - raise DistutilsPlatformError("nothing known about platform '%s'" % os.name) - - -_environ_checked = 0 -def check_environ (): - """Ensure that 'os.environ' has all the environment variables we - guarantee that users can use in config files, command-line options, - etc. Currently this includes: - HOME - user's home directory (Unix only) - PLAT - description of the current platform, including hardware - and OS (see 'get_platform()') - """ - global _environ_checked - if _environ_checked: - return - - if os.name == 'posix' and 'HOME' not in os.environ: - try: - import pwd - os.environ['HOME'] = pwd.getpwuid(os.getuid())[5] - except (ImportError, KeyError): - # bpo-10496: if the current user identifier doesn't exist in the - # password database, do nothing - pass - - if 'PLAT' not in os.environ: - os.environ['PLAT'] = get_platform() - - _environ_checked = 1 - - -def subst_vars (s, local_vars): - """Perform shell/Perl-style variable substitution on 'string'. 
Every - occurrence of '$' followed by a name is considered a variable, and - variable is substituted by the value found in the 'local_vars' - dictionary, or in 'os.environ' if it's not in 'local_vars'. - 'os.environ' is first checked/augmented to guarantee that it contains - certain values: see 'check_environ()'. Raise ValueError for any - variables not found in either 'local_vars' or 'os.environ'. - """ - check_environ() - def _subst (match, local_vars=local_vars): - var_name = match.group(1) - if var_name in local_vars: - return str(local_vars[var_name]) - else: - return os.environ[var_name] - - try: - return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s) - except KeyError as var: - raise ValueError("invalid variable '$%s'" % var) - -# subst_vars () - - -def grok_environment_error (exc, prefix="error: "): - # Function kept for backward compatibility. - # Used to try clever things with EnvironmentErrors, - # but nowadays str(exception) produces good messages. - return prefix + str(exc) - - -# Needed by 'split_quoted()' -_wordchars_re = _squote_re = _dquote_re = None -def _init_regex(): - global _wordchars_re, _squote_re, _dquote_re - _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace) - _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'") - _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"') - -def split_quoted (s): - """Split a string up according to Unix shell-like rules for quotes and - backslashes. In short: words are delimited by spaces, as long as those - spaces are not escaped by a backslash, or inside a quoted string. - Single and double quotes are equivalent, and the quote characters can - be backslash-escaped. The backslash is stripped from any two-character - escape sequence, leaving only the escaped character. The quote - characters are stripped from any quoted string. Returns a list of - words. - """ - - # This is a nice algorithm for splitting up a single string, since it - # doesn't require character-by-character examination. It was a little - # bit of a brain-bender to get it working right, though... - if _wordchars_re is None: _init_regex() - - s = s.strip() - words = [] - pos = 0 - - while s: - m = _wordchars_re.match(s, pos) - end = m.end() - if end == len(s): - words.append(s[:end]) - break - - if s[end] in string.whitespace: # unescaped, unquoted whitespace: now - words.append(s[:end]) # we definitely have a word delimiter - s = s[end:].lstrip() - pos = 0 - - elif s[end] == '\\': # preserve whatever is being escaped; - # will become part of the current word - s = s[:end] + s[end+1:] - pos = end+1 - - else: - if s[end] == "'": # slurp singly-quoted string - m = _squote_re.match(s, end) - elif s[end] == '"': # slurp doubly-quoted string - m = _dquote_re.match(s, end) - else: - raise RuntimeError("this can't happen (bad char '%c')" % s[end]) - - if m is None: - raise ValueError("bad string (mismatched %s quotes?)" % s[end]) - - (beg, end) = m.span() - s = s[:beg] + s[beg+1:end-1] + s[end:] - pos = m.end() - 2 - - if pos >= len(s): - words.append(s) - break - - return words - -# split_quoted () - - -def execute (func, args, msg=None, verbose=0, dry_run=0): - """Perform some action that affects the outside world (eg. by - writing to the filesystem). Such actions are special because they - are disabled by the 'dry_run' flag. This method takes care of all - that bureaucracy for you; all you have to do is supply the - function to call and an argument tuple for it (to embody the - "external action" being performed), and an optional message to - print. 
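# Quick sketch of the quoting rules described in split_quoted() above; the
# first call reuses the input from the deleted test_util.py case and the
# import assumes an interpreter that still ships distutils.
from distutils.util import split_quoted

print(split_quoted('""one"" "two" \'three\' \\four'))
# ['one', 'two', 'three', 'four']
print(split_quoted('gcc -DVERSION="1 0" -O2'))
# ['gcc', '-DVERSION=1 0', '-O2']  -- quotes are stripped, the quoted space survives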
- """ - if msg is None: - msg = "%s%r" % (func.__name__, args) - if msg[-2:] == ',)': # correct for singleton tuple - msg = msg[0:-2] + ')' - - log.info(msg) - if not dry_run: - func(*args) - - -def strtobool (val): - """Convert a string representation of truth to true (1) or false (0). - - True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values - are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if - 'val' is anything else. - """ - val = val.lower() - if val in ('y', 'yes', 't', 'true', 'on', '1'): - return 1 - elif val in ('n', 'no', 'f', 'false', 'off', '0'): - return 0 - else: - raise ValueError("invalid truth value %r" % (val,)) - - -def byte_compile (py_files, - optimize=0, force=0, - prefix=None, base_dir=None, - verbose=1, dry_run=0, - direct=None): - """Byte-compile a collection of Python source files to .pyc - files in a __pycache__ subdirectory. 'py_files' is a list - of files to compile; any files that don't end in ".py" are silently - skipped. 'optimize' must be one of the following: - 0 - don't optimize - 1 - normal optimization (like "python -O") - 2 - extra optimization (like "python -OO") - If 'force' is true, all files are recompiled regardless of - timestamps. - - The source filename encoded in each bytecode file defaults to the - filenames listed in 'py_files'; you can modify these with 'prefix' and - 'basedir'. 'prefix' is a string that will be stripped off of each - source filename, and 'base_dir' is a directory name that will be - prepended (after 'prefix' is stripped). You can supply either or both - (or neither) of 'prefix' and 'base_dir', as you wish. - - If 'dry_run' is true, doesn't actually do anything that would - affect the filesystem. - - Byte-compilation is either done directly in this interpreter process - with the standard py_compile module, or indirectly by writing a - temporary script and executing it. Normally, you should let - 'byte_compile()' figure out to use direct compilation or not (see - the source for details). The 'direct' flag is used by the script - generated in indirect mode; unless you know what you're doing, leave - it set to None. - """ - - # Late import to fix a bootstrap issue: _posixsubprocess is built by - # setup.py, but setup.py uses distutils. - import subprocess - - # nothing is done if sys.dont_write_bytecode is True - if sys.dont_write_bytecode: - raise DistutilsByteCompileError('byte-compiling is disabled.') - - # First, if the caller didn't force us into direct or indirect mode, - # figure out which mode we should be in. We take a conservative - # approach: choose direct mode *only* if the current interpreter is - # in debug mode and optimize is 0. If we're not in debug mode (-O - # or -OO), we don't know which level of optimization this - # interpreter is running with, so we can't do direct - # byte-compilation and be certain that it's the right thing. Thus, - # always compile indirectly if the current interpreter is in either - # optimize mode, or if either optimization level was requested by - # the caller. - if direct is None: - direct = (__debug__ and optimize == 0) - - # "Indirect" byte-compilation: write a temporary script and then - # run it with the appropriate flags. 
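# Tiny sketch of strtobool()'s accepted spellings, taken from the table in
# its docstring above; assumes an interpreter that still ships distutils.
from distutils.util import strtobool

print(strtobool('YES'), strtobool('on'), strtobool('1'))    # 1 1 1
print(strtobool('no'), strtobool('Off'), strtobool('0'))    # 0 0 0
try:
    strtobool('maybe')
except ValueError as exc:
    print(exc)                                              # invalid truth value 'maybe'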
- if not direct: - try: - from tempfile import mkstemp - (script_fd, script_name) = mkstemp(".py") - except ImportError: - from tempfile import mktemp - (script_fd, script_name) = None, mktemp(".py") - log.info("writing byte-compilation script '%s'", script_name) - if not dry_run: - if script_fd is not None: - script = os.fdopen(script_fd, "w") - else: - script = open(script_name, "w") - - with script: - script.write("""\ -from distutils.util import byte_compile -files = [ -""") - - # XXX would be nice to write absolute filenames, just for - # safety's sake (script should be more robust in the face of - # chdir'ing before running it). But this requires abspath'ing - # 'prefix' as well, and that breaks the hack in build_lib's - # 'byte_compile()' method that carefully tacks on a trailing - # slash (os.sep really) to make sure the prefix here is "just - # right". This whole prefix business is rather delicate -- the - # problem is that it's really a directory, but I'm treating it - # as a dumb string, so trailing slashes and so forth matter. - - #py_files = map(os.path.abspath, py_files) - #if prefix: - # prefix = os.path.abspath(prefix) - - script.write(",\n".join(map(repr, py_files)) + "]\n") - script.write(""" -byte_compile(files, optimize=%r, force=%r, - prefix=%r, base_dir=%r, - verbose=%r, dry_run=0, - direct=1) -""" % (optimize, force, prefix, base_dir, verbose)) - - msg = distutils._DEPRECATION_MESSAGE - cmd = [sys.executable] - cmd.extend(subprocess._optim_args_from_interpreter_flags()) - cmd.append(f'-Wignore:{msg}:DeprecationWarning') - cmd.append(script_name) - spawn(cmd, dry_run=dry_run) - execute(os.remove, (script_name,), "removing %s" % script_name, - dry_run=dry_run) - - # "Direct" byte-compilation: use the py_compile module to compile - # right here, right now. Note that the script generated in indirect - # mode simply calls 'byte_compile()' in direct mode, a weird sort of - # cross-process recursion. Hey, it works! - else: - from py_compile import compile - - for file in py_files: - if file[-3:] != ".py": - # This lets us be lazy and not filter filenames in - # the "install_lib" command. - continue - - # Terminology from the py_compile module: - # cfile - byte-compiled file - # dfile - purported source filename (same as 'file' by default) - if optimize >= 0: - opt = '' if optimize == 0 else optimize - cfile = importlib.util.cache_from_source( - file, optimization=opt) - else: - cfile = importlib.util.cache_from_source(file) - dfile = file - if prefix: - if file[:len(prefix)] != prefix: - raise ValueError("invalid prefix: filename %r doesn't start with %r" - % (file, prefix)) - dfile = dfile[len(prefix):] - if base_dir: - dfile = os.path.join(base_dir, dfile) - - cfile_base = os.path.basename(cfile) - if direct: - if force or newer(file, cfile): - log.info("byte-compiling %s to %s", file, cfile_base) - if not dry_run: - compile(file, cfile, dfile) - else: - log.debug("skipping byte-compilation of %s to %s", - file, cfile_base) - -# byte_compile () - -def rfc822_escape (header): - """Return a version of the string escaped for inclusion in an - RFC-822 header, by ensuring there are 8 spaces space after each newline. - """ - lines = header.split('\n') - sep = '\n' + 8 * ' ' - return sep.join(lines) - -# 2to3 support - -def run_2to3(files, fixer_names=None, options=None, explicit=None): - """Invoke 2to3 on a list of Python files. - The files should all come from the build area, as the - modification is done in-place. 
To reduce the build time, - only files modified since the last invocation of this - function should be passed in the files argument.""" - - if not files: - return - - # Make this class local, to delay import of 2to3 - from lib2to3.refactor import RefactoringTool, get_fixers_from_package - class DistutilsRefactoringTool(RefactoringTool): - def log_error(self, msg, *args, **kw): - log.error(msg, *args) - - def log_message(self, msg, *args): - log.info(msg, *args) - - def log_debug(self, msg, *args): - log.debug(msg, *args) - - if fixer_names is None: - fixer_names = get_fixers_from_package('lib2to3.fixes') - r = DistutilsRefactoringTool(fixer_names, options=options) - r.refactor(files, write=True) - -def copydir_run_2to3(src, dest, template=None, fixer_names=None, - options=None, explicit=None): - """Recursively copy a directory, only copying new and changed files, - running run_2to3 over all newly copied Python modules afterward. - - If you give a template string, it's parsed like a MANIFEST.in. - """ - from distutils.dir_util import mkpath - from distutils.file_util import copy_file - from distutils.filelist import FileList - filelist = FileList() - curdir = os.getcwd() - os.chdir(src) - try: - filelist.findall() - finally: - os.chdir(curdir) - filelist.files[:] = filelist.allfiles - if template: - for line in template.splitlines(): - line = line.strip() - if not line: continue - filelist.process_template_line(line) - copied = [] - for filename in filelist.files: - outname = os.path.join(dest, filename) - mkpath(os.path.dirname(outname)) - res = copy_file(os.path.join(src, filename), outname, update=1) - if res[1]: copied.append(outname) - run_2to3([fn for fn in copied if fn.lower().endswith('.py')], - fixer_names=fixer_names, options=options, explicit=explicit) - return copied - -class Mixin2to3: - '''Mixin class for commands that run 2to3. - To configure 2to3, setup scripts may either change - the class variables, or inherit from individual commands - to override how 2to3 is invoked.''' - - # provide list of fixers to run; - # defaults to all from lib2to3.fixers - fixer_names = None - - # options dictionary - options = None - - # list of fixers to invoke even though they are marked as explicit - explicit = None - - def run_2to3(self, files): - return run_2to3(files, self.fixer_names, self.options, self.explicit) diff --git a/Lib/distutils/version.py b/Lib/distutils/version.py deleted file mode 100644 index c33bebaed26aeea..000000000000000 --- a/Lib/distutils/version.py +++ /dev/null @@ -1,347 +0,0 @@ -# -# distutils/version.py -# -# Implements multiple version numbering conventions for the -# Python Module Distribution Utilities. -# -# $Id$ -# - -"""Provides classes to represent module version numbers (one class for -each style of version numbering). There are currently two such classes -implemented: StrictVersion and LooseVersion. - -Every version number class implements the following interface: - * the 'parse' method takes a string and parses it to some internal - representation; if the string is an invalid version number, - 'parse' raises a ValueError exception - * the class constructor takes an optional string argument which, - if supplied, is passed to 'parse' - * __str__ reconstructs the string that was passed to 'parse' (or - an equivalent string -- ie. 
one that will generate an equivalent - version number instance) - * __repr__ generates Python code to recreate the version number instance - * _cmp compares the current instance with either another instance - of the same class or a string (which will be parsed to an instance - of the same class, thus must follow the same rules) -""" - -import re - -class Version: - """Abstract base class for version numbering classes. Just provides - constructor (__init__) and reproducer (__repr__), because those - seem to be the same for all version numbering classes; and route - rich comparisons to _cmp. - """ - - def __init__ (self, vstring=None): - if vstring: - self.parse(vstring) - - def __repr__ (self): - return "%s ('%s')" % (self.__class__.__name__, str(self)) - - def __eq__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c == 0 - - def __lt__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c < 0 - - def __le__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c <= 0 - - def __gt__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c > 0 - - def __ge__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c >= 0 - - -# Interface for version-number classes -- must be implemented -# by the following classes (the concrete ones -- Version should -# be treated as an abstract class). -# __init__ (string) - create and take same action as 'parse' -# (string parameter is optional) -# parse (string) - convert a string representation to whatever -# internal representation is appropriate for -# this style of version numbering -# __str__ (self) - convert back to a string; should be very similar -# (if not identical to) the string supplied to parse -# __repr__ (self) - generate Python code to recreate -# the instance -# _cmp (self, other) - compare two version numbers ('other' may -# be an unparsed version string, or another -# instance of your version class) - - -class StrictVersion (Version): - - """Version numbering for anal retentives and software idealists. - Implements the standard interface for version number classes as - described above. A version number consists of two or three - dot-separated numeric components, with an optional "pre-release" tag - on the end. The pre-release tag consists of the letter 'a' or 'b' - followed by a number. If the numeric components of two version - numbers are equal, then one with a pre-release tag will always - be deemed earlier (lesser) than one without. - - The following are valid version numbers (shown in the order that - would be obtained by sorting according to the supplied cmp function): - - 0.4 0.4.0 (these two are equivalent) - 0.4.1 - 0.5a1 - 0.5b3 - 0.5 - 0.9.6 - 1.0 - 1.0.4a3 - 1.0.4b1 - 1.0.4 - - The following are examples of invalid version numbers: - - 1 - 2.7.2.2 - 1.3.a4 - 1.3pl1 - 1.3c4 - - The rationale for this version numbering system will be explained - in the distutils documentation. - """ - - version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? 
([ab](\d+))?$', - re.VERBOSE | re.ASCII) - - - def parse (self, vstring): - match = self.version_re.match(vstring) - if not match: - raise ValueError("invalid version number '%s'" % vstring) - - (major, minor, patch, prerelease, prerelease_num) = \ - match.group(1, 2, 4, 5, 6) - - if patch: - self.version = tuple(map(int, [major, minor, patch])) - else: - self.version = tuple(map(int, [major, minor])) + (0,) - - if prerelease: - self.prerelease = (prerelease[0], int(prerelease_num)) - else: - self.prerelease = None - - - def __str__ (self): - - if self.version[2] == 0: - vstring = '.'.join(map(str, self.version[0:2])) - else: - vstring = '.'.join(map(str, self.version)) - - if self.prerelease: - vstring = vstring + self.prerelease[0] + str(self.prerelease[1]) - - return vstring - - - def _cmp (self, other): - if isinstance(other, str): - other = StrictVersion(other) - elif not isinstance(other, StrictVersion): - return NotImplemented - - if self.version != other.version: - # numeric versions don't match - # prerelease stuff doesn't matter - if self.version < other.version: - return -1 - else: - return 1 - - # have to compare prerelease - # case 1: neither has prerelease; they're equal - # case 2: self has prerelease, other doesn't; other is greater - # case 3: self doesn't have prerelease, other does: self is greater - # case 4: both have prerelease: must compare them! - - if (not self.prerelease and not other.prerelease): - return 0 - elif (self.prerelease and not other.prerelease): - return -1 - elif (not self.prerelease and other.prerelease): - return 1 - elif (self.prerelease and other.prerelease): - if self.prerelease == other.prerelease: - return 0 - elif self.prerelease < other.prerelease: - return -1 - else: - return 1 - else: - assert False, "never get here" - -# end class StrictVersion - - -# The rules according to Greg Stein: -# 1) a version number has 1 or more numbers separated by a period or by -# sequences of letters. If only periods, then these are compared -# left-to-right to determine an ordering. -# 2) sequences of letters are part of the tuple for comparison and are -# compared lexicographically -# 3) recognize the numeric components may have leading zeroes -# -# The LooseVersion class below implements these rules: a version number -# string is split up into a tuple of integer and string components, and -# comparison is a simple tuple comparison. This means that version -# numbers behave in a predictable and obvious way, but a way that might -# not necessarily be how people *want* version numbers to behave. There -# wouldn't be a problem if people could stick to purely numeric version -# numbers: just split on period and compare the numbers as tuples. -# However, people insist on putting letters into their version numbers; -# the most common purpose seems to be: -# - indicating a "pre-release" version -# ('alpha', 'beta', 'a', 'b', 'pre', 'p') -# - indicating a post-release patch ('p', 'pl', 'patch') -# but of course this can't cover all version number schemes, and there's -# no way to know what a programmer means without asking him. -# -# The problem is what to do with letters (and other non-numeric -# characters) in a version number. The current implementation does the -# obvious and predictable thing: keep them as strings and compare -# lexically within a tuple comparison. This has the desired effect if -# an appended letter sequence implies something "post-release": -# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002". 
-# -# However, if letters in a version number imply a pre-release version, -# the "obvious" thing isn't correct. Eg. you would expect that -# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison -# implemented here, this just isn't so. -# -# Two possible solutions come to mind. The first is to tie the -# comparison algorithm to a particular set of semantic rules, as has -# been done in the StrictVersion class above. This works great as long -# as everyone can go along with bondage and discipline. Hopefully a -# (large) subset of Python module programmers will agree that the -# particular flavour of bondage and discipline provided by StrictVersion -# provides enough benefit to be worth using, and will submit their -# version numbering scheme to its domination. The free-thinking -# anarchists in the lot will never give in, though, and something needs -# to be done to accommodate them. -# -# Perhaps a "moderately strict" version class could be implemented that -# lets almost anything slide (syntactically), and makes some heuristic -# assumptions about non-digits in version number strings. This could -# sink into special-case-hell, though; if I was as talented and -# idiosyncratic as Larry Wall, I'd go ahead and implement a class that -# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is -# just as happy dealing with things like "2g6" and "1.13++". I don't -# think I'm smart enough to do it right though. -# -# In any case, I've coded the test suite for this module (see -# ../test/test_version.py) specifically to fail on things like comparing -# "1.2a2" and "1.2". That's not because the *code* is doing anything -# wrong, it's because the simple, obvious design doesn't match my -# complicated, hairy expectations for real-world version numbers. It -# would be a snap to fix the test suite to say, "Yep, LooseVersion does -# the Right Thing" (ie. the code matches the conception). But I'd rather -# have a conception that matches common notions about version numbers. - -class LooseVersion (Version): - - """Version numbering for anarchists and software realists. - Implements the standard interface for version number classes as - described above. A version number consists of a series of numbers, - separated by either periods or strings of letters. When comparing - version numbers, the numeric components will be compared - numerically, and the alphabetic components lexically. The following - are all valid version numbers, in no particular order: - - 1.5.1 - 1.5.2b2 - 161 - 3.10a - 8.02 - 3.4j - 1996.07.12 - 3.2.pl0 - 3.1.1.6 - 2g6 - 11g - 0.960923 - 2.2beta29 - 1.13++ - 5.5.kw - 2.0b1pl0 - - In fact, there is no such thing as an invalid version number under - this scheme; the rules for comparison are simple and predictable, - but may not always give the results you want (for some definition - of "want"). 
- """ - - component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE) - - def __init__ (self, vstring=None): - if vstring: - self.parse(vstring) - - - def parse (self, vstring): - # I've given up on thinking I can reconstruct the version string - # from the parsed tuple -- so I just store the string here for - # use by __str__ - self.vstring = vstring - components = [x for x in self.component_re.split(vstring) - if x and x != '.'] - for i, obj in enumerate(components): - try: - components[i] = int(obj) - except ValueError: - pass - - self.version = components - - - def __str__ (self): - return self.vstring - - - def __repr__ (self): - return "LooseVersion ('%s')" % str(self) - - - def _cmp (self, other): - if isinstance(other, str): - other = LooseVersion(other) - elif not isinstance(other, LooseVersion): - return NotImplemented - - if self.version == other.version: - return 0 - if self.version < other.version: - return -1 - if self.version > other.version: - return 1 - - -# end class LooseVersion diff --git a/Lib/distutils/versionpredicate.py b/Lib/distutils/versionpredicate.py deleted file mode 100644 index 062c98f2489951a..000000000000000 --- a/Lib/distutils/versionpredicate.py +++ /dev/null @@ -1,166 +0,0 @@ -"""Module for parsing and testing package version predicate strings. -""" -import re -import distutils.version -import operator - - -re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)", - re.ASCII) -# (package) (rest) - -re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses -re_splitComparison = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$") -# (comp) (version) - - -def splitUp(pred): - """Parse a single version comparison. - - Return (comparison string, StrictVersion) - """ - res = re_splitComparison.match(pred) - if not res: - raise ValueError("bad package restriction syntax: %r" % pred) - comp, verStr = res.groups() - return (comp, distutils.version.StrictVersion(verStr)) - -compmap = {"<": operator.lt, "<=": operator.le, "==": operator.eq, - ">": operator.gt, ">=": operator.ge, "!=": operator.ne} - -class VersionPredicate: - """Parse and test package version predicates. - - >>> v = VersionPredicate('pyepat.abc (>1.0, <3333.3a1, !=1555.1b3)') - - The `name` attribute provides the full dotted name that is given:: - - >>> v.name - 'pyepat.abc' - - The str() of a `VersionPredicate` provides a normalized - human-readable version of the expression:: - - >>> print(v) - pyepat.abc (> 1.0, < 3333.3a1, != 1555.1b3) - - The `satisfied_by()` method can be used to determine with a given - version number is included in the set described by the version - restrictions:: - - >>> v.satisfied_by('1.1') - True - >>> v.satisfied_by('1.4') - True - >>> v.satisfied_by('1.0') - False - >>> v.satisfied_by('4444.4') - False - >>> v.satisfied_by('1555.1b3') - False - - `VersionPredicate` is flexible in accepting extra whitespace:: - - >>> v = VersionPredicate(' pat( == 0.1 ) ') - >>> v.name - 'pat' - >>> v.satisfied_by('0.1') - True - >>> v.satisfied_by('0.2') - False - - If any version numbers passed in do not conform to the - restrictions of `StrictVersion`, a `ValueError` is raised:: - - >>> v = VersionPredicate('p1.p2.p3.p4(>=1.0, <=1.3a1, !=1.2zb3)') - Traceback (most recent call last): - ... 
- ValueError: invalid version number '1.2zb3' - - It the module or package name given does not conform to what's - allowed as a legal module or package name, `ValueError` is - raised:: - - >>> v = VersionPredicate('foo-bar') - Traceback (most recent call last): - ... - ValueError: expected parenthesized list: '-bar' - - >>> v = VersionPredicate('foo bar (12.21)') - Traceback (most recent call last): - ... - ValueError: expected parenthesized list: 'bar (12.21)' - - """ - - def __init__(self, versionPredicateStr): - """Parse a version predicate string. - """ - # Fields: - # name: package name - # pred: list of (comparison string, StrictVersion) - - versionPredicateStr = versionPredicateStr.strip() - if not versionPredicateStr: - raise ValueError("empty package restriction") - match = re_validPackage.match(versionPredicateStr) - if not match: - raise ValueError("bad package name in %r" % versionPredicateStr) - self.name, paren = match.groups() - paren = paren.strip() - if paren: - match = re_paren.match(paren) - if not match: - raise ValueError("expected parenthesized list: %r" % paren) - str = match.groups()[0] - self.pred = [splitUp(aPred) for aPred in str.split(",")] - if not self.pred: - raise ValueError("empty parenthesized list in %r" - % versionPredicateStr) - else: - self.pred = [] - - def __str__(self): - if self.pred: - seq = [cond + " " + str(ver) for cond, ver in self.pred] - return self.name + " (" + ", ".join(seq) + ")" - else: - return self.name - - def satisfied_by(self, version): - """True if version is compatible with all the predicates in self. - The parameter version must be acceptable to the StrictVersion - constructor. It may be either a string or StrictVersion. - """ - for cond, ver in self.pred: - if not compmap[cond](version, ver): - return False - return True - - -_provision_rx = None - -def split_provision(value): - """Return the name and optional version number of a provision. - - The version number, if given, will be returned as a `StrictVersion` - instance, otherwise it will be `None`. - - >>> split_provision('mypkg') - ('mypkg', None) - >>> split_provision(' mypkg( 1.2 ) ') - ('mypkg', StrictVersion ('1.2')) - """ - global _provision_rx - if _provision_rx is None: - _provision_rx = re.compile( - r"([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$", - re.ASCII) - value = value.strip() - m = _provision_rx.match(value) - if not m: - raise ValueError("illegal provides specification: %r" % value) - ver = m.group(2) or None - if ver: - ver = distutils.version.StrictVersion(ver) - return m.group(1), ver diff --git a/Lib/doctest.py b/Lib/doctest.py index b2ef2ce63672ebd..d109b6c9e373438 100644 --- a/Lib/doctest.py +++ b/Lib/doctest.py @@ -105,7 +105,23 @@ def _test(): from io import StringIO, IncrementalNewlineDecoder from collections import namedtuple -TestResults = namedtuple('TestResults', 'failed attempted') + +class TestResults(namedtuple('TestResults', 'failed attempted')): + def __new__(cls, failed, attempted, *, skipped=0): + results = super().__new__(cls, failed, attempted) + results.skipped = skipped + return results + + def __repr__(self): + if self.skipped: + return (f'TestResults(failed={self.failed}, ' + f'attempted={self.attempted}, ' + f'skipped={self.skipped})') + else: + # Leave the repr() unchanged for backward compatibility + # if skipped is zero + return super().__repr__() + # There are 4 basic classes: # - Example: a pair, plus an intra-docstring line number. 
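As a standalone illustration (not part of the patch itself) of the TestResults change in the doctest.py hunk above: a namedtuple subclass can grow a keyword-only `skipped` attribute while old callers that unpack or repr() the two-field tuple keep working.

    from collections import namedtuple

    class TestResults(namedtuple('TestResults', 'failed attempted')):
        def __new__(cls, failed, attempted, *, skipped=0):
            results = super().__new__(cls, failed, attempted)
            results.skipped = skipped   # subclass instances have a __dict__, so this attribute sticks
            return results

        def __repr__(self):
            if self.skipped:
                return (f'TestResults(failed={self.failed}, '
                        f'attempted={self.attempted}, skipped={self.skipped})')
            return super().__repr__()   # unchanged repr when nothing was skipped

    failed, attempted = TestResults(1, 7, skipped=2)   # two-value unpacking still works
    assert (failed, attempted) == (1, 7)
    assert TestResults(0, 3).skipped == 0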
@@ -207,7 +223,13 @@ def _normalize_module(module, depth=2): elif isinstance(module, str): return __import__(module, globals(), locals(), ["*"]) elif module is None: - return sys.modules[sys._getframe(depth).f_globals['__name__']] + try: + try: + return sys.modules[sys._getframemodulename(depth)] + except AttributeError: + return sys.modules[sys._getframe(depth).f_globals['__name__']] + except KeyError: + pass else: raise TypeError("Expected a module, string, or None") @@ -569,9 +591,11 @@ def __hash__(self): def __lt__(self, other): if not isinstance(other, DocTest): return NotImplemented - return ((self.name, self.filename, self.lineno, id(self)) + self_lno = self.lineno if self.lineno is not None else -1 + other_lno = other.lineno if other.lineno is not None else -1 + return ((self.name, self.filename, self_lno, id(self)) < - (other.name, other.filename, other.lineno, id(other))) + (other.name, other.filename, other_lno, id(other))) ###################################################################### ## 3. DocTestParser @@ -956,7 +980,8 @@ def _from_module(self, module, object): return module is inspect.getmodule(object) elif inspect.isfunction(object): return module.__dict__ is object.__globals__ - elif inspect.ismethoddescriptor(object): + elif (inspect.ismethoddescriptor(object) or + inspect.ismethodwrapper(object)): if hasattr(object, '__objclass__'): obj_mod = object.__objclass__.__module__ elif hasattr(object, '__module__'): @@ -1103,7 +1128,7 @@ def _find_lineno(self, obj, source_lines): if source_lines is None: return None pat = re.compile(r'^\s*class\s*%s\b' % - getattr(obj, '__name__', '-')) + re.escape(getattr(obj, '__name__', '-'))) for i, line in enumerate(source_lines): if pat.match(line): lineno = i @@ -1143,8 +1168,7 @@ class DocTestRunner: """ A class used to run DocTest test cases, and accumulate statistics. The `run` method is used to process a single DocTest case. It - returns a tuple `(f, t)`, where `t` is the number of test cases - tried, and `f` is the number of test cases that failed. + returns a TestResults instance. >>> tests = DocTestFinder().find(_TestClass) >>> runner = DocTestRunner(verbose=False) @@ -1157,8 +1181,8 @@ class DocTestRunner: _TestClass.square -> TestResults(failed=0, attempted=1) The `summarize` method prints a summary of all the test cases that - have been run by the runner, and returns an aggregated `(f, t)` - tuple: + have been run by the runner, and returns an aggregated TestResults + instance: >>> runner.summarize(verbose=1) 4 items passed all tests: @@ -1171,13 +1195,15 @@ class DocTestRunner: Test passed. TestResults(failed=0, attempted=7) - The aggregated number of tried examples and failed examples is - also available via the `tries` and `failures` attributes: + The aggregated number of tried examples and failed examples is also + available via the `tries`, `failures` and `skips` attributes: >>> runner.tries 7 >>> runner.failures 0 + >>> runner.skips + 0 The comparison between expected outputs and actual outputs is done by an `OutputChecker`. This comparison may be customized with a @@ -1226,7 +1252,8 @@ def __init__(self, checker=None, verbose=None, optionflags=0): # Keep track of the examples we've run. self.tries = 0 self.failures = 0 - self._name2ft = {} + self.skips = 0 + self._stats = {} # Create a fake output target for capturing doctest output. self._fakeout = _SpoofOut() @@ -1295,13 +1322,11 @@ def __run(self, test, compileflags, out): Run the examples in `test`. 
Write the outcome of each example with one of the `DocTestRunner.report_*` methods, using the writer function `out`. `compileflags` is the set of compiler - flags that should be used to execute examples. Return a tuple - `(f, t)`, where `t` is the number of examples tried, and `f` - is the number of examples that failed. The examples are run - in the namespace `test.globs`. + flags that should be used to execute examples. Return a TestResults + instance. The examples are run in the namespace `test.globs`. """ - # Keep track of the number of failures and tries. - failures = tries = 0 + # Keep track of the number of failed, attempted, skipped examples. + failures = attempted = skips = 0 # Save the option flags (since option directives can be used # to modify them). @@ -1313,6 +1338,7 @@ def __run(self, test, compileflags, out): # Process each example. for examplenum, example in enumerate(test.examples): + attempted += 1 # If REPORT_ONLY_FIRST_FAILURE is set, then suppress # reporting after the first failure. @@ -1330,10 +1356,10 @@ def __run(self, test, compileflags, out): # If 'SKIP' is set, then skip this example. if self.optionflags & SKIP: + skips += 1 continue # Record that we started this example. - tries += 1 if not quiet: self.report_start(out, test, example) @@ -1369,7 +1395,24 @@ def __run(self, test, compileflags, out): # The example raised an exception: check if it was expected. else: - exc_msg = traceback.format_exception_only(*exception[:2])[-1] + formatted_ex = traceback.format_exception_only(*exception[:2]) + if issubclass(exception[0], SyntaxError): + # SyntaxError / IndentationError is special: + # we don't care about the carets / suggestions / etc + # We only care about the error message and notes. + # They start with `SyntaxError:` (or any other class name) + exception_line_prefixes = ( + f"{exception[0].__qualname__}:", + f"{exception[0].__module__}.{exception[0].__qualname__}:", + ) + exc_msg_index = next( + index + for index, line in enumerate(formatted_ex) + if line.startswith(exception_line_prefixes) + ) + formatted_ex = formatted_ex[exc_msg_index:] + + exc_msg = "".join(formatted_ex) if not quiet: got += _exception_traceback(exception) @@ -1411,19 +1454,22 @@ def __run(self, test, compileflags, out): # Restore the option flags (in case they were modified) self.optionflags = original_optionflags - # Record and return the number of failures and tries. - self.__record_outcome(test, failures, tries) - return TestResults(failures, tries) + # Record and return the number of failures and attempted. + self.__record_outcome(test, failures, attempted, skips) + return TestResults(failures, attempted, skipped=skips) - def __record_outcome(self, test, f, t): + def __record_outcome(self, test, failures, tries, skips): """ - Record the fact that the given DocTest (`test`) generated `f` - failures out of `t` tried examples. + Record the fact that the given DocTest (`test`) generated `failures` + failures out of `tries` tried examples. 
""" - f2, t2 = self._name2ft.get(test.name, (0,0)) - self._name2ft[test.name] = (f+f2, t+t2) - self.failures += f - self.tries += t + failures2, tries2, skips2 = self._stats.get(test.name, (0, 0, 0)) + self._stats[test.name] = (failures + failures2, + tries + tries2, + skips + skips2) + self.failures += failures + self.tries += tries + self.skips += skips __LINECACHE_FILENAME_RE = re.compile(r'.+)' @@ -1512,9 +1558,7 @@ def out(s): def summarize(self, verbose=None): """ Print a summary of all the test cases that have been run by - this DocTestRunner, and return a tuple `(f, t)`, where `f` is - the total number of failed examples, and `t` is the total - number of tried examples. + this DocTestRunner, and return a TestResults instance. The optional `verbose` argument controls how detailed the summary is. If the verbosity is not specified, then the @@ -1525,59 +1569,61 @@ def summarize(self, verbose=None): notests = [] passed = [] failed = [] - totalt = totalf = 0 - for x in self._name2ft.items(): - name, (f, t) = x - assert f <= t - totalt += t - totalf += f - if t == 0: + total_tries = total_failures = total_skips = 0 + for item in self._stats.items(): + name, (failures, tries, skips) = item + assert failures <= tries + total_tries += tries + total_failures += failures + total_skips += skips + if tries == 0: notests.append(name) - elif f == 0: - passed.append( (name, t) ) + elif failures == 0: + passed.append((name, tries)) else: - failed.append(x) + failed.append(item) if verbose: if notests: - print(len(notests), "items had no tests:") + print(f"{len(notests)} items had no tests:") notests.sort() - for thing in notests: - print(" ", thing) + for name in notests: + print(f" {name}") if passed: - print(len(passed), "items passed all tests:") + print(f"{len(passed)} items passed all tests:") passed.sort() - for thing, count in passed: - print(" %3d tests in %s" % (count, thing)) + for name, count in passed: + print(f" {count:3d} tests in {name}") if failed: print(self.DIVIDER) - print(len(failed), "items had failures:") + print(f"{len(failed)} items had failures:") failed.sort() - for thing, (f, t) in failed: - print(" %3d of %3d in %s" % (f, t, thing)) + for name, (failures, tries, skips) in failed: + print(f" {failures:3d} of {tries:3d} in {name}") if verbose: - print(totalt, "tests in", len(self._name2ft), "items.") - print(totalt - totalf, "passed and", totalf, "failed.") - if totalf: - print("***Test Failed***", totalf, "failures.") + print(f"{total_tries} tests in {len(self._stats)} items.") + print(f"{total_tries - total_failures} passed and {total_failures} failed.") + if total_failures: + msg = f"***Test Failed*** {total_failures} failures" + if total_skips: + msg = f"{msg} and {total_skips} skipped tests" + print(f"{msg}.") elif verbose: print("Test passed.") - return TestResults(totalf, totalt) + return TestResults(total_failures, total_tries, skipped=total_skips) #///////////////////////////////////////////////////////////////// # Backward compatibility cruft to maintain doctest.master. 
#///////////////////////////////////////////////////////////////// def merge(self, other): - d = self._name2ft - for name, (f, t) in other._name2ft.items(): + d = self._stats + for name, (failures, tries, skips) in other._stats.items(): if name in d: - # Don't print here by default, since doing - # so breaks some of the buildbots - #print("*** DocTestRunner.merge: '" + name + "' in both" \ - # " testers; summing outcomes.") - f2, t2 = d[name] - f = f + f2 - t = t + t2 - d[name] = f, t + failures2, tries2, skips2 = d[name] + failures = failures + failures2 + tries = tries + tries2 + skips = skips + skips2 + d[name] = (failures, tries, skips) + class OutputChecker: """ @@ -1977,7 +2023,8 @@ class doctest.Tester, then merges the results into (or creates) else: master.merge(runner) - return TestResults(runner.failures, runner.tries) + return TestResults(runner.failures, runner.tries, skipped=runner.skips) + def testfile(filename, module_relative=True, name=None, package=None, globs=None, verbose=None, report=True, optionflags=0, @@ -2100,7 +2147,8 @@ class doctest.Tester, then merges the results into (or creates) else: master.merge(runner) - return TestResults(runner.failures, runner.tries) + return TestResults(runner.failures, runner.tries, skipped=runner.skips) + def run_docstring_examples(f, globs, verbose=False, name="NoName", compileflags=None, optionflags=0): diff --git a/Lib/email/__init__.py b/Lib/email/__init__.py index fae872439edc66d..9fa477830041859 100644 --- a/Lib/email/__init__.py +++ b/Lib/email/__init__.py @@ -25,7 +25,6 @@ ] - # Some convenience routines. Don't import Parser and Message as side-effects # of importing email since those cascadingly import most of the rest of the # email package. diff --git a/Lib/email/_header_value_parser.py b/Lib/email/_header_value_parser.py index e637e6df06612d5..0d6bd812475eea0 100644 --- a/Lib/email/_header_value_parser.py +++ b/Lib/email/_header_value_parser.py @@ -1987,7 +1987,7 @@ def get_address_list(value): try: token, value = get_address(value) address_list.append(token) - except errors.HeaderParseError as err: + except errors.HeaderParseError: leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) @@ -2096,7 +2096,7 @@ def get_msg_id(value): except errors.HeaderParseError: try: token, value = get_no_fold_literal(value) - except errors.HeaderParseError as e: + except errors.HeaderParseError: try: token, value = get_domain(value) msg_id.defects.append(errors.ObsoleteHeaderDefect( @@ -2443,7 +2443,6 @@ def get_parameter(value): raise errors.HeaderParseError("Parameter not followed by '='") param.append(ValueTerminal('=', 'parameter-separator')) value = value[1:] - leader = None if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) param.append(token) @@ -2568,7 +2567,7 @@ def parse_mime_parameters(value): try: token, value = get_parameter(value) mime_parameters.append(token) - except errors.HeaderParseError as err: + except errors.HeaderParseError: leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) @@ -2626,7 +2625,6 @@ def parse_content_type_header(value): don't do that. 
""" ctype = ContentType() - recover = False if not value: ctype.defects.append(errors.HeaderMissingRequiredValue( "Missing content type specification")) diff --git a/Lib/email/base64mime.py b/Lib/email/base64mime.py index a7cc37365c6f9a1..4cdf22666e30161 100644 --- a/Lib/email/base64mime.py +++ b/Lib/email/base64mime.py @@ -45,7 +45,6 @@ MISC_LEN = 7 - # Helpers def header_length(bytearray): """Return the length of s when it is encoded with base64.""" @@ -57,7 +56,6 @@ def header_length(bytearray): return n - def header_encode(header_bytes, charset='iso-8859-1'): """Encode a single header line with Base64 encoding in a given charset. @@ -72,7 +70,6 @@ def header_encode(header_bytes, charset='iso-8859-1'): return '=?%s?b?%s?=' % (charset, encoded) - def body_encode(s, maxlinelen=76, eol=NL): r"""Encode a string with base64. @@ -98,7 +95,6 @@ def body_encode(s, maxlinelen=76, eol=NL): return EMPTYSTRING.join(encvec) - def decode(string): """Decode a raw base64 string, returning a bytes object. diff --git a/Lib/email/charset.py b/Lib/email/charset.py index 791b6584b247572..043801107b60e51 100644 --- a/Lib/email/charset.py +++ b/Lib/email/charset.py @@ -18,7 +18,6 @@ from email.encoders import encode_7or8bit - # Flags for types of header encodings QP = 1 # Quoted-Printable BASE64 = 2 # Base64 @@ -32,7 +31,6 @@ EMPTYSTRING = '' - # Defaults CHARSETS = { # input header enc body enc output conv @@ -104,7 +102,6 @@ } - # Convenience functions for extending the above mappings def add_charset(charset, header_enc=None, body_enc=None, output_charset=None): """Add character set properties to the global registry. @@ -153,7 +150,6 @@ def add_codec(charset, codecname): CODEC_MAP[charset] = codecname - # Convenience function for encoding strings, taking into account # that they might be unknown-8bit (ie: have surrogate-escaped bytes) def _encode(string, codec): @@ -163,7 +159,6 @@ def _encode(string, codec): return string.encode(codec) - class Charset: """Map character sets to their email properties. @@ -346,7 +341,6 @@ def header_encode_lines(self, string, maxlengths): if not lines and not current_line: lines.append(None) else: - separator = (' ' if lines else '') joined_line = EMPTYSTRING.join(current_line) header_bytes = _encode(joined_line, codec) lines.append(encoder(header_bytes)) diff --git a/Lib/email/encoders.py b/Lib/email/encoders.py index 0a66acb6240bd78..17bd1ab7b19f325 100644 --- a/Lib/email/encoders.py +++ b/Lib/email/encoders.py @@ -16,7 +16,6 @@ from quopri import encodestring as _encodestring - def _qencode(s): enc = _encodestring(s, quotetabs=True) # Must encode spaces, which quopri.encodestring() doesn't do @@ -34,7 +33,6 @@ def encode_base64(msg): msg['Content-Transfer-Encoding'] = 'base64' - def encode_quopri(msg): """Encode the message's payload in quoted-printable. 
@@ -46,7 +44,6 @@ def encode_quopri(msg): msg['Content-Transfer-Encoding'] = 'quoted-printable' - def encode_7or8bit(msg): """Set the Content-Transfer-Encoding header to 7bit or 8bit.""" orig = msg.get_payload(decode=True) @@ -64,6 +61,5 @@ def encode_7or8bit(msg): msg['Content-Transfer-Encoding'] = '7bit' - def encode_noop(msg): """Do nothing.""" diff --git a/Lib/email/feedparser.py b/Lib/email/feedparser.py index 97d3f5144d606ff..06d6b4a3afcd070 100644 --- a/Lib/email/feedparser.py +++ b/Lib/email/feedparser.py @@ -37,11 +37,12 @@ headerRE = re.compile(r'^(From |[\041-\071\073-\176]*:|[\t ])') EMPTYSTRING = '' NL = '\n' +boundaryendRE = re.compile( + r'(?P--)?(?P[ \t]*)(?P\r\n|\r|\n)?$') NeedMoreData = object() - class BufferedSubFile(object): """A file-ish object that can have new data loaded into it. @@ -132,7 +133,6 @@ def __next__(self): return line - class FeedParser: """A feed-style parser of email.""" @@ -189,7 +189,7 @@ def close(self): assert not self._msgstack # Look for final set of defects if root.get_content_maintype() == 'multipart' \ - and not root.is_multipart(): + and not root.is_multipart() and not self._headersonly: defect = errors.MultipartInvariantViolationDefect() self.policy.handle_defect(root, defect) return root @@ -266,7 +266,7 @@ def _parsegen(self): yield NeedMoreData continue break - msg = self._pop_message() + self._pop_message() # We need to pop the EOF matcher in order to tell if we're at # the end of the current file, not the end of the last block # of message headers. @@ -329,9 +329,10 @@ def _parsegen(self): # this onto the input stream until we've scanned past the # preamble. separator = '--' + boundary - boundaryre = re.compile( - '(?P' + re.escape(separator) + - r')(?P--)?(?P[ \t]*)(?P\r\n|\r|\n)?$') + def boundarymatch(line): + if not line.startswith(separator): + return None + return boundaryendRE.match(line, len(separator)) capturing_preamble = True preamble = [] linesep = False @@ -343,7 +344,7 @@ def _parsegen(self): continue if line == '': break - mo = boundaryre.match(line) + mo = boundarymatch(line) if mo: # If we're looking at the end boundary, we're done with # this multipart. If there was a newline at the end of @@ -375,13 +376,13 @@ def _parsegen(self): if line is NeedMoreData: yield NeedMoreData continue - mo = boundaryre.match(line) + mo = boundarymatch(line) if not mo: self._input.unreadline(line) break # Recurse to parse this subpart; the input stream points # at the subpart's first line. - self._input.push_eof_matcher(boundaryre.match) + self._input.push_eof_matcher(boundarymatch) for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData diff --git a/Lib/email/generator.py b/Lib/email/generator.py index c9b121624e08d5e..7ccbe10eb768567 100644 --- a/Lib/email/generator.py +++ b/Lib/email/generator.py @@ -22,7 +22,6 @@ fcre = re.compile(r'^From ', re.MULTILINE) - class Generator: """Generates output from a Message object tree. @@ -170,7 +169,7 @@ def _write(self, msg): # parameter. # # The way we do this, so as to make the _handle_*() methods simpler, - # is to cache any subpart writes into a buffer. The we write the + # is to cache any subpart writes into a buffer. Then we write the # headers and the buffer contents. That way, subpart handlers can # Do The Right Thing, and can still modify the Content-Type: header if # necessary. 
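The feedparser.py hunk above replaces a regex compiled per multipart boundary with a cheap str.startswith() check followed by one shared, precompiled tail regex matched at an offset. A minimal sketch of that pattern follows; the group names and the factory function are illustrative, not the exact ones used by email.feedparser.

    import re

    # One shared pattern for whatever may follow a MIME boundary prefix.
    _tail_re = re.compile(r'(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')

    def make_boundary_matcher(boundary):
        separator = '--' + boundary
        def boundarymatch(line):
            if not line.startswith(separator):   # cheap reject for ordinary body lines
                return None
            # match() with a start offset reuses the shared regex and avoids slicing the line
            return _tail_re.match(line, len(separator))
        return boundarymatch

    match = make_boundary_matcher('XYZ')
    assert match('--XYZ\n')                           # opening boundary
    assert match('--XYZ--\n').group('end') == '--'    # closing boundary
    assert match('plain body text') is None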
@@ -392,7 +391,7 @@ def _make_boundary(cls, text=None): def _compile_re(cls, s, flags): return re.compile(s, flags) - + class BytesGenerator(Generator): """Generates a bytes version of a Message object tree. @@ -443,7 +442,6 @@ def _compile_re(cls, s, flags): return re.compile(s.encode('ascii'), flags) - _FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]' class DecodedGenerator(Generator): @@ -503,7 +501,6 @@ def _dispatch(self, msg): }, file=self) - # Helper used by Generator._make_boundary _width = len(repr(sys.maxsize-1)) _fmt = '%%0%dd' % _width diff --git a/Lib/email/header.py b/Lib/email/header.py index 4ab0032bc661234..984851a7d9a679b 100644 --- a/Lib/email/header.py +++ b/Lib/email/header.py @@ -52,12 +52,10 @@ _embedded_header = re.compile(r'\n[^ \t]+:') - # Helpers _max_append = email.quoprimime._max_append - def decode_header(header): """Decode a message header value without converting charset. @@ -152,7 +150,6 @@ def decode_header(header): return collapsed - def make_header(decoded_seq, maxlinelen=None, header_name=None, continuation_ws=' '): """Create a Header from a sequence of pairs as returned by decode_header() @@ -175,7 +172,6 @@ def make_header(decoded_seq, maxlinelen=None, header_name=None, return h - class Header: def __init__(self, s=None, charset=None, maxlinelen=None, header_name=None, @@ -409,7 +405,6 @@ def _normalize(self): self._chunks = chunks - class _ValueFormatter: def __init__(self, headerlen, maxlen, continuation_ws, splitchars): self._maxlen = maxlen diff --git a/Lib/email/iterators.py b/Lib/email/iterators.py index b5502ee975266ba..3410935e38f4760 100644 --- a/Lib/email/iterators.py +++ b/Lib/email/iterators.py @@ -15,7 +15,6 @@ from io import StringIO - # This function will become a method of the Message class def walk(self): """Walk over the message tree, yielding each subpart. @@ -29,7 +28,6 @@ def walk(self): yield from subpart.walk() - # These two functions are imported into the Iterators.py interface module. def body_line_iterator(msg, decode=False): """Iterate over the parts, returning string payloads line-by-line. @@ -55,7 +53,6 @@ def typed_subpart_iterator(msg, maintype='text', subtype=None): yield subpart - def _structure(msg, fp=None, level=0, include_default=False): """A handy debugging aid""" if fp is None: diff --git a/Lib/email/message.py b/Lib/email/message.py index 65fda507251ce37..411118c74dabb41 100644 --- a/Lib/email/message.py +++ b/Lib/email/message.py @@ -14,7 +14,7 @@ # Intrapackage imports from email import utils from email import errors -from email._policybase import Policy, compat32 +from email._policybase import compat32 from email import charset as _charset from email._encoded_words import decode_b Charset = _charset.Charset @@ -448,7 +448,11 @@ def __delitem__(self, name): self._headers = newheaders def __contains__(self, name): - return name.lower() in [k.lower() for k, v in self._headers] + name_lower = name.lower() + for k, v in self._headers: + if name_lower == k.lower(): + return True + return False def __iter__(self): for field, value in self._headers: diff --git a/Lib/email/mime/application.py b/Lib/email/mime/application.py index 6877e554e102717..f67cbad3f034076 100644 --- a/Lib/email/mime/application.py +++ b/Lib/email/mime/application.py @@ -17,7 +17,7 @@ def __init__(self, _data, _subtype='octet-stream', _encoder=encoders.encode_base64, *, policy=None, **_params): """Create an application/* type MIME document. - _data is a string containing the raw application data. 
+ _data contains the bytes for the raw application data. _subtype is the MIME content type subtype, defaulting to 'octet-stream'. diff --git a/Lib/email/mime/audio.py b/Lib/email/mime/audio.py index 8815f5c5ec06d57..aa0c4905cbb2b4d 100644 --- a/Lib/email/mime/audio.py +++ b/Lib/email/mime/audio.py @@ -6,7 +6,6 @@ __all__ = ['MIMEAudio'] -from io import BytesIO from email import encoders from email.mime.nonmultipart import MIMENonMultipart @@ -18,7 +17,7 @@ def __init__(self, _audiodata, _subtype=None, _encoder=encoders.encode_base64, *, policy=None, **_params): """Create an audio/* type MIME document. - _audiodata is a string containing the raw audio data. If this data + _audiodata contains the bytes for the raw audio data. If this data can be decoded as au, wav, aiff, or aifc, then the subtype will be automatically included in the Content-Type header. Otherwise, you can specify the specific audio subtype via the @@ -59,10 +58,8 @@ def _what(data): # sndhdr.what() had a pretty cruddy interface, unfortunately. This is why # we re-do it here. It would be easier to reverse engineer the Unix 'file' # command and use the standard 'magic' file, as shipped with a modern Unix. - hdr = data[:512] - fakefile = BytesIO(hdr) for testfn in _rules: - if res := testfn(hdr, fakefile): + if res := testfn(data): return res else: return None @@ -74,7 +71,7 @@ def rule(rulefunc): @rule -def _aiff(h, f): +def _aiff(h): if not h.startswith(b'FORM'): return None if h[8:12] in {b'AIFC', b'AIFF'}: @@ -84,7 +81,7 @@ def _aiff(h, f): @rule -def _au(h, f): +def _au(h): if h.startswith(b'.snd'): return 'basic' else: @@ -92,7 +89,7 @@ def _au(h, f): @rule -def _wav(h, f): +def _wav(h): # 'RIFF' 'WAVE' 'fmt ' if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ': return None diff --git a/Lib/email/mime/base.py b/Lib/email/mime/base.py index 1a3f9b51f6c0456..f601f621cec3933 100644 --- a/Lib/email/mime/base.py +++ b/Lib/email/mime/base.py @@ -11,7 +11,6 @@ from email import message - class MIMEBase(message.Message): """Base class for MIME specializations.""" diff --git a/Lib/email/mime/image.py b/Lib/email/mime/image.py index e19dea91c0c9912..4b7f2f9cbad4252 100644 --- a/Lib/email/mime/image.py +++ b/Lib/email/mime/image.py @@ -17,7 +17,7 @@ def __init__(self, _imagedata, _subtype=None, _encoder=encoders.encode_base64, *, policy=None, **_params): """Create an image/* type MIME document. - _imagedata is a string containing the raw image data. If the data + _imagedata contains the bytes for the raw image data. If the data type can be detected (jpeg, png, gif, tiff, rgb, pbm, pgm, ppm, rast, xbm, bmp, webp, and exr attempted), then the subtype will be automatically included in the Content-Type header. 
Otherwise, you can diff --git a/Lib/email/mime/message.py b/Lib/email/mime/message.py index 07e4f2d11961516..61836b5a7861fca 100644 --- a/Lib/email/mime/message.py +++ b/Lib/email/mime/message.py @@ -10,7 +10,6 @@ from email.mime.nonmultipart import MIMENonMultipart - class MIMEMessage(MIMENonMultipart): """Class representing message/* MIME documents.""" diff --git a/Lib/email/mime/multipart.py b/Lib/email/mime/multipart.py index 2d3f288810dd919..94d81c771a474eb 100644 --- a/Lib/email/mime/multipart.py +++ b/Lib/email/mime/multipart.py @@ -9,7 +9,6 @@ from email.mime.base import MIMEBase - class MIMEMultipart(MIMEBase): """Base class for MIME multipart/* type messages.""" diff --git a/Lib/email/mime/nonmultipart.py b/Lib/email/mime/nonmultipart.py index e1f51968b59eb15..a41386eb148c0c8 100644 --- a/Lib/email/mime/nonmultipart.py +++ b/Lib/email/mime/nonmultipart.py @@ -10,7 +10,6 @@ from email.mime.base import MIMEBase - class MIMENonMultipart(MIMEBase): """Base class for MIME non-multipart type messages.""" diff --git a/Lib/email/mime/text.py b/Lib/email/mime/text.py index 35b442383002b22..7672b7891386009 100644 --- a/Lib/email/mime/text.py +++ b/Lib/email/mime/text.py @@ -6,11 +6,9 @@ __all__ = ['MIMEText'] -from email.charset import Charset from email.mime.nonmultipart import MIMENonMultipart - class MIMEText(MIMENonMultipart): """Class for generating text/* type MIME documents.""" @@ -37,6 +35,6 @@ def __init__(self, _text, _subtype='plain', _charset=None, *, policy=None): _charset = 'utf-8' MIMENonMultipart.__init__(self, 'text', _subtype, policy=policy, - **{'charset': str(_charset)}) + charset=str(_charset)) self.set_payload(_text, _charset) diff --git a/Lib/email/parser.py b/Lib/email/parser.py index 7db4da1ff081c1c..06d99b17f2f9c49 100644 --- a/Lib/email/parser.py +++ b/Lib/email/parser.py @@ -49,10 +49,7 @@ def parse(self, fp, headersonly=False): feedparser = FeedParser(self._class, policy=self.policy) if headersonly: feedparser._set_headersonly() - while True: - data = fp.read(8192) - if not data: - break + while data := fp.read(8192): feedparser.feed(data) return feedparser.close() @@ -67,7 +64,6 @@ def parsestr(self, text, headersonly=False): return self.parse(StringIO(text), headersonly=headersonly) - class HeaderParser(Parser): def parse(self, fp, headersonly=True): return Parser.parse(self, fp, True) @@ -75,7 +71,7 @@ def parse(self, fp, headersonly=True): def parsestr(self, text, headersonly=True): return Parser.parsestr(self, text, True) - + class BytesParser: def __init__(self, *args, **kw): diff --git a/Lib/email/utils.py b/Lib/email/utils.py index cfdfeb3f1a86e42..a49a8fa986ce0cf 100644 --- a/Lib/email/utils.py +++ b/Lib/email/utils.py @@ -25,8 +25,6 @@ import os import re import time -import random -import socket import datetime import urllib.parse @@ -36,9 +34,6 @@ from email._parseaddr import parsedate, parsedate_tz, _parsedate_tz -# Intrapackage imports -from email.charset import Charset - COMMASPACE = ', ' EMPTYSTRING = '' UEMPTYSTRING = '' @@ -94,6 +89,8 @@ def formataddr(pair, charset='utf-8'): name.encode('ascii') except UnicodeEncodeError: if isinstance(charset, str): + # lazy import to improve module import time + from email.charset import Charset charset = Charset(charset) encoded_name = charset.header_encode(name) return "%s <%s>" % (encoded_name, address) @@ -143,13 +140,13 @@ def formatdate(timeval=None, localtime=False, usegmt=False): # 2822 requires that day and month names be the English abbreviations. 
if timeval is None: timeval = time.time() - if localtime or usegmt: - dt = datetime.datetime.fromtimestamp(timeval, datetime.timezone.utc) - else: - dt = datetime.datetime.utcfromtimestamp(timeval) + dt = datetime.datetime.fromtimestamp(timeval, datetime.timezone.utc) + if localtime: dt = dt.astimezone() usegmt = False + elif not usegmt: + dt = dt.replace(tzinfo=None) return format_datetime(dt, usegmt) def format_datetime(dt, usegmt=False): @@ -181,6 +178,11 @@ def make_msgid(idstring=None, domain=None): portion of the message id after the '@'. It defaults to the locally defined hostname. """ + # Lazy imports to speedup module import time + # (no other functions in email.utils need these modules) + import random + import socket + timeval = int(time.time()*100) pid = os.getpid() randint = random.getrandbits(64) @@ -331,41 +333,23 @@ def collapse_rfc2231_value(value, errors='replace', # better than not having it. # -def localtime(dt=None, isdst=-1): +def localtime(dt=None, isdst=None): """Return local time as an aware datetime object. If called without arguments, return current time. Otherwise *dt* argument should be a datetime instance, and it is converted to the local time zone according to the system time zone database. If *dt* is naive (that is, dt.tzinfo is None), it is assumed to be in local time. - In this case, a positive or zero value for *isdst* causes localtime to - presume initially that summer time (for example, Daylight Saving Time) - is or is not (respectively) in effect for the specified time. A - negative value for *isdst* causes the localtime() function to attempt - to divine whether summer time is in effect for the specified time. + The isdst parameter is ignored. """ + if isdst is not None: + import warnings + warnings._deprecated( + "The 'isdst' parameter to 'localtime'", + message='{name} is deprecated and slated for removal in Python {remove}', + remove=(3, 14), + ) if dt is None: - return datetime.datetime.now(datetime.timezone.utc).astimezone() - if dt.tzinfo is not None: - return dt.astimezone() - # We have a naive datetime. Convert to a (localtime) timetuple and pass to - # system mktime together with the isdst hint. System mktime will return - # seconds since epoch. - tm = dt.timetuple()[:-1] + (isdst,) - seconds = time.mktime(tm) - localtm = time.localtime(seconds) - try: - delta = datetime.timedelta(seconds=localtm.tm_gmtoff) - tz = datetime.timezone(delta, localtm.tm_zone) - except AttributeError: - # Compute UTC offset and compare with the value implied by tm_isdst. - # If the values match, use the zone name implied by tm_isdst. - delta = dt - datetime.datetime(*time.gmtime(seconds)[:6]) - dst = time.daylight and localtm.tm_isdst > 0 - gmtoff = -(time.altzone if dst else time.timezone) - if delta == datetime.timedelta(seconds=gmtoff): - tz = datetime.timezone(delta, time.tzname[dst]) - else: - tz = datetime.timezone(delta) - return dt.replace(tzinfo=tz) + dt = datetime.datetime.now() + return dt.astimezone() diff --git a/Lib/encodings/idna.py b/Lib/encodings/idna.py index ea4058512fe366d..5396047a7fb0b87 100644 --- a/Lib/encodings/idna.py +++ b/Lib/encodings/idna.py @@ -39,23 +39,21 @@ def nameprep(label): # Check bidi RandAL = [stringprep.in_table_d1(x) for x in label] - for c in RandAL: - if c: - # There is a RandAL char in the string. Must perform further - # tests: - # 1) The characters in section 5.8 MUST be prohibited. 
- # This is table C.8, which was already checked - # 2) If a string contains any RandALCat character, the string - # MUST NOT contain any LCat character. - if any(stringprep.in_table_d2(x) for x in label): - raise UnicodeError("Violation of BIDI requirement 2") - - # 3) If a string contains any RandALCat character, a - # RandALCat character MUST be the first character of the - # string, and a RandALCat character MUST be the last - # character of the string. - if not RandAL[0] or not RandAL[-1]: - raise UnicodeError("Violation of BIDI requirement 3") + if any(RandAL): + # There is a RandAL char in the string. Must perform further + # tests: + # 1) The characters in section 5.8 MUST be prohibited. + # This is table C.8, which was already checked + # 2) If a string contains any RandALCat character, the string + # MUST NOT contain any LCat character. + if any(stringprep.in_table_d2(x) for x in label): + raise UnicodeError("Violation of BIDI requirement 2") + # 3) If a string contains any RandALCat character, a + # RandALCat character MUST be the first character of the + # string, and a RandALCat character MUST be the last + # character of the string. + if not RandAL[0] or not RandAL[-1]: + raise UnicodeError("Violation of BIDI requirement 3") return label @@ -103,6 +101,16 @@ def ToASCII(label): raise UnicodeError("label empty or too long") def ToUnicode(label): + if len(label) > 1024: + # Protection from https://github.com/python/cpython/issues/98433. + # https://datatracker.ietf.org/doc/html/rfc5894#section-6 + # doesn't specify a label size limit prior to NAMEPREP. But having + # one makes practical sense. + # This leaves ample room for nameprep() to remove Nothing characters + # per https://www.rfc-editor.org/rfc/rfc3454#section-3.1 while still + # preventing us from wasting time decoding a big thing that'll just + # hit the actual <= 63 length limit in Step 6. 
+ raise UnicodeError("label way too long") # Step 1: Check for ASCII if isinstance(label, bytes): pure_ascii = True diff --git a/Lib/ensurepip/__init__.py b/Lib/ensurepip/__init__.py index 4a6ba9cedf3d1c8..1fb1d505cfd0c53 100644 --- a/Lib/ensurepip/__init__.py +++ b/Lib/ensurepip/__init__.py @@ -9,11 +9,9 @@ __all__ = ["version", "bootstrap"] -_PACKAGE_NAMES = ('setuptools', 'pip') -_SETUPTOOLS_VERSION = "65.5.0" -_PIP_VERSION = "22.3" +_PACKAGE_NAMES = ('pip',) +_PIP_VERSION = "23.2.1" _PROJECTS = [ - ("setuptools", _SETUPTOOLS_VERSION, "py3"), ("pip", _PIP_VERSION, "py3"), ] @@ -153,17 +151,17 @@ def _bootstrap(*, root=None, upgrade=False, user=False, _disable_pip_configuration_settings() - # By default, installing pip and setuptools installs all of the + # By default, installing pip installs all of the # following scripts (X.Y == running Python version): # - # pip, pipX, pipX.Y, easy_install, easy_install-X.Y + # pip, pipX, pipX.Y # # pip 1.5+ allows ensurepip to request that some of those be left out if altinstall: - # omit pip, pipX and easy_install + # omit pip, pipX os.environ["ENSUREPIP_OPTIONS"] = "altinstall" elif not default_pip: - # omit pip and easy_install + # omit pip os.environ["ENSUREPIP_OPTIONS"] = "install" with tempfile.TemporaryDirectory() as tmpdir: @@ -271,14 +269,14 @@ def _main(argv=None): action="store_true", default=False, help=("Make an alternate install, installing only the X.Y versioned " - "scripts (Default: pipX, pipX.Y, easy_install-X.Y)."), + "scripts (Default: pipX, pipX.Y)."), ) parser.add_argument( "--default-pip", action="store_true", default=False, help=("Make a default pip install, installing the unqualified pip " - "and easy_install in addition to the versioned scripts."), + "in addition to the versioned scripts."), ) args = parser.parse_args(argv) diff --git a/Lib/ensurepip/_bundled/pip-22.3-py3-none-any.whl b/Lib/ensurepip/_bundled/pip-22.3-py3-none-any.whl deleted file mode 100644 index d6fccd9de92afcb..000000000000000 Binary files a/Lib/ensurepip/_bundled/pip-22.3-py3-none-any.whl and /dev/null differ diff --git a/Lib/ensurepip/_bundled/pip-23.2.1-py3-none-any.whl b/Lib/ensurepip/_bundled/pip-23.2.1-py3-none-any.whl new file mode 100644 index 000000000000000..ba28ef02e265f03 Binary files /dev/null and b/Lib/ensurepip/_bundled/pip-23.2.1-py3-none-any.whl differ diff --git a/Lib/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl b/Lib/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl deleted file mode 100644 index 123a13e2c6b2543..000000000000000 Binary files a/Lib/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl and /dev/null differ diff --git a/Lib/enum.py b/Lib/enum.py index a66c344dbc3db27..4e76e96d2c916ec 100644 --- a/Lib/enum.py +++ b/Lib/enum.py @@ -1,8 +1,6 @@ import sys import builtins as bltns from types import MappingProxyType, DynamicClassAttribute -from operator import or_ as _or_ -from functools import reduce __all__ = [ @@ -12,6 +10,7 @@ 'FlagBoundary', 'STRICT', 'CONFORM', 'EJECT', 'KEEP', 'global_flag_repr', 'global_enum_repr', 'global_str', 'global_enum', 'EnumCheck', 'CONTINUOUS', 'NAMED_FLAGS', 'UNIQUE', + 'pickle_by_global_name', 'pickle_by_enum_name', ] @@ -62,8 +61,8 @@ def _is_sunder(name): return ( len(name) > 2 and name[0] == name[-1] == '_' and - name[1:2] != '_' and - name[-2:-1] != '_' + name[1] != '_' and + name[-2] != '_' ) def _is_internal_class(cls_name, obj): @@ -82,7 +81,6 @@ def _is_private(cls_name, name): if ( len(name) > pat_len and name.startswith(pattern) - and name[pat_len:pat_len+1] != ['_'] 
and (name[-1] != '_' or name[-2] != '_') ): return True @@ -114,9 +112,12 @@ def _break_on_call_reduce(self, proto): setattr(obj, '__module__', '') def _iter_bits_lsb(num): - # num must be an integer + # num must be a positive integer + original = num if isinstance(num, Enum): num = num.value + if num < 0: + raise ValueError('%r is not a positive integer' % original) while num: b = num & (~num + 1) yield b @@ -154,7 +155,6 @@ def _dedent(text): Like textwrap.dedent. Rewritten because we cannot import textwrap. """ lines = text.split('\n') - blanks = 0 for i, ch in enumerate(lines[0]): if ch != ' ': break @@ -171,7 +171,8 @@ class auto: """ Instances are replaced with an appropriate value in Enum class suites. """ - value = _auto_null + def __init__(self, value=_auto_null): + self.value = value def __repr__(self): return "auto(%r)" % self.value @@ -186,6 +187,8 @@ class property(DynamicClassAttribute): """ member = None + _attr_type = None + _cls_type = None def __get__(self, instance, ownerclass=None): if instance is None: @@ -195,43 +198,36 @@ def __get__(self, instance, ownerclass=None): raise AttributeError( '%r has no attribute %r' % (ownerclass, self.name) ) - else: - if self.fget is None: - if self.member is None: # not sure this can happen, but just in case - raise AttributeError( - '%r has no attribute %r' % (ownerclass, self.name) - ) - # issue warning deprecating this behavior - import warnings - warnings.warn( - "`member.member` access (e.g. `Color.RED.BLUE`) is " - "deprecated and will be removed in 3.14.", - DeprecationWarning, - stacklevel=2, - ) - return self.member - # XXX: uncomment in 3.14 and remove warning above - # raise AttributeError( - # '%r member has no attribute %r' % (ownerclass, self.name) - # ) - else: - return self.fget(instance) + if self.fget is not None: + # use previous enum.property + return self.fget(instance) + elif self._attr_type == 'attr': + # look up previous attibute + return getattr(self._cls_type, self.name) + elif self._attr_type == 'desc': + # use previous descriptor + return getattr(instance._value_, self.name) + # look for a member by this name. 
+ try: + return ownerclass._member_map_[self.name] + except KeyError: + raise AttributeError( + '%r has no attribute %r' % (ownerclass, self.name) + ) from None def __set__(self, instance, value): - if self.fset is None: - raise AttributeError( - " cannot set attribute %r" % (self.clsname, self.name) - ) - else: + if self.fset is not None: return self.fset(instance, value) + raise AttributeError( + " cannot set attribute %r" % (self.clsname, self.name) + ) def __delete__(self, instance): - if self.fdel is None: - raise AttributeError( - " cannot delete attribute %r" % (self.clsname, self.name) - ) - else: + if self.fdel is not None: return self.fdel(instance) + raise AttributeError( + " cannot delete attribute %r" % (self.clsname, self.name) + ) def __set_name__(self, ownerclass, name): self.name = name @@ -262,28 +258,32 @@ def __set_name__(self, enum_class, member_name): args = (args, ) # wrap it one more time if not enum_class._use_args_: enum_member = enum_class._new_member_(enum_class) - if not hasattr(enum_member, '_value_'): + else: + enum_member = enum_class._new_member_(enum_class, *args) + if not hasattr(enum_member, '_value_'): + if enum_class._member_type_ is object: + enum_member._value_ = value + else: try: enum_member._value_ = enum_class._member_type_(*args) except Exception as exc: - enum_member._value_ = value - else: - enum_member = enum_class._new_member_(enum_class, *args) - if not hasattr(enum_member, '_value_'): - if enum_class._member_type_ is object: - enum_member._value_ = value - else: - try: - enum_member._value_ = enum_class._member_type_(*args) - except Exception as exc: - raise TypeError( - '_value_ not set in __new__, unable to create it' - ) from None + new_exc = TypeError( + '_value_ not set in __new__, unable to create it' + ) + new_exc.__cause__ = exc + raise new_exc value = enum_member._value_ enum_member._name_ = member_name enum_member.__objclass__ = enum_class enum_member.__init__(*args) enum_member._sort_order_ = len(enum_class._member_names_) + + if Flag is not None and issubclass(enum_class, Flag): + enum_class._flag_mask_ |= value + if _is_single_bit(value): + enum_class._singles_mask_ |= value + enum_class._all_bits_ = 2 ** ((enum_class._flag_mask_).bit_length()) - 1 + # If another member with the same value was already defined, the # new member becomes an alias to the existing one. 
try: @@ -313,22 +313,43 @@ def __set_name__(self, enum_class, member_name): ): # no other instances found, record this member in _member_names_ enum_class._member_names_.append(member_name) - # get redirect in place before adding to _member_map_ - # but check for other instances in parent classes first - descriptor = None + # if necessary, get redirect in place and then add it to _member_map_ + found_descriptor = None + descriptor_type = None + class_type = None for base in enum_class.__mro__[1:]: - descriptor = base.__dict__.get(member_name) - if descriptor is not None: - if isinstance(descriptor, (property, DynamicClassAttribute)): + attr = base.__dict__.get(member_name) + if attr is not None: + if isinstance(attr, (property, DynamicClassAttribute)): + found_descriptor = attr + class_type = base + descriptor_type = 'enum' break - redirect = property() - redirect.member = enum_member - redirect.__set_name__(enum_class, member_name) - if descriptor: - redirect.fget = getattr(descriptor, 'fget', None) - redirect.fset = getattr(descriptor, 'fset', None) - redirect.fdel = getattr(descriptor, 'fdel', None) - setattr(enum_class, member_name, redirect) + elif _is_descriptor(attr): + found_descriptor = attr + descriptor_type = descriptor_type or 'desc' + class_type = class_type or base + continue + else: + descriptor_type = 'attr' + class_type = base + if found_descriptor: + redirect = property() + redirect.member = enum_member + redirect.__set_name__(enum_class, member_name) + if descriptor_type in ('enum','desc'): + # earlier descriptor found; copy fget, fset, fdel to this one. + redirect.fget = getattr(found_descriptor, 'fget', None) + redirect._get = getattr(found_descriptor, '__get__', None) + redirect.fset = getattr(found_descriptor, 'fset', None) + redirect._set = getattr(found_descriptor, '__set__', None) + redirect.fdel = getattr(found_descriptor, 'fdel', None) + redirect._del = getattr(found_descriptor, '__delete__', None) + redirect._attr_type = descriptor_type + redirect._cls_type = class_type + setattr(enum_class, member_name, redirect) + else: + setattr(enum_class, member_name, enum_member) # now add to _member_map_ (even aliases) enum_class._member_map_[member_name] = enum_member try: @@ -364,16 +385,8 @@ def __setitem__(self, key, value): Single underscore (sunder) names are reserved. """ - if _is_internal_class(self._cls_name, value): - import warnings - warnings.warn( - "In 3.13 classes created inside an enum will not become a member. " - "Use the `member` decorator to keep the current behavior.", - DeprecationWarning, - stacklevel=2, - ) if _is_private(self._cls_name, key): - # also do nothing, name will be a normal attribute + # do nothing, name will be a normal attribute pass elif _is_sunder(key): if key not in ( @@ -416,10 +429,9 @@ def __setitem__(self, key, value): value = value.value elif _is_descriptor(value): pass - # TODO: uncomment next three lines in 3.12 - # elif _is_internal_class(self._cls_name, value): - # # do nothing, name will be a normal attribute - # pass + elif _is_internal_class(self._cls_name, value): + # do nothing, name will be a normal attribute + pass else: if key in self: # enum overwriting a descriptor? 
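# Illustrative sketch (not part of the patch): with the change above, a class
# defined inside an Enum body stays a plain attribute instead of becoming a
# member, and wrapping it in `member()` opts back in.  The names
# Outer/Inner/Kept below are made up for demonstration only.
from enum import Enum, member

class Outer(Enum):
    A = 1

    class Inner:            # kept as a normal attribute, not a member
        pass

    @member
    class Kept:             # forced to become a member by the decorator
        pass

assert [m.name for m in Outer] == ['A', 'Kept']
assert isinstance(Outer.Inner, type)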
@@ -427,15 +439,33 @@ def __setitem__(self, key, value): elif isinstance(value, member): # unwrap value here -- it will become a member value = value.value + non_auto_store = True + single = False if isinstance(value, auto): - if value.value == _auto_null: - value.value = self._generate_next_value( - key, 1, len(self._member_names), self._last_values[:], - ) - self._auto_called = True - value = value.value + single = True + value = (value, ) + if type(value) is tuple and any(isinstance(v, auto) for v in value): + # insist on an actual tuple, no subclasses, in keeping with only supporting + # top-level auto() usage (not contained in any other data structure) + auto_valued = [] + for v in value: + if isinstance(v, auto): + non_auto_store = False + if v.value == _auto_null: + v.value = self._generate_next_value( + key, 1, len(self._member_names), self._last_values[:], + ) + self._auto_called = True + v = v.value + self._last_values.append(v) + auto_valued.append(v) + if single: + value = auto_valued[0] + else: + value = tuple(auto_valued) self._member_names[key] = None - self._last_values.append(value) + if non_auto_store: + self._last_values.append(value) super().__setitem__(key, value) def update(self, members, **more_members): @@ -496,8 +526,13 @@ def __new__(metacls, cls, bases, classdict, *, boundary=None, _simple=False, **k # # adjust the sunders _order_ = classdict.pop('_order_', None) + _gnv = classdict.get('_generate_next_value_') + if _gnv is not None and type(_gnv) is not staticmethod: + _gnv = staticmethod(_gnv) # convert to normal dict classdict = dict(classdict.items()) + if _gnv is not None: + classdict['_generate_next_value_'] = _gnv # # data type of member and the controlling Enum class member_type, first_enum = metacls._get_mixins_(cls, bases) @@ -508,12 +543,8 @@ def __new__(metacls, cls, bases, classdict, *, boundary=None, _simple=False, **k classdict['_use_args_'] = use_args # # convert future enum members into temporary _proto_members - # and record integer values in case this will be a Flag - flag_mask = 0 for name in member_names: value = classdict[name] - if isinstance(value, int): - flag_mask |= value classdict[name] = _proto_member(value) # # house-keeping structures @@ -530,8 +561,9 @@ def __new__(metacls, cls, bases, classdict, *, boundary=None, _simple=False, **k boundary or getattr(first_enum, '_boundary_', None) ) - classdict['_flag_mask_'] = flag_mask - classdict['_all_bits_'] = 2 ** ((flag_mask).bit_length()) - 1 + classdict['_flag_mask_'] = 0 + classdict['_singles_mask_'] = 0 + classdict['_all_bits_'] = 0 classdict['_inverted_'] = None try: exc = None @@ -620,21 +652,10 @@ def __new__(metacls, cls, bases, classdict, *, boundary=None, _simple=False, **k ): delattr(enum_class, '_boundary_') delattr(enum_class, '_flag_mask_') + delattr(enum_class, '_singles_mask_') delattr(enum_class, '_all_bits_') delattr(enum_class, '_inverted_') elif Flag is not None and issubclass(enum_class, Flag): - # ensure _all_bits_ is correct and there are no missing flags - single_bit_total = 0 - multi_bit_total = 0 - for flag in enum_class._member_map_.values(): - flag_value = flag._value_ - if _is_single_bit(flag_value): - single_bit_total |= flag_value - else: - # multi-bit flags are considered aliases - multi_bit_total |= flag_value - enum_class._flag_mask_ = single_bit_total - # # set correct __iter__ member_list = [m._value_ for m in enum_class] if member_list != sorted(member_list): @@ -663,7 +684,7 @@ def __new__(metacls, cls, bases, classdict, *, boundary=None, 
_simple=False, **k 'member order does not match _order_:\n %r\n %r' % (enum_class._member_names_, _order_) ) - + # return enum_class def __bool__(cls): @@ -672,7 +693,7 @@ def __bool__(cls): """ return True - def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None): + def __call__(cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None): """ Either returns an existing member, or creates a new enum class. @@ -680,6 +701,8 @@ def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, s to an enumeration member (i.e. Color(3)) and for the functional API (i.e. Color = Enum('Color', names='RED GREEN BLUE')). + The value lookup branch is chosen if the enum is final. + When used for the functional API: `value` will be the name of the new class. @@ -697,12 +720,20 @@ def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, s `type`, if set, will be mixed in as the first base class. """ - if names is None: # simple value lookup + if cls._member_map_: + # simple value lookup if members exist + if names: + value = (value, names) + values return cls.__new__(cls, value) # otherwise, functional API: we're creating a new Enum type + if names is None and type is None: + # no body? no data-type? possibly wrong usage + raise TypeError( + f"{cls} has no members; specify `names=()` if you meant to create a new, empty, enum" + ) return cls._create_( - value, - names, + class_name=value, + names=names, module=module, qualname=qualname, type=type, @@ -826,6 +857,8 @@ def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, s value = first_enum._generate_next_value_(name, start, count, last_values[:]) last_values.append(value) names.append((name, value)) + if names is None: + names = () # Here, names is either an iterable of (name, value) or a mapping. 
for item in names: @@ -835,13 +868,15 @@ def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, s member_name, member_value = item classdict[member_name] = member_value - # TODO: replace the frame hack if a blessed way to know the calling - # module is ever developed if module is None: try: - module = sys._getframe(2).f_globals['__name__'] - except (AttributeError, ValueError, KeyError): - pass + module = sys._getframemodulename(2) + except AttributeError: + # Fall back on _getframe if _getframemodulename is missing + try: + module = sys._getframe(2).f_globals['__name__'] + except (AttributeError, ValueError, KeyError): + pass if module is None: _make_class_unpicklable(classdict) else: @@ -882,7 +917,6 @@ def _convert_(cls, name, module, filter, source=None, *, boundary=None, as_globa body['__module__'] = module tmp_cls = type(name, (object, ), body) cls = _simple_enum(etype=cls, boundary=boundary or KEEP)(tmp_cls) - cls.__reduce_ex__ = _reduce_ex_by_global_name if as_global: global_enum(cls) else: @@ -894,7 +928,7 @@ def _convert_(cls, name, module, filter, source=None, *, boundary=None, as_globa def _check_for_existing_members_(mcls, class_name, bases): for chain in bases: for base in chain.__mro__: - if issubclass(base, Enum) and base._member_names_: + if isinstance(base, EnumType) and base._member_names_: raise TypeError( " cannot extend %r" % (class_name, base) @@ -913,7 +947,7 @@ def _get_mixins_(mcls, class_name, bases): # ensure final parent class is an Enum derivative, find any concrete # data type, and check that Enum has no members first_enum = bases[-1] - if not issubclass(first_enum, Enum): + if not isinstance(first_enum, EnumType): raise TypeError("new enumerations should be created as " "`EnumName([mixin_type, ...] 
[data_type,] enum_type)`") member_type = mcls._find_data_type_(class_name, bases) or object @@ -925,16 +959,25 @@ def _find_data_repr_(mcls, class_name, bases): for base in chain.__mro__: if base is object: continue - elif issubclass(base, Enum): + elif isinstance(base, EnumType): # if we hit an Enum, use it's _value_repr_ return base._value_repr_ elif '__repr__' in base.__dict__: # this is our data repr - return base.__dict__['__repr__'] + # double-check if a dataclass with a default __repr__ + if ( + '__dataclass_fields__' in base.__dict__ + and '__dataclass_params__' in base.__dict__ + and base.__dict__['__dataclass_params__'].repr + ): + return _dataclass_repr + else: + return base.__dict__['__repr__'] return None @classmethod def _find_data_type_(mcls, class_name, bases): + # a datatype has a __new__ method, or a __dataclass_fields__ attribute data_types = set() base_chain = set() for chain in bases: @@ -943,13 +986,11 @@ def _find_data_type_(mcls, class_name, bases): base_chain.add(base) if base is object: continue - elif issubclass(base, Enum): + elif isinstance(base, EnumType): if base._member_type_ is not object: data_types.add(base._member_type_) break - elif '__new__' in base.__dict__ or '__init__' in base.__dict__: - if issubclass(base, Enum): - continue + elif '__new__' in base.__dict__ or '__dataclass_fields__' in base.__dict__: data_types.add(candidate or base) break else: @@ -1021,20 +1062,20 @@ class Enum(metaclass=EnumType): Access them by: - - attribute access:: + - attribute access: - >>> Color.RED - + >>> Color.RED + - value lookup: - >>> Color(1) - + >>> Color(1) + - name lookup: - >>> Color['RED'] - + >>> Color['RED'] + Enumerations can be iterated over, and know how many members they have: @@ -1048,6 +1089,13 @@ class Enum(metaclass=EnumType): attributes -- see the documentation for details. """ + @classmethod + def __signature__(cls): + if cls._member_names_: + return '(*values)' + else: + return '(new_class_name, /, names, *, module=None, qualname=None, type=None, start=1, boundary=None)' + def __new__(cls, value): # all enum instances are actually created during class construction # without calling this method; this method is called by the metaclass' @@ -1067,6 +1115,11 @@ def __new__(cls, value): for member in cls._member_map_.values(): if member._value_ == value: return member + # still not found -- verify that members exist, in-case somebody got here mistakenly + # (such as via super when trying to override __new__) + if not cls._member_map_: + raise TypeError("%r has no members defined" % cls) + # # still not found -- try _missing_ hook try: exc = None @@ -1102,6 +1155,7 @@ def __new__(cls, value): def __init__(self, *args, **kwds): pass + @staticmethod def _generate_next_value_(name, start, count, last_values): """ Generate the next value when not given. 
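# A minimal sketch (not part of the patch) of the __call__ behaviour changed
# above: with members present the call is a value lookup, while an enum
# without members now needs an explicit `names=()` to create an empty class.
# The names Color/Empty/AlsoEmpty are illustrative only.
from enum import Enum

Color = Enum('Color', ['RED', 'GREEN', 'BLUE'])   # functional API
assert Color(1) is Color.RED                      # value lookup once members exist

class Empty(Enum):
    pass

try:
    Empty(1)                # no members to look up -- rejected up front
except TypeError:
    pass

AlsoEmpty = Empty('AlsoEmpty', names=())          # explicitly empty subclass
assert len(AlsoEmpty) == 0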
@@ -1114,28 +1168,13 @@ def _generate_next_value_(name, start, count, last_values): if not last_values: return start try: - last = last_values[-1] - last_values.sort() - if last == last_values[-1]: - # no difference between old and new methods - return last + 1 - else: - # trigger old method (with warning) - raise TypeError + last_value = sorted(last_values).pop() except TypeError: - import warnings - warnings.warn( - "In 3.13 the default `auto()`/`_generate_next_value_` will require all values to be sortable and support adding +1\n" - "and the value returned will be the largest value in the enum incremented by 1", - DeprecationWarning, - stacklevel=3, - ) - for v in last_values: - try: - return v + 1 - except TypeError: - pass - return start + raise TypeError('unable to sort non-numeric values') from None + try: + return last_value + 1 + except TypeError: + raise TypeError('unable to increment %r' % (last_value, )) from None @classmethod def _missing_(cls, value): @@ -1150,14 +1189,13 @@ def __str__(self): def __dir__(self): """ - Returns all members and all public methods + Returns public methods and other interesting attributes. """ - if self.__class__._member_type_ is object: - interesting = set(['__class__', '__doc__', '__eq__', '__hash__', '__module__', 'name', 'value']) - else: + interesting = set() + if self.__class__._member_type_ is not object: interesting = set(object.__dir__(self)) for name in getattr(self, '__dict__', []): - if name[0] != '_': + if name[0] != '_' and name not in self._member_map_: interesting.add(name) for cls in self.__class__.mro(): for name, obj in cls.__dict__.items(): @@ -1170,7 +1208,7 @@ def __dir__(self): else: # in case it was added by `dir(self)` interesting.discard(name) - else: + elif name not in self._member_map_: interesting.add(name) names = sorted( set(['__class__', '__doc__', '__eq__', '__hash__', '__module__']) @@ -1185,7 +1223,13 @@ def __hash__(self): return hash(self._name_) def __reduce_ex__(self, proto): - return getattr, (self.__class__, self._name_) + return self.__class__, (self._value_, ) + + def __deepcopy__(self,memo): + return self + + def __copy__(self): + return self # enum.property is used to provide access to the `name` and # `value` attributes of enum members while keeping some measure of @@ -1244,6 +1288,7 @@ def __new__(cls, *values): member._value_ = value return member + @staticmethod def _generate_next_value_(name, start, count, last_values): """ Return the lower-cased version of the member name. 
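# A small illustration (not part of the patch) of the member behaviours
# touched above: auto() on a StrEnum yields the lower-cased member name,
# copying hands back the very same object, and __reduce_ex__ now recreates
# members from their value.  The name Mode is hypothetical.
import copy
import pickle
from enum import StrEnum, auto

class Mode(StrEnum):
    READ = auto()
    WRITE = auto()

assert Mode.READ == 'read' and Mode.WRITE == 'write'
assert copy.copy(Mode.READ) is Mode.READ
assert copy.deepcopy(Mode.WRITE) is Mode.WRITE
assert pickle.loads(pickle.dumps(Mode.READ)) is Mode.READ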
@@ -1251,16 +1296,22 @@ def _generate_next_value_(name, start, count, last_values): return name.lower() -def _reduce_ex_by_global_name(self, proto): +def pickle_by_global_name(self, proto): + # should not be used with Flag-type enums return self.name +_reduce_ex_by_global_name = pickle_by_global_name + +def pickle_by_enum_name(self, proto): + # should not be used with Flag-type enums + return getattr, (self.__class__, self._name_) class FlagBoundary(StrEnum): """ control how out of range values are handled - "strict" -> error is raised [default for Flag] + "strict" -> error is raised [default for Flag] "conform" -> extra bits are discarded - "eject" -> lose flag status [default for IntFlag] - "keep" -> keep flag status and all bits + "eject" -> lose flag status + "keep" -> keep flag status and all bits [default for IntFlag] """ STRICT = auto() CONFORM = auto() @@ -1269,30 +1320,14 @@ class FlagBoundary(StrEnum): STRICT, CONFORM, EJECT, KEEP = FlagBoundary -class Flag(Enum, boundary=CONFORM): +class Flag(Enum, boundary=STRICT): """ Support for flags """ - def __reduce_ex__(self, proto): - cls = self.__class__ - unknown = self._value_ & ~cls._flag_mask_ - member_value = self._value_ & cls._flag_mask_ - if unknown and member_value: - return _or_, (cls(member_value), unknown) - for val in _iter_bits_lsb(member_value): - rest = member_value & ~val - if rest: - return _or_, (cls(rest), cls._value2member_map_.get(val)) - else: - break - if self._name_ is None: - return cls, (self._value_,) - else: - return getattr, (cls, self._name_) - _numeric_repr_ = repr + @staticmethod def _generate_next_value_(name, start, count, last_values): """ Generate the next value when not given. @@ -1347,6 +1382,7 @@ def _missing_(cls, value): # - value must not include any skipped flags (e.g. if bit 2 is not # defined, then 0d10 is invalid) flag_mask = cls._flag_mask_ + singles_mask = cls._singles_mask_ all_bits = cls._all_bits_ neg_value = None if ( @@ -1378,36 +1414,49 @@ def _missing_(cls, value): value = all_bits + 1 + value # get members and unknown unknown = value & ~flag_mask - member_value = value & flag_mask + aliases = value & ~singles_mask + member_value = value & singles_mask if unknown and cls._boundary_ is not KEEP: raise ValueError( '%s(%r) --> unknown values %r [%s]' % (cls.__name__, value, unknown, bin(unknown)) ) # normal Flag? 
- __new__ = getattr(cls, '__new_member__', None) - if cls._member_type_ is object and not __new__: + if cls._member_type_ is object: # construct a singleton enum pseudo-member pseudo_member = object.__new__(cls) else: - pseudo_member = (__new__ or cls._member_type_.__new__)(cls, value) + pseudo_member = cls._member_type_.__new__(cls, value) if not hasattr(pseudo_member, '_value_'): pseudo_member._value_ = value - if member_value: - pseudo_member._name_ = '|'.join([ - m._name_ for m in cls._iter_member_(member_value) - ]) - if unknown: + if member_value or aliases: + members = [] + combined_value = 0 + for m in cls._iter_member_(member_value): + members.append(m) + combined_value |= m._value_ + if aliases: + value = member_value | aliases + for n, pm in cls._member_map_.items(): + if pm not in members and pm._value_ and pm._value_ & value == pm._value_: + members.append(pm) + combined_value |= pm._value_ + unknown = value ^ combined_value + pseudo_member._name_ = '|'.join([m._name_ for m in members]) + if not combined_value: + pseudo_member._name_ = None + elif unknown and cls._boundary_ is STRICT: + raise ValueError('%r: no members with value %r' % (cls, unknown)) + elif unknown: pseudo_member._name_ += '|%s' % cls._numeric_repr_(unknown) else: pseudo_member._name_ = None # use setdefault in case another thread already created a composite - # with this value, but only if all members are known - # note: zero is a special case -- add it - if not unknown: - pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) - if neg_value is not None: - cls._value2member_map_[neg_value] = pseudo_member + # with this value + # note: zero is a special case -- always add it + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) + if neg_value is not None: + cls._value2member_map_[neg_value] = pseudo_member return pseudo_member def __contains__(self, other): @@ -1479,14 +1528,10 @@ def __xor__(self, other): def __invert__(self): if self._inverted_ is None: - if self._boundary_ is KEEP: - # use all bits + if self._boundary_ in (EJECT, KEEP): self._inverted_ = self.__class__(~self._value_) else: - # calculate flags not in this member - self._inverted_ = self.__class__(self._flag_mask_ ^ self._value_) - if isinstance(self._inverted_, self.__class__): - self._inverted_._inverted_ = self + self._inverted_ = self.__class__(self._singles_mask_ & ~self._value_) return self._inverted_ __rand__ = __and__ @@ -1521,10 +1566,13 @@ def unique(enumeration): (enumeration, alias_details)) return enumeration -def _power_of_two(value): - if value < 1: - return False - return value == 2 ** _high_bit(value) +def _dataclass_repr(self): + dcf = self.__dataclass_fields__ + return ', '.join( + '%s=%r' % (k, getattr(self, k)) + for k in dcf.keys() + if dcf[k].repr + ) def global_enum_repr(self): """ @@ -1588,7 +1636,7 @@ def _simple_enum(etype=Enum, *, boundary=None, use_args=None): Class decorator that converts a normal class into an :class:`Enum`. No safety checks are done, and some advanced behavior (such as :func:`__init_subclass__`) is not available. Enum creation can be faster - using :func:`simple_enum`. + using :func:`_simple_enum`. 
>>> from enum import Enum, _simple_enum >>> @_simple_enum(Enum) @@ -1626,6 +1674,7 @@ def convert_class(cls): body['_boundary_'] = boundary or etype._boundary_ body['_flag_mask_'] = None body['_all_bits_'] = None + body['_singles_mask_'] = None body['_inverted_'] = None body['__or__'] = Flag.__or__ body['__xor__'] = Flag.__xor__ @@ -1701,7 +1750,8 @@ def convert_class(cls): else: multi_bits |= value gnv_last_values.append(value) - enum_class._flag_mask_ = single_bits + enum_class._flag_mask_ = single_bits | multi_bits + enum_class._singles_mask_ = single_bits enum_class._all_bits_ = 2 ** ((single_bits|multi_bits).bit_length()) - 1 # set correct __iter__ member_list = [m._value_ for m in enum_class] @@ -1822,11 +1872,15 @@ def __call__(self, enumeration): if name in member_names: # not an alias continue + if alias.value < 0: + # negative numbers are not checked + continue values = list(_iter_bits_lsb(alias.value)) missed = [v for v in values if v not in member_values] if missed: missing_names.append(name) - missing_value |= reduce(_or_, missed) + for val in missed: + missing_value |= val if missing_names: if len(missing_names) == 1: alias = 'alias %s is missing' % missing_names[0] @@ -1993,7 +2047,6 @@ def _old_convert_(etype, name, module, filter, source=None, *, boundary=None): # unless some values aren't comparable, in which case sort by name members.sort(key=lambda t: t[0]) cls = etype(name, members, module=module, boundary=boundary or KEEP) - cls.__reduce_ex__ = _reduce_ex_by_global_name return cls _stdlib_enums = IntEnum, StrEnum, IntFlag diff --git a/Lib/fileinput.py b/Lib/fileinput.py index e234dc9ea65f15a..1b25f28f3d34326 100644 --- a/Lib/fileinput.py +++ b/Lib/fileinput.py @@ -399,7 +399,7 @@ def isstdin(self): def hook_compressed(filename, mode, *, encoding=None, errors=None): - if encoding is None: # EncodingWarning is emitted in FileInput() already. + if encoding is None and "b" not in mode: # EncodingWarning is emitted in FileInput() already. encoding = "locale" ext = os.path.splitext(filename)[1] if ext == '.gz': diff --git a/Lib/fnmatch.py b/Lib/fnmatch.py index d5e296f7748c1c6..73acb1fe8d41060 100644 --- a/Lib/fnmatch.py +++ b/Lib/fnmatch.py @@ -78,6 +78,11 @@ def translate(pat): """ STAR = object() + parts = _translate(pat, STAR, '.') + return _join_translated_parts(parts, STAR) + + +def _translate(pat, STAR, QUESTION_MARK): res = [] add = res.append i, n = 0, len(pat) @@ -89,7 +94,7 @@ def translate(pat): if (not res) or res[-1] is not STAR: add(STAR) elif c == '?': - add('.') + add(QUESTION_MARK) elif c == '[': j = i if j < n and pat[j] == '!': @@ -146,9 +151,11 @@ def translate(pat): else: add(re.escape(c)) assert i == n + return res + +def _join_translated_parts(inp, STAR): # Deal with STARs. - inp = res res = [] add = res.append i, n = 0, len(inp) diff --git a/Lib/fractions.py b/Lib/fractions.py index 75c7df14e1b9c77..c95db0730e5b6d2 100644 --- a/Lib/fractions.py +++ b/Lib/fractions.py @@ -69,6 +69,96 @@ def _hash_algorithm(numerator, denominator): """, re.VERBOSE | re.IGNORECASE) +# Helpers for formatting + +def _round_to_exponent(n, d, exponent, no_neg_zero=False): + """Round a rational number to the nearest multiple of a given power of 10. + + Rounds the rational number n/d to the nearest integer multiple of + 10**exponent, rounding to the nearest even integer multiple in the case of + a tie. Returns a pair (sign: bool, significand: int) representing the + rounded value (-1)**sign * significand * 10**exponent. 
+ + If no_neg_zero is true, then the returned sign will always be False when + the significand is zero. Otherwise, the sign reflects the sign of the + input. + + d must be positive, but n and d need not be relatively prime. + """ + if exponent >= 0: + d *= 10**exponent + else: + n *= 10**-exponent + + # The divmod quotient is correct for round-ties-towards-positive-infinity; + # In the case of a tie, we zero out the least significant bit of q. + q, r = divmod(n + (d >> 1), d) + if r == 0 and d & 1 == 0: + q &= -2 + + sign = q < 0 if no_neg_zero else n < 0 + return sign, abs(q) + + +def _round_to_figures(n, d, figures): + """Round a rational number to a given number of significant figures. + + Rounds the rational number n/d to the given number of significant figures + using the round-ties-to-even rule, and returns a triple + (sign: bool, significand: int, exponent: int) representing the rounded + value (-1)**sign * significand * 10**exponent. + + In the special case where n = 0, returns a significand of zero and + an exponent of 1 - figures, for compatibility with formatting. + Otherwise, the returned significand satisfies + 10**(figures - 1) <= significand < 10**figures. + + d must be positive, but n and d need not be relatively prime. + figures must be positive. + """ + # Special case for n == 0. + if n == 0: + return False, 0, 1 - figures + + # Find integer m satisfying 10**(m - 1) <= abs(n)/d <= 10**m. (If abs(n)/d + # is a power of 10, either of the two possible values for m is fine.) + str_n, str_d = str(abs(n)), str(d) + m = len(str_n) - len(str_d) + (str_d <= str_n) + + # Round to a multiple of 10**(m - figures). The significand we get + # satisfies 10**(figures - 1) <= significand <= 10**figures. + exponent = m - figures + sign, significand = _round_to_exponent(n, d, exponent) + + # Adjust in the case where significand == 10**figures, to ensure that + # 10**(figures - 1) <= significand < 10**figures. + if len(str(significand)) == figures + 1: + significand //= 10 + exponent += 1 + + return sign, significand, exponent + + +# Pattern for matching float-style format specifications; +# supports 'e', 'E', 'f', 'F', 'g', 'G' and '%' presentation types. +_FLOAT_FORMAT_SPECIFICATION_MATCHER = re.compile(r""" + (?: + (?P.)? + (?P[<>=^]) + )? + (?P[-+ ]?) + (?Pz)? + (?P\#)? + # A '0' that's *not* followed by another digit is parsed as a minimum width + # rather than a zeropad flag. + (?P0(?=[0-9]))? + (?P0|[1-9][0-9]*)? + (?P[,_])? + (?:\.(?P0|[1-9][0-9]*))? + (?P[eEfFgG%]) +""", re.DOTALL | re.VERBOSE).fullmatch + + class Fraction(numbers.Rational): """This class implements rational numbers. @@ -93,7 +183,7 @@ class Fraction(numbers.Rational): __slots__ = ('_numerator', '_denominator') # We're immutable, so use __new__ not __init__ - def __new__(cls, numerator=0, denominator=None, *, _normalize=True): + def __new__(cls, numerator=0, denominator=None): """Constructs a Rational. 
Takes a string like '3/2' or '1.5', another Rational instance, a @@ -189,12 +279,11 @@ def __new__(cls, numerator=0, denominator=None, *, _normalize=True): if denominator == 0: raise ZeroDivisionError('Fraction(%s, 0)' % numerator) - if _normalize: - g = math.gcd(numerator, denominator) - if denominator < 0: - g = -g - numerator //= g - denominator //= g + g = math.gcd(numerator, denominator) + if denominator < 0: + g = -g + numerator //= g + denominator //= g self._numerator = numerator self._denominator = denominator return self @@ -211,7 +300,7 @@ def from_float(cls, f): elif not isinstance(f, float): raise TypeError("%s.from_float() only takes floats, not %r (%s)" % (cls.__name__, f, type(f).__name__)) - return cls(*f.as_integer_ratio()) + return cls._from_coprime_ints(*f.as_integer_ratio()) @classmethod def from_decimal(cls, dec): @@ -223,13 +312,28 @@ def from_decimal(cls, dec): raise TypeError( "%s.from_decimal() only takes Decimals, not %r (%s)" % (cls.__name__, dec, type(dec).__name__)) - return cls(*dec.as_integer_ratio()) + return cls._from_coprime_ints(*dec.as_integer_ratio()) + + @classmethod + def _from_coprime_ints(cls, numerator, denominator, /): + """Convert a pair of ints to a rational number, for internal use. + + The ratio of integers should be in lowest terms and the denominator + should be positive. + """ + obj = super(Fraction, cls).__new__(cls) + obj._numerator = numerator + obj._denominator = denominator + return obj + + def is_integer(self): + """Return True if the Fraction is an integer.""" + return self._denominator == 1 def as_integer_ratio(self): - """Return the integer ratio as a tuple. + """Return a pair of integers, whose ratio is equal to the original Fraction. - Return a tuple of two integers, whose ratio is equal to the - Fraction and with a positive denominator. + The ratio is in lowest terms and has a positive denominator. """ return (self._numerator, self._denominator) @@ -286,9 +390,9 @@ def limit_denominator(self, max_denominator=1000000): # the distance from p1/q1 to self is d/(q1*self._denominator). So we # need to compare 2*(q0+k*q1) with self._denominator/d. if 2*d*(q0+k*q1) <= self._denominator: - return Fraction(p1, q1, _normalize=False) + return Fraction._from_coprime_ints(p1, q1) else: - return Fraction(p0+k*p1, q0+k*q1, _normalize=False) + return Fraction._from_coprime_ints(p0+k*p1, q0+k*q1) @property def numerator(a): @@ -310,6 +414,122 @@ def __str__(self): else: return '%s/%s' % (self._numerator, self._denominator) + def __format__(self, format_spec, /): + """Format this fraction according to the given format specification.""" + + # Backwards compatiblility with existing formatting. + if not format_spec: + return str(self) + + # Validate and parse the format specifier. + match = _FLOAT_FORMAT_SPECIFICATION_MATCHER(format_spec) + if match is None: + raise ValueError( + f"Invalid format specifier {format_spec!r} " + f"for object of type {type(self).__name__!r}" + ) + elif match["align"] is not None and match["zeropad"] is not None: + # Avoid the temptation to guess. 
+ raise ValueError( + f"Invalid format specifier {format_spec!r} " + f"for object of type {type(self).__name__!r}; " + "can't use explicit alignment when zero-padding" + ) + fill = match["fill"] or " " + align = match["align"] or ">" + pos_sign = "" if match["sign"] == "-" else match["sign"] + no_neg_zero = bool(match["no_neg_zero"]) + alternate_form = bool(match["alt"]) + zeropad = bool(match["zeropad"]) + minimumwidth = int(match["minimumwidth"] or "0") + thousands_sep = match["thousands_sep"] + precision = int(match["precision"] or "6") + presentation_type = match["presentation_type"] + trim_zeros = presentation_type in "gG" and not alternate_form + trim_point = not alternate_form + exponent_indicator = "E" if presentation_type in "EFG" else "e" + + # Round to get the digits we need, figure out where to place the point, + # and decide whether to use scientific notation. 'point_pos' is the + # relative to the _end_ of the digit string: that is, it's the number + # of digits that should follow the point. + if presentation_type in "fF%": + exponent = -precision + if presentation_type == "%": + exponent -= 2 + negative, significand = _round_to_exponent( + self._numerator, self._denominator, exponent, no_neg_zero) + scientific = False + point_pos = precision + else: # presentation_type in "eEgG" + figures = ( + max(precision, 1) + if presentation_type in "gG" + else precision + 1 + ) + negative, significand, exponent = _round_to_figures( + self._numerator, self._denominator, figures) + scientific = ( + presentation_type in "eE" + or exponent > 0 + or exponent + figures <= -4 + ) + point_pos = figures - 1 if scientific else -exponent + + # Get the suffix - the part following the digits, if any. + if presentation_type == "%": + suffix = "%" + elif scientific: + suffix = f"{exponent_indicator}{exponent + point_pos:+03d}" + else: + suffix = "" + + # String of output digits, padded sufficiently with zeros on the left + # so that we'll have at least one digit before the decimal point. + digits = f"{significand:0{point_pos + 1}d}" + + # Before padding, the output has the form f"{sign}{leading}{trailing}", + # where `leading` includes thousands separators if necessary and + # `trailing` includes the decimal separator where appropriate. + sign = "-" if negative else pos_sign + leading = digits[: len(digits) - point_pos] + frac_part = digits[len(digits) - point_pos :] + if trim_zeros: + frac_part = frac_part.rstrip("0") + separator = "" if trim_point and not frac_part else "." + trailing = separator + frac_part + suffix + + # Do zero padding if required. + if zeropad: + min_leading = minimumwidth - len(sign) - len(trailing) + # When adding thousands separators, they'll be added to the + # zero-padded portion too, so we need to compensate. + leading = leading.zfill( + 3 * min_leading // 4 + 1 if thousands_sep else min_leading + ) + + # Insert thousands separators if required. + if thousands_sep: + first_pos = 1 + (len(leading) - 1) % 3 + leading = leading[:first_pos] + "".join( + thousands_sep + leading[pos : pos + 3] + for pos in range(first_pos, len(leading), 3) + ) + + # We now have a sign and a body. Pad with fill character if necessary + # and return. 
+ body = leading + trailing + padding = fill * (minimumwidth - len(sign) - len(body)) + if align == ">": + return padding + sign + body + elif align == "<": + return sign + body + padding + elif align == "^": + half = len(padding) // 2 + return padding[:half] + sign + body + padding[half:] + else: # align == "=" + return sign + padding + body + def _operator_fallbacks(monomorphic_operator, fallback_operator): """Generates forward and reverse operators given a purely-rational operator and a function from the operator module. @@ -391,8 +611,10 @@ class doesn't subclass a concrete type, there's no """ def forward(a, b): - if isinstance(b, (int, Fraction)): + if isinstance(b, Fraction): return monomorphic_operator(a, b) + elif isinstance(b, int): + return monomorphic_operator(a, Fraction(b)) elif isinstance(b, float): return fallback_operator(float(a), b) elif isinstance(b, complex): @@ -405,7 +627,7 @@ def forward(a, b): def reverse(b, a): if isinstance(a, numbers.Rational): # Includes ints. - return monomorphic_operator(a, b) + return monomorphic_operator(Fraction(a), b) elif isinstance(a, numbers.Real): return fallback_operator(float(a), float(b)) elif isinstance(a, numbers.Complex): @@ -487,40 +709,40 @@ def reverse(b, a): def _add(a, b): """a + b""" - na, da = a.numerator, a.denominator - nb, db = b.numerator, b.denominator + na, da = a._numerator, a._denominator + nb, db = b._numerator, b._denominator g = math.gcd(da, db) if g == 1: - return Fraction(na * db + da * nb, da * db, _normalize=False) + return Fraction._from_coprime_ints(na * db + da * nb, da * db) s = da // g t = na * (db // g) + nb * s g2 = math.gcd(t, g) if g2 == 1: - return Fraction(t, s * db, _normalize=False) - return Fraction(t // g2, s * (db // g2), _normalize=False) + return Fraction._from_coprime_ints(t, s * db) + return Fraction._from_coprime_ints(t // g2, s * (db // g2)) __add__, __radd__ = _operator_fallbacks(_add, operator.add) def _sub(a, b): """a - b""" - na, da = a.numerator, a.denominator - nb, db = b.numerator, b.denominator + na, da = a._numerator, a._denominator + nb, db = b._numerator, b._denominator g = math.gcd(da, db) if g == 1: - return Fraction(na * db - da * nb, da * db, _normalize=False) + return Fraction._from_coprime_ints(na * db - da * nb, da * db) s = da // g t = na * (db // g) - nb * s g2 = math.gcd(t, g) if g2 == 1: - return Fraction(t, s * db, _normalize=False) - return Fraction(t // g2, s * (db // g2), _normalize=False) + return Fraction._from_coprime_ints(t, s * db) + return Fraction._from_coprime_ints(t // g2, s * (db // g2)) __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub) def _mul(a, b): """a * b""" - na, da = a.numerator, a.denominator - nb, db = b.numerator, b.denominator + na, da = a._numerator, a._denominator + nb, db = b._numerator, b._denominator g1 = math.gcd(na, db) if g1 > 1: na //= g1 @@ -529,15 +751,17 @@ def _mul(a, b): if g2 > 1: nb //= g2 da //= g2 - return Fraction(na * nb, db * da, _normalize=False) + return Fraction._from_coprime_ints(na * nb, db * da) __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul) def _div(a, b): """a / b""" # Same as _mul(), with inversed b. 
- na, da = a.numerator, a.denominator - nb, db = b.numerator, b.denominator + nb, db = b._numerator, b._denominator + if nb == 0: + raise ZeroDivisionError('Fraction(%s, 0)' % db) + na, da = a._numerator, a._denominator g1 = math.gcd(na, nb) if g1 > 1: na //= g1 @@ -549,7 +773,7 @@ def _div(a, b): n, d = na * db, nb * da if d < 0: n, d = -n, -d - return Fraction(n, d, _normalize=False) + return Fraction._from_coprime_ints(n, d) __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv) @@ -586,17 +810,17 @@ def __pow__(a, b): if b.denominator == 1: power = b.numerator if power >= 0: - return Fraction(a._numerator ** power, - a._denominator ** power, - _normalize=False) - elif a._numerator >= 0: - return Fraction(a._denominator ** -power, - a._numerator ** -power, - _normalize=False) + return Fraction._from_coprime_ints(a._numerator ** power, + a._denominator ** power) + elif a._numerator > 0: + return Fraction._from_coprime_ints(a._denominator ** -power, + a._numerator ** -power) + elif a._numerator == 0: + raise ZeroDivisionError('Fraction(%s, 0)' % + a._denominator ** -power) else: - return Fraction((-a._denominator) ** -power, - (-a._numerator) ** -power, - _normalize=False) + return Fraction._from_coprime_ints((-a._denominator) ** -power, + (-a._numerator) ** -power) else: # A fractional power will generally produce an # irrational number. @@ -620,15 +844,15 @@ def __rpow__(b, a): def __pos__(a): """+a: Coerces a subclass instance to Fraction""" - return Fraction(a._numerator, a._denominator, _normalize=False) + return Fraction._from_coprime_ints(a._numerator, a._denominator) def __neg__(a): """-a""" - return Fraction(-a._numerator, a._denominator, _normalize=False) + return Fraction._from_coprime_ints(-a._numerator, a._denominator) def __abs__(a): """abs(a)""" - return Fraction(abs(a._numerator), a._denominator, _normalize=False) + return Fraction._from_coprime_ints(abs(a._numerator), a._denominator) def __int__(a, _index=operator.index): """int(a)""" @@ -646,12 +870,12 @@ def __trunc__(a): def __floor__(a): """math.floor(a)""" - return a.numerator // a.denominator + return a._numerator // a._denominator def __ceil__(a): """math.ceil(a)""" # The negations cleverly convince floordiv to return the ceiling. - return -(-a.numerator // a.denominator) + return -(-a._numerator // a._denominator) def __round__(self, ndigits=None): """round(self, ndigits) @@ -659,10 +883,11 @@ def __round__(self, ndigits=None): Rounds half toward even. 
""" if ndigits is None: - floor, remainder = divmod(self.numerator, self.denominator) - if remainder * 2 < self.denominator: + d = self._denominator + floor, remainder = divmod(self._numerator, d) + if remainder * 2 < d: return floor - elif remainder * 2 > self.denominator: + elif remainder * 2 > d: return floor + 1 # Deal with the half case: elif floor % 2 == 0: diff --git a/Lib/ftplib.py b/Lib/ftplib.py index dc9a8afbd8d2479..a56e0c3085701b8 100644 --- a/Lib/ftplib.py +++ b/Lib/ftplib.py @@ -434,10 +434,7 @@ def retrbinary(self, cmd, callback, blocksize=8192, rest=None): """ self.voidcmd('TYPE I') with self.transfercmd(cmd, rest) as conn: - while 1: - data = conn.recv(blocksize) - if not data: - break + while data := conn.recv(blocksize): callback(data) # shutdown ssl layer if _SSLSocket is not None and isinstance(conn, _SSLSocket): @@ -496,10 +493,7 @@ def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None): """ self.voidcmd('TYPE I') with self.transfercmd(cmd, rest) as conn: - while 1: - buf = fp.read(blocksize) - if not buf: - break + while buf := fp.read(blocksize): conn.sendall(buf) if callback: callback(buf) @@ -713,28 +707,12 @@ class FTP_TLS(FTP): '221 Goodbye.' >>> ''' - ssl_version = ssl.PROTOCOL_TLS_CLIENT def __init__(self, host='', user='', passwd='', acct='', - keyfile=None, certfile=None, context=None, - timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, - encoding='utf-8'): - if context is not None and keyfile is not None: - raise ValueError("context and keyfile arguments are mutually " - "exclusive") - if context is not None and certfile is not None: - raise ValueError("context and certfile arguments are mutually " - "exclusive") - if keyfile is not None or certfile is not None: - import warnings - warnings.warn("keyfile and certfile are deprecated, use a " - "custom context instead", DeprecationWarning, 2) - self.keyfile = keyfile - self.certfile = certfile + *, context=None, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None, encoding='utf-8'): if context is None: - context = ssl._create_stdlib_context(self.ssl_version, - certfile=certfile, - keyfile=keyfile) + context = ssl._create_stdlib_context() self.context = context self._prot_p = False super().__init__(host, user, passwd, acct, @@ -749,7 +727,7 @@ def auth(self): '''Set up secure control connection by using TLS/SSL.''' if isinstance(self.sock, ssl.SSLSocket): raise ValueError("Already using TLS") - if self.ssl_version >= ssl.PROTOCOL_TLS: + if self.context.protocol >= ssl.PROTOCOL_TLS: resp = self.voidcmd('AUTH TLS') else: resp = self.voidcmd('AUTH SSL') diff --git a/Lib/functools.py b/Lib/functools.py index 43ead512e1ea4e6..55990e742bf23fd 100644 --- a/Lib/functools.py +++ b/Lib/functools.py @@ -19,8 +19,9 @@ # import types, weakref # Deferred to single_dispatch() from reprlib import recursive_repr from _thread import RLock -from types import GenericAlias +# Avoid importing types, so we can speedup import time +GenericAlias = type(list[int]) ################################################################################ ### update_wrapper() and wraps() decorator @@ -30,7 +31,7 @@ # wrapper functions that can handle naive introspection WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__', - '__annotations__') + '__annotations__', '__type_params__') WRAPPER_UPDATES = ('__dict__',) def update_wrapper(wrapper, wrapped, @@ -236,7 +237,7 @@ def __ge__(self, other): def reduce(function, sequence, initial=_initial_missing): """ - reduce(function, iterable[, initial]) -> 
value + reduce(function, iterable[, initial], /) -> value Apply a function of two arguments cumulatively to the items of a sequence or iterable, from left to right, so as to reduce the iterable to a single @@ -483,8 +484,9 @@ def lru_cache(maxsize=128, typed=False): can grow without bound. If *typed* is True, arguments of different types will be cached separately. - For example, f(3.0) and f(3) will be treated as distinct calls with - distinct results. + For example, f(decimal.Decimal("3.0")) and f(3.0) will be treated as + distinct calls with distinct results. Some types such as str and int may + be cached separately even when typed is false. Arguments to the cached function must be hashable. @@ -933,6 +935,9 @@ def __init__(self, func): self.dispatcher = singledispatch(func) self.func = func + import weakref # see comment in singledispatch function + self._method_cache = weakref.WeakKeyDictionary() + def register(self, cls, method=None): """generic_method.register(cls, func) -> func @@ -941,13 +946,27 @@ def register(self, cls, method=None): return self.dispatcher.register(cls, func=method) def __get__(self, obj, cls=None): + if self._method_cache is not None: + try: + _method = self._method_cache[obj] + except TypeError: + self._method_cache = None + except KeyError: + pass + else: + return _method + + dispatch = self.dispatcher.dispatch def _method(*args, **kwargs): - method = self.dispatcher.dispatch(args[0].__class__) - return method.__get__(obj, cls)(*args, **kwargs) + return dispatch(args[0].__class__).__get__(obj, cls)(*args, **kwargs) _method.__isabstractmethod__ = self.__isabstractmethod__ _method.register = self.register update_wrapper(_method, self.func) + + if self._method_cache is not None: + self._method_cache[obj] = _method + return _method @property @@ -956,18 +975,17 @@ def __isabstractmethod__(self): ################################################################################ -### cached_property() - computed once per instance, cached as attribute +### cached_property() - property result cached as instance attribute ################################################################################ _NOT_FOUND = object() - class cached_property: def __init__(self, func): self.func = func self.attrname = None self.__doc__ = func.__doc__ - self.lock = RLock() + self.__module__ = func.__module__ def __set_name__(self, owner, name): if self.attrname is None: @@ -994,19 +1012,15 @@ def __get__(self, instance, owner=None): raise TypeError(msg) from None val = cache.get(self.attrname, _NOT_FOUND) if val is _NOT_FOUND: - with self.lock: - # check if another thread filled cache while we awaited lock - val = cache.get(self.attrname, _NOT_FOUND) - if val is _NOT_FOUND: - val = self.func(instance) - try: - cache[self.attrname] = val - except TypeError: - msg = ( - f"The '__dict__' attribute on {type(instance).__name__!r} instance " - f"does not support item assignment for caching {self.attrname!r} property." - ) - raise TypeError(msg) from None + val = self.func(instance) + try: + cache[self.attrname] = val + except TypeError: + msg = ( + f"The '__dict__' attribute on {type(instance).__name__!r} instance " + f"does not support item assignment for caching {self.attrname!r} property." 
+ ) + raise TypeError(msg) from None return val __class_getitem__ = classmethod(GenericAlias) diff --git a/Lib/genericpath.py b/Lib/genericpath.py index ce36451a3af01cb..1bd5b3897c3af9c 100644 --- a/Lib/genericpath.py +++ b/Lib/genericpath.py @@ -7,7 +7,7 @@ import stat __all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', - 'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile', + 'getsize', 'isdir', 'isfile', 'islink', 'samefile', 'sameopenfile', 'samestat'] @@ -45,6 +45,18 @@ def isdir(s): return stat.S_ISDIR(st.st_mode) +# Is a path a symbolic link? +# This will always return false on systems where os.lstat doesn't exist. + +def islink(path): + """Test whether a path is a symbolic link""" + try: + st = os.lstat(path) + except (OSError, ValueError, AttributeError): + return False + return stat.S_ISLNK(st.st_mode) + + def getsize(filename): """Return the size of a file, reported by os.stat().""" return os.stat(filename).st_size diff --git a/Lib/getpass.py b/Lib/getpass.py index 6970d8adfbab367..bd0097ced94c5e0 100644 --- a/Lib/getpass.py +++ b/Lib/getpass.py @@ -18,7 +18,6 @@ import io import os import sys -import warnings __all__ = ["getpass","getuser","GetPassWarning"] @@ -118,6 +117,7 @@ def win_getpass(prompt='Password: ', stream=None): def fallback_getpass(prompt='Password: ', stream=None): + import warnings warnings.warn("Can not control echo on the terminal.", GetPassWarning, stacklevel=2) if not stream: @@ -156,7 +156,11 @@ def getuser(): First try various environment variables, then the password database. This works on Windows as long as USERNAME is set. + Any failure to find a username raises OSError. + .. versionchanged:: 3.13 + Previously, various exceptions beyond just :exc:`OSError` + were raised. """ for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'): @@ -164,9 +168,12 @@ def getuser(): if user: return user - # If this fails, the exception will "explain" why - import pwd - return pwd.getpwuid(os.getuid())[0] + try: + import pwd + return pwd.getpwuid(os.getuid())[0] + except (ImportError, KeyError) as e: + raise OSError('No username set in the environment') from e + # Bind the name getpass to the appropriate function try: diff --git a/Lib/gettext.py b/Lib/gettext.py index 6c5ec4e517f6372..62cff81b7b3d496 100644 --- a/Lib/gettext.py +++ b/Lib/gettext.py @@ -46,6 +46,7 @@ # find this format documented anywhere. 
+import operator import os import re import sys @@ -166,14 +167,28 @@ def _parse(tokens, priority=-1): def _as_int(n): try: - i = round(n) + round(n) except TypeError: raise TypeError('Plural value must be an integer, got %s' % (n.__class__.__name__,)) from None + return _as_int2(n) + +def _as_int2(n): + try: + return operator.index(n) + except TypeError: + pass + import warnings + frame = sys._getframe(1) + stacklevel = 2 + while frame.f_back is not None and frame.f_globals.get('__name__') == __name__: + stacklevel += 1 + frame = frame.f_back warnings.warn('Plural value must be an integer, got %s' % (n.__class__.__name__,), - DeprecationWarning, 4) + DeprecationWarning, + stacklevel) return n @@ -200,7 +215,7 @@ def c2py(plural): elif c == ')': depth -= 1 - ns = {'_as_int': _as_int} + ns = {'_as_int': _as_int, '__name__': __name__} exec('''if True: def func(n): if not isinstance(n, int): @@ -280,6 +295,7 @@ def gettext(self, message): def ngettext(self, msgid1, msgid2, n): if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) + n = _as_int2(n) if n == 1: return msgid1 else: @@ -293,6 +309,7 @@ def pgettext(self, context, message): def npgettext(self, context, msgid1, msgid2, n): if self._fallback: return self._fallback.npgettext(context, msgid1, msgid2, n) + n = _as_int2(n) if n == 1: return msgid1 else: @@ -422,10 +439,12 @@ def gettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: - if self._fallback: - return self._fallback.gettext(message) - return message - return tmsg + tmsg = self._catalog.get((message, self.plural(1)), missing) + if tmsg is not missing: + return tmsg + if self._fallback: + return self._fallback.gettext(message) + return message def ngettext(self, msgid1, msgid2, n): try: @@ -444,10 +463,12 @@ def pgettext(self, context, message): missing = object() tmsg = self._catalog.get(ctxt_msg_id, missing) if tmsg is missing: - if self._fallback: - return self._fallback.pgettext(context, message) - return message - return tmsg + tmsg = self._catalog.get((ctxt_msg_id, self.plural(1)), missing) + if tmsg is not missing: + return tmsg + if self._fallback: + return self._fallback.pgettext(context, message) + return message def npgettext(self, context, msgid1, msgid2, n): ctxt_msg_id = self.CONTEXT % (context, msgid1) @@ -575,6 +596,7 @@ def dngettext(domain, msgid1, msgid2, n): try: t = translation(domain, _localedirs.get(domain, None)) except OSError: + n = _as_int2(n) if n == 1: return msgid1 else: @@ -594,6 +616,7 @@ def dnpgettext(domain, context, msgid1, msgid2, n): try: t = translation(domain, _localedirs.get(domain, None)) except OSError: + n = _as_int2(n) if n == 1: return msgid1 else: diff --git a/Lib/glob.py b/Lib/glob.py index a7256422d520fb6..4a335a10766cf40 100644 --- a/Lib/glob.py +++ b/Lib/glob.py @@ -249,3 +249,63 @@ def escape(pathname): _dir_open_flags = os.O_RDONLY | getattr(os, 'O_DIRECTORY', 0) + + +def translate(pat, *, recursive=False, include_hidden=False, seps=None): + """Translate a pathname with shell wildcards to a regular expression. + + If `recursive` is true, the pattern segment '**' will match any number of + path segments; if '**' appears outside its own segment, ValueError will be + raised. + + If `include_hidden` is true, wildcards can match path segments beginning + with a dot ('.'). + + If a sequence of separator characters is given to `seps`, they will be + used to split the pattern into segments and match path separators. 
If not + given, os.path.sep and os.path.altsep (where available) are used. + """ + if not seps: + if os.path.altsep: + seps = (os.path.sep, os.path.altsep) + else: + seps = os.path.sep + escaped_seps = ''.join(map(re.escape, seps)) + any_sep = f'[{escaped_seps}]' if len(seps) > 1 else escaped_seps + not_sep = f'[^{escaped_seps}]' + if include_hidden: + one_last_segment = f'{not_sep}+' + one_segment = f'{one_last_segment}{any_sep}' + any_segments = f'(?:.+{any_sep})?' + any_last_segments = '.*' + else: + one_last_segment = f'[^{escaped_seps}.]{not_sep}*' + one_segment = f'{one_last_segment}{any_sep}' + any_segments = f'(?:{one_segment})*' + any_last_segments = f'{any_segments}(?:{one_last_segment})?' + + results = [] + parts = re.split(any_sep, pat) + last_part_idx = len(parts) - 1 + for idx, part in enumerate(parts): + if part == '*': + results.append(one_segment if idx < last_part_idx else one_last_segment) + continue + if recursive: + if part == '**': + if idx < last_part_idx: + if parts[idx + 1] != '**': + results.append(any_segments) + else: + results.append(any_last_segments) + continue + elif '**' in part: + raise ValueError("Invalid pattern: '**' can only be an entire path component") + if part: + if not include_hidden and part[0] in '*?': + results.append(r'(?!\.)') + results.extend(fnmatch._translate(part, f'{not_sep}*', not_sep)) + if idx < last_part_idx: + results.append(any_sep) + res = ''.join(results) + return fr'(?s:{res})\Z' diff --git a/Lib/gzip.py b/Lib/gzip.py index 75c6ddc3f2cffb9..177f9080dc5af8b 100644 --- a/Lib/gzip.py +++ b/Lib/gzip.py @@ -22,6 +22,7 @@ _COMPRESS_LEVEL_BEST = 9 READ_BUFFER_SIZE = 128 * 1024 +_WRITE_BUFFER_SIZE = 4 * io.DEFAULT_BUFFER_SIZE def open(filename, mode="rb", compresslevel=_COMPRESS_LEVEL_BEST, @@ -120,6 +121,21 @@ class BadGzipFile(OSError): """Exception raised in some cases for invalid gzip files.""" +class _WriteBufferStream(io.RawIOBase): + """Minimal object to pass WriteBuffer flushes into GzipFile""" + def __init__(self, gzip_file): + self.gzip_file = gzip_file + + def write(self, data): + return self.gzip_file._write_raw(data) + + def seekable(self): + return False + + def writable(self): + return True + + class GzipFile(_compression.BaseStream): """The GzipFile class simulates most of the methods of a file object with the exception of the truncate() method. @@ -184,6 +200,7 @@ def __init__(self, filename=None, mode=None, if mode is None: mode = getattr(fileobj, 'mode', 'rb') + if mode.startswith('r'): self.mode = READ raw = _GzipReader(fileobj) @@ -206,6 +223,9 @@ def __init__(self, filename=None, mode=None, zlib.DEF_MEM_LEVEL, 0) self._write_mtime = mtime + self._buffer_size = _WRITE_BUFFER_SIZE + self._buffer = io.BufferedWriter(_WriteBufferStream(self), + buffer_size=self._buffer_size) else: raise ValueError("Invalid mode: {!r}".format(mode)) @@ -231,6 +251,11 @@ def _init_write(self, filename): self.bufsize = 0 self.offset = 0 # Current file offset for seek(), tell(), etc + def tell(self): + self._check_not_closed() + self._buffer.flush() + return super().tell() + def _write_gzip_header(self, compresslevel): self.fileobj.write(b'\037\213') # magic header self.fileobj.write(b'\010') # compression method @@ -272,6 +297,10 @@ def write(self,data): if self.fileobj is None: raise ValueError("write() on closed GzipFile object") + return self._buffer.write(data) + + def _write_raw(self, data): + # Called by our self._buffer underlying WriteBufferStream. 
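A usage sketch for the glob.translate() helper added above: it only builds a regular expression, and matching is then done with re (the paths below are illustrative):

    import re
    import glob

    pattern = glob.translate('**/*.py', recursive=True, include_hidden=True)
    print(bool(re.match(pattern, 'pkg/sub/module.py')))   # True
    print(bool(re.match(pattern, 'pkg/sub/module.txt')))  # False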
if isinstance(data, (bytes, bytearray)): length = len(data) else: @@ -322,9 +351,9 @@ def close(self): fileobj = self.fileobj if fileobj is None: return - self.fileobj = None try: if self.mode == WRITE: + self._buffer.flush() fileobj.write(self.compress.flush()) write32u(fileobj, self.crc) # self.size may exceed 2 GiB, or even 4 GiB @@ -332,6 +361,7 @@ def close(self): elif self.mode == READ: self._buffer.close() finally: + self.fileobj = None myfileobj = self.myfileobj if myfileobj: self.myfileobj = None @@ -340,6 +370,7 @@ def close(self): def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH): self._check_not_closed() if self.mode == WRITE: + self._buffer.flush() # Ensure the compressor's buffer is flushed self.fileobj.write(self.compress.flush(zlib_mode)) self.fileobj.flush() @@ -370,6 +401,9 @@ def seekable(self): def seek(self, offset, whence=io.SEEK_SET): if self.mode == WRITE: + self._check_not_closed() + # Flush buffer to ensure validity of self.offset + self._buffer.flush() if whence != io.SEEK_SET: if whence == io.SEEK_CUR: offset = self.offset + offset @@ -378,10 +412,10 @@ def seek(self, offset, whence=io.SEEK_SET): if offset < self.offset: raise OSError('Negative seek in write mode') count = offset - self.offset - chunk = b'\0' * 1024 - for i in range(count // 1024): + chunk = b'\0' * self._buffer_size + for i in range(count // self._buffer_size): self.write(chunk) - self.write(b'\0' * (count % 1024)) + self.write(b'\0' * (count % self._buffer_size)) elif self.mode == READ: self._check_not_closed() return self._buffer.seek(offset, whence) diff --git a/Lib/hashlib.py b/Lib/hashlib.py index 21b5e912f3c771a..1b16441cb60ba77 100644 --- a/Lib/hashlib.py +++ b/Lib/hashlib.py @@ -92,13 +92,13 @@ def __get_builtin_constructor(name): import _md5 cache['MD5'] = cache['md5'] = _md5.md5 elif name in {'SHA256', 'sha256', 'SHA224', 'sha224'}: - import _sha256 - cache['SHA224'] = cache['sha224'] = _sha256.sha224 - cache['SHA256'] = cache['sha256'] = _sha256.sha256 + import _sha2 + cache['SHA224'] = cache['sha224'] = _sha2.sha224 + cache['SHA256'] = cache['sha256'] = _sha2.sha256 elif name in {'SHA512', 'sha512', 'SHA384', 'sha384'}: - import _sha512 - cache['SHA384'] = cache['sha384'] = _sha512.sha384 - cache['SHA512'] = cache['sha512'] = _sha512.sha512 + import _sha2 + cache['SHA384'] = cache['sha384'] = _sha2.sha384 + cache['SHA512'] = cache['sha512'] = _sha2.sha512 elif name in {'blake2b', 'blake2s'}: import _blake2 cache['blake2b'] = _blake2.blake2b diff --git a/Lib/html/__init__.py b/Lib/html/__init__.py index da0a0a3ce70eede..1543460ca33b0ae 100644 --- a/Lib/html/__init__.py +++ b/Lib/html/__init__.py @@ -25,7 +25,7 @@ def escape(s, quote=True): return s -# see http://www.w3.org/TR/html5/syntax.html#tokenizing-character-references +# see https://html.spec.whatwg.org/multipage/parsing.html#numeric-character-reference-end-state _invalid_charrefs = { 0x00: '\ufffd', # REPLACEMENT CHARACTER diff --git a/Lib/http/client.py b/Lib/http/client.py index 0720990f84e7ed3..7bb5d824bb6da43 100644 --- a/Lib/http/client.py +++ b/Lib/http/client.py @@ -172,6 +172,13 @@ def _encode(data, name='data'): "if you want to send it encoded in UTF-8." 
% (name.title(), data[err.start:err.end], name)) from None +def _strip_ipv6_iface(enc_name: bytes) -> bytes: + """Remove interface scope from IPv6 address.""" + enc_name, percent, _ = enc_name.partition(b"%") + if percent: + assert enc_name.startswith(b'['), enc_name + enc_name += b']' + return enc_name class HTTPMessage(email.message.Message): # XXX The only usage of this method is in @@ -221,8 +228,9 @@ def _read_headers(fp): break return headers -def parse_headers(fp, _class=HTTPMessage): - """Parses only RFC2822 headers from a file pointer. +def _parse_header_lines(header_lines, _class=HTTPMessage): + """ + Parses only RFC2822 headers from header lines. email Parser wants to see strings rather than bytes. But a TextIOWrapper around self.rfile would buffer too many bytes @@ -231,10 +239,15 @@ def parse_headers(fp, _class=HTTPMessage): to parse. """ - headers = _read_headers(fp) - hstring = b''.join(headers).decode('iso-8859-1') + hstring = b''.join(header_lines).decode('iso-8859-1') return email.parser.Parser(_class=_class).parsestr(hstring) +def parse_headers(fp, _class=HTTPMessage): + """Parses only RFC2822 headers from a file pointer.""" + + headers = _read_headers(fp) + return _parse_header_lines(headers, _class) + class HTTPResponse(io.BufferedIOBase): @@ -448,6 +461,7 @@ def isclosed(self): return self.fp is None def read(self, amt=None): + """Read and return the response body, or up to the next amt bytes.""" if self.fp is None: return b"" @@ -578,11 +592,7 @@ def _read_chunked(self, amt=None): assert self.chunked != _UNKNOWN value = [] try: - while True: - chunk_left = self._get_chunk_left() - if chunk_left is None: - break - + while (chunk_left := self._get_chunk_left()) is not None: if amt is not None and amt <= chunk_left: value.append(self._safe_read(amt)) self.chunk_left = chunk_left - amt @@ -861,6 +871,7 @@ def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, self._tunnel_host = None self._tunnel_port = None self._tunnel_headers = {} + self._raw_proxy_headers = None (self.host, self.port) = self._get_hostport(host, port) @@ -873,9 +884,9 @@ def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, def set_tunnel(self, host, port=None, headers=None): """Set up host and port for HTTP CONNECT tunnelling. - In a connection that uses HTTP CONNECT tunneling, the host passed to the - constructor is used as a proxy server that relays all communication to - the endpoint passed to `set_tunnel`. This done by sending an HTTP + In a connection that uses HTTP CONNECT tunnelling, the host passed to + the constructor is used as a proxy server that relays all communication + to the endpoint passed to `set_tunnel`. This done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTTP connection has been @@ -883,6 +894,13 @@ def set_tunnel(self, host, port=None, headers=None): The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request. + + As HTTP/1.1 is used for HTTP CONNECT tunnelling request, as per the RFC + (https://tools.ietf.org/html/rfc7231#section-4.3.6), a HTTP Host: + header must be provided, matching the authority-form of the request + target provided as the destination for the CONNECT request. If a + HTTP Host: header is not provided via the headers argument, one + is generated and transmitted automatically. 
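A minimal sketch of the tunnelling behaviour described above, with placeholder proxy and target hosts; no Host header is supplied, so one is generated for the CONNECT request automatically:

    import http.client

    conn = http.client.HTTPSConnection("proxy.example", 3128)
    conn.set_tunnel("www.example.org", 443)
    # CONNECT www.example.org:443 HTTP/1.1 is sent with Host: www.example.org:443
    # conn.request("GET", "/")   # establishes the tunnel, then issues the request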
""" if self.sock: @@ -890,10 +908,15 @@ def set_tunnel(self, host, port=None, headers=None): self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: - self._tunnel_headers = headers + self._tunnel_headers = headers.copy() else: self._tunnel_headers.clear() + if not any(header.lower() == "host" for header in self._tunnel_headers): + encoded_host = self._tunnel_host.encode("idna").decode("ascii") + self._tunnel_headers["Host"] = "%s:%d" % ( + encoded_host, self._tunnel_port) + def _get_hostport(self, host, port): if port is None: i = host.rfind(':') @@ -918,8 +941,9 @@ def set_debuglevel(self, level): self.debuglevel = level def _tunnel(self): - connect = b"CONNECT %s:%d HTTP/1.0\r\n" % ( - self._tunnel_host.encode("ascii"), self._tunnel_port) + connect = b"CONNECT %s:%d %s\r\n" % ( + self._tunnel_host.encode("idna"), self._tunnel_port, + self._http_vsn_str.encode("ascii")) headers = [connect] for header, value in self._tunnel_headers.items(): headers.append(f"{header}: {value}\r\n".encode("latin-1")) @@ -931,23 +955,35 @@ def _tunnel(self): del headers response = self.response_class(self.sock, method=self._method) - (version, code, message) = response._read_status() + try: + (version, code, message) = response._read_status() - if code != http.HTTPStatus.OK: - self.close() - raise OSError(f"Tunnel connection failed: {code} {message.strip()}") - while True: - line = response.fp.readline(_MAXLINE + 1) - if len(line) > _MAXLINE: - raise LineTooLong("header line") - if not line: - # for sites which EOF without sending a trailer - break - if line in (b'\r\n', b'\n', b''): - break + self._raw_proxy_headers = _read_headers(response.fp) if self.debuglevel > 0: - print('header:', line.decode()) + for header in self._raw_proxy_headers: + print('header:', header.decode()) + + if code != http.HTTPStatus.OK: + self.close() + raise OSError(f"Tunnel connection failed: {code} {message.strip()}") + + finally: + response.close() + + def get_proxy_response_headers(self): + """ + Returns a dictionary with the headers of the response + received from the proxy server to the CONNECT request + sent to set the tunnel. + + If the CONNECT request was not sent, the method returns None. 
+ """ + return ( + _parse_header_lines(self._raw_proxy_headers) + if self._raw_proxy_headers is not None + else None + ) def connect(self): """Connect to the host and port specified in __init__.""" @@ -994,14 +1030,11 @@ def send(self, data): print("send:", repr(data)) if hasattr(data, "read") : if self.debuglevel > 0: - print("sendIng a read()able") + print("sending a readable") encode = self._is_textIO(data) if encode and self.debuglevel > 0: print("encoding file using iso-8859-1") - while 1: - datablock = data.read(self.blocksize) - if not datablock: - break + while datablock := data.read(self.blocksize): if encode: datablock = datablock.encode("iso-8859-1") sys.audit("http.client.send", self, datablock) @@ -1027,14 +1060,11 @@ def _output(self, s): def _read_readable(self, readable): if self.debuglevel > 0: - print("sendIng a read()able") + print("reading a readable") encode = self._is_textIO(readable) if encode and self.debuglevel > 0: print("encoding file using iso-8859-1") - while True: - datablock = readable.read(self.blocksize) - if not datablock: - break + while datablock := readable.read(self.blocksize): if encode: datablock = datablock.encode("iso-8859-1") yield datablock @@ -1171,7 +1201,7 @@ def putrequest(self, method, url, skip_host=False, netloc_enc = netloc.encode("ascii") except UnicodeEncodeError: netloc_enc = netloc.encode("idna") - self.putheader('Host', netloc_enc) + self.putheader('Host', _strip_ipv6_iface(netloc_enc)) else: if self._tunnel_host: host = self._tunnel_host @@ -1188,8 +1218,9 @@ def putrequest(self, method, url, skip_host=False, # As per RFC 273, IPv6 address should be wrapped with [] # when used as Host header - if host.find(':') >= 0: + if ":" in host: host_enc = b'[' + host_enc + b']' + host_enc = _strip_ipv6_iface(host_enc) if port == self.default_port: self.putheader('Host', host_enc) @@ -1414,33 +1445,14 @@ class HTTPSConnection(HTTPConnection): default_port = HTTPS_PORT - # XXX Should key_file and cert_file be deprecated in favour of context? - - def __init__(self, host, port=None, key_file=None, cert_file=None, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, *, context=None, - check_hostname=None, blocksize=8192): + def __init__(self, host, port=None, + *, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, context=None, blocksize=8192): super(HTTPSConnection, self).__init__(host, port, timeout, source_address, blocksize=blocksize) - if (key_file is not None or cert_file is not None or - check_hostname is not None): - import warnings - warnings.warn("key_file, cert_file and check_hostname are " - "deprecated, use a custom context instead.", - DeprecationWarning, 2) - self.key_file = key_file - self.cert_file = cert_file if context is None: context = _create_https_context(self._http_vsn) - if check_hostname is not None: - context.check_hostname = check_hostname - if key_file or cert_file: - context.load_cert_chain(cert_file, key_file) - # cert and key file means the user wants to authenticate. - # enable TLS 1.3 PHA implicitly even for custom contexts. 
- if context.post_handshake_auth is not None: - context.post_handshake_auth = True self._context = context def connect(self): diff --git a/Lib/http/cookiejar.py b/Lib/http/cookiejar.py index 65c45e2b17dfc01..bd89370e16831e4 100644 --- a/Lib/http/cookiejar.py +++ b/Lib/http/cookiejar.py @@ -104,9 +104,9 @@ def time2isoz(t=None): """ if t is None: - dt = datetime.datetime.utcnow() + dt = datetime.datetime.now(tz=datetime.UTC) else: - dt = datetime.datetime.utcfromtimestamp(t) + dt = datetime.datetime.fromtimestamp(t, tz=datetime.UTC) return "%04d-%02d-%02d %02d:%02d:%02dZ" % ( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) @@ -122,9 +122,9 @@ def time2netscape(t=None): """ if t is None: - dt = datetime.datetime.utcnow() + dt = datetime.datetime.now(tz=datetime.UTC) else: - dt = datetime.datetime.utcfromtimestamp(t) + dt = datetime.datetime.fromtimestamp(t, tz=datetime.UTC) return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[dt.weekday()], dt.day, MONTHS[dt.month-1], dt.year, dt.hour, dt.minute, dt.second) @@ -640,7 +640,7 @@ def eff_request_host(request): """ erhn = req_host = request_host(request) - if req_host.find(".") == -1 and not IPV4_RE.search(req_host): + if "." not in req_host: erhn = req_host + ".local" return req_host, erhn @@ -1890,7 +1890,10 @@ def save(self, filename=None, ignore_discard=False, ignore_expires=False): if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) - with os.fdopen(os.open(filename, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: + with os.fdopen( + os.open(filename, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o600), + 'w', + ) as f: # There really isn't an LWP Cookies 2.0 format, but this indicates # that there is extra information in here (domain_dot and # port_spec) while still being compatible with libwww-perl, I hope. @@ -1915,9 +1918,7 @@ def _really_load(self, f, filename, ignore_discard, ignore_expires): "comment", "commenturl") try: - while 1: - line = f.readline() - if line == "": break + while (line := f.readline()) != "": if not line.startswith(header): continue line = line[len(header):].strip() @@ -2017,12 +2018,9 @@ def _really_load(self, f, filename, ignore_discard, ignore_expires): filename) try: - while 1: - line = f.readline() + while (line := f.readline()) != "": rest = {} - if line == "": break - # httponly is a cookie flag as defined in rfc6265 # when encoded in a netscape cookie file, # the line is prepended with "#HttpOnly_" @@ -2086,7 +2084,10 @@ def save(self, filename=None, ignore_discard=False, ignore_expires=False): if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) - with os.fdopen(os.open(filename, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: + with os.fdopen( + os.open(filename, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o600), + 'w', + ) as f: f.write(NETSCAPE_HEADER_TEXT) now = time.time() for cookie in self: diff --git a/Lib/http/server.py b/Lib/http/server.py index 8aee31bac2752a7..ee7a9b6aa55b887 100644 --- a/Lib/http/server.py +++ b/Lib/http/server.py @@ -2,18 +2,18 @@ Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, -and CGIHTTPRequestHandler for CGI scripts. +and (deprecated) CGIHTTPRequestHandler for CGI scripts. -It does, however, optionally implement HTTP/1.1 persistent connections, -as of version 0.3. +It does, however, optionally implement HTTP/1.1 persistent connections. 
Notes on CGIHTTPRequestHandler ------------------------------ -This class implements GET and POST requests to cgi-bin scripts. +This class is deprecated. It implements GET and POST requests to cgi-bin scripts. -If the os.fork() function is not present (e.g. on Windows), -subprocess.Popen() is used as a fallback, with slightly altered semantics. +If the os.fork() function is not present (Windows), subprocess.Popen() is used, +with slightly altered but never documented semantics. Use from a threaded +process is likely to trigger a warning at os.fork() time. In all cases, the implementation is intentionally naive -- all requests are executed synchronously. @@ -93,6 +93,7 @@ import html import http.client import io +import itertools import mimetypes import os import posixpath @@ -299,6 +300,10 @@ def parse_request(self): # - Leading zeros MUST be ignored by recipients. if len(version_number) != 2: raise ValueError + if any(not component.isdigit() for component in version_number): + raise ValueError("non digit in http version") + if any(len(component) > 10 for component in version_number): + raise ValueError("unreasonable length http version") version_number = int(version_number[0]), int(version_number[1]) except (ValueError, IndexError): self.send_error( @@ -562,6 +567,11 @@ def log_error(self, format, *args): self.log_message(format, *args) + # https://en.wikipedia.org/wiki/List_of_Unicode_characters#Control_codes + _control_char_table = str.maketrans( + {c: fr'\x{c:02x}' for c in itertools.chain(range(0x20), range(0x7f,0xa0))}) + _control_char_table[ord('\\')] = r'\\' + def log_message(self, format, *args): """Log an arbitrary message. @@ -577,12 +587,16 @@ def log_message(self, format, *args): The client ip and current date/time are prefixed to every message. + Unicode control characters are replaced with escaped hex + before writing the output to stderr. 
+ """ + message = format % args sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), - format%args)) + message.translate(self._control_char_table))) def version_string(self): """Return the server software version string.""" @@ -642,8 +656,8 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): """ - index_pages = ["index.html", "index.htm"] server_version = "SimpleHTTP/" + __version__ + index_pages = ("index.html", "index.htm") extensions_map = _encodings_map_default = { '.gz': 'application/gzip', '.Z': 'application/octet-stream', @@ -651,11 +665,9 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): '.xz': 'application/x-xz', } - def __init__(self, *args, directory=None, index_pages=None, **kwargs): + def __init__(self, *args, directory=None, **kwargs): if directory is None: directory = os.getcwd() - if index_pages is not None: - self.index_pages = index_pages self.directory = os.fspath(directory) super().__init__(*args, **kwargs) @@ -701,7 +713,7 @@ def send_head(self): return None for index in self.index_pages: index = os.path.join(path, index) - if os.path.exists(index): + if os.path.isfile(index): path = index break else: @@ -783,7 +795,7 @@ def list_directory(self, path): displaypath = urllib.parse.unquote(self.path, errors='surrogatepass') except UnicodeDecodeError: - displaypath = urllib.parse.unquote(path) + displaypath = urllib.parse.unquote(self.path) displaypath = html.escape(displaypath, quote=False) enc = sys.getfilesystemencoding() title = f'Directory listing for {displaypath}' @@ -974,6 +986,12 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler): """ + def __init__(self, *args, **kwargs): + import warnings + warnings._deprecated("http.server.CGIHTTPRequestHandler", + remove=(3, 15)) + super().__init__(*args, **kwargs) + # Determine platform specifics have_fork = hasattr(os, 'fork') diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt index e64e96f75e6cfc2..f258797c6e0fb35 100644 --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -1,9 +1,28 @@ +What's New in IDLE 3.12.0 +(since 3.11.0) +Released on 2023-10-02 +========================= + + +gh-104719: Remove IDLE's modification of tokenize.tabsize and test +other uses of tokenize data and methods. + +gh-104499: Fix completions for Tk Aqua 8.7 (currently blank). + +gh-104486: Make About print both tcl and tk versions if different, +as is expected someday. + +gh-88496 Fix IDLE test hang on macOS. + +gh-103314 Support sys.last_exc after exceptions in Shell. +Patch by Irit Katriel. + + What's New in IDLE 3.11.0 (since 3.10.0) -Released on 2022-10-03 +Released on 2022-10-24 ========================= - gh-97527: Fix a bug in the previous bugfix that caused IDLE to not start when run with 3.10.8, 3.12.0a1, and at least Microsoft Python 3.10.2288.0 installed without the Lib/test package. 3.11.0 was never diff --git a/Lib/idlelib/autocomplete_w.py b/Lib/idlelib/autocomplete_w.py index 0f835a9cc1d010b..24320b5a3bffab3 100644 --- a/Lib/idlelib/autocomplete_w.py +++ b/Lib/idlelib/autocomplete_w.py @@ -182,16 +182,11 @@ def show_window(self, comp_lists, index, complete, mode, userWantsWin): self.userwantswindow = userWantsWin self.lasttypedstart = self.start - # Put widgets in place self.autocompletewindow = acw = Toplevel(self.widget) - # Put it in a position so that it is not seen. 
- acw.wm_geometry("+10000+10000") - # Make it float + acw.withdraw() acw.wm_overrideredirect(1) try: - # This command is only needed and available on Tk >= 8.4.0 for OSX - # Without it, call tips intrude on the typing process by grabbing - # the focus. + # Prevent grabbing focus on macOS. acw.tk.call("::tk::unsupported::MacWindowStyle", "style", acw._w, "help", "noActivates") except TclError: @@ -271,6 +266,7 @@ def winconfig_event(self, event): # place acw above current line new_y -= acw_height acw.wm_geometry("+%d+%d" % (new_x, new_y)) + acw.deiconify() acw.update_idletasks() except TclError: pass diff --git a/Lib/idlelib/calltip_w.py b/Lib/idlelib/calltip_w.py index 1e0404aa49f5628..278546064adde29 100644 --- a/Lib/idlelib/calltip_w.py +++ b/Lib/idlelib/calltip_w.py @@ -25,7 +25,7 @@ def __init__(self, text_widget): text_widget: a Text widget with code for which call-tips are desired """ # Note: The Text widget will be accessible as self.anchor_widget - super(CalltipWindow, self).__init__(text_widget) + super().__init__(text_widget) self.label = self.text = None self.parenline = self.parencol = self.lastline = None @@ -54,7 +54,7 @@ def position_window(self): return self.lastline = curline self.anchor_widget.see("insert") - super(CalltipWindow, self).position_window() + super().position_window() def showtip(self, text, parenleft, parenright): """Show the call-tip, bind events which will close it and reposition it. @@ -73,7 +73,7 @@ def showtip(self, text, parenleft, parenright): self.parenline, self.parencol = map( int, self.anchor_widget.index(parenleft).split(".")) - super(CalltipWindow, self).showtip() + super().showtip() self._bind_events() @@ -143,7 +143,7 @@ def hidetip(self): # ValueError may be raised by MultiCall pass - super(CalltipWindow, self).hidetip() + super().hidetip() def _bind_events(self): """Bind event handlers.""" diff --git a/Lib/idlelib/colorizer.py b/Lib/idlelib/colorizer.py index e9f19c145c86739..b4df353012b7880 100644 --- a/Lib/idlelib/colorizer.py +++ b/Lib/idlelib/colorizer.py @@ -310,7 +310,7 @@ def recolorize_main(self): # crumb telling the next invocation to resume here # in case update tells us to leave. self.tag_add("TODO", next) - self.update() + self.update_idletasks() if self.stop_colorizing: if DEBUG: print("colorizing stopped") return diff --git a/Lib/idlelib/config.py b/Lib/idlelib/config.py index 2b09d79470b47c4..898efeb4dd15508 100644 --- a/Lib/idlelib/config.py +++ b/Lib/idlelib/config.py @@ -597,7 +597,9 @@ def GetCoreKeys(self, keySetName=None): problem getting any core binding there will be an 'ultimate last resort fallback' to the CUA-ish bindings defined here. """ + # TODO: = dict(sorted([(v-event, keys), ...]))? keyBindings={ + # vitual-event: list of key events. '<>': ['', ''], '<>': ['', ''], '<>': ['', ''], @@ -880,7 +882,7 @@ def _dump(): # htest # (not really, but ignore in coverage) line, crc = 0, 0 def sprint(obj): - global line, crc + nonlocal line, crc txt = str(obj) line += 1 crc = crc32(txt.encode(encoding='utf-8'), crc) @@ -889,7 +891,7 @@ def sprint(obj): def dumpCfg(cfg): print('\n', cfg, '\n') # Cfg has variable '0xnnnnnnnn' address. - for key in sorted(cfg.keys()): + for key in sorted(cfg): sections = cfg[key].sections() sprint(key) sprint(sections) @@ -908,4 +910,6 @@ def dumpCfg(cfg): from unittest import main main('idlelib.idle_test.test_config', verbosity=2, exit=False) - # Run revised _dump() as htest? + _dump() + # Run revised _dump() (700+ lines) as htest? More sorting. 
+ # Perhaps as window with tabs for textviews, making it config viewer. diff --git a/Lib/idlelib/configdialog.py b/Lib/idlelib/configdialog.py index cda7966d558a51c..eedf97bf74fe6af 100644 --- a/Lib/idlelib/configdialog.py +++ b/Lib/idlelib/configdialog.py @@ -211,14 +211,8 @@ def help(self): contents=help_common+help_pages.get(page, '')) def deactivate_current_config(self): - """Remove current key bindings. - Iterate over window instances defined in parent and remove - the keybindings. - """ - # Before a config is saved, some cleanup of current - # config must be done - remove the previous keybindings. - win_instances = self.parent.instance_dict.keys() - for instance in win_instances: + """Remove current key bindings in current windows.""" + for instance in self.parent.instance_dict: instance.RemoveKeybindings() def activate_config_changes(self): @@ -227,8 +221,7 @@ def activate_config_changes(self): Dynamically update the current parent window instances with some of the configuration changes. """ - win_instances = self.parent.instance_dict.keys() - for instance in win_instances: + for instance in self.parent.instance_dict: instance.ResetColorizer() instance.ResetFont() instance.set_notabs_indentwidth() @@ -583,22 +576,23 @@ def create_page_highlight(self): (*)theme_message: Label """ self.theme_elements = { - 'Normal Code or Text': ('normal', '00'), - 'Code Context': ('context', '01'), - 'Python Keywords': ('keyword', '02'), - 'Python Definitions': ('definition', '03'), - 'Python Builtins': ('builtin', '04'), - 'Python Comments': ('comment', '05'), - 'Python Strings': ('string', '06'), - 'Selected Text': ('hilite', '07'), - 'Found Text': ('hit', '08'), - 'Cursor': ('cursor', '09'), - 'Editor Breakpoint': ('break', '10'), - 'Shell Prompt': ('console', '11'), - 'Error Text': ('error', '12'), - 'Shell User Output': ('stdout', '13'), - 'Shell User Exception': ('stderr', '14'), - 'Line Number': ('linenumber', '16'), + # Display-name: internal-config-tag-name. + 'Normal Code or Text': 'normal', + 'Code Context': 'context', + 'Python Keywords': 'keyword', + 'Python Definitions': 'definition', + 'Python Builtins': 'builtin', + 'Python Comments': 'comment', + 'Python Strings': 'string', + 'Selected Text': 'hilite', + 'Found Text': 'hit', + 'Cursor': 'cursor', + 'Editor Breakpoint': 'break', + 'Shell Prompt': 'console', + 'Error Text': 'error', + 'Shell User Output': 'stdout', + 'Shell User Exception': 'stderr', + 'Line Number': 'linenumber', } self.builtin_name = tracers.add( StringVar(self), self.var_changed_builtin_name) @@ -658,7 +652,7 @@ def tem(event, elem=element): # event.widget.winfo_top_level().highlight_target.set(elem) self.highlight_target.set(elem) text.tag_bind( - self.theme_elements[element][0], '', tem) + self.theme_elements[element], '', tem) text['state'] = 'disabled' self.style.configure('frame_color_set.TFrame', borderwidth=1, relief='solid') @@ -765,8 +759,7 @@ def load_theme_cfg(self): self.builtinlist.SetMenu(item_list, item_list[0]) self.set_theme_type() # Load theme element option menu. 
- theme_names = list(self.theme_elements.keys()) - theme_names.sort(key=lambda x: self.theme_elements[x][1]) + theme_names = list(self.theme_elements) self.targetlist.SetMenu(theme_names, theme_names[0]) self.paint_theme_sample() self.set_highlight_target() @@ -893,7 +886,7 @@ def on_new_color_set(self): new_color = self.color.get() self.style.configure('frame_color_set.TFrame', background=new_color) plane = 'foreground' if self.fg_bg_toggle.get() else 'background' - sample_element = self.theme_elements[self.highlight_target.get()][0] + sample_element = self.theme_elements[self.highlight_target.get()] self.highlight_sample.tag_config(sample_element, **{plane: new_color}) theme = self.custom_name.get() theme_element = sample_element + '-' + plane @@ -1007,7 +1000,7 @@ def set_color_sample(self): frame_color_set """ # Set the color sample area. - tag = self.theme_elements[self.highlight_target.get()][0] + tag = self.theme_elements[self.highlight_target.get()] plane = 'foreground' if self.fg_bg_toggle.get() else 'background' color = self.highlight_sample.tag_cget(tag, plane) self.style.configure('frame_color_set.TFrame', background=color) @@ -1037,7 +1030,7 @@ def paint_theme_sample(self): else: # User theme theme = self.custom_name.get() for element_title in self.theme_elements: - element = self.theme_elements[element_title][0] + element = self.theme_elements[element_title] colors = idleConf.GetHighlight(theme, element) if element == 'cursor': # Cursor sample needs special painting. colors['background'] = idleConf.GetHighlight( @@ -1477,12 +1470,13 @@ def load_keys_list(self, keyset_name): reselect = True list_index = self.bindingslist.index(ANCHOR) keyset = idleConf.GetKeySet(keyset_name) - bind_names = list(keyset.keys()) + # 'set' is dict mapping virtual event to list of key events. + bind_names = list(keyset) bind_names.sort() self.bindingslist.delete(0, END) for bind_name in bind_names: key = ' '.join(keyset[bind_name]) - bind_name = bind_name[2:-2] # Trim off the angle brackets. + bind_name = bind_name[2:-2] # Trim double angle brackets. if keyset_name in changes['keys']: # Handle any unsaved changes to this key set. if bind_name in changes['keys'][keyset_name]: diff --git a/Lib/idlelib/debugger.py b/Lib/idlelib/debugger.py index ccd03e46e161479..f487b4c4b16a601 100644 --- a/Lib/idlelib/debugger.py +++ b/Lib/idlelib/debugger.py @@ -1,3 +1,20 @@ +"""Debug user code with a GUI interface to a subclass of bdb.Bdb. + +The Idb idb and Debugger gui instances each need a reference to each +other or to an rpc proxy for each other. + +If IDLE is started with '-n', so that user code and idb both run in the +IDLE process, Debugger is called without an idb. Debugger.__init__ +calls Idb with its incomplete self. Idb.__init__ stores gui and gui +then stores idb. + +If IDLE is started normally, so that user code executes in a separate +process, debugger_r.start_remote_debugger is called, executing in the +IDLE process. It calls 'start the debugger' in the remote process, +which calls Idb with a gui proxy. Then Debugger is called in the IDLE +for more. +""" + import bdb import os @@ -10,66 +27,95 @@ class Idb(bdb.Bdb): + "Supply user_line and user_exception functions for Bdb." def __init__(self, gui): - self.gui = gui # An instance of Debugger or proxy of remote. - bdb.Bdb.__init__(self) + self.gui = gui # An instance of Debugger or proxy thereof. + super().__init__() def user_line(self, frame): - if self.in_rpc_code(frame): + """Handle a user stopping or breaking at a line. 
+ + Convert frame to a string and send it to gui. + """ + if _in_rpc_code(frame): self.set_step() return - message = self.__frame2message(frame) + message = _frame2message(frame) try: self.gui.interaction(message, frame) except TclError: # When closing debugger window with [x] in 3.x pass - def user_exception(self, frame, info): - if self.in_rpc_code(frame): + def user_exception(self, frame, exc_info): + """Handle an the occurrence of an exception.""" + if _in_rpc_code(frame): self.set_step() return - message = self.__frame2message(frame) - self.gui.interaction(message, frame, info) - - def in_rpc_code(self, frame): - if frame.f_code.co_filename.count('rpc.py'): - return True - else: - prev_frame = frame.f_back - prev_name = prev_frame.f_code.co_filename - if 'idlelib' in prev_name and 'debugger' in prev_name: - # catch both idlelib/debugger.py and idlelib/debugger_r.py - # on both Posix and Windows - return False - return self.in_rpc_code(prev_frame) - - def __frame2message(self, frame): - code = frame.f_code - filename = code.co_filename - lineno = frame.f_lineno - basename = os.path.basename(filename) - message = "%s:%s" % (basename, lineno) - if code.co_name != "?": - message = "%s: %s()" % (message, code.co_name) - return message + message = _frame2message(frame) + self.gui.interaction(message, frame, exc_info) + +def _in_rpc_code(frame): + "Determine if debugger is within RPC code." + if frame.f_code.co_filename.count('rpc.py'): + return True # Skip this frame. + else: + prev_frame = frame.f_back + if prev_frame is None: + return False + prev_name = prev_frame.f_code.co_filename + if 'idlelib' in prev_name and 'debugger' in prev_name: + # catch both idlelib/debugger.py and idlelib/debugger_r.py + # on both Posix and Windows + return False + return _in_rpc_code(prev_frame) + +def _frame2message(frame): + """Return a message string for frame.""" + code = frame.f_code + filename = code.co_filename + lineno = frame.f_lineno + basename = os.path.basename(filename) + message = f"{basename}:{lineno}" + if code.co_name != "?": + message = f"{message}: {code.co_name}()" + return message class Debugger: - - vstack = vsource = vlocals = vglobals = None + """The debugger interface. + + This class handles the drawing of the debugger window and + the interactions with the underlying debugger session. + """ + vstack = None + vsource = None + vlocals = None + vglobals = None + stackviewer = None + localsviewer = None + globalsviewer = None def __init__(self, pyshell, idb=None): + """Instantiate and draw a debugger window. + + :param pyshell: An instance of the PyShell Window + :type pyshell: :class:`idlelib.pyshell.PyShell` + + :param idb: An instance of the IDLE debugger (optional) + :type idb: :class:`idlelib.debugger.Idb` + """ if idb is None: idb = Idb(self) self.pyshell = pyshell self.idb = idb # If passed, a proxy of remote instance. self.frame = None self.make_gui() - self.interacting = 0 + self.interacting = False self.nesting_level = 0 def run(self, *args): + """Run the debugger.""" # Deal with the scenario where we've already got a program running # in the debugger and we want to start another. 
If that is the case, # our second 'run' was invoked from an event dispatched not from @@ -104,12 +150,13 @@ def run(self, *args): self.root.after(100, lambda: self.run(*args)) return try: - self.interacting = 1 + self.interacting = True return self.idb.run(*args) finally: - self.interacting = 0 + self.interacting = False def close(self, event=None): + """Close the debugger and window.""" try: self.quit() except Exception: @@ -127,6 +174,7 @@ def close(self, event=None): self.top.destroy() def make_gui(self): + """Draw the debugger gui on the screen.""" pyshell = self.pyshell self.flist = pyshell.flist self.root = root = pyshell.root @@ -135,11 +183,11 @@ def make_gui(self): self.top.wm_iconname("Debug") top.wm_protocol("WM_DELETE_WINDOW", self.close) self.top.bind("", self.close) - # + self.bframe = bframe = Frame(top) self.bframe.pack(anchor="w") self.buttons = bl = [] - # + self.bcont = b = Button(bframe, text="Go", command=self.cont) bl.append(b) self.bstep = b = Button(bframe, text="Step", command=self.step) @@ -150,14 +198,14 @@ def make_gui(self): bl.append(b) self.bret = b = Button(bframe, text="Quit", command=self.quit) bl.append(b) - # + for b in bl: b.configure(state="disabled") b.pack(side="left") - # + self.cframe = cframe = Frame(bframe) self.cframe.pack(side="left") - # + if not self.vstack: self.__class__.vstack = BooleanVar(top) self.vstack.set(1) @@ -180,20 +228,20 @@ def make_gui(self): self.bglobals = Checkbutton(cframe, text="Globals", command=self.show_globals, variable=self.vglobals) self.bglobals.grid(row=1, column=1) - # + self.status = Label(top, anchor="w") self.status.pack(anchor="w") self.error = Label(top, anchor="w") self.error.pack(anchor="w", fill="x") self.errorbg = self.error.cget("background") - # + self.fstack = Frame(top, height=1) self.fstack.pack(expand=1, fill="both") self.flocals = Frame(top) self.flocals.pack(expand=1, fill="both") self.fglobals = Frame(top, height=1) self.fglobals.pack(expand=1, fill="both") - # + if self.vstack.get(): self.show_stack() if self.vlocals.get(): @@ -204,7 +252,7 @@ def make_gui(self): def interaction(self, message, frame, info=None): self.frame = frame self.status.configure(text=message) - # + if info: type, value, tb = info try: @@ -213,7 +261,8 @@ def interaction(self, message, frame, info=None): m1 = "%s" % str(type) if value is not None: try: - m1 = "%s: %s" % (m1, str(value)) + # TODO redo entire section, tries not needed. + m1 = f"{m1}: {value}" except: pass bg = "yellow" @@ -222,28 +271,28 @@ def interaction(self, message, frame, info=None): tb = None bg = self.errorbg self.error.configure(text=m1, background=bg) - # + sv = self.stackviewer if sv: stack, i = self.idb.get_stack(self.frame, tb) sv.load_stack(stack, i) - # + self.show_variables(1) - # + if self.vsource.get(): self.sync_source_line() - # + for b in self.buttons: b.configure(state="normal") - # + self.top.wakeup() # Nested main loop: Tkinter's main loop is not reentrant, so use # Tcl's vwait facility, which reenters the event loop until an - # event handler sets the variable we're waiting on + # event handler sets the variable we're waiting on. 
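A standalone sketch of the 'vwait' technique used above (the variable name ::demo_done is a placeholder): vwait keeps servicing Tk events until the named Tcl variable is set, which lets the debugger wait inside a nested, interruptible event loop:

    import tkinter as tk

    root = tk.Tk()
    root.after(1000, lambda: root.tk.call('set', '::demo_done', '1'))
    root.tk.call('vwait', '::demo_done')   # events keep running while this blocks
    print('resumed')
    root.destroy()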
self.nesting_level += 1 self.root.tk.call('vwait', '::idledebugwait') self.nesting_level -= 1 - # + for b in self.buttons: b.configure(state="disabled") self.status.configure(text="") @@ -287,8 +336,6 @@ def quit(self): def abort_loop(self): self.root.tk.call('set', '::idledebugwait', '1') - stackviewer = None - def show_stack(self): if not self.stackviewer and self.vstack.get(): self.stackviewer = sv = StackViewer(self.fstack, self.flist, self) @@ -310,9 +357,6 @@ def show_frame(self, stackitem): self.frame = stackitem[0] # lineno is stackitem[1] self.show_variables() - localsviewer = None - globalsviewer = None - def show_locals(self): lv = self.localsviewer if self.vlocals.get(): @@ -353,26 +397,32 @@ def show_variables(self, force=0): if gv: gv.load_dict(gdict, force, self.pyshell.interp.rpcclt) - def set_breakpoint_here(self, filename, lineno): + def set_breakpoint(self, filename, lineno): + """Set a filename-lineno breakpoint in the debugger. + + Called from self.load_breakpoints and EW.setbreakpoint + """ self.idb.set_break(filename, lineno) - def clear_breakpoint_here(self, filename, lineno): + def clear_breakpoint(self, filename, lineno): self.idb.clear_break(filename, lineno) def clear_file_breaks(self, filename): self.idb.clear_all_file_breaks(filename) def load_breakpoints(self): - "Load PyShellEditorWindow breakpoints into subprocess debugger" + """Load PyShellEditorWindow breakpoints into subprocess debugger.""" for editwin in self.pyshell.flist.inversedict: filename = editwin.io.filename try: for lineno in editwin.breakpoints: - self.set_breakpoint_here(filename, lineno) + self.set_breakpoint(filename, lineno) except AttributeError: continue + class StackViewer(ScrolledList): + "Code stack viewer for debugger GUI." def __init__(self, master, flist, gui): if macosx.isAquaTk(): @@ -413,12 +463,12 @@ def load_stack(self, stack, index=None): self.select(index) def popup_event(self, event): - "override base method" + "Override base method." if self.stack: return ScrolledList.popup_event(self, event) def fill_menu(self): - "override base method" + "Override base method." menu = self.menu menu.add_command(label="Go to source line", command=self.goto_source_line) @@ -426,12 +476,12 @@ def fill_menu(self): command=self.show_stack_frame) def on_select(self, index): - "override base method" + "Override base method." if 0 <= index < len(self.stack): self.gui.show_frame(self.stack[index]) def on_double(self, index): - "override base method" + "Override base method." self.show_source(index) def goto_source_line(self): @@ -456,6 +506,7 @@ def show_source(self, index): class NamespaceViewer: + "Global/local namespace viewer for debugger GUI." def __init__(self, master, title, dict=None): width = 0 @@ -508,7 +559,7 @@ def load_dict(self, dict, force=0, rpc_client=None): # There is also an obscure bug in sorted(dict) where the # interpreter gets into a loop requesting non-existing dict[0], # dict[1], dict[2], etc from the debugger_r.DictProxy. - ### + # TODO recheck above; see debugger_r 159ff, debugobj 60. 
keys_list = dict.keys() names = sorted(keys_list) ### @@ -543,6 +594,7 @@ def load_dict(self, dict, force=0, rpc_client=None): def close(self): self.frame.destroy() + if __name__ == "__main__": from unittest import main main('idlelib.idle_test.test_debugger', verbosity=2, exit=False) diff --git a/Lib/idlelib/debugobj.py b/Lib/idlelib/debugobj.py index 5a4c99788420357..032b686f379378b 100644 --- a/Lib/idlelib/debugobj.py +++ b/Lib/idlelib/debugobj.py @@ -87,13 +87,14 @@ def GetSubList(self): continue def setfunction(value, key=key, object=self.object): object[key] = value - item = make_objecttreeitem("%r:" % (key,), value, setfunction) + item = make_objecttreeitem(f"{key!r}:", value, setfunction) sublist.append(item) return sublist class DictTreeItem(SequenceTreeItem): def keys(self): - keys = list(self.object.keys()) + # TODO return sorted(self.object) + keys = list(self.object) try: keys.sort() except: diff --git a/Lib/idlelib/dynoption.py b/Lib/idlelib/dynoption.py index 9c6ffa435a1089e..d5dfc3eda13f60a 100644 --- a/Lib/idlelib/dynoption.py +++ b/Lib/idlelib/dynoption.py @@ -2,24 +2,19 @@ OptionMenu widget modified to allow dynamic menu reconfiguration and setting of highlightthickness """ -import copy - from tkinter import OptionMenu, _setit, StringVar, Button class DynOptionMenu(OptionMenu): - """ - unlike OptionMenu, our kwargs can include highlightthickness + """Add SetMenu and highlightthickness to OptionMenu. + + Highlightthickness adds space around menu button. """ def __init__(self, master, variable, value, *values, **kwargs): - # TODO copy value instead of whole dict - kwargsCopy=copy.copy(kwargs) - if 'highlightthickness' in list(kwargs.keys()): - del(kwargs['highlightthickness']) + highlightthickness = kwargs.pop('highlightthickness', None) OptionMenu.__init__(self, master, variable, value, *values, **kwargs) - self.config(highlightthickness=kwargsCopy.get('highlightthickness')) - #self.menu=self['menu'] - self.variable=variable - self.command=kwargs.get('command') + self['highlightthickness'] = highlightthickness + self.variable = variable + self.command = kwargs.get('command') def SetMenu(self,valueList,value=None): """ @@ -38,14 +33,15 @@ def _dyn_option_menu(parent): # htest # from tkinter import Toplevel # + StringVar, Button top = Toplevel(parent) - top.title("Tets dynamic option menu") + top.title("Test dynamic option menu") x, y = map(int, parent.geometry().split('+')[1:]) top.geometry("200x100+%d+%d" % (x + 250, y + 175)) top.focus_set() var = StringVar(top) var.set("Old option set") #Set the default value - dyn = DynOptionMenu(top,var, "old1","old2","old3","old4") + dyn = DynOptionMenu(top, var, "old1","old2","old3","old4", + highlightthickness=5) dyn.pack() def update(): @@ -54,5 +50,6 @@ def update(): button.pack() if __name__ == '__main__': + # Only module without unittests because of intention to replace. 
from idlelib.idle_test.htest import run run(_dyn_option_menu) diff --git a/Lib/idlelib/editor.py b/Lib/idlelib/editor.py index 08d6aa2efde22ad..69b27d0683a1045 100644 --- a/Lib/idlelib/editor.py +++ b/Lib/idlelib/editor.py @@ -38,12 +38,13 @@ def _sphinx_version(): "Format sys.version_info to produce the Sphinx version string used to install the chm docs" major, minor, micro, level, serial = sys.version_info - release = '%s%s' % (major, minor) - release += '%s' % (micro,) + # TODO remove unneeded function since .chm no longer installed + release = f'{major}{minor}' + release += f'{micro}' if level == 'candidate': - release += 'rc%s' % (serial,) + release += f'rc{serial}' elif level != 'final': - release += '%s%s' % (level[0], serial) + release += f'{level[0]}{serial}' return release @@ -445,6 +446,26 @@ def set_line_and_column(self, event=None): self.status_bar.set_label('column', 'Col: %s' % column) self.status_bar.set_label('line', 'Ln: %s' % line) + + """ Menu definitions and functions. + * self.menubar - the always visible horizontal menu bar. + * mainmenu.menudefs - a list of tuples, one for each menubar item. + Each tuple pairs a lower-case name and list of dropdown items. + Each item is a name, virtual event pair or None for separator. + * mainmenu.default_keydefs - maps events to keys. + * text.keydefs - same. + * cls.menu_specs - menubar name, titlecase display form pairs + with Alt-hotkey indicator. A subset of menudefs items. + * self.menudict - map menu name to dropdown menu. + * self.recent_files_menu - 2nd level cascade in the file cascade. + * self.wmenu_end - set in __init__ (purpose unclear). + + createmenubar, postwindowsmenu, update_menu_label, update_menu_state, + ApplyKeybings (2nd part), reset_help_menu_entries, + _extra_help_callback, update_recent_files_list, + apply_bindings, fill_menus, (other functions?) + """ + menu_specs = [ ("file", "_File"), ("edit", "_Edit"), @@ -455,8 +476,22 @@ def set_line_and_column(self, event=None): ("help", "_Help"), ] - def createmenubar(self): + """Populate the menu bar widget for the editor window. + + Each option on the menubar is itself a cascade-type Menu widget + with the menubar as the parent. The names, labels, and menu + shortcuts for the menubar items are stored in menu_specs. Each + submenu is subsequently populated in fill_menus(), except for + 'Recent Files' which is added to the File menu here. + + Instance variables: + menubar: Menu widget containing first level menu items. + menudict: Dictionary of {menuname: Menu instance} items. The keys + represent the valid menu items for this window and may be a + subset of all the menudefs available. + recent_files_menu: Menu widget contained within the 'file' menudict. + """ mbar = self.menubar self.menudict = menudict = {} for name, label in self.menu_specs: @@ -479,7 +514,10 @@ def createmenubar(self): self.reset_help_menu_entries() def postwindowsmenu(self): - # Only called when Window menu exists + """Callback to register window. + + Only called when Window menu exists. + """ menu = self.menudict['window'] end = menu.index("end") if end is None: @@ -858,8 +896,11 @@ def ResetFont(self): self.set_width() def RemoveKeybindings(self): - "Remove the keybindings before they are changed." - # Called from configdialog.py + """Remove the virtual, configurable keybindings. + + Leaves the default Tk Text keybindings. + """ + # Called from configdialog.deactivate_current_config. 
self.mainmenu.default_keydefs = keydefs = idleConf.GetCurrentKeySet() for event, keylist in keydefs.items(): self.text.event_delete(event, *keylist) @@ -870,15 +911,19 @@ def RemoveKeybindings(self): self.text.event_delete(event, *keylist) def ApplyKeybindings(self): - "Update the keybindings after they are changed" - # Called from configdialog.py + """Apply the virtual, configurable keybindings. + + Alse update hotkeys to current keyset. + """ + # Called from configdialog.activate_config_changes. self.mainmenu.default_keydefs = keydefs = idleConf.GetCurrentKeySet() self.apply_bindings() for extensionName in self.get_standard_extension_names(): xkeydefs = idleConf.GetExtensionBindings(extensionName) if xkeydefs: self.apply_bindings(xkeydefs) - #update menu accelerators + + # Update menu accelerators. menuEventDict = {} for menu in self.mainmenu.menudefs: menuEventDict[menu[0]] = {} @@ -913,25 +958,25 @@ def set_notabs_indentwidth(self): type='int') def reset_help_menu_entries(self): - "Update the additional help entries on the Help menu" + """Update the additional help entries on the Help menu.""" help_list = idleConf.GetAllExtraHelpSourcesList() helpmenu = self.menudict['help'] - # first delete the extra help entries, if any + # First delete the extra help entries, if any. helpmenu_length = helpmenu.index(END) if helpmenu_length > self.base_helpmenu_length: helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length) - # then rebuild them + # Then rebuild them. if help_list: helpmenu.add_separator() for entry in help_list: - cmd = self.__extra_help_callback(entry[1]) + cmd = self._extra_help_callback(entry[1]) helpmenu.add_command(label=entry[0], command=cmd) - # and update the menu dictionary + # And update the menu dictionary. self.menudict['help'] = helpmenu - def __extra_help_callback(self, helpfile): - "Create a callback with the helpfile value frozen at definition time" - def display_extra_help(helpfile=helpfile): + def _extra_help_callback(self, resource): + """Return a callback that loads resource (file or web page).""" + def display_extra_help(helpfile=resource): if not helpfile.startswith(('www', 'http')): helpfile = os.path.normpath(helpfile) if sys.platform[:3] == 'win': @@ -950,7 +995,7 @@ def update_recent_files_list(self, new_file=None): rf_list = [] file_path = self.recent_files_path if file_path and os.path.exists(file_path): - with open(file_path, 'r', + with open(file_path, encoding='utf_8', errors='replace') as rf_list_file: rf_list = rf_list_file.readlines() if new_file: @@ -1157,6 +1202,7 @@ def load_extension(self, name): self.text.bind(vevent, getattr(ins, methodname)) def apply_bindings(self, keydefs=None): + """Add events with keys to self.text.""" if keydefs is None: keydefs = self.mainmenu.default_keydefs text = self.text @@ -1166,9 +1212,10 @@ def apply_bindings(self, keydefs=None): text.event_add(event, *keylist) def fill_menus(self, menudefs=None, keydefs=None): - """Add appropriate entries to the menus and submenus + """Fill in dropdown menus used by this window. - Menus that are absent or None in self.menudict are ignored. + Items whose name begins with '!' become checkbuttons. + Other names indicate commands. None becomes a separator. 
""" if menudefs is None: menudefs = self.mainmenu.menudefs @@ -1181,7 +1228,7 @@ def fill_menus(self, menudefs=None, keydefs=None): if not menu: continue for entry in entrylist: - if not entry: + if entry is None: menu.add_separator() else: label, eventname = entry @@ -1217,11 +1264,13 @@ def setvar(self, name, value, vartype=None): else: raise NameError(name) - def get_var_obj(self, name, vartype=None): - var = self.tkinter_vars.get(name) + def get_var_obj(self, eventname, vartype=None): + """Return a tkinter variable instance for the event. + """ + var = self.tkinter_vars.get(eventname) if not var and vartype: - # create a Tkinter variable object with self.text as master: - self.tkinter_vars[name] = var = vartype(self.text) + # Create a Tkinter variable object. + self.tkinter_vars[eventname] = var = vartype(self.text) return var # Tk implementations of "virtual text methods" -- each platform @@ -1458,7 +1507,7 @@ def newline_and_indent_event(self, event): else: self.reindent_to(y.compute_backslash_indent()) else: - assert 0, "bogus continuation type %r" % (c,) + assert 0, f"bogus continuation type {c!r}" return "break" # This line starts a brand new statement; indent relative to @@ -1522,7 +1571,7 @@ def reindent_to(self, column): # blocks are found). def guess_indent(self): - opener, indented = IndentSearcher(self.text, self.tabwidth).run() + opener, indented = IndentSearcher(self.text).run() if opener and indented: raw, indentsmall = get_line_indent(opener, self.tabwidth) raw, indentlarge = get_line_indent(indented, self.tabwidth) @@ -1560,15 +1609,10 @@ def get_line_indent(line, tabwidth): class IndentSearcher: + "Manage initial indent guess, returned by run method." - # .run() chews over the Text widget, looking for a block opener - # and the stmt following it. Returns a pair, - # (line containing block opener, line containing stmt) - # Either or both may be None. - - def __init__(self, text, tabwidth): + def __init__(self, text): self.text = text - self.tabwidth = tabwidth self.i = self.finished = 0 self.blkopenline = self.indentedline = None @@ -1584,7 +1628,8 @@ def readline(self): def tokeneater(self, type, token, start, end, line, INDENT=tokenize.INDENT, NAME=tokenize.NAME, - OPENERS=('class', 'def', 'for', 'if', 'try', 'while')): + OPENERS=('class', 'def', 'for', 'if', 'match', 'try', + 'while', 'with')): if self.finished: pass elif type == NAME and token in OPENERS: @@ -1594,26 +1639,33 @@ def tokeneater(self, type, token, start, end, line, self.finished = 1 def run(self): - save_tabsize = tokenize.tabsize - tokenize.tabsize = self.tabwidth + """Return 2 lines containing block opener and and indent. + + Either the indent line or both may be None. + """ try: - try: - tokens = tokenize.generate_tokens(self.readline) - for token in tokens: - self.tokeneater(*token) - except (tokenize.TokenError, SyntaxError): - # since we cut off the tokenizer early, we can trigger - # spurious errors - pass - finally: - tokenize.tabsize = save_tabsize + tokens = tokenize.generate_tokens(self.readline) + for token in tokens: + self.tokeneater(*token) + except (tokenize.TokenError, SyntaxError): + # Stopping the tokenizer early can trigger spurious errors. + pass return self.blkopenline, self.indentedline ### end autoindent code ### + def prepstr(s): - # Helper to extract the underscore from a string, e.g. - # prepstr("Co_py") returns (2, "Copy"). + """Extract the underscore from a string. + + For example, prepstr("Co_py") returns (2, "Copy"). + + Args: + s: String with underscore. 
+ + Returns: + Tuple of (position of underscore, string without underscore). + """ i = s.find('_') if i >= 0: s = s[:i] + s[i+1:] @@ -1627,6 +1679,18 @@ def prepstr(s): } def get_accelerator(keydefs, eventname): + """Return a formatted string for the keybinding of an event. + + Convert the first keybinding for a given event to a form that + can be displayed as an accelerator on the menu. + + Args: + keydefs: Dictionary of valid events to keybindings. + eventname: Event to retrieve keybinding for. + + Returns: + Formatted string of the keybinding. + """ keylist = keydefs.get(eventname) # issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5 # if not keylist: @@ -1636,14 +1700,23 @@ def get_accelerator(keydefs, eventname): "<>"}): return "" s = keylist[0] + # Convert strings of the form -singlelowercase to -singleuppercase. s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s) + # Convert certain keynames to their symbol. s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s) + # Remove Key- from string. s = re.sub("Key-", "", s) - s = re.sub("Cancel","Ctrl-Break",s) # dscherer@cmu.edu + # Convert Cancel to Ctrl-Break. + s = re.sub("Cancel", "Ctrl-Break", s) # dscherer@cmu.edu + # Convert Control to Ctrl-. s = re.sub("Control-", "Ctrl-", s) + # Change - to +. s = re.sub("-", "+", s) + # Change >< to space. s = re.sub("><", " ", s) + # Remove <. s = re.sub("<", "", s) + # Remove >. s = re.sub(">", "", s) return s diff --git a/Lib/idlelib/filelist.py b/Lib/idlelib/filelist.py index 254f5caf6b81b05..f87781d2570fe06 100644 --- a/Lib/idlelib/filelist.py +++ b/Lib/idlelib/filelist.py @@ -22,7 +22,7 @@ def open(self, filename, action=None): # This can happen when bad filename is passed on command line: messagebox.showerror( "File Error", - "%r is a directory." % (filename,), + f"{filename!r} is a directory.", master=self.root) return None key = os.path.normcase(filename) @@ -90,7 +90,7 @@ def filename_changed_edit(self, edit): self.inversedict[conflict] = None messagebox.showerror( "Name Conflict", - "You now have multiple edit windows open for %r" % (filename,), + f"You now have multiple edit windows open for {filename!r}", master=self.root) self.dict[newkey] = edit self.inversedict[edit] = newkey diff --git a/Lib/idlelib/help_about.py b/Lib/idlelib/help_about.py index a0085a40b980ef5..cfa4ca781f087d0 100644 --- a/Lib/idlelib/help_about.py +++ b/Lib/idlelib/help_about.py @@ -11,15 +11,12 @@ from idlelib import textview -version = python_version() +pyver = python_version() - -def build_bits(): - "Return bits for platform." 
- if sys.platform == 'darwin': - return '64' if sys.maxsize > 2**32 else '32' - else: - return architecture()[0][:2] +if sys.platform == 'darwin': + bits = '64' if sys.maxsize > 2**32 else '32' +else: + bits = architecture()[0][:2] class AboutDialog(Toplevel): @@ -45,7 +42,7 @@ def __init__(self, parent, title=None, *, _htest=False, _utest=False): self.create_widgets() self.resizable(height=False, width=False) self.title(title or - f'About IDLE {version} ({build_bits()} bit)') + f'About IDLE {pyver} ({bits} bit)') self.transient(parent) self.grab_set() self.protocol("WM_DELETE_WINDOW", self.ok) @@ -76,8 +73,8 @@ def create_widgets(self): bg=self.bg, font=('courier', 24, 'bold')) header.grid(row=0, column=0, sticky=E, padx=10, pady=10) - tk_patchlevel = self.info_patchlevel() - ext = '.png' if tk_patchlevel >= (8, 6) else '.gif' + tkpatch = self._root().getvar('tk_patchLevel') + ext = '.png' if tkpatch >= '8.6' else '.gif' icon = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Icons', f'idle_48{ext}') self.icon_image = PhotoImage(master=self._root(), file=icon) @@ -102,13 +99,11 @@ def create_widgets(self): height=2, bg=self.bg).grid(row=8, column=0, sticky=EW, columnspan=3, padx=5, pady=5) - pyver = Label(frame_background, - text='Python version: ' + version, - fg=self.fg, bg=self.bg) - pyver.grid(row=9, column=0, sticky=W, padx=10, pady=0) - tkver = Label(frame_background, text=f'Tk version: {tk_patchlevel}', - fg=self.fg, bg=self.bg) - tkver.grid(row=9, column=1, sticky=W, padx=2, pady=0) + tclver = str(self.info_patchlevel()) + tkver = ' and ' + tkpatch if tkpatch != tclver else '' + versions = f"Python {pyver} with tcl/tk {tclver}{tkver}" + vers = Label(frame_background, text=versions, fg=self.fg, bg=self.bg) + vers.grid(row=9, column=0, sticky=W, padx=10, pady=0) py_buttons = Frame(frame_background, bg=self.bg) py_buttons.grid(row=10, column=0, columnspan=2, sticky=NSEW) self.py_license = Button(py_buttons, text='License', width=8, @@ -128,10 +123,10 @@ def create_widgets(self): height=2, bg=self.bg).grid(row=11, column=0, sticky=EW, columnspan=3, padx=5, pady=5) - idlever = Label(frame_background, - text='IDLE version: ' + version, + idle = Label(frame_background, + text='IDLE', fg=self.fg, bg=self.bg) - idlever.grid(row=12, column=0, sticky=W, padx=10, pady=0) + idle.grid(row=12, column=0, sticky=W, padx=10, pady=0) idle_buttons = Frame(frame_background, bg=self.bg) idle_buttons.grid(row=13, column=0, columnspan=3, sticky=NSEW) self.readme = Button(idle_buttons, text='README', width=8, diff --git a/Lib/idlelib/idle_test/README.txt b/Lib/idlelib/idle_test/README.txt index 566bfd179fdf1b4..cacd06db873d039 100644 --- a/Lib/idlelib/idle_test/README.txt +++ b/Lib/idlelib/idle_test/README.txt @@ -146,14 +146,17 @@ python -m unittest -v idlelib.idle_test python -m test -v -ugui test_idle python -m test.test_idle -The idle tests are 'discovered' by -idlelib.idle_test.__init__.load_tests, which is also imported into -test.test_idle. Normally, neither file should be changed when working on -individual test modules. The third command runs unittest indirectly -through regrtest. The same happens when the entire test suite is run -with 'python -m test'. So that command must work for buildbots to stay -green. Idle tests must not disturb the environment in a way that makes -other tests fail (issue 18081). +IDLE tests are 'discovered' by idlelib.idle_test.__init__.load_tests +when this is imported into test.test_idle. 
Normally, neither file +should be changed when working on individual test modules. The third +command runs unittest indirectly through regrtest. The same happens when +the entire test suite is run with 'python -m test'. So that command must +work for buildbots to stay green. IDLE tests must not disturb the +environment in a way that makes other tests fail (GH-62281). + +To test subsets of modules, see idlelib.idle_test.__init__. This +can be used to find refleaks or possible sources of "Theme changed" +tcl messages (GH-71383). To run an individual Testcase or test method, extend the dotted name given to unittest on the command line or use the test -m option. The diff --git a/Lib/idlelib/idle_test/__init__.py b/Lib/idlelib/idle_test/__init__.py index ad067b405cab671..79b5d102dd7da5e 100644 --- a/Lib/idlelib/idle_test/__init__.py +++ b/Lib/idlelib/idle_test/__init__.py @@ -1,17 +1,27 @@ -'''idlelib.idle_test is a private implementation of test.test_idle, -which tests the IDLE application as part of the stdlib test suite. -Run IDLE tests alone with "python -m test.test_idle". -Starting with Python 3.6, IDLE requires tcl/tk 8.5 or later. +"""idlelib.idle_test implements test.test_idle, which tests the IDLE +application as part of the stdlib test suite. +Run IDLE tests alone with "python -m test.test_idle (-v)". This package and its contained modules are subject to change and any direct use is at your own risk. -''' +""" from os.path import dirname +# test_idle imports load_tests for test discovery (default all). +# To run subsets of idlelib module tests, insert '[]' after '_'. +# Example: insert '[ac]' for modules beginning with 'a' or 'c'. +# Additional .discover/.addTest pairs with separate inserts work. +# Example: pairs with 'c' and 'g' test c* files and grep. + def load_tests(loader, standard_tests, pattern): this_dir = dirname(__file__) top_dir = dirname(dirname(this_dir)) - package_tests = loader.discover(start_dir=this_dir, pattern='test*.py', + module_tests = loader.discover(start_dir=this_dir, + pattern='test_*.py', # Insert here. top_level_dir=top_dir) - standard_tests.addTests(package_tests) + standard_tests.addTests(module_tests) +## module_tests = loader.discover(start_dir=this_dir, +## pattern='test_*.py', # Insert here. +## top_level_dir=top_dir) +## standard_tests.addTests(module_tests) return standard_tests diff --git a/Lib/idlelib/idle_test/test_browser.py b/Lib/idlelib/idle_test/test_browser.py index 343d50a6e37bd65..6cfea3888cd6a90 100644 --- a/Lib/idlelib/idle_test/test_browser.py +++ b/Lib/idlelib/idle_test/test_browser.py @@ -170,8 +170,7 @@ def test_ondoubleclick(self, fopen): with mock.patch('os.path.exists', return_value=True): mbt.OnDoubleClick() - fopen.assert_called() - fopen.called_with(fname) + fopen.assert_called_once_with(fname) class ChildBrowserTreeItemTest(unittest.TestCase): diff --git a/Lib/idlelib/idle_test/test_config.py b/Lib/idlelib/idle_test/test_config.py index 697fda527968ded..6d75cf7aa67dcce 100644 --- a/Lib/idlelib/idle_test/test_config.py +++ b/Lib/idlelib/idle_test/test_config.py @@ -85,8 +85,8 @@ def test_load_nothing(self): self.assertEqual(parser.sections(), []) def test_load_file(self): - # Borrow test/cfgparser.1 from test_configparser. - config_path = findfile('cfgparser.1') + # Borrow test/configdata/cfgparser.1 from test_configparser. 
+ config_path = findfile('cfgparser.1', subdir='configdata') parser = config.IdleConfParser(config_path) parser.Load() @@ -191,7 +191,7 @@ def setUpClass(cls): idle_dir = os.path.abspath(sys.path[0]) for ctype in conf.config_types: config_path = os.path.join(idle_dir, '../config-%s.def' % ctype) - with open(config_path, 'r') as f: + with open(config_path) as f: cls.config_string[ctype] = f.read() cls.orig_warn = config._warn @@ -274,8 +274,8 @@ def test_create_config_handlers(self): conf.CreateConfigHandlers() # Check keys are equal - self.assertCountEqual(conf.defaultCfg.keys(), conf.config_types) - self.assertCountEqual(conf.userCfg.keys(), conf.config_types) + self.assertCountEqual(conf.defaultCfg, conf.config_types) + self.assertCountEqual(conf.userCfg, conf.config_types) # Check conf parser are correct type for default_parser in conf.defaultCfg.values(): @@ -294,8 +294,8 @@ def test_create_config_handlers(self): def test_load_cfg_files(self): conf = self.new_config(_utest=True) - # Borrow test/cfgparser.1 from test_configparser. - config_path = findfile('cfgparser.1') + # Borrow test/configdata/cfgparser.1 from test_configparser. + config_path = findfile('cfgparser.1', subdir='configdata') conf.defaultCfg['foo'] = config.IdleConfParser(config_path) conf.userCfg['foo'] = config.IdleUserConfParser(config_path) diff --git a/Lib/idlelib/idle_test/test_configdialog.py b/Lib/idlelib/idle_test/test_configdialog.py index e5d5b4013fca571..6f8518a9bb19d01 100644 --- a/Lib/idlelib/idle_test/test_configdialog.py +++ b/Lib/idlelib/idle_test/test_configdialog.py @@ -430,7 +430,7 @@ def test_highlight_target_text_mouse(self): def tag_to_element(elem): for element, tag in d.theme_elements.items(): - elem[tag[0]] = element + elem[tag] = element def click_it(start): x, y, dx, dy = hs.bbox(start) diff --git a/Lib/idlelib/idle_test/test_debugger.py b/Lib/idlelib/idle_test/test_debugger.py index 35efb3411c73b54..d1c9638dd5d711e 100644 --- a/Lib/idlelib/idle_test/test_debugger.py +++ b/Lib/idlelib/idle_test/test_debugger.py @@ -1,16 +1,286 @@ -"Test debugger, coverage 19%" +"""Test debugger, coverage 66% + +Try to make tests pass with draft bdbx, which may replace bdb in 3.13+. +""" from idlelib import debugger -import unittest -from test.support import requires -requires('gui') +from collections import namedtuple +from textwrap import dedent from tkinter import Tk +from test.support import requires +import unittest +from unittest import mock +from unittest.mock import Mock, patch + +"""A test python script for the debug tests.""" +TEST_CODE = dedent(""" + i = 1 + i += 2 + if i == 3: + print(i) + """) + + +class MockFrame: + "Minimal mock frame." + + def __init__(self, code, lineno): + self.f_code = code + self.f_lineno = lineno + + +class IdbTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.gui = Mock() + cls.idb = debugger.Idb(cls.gui) + + # Create test and code objects to simulate a debug session. + code_obj = compile(TEST_CODE, 'idlelib/file.py', mode='exec') + frame1 = MockFrame(code_obj, 1) + frame1.f_back = None + frame2 = MockFrame(code_obj, 2) + frame2.f_back = frame1 + cls.frame = frame2 + cls.msg = 'file.py:2: ()' + + def test_init(self): + self.assertIs(self.idb.gui, self.gui) + # Won't test super call since two Bdbs are very different. + + def test_user_line(self): + # Test that .user_line() creates a string message for a frame. 
+ self.gui.interaction = Mock() + self.idb.user_line(self.frame) + self.gui.interaction.assert_called_once_with(self.msg, self.frame) + + def test_user_exception(self): + # Test that .user_exception() creates a string message for a frame. + exc_info = (type(ValueError), ValueError(), None) + self.gui.interaction = Mock() + self.idb.user_exception(self.frame, exc_info) + self.gui.interaction.assert_called_once_with( + self.msg, self.frame, exc_info) + + +class FunctionTest(unittest.TestCase): + # Test module functions together. + + def test_functions(self): + rpc_obj = compile(TEST_CODE,'rpc.py', mode='exec') + rpc_frame = MockFrame(rpc_obj, 2) + rpc_frame.f_back = rpc_frame + self.assertTrue(debugger._in_rpc_code(rpc_frame)) + self.assertEqual(debugger._frame2message(rpc_frame), + 'rpc.py:2: ()') + + code_obj = compile(TEST_CODE, 'idlelib/debugger.py', mode='exec') + code_frame = MockFrame(code_obj, 1) + code_frame.f_back = None + self.assertFalse(debugger._in_rpc_code(code_frame)) + self.assertEqual(debugger._frame2message(code_frame), + 'debugger.py:1: ()') + + code_frame.f_back = code_frame + self.assertFalse(debugger._in_rpc_code(code_frame)) + code_frame.f_back = rpc_frame + self.assertTrue(debugger._in_rpc_code(code_frame)) + + +class DebuggerTest(unittest.TestCase): + "Tests for Debugger that do not need a real root." + + @classmethod + def setUpClass(cls): + cls.pyshell = Mock() + cls.pyshell.root = Mock() + cls.idb = Mock() + with patch.object(debugger.Debugger, 'make_gui'): + cls.debugger = debugger.Debugger(cls.pyshell, cls.idb) + cls.debugger.root = Mock() + + def test_cont(self): + self.debugger.cont() + self.idb.set_continue.assert_called_once() + + def test_step(self): + self.debugger.step() + self.idb.set_step.assert_called_once() + + def test_quit(self): + self.debugger.quit() + self.idb.set_quit.assert_called_once() + + def test_next(self): + with patch.object(self.debugger, 'frame') as frame: + self.debugger.next() + self.idb.set_next.assert_called_once_with(frame) + + def test_ret(self): + with patch.object(self.debugger, 'frame') as frame: + self.debugger.ret() + self.idb.set_return.assert_called_once_with(frame) + + def test_clear_breakpoint(self): + self.debugger.clear_breakpoint('test.py', 4) + self.idb.clear_break.assert_called_once_with('test.py', 4) + + def test_clear_file_breaks(self): + self.debugger.clear_file_breaks('test.py') + self.idb.clear_all_file_breaks.assert_called_once_with('test.py') + + def test_set_load_breakpoints(self): + # Test the .load_breakpoints() method calls idb. + FileIO = namedtuple('FileIO', 'filename') + + class MockEditWindow(object): + def __init__(self, fn, breakpoints): + self.io = FileIO(fn) + self.breakpoints = breakpoints + + self.pyshell.flist = Mock() + self.pyshell.flist.inversedict = ( + MockEditWindow('test1.py', [4, 4]), + MockEditWindow('test2.py', [13, 44, 45]), + ) + self.debugger.set_breakpoint('test0.py', 1) + self.idb.set_break.assert_called_once_with('test0.py', 1) + self.debugger.load_breakpoints() # Call set_breakpoint 5 times. + self.idb.set_break.assert_has_calls( + [mock.call('test0.py', 1), + mock.call('test1.py', 4), + mock.call('test1.py', 4), + mock.call('test2.py', 13), + mock.call('test2.py', 44), + mock.call('test2.py', 45)]) + + def test_sync_source_line(self): + # Test that .sync_source_line() will set the flist.gotofileline with fixed frame. 
+ test_code = compile(TEST_CODE, 'test_sync.py', 'exec') + test_frame = MockFrame(test_code, 1) + self.debugger.frame = test_frame + + self.debugger.flist = Mock() + with patch('idlelib.debugger.os.path.exists', return_value=True): + self.debugger.sync_source_line() + self.debugger.flist.gotofileline.assert_called_once_with('test_sync.py', 1) + + +class DebuggerGuiTest(unittest.TestCase): + """Tests for debugger.Debugger that need tk root. + + close needs debugger.top set in make_gui. + """ + + @classmethod + def setUpClass(cls): + requires('gui') + cls.root = root = Tk() + root.withdraw() + cls.pyshell = Mock() + cls.pyshell.root = root + cls.idb = Mock() +# stack tests fail with debugger here. +## cls.debugger = debugger.Debugger(cls.pyshell, cls.idb) +## cls.debugger.root = root +## # real root needed for real make_gui +## # run, interacting, abort_loop + + @classmethod + def tearDownClass(cls): + cls.root.destroy() + del cls.root + + def setUp(self): + self.debugger = debugger.Debugger(self.pyshell, self.idb) + self.debugger.root = self.root + # real root needed for real make_gui + # run, interacting, abort_loop + + def test_run_debugger(self): + self.debugger.run(1, 'two') + self.idb.run.assert_called_once_with(1, 'two') + self.assertEqual(self.debugger.interacting, 0) + + def test_close(self): + # Test closing the window in an idle state. + self.debugger.close() + self.pyshell.close_debugger.assert_called_once() + + def test_show_stack(self): + self.debugger.show_stack() + self.assertEqual(self.debugger.stackviewer.gui, self.debugger) + + def test_show_stack_with_frame(self): + test_frame = MockFrame(None, None) + self.debugger.frame = test_frame + + # Reset the stackviewer to force it to be recreated. + self.debugger.stackviewer = None + self.idb.get_stack.return_value = ([], 0) + self.debugger.show_stack() + + # Check that the newly created stackviewer has the test gui as a field. + self.assertEqual(self.debugger.stackviewer.gui, self.debugger) + self.idb.get_stack.assert_called_once_with(test_frame, None) + + +class StackViewerTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + requires('gui') + cls.root = Tk() + cls.root.withdraw() + + @classmethod + def tearDownClass(cls): + cls.root.destroy() + del cls.root + + def setUp(self): + self.code = compile(TEST_CODE, 'test_stackviewer.py', 'exec') + self.stack = [ + (MockFrame(self.code, 1), 1), + (MockFrame(self.code, 2), 2) + ] + # Create a stackviewer and load the test stack. + self.sv = debugger.StackViewer(self.root, None, None) + self.sv.load_stack(self.stack) + + def test_init(self): + # Test creation of StackViewer. + gui = None + flist = None + master_window = self.root + sv = debugger.StackViewer(master_window, flist, gui) + self.assertTrue(hasattr(sv, 'stack')) + + def test_load_stack(self): + # Test the .load_stack() method against a fixed test stack. + # Check the test stack is assigned and the list contains the repr of them. + self.assertEqual(self.sv.stack, self.stack) + self.assertTrue('?.(), line 1:' in self.sv.get(0)) + self.assertEqual(self.sv.get(1), '?.(), line 2: ') + + def test_show_source(self): + # Test the .show_source() method against a fixed test stack. + # Patch out the file list to monitor it + self.sv.flist = Mock() + # Patch out isfile to pretend file exists. 
+ with patch('idlelib.debugger.os.path.isfile', return_value=True) as isfile: + self.sv.show_source(1) + isfile.assert_called_once_with('test_stackviewer.py') + self.sv.flist.open.assert_called_once_with('test_stackviewer.py') + class NameSpaceTest(unittest.TestCase): @classmethod def setUpClass(cls): + requires('gui') cls.root = Tk() cls.root.withdraw() @@ -23,7 +293,5 @@ def test_init(self): debugger.NamespaceViewer(self.root, 'Test') -# Other classes are Idb, Debugger, and StackViewer. - if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/Lib/idlelib/idle_test/test_debugobj.py b/Lib/idlelib/idle_test/test_debugobj.py index 131ce22b8bb69b0..90ace4e1bc4f9ef 100644 --- a/Lib/idlelib/idle_test/test_debugobj.py +++ b/Lib/idlelib/idle_test/test_debugobj.py @@ -37,7 +37,7 @@ def test_isexpandable(self): def test_keys(self): ti = debugobj.SequenceTreeItem('label', 'abc') - self.assertEqual(list(ti.keys()), [0, 1, 2]) + self.assertEqual(list(ti.keys()), [0, 1, 2]) # keys() is a range. class DictTreeItemTest(unittest.TestCase): @@ -50,7 +50,7 @@ def test_isexpandable(self): def test_keys(self): ti = debugobj.DictTreeItem('label', {1:1, 0:0, 2:2}) - self.assertEqual(ti.keys(), [0, 1, 2]) + self.assertEqual(ti.keys(), [0, 1, 2]) # keys() is a sorted list. if __name__ == '__main__': diff --git a/Lib/idlelib/idle_test/test_editor.py b/Lib/idlelib/idle_test/test_editor.py index fdb47abf43fb775..9296a6d235fbbe3 100644 --- a/Lib/idlelib/idle_test/test_editor.py +++ b/Lib/idlelib/idle_test/test_editor.py @@ -1,10 +1,10 @@ -"Test editor, coverage 35%." +"Test editor, coverage 53%." from idlelib import editor import unittest from collections import namedtuple from test.support import requires -from tkinter import Tk +from tkinter import Tk, Text Editor = editor.EditorWindow @@ -31,7 +31,7 @@ def test_init(self): e._close() -class TestGetLineIndent(unittest.TestCase): +class GetLineIndentTest(unittest.TestCase): def test_empty_lines(self): for tabwidth in [1, 2, 4, 6, 8]: for line in ['', '\n']: @@ -181,6 +181,36 @@ def test_indent_and_newline_event(self): eq(get('1.0', 'end'), ' def f1(self, a,\n \n return a + b\n') +class IndentSearcherTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + requires('gui') + cls.root = Tk() + cls.root.withdraw() + cls.text = Text(cls.root) + + @classmethod + def tearDownClass(cls): + cls.root.destroy() + del cls.root + + def test_searcher(self): + text = self.text + searcher = (self.text) + test_info = (# text, (block, indent)) + ("", (None, None)), + ("[1,", (None, None)), # TokenError + ("if 1:\n", ('if 1:\n', None)), + ("if 1:\n 2\n 3\n", ('if 1:\n', ' 2\n')), + ) + for code, expected_pair in test_info: + with self.subTest(code=code): + insert(text, code) + actual_pair = editor.IndentSearcher(text).run() + self.assertEqual(actual_pair, expected_pair) + + class RMenuTest(unittest.TestCase): @classmethod diff --git a/Lib/idlelib/idle_test/test_help_about.py b/Lib/idlelib/idle_test/test_help_about.py index b915535acac0cc4..8b79487b15d4cdd 100644 --- a/Lib/idlelib/idle_test/test_help_about.py +++ b/Lib/idlelib/idle_test/test_help_about.py @@ -36,7 +36,7 @@ def tearDownClass(cls): del cls.root def test_build_bits(self): - self.assertIn(help_about.build_bits(), ('32', '64')) + self.assertIn(help_about.bits, ('32', '64')) def test_dialog_title(self): """Test about dialog title""" @@ -107,7 +107,7 @@ def test_dialog_title(self): """Test about dialog title""" self.assertEqual(self.dialog.title(), f'About IDLE {python_version()}' - f' 
({help_about.build_bits()} bit)') + f' ({help_about.bits} bit)') class CloseTest(unittest.TestCase): diff --git a/Lib/idlelib/idle_test/test_iomenu.py b/Lib/idlelib/idle_test/test_iomenu.py index 2fb836dba216721..e0642cf0cabef04 100644 --- a/Lib/idlelib/idle_test/test_iomenu.py +++ b/Lib/idlelib/idle_test/test_iomenu.py @@ -8,6 +8,12 @@ from idlelib import util from idlelib.idle_test.mock_idle import Func +# Fail if either tokenize.open and t.detect_encoding does not exist. +# These are used in loadfile and encode. +# Also used in pyshell.MI.execfile and runscript.tabnanny. +from tokenize import open, detect_encoding +# Remove when we have proper tests that use both. + class IOBindingTest(unittest.TestCase): diff --git a/Lib/idlelib/idle_test/test_outwin.py b/Lib/idlelib/idle_test/test_outwin.py index e347bfca7f191a4..d6e85ad674417c1 100644 --- a/Lib/idlelib/idle_test/test_outwin.py +++ b/Lib/idlelib/idle_test/test_outwin.py @@ -159,7 +159,7 @@ def test_file_line_helper(self, mock_open): for line, expected_output in test_lines: self.assertEqual(flh(line), expected_output) if expected_output: - mock_open.assert_called_with(expected_output[0], 'r') + mock_open.assert_called_with(expected_output[0]) if __name__ == '__main__': diff --git a/Lib/idlelib/idle_test/test_sidebar.py b/Lib/idlelib/idle_test/test_sidebar.py index 049531e66a414e0..fb52b3a0179553d 100644 --- a/Lib/idlelib/idle_test/test_sidebar.py +++ b/Lib/idlelib/idle_test/test_sidebar.py @@ -57,7 +57,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.editwin.per.close() - cls.root.update() + cls.root.update_idletasks() cls.root.destroy() del cls.text, cls.text_frame, cls.editwin, cls.root @@ -328,7 +328,7 @@ def test_scroll(self): self.assertEqual(self.linenumber.sidebar_text.index('@0,0'), '11.0') # Generate a mouse-wheel event and make sure it scrolled up or down. - # The meaning of the "delta" is OS-dependant, so this just checks for + # The meaning of the "delta" is OS-dependent, so this just checks for # any change. self.linenumber.sidebar_text.event_generate('', x=0, y=0, @@ -691,11 +691,12 @@ def test_mousewheel(self): self.assertIsNotNone(text.dlineinfo(text.index(f'{last_lineno}.0'))) # Scroll up using the event. - # The meaning delta is platform-dependant. + # The meaning of delta is platform-dependent. delta = -1 if sys.platform == 'darwin' else 120 sidebar.canvas.event_generate('', x=0, y=0, delta=delta) yield - self.assertIsNone(text.dlineinfo(text.index(f'{last_lineno}.0'))) + if sys.platform != 'darwin': # .update_idletasks() does not work. + self.assertIsNone(text.dlineinfo(text.index(f'{last_lineno}.0'))) # Scroll back down using the event. 
sidebar.canvas.event_generate('', x=0, y=0) diff --git a/Lib/idlelib/idle_test/test_stackviewer.py b/Lib/idlelib/idle_test/test_stackviewer.py index 98f53f9537bb257..55f510382bf4c36 100644 --- a/Lib/idlelib/idle_test/test_stackviewer.py +++ b/Lib/idlelib/idle_test/test_stackviewer.py @@ -6,19 +6,12 @@ from tkinter import Tk from idlelib.tree import TreeNode, ScrolledCanvas -import sys class StackBrowserTest(unittest.TestCase): @classmethod def setUpClass(cls): - svs = stackviewer.sys - try: - abc - except NameError: - svs.last_type, svs.last_value, svs.last_traceback = ( - sys.exc_info()) requires('gui') cls.root = Tk() @@ -26,8 +19,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - svs = stackviewer.sys - del svs.last_traceback, svs.last_type, svs.last_value cls.root.update_idletasks() ## for id in cls.root.tk.call('after', 'info'): @@ -36,7 +27,10 @@ def tearDownClass(cls): del cls.root def test_init(self): - sb = stackviewer.StackBrowser(self.root) + try: + abc + except NameError as exc: + sb = stackviewer.StackBrowser(self.root, exc) isi = self.assertIsInstance isi(stackviewer.sc, ScrolledCanvas) isi(stackviewer.item, stackviewer.StackTreeItem) diff --git a/Lib/idlelib/macosx.py b/Lib/idlelib/macosx.py index 89b645702d334d0..2ea02ec04d661ac 100644 --- a/Lib/idlelib/macosx.py +++ b/Lib/idlelib/macosx.py @@ -174,9 +174,8 @@ def overrideRootMenu(root, flist): del mainmenu.menudefs[-3][1][0:2] menubar = Menu(root) root.configure(menu=menubar) - menudict = {} - menudict['window'] = menu = Menu(menubar, name='window', tearoff=0) + menu = Menu(menubar, name='window', tearoff=0) menubar.add_cascade(label='Window', menu=menu, underline=0) def postwindowsmenu(menu=menu): @@ -226,8 +225,7 @@ def help_dialog(event=None): if isCarbonTk(): # for Carbon AquaTk, replace the default Tk apple menu - menudict['application'] = menu = Menu(menubar, name='apple', - tearoff=0) + menu = Menu(menubar, name='apple', tearoff=0) menubar.add_cascade(label='IDLE', menu=menu) mainmenu.menudefs.insert(0, ('application', [ diff --git a/Lib/idlelib/multicall.py b/Lib/idlelib/multicall.py index dc02001292fc14e..0200f445cc9340b 100644 --- a/Lib/idlelib/multicall.py +++ b/Lib/idlelib/multicall.py @@ -52,9 +52,9 @@ _modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META) # a dictionary to map a modifier name into its number -_modifier_names = dict([(name, number) +_modifier_names = {name: number for number in range(len(_modifiers)) - for name in _modifiers[number]]) + for name in _modifiers[number]} # In 3.4, if no shell window is ever open, the underlying Tk widget is # destroyed before .__del__ methods here are called. 
The following @@ -134,7 +134,7 @@ def nbits(n): return nb statelist = [] for state in states: - substates = list(set(state & x for x in states)) + substates = list({state & x for x in states}) substates.sort(key=nbits, reverse=True) statelist.append(substates) return statelist @@ -258,9 +258,9 @@ def __del__(self): _binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4) # A dictionary to map a type name into its number -_type_names = dict([(name, number) +_type_names = {name: number for number in range(len(_types)) - for name in _types[number]]) + for name in _types[number]} _keysym_re = re.compile(r"^\w+$") _button_re = re.compile(r"^[1-5]$") diff --git a/Lib/idlelib/outwin.py b/Lib/idlelib/outwin.py index 5ab08bbaf4bc955..610031e26f1dffe 100644 --- a/Lib/idlelib/outwin.py +++ b/Lib/idlelib/outwin.py @@ -42,7 +42,7 @@ def file_line_helper(line): if match: filename, lineno = match.group(1, 2) try: - f = open(filename, "r") + f = open(filename) f.close() break except OSError: @@ -112,7 +112,7 @@ def write(self, s, tags=(), mark="insert"): assert isinstance(s, str) self.text.insert(mark, s, tags) self.text.see(mark) - self.text.update() + self.text.update_idletasks() return len(s) def writelines(self, lines): diff --git a/Lib/idlelib/pyshell.py b/Lib/idlelib/pyshell.py index e68233a5a4131e4..00b3732a7bc4eb4 100755 --- a/Lib/idlelib/pyshell.py +++ b/Lib/idlelib/pyshell.py @@ -133,8 +133,8 @@ class PyShellEditorWindow(EditorWindow): def __init__(self, *args): self.breakpoints = [] EditorWindow.__init__(self, *args) - self.text.bind("<>", self.set_breakpoint_here) - self.text.bind("<>", self.clear_breakpoint_here) + self.text.bind("<>", self.set_breakpoint_event) + self.text.bind("<>", self.clear_breakpoint_event) self.text.bind("<>", self.flist.open_shell) #TODO: don't read/write this from/to .idlerc when testing @@ -155,8 +155,8 @@ def filename_changed_hook(old_hook=self.io.filename_change_hook, ("Copy", "<>", "rmenu_check_copy"), ("Paste", "<>", "rmenu_check_paste"), (None, None, None), - ("Set Breakpoint", "<>", None), - ("Clear Breakpoint", "<>", None) + ("Set Breakpoint", "<>", None), + ("Clear Breakpoint", "<>", None) ] def color_breakpoint_text(self, color=True): @@ -181,11 +181,11 @@ def set_breakpoint(self, lineno): self.breakpoints.append(lineno) try: # update the subprocess debugger debug = self.flist.pyshell.interp.debugger - debug.set_breakpoint_here(filename, lineno) + debug.set_breakpoint(filename, lineno) except: # but debugger may not be active right now.... 
pass - def set_breakpoint_here(self, event=None): + def set_breakpoint_event(self, event=None): text = self.text filename = self.io.filename if not filename: @@ -194,7 +194,7 @@ def set_breakpoint_here(self, event=None): lineno = int(float(text.index("insert"))) self.set_breakpoint(lineno) - def clear_breakpoint_here(self, event=None): + def clear_breakpoint_event(self, event=None): text = self.text filename = self.io.filename if not filename: @@ -209,7 +209,7 @@ def clear_breakpoint_here(self, event=None): "insert lineend +1char") try: debug = self.flist.pyshell.interp.debugger - debug.clear_breakpoint_here(filename, lineno) + debug.clear_breakpoint(filename, lineno) except: pass @@ -249,7 +249,7 @@ def store_file_breaks(self): breaks = self.breakpoints filename = self.io.filename try: - with open(self.breakpointPath, "r") as fp: + with open(self.breakpointPath) as fp: lines = fp.readlines() except OSError: lines = [] @@ -279,7 +279,7 @@ def restore_file_breaks(self): if filename is None: return if os.path.isfile(self.breakpointPath): - with open(self.breakpointPath, "r") as fp: + with open(self.breakpointPath) as fp: lines = fp.readlines() for line in lines: if line.startswith(filename + '='): @@ -441,7 +441,7 @@ def build_subprocess_arglist(self): # run from the IDLE source directory. del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc', default=False, type='bool') - command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,) + command = f"__import__('idlelib.run').run.main({del_exitf!r})" return [sys.executable] + w + ["-c", command, str(self.port)] def start_subprocess(self): @@ -574,9 +574,9 @@ def transfer_path(self, with_cwd=False): self.runcommand("""if 1: import sys as _sys - _sys.path = %r + _sys.path = {!r} del _sys - \n""" % (path,)) + \n""".format(path)) active_seq = None @@ -703,14 +703,14 @@ def stuffsource(self, source): def prepend_syspath(self, filename): "Prepend sys.path with file's directory if not already included" self.runcommand("""if 1: - _filename = %r + _filename = {!r} import sys as _sys from os.path import dirname as _dirname _dir = _dirname(_filename) if not _dir in _sys.path: _sys.path.insert(0, _dir) del _filename, _sys, _dirname, _dir - \n""" % (filename,)) + \n""".format(filename)) def showsyntaxerror(self, filename=None): """Override Interactive Interpreter method: Use Colorizing @@ -747,10 +747,11 @@ def showtraceback(self): self.tkconsole.open_stack_viewer() def checklinecache(self): - c = linecache.cache - for key in list(c.keys()): + "Remove keys other than ''." + cache = linecache.cache + for key in list(cache): # Iterate list because mutate cache. 
if key[:1] + key[-1:] != "<>": - del c[key] + del cache[key] def runcommand(self, code): "Run the code without invoking the debugger" @@ -1363,19 +1364,19 @@ def runit(self): self.text.tag_remove(self.user_input_insert_tags, index_before) self.shell_sidebar.update_sidebar() - def open_stack_viewer(self, event=None): + def open_stack_viewer(self, event=None): # -n mode only if self.interp.rpcclt: return self.interp.remote_stack_viewer() + + from idlelib.stackviewer import StackBrowser try: - sys.last_traceback + StackBrowser(self.root, sys.last_exc, self.flist) except: messagebox.showerror("No stack trace", "There is no stack trace yet.\n" - "(sys.last_traceback is not defined)", + "(sys.last_exc is not defined)", parent=self.text) - return - from idlelib.stackviewer import StackBrowser - StackBrowser(self.root, self.flist) + return None def view_restart_mark(self, event=None): self.text.see("iomark") @@ -1536,7 +1537,7 @@ def main(): try: opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:") except getopt.error as msg: - print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr) + print(f"Error: {msg}\n{usage_msg}", file=sys.stderr) sys.exit(2) for o, a in opts: if o == '-c': @@ -1668,9 +1669,9 @@ def main(): if cmd or script: shell.interp.runcommand("""if 1: import sys as _sys - _sys.argv = %r + _sys.argv = {!r} del _sys - \n""" % (sys.argv,)) + \n""".format(sys.argv)) if cmd: shell.interp.execsource(cmd) elif script: diff --git a/Lib/idlelib/redirector.py b/Lib/idlelib/redirector.py index 9ab34c5acfb22c6..4928340e98df686 100644 --- a/Lib/idlelib/redirector.py +++ b/Lib/idlelib/redirector.py @@ -47,9 +47,8 @@ def __init__(self, widget): tk.createcommand(w, self.dispatch) def __repr__(self): - return "%s(%s<%s>)" % (self.__class__.__name__, - self.widget.__class__.__name__, - self.widget._w) + w = self.widget + return f"{self.__class__.__name__,}({w.__class__.__name__}<{w._w}>)" def close(self): "Unregister operations and revert redirection created by .__init__." 
@@ -143,8 +142,7 @@ def __init__(self, redir, operation): self.orig_and_operation = (redir.orig, operation) def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, - self.redir, self.operation) + return f"{self.__class__.__name__,}({self.redir!r}, {self.operation!r})" def __call__(self, *args): return self.tk_call(self.orig_and_operation + args) diff --git a/Lib/idlelib/rpc.py b/Lib/idlelib/rpc.py index 62eec84c9c8d090..b08b80c90045511 100644 --- a/Lib/idlelib/rpc.py +++ b/Lib/idlelib/rpc.py @@ -174,7 +174,7 @@ def localcall(self, seq, request): except TypeError: return ("ERROR", "Bad request format") if oid not in self.objtable: - return ("ERROR", "Unknown object id: %r" % (oid,)) + return ("ERROR", f"Unknown object id: {oid!r}") obj = self.objtable[oid] if methodname == "__methods__": methods = {} @@ -185,7 +185,7 @@ def localcall(self, seq, request): _getattributes(obj, attributes) return ("OK", attributes) if not hasattr(obj, methodname): - return ("ERROR", "Unsupported method name: %r" % (methodname,)) + return ("ERROR", f"Unsupported method name: {methodname!r}") method = getattr(obj, methodname) try: if how == 'CALL': diff --git a/Lib/idlelib/run.py b/Lib/idlelib/run.py index 577c49eb67b20d1..53e80a9b42801fb 100644 --- a/Lib/idlelib/run.py +++ b/Lib/idlelib/run.py @@ -52,13 +52,13 @@ def idle_formatwarning(message, category, filename, lineno, line=None): """Format warnings the IDLE way.""" s = "\nWarning (from warnings module):\n" - s += ' File \"%s\", line %s\n' % (filename, lineno) + s += f' File \"{filename}\", line {lineno}\n' if line is None: line = linecache.getline(filename, lineno) line = line.strip() if line: s += " %s\n" % line - s += "%s: %s\n" % (category.__name__, message) + s += f"{category.__name__}: {message}\n" return s def idle_showwarning_subproc( @@ -140,11 +140,12 @@ def main(del_exitfunc=False): capture_warnings(True) sys.argv[:] = [""] - sockthread = threading.Thread(target=manage_socket, - name='SockThread', - args=((LOCALHOST, port),)) - sockthread.daemon = True - sockthread.start() + threading.Thread(target=manage_socket, + name='SockThread', + args=((LOCALHOST, port),), + daemon=True, + ).start() + while True: try: if exit_now: @@ -239,6 +240,7 @@ def print_exception(): efile = sys.stderr typ, val, tb = excinfo = sys.exc_info() sys.last_type, sys.last_value, sys.last_traceback = excinfo + sys.last_exc = val seen = set() def print_exc(typ, exc, tb): @@ -621,7 +623,7 @@ def get_the_completion_list(self, what, mode): def stackviewer(self, flist_oid=None): if self.user_exc_info: - typ, val, tb = self.user_exc_info + _, exc, tb = self.user_exc_info else: return None flist = None @@ -629,9 +631,8 @@ def stackviewer(self, flist_oid=None): flist = self.rpchandler.get_remote_proxy(flist_oid) while tb and tb.tb_frame.f_globals["__name__"] in ["rpc", "run"]: tb = tb.tb_next - sys.last_type = typ - sys.last_value = val - item = stackviewer.StackTreeItem(flist, tb) + exc.__traceback__ = tb + item = stackviewer.StackTreeItem(exc, flist) return debugobj_r.remote_object_tree_item(item) diff --git a/Lib/idlelib/sidebar.py b/Lib/idlelib/sidebar.py index fb1084dbf3f18b6..166c04342907f99 100644 --- a/Lib/idlelib/sidebar.py +++ b/Lib/idlelib/sidebar.py @@ -25,10 +25,9 @@ def get_end_linenumber(text): def get_displaylines(text, index): """Display height, in lines, of a logical line in a Tk text widget.""" - res = text.count(f"{index} linestart", - f"{index} lineend", - "displaylines") - return res[0] if res else 0 + return text.count(f"{index} linestart", + 
f"{index} lineend", + "displaylines") def get_widget_padding(widget): """Get the total padding of a Tk widget, including its border.""" diff --git a/Lib/idlelib/stackviewer.py b/Lib/idlelib/stackviewer.py index 94ffb4eff4dd26e..f8e60fd9b6d818a 100644 --- a/Lib/idlelib/stackviewer.py +++ b/Lib/idlelib/stackviewer.py @@ -1,33 +1,32 @@ +# Rename to stackbrowser or possibly consolidate with browser. + import linecache import os -import sys import tkinter as tk from idlelib.debugobj import ObjectTreeItem, make_objecttreeitem from idlelib.tree import TreeNode, TreeItem, ScrolledCanvas -def StackBrowser(root, flist=None, tb=None, top=None): +def StackBrowser(root, exc, flist=None, top=None): global sc, item, node # For testing. if top is None: top = tk.Toplevel(root) sc = ScrolledCanvas(top, bg="white", highlightthickness=0) sc.frame.pack(expand=1, fill="both") - item = StackTreeItem(flist, tb) + item = StackTreeItem(exc, flist) node = TreeNode(sc.canvas, None, item) node.expand() class StackTreeItem(TreeItem): - def __init__(self, flist=None, tb=None): + def __init__(self, exc, flist=None): self.flist = flist - self.stack = self.get_stack(tb) - self.text = self.get_exception() + self.stack = self.get_stack(None if exc is None else exc.__traceback__) + self.text = f"{type(exc).__name__}: {str(exc)}" def get_stack(self, tb): - if tb is None: - tb = sys.last_traceback stack = [] if tb and tb.tb_frame is None: tb = tb.tb_next @@ -36,17 +35,7 @@ def get_stack(self, tb): tb = tb.tb_next return stack - def get_exception(self): - type = sys.last_type - value = sys.last_value - if hasattr(type, "__name__"): - type = type.__name__ - s = str(type) - if value is not None: - s = s + ": " + str(value) - return s - - def GetText(self): + def GetText(self): # Titlecase names are overrides. return self.text def GetSubList(self): @@ -112,7 +101,7 @@ def IsExpandable(self): def GetSubList(self): sublist = [] - for key in self.object.keys(): + for key in self.object.keys(): # self.object not necessarily dict. try: value = self.object[key] except KeyError: @@ -133,19 +122,9 @@ def _stack_viewer(parent): # htest # flist = PyShellFileList(top) try: # to obtain a traceback object intentional_name_error - except NameError: - exc_type, exc_value, exc_tb = sys.exc_info() - # inject stack trace to sys - sys.last_type = exc_type - sys.last_value = exc_value - sys.last_traceback = exc_tb - - StackBrowser(top, flist=flist, top=top, tb=exc_tb) - - # restore sys to original state - del sys.last_type - del sys.last_value - del sys.last_traceback + except NameError as e: + StackBrowser(top, e, flist=flist, top=top) + if __name__ == '__main__': from unittest import main diff --git a/Lib/idlelib/textview.py b/Lib/idlelib/textview.py index a66c1a4309a617f..23f0f4cb5027ec3 100644 --- a/Lib/idlelib/textview.py +++ b/Lib/idlelib/textview.py @@ -169,7 +169,7 @@ def view_file(parent, title, filename, encoding, modal=True, wrap='word', with contents of the file. """ try: - with open(filename, 'r', encoding=encoding) as file: + with open(filename, encoding=encoding) as file: contents = file.read() except OSError: showerror(title='File Load Error', diff --git a/Lib/idlelib/tooltip.py b/Lib/idlelib/tooltip.py index d714318dae8ef19..3983690dd411771 100644 --- a/Lib/idlelib/tooltip.py +++ b/Lib/idlelib/tooltip.py @@ -92,7 +92,7 @@ def __init__(self, anchor_widget, hover_delay=1000): e.g. after hovering over the anchor widget with the mouse for enough time. 
""" - super(OnHoverTooltipBase, self).__init__(anchor_widget) + super().__init__(anchor_widget) self.hover_delay = hover_delay self._after_id = None @@ -107,7 +107,7 @@ def __del__(self): self.anchor_widget.unbind("