                '</th></tr>')
         # in case you wonder about the seemingly redundant <div> here: since the
         # content in the other cell also is wrapped in a div, some browsers in
         # some configurations seem to mess up the formatting...
-        yield 0, (
-            '<table class="%stable">' % self.cssclass + filename_tr +
-            '<tr><td class="linenos"><div class="linenodiv"><pre>' +
-            ls + '</pre></div></td><td class="code">'
-        )
+        yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
+                  '<tr><td class="linenos"><div class="linenodiv"><pre>' +
+                  ls + '</pre></div></td><td class="code">')
+        yield 0, '<div>'
         yield 0, dummyoutfile.getvalue()
+        yield 0, '</div>'
         yield 0, '</td></tr></table>'
+
def _wrap_inlinelinenos(self, inner):
# need a list of lines since we need the width of a single number :(
@@ -729,7 +730,7 @@ def _wrap_inlinelinenos(self, inner):
st = self.linenostep
num = self.linenostart
mw = len(str(len(inner_lines) + num - 1))
- la = self.lineanchors
+ anchor_name = self.lineanchors or self.linespans
aln = self.anchorlinenos
nocls = self.noclasses
@@ -759,7 +760,7 @@ def _wrap_inlinelinenos(self, inner):
linenos = line
if aln:
-                yield 1, ('<a href="#%s-%d">%s</a>' % (la, num, linenos) +
+                yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) +
inner_line)
else:
yield 1, linenos + inner_line
@@ -933,16 +934,20 @@ def _highlight_lines(self, tokensource):
else:
yield 1, value
- def wrap(self, source, outfile):
+ def wrap(self, source):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
+
+ output = source
if self.wrapcode:
- return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
- else:
- return self._wrap_div(self._wrap_pre(source))
+ output = self._wrap_code(output)
+
+ output = self._wrap_pre(output)
+
+ return output
def format_unencoded(self, tokensource, outfile):
"""
@@ -973,9 +978,10 @@ def format_unencoded(self, tokensource, outfile):
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
- source = self.wrap(source, outfile)
+ source = self.wrap(source)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
+ source = self._wrap_div(source)
if self.full:
source = self._wrap_full(source, outfile)
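
The wrap() refactor above is the behavioral core of this pygments bump: wrap()
loses its outfile parameter and no longer applies _wrap_div itself; instead
format_unencoded adds the outer <div> after table line numbers are wrapped, so
the div now encloses the line-number table as well. A minimal sketch of what
this means for subclasses (MyFormatter is an illustrative name, not part of
the patch):

    from pip._vendor.pygments.formatters.html import HtmlFormatter

    class MyFormatter(HtmlFormatter):
        def wrap(self, source):  # previously wrap(self, source, outfile)
            # _wrap_div is no longer applied here; format_unencoded
            # wraps the final stream in the outer <div>.
            return super().wrap(source)
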
diff --git a/src/pip/_vendor/pygments/formatters/img.py b/src/pip/_vendor/pygments/formatters/img.py
index 978559237a6..2cc0b2b5bd7 100644
--- a/src/pip/_vendor/pygments/formatters/img.py
+++ b/src/pip/_vendor/pygments/formatters/img.py
@@ -4,7 +4,7 @@
Formatter for Pixmap output.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/formatters/irc.py b/src/pip/_vendor/pygments/formatters/irc.py
index ad986e04077..3f6d52deb4c 100644
--- a/src/pip/_vendor/pygments/formatters/irc.py
+++ b/src/pip/_vendor/pygments/formatters/irc.py
@@ -4,7 +4,7 @@
Formatter for IRC output
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/formatters/latex.py b/src/pip/_vendor/pygments/formatters/latex.py
index 60e98921f9b..4a7375a5ceb 100644
--- a/src/pip/_vendor/pygments/formatters/latex.py
+++ b/src/pip/_vendor/pygments/formatters/latex.py
@@ -4,7 +4,7 @@
Formatter for LaTeX fancyvrb output.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -159,6 +159,8 @@ class LatexFormatter(Formatter):
\PY{k}{pass}
\end{Verbatim}
+ Wrapping can be disabled using the `nowrap` option.
+
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
@@ -171,6 +173,11 @@ class LatexFormatter(Formatter):
Additional options accepted:
+ `nowrap`
+ If set to ``True``, don't wrap the tokens at all, not even inside a
+ ``\begin{Verbatim}`` environment. This disables most other options
+ (default: ``False``).
+
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
@@ -248,6 +255,7 @@ class LatexFormatter(Formatter):
def __init__(self, **options):
Formatter.__init__(self, **options)
+ self.nowrap = get_bool_opt(options, 'nowrap', False)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
@@ -334,18 +342,19 @@ def format_unencoded(self, tokensource, outfile):
realoutfile = outfile
outfile = StringIO()
- outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
- if self.linenos:
- start, step = self.linenostart, self.linenostep
- outfile.write(',numbers=left' +
- (start and ',firstnumber=%d' % start or '') +
- (step and ',stepnumber=%d' % step or ''))
- if self.mathescape or self.texcomments or self.escapeinside:
- outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
- '\\catcode`\\_=8\\relax}')
- if self.verboptions:
- outfile.write(',' + self.verboptions)
- outfile.write(']\n')
+ if not self.nowrap:
+ outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
+ if self.linenos:
+ start, step = self.linenostart, self.linenostep
+ outfile.write(',numbers=left' +
+ (start and ',firstnumber=%d' % start or '') +
+ (step and ',stepnumber=%d' % step or ''))
+ if self.mathescape or self.texcomments or self.escapeinside:
+ outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
+ '\\catcode`\\_=8\\relax}')
+ if self.verboptions:
+ outfile.write(',' + self.verboptions)
+ outfile.write(']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
@@ -408,7 +417,8 @@ def format_unencoded(self, tokensource, outfile):
else:
outfile.write(value)
- outfile.write('\\end{' + self.envname + '}\n')
+ if not self.nowrap:
+ outfile.write('\\end{' + self.envname + '}\n')
if self.full:
encoding = self.encoding or 'utf8'
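
The nowrap option added above mirrors the HTML formatter's option of the same
name. A short sketch of the intended usage, assuming the standard pygments
entry points:

    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.lexers import PythonLexer
    from pip._vendor.pygments.formatters import LatexFormatter

    # Emits only the \PY{...} macros, without the surrounding
    # \begin{Verbatim} ... \end{Verbatim} environment.
    body = highlight('pass', PythonLexer(), LatexFormatter(nowrap=True))
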
diff --git a/src/pip/_vendor/pygments/formatters/other.py b/src/pip/_vendor/pygments/formatters/other.py
index 4fdf5e72baf..1e39cd42a8c 100644
--- a/src/pip/_vendor/pygments/formatters/other.py
+++ b/src/pip/_vendor/pygments/formatters/other.py
@@ -4,7 +4,7 @@
Other formatters: NullFormatter, RawTokenFormatter.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/formatters/pangomarkup.py b/src/pip/_vendor/pygments/formatters/pangomarkup.py
index b0657a5f0fd..bd00866b8b9 100644
--- a/src/pip/_vendor/pygments/formatters/pangomarkup.py
+++ b/src/pip/_vendor/pygments/formatters/pangomarkup.py
@@ -4,7 +4,7 @@
Formatter for Pango markup output.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/formatters/rtf.py b/src/pip/_vendor/pygments/formatters/rtf.py
index b4b0acab9b5..4114d1688c3 100644
--- a/src/pip/_vendor/pygments/formatters/rtf.py
+++ b/src/pip/_vendor/pygments/formatters/rtf.py
@@ -4,7 +4,7 @@
A formatter that generates RTF files.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/formatters/svg.py b/src/pip/_vendor/pygments/formatters/svg.py
index d4de51f0e66..075150a4b58 100644
--- a/src/pip/_vendor/pygments/formatters/svg.py
+++ b/src/pip/_vendor/pygments/formatters/svg.py
@@ -4,7 +4,7 @@
Formatter for SVG output.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/formatters/terminal.py b/src/pip/_vendor/pygments/formatters/terminal.py
index ae660224ae5..e0bda16a236 100644
--- a/src/pip/_vendor/pygments/formatters/terminal.py
+++ b/src/pip/_vendor/pygments/formatters/terminal.py
@@ -4,7 +4,7 @@
Formatter for terminal output with ANSI sequences.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/formatters/terminal256.py b/src/pip/_vendor/pygments/formatters/terminal256.py
index b5eab140056..201b3c32832 100644
--- a/src/pip/_vendor/pygments/formatters/terminal256.py
+++ b/src/pip/_vendor/pygments/formatters/terminal256.py
@@ -10,7 +10,7 @@
Formatter version 1.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/lexer.py b/src/pip/_vendor/pygments/lexer.py
index b6d4b238845..ec7f4de32cf 100644
--- a/src/pip/_vendor/pygments/lexer.py
+++ b/src/pip/_vendor/pygments/lexer.py
@@ -4,7 +4,7 @@
Base lexer classes.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -76,6 +76,9 @@ class Lexer(metaclass=LexerMeta):
#: Name of the lexer
name = None
+ #: URL of the language specification/definition
+ url = None
+
#: Shortcuts for the lexer
aliases = []
@@ -618,7 +621,7 @@ def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
- ``stack`` is the inital stack (default: ``['root']``)
+ ``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
@@ -738,7 +741,7 @@ def get_tokens_unprocessed(self, text=None, context=None):
elif isinstance(new_state, int):
# see RegexLexer for why this check is made
if abs(new_state) >= len(ctx.stack):
- del ctx.state[1:]
+ del ctx.stack[1:]
else:
del ctx.stack[new_state:]
elif new_state == '#push':
@@ -792,7 +795,7 @@ def do_insertions(insertions, tokens):
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
- # first iteration. store the postition of first item
+ # first iteration. store the position of first item
if realpos is None:
realpos = i
oldi = 0
diff --git a/src/pip/_vendor/pygments/lexers/__init__.py b/src/pip/_vendor/pygments/lexers/__init__.py
index 6981b8d1187..3f404e4f747 100644
--- a/src/pip/_vendor/pygments/lexers/__init__.py
+++ b/src/pip/_vendor/pygments/lexers/__init__.py
@@ -4,7 +4,7 @@
Pygments lexers.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -47,14 +47,18 @@ def _load_lexers(module_name):
_lexer_cache[cls.name] = cls
-def get_all_lexers():
+def get_all_lexers(plugins=True):
"""Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all know lexers.
+
+ If *plugins* is true (the default), plugin lexers supplied by entrypoints
+ are also returned. Otherwise, only builtin ones are considered.
"""
for item in LEXERS.values():
yield item[1:]
- for lexer in find_plugin_lexers():
- yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
+ if plugins:
+ for lexer in find_plugin_lexers():
+ yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
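
The new plugins flag makes it possible to enumerate only the builtin lexers.
A sketch:

    from pip._vendor.pygments.lexers import get_all_lexers

    # Skips lexers registered through entrypoints; with plugins=True
    # (the default) the old behavior is preserved.
    for name, aliases, filenames, mimetypes in get_all_lexers(plugins=False):
        print(name, aliases)
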
diff --git a/src/pip/_vendor/pygments/lexers/_mapping.py b/src/pip/_vendor/pygments/lexers/_mapping.py
index c972e3a2b3b..44dbfe67717 100644
--- a/src/pip/_vendor/pygments/lexers/_mapping.py
+++ b/src/pip/_vendor/pygments/lexers/_mapping.py
@@ -2,7 +2,7 @@
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
- Lexer mapping definitions. This file is generated by itself. Everytime
+ Lexer mapping definitions. This file is generated by itself. Every time
you change something on a builtin lexer definition, run this script from
the lexers folder to update it.
@@ -19,7 +19,7 @@
'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
- 'AdaLexer': ('pip._vendor.pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
+ 'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
@@ -59,6 +59,7 @@
'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)),
'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
+ 'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')),
'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
@@ -73,6 +74,7 @@
'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
+ 'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()),
'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
@@ -93,7 +95,7 @@
'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
- 'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
+ 'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
@@ -104,7 +106,8 @@
'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
- 'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
+ 'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)),
+ 'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
@@ -113,7 +116,7 @@
'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
- 'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
+ 'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
@@ -195,6 +198,7 @@
'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
+ 'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()),
'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
@@ -203,7 +207,7 @@
'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
- 'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
+ 'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
@@ -229,12 +233,13 @@
'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
- 'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
+ 'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
+ 'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()),
'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()),
@@ -244,6 +249,7 @@
'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
+ 'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()),
'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
@@ -273,9 +279,11 @@
'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
+ 'MCFunctionLexer': ('pip._vendor.pygments.lexers.mcfunction', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
+ 'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),
'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
@@ -379,8 +387,11 @@
'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
+ 'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
+ 'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()),
'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
+ 'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()),
'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
@@ -408,7 +419,7 @@
'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)),
'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
- 'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot',), ('text/x-robotframework',)),
+ 'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)),
'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
@@ -419,6 +430,7 @@
'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
+ 'SNBTLexer': ('pip._vendor.pygments.lexers.mcfunction', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()),
@@ -465,6 +477,7 @@
'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()),
'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
+ 'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)),
'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
@@ -490,8 +503,10 @@
'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
+ 'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()),
'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
+ 'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()),
'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
@@ -513,8 +528,9 @@
'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
+ 'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()),
'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
- 'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
+ 'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)),
'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
@@ -523,7 +539,7 @@
'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
- 'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
+ 'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
@@ -563,7 +579,7 @@
content = fp.read()
# replace crnl to nl for Windows.
#
- # Note that, originally, contributers should keep nl of master
+ # Note that, originally, contributors should keep nl of master
# repository, for example by using some kind of automatic
# management EOL, like `EolExtension
    # <https://www.mercurial-scm.org/wiki/EolExtension>`_.
diff --git a/src/pip/_vendor/pygments/lexers/python.py b/src/pip/_vendor/pygments/lexers/python.py
index 0e7bab9945a..6bc7a78b6a4 100644
--- a/src/pip/_vendor/pygments/lexers/python.py
+++ b/src/pip/_vendor/pygments/lexers/python.py
@@ -4,7 +4,7 @@
Lexers for Python and related languages.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -27,7 +27,7 @@
class PythonLexer(RegexLexer):
"""
-    For `Python <http://www.python.org>`_ source code (version 3.x).
+ For Python source code (version 3.x).
.. versionadded:: 0.10
@@ -37,6 +37,7 @@ class PythonLexer(RegexLexer):
"""
name = 'Python'
+ url = 'http://www.python.org'
aliases = ['python', 'py', 'sage', 'python3', 'py3']
filenames = [
'*.py',
@@ -61,8 +62,6 @@ class PythonLexer(RegexLexer):
mimetypes = ['text/x-python', 'application/x-python',
'text/x-python3', 'application/x-python3']
- flags = re.MULTILINE | re.UNICODE
-
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
def innerstring_rules(ttype):
@@ -214,7 +213,7 @@ def fstring_rules(ttype):
(r'(^[ \t]*)' # at beginning of line + possible indentation
r'(match|case)\b' # a possible keyword
r'(?![ \t]*(?:' # not followed by...
- r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't
+ r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't
r'|'.join(keyword.kwlist) + r')\b))', # pattern matching
bygroups(Text, Keyword), 'soft-keywords-inner'),
],
@@ -259,7 +258,8 @@ def fstring_rules(ttype):
'InterruptedError', 'IsADirectoryError', 'NotADirectoryError',
'PermissionError', 'ProcessLookupError', 'TimeoutError',
# others new in Python 3
- 'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError'),
+ 'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError',
+ 'EncodingWarning'),
             prefix=r'(?<!\.)', suffix=r'\b'),
-    For `Python 2.x <http://www.python.org>`_ source code.
+ For Python 2.x source code.
.. versionchanged:: 2.5
This class has been renamed from ``PythonLexer``. ``PythonLexer`` now
@@ -412,6 +412,7 @@ class Python2Lexer(RegexLexer):
"""
name = 'Python 2.x'
+ url = 'http://www.python.org'
aliases = ['python2', 'py2']
filenames = [] # now taken over by PythonLexer (3.x)
mimetypes = ['text/x-python2', 'application/x-python2']
@@ -816,12 +817,13 @@ class Python2TracebackLexer(RegexLexer):
class CythonLexer(RegexLexer):
"""
-    For Pyrex and `Cython <http://cython.org>`_ source code.
+ For Pyrex and Cython source code.
.. versionadded:: 1.1
"""
name = 'Cython'
+ url = 'http://cython.org'
aliases = ['cython', 'pyx', 'pyrex']
filenames = ['*.pyx', '*.pxd', '*.pxi']
mimetypes = ['text/x-cython', 'application/x-cython']
@@ -995,7 +997,7 @@ class CythonLexer(RegexLexer):
class DgLexer(RegexLexer):
"""
-    Lexer for `dg <http://pirate.github.io/dg/>`_,
+ Lexer for dg,
a functional and object-oriented programming language
running on the CPython 3 VM.
@@ -1100,6 +1102,7 @@ class NumPyLexer(PythonLexer):
"""
name = 'NumPy'
+ url = 'https://numpy.org/'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
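
The url class attribute introduced in this release (see the Lexer base-class
change above) is plain metadata, queryable like name or aliases. A sketch:

    from pip._vendor.pygments.lexers.python import PythonLexer

    print(PythonLexer.name)  # 'Python'
    print(PythonLexer.url)   # 'http://www.python.org'
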
diff --git a/src/pip/_vendor/pygments/modeline.py b/src/pip/_vendor/pygments/modeline.py
index 047d86d6be6..43630835ca6 100644
--- a/src/pip/_vendor/pygments/modeline.py
+++ b/src/pip/_vendor/pygments/modeline.py
@@ -4,7 +4,7 @@
A simple modeline parser (based on pymodeline).
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/plugin.py b/src/pip/_vendor/pygments/plugin.py
index 958ca21a3e2..a0431bf720e 100644
--- a/src/pip/_vendor/pygments/plugin.py
+++ b/src/pip/_vendor/pygments/plugin.py
@@ -31,7 +31,7 @@
yourfilter = yourfilter:YourFilter
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXER_ENTRY_POINT = 'pygments.lexers'
diff --git a/src/pip/_vendor/pygments/regexopt.py b/src/pip/_vendor/pygments/regexopt.py
index cb2c8e21a9e..ae0079199b9 100644
--- a/src/pip/_vendor/pygments/regexopt.py
+++ b/src/pip/_vendor/pygments/regexopt.py
@@ -5,7 +5,7 @@
An algorithm that generates optimized regexes for matching long lists of
literal strings.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/scanner.py b/src/pip/_vendor/pygments/scanner.py
index 5f32a22c3c0..d47ed4828a0 100644
--- a/src/pip/_vendor/pygments/scanner.py
+++ b/src/pip/_vendor/pygments/scanner.py
@@ -11,7 +11,7 @@
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
@@ -72,7 +72,7 @@ def test(self, pattern):
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
- and related fields. The return value is a boolen that
+ and related fields. The return value is a boolean that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
diff --git a/src/pip/_vendor/pygments/sphinxext.py b/src/pip/_vendor/pygments/sphinxext.py
index 2412dee0ac3..c41bd49dd45 100644
--- a/src/pip/_vendor/pygments/sphinxext.py
+++ b/src/pip/_vendor/pygments/sphinxext.py
@@ -5,7 +5,7 @@
Sphinx extension to generate automatic documentation of lexers,
formatters and filters.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/style.py b/src/pip/_vendor/pygments/style.py
index 6b7469c4fc5..84abbc20599 100644
--- a/src/pip/_vendor/pygments/style.py
+++ b/src/pip/_vendor/pygments/style.py
@@ -4,7 +4,7 @@
Basic style object.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/styles/__init__.py b/src/pip/_vendor/pygments/styles/__init__.py
index e437d170ed7..951ca1794db 100644
--- a/src/pip/_vendor/pygments/styles/__init__.py
+++ b/src/pip/_vendor/pygments/styles/__init__.py
@@ -4,7 +4,7 @@
Contains built-in styles.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/token.py b/src/pip/_vendor/pygments/token.py
index 9013acb709c..8aee88a8350 100644
--- a/src/pip/_vendor/pygments/token.py
+++ b/src/pip/_vendor/pygments/token.py
@@ -4,7 +4,7 @@
Basic token types and the standard tokens.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/src/pip/_vendor/pygments/unistring.py b/src/pip/_vendor/pygments/unistring.py
index 2872985c14e..2e3c80869d9 100644
--- a/src/pip/_vendor/pygments/unistring.py
+++ b/src/pip/_vendor/pygments/unistring.py
@@ -7,7 +7,7 @@
Inspired by chartypes_create.py from the MoinMoin project.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -122,7 +122,7 @@ def _handle_runs(char_list): # pragma: no cover
c = chr(code)
cat = unicodedata.category(c)
if ord(c) == 0xdc00:
- # Hack to avoid combining this combining with the preceeding high
+ # Hack to avoid combining this combining with the preceding high
# surrogate, 0xdbff, when doing a repr.
c = '\\' + c
elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
diff --git a/src/pip/_vendor/pygments/util.py b/src/pip/_vendor/pygments/util.py
index 5d6ddc3f5bc..8032962dc99 100644
--- a/src/pip/_vendor/pygments/util.py
+++ b/src/pip/_vendor/pygments/util.py
@@ -4,7 +4,7 @@
Utility functions.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -23,7 +23,7 @@
[^>]*>
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
- re.UNICODE | re.IGNORECASE | re.DOTALL | re.MULTILINE)
+ re.IGNORECASE | re.DOTALL | re.MULTILINE)
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
diff --git a/src/pip/_vendor/pyparsing/__init__.py b/src/pip/_vendor/pyparsing/__init__.py
index 24ba1b9b087..75372500ed9 100644
--- a/src/pip/_vendor/pyparsing/__init__.py
+++ b/src/pip/_vendor/pyparsing/__init__.py
@@ -128,8 +128,8 @@ def __repr__(self):
)
-__version_info__ = version_info(3, 0, 8, "final", 0)
-__version_time__ = "09 Apr 2022 23:29 UTC"
+__version_info__ = version_info(3, 0, 9, "final", 0)
+__version_time__ = "05 May 2022 07:02 UTC"
__version__ = __version_info__.__version__
__versionTime__ = __version_time__
__author__ = "Paul McGuire "
diff --git a/src/pip/_vendor/pyparsing/actions.py b/src/pip/_vendor/pyparsing/actions.py
index 2bcc5502b07..f72c66e7431 100644
--- a/src/pip/_vendor/pyparsing/actions.py
+++ b/src/pip/_vendor/pyparsing/actions.py
@@ -55,7 +55,7 @@ def replace_with(repl_str):
na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
term = na | num
- OneOrMore(term).parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
+ term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s, l, t: [repl_str]
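
The docstring updates throughout this pyparsing bump replace OneOrMore(expr)
and ZeroOrMore(expr) with the equivalent slice-style repetition notation. A
sketch of the correspondence:

    from pip._vendor.pyparsing import Word, alphas, OneOrMore

    wd = Word(alphas)
    print(wd[1, ...].parse_string("abc def ghi"))     # -> ['abc', 'def', 'ghi']
    print(OneOrMore(wd).parse_string("abc def ghi"))  # same result; wd[...]
                                                      # likewise matches ZeroOrMore(wd)
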
diff --git a/src/pip/_vendor/pyparsing/core.py b/src/pip/_vendor/pyparsing/core.py
index 5afbfd0bdc6..6ff3c766f7d 100644
--- a/src/pip/_vendor/pyparsing/core.py
+++ b/src/pip/_vendor/pyparsing/core.py
@@ -2,9 +2,8 @@
# core.py
#
import os
+import typing
from typing import (
- Optional as OptionalType,
- Iterable as IterableType,
NamedTuple,
Union,
Callable,
@@ -14,7 +13,6 @@
List,
TextIO,
Set,
- Dict as DictType,
Sequence,
)
from abc import ABC, abstractmethod
@@ -192,7 +190,7 @@ def enable_all_warnings() -> None:
def _should_enable_warnings(
- cmd_line_warn_options: IterableType[str], warn_env_var: OptionalType[str]
+ cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str]
) -> bool:
enable = bool(warn_env_var)
for warn_opt in cmd_line_warn_options:
@@ -404,7 +402,7 @@ class ParserElement(ABC):
DEFAULT_WHITE_CHARS: str = " \n\t\r"
verbose_stacktrace: bool = False
- _literalStringClass: OptionalType[type] = None
+ _literalStringClass: typing.Optional[type] = None
@staticmethod
def set_default_whitespace_chars(chars: str) -> None:
@@ -414,11 +412,11 @@ def set_default_whitespace_chars(chars: str) -> None:
Example::
# default whitespace chars are space, and newline
- OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
+ Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.set_default_whitespace_chars(" \t")
- OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def']
+ Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@@ -450,13 +448,13 @@ def inline_literals_using(cls: type) -> None:
ParserElement._literalStringClass = cls
class DebugActions(NamedTuple):
- debug_try: OptionalType[DebugStartAction]
- debug_match: OptionalType[DebugSuccessAction]
- debug_fail: OptionalType[DebugExceptionAction]
+ debug_try: typing.Optional[DebugStartAction]
+ debug_match: typing.Optional[DebugSuccessAction]
+ debug_fail: typing.Optional[DebugExceptionAction]
def __init__(self, savelist: bool = False):
self.parseAction: List[ParseAction] = list()
- self.failAction: OptionalType[ParseFailAction] = None
+ self.failAction: typing.Optional[ParseFailAction] = None
self.customName = None
self._defaultName = None
self.resultsName = None
@@ -510,7 +508,7 @@ def copy(self) -> "ParserElement":
integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K")
integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
- print(OneOrMore(integerK | integerM | integer).parse_string("5K 100 640K 256M"))
+ print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M"))
prints::
@@ -895,7 +893,7 @@ def can_parse_next(self, instring: str, loc: int) -> bool:
# cache for left-recursion in Forward references
recursion_lock = RLock()
- recursion_memos: DictType[
+ recursion_memos: typing.Dict[
Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]]
] = {}
@@ -985,7 +983,7 @@ def disable_memoization() -> None:
@staticmethod
def enable_left_recursion(
- cache_size_limit: OptionalType[int] = None, *, force=False
+ cache_size_limit: typing.Optional[int] = None, *, force=False
) -> None:
"""
Enables "bounded recursion" parsing, which allows for both direct and indirect
@@ -1738,7 +1736,7 @@ def ignore(self, other: "ParserElement") -> "ParserElement":
Example::
- patt = OneOrMore(Word(alphas))
+ patt = Word(alphas)[1, ...]
patt.parse_string('ablaj /* comment */ lskjd')
# -> ['ablaj']
@@ -1798,7 +1796,7 @@ def set_debug(self, flag: bool = True) -> "ParserElement":
# turn on debugging for wd
wd.set_debug()
- OneOrMore(term).parse_string("abc 123 xyz 890")
+ term[1, ...].parse_string("abc 123 xyz 890")
prints::
@@ -1953,12 +1951,12 @@ def run_tests(
self,
tests: Union[str, List[str]],
parse_all: bool = True,
- comment: OptionalType[Union["ParserElement", str]] = "#",
+ comment: typing.Optional[Union["ParserElement", str]] = "#",
full_dump: bool = True,
print_results: bool = True,
failure_tests: bool = False,
post_parse: Callable[[str, ParseResults], str] = None,
- file: OptionalType[TextIO] = None,
+ file: typing.Optional[TextIO] = None,
with_line_numbers: bool = False,
*,
parseAll: bool = True,
@@ -2385,11 +2383,11 @@ class Keyword(Token):
def __init__(
self,
match_string: str = "",
- ident_chars: OptionalType[str] = None,
+ ident_chars: typing.Optional[str] = None,
caseless: bool = False,
*,
matchString: str = "",
- identChars: OptionalType[str] = None,
+ identChars: typing.Optional[str] = None,
):
super().__init__()
identChars = identChars or ident_chars
@@ -2479,7 +2477,7 @@ class CaselessLiteral(Literal):
Example::
- OneOrMore(CaselessLiteral("CMD")).parse_string("cmd CMD Cmd10")
+ CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10")
# -> ['CMD', 'CMD', 'CMD']
(Contrast with example for :class:`CaselessKeyword`.)
@@ -2504,7 +2502,7 @@ class CaselessKeyword(Keyword):
Example::
- OneOrMore(CaselessKeyword("CMD")).parse_string("cmd CMD Cmd10")
+ CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10")
# -> ['CMD', 'CMD']
(Contrast with example for :class:`CaselessLiteral`.)
@@ -2513,10 +2511,10 @@ class CaselessKeyword(Keyword):
def __init__(
self,
match_string: str = "",
- ident_chars: OptionalType[str] = None,
+ ident_chars: typing.Optional[str] = None,
*,
matchString: str = "",
- identChars: OptionalType[str] = None,
+ identChars: typing.Optional[str] = None,
):
identChars = identChars or ident_chars
match_string = matchString or match_string
@@ -2680,17 +2678,17 @@ class Word(Token):
def __init__(
self,
init_chars: str = "",
- body_chars: OptionalType[str] = None,
+ body_chars: typing.Optional[str] = None,
min: int = 1,
max: int = 0,
exact: int = 0,
as_keyword: bool = False,
- exclude_chars: OptionalType[str] = None,
+ exclude_chars: typing.Optional[str] = None,
*,
- initChars: OptionalType[str] = None,
- bodyChars: OptionalType[str] = None,
+ initChars: typing.Optional[str] = None,
+ bodyChars: typing.Optional[str] = None,
asKeyword: bool = False,
- excludeChars: OptionalType[str] = None,
+ excludeChars: typing.Optional[str] = None,
):
initChars = initChars or init_chars
bodyChars = bodyChars or body_chars
@@ -2872,10 +2870,10 @@ def __init__(
self,
charset: str,
as_keyword: bool = False,
- exclude_chars: OptionalType[str] = None,
+ exclude_chars: typing.Optional[str] = None,
*,
asKeyword: bool = False,
- excludeChars: OptionalType[str] = None,
+ excludeChars: typing.Optional[str] = None,
):
asKeyword = asKeyword or as_keyword
excludeChars = excludeChars or exclude_chars
@@ -3088,18 +3086,18 @@ class QuotedString(Token):
def __init__(
self,
quote_char: str = "",
- esc_char: OptionalType[str] = None,
- esc_quote: OptionalType[str] = None,
+ esc_char: typing.Optional[str] = None,
+ esc_quote: typing.Optional[str] = None,
multiline: bool = False,
unquote_results: bool = True,
- end_quote_char: OptionalType[str] = None,
+ end_quote_char: typing.Optional[str] = None,
convert_whitespace_escapes: bool = True,
*,
quoteChar: str = "",
- escChar: OptionalType[str] = None,
- escQuote: OptionalType[str] = None,
+ escChar: typing.Optional[str] = None,
+ escQuote: typing.Optional[str] = None,
unquoteResults: bool = True,
- endQuoteChar: OptionalType[str] = None,
+ endQuoteChar: typing.Optional[str] = None,
convertWhitespaceEscapes: bool = True,
):
super().__init__()
@@ -3600,7 +3598,7 @@ class ParseExpression(ParserElement):
post-processing parsed tokens.
"""
- def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False):
+ def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
super().__init__(savelist)
self.exprs: List[ParserElement]
if isinstance(exprs, _generatorType):
@@ -3767,7 +3765,7 @@ class And(ParseExpression):
Example::
integer = Word(nums)
- name_expr = OneOrMore(Word(alphas))
+ name_expr = Word(alphas)[1, ...]
expr = And([integer("id"), name_expr("name"), integer("age")])
# more easily written as:
@@ -3782,7 +3780,9 @@ def __init__(self, *args, **kwargs):
def _generateDefaultName(self):
return "-"
- def __init__(self, exprs_arg: IterableType[ParserElement], savelist: bool = True):
+ def __init__(
+ self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True
+ ):
exprs: List[ParserElement] = list(exprs_arg)
if exprs and Ellipsis in exprs:
tmp = []
@@ -3926,7 +3926,7 @@ class Or(ParseExpression):
[['123'], ['3.1416'], ['789']]
"""
- def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False):
+ def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
@@ -4081,7 +4081,7 @@ class MatchFirst(ParseExpression):
print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
- def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False):
+ def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
@@ -4232,7 +4232,7 @@ class Each(ParseExpression):
- size: 20
"""
- def __init__(self, exprs: IterableType[ParserElement], savelist: bool = True):
+ def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
@@ -4568,7 +4568,7 @@ class FollowedBy(ParseElementEnhance):
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
- OneOrMore(attr_expr).parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()
+ attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
@@ -4619,7 +4619,7 @@ class PrecededBy(ParseElementEnhance):
"""
def __init__(
- self, expr: Union[ParserElement, str], retreat: OptionalType[int] = None
+ self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None
):
super().__init__(expr)
self.expr = self.expr().leave_whitespace()
@@ -4730,7 +4730,7 @@ class NotAny(ParseElementEnhance):
# very crude boolean expression - to support parenthesis groups and
# operation hierarchy, use infix_notation
- boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)
+ boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...]
# integers that are followed by "." are actually floats
integer = Word(nums) + ~Char(".")
@@ -4758,9 +4758,9 @@ class _MultipleMatch(ParseElementEnhance):
def __init__(
self,
expr: ParserElement,
- stop_on: OptionalType[Union[ParserElement, str]] = None,
+ stop_on: typing.Optional[Union[ParserElement, str]] = None,
*,
- stopOn: OptionalType[Union[ParserElement, str]] = None,
+ stopOn: typing.Optional[Union[ParserElement, str]] = None,
):
super().__init__(expr)
stopOn = stopOn or stop_on
@@ -4849,7 +4849,7 @@ class OneOrMore(_MultipleMatch):
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
- OneOrMore(attr_expr).parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+ attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stop_on attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
@@ -4879,9 +4879,9 @@ class ZeroOrMore(_MultipleMatch):
def __init__(
self,
expr: ParserElement,
- stop_on: OptionalType[Union[ParserElement, str]] = None,
+ stop_on: typing.Optional[Union[ParserElement, str]] = None,
*,
- stopOn: OptionalType[Union[ParserElement, str]] = None,
+ stopOn: typing.Optional[Union[ParserElement, str]] = None,
):
super().__init__(expr, stopOn=stopOn or stop_on)
self.mayReturnEmpty = True
@@ -5046,7 +5046,7 @@ def __init__(
other: Union[ParserElement, str],
include: bool = False,
ignore: bool = None,
- fail_on: OptionalType[Union[ParserElement, str]] = None,
+ fail_on: typing.Optional[Union[ParserElement, str]] = None,
*,
failOn: Union[ParserElement, str] = None,
):
@@ -5143,7 +5143,7 @@ class Forward(ParseElementEnhance):
parser created using ``Forward``.
"""
- def __init__(self, other: OptionalType[Union[ParserElement, str]] = None):
+ def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None):
self.caller_frame = traceback.extract_stack(limit=2)[0]
super().__init__(other, savelist=False)
self.lshift_line = None
@@ -5395,7 +5395,7 @@ def __init__(
join_string: str = "",
adjacent: bool = True,
*,
- joinString: OptionalType[str] = None,
+ joinString: typing.Optional[str] = None,
):
super().__init__(expr)
joinString = joinString if joinString is not None else join_string
@@ -5482,10 +5482,10 @@ class Dict(TokenConverter):
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
# print attributes as plain groups
- print(OneOrMore(attr_expr).parse_string(text).dump())
+ print(attr_expr[1, ...].parse_string(text).dump())
- # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
- result = Dict(OneOrMore(Group(attr_expr))).parse_string(text)
+ # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names
+ result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
print(result.dump())
# access named fields as dict entries, or output as dict
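For readers following the `Dict(Group(expr)[1, ...])` idiom above, a condensed, hedged version of the docstring's example::

    from pip._vendor.pyparsing import Dict, Group, Suppress, Word, alphas, nums

    label = Word(alphas)
    entry = Group(label + Suppress(":") + Word(nums))
    # Dict auto-assigns each group's first token as a result name
    result = Dict(entry[1, ...]).parse_string("shape: 4 sides: 4")
    print(result["shape"], result.as_dict())  # -> 4 {'shape': '4', 'sides': '4'}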
@@ -5558,12 +5558,12 @@ class Suppress(TokenConverter):
source = "a, b, c,d"
wd = Word(alphas)
- wd_list1 = wd + ZeroOrMore(',' + wd)
+ wd_list1 = wd + (',' + wd)[...]
print(wd_list1.parse_string(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
- wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+ wd_list2 = wd + (Suppress(',') + wd)[...]
print(wd_list2.parse_string(source))
# Skipped text (using '...') can be suppressed as well
@@ -5622,7 +5622,7 @@ def trace_parse_action(f: ParseAction) -> ParseAction:
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
- wds = OneOrMore(wd).set_parse_action(remove_duplicate_chars)
+ wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
prints::
@@ -5728,18 +5728,18 @@ def token_map(func, *args) -> ParseAction:
Example (compare the last example to the one in :class:`ParserElement.transform_string`)::
- hex_ints = OneOrMore(Word(hexnums)).set_parse_action(token_map(int, 16))
+ hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16))
hex_ints.run_tests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).set_parse_action(token_map(str.upper))
- OneOrMore(upperword).run_tests('''
+ upperword[1, ...].run_tests('''
my kingdom for a horse
''')
wd = Word(alphas).set_parse_action(token_map(str.title))
- OneOrMore(wd).set_parse_action(' '.join).run_tests('''
+ wd[1, ...].set_parse_action(' '.join).run_tests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
@@ -5795,7 +5795,9 @@ def autoname_elements() -> None:
# build list of built-in expressions, for future reference if a global default value
# gets updated
-_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)]
+_builtin_exprs: List[ParserElement] = [
+ v for v in vars().values() if isinstance(v, ParserElement)
+]
# backward compatibility names
tokenMap = token_map
diff --git a/src/pip/_vendor/pyparsing/diagram/__init__.py b/src/pip/_vendor/pyparsing/diagram/__init__.py
index 93c8c081e0f..1506d66bf4e 100644
--- a/src/pip/_vendor/pyparsing/diagram/__init__.py
+++ b/src/pip/_vendor/pyparsing/diagram/__init__.py
@@ -1,9 +1,8 @@
import railroad
from pip._vendor import pyparsing
-from pip._vendor.pkg_resources import resource_filename
+import typing
from typing import (
List,
- Optional,
NamedTuple,
Generic,
TypeVar,
@@ -17,13 +16,41 @@
import inspect
-with open(resource_filename(__name__, "template.jinja2"), encoding="utf-8") as fp:
- template = Template(fp.read())
+jinja2_template_source = """\
+
+
+
+ {% if not head %}
+
+ {% else %}
+ {{ head | safe }}
+ {% endif %}
+
+
+{{ body | safe }}
+{% for diagram in diagrams %}
+
+
{{ diagram.title }}
+
{{ diagram.text }}
+
+ {{ diagram.svg }}
+
+
+{% endfor %}
+
+
+"""
+
+template = Template(jinja2_template_source)
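Inlining the template string removes the `pkg_resources.resource_filename` call, pyparsing's last runtime use of pkg_resources. Rendering the inlined `Template` is plain jinja2; a toy sketch::

    from jinja2 import Template

    toy = Template("<h1>{{ title }}</h1>{{ body | safe }}")
    print(toy.render(title="demo", body="<svg/>"))  # -> <h1>demo</h1><svg/>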
# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
NamedDiagram = NamedTuple(
"NamedDiagram",
- [("name", str), ("diagram", Optional[railroad.DiagramItem]), ("index", int)],
+ [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
)
"""
A simple structure for associating a name with a railroad diagram
@@ -107,6 +134,8 @@ def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
"""
data = []
for diagram in diagrams:
+ if diagram.diagram is None:
+ continue
io = StringIO()
diagram.diagram.writeSvg(io.write)
title = diagram.name
@@ -135,7 +164,7 @@ def resolve_partial(partial: "EditablePartial[T]") -> T:
def to_railroad(
element: pyparsing.ParserElement,
- diagram_kwargs: Optional[dict] = None,
+ diagram_kwargs: typing.Optional[dict] = None,
vertical: int = 3,
show_results_names: bool = False,
show_groups: bool = False,
@@ -216,12 +245,12 @@ def __init__(
parent: EditablePartial,
number: int,
name: str = None,
- parent_index: Optional[int] = None,
+ parent_index: typing.Optional[int] = None,
):
#: The pyparsing element that this represents
self.element: pyparsing.ParserElement = element
#: The name of the element
- self.name: str = name
+ self.name: typing.Optional[str] = name
#: The output Railroad element in an unconverted state
self.converted: EditablePartial = converted
#: The parent Railroad element, which we store so that we can extract this if it's duplicated
@@ -229,7 +258,7 @@ def __init__(
#: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
self.number: int = number
#: The index of this inside its parent
- self.parent_index: Optional[int] = parent_index
+ self.parent_index: typing.Optional[int] = parent_index
#: If true, we should extract this out into a subdiagram
self.extract: bool = False
#: If true, all of this element's children have been filled out
@@ -270,7 +299,7 @@ class ConverterState:
Stores some state that persists between recursions into the element tree
"""
- def __init__(self, diagram_kwargs: Optional[dict] = None):
+ def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
#: A dictionary mapping ParserElements to state relating to them
self._element_diagram_states: Dict[int, ElementState] = {}
#: A dictionary mapping ParserElement IDs to subdiagrams generated from them
@@ -361,14 +390,14 @@ def _apply_diagram_item_enhancements(fn):
def _inner(
element: pyparsing.ParserElement,
- parent: Optional[EditablePartial],
+ parent: typing.Optional[EditablePartial],
lookup: ConverterState = None,
vertical: int = None,
index: int = 0,
name_hint: str = None,
show_results_names: bool = False,
show_groups: bool = False,
- ) -> Optional[EditablePartial]:
+ ) -> typing.Optional[EditablePartial]:
ret = fn(
element,
@@ -412,14 +441,14 @@ def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
@_apply_diagram_item_enhancements
def _to_diagram_element(
element: pyparsing.ParserElement,
- parent: Optional[EditablePartial],
+ parent: typing.Optional[EditablePartial],
lookup: ConverterState = None,
vertical: int = None,
index: int = 0,
name_hint: str = None,
show_results_names: bool = False,
show_groups: bool = False,
-) -> Optional[EditablePartial]:
+) -> typing.Optional[EditablePartial]:
"""
Recursively converts a PyParsing Element to a railroad Element
:param lookup: The shared converter state that keeps track of useful things
@@ -526,7 +555,9 @@ def _to_diagram_element(
else:
ret = EditablePartial.from_call(railroad.Group, label="", item="")
elif isinstance(element, pyparsing.TokenConverter):
- ret = EditablePartial.from_call(AnnotatedItem, label=type(element).__name__.lower(), item="")
+ ret = EditablePartial.from_call(
+ AnnotatedItem, label=type(element).__name__.lower(), item=""
+ )
elif isinstance(element, pyparsing.Opt):
ret = EditablePartial.from_call(railroad.Optional, item="")
elif isinstance(element, pyparsing.OneOrMore):
diff --git a/src/pip/_vendor/pyparsing/diagram/template.jinja2 b/src/pip/_vendor/pyparsing/diagram/template.jinja2
deleted file mode 100644
index d2219fb0115..00000000000
--- a/src/pip/_vendor/pyparsing/diagram/template.jinja2
+++ /dev/null
@@ -1,26 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-    {% if not head %}
-        <style>
-            .railroad-heading {
-                font-family: monospace;
-            }
-        </style>
-    {% else %}
-        {{ head | safe }}
-    {% endif %}
-</head>
-<body>
-{{ body | safe }}
-{% for diagram in diagrams %}
-    <div class="railroad-group">
-        <h1 class="railroad-heading">{{ diagram.title }}</h1>
-        <div>{{ diagram.text }}</div>
-        <div class="railroad-svg">
-            {{ diagram.svg }}
-        </div>
-    </div>
-{% endfor %}
-</body>
-</html>
diff --git a/src/pip/_vendor/pyparsing/exceptions.py b/src/pip/_vendor/pyparsing/exceptions.py
index e06513eb00f..a38447bb05b 100644
--- a/src/pip/_vendor/pyparsing/exceptions.py
+++ b/src/pip/_vendor/pyparsing/exceptions.py
@@ -2,7 +2,7 @@
import re
import sys
-from typing import Optional
+import typing
from .util import col, line, lineno, _collapse_string_to_ranges
from .unicode import pyparsing_unicode as ppu
@@ -25,7 +25,7 @@ def __init__(
self,
pstr: str,
loc: int = 0,
- msg: Optional[str] = None,
+ msg: typing.Optional[str] = None,
elem=None,
):
self.loc = loc
diff --git a/src/pip/_vendor/pyparsing/helpers.py b/src/pip/_vendor/pyparsing/helpers.py
index be8a3657884..9588b3b7801 100644
--- a/src/pip/_vendor/pyparsing/helpers.py
+++ b/src/pip/_vendor/pyparsing/helpers.py
@@ -1,6 +1,7 @@
# helpers.py
import html.entities
import re
+import typing
from . import __diag__
from .core import *
@@ -14,8 +15,8 @@ def delimited_list(
expr: Union[str, ParserElement],
delim: Union[str, ParserElement] = ",",
combine: bool = False,
- min: OptionalType[int] = None,
- max: OptionalType[int] = None,
+ min: typing.Optional[int] = None,
+ max: typing.Optional[int] = None,
*,
allow_trailing_delim: bool = False,
) -> ParserElement:
@@ -69,9 +70,9 @@ def delimited_list(
def counted_array(
expr: ParserElement,
- int_expr: OptionalType[ParserElement] = None,
+ int_expr: typing.Optional[ParserElement] = None,
*,
- intExpr: OptionalType[ParserElement] = None,
+ intExpr: typing.Optional[ParserElement] = None,
) -> ParserElement:
"""Helper to define a counted list of expressions.
@@ -197,7 +198,7 @@ def must_match_these_tokens(s, l, t):
def one_of(
- strs: Union[IterableType[str], str],
+ strs: Union[typing.Iterable[str], str],
caseless: bool = False,
use_regex: bool = True,
as_keyword: bool = False,
@@ -337,7 +338,7 @@ def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
- print(OneOrMore(attr_expr).parse_string(text).dump())
+ print(attr_expr[1, ...].parse_string(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
@@ -461,7 +462,7 @@ def locatedExpr(expr: ParserElement) -> ParserElement:
def nested_expr(
opener: Union[str, ParserElement] = "(",
closer: Union[str, ParserElement] = ")",
- content: OptionalType[ParserElement] = None,
+ content: typing.Optional[ParserElement] = None,
ignore_expr: ParserElement = quoted_string(),
*,
ignoreExpr: ParserElement = quoted_string(),
@@ -682,6 +683,8 @@ def make_xml_tags(
return _makeTags(tag_str, True)
+any_open_tag: ParserElement
+any_close_tag: ParserElement
any_open_tag, any_close_tag = make_html_tags(
Word(alphas, alphanums + "_:").set_name("any tag")
)
@@ -710,7 +713,7 @@ class OpAssoc(Enum):
InfixNotationOperatorArgType,
int,
OpAssoc,
- OptionalType[ParseAction],
+ typing.Optional[ParseAction],
],
Tuple[
InfixNotationOperatorArgType,
@@ -840,7 +843,7 @@ def parseImpl(self, instring, loc, doActions=True):
if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
raise ValueError("operator must indicate right or left associativity")
- thisExpr = Forward().set_name(term_name)
+ thisExpr: Forward = Forward().set_name(term_name)
if rightLeftAssoc is OpAssoc.LEFT:
if arity == 1:
matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
@@ -945,7 +948,7 @@ def eggs(z):
assignment = Group(identifier + "=" + rvalue)
stmt << (funcDef | assignment | identifier)
- module_body = OneOrMore(stmt)
+ module_body = stmt[1, ...]
parseTree = module_body.parseString(data)
parseTree.pprint()
@@ -1055,7 +1058,9 @@ def checkUnindent(s, l, t):
# build list of built-in expressions, for future reference if a global default value
# gets updated
-_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)]
+_builtin_exprs: List[ParserElement] = [
+ v for v in vars().values() if isinstance(v, ParserElement)
+]
# pre-PEP8 compatible names
diff --git a/src/pip/_vendor/pyparsing/results.py b/src/pip/_vendor/pyparsing/results.py
index bb444df4e5b..00c9421d3b0 100644
--- a/src/pip/_vendor/pyparsing/results.py
+++ b/src/pip/_vendor/pyparsing/results.py
@@ -287,7 +287,7 @@ def remove_first(tokens):
print(numlist.parse_string("0 123 321")) # -> ['123', '321']
label = Word(alphas)
- patt = label("LABEL") + OneOrMore(Word(nums))
+ patt = label("LABEL") + Word(nums)[1, ...]
print(patt.parse_string("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
@@ -394,7 +394,7 @@ def extend(self, itemseq):
Example::
- patt = OneOrMore(Word(alphas))
+ patt = Word(alphas)[1, ...]
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
@@ -487,7 +487,7 @@ def as_list(self) -> list:
Example::
- patt = OneOrMore(Word(alphas))
+ patt = Word(alphas)[1, ...]
result = patt.parse_string("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj']
@@ -554,7 +554,7 @@ def get_name(self):
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
- user_info = OneOrMore(user_data)
+ user_info = user_data[1, ...]
result = user_info.parse_string("22 111-22-3333 #221B")
for item in result:
diff --git a/src/pip/_vendor/pyparsing/testing.py b/src/pip/_vendor/pyparsing/testing.py
index 991972f3fb2..84a0ef17078 100644
--- a/src/pip/_vendor/pyparsing/testing.py
+++ b/src/pip/_vendor/pyparsing/testing.py
@@ -1,7 +1,7 @@
# testing.py
from contextlib import contextmanager
-from typing import Optional
+import typing
from .core import (
ParserElement,
@@ -237,12 +237,12 @@ def assertRaisesParseException(self, exc_type=ParseException, msg=None):
@staticmethod
def with_line_numbers(
s: str,
- start_line: Optional[int] = None,
- end_line: Optional[int] = None,
+ start_line: typing.Optional[int] = None,
+ end_line: typing.Optional[int] = None,
expand_tabs: bool = True,
eol_mark: str = "|",
- mark_spaces: Optional[str] = None,
- mark_control: Optional[str] = None,
+ mark_spaces: typing.Optional[str] = None,
+ mark_control: typing.Optional[str] = None,
) -> str:
"""
Helpful method for debugging a parser - prints a string with line and column numbers.
diff --git a/src/pip/_vendor/pyparsing/unicode.py b/src/pip/_vendor/pyparsing/unicode.py
index 92261487c7a..06526203911 100644
--- a/src/pip/_vendor/pyparsing/unicode.py
+++ b/src/pip/_vendor/pyparsing/unicode.py
@@ -120,7 +120,18 @@ class pyparsing_unicode(unicode_set):
A namespace class for defining common language unicode_sets.
"""
- _ranges: UnicodeRangeList = [(32, sys.maxunicode)]
+ # fmt: off
+
+ # define ranges in language character sets
+ _ranges: UnicodeRangeList = [
+ (0x0020, sys.maxunicode),
+ ]
+
+ class BasicMultilingualPlane(unicode_set):
+ "Unicode set for the Basic Multilingual Plane"
+ _ranges: UnicodeRangeList = [
+ (0x0020, 0xFFFF),
+ ]
class Latin1(unicode_set):
"Unicode set for Latin-1 Unicode Character Range"
@@ -278,11 +289,13 @@ class Hangul(unicode_set):
class CJK(Chinese, Japanese, Hangul):
"Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
- pass
class Thai(unicode_set):
"Unicode set for Thai Unicode Character Range"
- _ranges: UnicodeRangeList = [(0x0E01, 0x0E3A), (0x0E3F, 0x0E5B)]
+ _ranges: UnicodeRangeList = [
+ (0x0E01, 0x0E3A),
+ (0x0E3F, 0x0E5B)
+ ]
class Arabic(unicode_set):
"Unicode set for Arabic Unicode Character Range"
@@ -308,7 +321,12 @@ class Hebrew(unicode_set):
class Devanagari(unicode_set):
"Unicode set for Devanagari Unicode Character Range"
- _ranges: UnicodeRangeList = [(0x0900, 0x097F), (0xA8E0, 0xA8FF)]
+ _ranges: UnicodeRangeList = [
+ (0x0900, 0x097F),
+ (0xA8E0, 0xA8FF)
+ ]
+
+ # fmt: on
pyparsing_unicode.Japanese._ranges = (
@@ -317,7 +335,9 @@ class Devanagari(unicode_set):
+ pyparsing_unicode.Japanese.Katakana._ranges
)
-# define ranges in language character sets
+pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
+
+# add language identifiers using language Unicode
pyparsing_unicode.العربية = pyparsing_unicode.Arabic
pyparsing_unicode.中文 = pyparsing_unicode.Chinese
pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
diff --git a/src/pip/_vendor/requests/__init__.py b/src/pip/_vendor/requests/__init__.py
index 75a633bf9dc..9e97059d1db 100644
--- a/src/pip/_vendor/requests/__init__.py
+++ b/src/pip/_vendor/requests/__init__.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
@@ -40,8 +38,10 @@
:license: Apache 2.0, see LICENSE for more details.
"""
-from pip._vendor import urllib3
import warnings
+
+from pip._vendor import urllib3
+
from .exceptions import RequestsDependencyWarning
charset_normalizer_version = None
@@ -51,13 +51,14 @@
except ImportError:
chardet_version = None
+
def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version):
- urllib3_version = urllib3_version.split('.')
- assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git.
+ urllib3_version = urllib3_version.split(".")
+ assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git.
# Sometimes, urllib3 only reports its version as 16.1.
if len(urllib3_version) == 2:
- urllib3_version.append('0')
+ urllib3_version.append("0")
# Check urllib3 for compatibility.
major, minor, patch = urllib3_version # noqa: F811
@@ -69,36 +70,46 @@ def check_compatibility(urllib3_version, chardet_version, charset_normalizer_ver
# Check charset_normalizer for compatibility.
if chardet_version:
- major, minor, patch = chardet_version.split('.')[:3]
+ major, minor, patch = chardet_version.split(".")[:3]
major, minor, patch = int(major), int(minor), int(patch)
- # chardet_version >= 3.0.2, < 5.0.0
- assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0)
+ # chardet_version >= 3.0.2, < 6.0.0
+ assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0)
elif charset_normalizer_version:
- major, minor, patch = charset_normalizer_version.split('.')[:3]
+ major, minor, patch = charset_normalizer_version.split(".")[:3]
major, minor, patch = int(major), int(minor), int(patch)
# charset_normalizer >= 2.0.0 < 3.0.0
assert (2, 0, 0) <= (major, minor, patch) < (3, 0, 0)
else:
raise Exception("You need either charset_normalizer or chardet installed")
+
def _check_cryptography(cryptography_version):
# cryptography < 1.3.4
try:
- cryptography_version = list(map(int, cryptography_version.split('.')))
+ cryptography_version = list(map(int, cryptography_version.split(".")))
except ValueError:
return
if cryptography_version < [1, 3, 4]:
- warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version)
+ warning = "Old version of cryptography ({}) may cause slowdown.".format(
+ cryptography_version
+ )
warnings.warn(warning, RequestsDependencyWarning)
+
# Check imported dependencies for compatibility.
try:
- check_compatibility(urllib3.__version__, chardet_version, charset_normalizer_version)
+ check_compatibility(
+ urllib3.__version__, chardet_version, charset_normalizer_version
+ )
except (AssertionError, ValueError):
- warnings.warn("urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
- "version!".format(urllib3.__version__, chardet_version, charset_normalizer_version),
- RequestsDependencyWarning)
+ warnings.warn(
+ "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
+ "version!".format(
+ urllib3.__version__, chardet_version, charset_normalizer_version
+ ),
+ RequestsDependencyWarning,
+ )
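The `check_compatibility` edits above widen the accepted chardet window from `< 5.0.0` to `< 6.0.0` (the rest is black re-quoting). The gate is an ordinary version-tuple comparison; a standalone sketch of the same check::

    def version_tuple(v: str) -> tuple:
        # keep at most major.minor.patch, as the code above does
        return tuple(int(part) for part in v.split(".")[:3])

    assert (3, 0, 2) <= version_tuple("5.0.0") < (6, 0, 0)  # accepted after this patch
    assert not version_tuple("2.3.0") >= (3, 0, 2)          # too old, still rejected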
# Attempt to enable urllib3's fallback for SNI support
# if the standard library doesn't support SNI or the
@@ -116,39 +127,56 @@ def _check_cryptography(cryptography_version):
if not getattr(ssl, "HAS_SNI", False):
from pip._vendor.urllib3.contrib import pyopenssl
+
pyopenssl.inject_into_urllib3()
# Check cryptography version
from cryptography import __version__ as cryptography_version
+
_check_cryptography(cryptography_version)
except ImportError:
pass
# urllib3's DependencyWarnings should be silenced.
from pip._vendor.urllib3.exceptions import DependencyWarning
-warnings.simplefilter('ignore', DependencyWarning)
-from .__version__ import __title__, __description__, __url__, __version__
-from .__version__ import __build__, __author__, __author_email__, __license__
-from .__version__ import __copyright__, __cake__
-
-from . import utils
-from . import packages
-from .models import Request, Response, PreparedRequest
-from .api import request, get, head, post, patch, put, delete, options
-from .sessions import session, Session
-from .status_codes import codes
-from .exceptions import (
- RequestException, Timeout, URLRequired,
- TooManyRedirects, HTTPError, ConnectionError,
- FileModeWarning, ConnectTimeout, ReadTimeout, JSONDecodeError
-)
+warnings.simplefilter("ignore", DependencyWarning)
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler
+from . import packages, utils
+from .__version__ import (
+ __author__,
+ __author_email__,
+ __build__,
+ __cake__,
+ __copyright__,
+ __description__,
+ __license__,
+ __title__,
+ __url__,
+ __version__,
+)
+from .api import delete, get, head, options, patch, post, put, request
+from .exceptions import (
+ ConnectionError,
+ ConnectTimeout,
+ FileModeWarning,
+ HTTPError,
+ JSONDecodeError,
+ ReadTimeout,
+ RequestException,
+ Timeout,
+ TooManyRedirects,
+ URLRequired,
+)
+from .models import PreparedRequest, Request, Response
+from .sessions import Session, session
+from .status_codes import codes
+
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
-warnings.simplefilter('default', FileModeWarning, append=True)
+warnings.simplefilter("default", FileModeWarning, append=True)
diff --git a/src/pip/_vendor/requests/__version__.py b/src/pip/_vendor/requests/__version__.py
index e973b03b5ff..e725ada6550 100644
--- a/src/pip/_vendor/requests/__version__.py
+++ b/src/pip/_vendor/requests/__version__.py
@@ -2,13 +2,13 @@
# |( |- |.| | | |- `-. | `-.
# ' ' `-' `-`.`-' `-' `-' ' `-'
-__title__ = 'requests'
-__description__ = 'Python HTTP for Humans.'
-__url__ = 'https://requests.readthedocs.io'
-__version__ = '2.27.1'
-__build__ = 0x022701
-__author__ = 'Kenneth Reitz'
-__author_email__ = 'me@kennethreitz.org'
-__license__ = 'Apache 2.0'
-__copyright__ = 'Copyright 2022 Kenneth Reitz'
-__cake__ = u'\u2728 \U0001f370 \u2728'
+__title__ = "requests"
+__description__ = "Python HTTP for Humans."
+__url__ = "https://requests.readthedocs.io"
+__version__ = "2.28.1"
+__build__ = 0x022801
+__author__ = "Kenneth Reitz"
+__author_email__ = "me@kennethreitz.org"
+__license__ = "Apache 2.0"
+__copyright__ = "Copyright 2022 Kenneth Reitz"
+__cake__ = "\u2728 \U0001f370 \u2728"
diff --git a/src/pip/_vendor/requests/_internal_utils.py b/src/pip/_vendor/requests/_internal_utils.py
index 759d9a56ba0..7dc9bc53360 100644
--- a/src/pip/_vendor/requests/_internal_utils.py
+++ b/src/pip/_vendor/requests/_internal_utils.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests._internal_utils
~~~~~~~~~~~~~~
@@ -7,11 +5,22 @@
Provides utility functions that are consumed internally by Requests
which depend on extremely few external helpers (such as compat)
"""
+import re
+
+from .compat import builtin_str
+
+_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$")
+_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$")
+_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$")
+_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$")
-from .compat import is_py2, builtin_str, str
+HEADER_VALIDATORS = {
+ bytes: (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE),
+ str: (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR),
+}
-def to_native_string(string, encoding='ascii'):
+def to_native_string(string, encoding="ascii"):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
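The new `HEADER_VALIDATORS` table pairs a header-name pattern with a header-value pattern per string type; it backs the header-injection checks added elsewhere in requests 2.28. A hedged re-statement of the `str` pair::

    import re

    name_re = re.compile(r"^[^:\s][^:\r\n]*$")  # mirrors _VALID_HEADER_NAME_RE_STR
    value_re = re.compile(r"^\S[^\r\n]*$|^$")   # mirrors _VALID_HEADER_VALUE_RE_STR
    assert name_re.match("Content-Type")
    assert not name_re.match("Bad:Name")        # embedded colon is rejected
    assert value_re.match("") and not value_re.match("x\r\ny")  # CRLF is rejected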
@@ -19,10 +28,7 @@ def to_native_string(string, encoding='ascii'):
if isinstance(string, builtin_str):
out = string
else:
- if is_py2:
- out = string.encode(encoding)
- else:
- out = string.decode(encoding)
+ out = string.decode(encoding)
return out
@@ -36,7 +42,7 @@ def unicode_is_ascii(u_string):
"""
assert isinstance(u_string, str)
try:
- u_string.encode('ascii')
+ u_string.encode("ascii")
return True
except UnicodeEncodeError:
return False
diff --git a/src/pip/_vendor/requests/adapters.py b/src/pip/_vendor/requests/adapters.py
index b3dfa570637..f68f7d46753 100644
--- a/src/pip/_vendor/requests/adapters.py
+++ b/src/pip/_vendor/requests/adapters.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
@@ -9,58 +7,76 @@
"""
import os.path
-import socket
+import socket # noqa: F401
-from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
-from pip._vendor.urllib3.response import HTTPResponse
-from pip._vendor.urllib3.util import parse_url
-from pip._vendor.urllib3.util import Timeout as TimeoutSauce
-from pip._vendor.urllib3.util.retry import Retry
-from pip._vendor.urllib3.exceptions import ClosedPoolError
-from pip._vendor.urllib3.exceptions import ConnectTimeoutError
+from pip._vendor.urllib3.exceptions import ClosedPoolError, ConnectTimeoutError
from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
from pip._vendor.urllib3.exceptions import InvalidHeader as _InvalidHeader
-from pip._vendor.urllib3.exceptions import MaxRetryError
-from pip._vendor.urllib3.exceptions import NewConnectionError
+from pip._vendor.urllib3.exceptions import (
+ LocationValueError,
+ MaxRetryError,
+ NewConnectionError,
+ ProtocolError,
+)
from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
-from pip._vendor.urllib3.exceptions import ProtocolError
-from pip._vendor.urllib3.exceptions import ReadTimeoutError
+from pip._vendor.urllib3.exceptions import ReadTimeoutError, ResponseError
from pip._vendor.urllib3.exceptions import SSLError as _SSLError
-from pip._vendor.urllib3.exceptions import ResponseError
-from pip._vendor.urllib3.exceptions import LocationValueError
+from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
+from pip._vendor.urllib3.response import HTTPResponse
+from pip._vendor.urllib3.util import Timeout as TimeoutSauce
+from pip._vendor.urllib3.util import parse_url
+from pip._vendor.urllib3.util.retry import Retry
+from .auth import _basic_auth_str
+from .compat import basestring, urlparse
+from .cookies import extract_cookies_to_jar
+from .exceptions import (
+ ConnectionError,
+ ConnectTimeout,
+ InvalidHeader,
+ InvalidProxyURL,
+ InvalidSchema,
+ InvalidURL,
+ ProxyError,
+ ReadTimeout,
+ RetryError,
+ SSLError,
+)
from .models import Response
-from .compat import urlparse, basestring
-from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
- get_encoding_from_headers, prepend_scheme_if_needed,
- get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
-from .cookies import extract_cookies_to_jar
-from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
- ProxyError, RetryError, InvalidSchema, InvalidProxyURL,
- InvalidURL, InvalidHeader)
-from .auth import _basic_auth_str
+from .utils import (
+ DEFAULT_CA_BUNDLE_PATH,
+ extract_zipped_paths,
+ get_auth_from_url,
+ get_encoding_from_headers,
+ prepend_scheme_if_needed,
+ select_proxy,
+ urldefragauth,
+)
try:
from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
+
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
+
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
-class BaseAdapter(object):
+class BaseAdapter:
"""The Base Transport Adapter"""
def __init__(self):
- super(BaseAdapter, self).__init__()
+ super().__init__()
- def send(self, request, stream=False, timeout=None, verify=True,
- cert=None, proxies=None):
+ def send(
+ self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
+ ):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
@@ -108,12 +124,22 @@ class HTTPAdapter(BaseAdapter):
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
- __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
- '_pool_block']
- def __init__(self, pool_connections=DEFAULT_POOLSIZE,
- pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
- pool_block=DEFAULT_POOLBLOCK):
+ __attrs__ = [
+ "max_retries",
+ "config",
+ "_pool_connections",
+ "_pool_maxsize",
+ "_pool_block",
+ ]
+
+ def __init__(
+ self,
+ pool_connections=DEFAULT_POOLSIZE,
+ pool_maxsize=DEFAULT_POOLSIZE,
+ max_retries=DEFAULT_RETRIES,
+ pool_block=DEFAULT_POOLBLOCK,
+ ):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
@@ -121,7 +147,7 @@ def __init__(self, pool_connections=DEFAULT_POOLSIZE,
self.config = {}
self.proxy_manager = {}
- super(HTTPAdapter, self).__init__()
+ super().__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
@@ -141,10 +167,13 @@ def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
- self.init_poolmanager(self._pool_connections, self._pool_maxsize,
- block=self._pool_block)
+ self.init_poolmanager(
+ self._pool_connections, self._pool_maxsize, block=self._pool_block
+ )
- def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
+ def init_poolmanager(
+ self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs
+ ):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
@@ -161,8 +190,13 @@ def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool
self._pool_maxsize = maxsize
self._pool_block = block
- self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
- block=block, strict=True, **pool_kwargs)
+ self.poolmanager = PoolManager(
+ num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ strict=True,
+ **pool_kwargs,
+ )
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
@@ -178,7 +212,7 @@ def proxy_manager_for(self, proxy, **proxy_kwargs):
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
- elif proxy.lower().startswith('socks'):
+ elif proxy.lower().startswith("socks"):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
@@ -187,7 +221,7 @@ def proxy_manager_for(self, proxy, **proxy_kwargs):
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
- **proxy_kwargs
+ **proxy_kwargs,
)
else:
proxy_headers = self.proxy_headers(proxy)
@@ -197,7 +231,8 @@ def proxy_manager_for(self, proxy, **proxy_kwargs):
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
- **proxy_kwargs)
+ **proxy_kwargs,
+ )
return manager
@@ -213,7 +248,7 @@ def cert_verify(self, conn, url, verify, cert):
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
- if url.lower().startswith('https') and verify:
+ if url.lower().startswith("https") and verify:
cert_loc = None
@@ -225,17 +260,19 @@ def cert_verify(self, conn, url, verify, cert):
cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
if not cert_loc or not os.path.exists(cert_loc):
- raise IOError("Could not find a suitable TLS CA certificate bundle, "
- "invalid path: {}".format(cert_loc))
+ raise OSError(
+ f"Could not find a suitable TLS CA certificate bundle, "
+ f"invalid path: {cert_loc}"
+ )
- conn.cert_reqs = 'CERT_REQUIRED'
+ conn.cert_reqs = "CERT_REQUIRED"
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
- conn.cert_reqs = 'CERT_NONE'
+ conn.cert_reqs = "CERT_NONE"
conn.ca_certs = None
conn.ca_cert_dir = None
@@ -247,11 +284,14 @@ def cert_verify(self, conn, url, verify, cert):
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
- raise IOError("Could not find the TLS certificate file, "
- "invalid path: {}".format(conn.cert_file))
+ raise OSError(
+ f"Could not find the TLS certificate file, "
+ f"invalid path: {conn.cert_file}"
+ )
if conn.key_file and not os.path.exists(conn.key_file):
- raise IOError("Could not find the TLS key file, "
- "invalid path: {}".format(conn.key_file))
+ raise OSError(
+ f"Could not find the TLS key file, invalid path: {conn.key_file}"
+ )
def build_response(self, req, resp):
"""Builds a :class:`Response ` object from a urllib3
@@ -266,10 +306,10 @@ def build_response(self, req, resp):
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
- response.status_code = getattr(resp, 'status', None)
+ response.status_code = getattr(resp, "status", None)
# Make headers case-insensitive.
- response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
+ response.headers = CaseInsensitiveDict(getattr(resp, "headers", {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
@@ -277,7 +317,7 @@ def build_response(self, req, resp):
response.reason = response.raw.reason
if isinstance(req.url, bytes):
- response.url = req.url.decode('utf-8')
+ response.url = req.url.decode("utf-8")
else:
response.url = req.url
@@ -302,11 +342,13 @@ def get_connection(self, url, proxies=None):
proxy = select_proxy(url, proxies)
if proxy:
- proxy = prepend_scheme_if_needed(proxy, 'http')
+ proxy = prepend_scheme_if_needed(proxy, "http")
proxy_url = parse_url(proxy)
if not proxy_url.host:
- raise InvalidProxyURL("Please check proxy URL. It is malformed"
- " and could be missing the host.")
+ raise InvalidProxyURL(
+ "Please check proxy URL. It is malformed "
+ "and could be missing the host."
+ )
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
@@ -344,11 +386,11 @@ def request_url(self, request, proxies):
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
- is_proxied_http_request = (proxy and scheme != 'https')
+ is_proxied_http_request = proxy and scheme != "https"
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
- using_socks_proxy = proxy_scheme.startswith('socks')
+ using_socks_proxy = proxy_scheme.startswith("socks")
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
@@ -387,12 +429,13 @@ def proxy_headers(self, proxy):
username, password = get_auth_from_url(proxy)
if username:
- headers['Proxy-Authorization'] = _basic_auth_str(username,
- password)
+ headers["Proxy-Authorization"] = _basic_auth_str(username, password)
return headers
- def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
+ def send(
+ self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
+ ):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
@@ -416,20 +459,26 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
- self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)
+ self.add_headers(
+ request,
+ stream=stream,
+ timeout=timeout,
+ verify=verify,
+ cert=cert,
+ proxies=proxies,
+ )
- chunked = not (request.body is None or 'Content-Length' in request.headers)
+ chunked = not (request.body is None or "Content-Length" in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
- except ValueError as e:
- # this may raise a string formatting error.
- err = ("Invalid timeout {}. Pass a (connect, read) "
- "timeout tuple, or a single float to set "
- "both timeouts to the same value".format(timeout))
- raise ValueError(err)
+ except ValueError:
+ raise ValueError(
+ f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, "
+ f"or a single float to set both timeouts to the same value."
+ )
elif isinstance(timeout, TimeoutSauce):
pass
else:
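The rewritten error message documents the timeout contract: a `(connect, read)` tuple or a single float. Usage sketch (plain `requests` shown; pip reaches the same code as `pip._vendor.requests`)::

    import requests

    # 3.05s to establish the connection, 27s between bytes read
    resp = requests.get("https://example.org", timeout=(3.05, 27))
    print(resp.status_code)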
@@ -447,22 +496,24 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox
preload_content=False,
decode_content=False,
retries=self.max_retries,
- timeout=timeout
+ timeout=timeout,
)
# Send the request.
else:
- if hasattr(conn, 'proxy_pool'):
+ if hasattr(conn, "proxy_pool"):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
- skip_host = 'Host' in request.headers
- low_conn.putrequest(request.method,
- url,
- skip_accept_encoding=True,
- skip_host=skip_host)
+ skip_host = "Host" in request.headers
+ low_conn.putrequest(
+ request.method,
+ url,
+ skip_accept_encoding=True,
+ skip_host=skip_host,
+ )
for header, value in request.headers.items():
low_conn.putheader(header, value)
@@ -470,34 +521,29 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox
low_conn.endheaders()
for i in request.body:
- low_conn.send(hex(len(i))[2:].encode('utf-8'))
- low_conn.send(b'\r\n')
+ low_conn.send(hex(len(i))[2:].encode("utf-8"))
+ low_conn.send(b"\r\n")
low_conn.send(i)
- low_conn.send(b'\r\n')
- low_conn.send(b'0\r\n\r\n')
+ low_conn.send(b"\r\n")
+ low_conn.send(b"0\r\n\r\n")
# Receive the response from the server
- try:
- # For Python 2.7, use buffering of HTTP responses
- r = low_conn.getresponse(buffering=True)
- except TypeError:
- # For compatibility with Python 3.3+
- r = low_conn.getresponse()
+ r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
- decode_content=False
+ decode_content=False,
)
- except:
+ except Exception:
# If we hit any problems here, clean up the connection.
- # Then, reraise so that we can handle the actual exception.
+ # Then, raise so that we can handle the actual exception.
low_conn.close()
raise
- except (ProtocolError, socket.error) as err:
+ except (ProtocolError, OSError) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
diff --git a/src/pip/_vendor/requests/api.py b/src/pip/_vendor/requests/api.py
index 4cba90eefe8..2f71aaed1af 100644
--- a/src/pip/_vendor/requests/api.py
+++ b/src/pip/_vendor/requests/api.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.api
~~~~~~~~~~~~
@@ -72,7 +70,7 @@ def get(url, params=None, **kwargs):
:rtype: requests.Response
"""
- return request('get', url, params=params, **kwargs)
+ return request("get", url, params=params, **kwargs)
def options(url, **kwargs):
@@ -84,7 +82,7 @@ def options(url, **kwargs):
:rtype: requests.Response
"""
- return request('options', url, **kwargs)
+ return request("options", url, **kwargs)
def head(url, **kwargs):
@@ -98,8 +96,8 @@ def head(url, **kwargs):
:rtype: requests.Response
"""
- kwargs.setdefault('allow_redirects', False)
- return request('head', url, **kwargs)
+ kwargs.setdefault("allow_redirects", False)
+ return request("head", url, **kwargs)
def post(url, data=None, json=None, **kwargs):
@@ -114,7 +112,7 @@ def post(url, data=None, json=None, **kwargs):
:rtype: requests.Response
"""
- return request('post', url, data=data, json=json, **kwargs)
+ return request("post", url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
@@ -129,7 +127,7 @@ def put(url, data=None, **kwargs):
:rtype: requests.Response
"""
- return request('put', url, data=data, **kwargs)
+ return request("put", url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
@@ -144,7 +142,7 @@ def patch(url, data=None, **kwargs):
:rtype: requests.Response
"""
- return request('patch', url, data=data, **kwargs)
+ return request("patch", url, data=data, **kwargs)
def delete(url, **kwargs):
@@ -156,4 +154,4 @@ def delete(url, **kwargs):
:rtype: requests.Response
"""
- return request('delete', url, **kwargs)
+ return request("delete", url, **kwargs)
diff --git a/src/pip/_vendor/requests/auth.py b/src/pip/_vendor/requests/auth.py
index eeface39ae6..9733686ddb3 100644
--- a/src/pip/_vendor/requests/auth.py
+++ b/src/pip/_vendor/requests/auth.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.auth
~~~~~~~~~~~~~
@@ -7,22 +5,21 @@
This module contains the authentication handlers for Requests.
"""
+import hashlib
import os
import re
-import time
-import hashlib
import threading
+import time
import warnings
-
from base64 import b64encode
-from .compat import urlparse, str, basestring
-from .cookies import extract_cookies_to_jar
from ._internal_utils import to_native_string
+from .compat import basestring, str, urlparse
+from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
-CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
-CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
+CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
+CONTENT_TYPE_MULTI_PART = "multipart/form-data"
def _basic_auth_str(username, password):
@@ -57,23 +54,23 @@ def _basic_auth_str(username, password):
# -- End Removal --
if isinstance(username, str):
- username = username.encode('latin1')
+ username = username.encode("latin1")
if isinstance(password, str):
- password = password.encode('latin1')
+ password = password.encode("latin1")
- authstr = 'Basic ' + to_native_string(
- b64encode(b':'.join((username, password))).strip()
+ authstr = "Basic " + to_native_string(
+ b64encode(b":".join((username, password))).strip()
)
return authstr
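What `_basic_auth_str` assembles is the standard RFC 7617 value: the literal `Basic ` plus base64 of `username:password` (latin-1 encoded above). Equivalent one-liner::

    from base64 import b64encode

    print("Basic " + b64encode(b"user:pass").decode("ascii"))  # -> Basic dXNlcjpwYXNz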
-class AuthBase(object):
+class AuthBase:
"""Base class that all auth implementations derive from"""
def __call__(self, r):
- raise NotImplementedError('Auth hooks must be callable.')
+ raise NotImplementedError("Auth hooks must be callable.")
class HTTPBasicAuth(AuthBase):
@@ -84,16 +81,18 @@ def __init__(self, username, password):
self.password = password
def __eq__(self, other):
- return all([
- self.username == getattr(other, 'username', None),
- self.password == getattr(other, 'password', None)
- ])
+ return all(
+ [
+ self.username == getattr(other, "username", None),
+ self.password == getattr(other, "password", None),
+ ]
+ )
def __ne__(self, other):
return not self == other
def __call__(self, r):
- r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
+ r.headers["Authorization"] = _basic_auth_str(self.username, self.password)
return r
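`HTTPBasicAuth` is a callable hook that stamps the `Authorization` header onto each prepared request; equality compares both credential fields. Hedged sketch::

    from requests.auth import HTTPBasicAuth

    auth = HTTPBasicAuth("user", "pass")
    assert auth == HTTPBasicAuth("user", "pass")
    assert auth != HTTPBasicAuth("user", "other")  # __ne__ is just the negation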
@@ -101,7 +100,7 @@ class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
- r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
+ r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password)
return r
@@ -116,9 +115,9 @@ def __init__(self, username, password):
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
- if not hasattr(self._thread_local, 'init'):
+ if not hasattr(self._thread_local, "init"):
self._thread_local.init = True
- self._thread_local.last_nonce = ''
+ self._thread_local.last_nonce = ""
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
@@ -129,44 +128,52 @@ def build_digest_header(self, method, url):
:rtype: str
"""
- realm = self._thread_local.chal['realm']
- nonce = self._thread_local.chal['nonce']
- qop = self._thread_local.chal.get('qop')
- algorithm = self._thread_local.chal.get('algorithm')
- opaque = self._thread_local.chal.get('opaque')
+ realm = self._thread_local.chal["realm"]
+ nonce = self._thread_local.chal["nonce"]
+ qop = self._thread_local.chal.get("qop")
+ algorithm = self._thread_local.chal.get("algorithm")
+ opaque = self._thread_local.chal.get("opaque")
hash_utf8 = None
if algorithm is None:
- _algorithm = 'MD5'
+ _algorithm = "MD5"
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
- if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
+ if _algorithm == "MD5" or _algorithm == "MD5-SESS":
+
def md5_utf8(x):
if isinstance(x, str):
- x = x.encode('utf-8')
+ x = x.encode("utf-8")
return hashlib.md5(x).hexdigest()
+
hash_utf8 = md5_utf8
- elif _algorithm == 'SHA':
+ elif _algorithm == "SHA":
+
def sha_utf8(x):
if isinstance(x, str):
- x = x.encode('utf-8')
+ x = x.encode("utf-8")
return hashlib.sha1(x).hexdigest()
+
hash_utf8 = sha_utf8
- elif _algorithm == 'SHA-256':
+ elif _algorithm == "SHA-256":
+
def sha256_utf8(x):
if isinstance(x, str):
- x = x.encode('utf-8')
+ x = x.encode("utf-8")
return hashlib.sha256(x).hexdigest()
+
hash_utf8 = sha256_utf8
- elif _algorithm == 'SHA-512':
+ elif _algorithm == "SHA-512":
+
def sha512_utf8(x):
if isinstance(x, str):
- x = x.encode('utf-8')
+ x = x.encode("utf-8")
return hashlib.sha512(x).hexdigest()
+
hash_utf8 = sha512_utf8
- KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
+ KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731
if hash_utf8 is None:
return None
@@ -177,10 +184,10 @@ def sha512_utf8(x):
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
- path += '?' + p_parsed.query
+ path += f"?{p_parsed.query}"
- A1 = '%s:%s:%s' % (self.username, realm, self.password)
- A2 = '%s:%s' % (method, path)
+ A1 = f"{self.username}:{realm}:{self.password}"
+ A2 = f"{method}:{path}"
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
@@ -189,22 +196,20 @@ def sha512_utf8(x):
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
- ncvalue = '%08x' % self._thread_local.nonce_count
- s = str(self._thread_local.nonce_count).encode('utf-8')
- s += nonce.encode('utf-8')
- s += time.ctime().encode('utf-8')
+ ncvalue = f"{self._thread_local.nonce_count:08x}"
+ s = str(self._thread_local.nonce_count).encode("utf-8")
+ s += nonce.encode("utf-8")
+ s += time.ctime().encode("utf-8")
s += os.urandom(8)
- cnonce = (hashlib.sha1(s).hexdigest()[:16])
- if _algorithm == 'MD5-SESS':
- HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
+ cnonce = hashlib.sha1(s).hexdigest()[:16]
+ if _algorithm == "MD5-SESS":
+ HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}")
if not qop:
- respdig = KD(HA1, "%s:%s" % (nonce, HA2))
- elif qop == 'auth' or 'auth' in qop.split(','):
- noncebit = "%s:%s:%s:%s:%s" % (
- nonce, ncvalue, cnonce, 'auth', HA2
- )
+ respdig = KD(HA1, f"{nonce}:{HA2}")
+ elif qop == "auth" or "auth" in qop.split(","):
+ noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}"
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
@@ -213,18 +218,20 @@ def sha512_utf8(x):
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
- base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
- 'response="%s"' % (self.username, realm, nonce, path, respdig)
+ base = (
+ f'username="{self.username}", realm="{realm}", nonce="{nonce}", '
+ f'uri="{path}", response="{respdig}"'
+ )
if opaque:
- base += ', opaque="%s"' % opaque
+ base += f', opaque="{opaque}"'
if algorithm:
- base += ', algorithm="%s"' % algorithm
+ base += f', algorithm="{algorithm}"'
if entdig:
- base += ', digest="%s"' % entdig
+ base += f', digest="{entdig}"'
if qop:
- base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
+ base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"'
- return 'Digest %s' % (base)
+ return f"Digest {base}"
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
@@ -248,13 +255,13 @@ def handle_401(self, r, **kwargs):
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
- s_auth = r.headers.get('www-authenticate', '')
+ s_auth = r.headers.get("www-authenticate", "")
- if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
+ if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
- pat = re.compile(r'digest ', flags=re.IGNORECASE)
- self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
+ pat = re.compile(r"digest ", flags=re.IGNORECASE)
+ self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
@@ -264,8 +271,9 @@ def handle_401(self, r, **kwargs):
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
- prep.headers['Authorization'] = self.build_digest_header(
- prep.method, prep.url)
+ prep.headers["Authorization"] = self.build_digest_header(
+ prep.method, prep.url
+ )
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
@@ -280,7 +288,7 @@ def __call__(self, r):
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
- r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
+ r.headers["Authorization"] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
@@ -289,17 +297,19 @@ def __call__(self, r):
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
- r.register_hook('response', self.handle_401)
- r.register_hook('response', self.handle_redirect)
+ r.register_hook("response", self.handle_401)
+ r.register_hook("response", self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
- return all([
- self.username == getattr(other, 'username', None),
- self.password == getattr(other, 'password', None)
- ])
+ return all(
+ [
+ self.username == getattr(other, "username", None),
+ self.password == getattr(other, "password", None),
+ ]
+ )
def __ne__(self, other):
return not self == other
diff --git a/src/pip/_vendor/requests/certs.py b/src/pip/_vendor/requests/certs.py
index 06a594e58f6..2743144b994 100644
--- a/src/pip/_vendor/requests/certs.py
+++ b/src/pip/_vendor/requests/certs.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# -*- coding: utf-8 -*-
"""
requests.certs
@@ -14,5 +13,5 @@
"""
from pip._vendor.certifi import where
-if __name__ == '__main__':
+if __name__ == "__main__":
print(where())
diff --git a/src/pip/_vendor/requests/compat.py b/src/pip/_vendor/requests/compat.py
index f98cc910f9b..9ab2bb48656 100644
--- a/src/pip/_vendor/requests/compat.py
+++ b/src/pip/_vendor/requests/compat.py
@@ -1,11 +1,10 @@
-# -*- coding: utf-8 -*-
-
"""
requests.compat
~~~~~~~~~~~~~~~
-This module handles import compatibility issues between Python 2 and
-Python 3.
+This module previously handled import compatibility issues
+between Python 2 and Python 3. It remains for backwards
+compatibility until the next major version.
"""
from pip._vendor import chardet
@@ -20,58 +19,49 @@
_ver = sys.version_info
#: Python 2.x?
-is_py2 = (_ver[0] == 2)
+is_py2 = _ver[0] == 2
#: Python 3.x?
-is_py3 = (_ver[0] == 3)
+is_py3 = _ver[0] == 3
# Note: We've patched out simplejson support in pip because it prevents
# upgrading simplejson on Windows.
-# try:
-# import simplejson as json
-# except (ImportError, SyntaxError):
-# # simplejson does not support Python 3.2, it throws a SyntaxError
-# # because of u'...' Unicode literals.
import json
+from json import JSONDecodeError
-# ---------
-# Specifics
-# ---------
-
-if is_py2:
- from urllib import (
- quote, unquote, quote_plus, unquote_plus, urlencode, getproxies,
- proxy_bypass, proxy_bypass_environment, getproxies_environment)
- from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
- from urllib2 import parse_http_list
- import cookielib
- from Cookie import Morsel
- from StringIO import StringIO
- # Keep OrderedDict for backwards compatibility.
- from collections import Callable, Mapping, MutableMapping, OrderedDict
-
- builtin_str = str
- bytes = str
- str = unicode
- basestring = basestring
- numeric_types = (int, long, float)
- integer_types = (int, long)
- JSONDecodeError = ValueError
+# Keep OrderedDict for backwards compatibility.
+from collections import OrderedDict
+from collections.abc import Callable, Mapping, MutableMapping
+from http import cookiejar as cookielib
+from http.cookies import Morsel
+from io import StringIO
-elif is_py3:
- from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
- from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment
- from http import cookiejar as cookielib
- from http.cookies import Morsel
- from io import StringIO
- # Keep OrderedDict for backwards compatibility.
- from collections import OrderedDict
- from collections.abc import Callable, Mapping, MutableMapping
- from json import JSONDecodeError
+# --------------
+# Legacy Imports
+# --------------
+from urllib.parse import (
+ quote,
+ quote_plus,
+ unquote,
+ unquote_plus,
+ urldefrag,
+ urlencode,
+ urljoin,
+ urlparse,
+ urlsplit,
+ urlunparse,
+)
+from urllib.request import (
+ getproxies,
+ getproxies_environment,
+ parse_http_list,
+ proxy_bypass,
+ proxy_bypass_environment,
+)
- builtin_str = str
- str = str
- bytes = bytes
- basestring = (str, bytes)
- numeric_types = (int, float)
- integer_types = (int,)
+builtin_str = str
+str = str
+bytes = bytes
+basestring = (str, bytes)
+numeric_types = (int, float)
+integer_types = (int,)
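With the Python 2 branch gone, `compat` keeps exporting the same names purely for backwards compatibility, so downstream code works unchanged. Hedged sketch::

    from pip._vendor.requests.compat import basestring, urlparse

    assert isinstance("x", basestring) and isinstance(b"x", basestring)
    print(urlparse("https://example.org/a?b=c").query)  # -> b=c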
diff --git a/src/pip/_vendor/requests/cookies.py b/src/pip/_vendor/requests/cookies.py
index 56fccd9c257..bf54ab237e4 100644
--- a/src/pip/_vendor/requests/cookies.py
+++ b/src/pip/_vendor/requests/cookies.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.cookies
~~~~~~~~~~~~~~~~
@@ -9,12 +7,12 @@
requests.utils imports from here, so be careful with imports.
"""
+import calendar
import copy
import time
-import calendar
from ._internal_utils import to_native_string
-from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping
+from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse
try:
import threading
@@ -22,7 +20,7 @@
import dummy_threading as threading
-class MockRequest(object):
+class MockRequest:
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
@@ -51,16 +49,22 @@ def get_origin_req_host(self):
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
- if not self._r.headers.get('Host'):
+ if not self._r.headers.get("Host"):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
- host = to_native_string(self._r.headers['Host'], encoding='utf-8')
+ host = to_native_string(self._r.headers["Host"], encoding="utf-8")
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
- return urlunparse([
- parsed.scheme, host, parsed.path, parsed.params, parsed.query,
- parsed.fragment
- ])
+ return urlunparse(
+ [
+ parsed.scheme,
+ host,
+ parsed.path,
+ parsed.params,
+ parsed.query,
+ parsed.fragment,
+ ]
+ )
def is_unverifiable(self):
return True
@@ -73,7 +77,9 @@ def get_header(self, name, default=None):
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
- raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
+ raise NotImplementedError(
+ "Cookie headers should be added with add_unredirected_header()"
+ )
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
@@ -94,7 +100,7 @@ def host(self):
return self.get_host()
-class MockResponse(object):
+class MockResponse:
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
@@ -122,8 +128,7 @@ def extract_cookies_to_jar(jar, request, response):
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
- if not (hasattr(response, '_original_response') and
- response._original_response):
+ if not (hasattr(response, "_original_response") and response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
@@ -140,7 +145,7 @@ def get_cookie_header(jar, request):
"""
r = MockRequest(request)
jar.add_cookie_header(r)
- return r.get_new_headers().get('Cookie')
+ return r.get_new_headers().get("Cookie")
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
@@ -205,7 +210,9 @@ def set(self, name, value, **kwargs):
"""
# support client code that unsets cookies by assignment of a None value:
if value is None:
- remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
+ remove_cookie_by_name(
+ self, name, domain=kwargs.get("domain"), path=kwargs.get("path")
+ )
return
if isinstance(value, Morsel):
@@ -305,16 +312,15 @@ def get_dict(self, domain=None, path=None):
"""
dictionary = {}
for cookie in iter(self):
- if (
- (domain is None or cookie.domain == domain) and
- (path is None or cookie.path == path)
+ if (domain is None or cookie.domain == domain) and (
+ path is None or cookie.path == path
):
dictionary[cookie.name] = cookie.value
return dictionary
def __contains__(self, name):
try:
- return super(RequestsCookieJar, self).__contains__(name)
+ return super().__contains__(name)
except CookieConflictError:
return True
@@ -341,9 +347,13 @@ def __delitem__(self, name):
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
- if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
- cookie.value = cookie.value.replace('\\"', '')
- return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
+ if (
+ hasattr(cookie.value, "startswith")
+ and cookie.value.startswith('"')
+ and cookie.value.endswith('"')
+ ):
+ cookie.value = cookie.value.replace('\\"', "")
+ return super().set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
@@ -351,7 +361,7 @@ def update(self, other):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
- super(RequestsCookieJar, self).update(other)
+ super().update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
@@ -371,7 +381,7 @@ def _find(self, name, domain=None, path=None):
if path is None or cookie.path == path:
return cookie.value
- raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
+ raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
@@ -390,25 +400,29 @@ def _find_no_duplicates(self, name, domain=None, path=None):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
- if toReturn is not None: # if there are multiple cookies that meet passed in criteria
- raise CookieConflictError('There are multiple cookies with name, %r' % (name))
- toReturn = cookie.value # we will eventually return this as long as no cookie conflict
+ if toReturn is not None:
+ # if there are multiple cookies that meet passed in criteria
+ raise CookieConflictError(
+ f"There are multiple cookies with name, {name!r}"
+ )
+ # we will eventually return this as long as no cookie conflict
+ toReturn = cookie.value
if toReturn:
return toReturn
- raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
+ raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
- state.pop('_cookies_lock')
+ state.pop("_cookies_lock")
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
- if '_cookies_lock' not in self.__dict__:
+ if "_cookies_lock" not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
@@ -427,7 +441,7 @@ def _copy_cookie_jar(jar):
if jar is None:
return None
- if hasattr(jar, 'copy'):
+ if hasattr(jar, "copy"):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
@@ -445,31 +459,32 @@ def create_cookie(name, value, **kwargs):
and sent on every request (this is sometimes called a "supercookie").
"""
result = {
- 'version': 0,
- 'name': name,
- 'value': value,
- 'port': None,
- 'domain': '',
- 'path': '/',
- 'secure': False,
- 'expires': None,
- 'discard': True,
- 'comment': None,
- 'comment_url': None,
- 'rest': {'HttpOnly': None},
- 'rfc2109': False,
+ "version": 0,
+ "name": name,
+ "value": value,
+ "port": None,
+ "domain": "",
+ "path": "/",
+ "secure": False,
+ "expires": None,
+ "discard": True,
+ "comment": None,
+ "comment_url": None,
+ "rest": {"HttpOnly": None},
+ "rfc2109": False,
}
badargs = set(kwargs) - set(result)
if badargs:
- err = 'create_cookie() got unexpected keyword arguments: %s'
- raise TypeError(err % list(badargs))
+ raise TypeError(
+ f"create_cookie() got unexpected keyword arguments: {list(badargs)}"
+ )
result.update(kwargs)
- result['port_specified'] = bool(result['port'])
- result['domain_specified'] = bool(result['domain'])
- result['domain_initial_dot'] = result['domain'].startswith('.')
- result['path_specified'] = bool(result['path'])
+ result["port_specified"] = bool(result["port"])
+ result["domain_specified"] = bool(result["domain"])
+ result["domain_initial_dot"] = result["domain"].startswith(".")
+ result["path_specified"] = bool(result["path"])
return cookielib.Cookie(**result)
@@ -478,30 +493,28 @@ def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
- if morsel['max-age']:
+ if morsel["max-age"]:
try:
- expires = int(time.time() + int(morsel['max-age']))
+ expires = int(time.time() + int(morsel["max-age"]))
except ValueError:
- raise TypeError('max-age: %s must be integer' % morsel['max-age'])
- elif morsel['expires']:
- time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
- expires = calendar.timegm(
- time.strptime(morsel['expires'], time_template)
- )
+ raise TypeError(f"max-age: {morsel['max-age']} must be integer")
+ elif morsel["expires"]:
+ time_template = "%a, %d-%b-%Y %H:%M:%S GMT"
+ expires = calendar.timegm(time.strptime(morsel["expires"], time_template))
return create_cookie(
- comment=morsel['comment'],
- comment_url=bool(morsel['comment']),
+ comment=morsel["comment"],
+ comment_url=bool(morsel["comment"]),
discard=False,
- domain=morsel['domain'],
+ domain=morsel["domain"],
expires=expires,
name=morsel.key,
- path=morsel['path'],
+ path=morsel["path"],
port=None,
- rest={'HttpOnly': morsel['httponly']},
+ rest={"HttpOnly": morsel["httponly"]},
rfc2109=False,
- secure=bool(morsel['secure']),
+ secure=bool(morsel["secure"]),
value=morsel.value,
- version=morsel['version'] or 0,
+ version=morsel["version"] or 0,
)
@@ -534,11 +547,10 @@ def merge_cookies(cookiejar, cookies):
:rtype: CookieJar
"""
if not isinstance(cookiejar, cookielib.CookieJar):
- raise ValueError('You can only merge into CookieJar')
+ raise ValueError("You can only merge into CookieJar")
if isinstance(cookies, dict):
- cookiejar = cookiejar_from_dict(
- cookies, cookiejar=cookiejar, overwrite=False)
+ cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
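
A short usage sketch of the jar API touched above (standalone requests assumed; example.com is a placeholder domain):

    import requests

    jar = requests.cookies.RequestsCookieJar()
    jar.set("token", "abc123", domain="example.com", path="/api")

    # __getitem__ goes through _find_no_duplicates(), which raises
    # CookieConflictError if several cookies share the requested name
    assert jar["token"] == "abc123"

    # get_dict() applies the same domain/path filtering shown in the hunk above
    print(jar.get_dict(domain="example.com"))  # {'token': 'abc123'}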
diff --git a/src/pip/_vendor/requests/exceptions.py b/src/pip/_vendor/requests/exceptions.py
index 83b9232e4cb..168d07390df 100644
--- a/src/pip/_vendor/requests/exceptions.py
+++ b/src/pip/_vendor/requests/exceptions.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
@@ -18,13 +16,12 @@ class RequestException(IOError):
def __init__(self, *args, **kwargs):
"""Initialize RequestException with `request` and `response` objects."""
- response = kwargs.pop('response', None)
+ response = kwargs.pop("response", None)
self.response = response
- self.request = kwargs.pop('request', None)
- if (response is not None and not self.request and
- hasattr(response, 'request')):
+ self.request = kwargs.pop("request", None)
+ if response is not None and not self.request and hasattr(response, "request"):
self.request = self.response.request
- super(RequestException, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
class InvalidJSONError(RequestException):
@@ -34,6 +31,16 @@ class InvalidJSONError(RequestException):
class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError):
"""Couldn't decode the text into json"""
+ def __init__(self, *args, **kwargs):
+ """
+ Construct the JSONDecodeError instance first with all
+ args. Then use its args to construct the IOError so that
+ the json specific args aren't used as IOError specific args
+ and the error message from JSONDecodeError is preserved.
+ """
+ CompatJSONDecodeError.__init__(self, *args)
+ InvalidJSONError.__init__(self, *self.args, **kwargs)
+
class HTTPError(RequestException):
"""An HTTP error occurred."""
@@ -118,6 +125,7 @@ class RetryError(RequestException):
class UnrewindableBodyError(RequestException):
"""Requests encountered an error when trying to rewind a body."""
+
# Warnings
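
The new double ``__init__`` can be exercised directly; the point of calling ``CompatJSONDecodeError.__init__`` first is that ``msg``/``doc``/``pos`` are populated before the ``IOError`` chain sees ``self.args``. A minimal sketch against the standalone requests package, assuming the stdlib ``json`` backend (``CompatJSONDecodeError`` may be simplejson's if that is installed):

    from json import JSONDecodeError as StdJSONDecodeError

    from requests.exceptions import InvalidJSONError, JSONDecodeError

    err = JSONDecodeError("Expecting value", "not json", 0)

    # One exception type satisfies both except clauses
    assert isinstance(err, InvalidJSONError)
    assert isinstance(err, StdJSONDecodeError)

    # The json-specific attributes survive because the compat __init__ ran first
    assert err.msg == "Expecting value"
    assert err.pos == 0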
diff --git a/src/pip/_vendor/requests/help.py b/src/pip/_vendor/requests/help.py
index 745f0d7b346..2d292c2f062 100644
--- a/src/pip/_vendor/requests/help.py
+++ b/src/pip/_vendor/requests/help.py
@@ -1,10 +1,9 @@
"""Module containing bug report helper(s)."""
-from __future__ import print_function
import json
import platform
-import sys
import ssl
+import sys
from pip._vendor import idna
from pip._vendor import urllib3
@@ -25,16 +24,16 @@
OpenSSL = None
cryptography = None
else:
- import OpenSSL
import cryptography
+ import OpenSSL
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
- currently running. For example, on CPython 2.7.5 it will return
- {'name': 'CPython', 'version': '2.7.5'}.
+ currently running. For example, on CPython 3.10.3 it will return
+ {'name': 'CPython', 'version': '3.10.3'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
@@ -42,83 +41,83 @@ def _implementation():
"""
implementation = platform.python_implementation()
- if implementation == 'CPython':
+ if implementation == "CPython":
implementation_version = platform.python_version()
- elif implementation == 'PyPy':
- implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
- sys.pypy_version_info.minor,
- sys.pypy_version_info.micro)
- if sys.pypy_version_info.releaselevel != 'final':
- implementation_version = ''.join([
- implementation_version, sys.pypy_version_info.releaselevel
- ])
- elif implementation == 'Jython':
+ elif implementation == "PyPy":
+ implementation_version = "{}.{}.{}".format(
+ sys.pypy_version_info.major,
+ sys.pypy_version_info.minor,
+ sys.pypy_version_info.micro,
+ )
+ if sys.pypy_version_info.releaselevel != "final":
+ implementation_version = "".join(
+ [implementation_version, sys.pypy_version_info.releaselevel]
+ )
+ elif implementation == "Jython":
implementation_version = platform.python_version() # Complete Guess
- elif implementation == 'IronPython':
+ elif implementation == "IronPython":
implementation_version = platform.python_version() # Complete Guess
else:
- implementation_version = 'Unknown'
+ implementation_version = "Unknown"
- return {'name': implementation, 'version': implementation_version}
+ return {"name": implementation, "version": implementation_version}
def info():
"""Generate information for a bug report."""
try:
platform_info = {
- 'system': platform.system(),
- 'release': platform.release(),
+ "system": platform.system(),
+ "release": platform.release(),
}
- except IOError:
+ except OSError:
platform_info = {
- 'system': 'Unknown',
- 'release': 'Unknown',
+ "system": "Unknown",
+ "release": "Unknown",
}
implementation_info = _implementation()
- urllib3_info = {'version': urllib3.__version__}
- charset_normalizer_info = {'version': None}
- chardet_info = {'version': None}
+ urllib3_info = {"version": urllib3.__version__}
+ charset_normalizer_info = {"version": None}
+ chardet_info = {"version": None}
if charset_normalizer:
- charset_normalizer_info = {'version': charset_normalizer.__version__}
+ charset_normalizer_info = {"version": charset_normalizer.__version__}
if chardet:
- chardet_info = {'version': chardet.__version__}
+ chardet_info = {"version": chardet.__version__}
pyopenssl_info = {
- 'version': None,
- 'openssl_version': '',
+ "version": None,
+ "openssl_version": "",
}
if OpenSSL:
pyopenssl_info = {
- 'version': OpenSSL.__version__,
- 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
+ "version": OpenSSL.__version__,
+ "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}",
}
cryptography_info = {
- 'version': getattr(cryptography, '__version__', ''),
+ "version": getattr(cryptography, "__version__", ""),
}
idna_info = {
- 'version': getattr(idna, '__version__', ''),
+ "version": getattr(idna, "__version__", ""),
}
system_ssl = ssl.OPENSSL_VERSION_NUMBER
- system_ssl_info = {
- 'version': '%x' % system_ssl if system_ssl is not None else ''
- }
+ system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""}
return {
- 'platform': platform_info,
- 'implementation': implementation_info,
- 'system_ssl': system_ssl_info,
- 'using_pyopenssl': pyopenssl is not None,
- 'using_charset_normalizer': chardet is None,
- 'pyOpenSSL': pyopenssl_info,
- 'urllib3': urllib3_info,
- 'chardet': chardet_info,
- 'charset_normalizer': charset_normalizer_info,
- 'cryptography': cryptography_info,
- 'idna': idna_info,
- 'requests': {
- 'version': requests_version,
+ "platform": platform_info,
+ "implementation": implementation_info,
+ "system_ssl": system_ssl_info,
+ "using_pyopenssl": pyopenssl is not None,
+ "using_charset_normalizer": chardet is None,
+ "pyOpenSSL": pyopenssl_info,
+ "urllib3": urllib3_info,
+ "chardet": chardet_info,
+ "charset_normalizer": charset_normalizer_info,
+ "cryptography": cryptography_info,
+ "idna": idna_info,
+ "requests": {
+ "version": requests_version,
},
}
@@ -128,5 +127,5 @@ def main():
print(json.dumps(info(), sort_keys=True, indent=2))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
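
The module doubles as a CLI via the ``__main__`` guard above; both entry points below follow directly from the file as patched, and the output keys match the dict literal in ``info()``:

    # Either invoke the module...
    #   python -m requests.help
    # ...or call info() programmatically:
    from requests.help import info

    report = info()
    print(report["implementation"]["name"])     # e.g. 'CPython'
    print(report["implementation"]["version"])  # e.g. '3.10.3'
    print(report["system_ssl"]["version"])      # hex OpenSSL version number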
diff --git a/src/pip/_vendor/requests/hooks.py b/src/pip/_vendor/requests/hooks.py
index 7a51f212c8a..d181ba2ec2e 100644
--- a/src/pip/_vendor/requests/hooks.py
+++ b/src/pip/_vendor/requests/hooks.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.hooks
~~~~~~~~~~~~~~
@@ -11,12 +9,13 @@
``response``:
The response generated from a Request.
"""
-HOOKS = ['response']
+HOOKS = ["response"]
def default_hooks():
return {event: [] for event in HOOKS}
+
# TODO: response is the only one
@@ -25,7 +24,7 @@ def dispatch_hook(key, hooks, hook_data, **kwargs):
hooks = hooks or {}
hooks = hooks.get(key)
if hooks:
- if hasattr(hooks, '__call__'):
+ if hasattr(hooks, "__call__"):
hooks = [hooks]
for hook in hooks:
_hook_data = hook(hook_data, **kwargs)
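
``dispatch_hook`` is what ultimately runs user-supplied response hooks; the ``hasattr(hooks, "__call__")`` branch above is what lets a bare callable stand in for a list. A sketch using the public API (the URL is a placeholder):

    import requests

    def log_status(response, **kwargs):
        # Returning None keeps the original response; returning a value replaces it
        print(response.status_code, response.url)

    # 'response' is the only event in HOOKS, and a single callable is accepted
    requests.get("https://example.org", hooks={"response": log_status})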
diff --git a/src/pip/_vendor/requests/models.py b/src/pip/_vendor/requests/models.py
index f538c1054d5..b45e8103258 100644
--- a/src/pip/_vendor/requests/models.py
+++ b/src/pip/_vendor/requests/models.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.models
~~~~~~~~~~~~~~~
@@ -8,48 +6,72 @@
"""
import datetime
-import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
-import encodings.idna
+import encodings.idna # noqa: F401
+from io import UnsupportedOperation
+from pip._vendor.urllib3.exceptions import (
+ DecodeError,
+ LocationParseError,
+ ProtocolError,
+ ReadTimeoutError,
+ SSLError,
+)
from pip._vendor.urllib3.fields import RequestField
from pip._vendor.urllib3.filepost import encode_multipart_formdata
from pip._vendor.urllib3.util import parse_url
-from pip._vendor.urllib3.exceptions import (
- DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
-
-from io import UnsupportedOperation
-from .hooks import default_hooks
-from .structures import CaseInsensitiveDict
-from .auth import HTTPBasicAuth
-from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
-from .exceptions import (
- HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
- ContentDecodingError, ConnectionError, StreamConsumedError,
- InvalidJSONError)
-from .exceptions import JSONDecodeError as RequestsJSONDecodeError
from ._internal_utils import to_native_string, unicode_is_ascii
-from .utils import (
- guess_filename, get_auth_from_url, requote_uri,
- stream_decode_response_unicode, to_key_val_list, parse_header_links,
- iter_slices, guess_json_utf, super_len, check_header_validity)
+from .auth import HTTPBasicAuth
from .compat import (
- Callable, Mapping,
- cookielib, urlunparse, urlsplit, urlencode, str, bytes,
- is_py2, chardet, builtin_str, basestring, JSONDecodeError)
+ Callable,
+ JSONDecodeError,
+ Mapping,
+ basestring,
+ builtin_str,
+ chardet,
+ cookielib,
+)
from .compat import json as complexjson
+from .compat import urlencode, urlsplit, urlunparse
+from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header
+from .exceptions import (
+ ChunkedEncodingError,
+ ConnectionError,
+ ContentDecodingError,
+ HTTPError,
+ InvalidJSONError,
+ InvalidURL,
+)
+from .exceptions import JSONDecodeError as RequestsJSONDecodeError
+from .exceptions import MissingSchema
+from .exceptions import SSLError as RequestsSSLError
+from .exceptions import StreamConsumedError
+from .hooks import default_hooks
from .status_codes import codes
+from .structures import CaseInsensitiveDict
+from .utils import (
+ check_header_validity,
+ get_auth_from_url,
+ guess_filename,
+ guess_json_utf,
+ iter_slices,
+ parse_header_links,
+ requote_uri,
+ stream_decode_response_unicode,
+ super_len,
+ to_key_val_list,
+)
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
- codes.moved, # 301
- codes.found, # 302
- codes.other, # 303
+ codes.moved, # 301
+ codes.found, # 302
+ codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
@@ -59,7 +81,7 @@
ITER_CHUNK_SIZE = 512
-class RequestEncodingMixin(object):
+class RequestEncodingMixin:
@property
def path_url(self):
"""Build the path URL to use."""
@@ -70,16 +92,16 @@ def path_url(self):
path = p.path
if not path:
- path = '/'
+ path = "/"
url.append(path)
query = p.query
if query:
- url.append('?')
+ url.append("?")
url.append(query)
- return ''.join(url)
+ return "".join(url)
@staticmethod
def _encode_params(data):
@@ -92,18 +114,21 @@ def _encode_params(data):
if isinstance(data, (str, bytes)):
return data
- elif hasattr(data, 'read'):
+ elif hasattr(data, "read"):
return data
- elif hasattr(data, '__iter__'):
+ elif hasattr(data, "__iter__"):
result = []
for k, vs in to_key_val_list(data):
- if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
+ if isinstance(vs, basestring) or not hasattr(vs, "__iter__"):
vs = [vs]
for v in vs:
if v is not None:
result.append(
- (k.encode('utf-8') if isinstance(k, str) else k,
- v.encode('utf-8') if isinstance(v, str) else v))
+ (
+ k.encode("utf-8") if isinstance(k, str) else k,
+ v.encode("utf-8") if isinstance(v, str) else v,
+ )
+ )
return urlencode(result, doseq=True)
else:
return data
@@ -118,7 +143,7 @@ def _encode_files(files, data):
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
or 4-tuples (filename, fileobj, contentype, custom_headers).
"""
- if (not files):
+ if not files:
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
@@ -128,7 +153,7 @@ def _encode_files(files, data):
files = to_key_val_list(files or {})
for field, val in fields:
- if isinstance(val, basestring) or not hasattr(val, '__iter__'):
+ if isinstance(val, basestring) or not hasattr(val, "__iter__"):
val = [val]
for v in val:
if v is not None:
@@ -137,8 +162,13 @@ def _encode_files(files, data):
v = str(v)
new_fields.append(
- (field.decode('utf-8') if isinstance(field, bytes) else field,
- v.encode('utf-8') if isinstance(v, str) else v))
+ (
+ field.decode("utf-8")
+ if isinstance(field, bytes)
+ else field,
+ v.encode("utf-8") if isinstance(v, str) else v,
+ )
+ )
for (k, v) in files:
# support for explicit filename
@@ -157,7 +187,7 @@ def _encode_files(files, data):
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
- elif hasattr(fp, 'read'):
+ elif hasattr(fp, "read"):
fdata = fp.read()
elif fp is None:
continue
@@ -173,16 +203,16 @@ def _encode_files(files, data):
return body, content_type
-class RequestHooksMixin(object):
+class RequestHooksMixin:
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
- raise ValueError('Unsupported event specified, with event name "%s"' % (event))
+ raise ValueError(f'Unsupported event specified, with event name "{event}"')
if isinstance(hook, Callable):
self.hooks[event].append(hook)
- elif hasattr(hook, '__iter__'):
+ elif hasattr(hook, "__iter__"):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
@@ -225,9 +255,19 @@ class Request(RequestHooksMixin):
"""
- def __init__(self,
- method=None, url=None, headers=None, files=None, data=None,
- params=None, auth=None, cookies=None, hooks=None, json=None):
+ def __init__(
+ self,
+ method=None,
+ url=None,
+ headers=None,
+ files=None,
+ data=None,
+ params=None,
+ auth=None,
+ cookies=None,
+ hooks=None,
+ json=None,
+ ):
# Default empty dicts for dict params.
data = [] if data is None else data
@@ -251,7 +291,7 @@ def __init__(self,
self.cookies = cookies
def __repr__(self):
- return '<Request [%s]>' % (self.method)
+ return f"<Request [{self.method}]>"
def prepare(self):
"""Constructs a :class:`PreparedRequest ` for transmission and returns it."""
@@ -309,9 +349,19 @@ def __init__(self):
#: integer denoting starting position of a readable file-like body.
self._body_position = None
- def prepare(self,
- method=None, url=None, headers=None, files=None, data=None,
- params=None, auth=None, cookies=None, hooks=None, json=None):
+ def prepare(
+ self,
+ method=None,
+ url=None,
+ headers=None,
+ files=None,
+ data=None,
+ params=None,
+ auth=None,
+ cookies=None,
+ hooks=None,
+ json=None,
+ ):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
@@ -328,7 +378,7 @@ def prepare(self,
self.prepare_hooks(hooks)
def __repr__(self):
- return '<PreparedRequest [%s]>' % (self.method)
+ return f"<PreparedRequest [{self.method}]>"
def copy(self):
p = PreparedRequest()
@@ -352,7 +402,7 @@ def _get_idna_encoded_host(host):
from pip._vendor import idna
try:
- host = idna.encode(host, uts46=True).decode('utf-8')
+ host = idna.encode(host, uts46=True).decode("utf-8")
except idna.IDNAError:
raise UnicodeError
return host
@@ -365,9 +415,9 @@ def prepare_url(self, url, params):
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
- url = url.decode('utf8')
+ url = url.decode("utf8")
else:
- url = unicode(url) if is_py2 else str(url)
+ url = str(url)
# Remove leading whitespaces from url
url = url.lstrip()
@@ -375,7 +425,7 @@ def prepare_url(self, url, params):
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
- if ':' in url and not url.lower().startswith('http'):
+ if ":" in url and not url.lower().startswith("http"):
self.url = url
return
@@ -386,13 +436,13 @@ def prepare_url(self, url, params):
raise InvalidURL(*e.args)
if not scheme:
- error = ("Invalid URL {0!r}: No scheme supplied. Perhaps you meant http://{0}?")
- error = error.format(to_native_string(url, 'utf8'))
-
- raise MissingSchema(error)
+ raise MissingSchema(
+ f"Invalid URL {url!r}: No scheme supplied. "
+ f"Perhaps you meant http://{url}?"
+ )
if not host:
- raise InvalidURL("Invalid URL %r: No host supplied" % url)
+ raise InvalidURL(f"Invalid URL {url!r}: No host supplied")
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
@@ -402,33 +452,21 @@ def prepare_url(self, url, params):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
- raise InvalidURL('URL has an invalid label.')
- elif host.startswith((u'*', u'.')):
- raise InvalidURL('URL has an invalid label.')
+ raise InvalidURL("URL has an invalid label.")
+ elif host.startswith(("*", ".")):
+ raise InvalidURL("URL has an invalid label.")
# Carefully reconstruct the network location
- netloc = auth or ''
+ netloc = auth or ""
if netloc:
- netloc += '@'
+ netloc += "@"
netloc += host
if port:
- netloc += ':' + str(port)
+ netloc += f":{port}"
# Bare domains aren't valid URLs.
if not path:
- path = '/'
-
- if is_py2:
- if isinstance(scheme, str):
- scheme = scheme.encode('utf-8')
- if isinstance(netloc, str):
- netloc = netloc.encode('utf-8')
- if isinstance(path, str):
- path = path.encode('utf-8')
- if isinstance(query, str):
- query = query.encode('utf-8')
- if isinstance(fragment, str):
- fragment = fragment.encode('utf-8')
+ path = "/"
if isinstance(params, (str, bytes)):
params = to_native_string(params)
@@ -436,7 +474,7 @@ def prepare_url(self, url, params):
enc_params = self._encode_params(params)
if enc_params:
if query:
- query = '%s&%s' % (query, enc_params)
+ query = f"{query}&{enc_params}"
else:
query = enc_params
@@ -467,7 +505,7 @@ def prepare_body(self, data, files, json=None):
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
- content_type = 'application/json'
+ content_type = "application/json"
try:
body = complexjson.dumps(json, allow_nan=False)
@@ -475,12 +513,14 @@ def prepare_body(self, data, files, json=None):
raise InvalidJSONError(ve, request=self)
if not isinstance(body, bytes):
- body = body.encode('utf-8')
+ body = body.encode("utf-8")
- is_stream = all([
- hasattr(data, '__iter__'),
- not isinstance(data, (basestring, list, tuple, Mapping))
- ])
+ is_stream = all(
+ [
+ hasattr(data, "__iter__"),
+ not isinstance(data, (basestring, list, tuple, Mapping)),
+ ]
+ )
if is_stream:
try:
@@ -490,24 +530,26 @@ def prepare_body(self, data, files, json=None):
body = data
- if getattr(body, 'tell', None) is not None:
+ if getattr(body, "tell", None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
- except (IOError, OSError):
+ except OSError:
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
- raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
+ raise NotImplementedError(
+ "Streamed bodies and files are mutually exclusive."
+ )
if length:
- self.headers['Content-Length'] = builtin_str(length)
+ self.headers["Content-Length"] = builtin_str(length)
else:
- self.headers['Transfer-Encoding'] = 'chunked'
+ self.headers["Transfer-Encoding"] = "chunked"
else:
# Multi-part file uploads.
if files:
@@ -515,16 +557,16 @@ def prepare_body(self, data, files, json=None):
else:
if data:
body = self._encode_params(data)
- if isinstance(data, basestring) or hasattr(data, 'read'):
+ if isinstance(data, basestring) or hasattr(data, "read"):
content_type = None
else:
- content_type = 'application/x-www-form-urlencoded'
+ content_type = "application/x-www-form-urlencoded"
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
- if content_type and ('content-type' not in self.headers):
- self.headers['Content-Type'] = content_type
+ if content_type and ("content-type" not in self.headers):
+ self.headers["Content-Type"] = content_type
self.body = body
@@ -535,13 +577,16 @@ def prepare_content_length(self, body):
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
- self.headers['Content-Length'] = builtin_str(length)
- elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
+ self.headers["Content-Length"] = builtin_str(length)
+ elif (
+ self.method not in ("GET", "HEAD")
+ and self.headers.get("Content-Length") is None
+ ):
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
- self.headers['Content-Length'] = '0'
+ self.headers["Content-Length"] = "0"
- def prepare_auth(self, auth, url=''):
+ def prepare_auth(self, auth, url=""):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
@@ -581,7 +626,7 @@ def prepare_cookies(self, cookies):
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
- self.headers['Cookie'] = cookie_header
+ self.headers["Cookie"] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
@@ -593,14 +638,22 @@ def prepare_hooks(self, hooks):
self.register_hook(event, hooks[event])
-class Response(object):
+class Response:
"""The :class:`Response ` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
- '_content', 'status_code', 'headers', 'url', 'history',
- 'encoding', 'reason', 'cookies', 'elapsed', 'request'
+ "_content",
+ "status_code",
+ "headers",
+ "url",
+ "history",
+ "encoding",
+ "reason",
+ "cookies",
+ "elapsed",
+ "request",
]
def __init__(self):
@@ -669,11 +722,11 @@ def __setstate__(self, state):
setattr(self, name, value)
# pickled objects do not have .raw
- setattr(self, '_content_consumed', True)
- setattr(self, 'raw', None)
+ setattr(self, "_content_consumed", True)
+ setattr(self, "raw", None)
def __repr__(self):
- return '<Response [%s]>' % (self.status_code)
+ return f"<Response [{self.status_code}]>"
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
@@ -719,12 +772,15 @@ def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
- return ('location' in self.headers and self.status_code in REDIRECT_STATI)
+ return "location" in self.headers and self.status_code in REDIRECT_STATI
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
- return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
+ return "location" in self.headers and self.status_code in (
+ codes.moved_permanently,
+ codes.permanent_redirect,
+ )
@property
def next(self):
@@ -734,7 +790,7 @@ def next(self):
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the charset_normalizer or chardet libraries."""
- return chardet.detect(self.content)['encoding']
+ return chardet.detect(self.content)["encoding"]
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
@@ -755,16 +811,17 @@ def iter_content(self, chunk_size=1, decode_unicode=False):
def generate():
# Special case for urllib3.
- if hasattr(self.raw, 'stream'):
+ if hasattr(self.raw, "stream"):
try:
- for chunk in self.raw.stream(chunk_size, decode_content=True):
- yield chunk
+ yield from self.raw.stream(chunk_size, decode_content=True)
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
+ except SSLError as e:
+ raise RequestsSSLError(e)
else:
# Standard file-like object.
while True:
@@ -778,7 +835,9 @@ def generate():
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
- raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
+ raise TypeError(
+ f"chunk_size must be an int, it is instead a {type(chunk_size)}."
+ )
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
@@ -791,7 +850,9 @@ def generate():
return chunks
- def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
+ def iter_lines(
+ self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None
+ ):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
@@ -801,7 +862,9 @@ def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter
pending = None
- for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
+ for chunk in self.iter_content(
+ chunk_size=chunk_size, decode_unicode=decode_unicode
+ ):
if pending is not None:
chunk = pending + chunk
@@ -816,8 +879,7 @@ def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter
else:
pending = None
- for line in lines:
- yield line
+ yield from lines
if pending is not None:
yield pending
@@ -829,13 +891,12 @@ def content(self):
if self._content is False:
# Read the contents.
if self._content_consumed:
- raise RuntimeError(
- 'The content for this response was already consumed')
+ raise RuntimeError("The content for this response was already consumed")
if self.status_code == 0 or self.raw is None:
self._content = None
else:
- self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
+ self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b""
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
@@ -860,7 +921,7 @@ def text(self):
encoding = self.encoding
if not self.content:
- return str('')
+ return ""
# Fallback to auto-detected encoding.
if self.encoding is None:
@@ -868,7 +929,7 @@ def text(self):
# Decode unicode from given encoding.
try:
- content = str(self.content, encoding, errors='replace')
+ content = str(self.content, encoding, errors="replace")
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
@@ -876,7 +937,7 @@ def text(self):
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
- content = str(self.content, errors='replace')
+ content = str(self.content, errors="replace")
return content
@@ -896,65 +957,65 @@ def json(self, **kwargs):
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
- return complexjson.loads(
- self.content.decode(encoding), **kwargs
- )
+ return complexjson.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
+ except JSONDecodeError as e:
+ raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
try:
return complexjson.loads(self.text, **kwargs)
except JSONDecodeError as e:
# Catch JSON-related errors and raise as requests.JSONDecodeError
# This aliases json.JSONDecodeError and simplejson.JSONDecodeError
- if is_py2: # e is a ValueError
- raise RequestsJSONDecodeError(e.message)
- else:
- raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
+ raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
- header = self.headers.get('link')
+ header = self.headers.get("link")
- # l = MultiDict()
- l = {}
+ resolved_links = {}
if header:
links = parse_header_links(header)
for link in links:
- key = link.get('rel') or link.get('url')
- l[key] = link
+ key = link.get("rel") or link.get("url")
+ resolved_links[key] = link
- return l
+ return resolved_links
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
- http_error_msg = ''
+ http_error_msg = ""
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
- reason = self.reason.decode('utf-8')
+ reason = self.reason.decode("utf-8")
except UnicodeDecodeError:
- reason = self.reason.decode('iso-8859-1')
+ reason = self.reason.decode("iso-8859-1")
else:
reason = self.reason
if 400 <= self.status_code < 500:
- http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
+ http_error_msg = (
+ f"{self.status_code} Client Error: {reason} for url: {self.url}"
+ )
elif 500 <= self.status_code < 600:
- http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
+ http_error_msg = (
+ f"{self.status_code} Server Error: {reason} for url: {self.url}"
+ )
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
@@ -968,6 +1029,6 @@ def close(self):
if not self._content_consumed:
self.raw.close()
- release_conn = getattr(self.raw, 'release_conn', None)
+ release_conn = getattr(self.raw, "release_conn", None)
if release_conn is not None:
release_conn()
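
A compact walk through the Request → PreparedRequest → Response pipeline this file implements (standalone requests assumed; the URL is a placeholder, and the send step performs a real network request):

    import requests

    req = requests.Request("POST", "https://example.org/api", json={"x": 1})
    prepped = req.prepare()  # prepare_url/prepare_body/prepare_content_length etc.

    print(prepped.headers["Content-Type"])    # application/json
    print(prepped.headers["Content-Length"])  # length of the encoded JSON body

    with requests.Session() as s:
        resp = s.send(prepped)
        # raise_for_status() raises HTTPError using the f-string messages above
        resp.raise_for_status()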
diff --git a/src/pip/_vendor/requests/sessions.py b/src/pip/_vendor/requests/sessions.py
index 3f59cab9225..6cb3b4dae39 100644
--- a/src/pip/_vendor/requests/sessions.py
+++ b/src/pip/_vendor/requests/sessions.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.sessions
~~~~~~~~~~~~~~~~~
@@ -10,39 +8,52 @@
import os
import sys
import time
-from datetime import timedelta
from collections import OrderedDict
+from datetime import timedelta
+from ._internal_utils import to_native_string
+from .adapters import HTTPAdapter
from .auth import _basic_auth_str
-from .compat import cookielib, is_py3, urljoin, urlparse, Mapping
+from .compat import Mapping, cookielib, urljoin, urlparse
from .cookies import (
- cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
-from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
-from .hooks import default_hooks, dispatch_hook
-from ._internal_utils import to_native_string
-from .utils import to_key_val_list, default_headers, DEFAULT_PORTS
+ RequestsCookieJar,
+ cookiejar_from_dict,
+ extract_cookies_to_jar,
+ merge_cookies,
+)
from .exceptions import (
- TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
-
-from .structures import CaseInsensitiveDict
-from .adapters import HTTPAdapter
-
-from .utils import (
- requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
- get_auth_from_url, rewind_body, resolve_proxies
+ ChunkedEncodingError,
+ ContentDecodingError,
+ InvalidSchema,
+ TooManyRedirects,
)
-
-from .status_codes import codes
+from .hooks import default_hooks, dispatch_hook
# formerly defined here, reexposed here for backward compatibility
-from .models import REDIRECT_STATI
+from .models import ( # noqa: F401
+ DEFAULT_REDIRECT_LIMIT,
+ REDIRECT_STATI,
+ PreparedRequest,
+ Request,
+)
+from .status_codes import codes
+from .structures import CaseInsensitiveDict
+from .utils import ( # noqa: F401
+ DEFAULT_PORTS,
+ default_headers,
+ get_auth_from_url,
+ get_environ_proxies,
+ get_netrc_auth,
+ requote_uri,
+ resolve_proxies,
+ rewind_body,
+ should_bypass_proxies,
+ to_key_val_list,
+)
# Preferred clock, based on which one is more accurate on a given system.
-if sys.platform == 'win32':
- try: # Python 3.4+
- preferred_clock = time.perf_counter
- except AttributeError: # Earlier than Python 3.
- preferred_clock = time.clock
+if sys.platform == "win32":
+ preferred_clock = time.perf_counter
else:
preferred_clock = time.time
@@ -61,8 +72,7 @@ def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
# Bypass if not a dictionary (e.g. verify)
if not (
- isinstance(session_setting, Mapping) and
- isinstance(request_setting, Mapping)
+ isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping)
):
return request_setting
@@ -84,17 +94,16 @@ def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
- if session_hooks is None or session_hooks.get('response') == []:
+ if session_hooks is None or session_hooks.get("response") == []:
return request_hooks
- if request_hooks is None or request_hooks.get('response') == []:
+ if request_hooks is None or request_hooks.get("response") == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
-class SessionRedirectMixin(object):
-
+class SessionRedirectMixin:
def get_redirect_target(self, resp):
"""Receives a Response. Returns a redirect URI or ``None``"""
# Due to the nature of how requests processes redirects this method will
@@ -104,16 +113,15 @@ def get_redirect_target(self, resp):
# to cache the redirect location onto the response object as a private
# attribute.
if resp.is_redirect:
- location = resp.headers['location']
+ location = resp.headers["location"]
# Currently the underlying http module on py3 decodes headers
# in latin1, but empirical evidence suggests that latin1 is very
# rarely used with non-ASCII characters in HTTP headers.
# It is more likely to get UTF8 header rather than latin1.
# This causes incorrect handling of UTF8 encoded location headers.
# To solve this, we re-encode the location in latin1.
- if is_py3:
- location = location.encode('latin1')
- return to_native_string(location, 'utf8')
+ location = location.encode("latin1")
+ return to_native_string(location, "utf8")
return None
def should_strip_auth(self, old_url, new_url):
@@ -126,23 +134,40 @@ def should_strip_auth(self, old_url, new_url):
# ports. This isn't specified by RFC 7235, but is kept to avoid
# breaking backwards compatibility with older versions of requests
# that allowed any redirects on the same host.
- if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
- and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
+ if (
+ old_parsed.scheme == "http"
+ and old_parsed.port in (80, None)
+ and new_parsed.scheme == "https"
+ and new_parsed.port in (443, None)
+ ):
return False
# Handle default port usage corresponding to scheme.
changed_port = old_parsed.port != new_parsed.port
changed_scheme = old_parsed.scheme != new_parsed.scheme
default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
- if (not changed_scheme and old_parsed.port in default_port
- and new_parsed.port in default_port):
+ if (
+ not changed_scheme
+ and old_parsed.port in default_port
+ and new_parsed.port in default_port
+ ):
return False
# Standard case: root URI must match
return changed_port or changed_scheme
- def resolve_redirects(self, resp, req, stream=False, timeout=None,
- verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
+ def resolve_redirects(
+ self,
+ resp,
+ req,
+ stream=False,
+ timeout=None,
+ verify=True,
+ cert=None,
+ proxies=None,
+ yield_requests=False,
+ **adapter_kwargs,
+ ):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
@@ -163,19 +188,21 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None,
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
- raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp)
+ raise TooManyRedirects(
+ f"Exceeded {self.max_redirects} redirects.", response=resp
+ )
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
- if url.startswith('//'):
+ if url.startswith("//"):
parsed_rurl = urlparse(resp.url)
- url = ':'.join([to_native_string(parsed_rurl.scheme), url])
+ url = ":".join([to_native_string(parsed_rurl.scheme), url])
# Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
parsed = urlparse(url)
- if parsed.fragment == '' and previous_fragment:
+ if parsed.fragment == "" and previous_fragment:
parsed = parsed._replace(fragment=previous_fragment)
elif parsed.fragment:
previous_fragment = parsed.fragment
@@ -194,15 +221,18 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None,
self.rebuild_method(prepared_request, resp)
# https://github.com/psf/requests/issues/1084
- if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
+ if resp.status_code not in (
+ codes.temporary_redirect,
+ codes.permanent_redirect,
+ ):
# https://github.com/psf/requests/issues/3490
- purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
+ purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding")
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
- headers.pop('Cookie', None)
+ headers.pop("Cookie", None)
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
@@ -218,9 +248,8 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None,
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
- rewindable = (
- prepared_request._body_position is not None and
- ('Content-Length' in headers or 'Transfer-Encoding' in headers)
+ rewindable = prepared_request._body_position is not None and (
+ "Content-Length" in headers or "Transfer-Encoding" in headers
)
# Attempt to rewind consumed file-like object.
@@ -242,7 +271,7 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None,
cert=cert,
proxies=proxies,
allow_redirects=False,
- **adapter_kwargs
+ **adapter_kwargs,
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
@@ -259,10 +288,12 @@ def rebuild_auth(self, prepared_request, response):
headers = prepared_request.headers
url = prepared_request.url
- if 'Authorization' in headers and self.should_strip_auth(response.request.url, url):
+ if "Authorization" in headers and self.should_strip_auth(
+ response.request.url, url
+ ):
# If we get redirected to a new host, we should strip out any
# authentication headers.
- del headers['Authorization']
+ del headers["Authorization"]
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
@@ -285,8 +316,8 @@ def rebuild_proxies(self, prepared_request, proxies):
scheme = urlparse(prepared_request.url).scheme
new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)
- if 'Proxy-Authorization' in headers:
- del headers['Proxy-Authorization']
+ if "Proxy-Authorization" in headers:
+ del headers["Proxy-Authorization"]
try:
username, password = get_auth_from_url(new_proxies[scheme])
@@ -294,7 +325,7 @@ def rebuild_proxies(self, prepared_request, proxies):
username, password = None, None
if username and password:
- headers['Proxy-Authorization'] = _basic_auth_str(username, password)
+ headers["Proxy-Authorization"] = _basic_auth_str(username, password)
return new_proxies
@@ -305,18 +336,18 @@ def rebuild_method(self, prepared_request, response):
method = prepared_request.method
# https://tools.ietf.org/html/rfc7231#section-6.4.4
- if response.status_code == codes.see_other and method != 'HEAD':
- method = 'GET'
+ if response.status_code == codes.see_other and method != "HEAD":
+ method = "GET"
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
- if response.status_code == codes.found and method != 'HEAD':
- method = 'GET'
+ if response.status_code == codes.found and method != "HEAD":
+ method = "GET"
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
- if response.status_code == codes.moved and method == 'POST':
- method = 'GET'
+ if response.status_code == codes.moved and method == "POST":
+ method = "GET"
prepared_request.method = method
@@ -341,9 +372,18 @@ class Session(SessionRedirectMixin):
"""
__attrs__ = [
- 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
- 'cert', 'adapters', 'stream', 'trust_env',
- 'max_redirects',
+ "headers",
+ "cookies",
+ "auth",
+ "proxies",
+ "hooks",
+ "params",
+ "verify",
+ "cert",
+ "adapters",
+ "stream",
+ "trust_env",
+ "max_redirects",
]
def __init__(self):
@@ -405,8 +445,8 @@ def __init__(self):
# Default connection adapters.
self.adapters = OrderedDict()
- self.mount('https://', HTTPAdapter())
- self.mount('http://', HTTPAdapter())
+ self.mount("https://", HTTPAdapter())
+ self.mount("http://", HTTPAdapter())
def __enter__(self):
return self
@@ -432,7 +472,8 @@ def prepare_request(self, request):
# Merge with session cookies
merged_cookies = merge_cookies(
- merge_cookies(RequestsCookieJar(), self.cookies), cookies)
+ merge_cookies(RequestsCookieJar(), self.cookies), cookies
+ )
# Set environment's basic authentication if not explicitly set.
auth = request.auth
@@ -446,7 +487,9 @@ def prepare_request(self, request):
files=request.files,
data=request.data,
json=request.json,
- headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
+ headers=merge_setting(
+ request.headers, self.headers, dict_class=CaseInsensitiveDict
+ ),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
@@ -454,10 +497,25 @@ def prepare_request(self, request):
)
return p
- def request(self, method, url,
- params=None, data=None, headers=None, cookies=None, files=None,
- auth=None, timeout=None, allow_redirects=True, proxies=None,
- hooks=None, stream=None, verify=None, cert=None, json=None):
+ def request(
+ self,
+ method,
+ url,
+ params=None,
+ data=None,
+ headers=None,
+ cookies=None,
+ files=None,
+ auth=None,
+ timeout=None,
+ allow_redirects=True,
+ proxies=None,
+ hooks=None,
+ stream=None,
+ verify=None,
+ cert=None,
+ json=None,
+ ):
"""Constructs a :class:`Request `, prepares it and sends it.
Returns :class:`Response ` object.
@@ -493,7 +551,7 @@ def request(self, method, url,
``False``, requests will accept any TLS certificate presented by
the server, and will ignore hostname mismatches and/or expired
certificates, which will make your application vulnerable to
- man-in-the-middle (MitM) attacks. Setting verify to ``False``
+ man-in-the-middle (MitM) attacks. Setting verify to ``False``
may be useful during local development or testing.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
@@ -522,8 +580,8 @@ def request(self, method, url,
# Send the request.
send_kwargs = {
- 'timeout': timeout,
- 'allow_redirects': allow_redirects,
+ "timeout": timeout,
+ "allow_redirects": allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
@@ -538,8 +596,8 @@ def get(self, url, **kwargs):
:rtype: requests.Response
"""
- kwargs.setdefault('allow_redirects', True)
- return self.request('GET', url, **kwargs)
+ kwargs.setdefault("allow_redirects", True)
+ return self.request("GET", url, **kwargs)
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`Response` object.
@@ -549,8 +607,8 @@ def options(self, url, **kwargs):
:rtype: requests.Response
"""
- kwargs.setdefault('allow_redirects', True)
- return self.request('OPTIONS', url, **kwargs)
+ kwargs.setdefault("allow_redirects", True)
+ return self.request("OPTIONS", url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`Response` object.
@@ -560,8 +618,8 @@ def head(self, url, **kwargs):
:rtype: requests.Response
"""
- kwargs.setdefault('allow_redirects', False)
- return self.request('HEAD', url, **kwargs)
+ kwargs.setdefault("allow_redirects", False)
+ return self.request("HEAD", url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
r"""Sends a POST request. Returns :class:`Response` object.
@@ -574,7 +632,7 @@ def post(self, url, data=None, json=None, **kwargs):
:rtype: requests.Response
"""
- return self.request('POST', url, data=data, json=json, **kwargs)
+ return self.request("POST", url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`Response` object.
@@ -586,7 +644,7 @@ def put(self, url, data=None, **kwargs):
:rtype: requests.Response
"""
- return self.request('PUT', url, data=data, **kwargs)
+ return self.request("PUT", url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`Response` object.
@@ -598,7 +656,7 @@ def patch(self, url, data=None, **kwargs):
:rtype: requests.Response
"""
- return self.request('PATCH', url, data=data, **kwargs)
+ return self.request("PATCH", url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`Response` object.
@@ -608,7 +666,7 @@ def delete(self, url, **kwargs):
:rtype: requests.Response
"""
- return self.request('DELETE', url, **kwargs)
+ return self.request("DELETE", url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest.
@@ -617,22 +675,20 @@ def send(self, request, **kwargs):
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
- kwargs.setdefault('stream', self.stream)
- kwargs.setdefault('verify', self.verify)
- kwargs.setdefault('cert', self.cert)
- if 'proxies' not in kwargs:
- kwargs['proxies'] = resolve_proxies(
- request, self.proxies, self.trust_env
- )
+ kwargs.setdefault("stream", self.stream)
+ kwargs.setdefault("verify", self.verify)
+ kwargs.setdefault("cert", self.cert)
+ if "proxies" not in kwargs:
+ kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
- raise ValueError('You can only send PreparedRequests.')
+ raise ValueError("You can only send PreparedRequests.")
# Set up variables needed for resolve_redirects and dispatching of hooks
- allow_redirects = kwargs.pop('allow_redirects', True)
- stream = kwargs.get('stream')
+ allow_redirects = kwargs.pop("allow_redirects", True)
+ stream = kwargs.get("stream")
hooks = request.hooks
# Get the appropriate adapter to use
@@ -649,7 +705,7 @@ def send(self, request, **kwargs):
r.elapsed = timedelta(seconds=elapsed)
# Response manipulation hooks
- r = dispatch_hook('response', hooks, r, **kwargs)
+ r = dispatch_hook("response", hooks, r, **kwargs)
# Persist cookies
if r.history:
@@ -679,7 +735,9 @@ def send(self, request, **kwargs):
# If redirects aren't being followed, store the response on the Request for Response.next().
if not allow_redirects:
try:
- r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
+ r._next = next(
+ self.resolve_redirects(r, request, yield_requests=True, **kwargs)
+ )
except StopIteration:
pass
@@ -697,16 +755,19 @@ def merge_environment_settings(self, url, proxies, stream, verify, cert):
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
- no_proxy = proxies.get('no_proxy') if proxies is not None else None
+ no_proxy = proxies.get("no_proxy") if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
- # Look for requests environment configuration and be compatible
- # with cURL.
+ # Look for requests environment configuration
+ # and be compatible with cURL.
if verify is True or verify is None:
- verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
- os.environ.get('CURL_CA_BUNDLE'))
+ verify = (
+ os.environ.get("REQUESTS_CA_BUNDLE")
+ or os.environ.get("CURL_CA_BUNDLE")
+ or verify
+ )
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
@@ -714,8 +775,7 @@ def merge_environment_settings(self, url, proxies, stream, verify, cert):
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
- return {'verify': verify, 'proxies': proxies, 'stream': stream,
- 'cert': cert}
+ return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert}
def get_adapter(self, url):
"""
@@ -729,7 +789,7 @@ def get_adapter(self, url):
return adapter
# Nothing matches :-/
- raise InvalidSchema("No connection adapters were found for {!r}".format(url))
+ raise InvalidSchema(f"No connection adapters were found for {url!r}")
def close(self):
"""Closes all adapters and as such the session"""
diff --git a/src/pip/_vendor/requests/status_codes.py b/src/pip/_vendor/requests/status_codes.py
index d80a7cd4dd4..4bd072be976 100644
--- a/src/pip/_vendor/requests/status_codes.py
+++ b/src/pip/_vendor/requests/status_codes.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
r"""
The ``codes`` object defines a mapping from common names for HTTP statuses
to their numerical codes, accessible either as attributes or as dictionary
@@ -23,101 +21,108 @@
from .structures import LookupDict
_codes = {
-
# Informational.
- 100: ('continue',),
- 101: ('switching_protocols',),
- 102: ('processing',),
- 103: ('checkpoint',),
- 122: ('uri_too_long', 'request_uri_too_long'),
- 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
- 201: ('created',),
- 202: ('accepted',),
- 203: ('non_authoritative_info', 'non_authoritative_information'),
- 204: ('no_content',),
- 205: ('reset_content', 'reset'),
- 206: ('partial_content', 'partial'),
- 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
- 208: ('already_reported',),
- 226: ('im_used',),
-
+ 100: ("continue",),
+ 101: ("switching_protocols",),
+ 102: ("processing",),
+ 103: ("checkpoint",),
+ 122: ("uri_too_long", "request_uri_too_long"),
+ 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"),
+ 201: ("created",),
+ 202: ("accepted",),
+ 203: ("non_authoritative_info", "non_authoritative_information"),
+ 204: ("no_content",),
+ 205: ("reset_content", "reset"),
+ 206: ("partial_content", "partial"),
+ 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"),
+ 208: ("already_reported",),
+ 226: ("im_used",),
# Redirection.
- 300: ('multiple_choices',),
- 301: ('moved_permanently', 'moved', '\\o-'),
- 302: ('found',),
- 303: ('see_other', 'other'),
- 304: ('not_modified',),
- 305: ('use_proxy',),
- 306: ('switch_proxy',),
- 307: ('temporary_redirect', 'temporary_moved', 'temporary'),
- 308: ('permanent_redirect',
- 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0
-
+ 300: ("multiple_choices",),
+ 301: ("moved_permanently", "moved", "\\o-"),
+ 302: ("found",),
+ 303: ("see_other", "other"),
+ 304: ("not_modified",),
+ 305: ("use_proxy",),
+ 306: ("switch_proxy",),
+ 307: ("temporary_redirect", "temporary_moved", "temporary"),
+ 308: (
+ "permanent_redirect",
+ "resume_incomplete",
+ "resume",
+ ), # "resume" and "resume_incomplete" to be removed in 3.0
# Client Error.
- 400: ('bad_request', 'bad'),
- 401: ('unauthorized',),
- 402: ('payment_required', 'payment'),
- 403: ('forbidden',),
- 404: ('not_found', '-o-'),
- 405: ('method_not_allowed', 'not_allowed'),
- 406: ('not_acceptable',),
- 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
- 408: ('request_timeout', 'timeout'),
- 409: ('conflict',),
- 410: ('gone',),
- 411: ('length_required',),
- 412: ('precondition_failed', 'precondition'),
- 413: ('request_entity_too_large',),
- 414: ('request_uri_too_large',),
- 415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
- 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
- 417: ('expectation_failed',),
- 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
- 421: ('misdirected_request',),
- 422: ('unprocessable_entity', 'unprocessable'),
- 423: ('locked',),
- 424: ('failed_dependency', 'dependency'),
- 425: ('unordered_collection', 'unordered'),
- 426: ('upgrade_required', 'upgrade'),
- 428: ('precondition_required', 'precondition'),
- 429: ('too_many_requests', 'too_many'),
- 431: ('header_fields_too_large', 'fields_too_large'),
- 444: ('no_response', 'none'),
- 449: ('retry_with', 'retry'),
- 450: ('blocked_by_windows_parental_controls', 'parental_controls'),
- 451: ('unavailable_for_legal_reasons', 'legal_reasons'),
- 499: ('client_closed_request',),
-
+ 400: ("bad_request", "bad"),
+ 401: ("unauthorized",),
+ 402: ("payment_required", "payment"),
+ 403: ("forbidden",),
+ 404: ("not_found", "-o-"),
+ 405: ("method_not_allowed", "not_allowed"),
+ 406: ("not_acceptable",),
+ 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"),
+ 408: ("request_timeout", "timeout"),
+ 409: ("conflict",),
+ 410: ("gone",),
+ 411: ("length_required",),
+ 412: ("precondition_failed", "precondition"),
+ 413: ("request_entity_too_large",),
+ 414: ("request_uri_too_large",),
+ 415: ("unsupported_media_type", "unsupported_media", "media_type"),
+ 416: (
+ "requested_range_not_satisfiable",
+ "requested_range",
+ "range_not_satisfiable",
+ ),
+ 417: ("expectation_failed",),
+ 418: ("im_a_teapot", "teapot", "i_am_a_teapot"),
+ 421: ("misdirected_request",),
+ 422: ("unprocessable_entity", "unprocessable"),
+ 423: ("locked",),
+ 424: ("failed_dependency", "dependency"),
+ 425: ("unordered_collection", "unordered"),
+ 426: ("upgrade_required", "upgrade"),
+ 428: ("precondition_required", "precondition"),
+ 429: ("too_many_requests", "too_many"),
+ 431: ("header_fields_too_large", "fields_too_large"),
+ 444: ("no_response", "none"),
+ 449: ("retry_with", "retry"),
+ 450: ("blocked_by_windows_parental_controls", "parental_controls"),
+ 451: ("unavailable_for_legal_reasons", "legal_reasons"),
+ 499: ("client_closed_request",),
# Server Error.
- 500: ('internal_server_error', 'server_error', '/o\\', '✗'),
- 501: ('not_implemented',),
- 502: ('bad_gateway',),
- 503: ('service_unavailable', 'unavailable'),
- 504: ('gateway_timeout',),
- 505: ('http_version_not_supported', 'http_version'),
- 506: ('variant_also_negotiates',),
- 507: ('insufficient_storage',),
- 509: ('bandwidth_limit_exceeded', 'bandwidth'),
- 510: ('not_extended',),
- 511: ('network_authentication_required', 'network_auth', 'network_authentication'),
+ 500: ("internal_server_error", "server_error", "/o\\", "✗"),
+ 501: ("not_implemented",),
+ 502: ("bad_gateway",),
+ 503: ("service_unavailable", "unavailable"),
+ 504: ("gateway_timeout",),
+ 505: ("http_version_not_supported", "http_version"),
+ 506: ("variant_also_negotiates",),
+ 507: ("insufficient_storage",),
+ 509: ("bandwidth_limit_exceeded", "bandwidth"),
+ 510: ("not_extended",),
+ 511: ("network_authentication_required", "network_auth", "network_authentication"),
}
-codes = LookupDict(name='status_codes')
+codes = LookupDict(name="status_codes")
+
def _init():
for code, titles in _codes.items():
for title in titles:
setattr(codes, title, code)
- if not title.startswith(('\\', '/')):
+ if not title.startswith(("\\", "/")):
setattr(codes, title.upper(), code)
def doc(code):
- names = ', '.join('``%s``' % n for n in _codes[code])
- return '* %d: %s' % (code, names)
+ names = ", ".join(f"``{n}``" for n in _codes[code])
+ return "* %d: %s" % (code, names)
global __doc__
- __doc__ = (__doc__ + '\n' +
- '\n'.join(doc(code) for code in sorted(_codes))
- if __doc__ is not None else None)
+ __doc__ = (
+ __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes))
+ if __doc__ is not None
+ else None
+ )
+
_init()
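
A minimal usage sketch for the `codes` registry that `_init()` populates (assuming the vendored import path):

from pip._vendor.requests.status_codes import codes

assert codes.ok == 200            # attribute access
assert codes["not_found"] == 404  # dict-style access via LookupDict
assert codes.TEAPOT == 418        # upper-case aliases exist, except for '\' / '/' titles
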
diff --git a/src/pip/_vendor/requests/structures.py b/src/pip/_vendor/requests/structures.py
index 8ee0ba7a082..188e13e4829 100644
--- a/src/pip/_vendor/requests/structures.py
+++ b/src/pip/_vendor/requests/structures.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
@@ -64,11 +62,7 @@ def __len__(self):
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
- return (
- (lowerkey, keyval[1])
- for (lowerkey, keyval)
- in self._store.items()
- )
+ return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
def __eq__(self, other):
if isinstance(other, Mapping):
@@ -91,10 +85,10 @@ class LookupDict(dict):
def __init__(self, name=None):
self.name = name
- super(LookupDict, self).__init__()
+ super().__init__()
def __repr__(self):
- return '' % (self.name)
+ return f""
def __getitem__(self, key):
# We allow fall-through here, so values default to None
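
A short behavioral sketch of `CaseInsensitiveDict` and the condensed `lower_items()` (assuming the vendored import path):

from pip._vendor.requests.structures import CaseInsensitiveDict

headers = CaseInsensitiveDict({"Content-Type": "application/json"})
assert headers["content-type"] == "application/json"  # lookups ignore case
assert list(headers) == ["Content-Type"]              # original casing preserved
assert dict(headers.lower_items()) == {"content-type": "application/json"}
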
diff --git a/src/pip/_vendor/requests/utils.py b/src/pip/_vendor/requests/utils.py
index 1e5857ad8af..33f394d265d 100644
--- a/src/pip/_vendor/requests/utils.py
+++ b/src/pip/_vendor/requests/utils.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""
requests.utils
~~~~~~~~~~~~~~
@@ -20,28 +18,46 @@
import warnings
import zipfile
from collections import OrderedDict
-from pip._vendor.urllib3.util import make_headers
-from pip._vendor.urllib3.util import parse_url
-from .__version__ import __version__
+from pip._vendor.urllib3.util import make_headers, parse_url
+
from . import certs
+from .__version__ import __version__
+
# to_native_string is unused here, but imported here for backwards compatibility
-from ._internal_utils import to_native_string
+from ._internal_utils import HEADER_VALIDATORS, to_native_string # noqa: F401
+from .compat import (
+ Mapping,
+ basestring,
+ bytes,
+ getproxies,
+ getproxies_environment,
+ integer_types,
+)
from .compat import parse_http_list as _parse_list_header
from .compat import (
- quote, urlparse, bytes, str, unquote, getproxies,
- proxy_bypass, urlunparse, basestring, integer_types, is_py3,
- proxy_bypass_environment, getproxies_environment, Mapping)
+ proxy_bypass,
+ proxy_bypass_environment,
+ quote,
+ str,
+ unquote,
+ urlparse,
+ urlunparse,
+)
from .cookies import cookiejar_from_dict
-from .structures import CaseInsensitiveDict
from .exceptions import (
- InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
+ FileModeWarning,
+ InvalidHeader,
+ InvalidURL,
+ UnrewindableBodyError,
+)
+from .structures import CaseInsensitiveDict
-NETRC_FILES = ('.netrc', '_netrc')
+NETRC_FILES = (".netrc", "_netrc")
DEFAULT_CA_BUNDLE_PATH = certs.where()
-DEFAULT_PORTS = {'http': 80, 'https': 443}
+DEFAULT_PORTS = {"http": 80, "https": 443}
# Ensure that ', ' is used to preserve previous delimiter behavior.
DEFAULT_ACCEPT_ENCODING = ", ".join(
@@ -49,28 +65,25 @@
)
-if sys.platform == 'win32':
+if sys.platform == "win32":
# provide a proxy_bypass version on Windows without DNS lookups
def proxy_bypass_registry(host):
try:
- if is_py3:
- import winreg
- else:
- import _winreg as winreg
+ import winreg
except ImportError:
return False
try:
- internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
- r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
+ internetSettings = winreg.OpenKey(
+ winreg.HKEY_CURRENT_USER,
+ r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
+ )
# ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
- proxyEnable = int(winreg.QueryValueEx(internetSettings,
- 'ProxyEnable')[0])
+ proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
# ProxyOverride is almost always a string
- proxyOverride = winreg.QueryValueEx(internetSettings,
- 'ProxyOverride')[0]
- except OSError:
+ proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
+ except (OSError, ValueError):
return False
if not proxyEnable or not proxyOverride:
return False
@@ -78,15 +91,15 @@ def proxy_bypass_registry(host):
# make a check value list from the registry entry: replace the
# '<local>' string by the localhost entry and the corresponding
# canonical entry.
- proxyOverride = proxyOverride.split(';')
+ proxyOverride = proxyOverride.split(";")
# now check if we match one of the registry values.
for test in proxyOverride:
- if test == '<local>':
- if '.' not in host:
+ if test == "":
+ if "." not in host:
return True
- test = test.replace(".", r"\.") # mask dots
- test = test.replace("*", r".*") # change glob sequence
- test = test.replace("?", r".") # change glob char
+ test = test.replace(".", r"\.") # mask dots
+ test = test.replace("*", r".*") # change glob sequence
+ test = test.replace("?", r".") # change glob char
if re.match(test, host, re.I):
return True
return False
@@ -106,7 +119,7 @@ def proxy_bypass(host): # noqa
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
- if hasattr(d, 'items'):
+ if hasattr(d, "items"):
d = d.items()
return d
@@ -116,13 +129,13 @@ def super_len(o):
total_length = None
current_position = 0
- if hasattr(o, '__len__'):
+ if hasattr(o, "__len__"):
total_length = len(o)
- elif hasattr(o, 'len'):
+ elif hasattr(o, "len"):
total_length = o.len
- elif hasattr(o, 'fileno'):
+ elif hasattr(o, "fileno"):
try:
fileno = o.fileno()
except (io.UnsupportedOperation, AttributeError):
@@ -135,21 +148,23 @@ def super_len(o):
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
- if 'b' not in o.mode:
- warnings.warn((
- "Requests has determined the content-length for this "
- "request using the binary size of the file: however, the "
- "file has been opened in text mode (i.e. without the 'b' "
- "flag in the mode). This may lead to an incorrect "
- "content-length. In Requests 3.0, support will be removed "
- "for files in text mode."),
- FileModeWarning
+ if "b" not in o.mode:
+ warnings.warn(
+ (
+ "Requests has determined the content-length for this "
+ "request using the binary size of the file: however, the "
+ "file has been opened in text mode (i.e. without the 'b' "
+ "flag in the mode). This may lead to an incorrect "
+ "content-length. In Requests 3.0, support will be removed "
+ "for files in text mode."
+ ),
+ FileModeWarning,
)
- if hasattr(o, 'tell'):
+ if hasattr(o, "tell"):
try:
current_position = o.tell()
- except (OSError, IOError):
+ except OSError:
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
@@ -157,7 +172,7 @@ def super_len(o):
if total_length is not None:
current_position = total_length
else:
- if hasattr(o, 'seek') and total_length is None:
+ if hasattr(o, "seek") and total_length is None:
# StringIO and BytesIO have seek but no usable fileno
try:
# seek to end of file
@@ -167,7 +182,7 @@ def super_len(o):
# seek back to current position to support
# partially read file-like objects
o.seek(current_position or 0)
- except (OSError, IOError):
+ except OSError:
total_length = 0
if total_length is None:
@@ -179,14 +194,14 @@ def super_len(o):
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
- netrc_file = os.environ.get('NETRC')
+ netrc_file = os.environ.get("NETRC")
if netrc_file is not None:
netrc_locations = (netrc_file,)
else:
- netrc_locations = ('~/{}'.format(f) for f in NETRC_FILES)
+ netrc_locations = (f"~/{f}" for f in NETRC_FILES)
try:
- from netrc import netrc, NetrcParseError
+ from netrc import NetrcParseError, netrc
netrc_path = None
@@ -211,18 +226,18 @@ def get_netrc_auth(url, raise_errors=False):
# Strip port numbers from netloc. This weird `if...encode`` dance is
# used for Python 3.2, which doesn't support unicode literals.
- splitstr = b':'
+ splitstr = b":"
if isinstance(url, str):
- splitstr = splitstr.decode('ascii')
+ splitstr = splitstr.decode("ascii")
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
- login_i = (0 if _netrc[0] else 1)
+ login_i = 0 if _netrc[0] else 1
return (_netrc[login_i], _netrc[2])
- except (NetrcParseError, IOError):
+ except (NetrcParseError, OSError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
@@ -235,9 +250,8 @@ def get_netrc_auth(url, raise_errors=False):
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
- name = getattr(obj, 'name', None)
- if (name and isinstance(name, basestring) and name[0] != '<' and
- name[-1] != '>'):
+ name = getattr(obj, "name", None)
+ if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
return os.path.basename(name)
@@ -259,7 +273,7 @@ def extract_zipped_paths(path):
# If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
# we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
break
- member = '/'.join([prefix, member])
+ member = "/".join([prefix, member])
if not zipfile.is_zipfile(archive):
return path
@@ -270,7 +284,7 @@ def extract_zipped_paths(path):
# we have a valid zip archive and a valid member of that archive
tmp = tempfile.gettempdir()
- extracted_path = os.path.join(tmp, member.split('/')[-1])
+ extracted_path = os.path.join(tmp, member.split("/")[-1])
if not os.path.exists(extracted_path):
# use read + write to avoid creating nested folders; we only want the file and this avoids an mkdir race condition
with atomic_open(extracted_path) as file_handler:
@@ -281,12 +295,11 @@ def extract_zipped_paths(path):
@contextlib.contextmanager
def atomic_open(filename):
"""Write a file to the disk in an atomic fashion"""
- replacer = os.rename if sys.version_info[0] == 2 else os.replace
tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
try:
- with os.fdopen(tmp_descriptor, 'wb') as tmp_handler:
+ with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
yield tmp_handler
- replacer(tmp_name, filename)
+ os.replace(tmp_name, filename)
except BaseException:
os.remove(tmp_name)
raise
@@ -314,7 +327,7 @@ def from_key_val_list(value):
return None
if isinstance(value, (str, bytes, bool, int)):
- raise ValueError('cannot encode objects that are not 2-tuples')
+ raise ValueError("cannot encode objects that are not 2-tuples")
return OrderedDict(value)
@@ -340,7 +353,7 @@ def to_key_val_list(value):
return None
if isinstance(value, (str, bytes, bool, int)):
- raise ValueError('cannot encode objects that are not 2-tuples')
+ raise ValueError("cannot encode objects that are not 2-tuples")
if isinstance(value, Mapping):
value = value.items()
@@ -405,10 +418,10 @@ def parse_dict_header(value):
"""
result = {}
for item in _parse_list_header(value):
- if '=' not in item:
+ if "=" not in item:
result[item] = None
continue
- name, value = item.split('=', 1)
+ name, value = item.split("=", 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
@@ -436,8 +449,8 @@ def unquote_header_value(value, is_filename=False):
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
- if not is_filename or value[:2] != '\\\\':
- return value.replace('\\\\', '\\').replace('\\"', '"')
+ if not is_filename or value[:2] != "\\\\":
+ return value.replace("\\\\", "\\").replace('\\"', '"')
return value
@@ -472,19 +485,24 @@ def get_encodings_from_content(content):
:param content: bytestring to extract encodings from.
"""
- warnings.warn((
- 'In requests 3.0, get_encodings_from_content will be removed. For '
- 'more information, please see the discussion on issue #2266. (This'
- ' warning should only appear once.)'),
- DeprecationWarning)
+ warnings.warn(
+ (
+ "In requests 3.0, get_encodings_from_content will be removed. For "
+ "more information, please see the discussion on issue #2266. (This"
+ " warning should only appear once.)"
+ ),
+ DeprecationWarning,
+ )
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
- return (charset_re.findall(content) +
- pragma_re.findall(content) +
- xml_re.findall(content))
+ return (
+ charset_re.findall(content)
+ + pragma_re.findall(content)
+ + xml_re.findall(content)
+ )
def _parse_content_type_header(header):
@@ -495,7 +513,7 @@ def _parse_content_type_header(header):
parameters
"""
- tokens = header.split(';')
+ tokens = header.split(";")
content_type, params = tokens[0].strip(), tokens[1:]
params_dict = {}
items_to_strip = "\"' "
@@ -507,7 +525,7 @@ def _parse_content_type_header(header):
index_of_equals = param.find("=")
if index_of_equals != -1:
key = param[:index_of_equals].strip(items_to_strip)
- value = param[index_of_equals + 1:].strip(items_to_strip)
+ value = param[index_of_equals + 1 :].strip(items_to_strip)
params_dict[key.lower()] = value
return content_type, params_dict
@@ -519,38 +537,37 @@ def get_encoding_from_headers(headers):
:rtype: str
"""
- content_type = headers.get('content-type')
+ content_type = headers.get("content-type")
if not content_type:
return None
content_type, params = _parse_content_type_header(content_type)
- if 'charset' in params:
- return params['charset'].strip("'\"")
+ if "charset" in params:
+ return params["charset"].strip("'\"")
- if 'text' in content_type:
- return 'ISO-8859-1'
+ if "text" in content_type:
+ return "ISO-8859-1"
- if 'application/json' in content_type:
+ if "application/json" in content_type:
# Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
- return 'utf-8'
+ return "utf-8"
def stream_decode_response_unicode(iterator, r):
- """Stream decodes a iterator."""
+ """Stream decodes an iterator."""
if r.encoding is None:
- for item in iterator:
- yield item
+ yield from iterator
return
- decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
+ decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
- rv = decoder.decode(b'', final=True)
+ rv = decoder.decode(b"", final=True)
if rv:
yield rv
@@ -561,7 +578,7 @@ def iter_slices(string, slice_length):
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
- yield string[pos:pos + slice_length]
+ yield string[pos : pos + slice_length]
pos += slice_length
@@ -577,11 +594,14 @@ def get_unicode_from_response(r):
:rtype: str
"""
- warnings.warn((
- 'In requests 3.0, get_unicode_from_response will be removed. For '
- 'more information, please see the discussion on issue #2266. (This'
- ' warning should only appear once.)'),
- DeprecationWarning)
+ warnings.warn(
+ (
+ "In requests 3.0, get_unicode_from_response will be removed. For "
+ "more information, please see the discussion on issue #2266. (This"
+ " warning should only appear once.)"
+ ),
+ DeprecationWarning,
+ )
tried_encodings = []
@@ -596,14 +616,15 @@ def get_unicode_from_response(r):
# Fall back:
try:
- return str(r.content, encoding, errors='replace')
+ return str(r.content, encoding, errors="replace")
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~")
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
+)
def unquote_unreserved(uri):
@@ -612,22 +633,22 @@ def unquote_unreserved(uri):
:rtype: str
"""
- parts = uri.split('%')
+ parts = uri.split("%")
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
- raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
+ raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
- parts[i] = '%' + parts[i]
+ parts[i] = f"%{parts[i]}"
else:
- parts[i] = '%' + parts[i]
- return ''.join(parts)
+ parts[i] = f"%{parts[i]}"
+ return "".join(parts)
def requote_uri(uri):
@@ -660,10 +681,10 @@ def address_in_network(ip, net):
:rtype: bool
"""
- ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
- netaddr, bits = net.split('/')
- netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
- network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
+ ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
+ netaddr, bits = net.split("/")
+ netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
+ network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
@@ -674,8 +695,8 @@ def dotted_netmask(mask):
:rtype: str
"""
- bits = 0xffffffff ^ (1 << 32 - mask) - 1
- return socket.inet_ntoa(struct.pack('>I', bits))
+ bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
+ return socket.inet_ntoa(struct.pack(">I", bits))
def is_ipv4_address(string_ip):
@@ -684,7 +705,7 @@ def is_ipv4_address(string_ip):
"""
try:
socket.inet_aton(string_ip)
- except socket.error:
+ except OSError:
return False
return True
@@ -695,9 +716,9 @@ def is_valid_cidr(string_network):
:rtype: bool
"""
- if string_network.count('/') == 1:
+ if string_network.count("/") == 1:
try:
- mask = int(string_network.split('/')[1])
+ mask = int(string_network.split("/")[1])
except ValueError:
return False
@@ -705,8 +726,8 @@ def is_valid_cidr(string_network):
return False
try:
- socket.inet_aton(string_network.split('/')[0])
- except socket.error:
+ socket.inet_aton(string_network.split("/")[0])
+ except OSError:
return False
else:
return False
@@ -743,13 +764,14 @@ def should_bypass_proxies(url, no_proxy):
"""
# Prioritize lowercase environment variables over uppercase
# to keep a consistent behaviour with other http projects (curl, wget).
- get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
+ def get_proxy(key):
+ return os.environ.get(key) or os.environ.get(key.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy_arg = no_proxy
if no_proxy is None:
- no_proxy = get_proxy('no_proxy')
+ no_proxy = get_proxy("no_proxy")
parsed = urlparse(url)
if parsed.hostname is None:
@@ -759,9 +781,7 @@ def should_bypass_proxies(url, no_proxy):
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the hostname, both with and without the port.
- no_proxy = (
- host for host in no_proxy.replace(' ', '').split(',') if host
- )
+ no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)
if is_ipv4_address(parsed.hostname):
for proxy_ip in no_proxy:
@@ -775,7 +795,7 @@ def should_bypass_proxies(url, no_proxy):
else:
host_with_port = parsed.hostname
if parsed.port:
- host_with_port += ':{}'.format(parsed.port)
+ host_with_port += f":{parsed.port}"
for host in no_proxy:
if parsed.hostname.endswith(host) or host_with_port.endswith(host):
@@ -783,7 +803,7 @@ def should_bypass_proxies(url, no_proxy):
# to apply the proxies on this URL.
return True
- with set_environ('no_proxy', no_proxy_arg):
+ with set_environ("no_proxy", no_proxy_arg):
# parsed.hostname can be `None` in cases such as a file URI.
try:
bypass = proxy_bypass(parsed.hostname)
@@ -817,13 +837,13 @@ def select_proxy(url, proxies):
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
- return proxies.get(urlparts.scheme, proxies.get('all'))
+ return proxies.get(urlparts.scheme, proxies.get("all"))
proxy_keys = [
- urlparts.scheme + '://' + urlparts.hostname,
+ urlparts.scheme + "://" + urlparts.hostname,
urlparts.scheme,
- 'all://' + urlparts.hostname,
- 'all',
+ "all://" + urlparts.hostname,
+ "all",
]
proxy = None
for proxy_key in proxy_keys:
@@ -848,13 +868,13 @@ def resolve_proxies(request, proxies, trust_env=True):
proxies = proxies if proxies is not None else {}
url = request.url
scheme = urlparse(url).scheme
- no_proxy = proxies.get('no_proxy')
+ no_proxy = proxies.get("no_proxy")
new_proxies = proxies.copy()
if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
- proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
+ proxy = environ_proxies.get(scheme, environ_proxies.get("all"))
if proxy:
new_proxies.setdefault(scheme, proxy)
@@ -867,19 +887,21 @@ def default_user_agent(name="python-requests"):
:rtype: str
"""
- return '%s/%s' % (name, __version__)
+ return f"{name}/{__version__}"
def default_headers():
"""
:rtype: requests.structures.CaseInsensitiveDict
"""
- return CaseInsensitiveDict({
- 'User-Agent': default_user_agent(),
- 'Accept-Encoding': DEFAULT_ACCEPT_ENCODING,
- 'Accept': '*/*',
- 'Connection': 'keep-alive',
- })
+ return CaseInsensitiveDict(
+ {
+ "User-Agent": default_user_agent(),
+ "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
+ "Accept": "*/*",
+ "Connection": "keep-alive",
+ }
+ )
def parse_header_links(value):
@@ -892,23 +914,23 @@ def parse_header_links(value):
links = []
- replace_chars = ' \'"'
+ replace_chars = " '\""
value = value.strip(replace_chars)
if not value:
return links
- for val in re.split(', *<', value):
+ for val in re.split(", *<", value):
try:
- url, params = val.split(';', 1)
+ url, params = val.split(";", 1)
except ValueError:
- url, params = val, ''
+ url, params = val, ""
- link = {'url': url.strip('<> \'"')}
+ link = {"url": url.strip("<> '\"")}
- for param in params.split(';'):
+ for param in params.split(";"):
try:
- key, value = param.split('=')
+ key, value = param.split("=")
except ValueError:
break
@@ -920,7 +942,7 @@ def parse_header_links(value):
# Null bytes; no need to recreate these on each call to guess_json_utf
-_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
+_null = "\x00".encode("ascii") # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
@@ -934,25 +956,25 @@ def guess_json_utf(data):
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
- return 'utf-32' # BOM included
+ return "utf-32" # BOM included
if sample[:3] == codecs.BOM_UTF8:
- return 'utf-8-sig' # BOM included, MS style (discouraged)
+ return "utf-8-sig" # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
- return 'utf-16' # BOM included
+ return "utf-16" # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
- return 'utf-8'
+ return "utf-8"
if nullcount == 2:
- if sample[::2] == _null2: # 1st and 3rd are null
- return 'utf-16-be'
+ if sample[::2] == _null2: # 1st and 3rd are null
+ return "utf-16-be"
if sample[1::2] == _null2: # 2nd and 4th are null
- return 'utf-16-le'
+ return "utf-16-le"
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
- return 'utf-32-be'
+ return "utf-32-be"
if sample[1:] == _null3:
- return 'utf-32-le'
+ return "utf-32-le"
# Did not detect a valid UTF-32 ascii-range character
return None
@@ -977,13 +999,13 @@ def prepend_scheme_if_needed(url, new_scheme):
if auth:
# parse_url doesn't provide the netloc with auth
# so we'll add it ourselves.
- netloc = '@'.join([auth, netloc])
+ netloc = "@".join([auth, netloc])
if scheme is None:
scheme = new_scheme
if path is None:
- path = ''
+ path = ""
- return urlunparse((scheme, netloc, path, '', query, fragment))
+ return urlunparse((scheme, netloc, path, "", query, fragment))
def get_auth_from_url(url):
@@ -997,35 +1019,36 @@ def get_auth_from_url(url):
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
- auth = ('', '')
+ auth = ("", "")
return auth
-# Moved outside of function to avoid recompile every call
-_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
-_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
-
-
def check_header_validity(header):
- """Verifies that header value is a string which doesn't contain
- leading whitespace or return characters. This prevents unintended
- header injection.
+ """Verifies that header parts don't contain leading whitespace
+ reserved characters, or return characters.
:param header: tuple, in the format (name, value).
"""
name, value = header
- if isinstance(value, bytes):
- pat = _CLEAN_HEADER_REGEX_BYTE
- else:
- pat = _CLEAN_HEADER_REGEX_STR
- try:
- if not pat.match(value):
- raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
- except TypeError:
- raise InvalidHeader("Value for header {%s: %s} must be of type str or "
- "bytes, not %s" % (name, value, type(value)))
+ for part in header:
+ if type(part) not in HEADER_VALIDATORS:
+ raise InvalidHeader(
+ f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be "
+ f"of type str or bytes, not {type(part)}"
+ )
+
+ _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0])
+ _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1])
+
+
+def _validate_header_part(header_part, header_kind, validator):
+ if not validator.match(header_part):
+ raise InvalidHeader(
+ f"Invalid leading whitespace, reserved character(s), or return"
+ f"character(s) in header {header_kind}: {header_part!r}"
+ )
def urldefragauth(url):
@@ -1040,21 +1063,24 @@ def urldefragauth(url):
if not netloc:
netloc, path = path, netloc
- netloc = netloc.rsplit('@', 1)[-1]
+ netloc = netloc.rsplit("@", 1)[-1]
- return urlunparse((scheme, netloc, path, params, query, ''))
+ return urlunparse((scheme, netloc, path, params, query, ""))
def rewind_body(prepared_request):
"""Move file pointer back to its recorded starting position
so it can be read again on redirect.
"""
- body_seek = getattr(prepared_request.body, 'seek', None)
- if body_seek is not None and isinstance(prepared_request._body_position, integer_types):
+ body_seek = getattr(prepared_request.body, "seek", None)
+ if body_seek is not None and isinstance(
+ prepared_request._body_position, integer_types
+ ):
try:
body_seek(prepared_request._body_position)
- except (IOError, OSError):
- raise UnrewindableBodyError("An error occurred when rewinding request "
- "body for redirect.")
+ except OSError:
+ raise UnrewindableBodyError(
+ "An error occurred when rewinding request body for redirect."
+ )
else:
raise UnrewindableBodyError("Unable to rewind request body for redirect.")
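
A worked example for the netmask helpers touched above; note that in `dotted_netmask`, `-` binds tighter than `<<`, so the expression is `0xFFFFFFFF ^ ((1 << (32 - mask)) - 1)`:

from pip._vendor.requests.utils import address_in_network, dotted_netmask, is_valid_cidr

assert dotted_netmask(24) == "255.255.255.0"  # 0xFFFFFFFF ^ 0xFF == 0xFFFFFF00
assert is_valid_cidr("192.168.1.0/24")
assert address_in_network("192.168.1.7", "192.168.1.0/24")
assert not address_in_network("10.0.0.1", "192.168.1.0/24")
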
diff --git a/src/pip/_vendor/rich/__init__.py b/src/pip/_vendor/rich/__init__.py
index 657811b5ebb..d35875dbb81 100644
--- a/src/pip/_vendor/rich/__init__.py
+++ b/src/pip/_vendor/rich/__init__.py
@@ -1,7 +1,7 @@
"""Rich text and beautiful formatting in the terminal."""
import os
-from typing import Callable, IO, TYPE_CHECKING, Any, Optional, Union
+from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
from ._extension import load_ipython_extension # noqa: F401
@@ -13,7 +13,11 @@
# Global console used by alternative print
_console: Optional["Console"] = None
-_IMPORT_CWD = os.path.abspath(os.getcwd())
+try:
+ _IMPORT_CWD = os.path.abspath(os.getcwd())
+except FileNotFoundError:
+ # Can happen if the cwd has been deleted
+ _IMPORT_CWD = ""
def get_console() -> "Console":
diff --git a/src/pip/_vendor/rich/_export_format.py b/src/pip/_vendor/rich/_export_format.py
new file mode 100644
index 00000000000..b79c13069b9
--- /dev/null
+++ b/src/pip/_vendor/rich/_export_format.py
@@ -0,0 +1,78 @@
+CONSOLE_HTML_FORMAT = """\
+
+
+
+
+
+
+
+
+
{code}
+
+
+
+"""
+
+CONSOLE_SVG_FORMAT = """\
+
+"""
+
+_SVG_FONT_FAMILY = "Rich Fira Code"
+_SVG_CLASSES_PREFIX = "rich-svg"
diff --git a/src/pip/_vendor/rich/_inspect.py b/src/pip/_vendor/rich/_inspect.py
index 01713e57673..30446ceb3f0 100644
--- a/src/pip/_vendor/rich/_inspect.py
+++ b/src/pip/_vendor/rich/_inspect.py
@@ -2,9 +2,10 @@
import inspect
from inspect import cleandoc, getdoc, getfile, isclass, ismodule, signature
-from typing import Any, Iterable, Optional, Tuple
+from typing import Any, Collection, Iterable, Optional, Tuple, Type, Union
from .console import Group, RenderableType
+from .control import escape_control_codes
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin
from .panel import Panel
@@ -19,12 +20,6 @@ def _first_paragraph(doc: str) -> str:
return paragraph
-def _reformat_doc(doc: str) -> str:
- """Reformat docstring."""
- doc = cleandoc(doc).strip()
- return doc
-
-
class Inspect(JupyterMixin):
"""A renderable to inspect any Python Object.
@@ -112,11 +107,13 @@ def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
# If obj is a module, there may be classes (which are callable) to display
if inspect.isclass(obj):
prefix = "class"
+ elif inspect.iscoroutinefunction(obj):
+ prefix = "async def"
else:
prefix = "def"
qual_signature = Text.assemble(
- (f"{prefix} ", f"inspect.{prefix}"),
+ (f"{prefix} ", f"inspect.{prefix.replace(' ', '_')}"),
(qualname, "inspect.callable"),
signature_text,
)
@@ -161,11 +158,9 @@ def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
yield ""
if self.docs:
- _doc = getdoc(obj)
+ _doc = self._get_formatted_doc(obj)
if _doc is not None:
- if not self.help:
- _doc = _first_paragraph(_doc)
- doc_text = Text(_reformat_doc(_doc), style="inspect.help")
+ doc_text = Text(_doc, style="inspect.help")
doc_text = highlighter(doc_text)
yield doc_text
yield ""
@@ -200,13 +195,10 @@ def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
add_row(key_text, Pretty(value, highlighter=highlighter))
else:
if self.docs:
- docs = getdoc(value)
+ docs = self._get_formatted_doc(value)
if docs is not None:
- _doc = _reformat_doc(str(docs))
- if not self.help:
- _doc = _first_paragraph(_doc)
- _signature_text.append("\n" if "\n" in _doc else " ")
- doc = highlighter(_doc)
+ _signature_text.append("\n" if "\n" in docs else " ")
+ doc = highlighter(docs)
doc.stylize("inspect.doc")
_signature_text.append(doc)
@@ -220,3 +212,59 @@ def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] "
f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options."
)
+
+ def _get_formatted_doc(self, object_: Any) -> Optional[str]:
+ """
+ Extracts the docstring of an object, processes it, and returns it.
+ The processing consists of cleaning up the docstring's indentation,
+ taking only its first paragraph if `self.help` is not True,
+ and escaping its control codes.
+
+ Args:
+ object_ (Any): the object to get the docstring from.
+
+ Returns:
+ Optional[str]: the processed docstring, or None if no docstring was found.
+ """
+ docs = getdoc(object_)
+ if docs is None:
+ return None
+ docs = cleandoc(docs).strip()
+ if not self.help:
+ docs = _first_paragraph(docs)
+ return escape_control_codes(docs)
+
+
+def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]:
+ """Returns the MRO of an object's class, or of the object itself if it's a class."""
+ if not hasattr(obj, "__mro__"):
+ # N.B. we cannot use `if type(obj) is type` here because it doesn't work with
+ # some types of classes, such as the ones that use abc.ABCMeta.
+ obj = type(obj)
+ return getattr(obj, "__mro__", ())
+
+
+def get_object_types_mro_as_strings(obj: object) -> Collection[str]:
+ """
+ Returns the MRO of an object's class as fully qualified names, or of the object itself if it's a class.
+
+ Examples:
+ `get_object_types_mro_as_strings(JSONDecoder)` will return `['json.decoder.JSONDecoder', 'builtins.object']`
+ """
+ return [
+ f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}'
+ for type_ in get_object_types_mro(obj)
+ ]
+
+
+def is_object_one_of_types(
+ obj: object, fully_qualified_types_names: Collection[str]
+) -> bool:
+ """
+ Returns `True` if the given object's class (or the object itself, if it's a class) has one of the
+ fully qualified names in its MRO.
+ """
+ for type_name in get_object_types_mro_as_strings(obj):
+ if type_name in fully_qualified_types_names:
+ return True
+ return False
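
A usage sketch for the new MRO helpers, mirroring the docstring example (assuming the vendored import path):

from json import JSONDecoder

from pip._vendor.rich._inspect import (
    get_object_types_mro_as_strings,
    is_object_one_of_types,
)

assert get_object_types_mro_as_strings(JSONDecoder) == [
    "json.decoder.JSONDecoder",
    "builtins.object",
]
# Instances work too: non-classes are normalised via type(obj) first.
assert is_object_one_of_types(JSONDecoder(), {"json.decoder.JSONDecoder"})
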
diff --git a/src/pip/_vendor/rich/_lru_cache.py b/src/pip/_vendor/rich/_lru_cache.py
deleted file mode 100644
index 10c818743be..00000000000
--- a/src/pip/_vendor/rich/_lru_cache.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from typing import Dict, Generic, TypeVar, TYPE_CHECKING
-import sys
-
-CacheKey = TypeVar("CacheKey")
-CacheValue = TypeVar("CacheValue")
-
-if sys.version_info < (3, 9):
- from pip._vendor.typing_extensions import OrderedDict
-else:
- from collections import OrderedDict
-
-
-class LRUCache(OrderedDict[CacheKey, CacheValue]):
- """
- A dictionary-like container that stores a given maximum items.
-
- If an additional item is added when the LRUCache is full, the least
- recently used key is discarded to make room for the new item.
-
- """
-
- def __init__(self, cache_size: int) -> None:
- self.cache_size = cache_size
- super().__init__()
-
- def __setitem__(self, key: CacheKey, value: CacheValue) -> None:
- """Store a new views, potentially discarding an old value."""
- if key not in self:
- if len(self) >= self.cache_size:
- self.popitem(last=False)
- super().__setitem__(key, value)
-
- def __getitem__(self, key: CacheKey) -> CacheValue:
- """Gets the item, but also makes it most recent."""
- value: CacheValue = super().__getitem__(key)
- super().__delitem__(key)
- super().__setitem__(key, value)
- return value
diff --git a/src/pip/_vendor/rich/_win32_console.py b/src/pip/_vendor/rich/_win32_console.py
index d42cd791607..81b10829053 100644
--- a/src/pip/_vendor/rich/_win32_console.py
+++ b/src/pip/_vendor/rich/_win32_console.py
@@ -263,6 +263,30 @@ def SetConsoleCursorPosition(
return bool(_SetConsoleCursorPosition(std_handle, coords))
+_GetConsoleCursorInfo = windll.kernel32.GetConsoleCursorInfo
+_GetConsoleCursorInfo.argtypes = [
+ wintypes.HANDLE,
+ ctypes.POINTER(CONSOLE_CURSOR_INFO),
+]
+_GetConsoleCursorInfo.restype = wintypes.BOOL
+
+
+def GetConsoleCursorInfo(
+ std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
+) -> bool:
+ """Get the cursor info - used to get cursor visibility and width
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct that receives information
+ about the console's cursor.
+
+ Returns:
+ bool: True if the function succeeds, otherwise False.
+ """
+ return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info)))
+
+
_SetConsoleCursorInfo = windll.kernel32.SetConsoleCursorInfo
_SetConsoleCursorInfo.argtypes = [
wintypes.HANDLE,
@@ -523,12 +547,14 @@ def move_cursor_backward(self) -> None:
def hide_cursor(self) -> None:
"""Hide the cursor"""
- invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=100, bVisible=0)
+ current_cursor_size = self._get_cursor_size()
+ invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0)
SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor)
def show_cursor(self) -> None:
"""Show the cursor"""
- visible_cursor = CONSOLE_CURSOR_INFO(dwSize=100, bVisible=1)
+ current_cursor_size = self._get_cursor_size()
+ visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1)
SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor)
def set_title(self, title: str) -> None:
@@ -540,6 +566,12 @@ def set_title(self, title: str) -> None:
assert len(title) < 255, "Console title must be less than 255 characters"
SetConsoleTitle(title)
+ def _get_cursor_size(self) -> int:
+ """Get the percentage of the character cell that is filled by the cursor"""
+ cursor_info = CONSOLE_CURSOR_INFO()
+ GetConsoleCursorInfo(self._handle, cursor_info=cursor_info)
+ return int(cursor_info.dwSize)
+
if __name__ == "__main__":
handle = GetStdHandle()
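
Why `hide_cursor`/`show_cursor` now round-trip through `GetConsoleCursorInfo`: hard-coding `dwSize=100` forced a full-block cursor on users who had configured a thinner one. A Windows-only sketch (assumes `LegacyWindowsTerm` from this module accepts a text stream, as in the `__main__` demo above):

import sys

term = LegacyWindowsTerm(sys.stdout)
size_before = term._get_cursor_size()
term.hide_cursor()
term.show_cursor()
assert term._get_cursor_size() == size_before  # user's cursor shape preserved
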
diff --git a/src/pip/_vendor/rich/_windows_renderer.py b/src/pip/_vendor/rich/_windows_renderer.py
index 066e585ddc0..5ece05649e7 100644
--- a/src/pip/_vendor/rich/_windows_renderer.py
+++ b/src/pip/_vendor/rich/_windows_renderer.py
@@ -51,3 +51,6 @@ def legacy_windows_render(buffer: Iterable[Segment], term: LegacyWindowsTerm) ->
term.erase_start_of_line()
elif mode == 2:
term.erase_line()
+ elif control_type == ControlType.SET_WINDOW_TITLE:
+ _, title = cast(Tuple[ControlType, str], control_code)
+ term.set_title(title)
diff --git a/src/pip/_vendor/rich/_wrap.py b/src/pip/_vendor/rich/_wrap.py
index b537757a573..c45f193f74a 100644
--- a/src/pip/_vendor/rich/_wrap.py
+++ b/src/pip/_vendor/rich/_wrap.py
@@ -1,8 +1,8 @@
import re
from typing import Iterable, List, Tuple
-from .cells import cell_len, chop_cells
from ._loop import loop_last
+from .cells import cell_len, chop_cells
re_word = re.compile(r"\s*\S+\s*")
@@ -27,14 +27,15 @@ def divide_line(text: str, width: int, fold: bool = True) -> List[int]:
if line_position + word_length > width:
if word_length > width:
if fold:
- for last, line in loop_last(
- chop_cells(word, width, position=line_position)
- ):
+ chopped_words = chop_cells(word, max_size=width, position=0)
+ for last, line in loop_last(chopped_words):
+ if start:
+ append(start)
+
if last:
line_position = _cell_len(line)
else:
start += len(line)
- append(start)
else:
if start:
append(start)
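
A hedged usage sketch for `divide_line`: it returns break offsets into the string, not the wrapped lines themselves, and the exact offsets depend on cell widths:

from pip._vendor.rich._wrap import divide_line

text = "supercalifragilistic word"
offsets = divide_line(text, 8)  # fold=True chops the overlong word
lines = [
    text[start:end]
    for start, end in zip([0, *offsets], [*offsets, len(text)])
]
assert "".join(lines) == text  # the offsets partition the original string
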
diff --git a/src/pip/_vendor/rich/box.py b/src/pip/_vendor/rich/box.py
index aec2926bea2..d0b07cf57e0 100644
--- a/src/pip/_vendor/rich/box.py
+++ b/src/pip/_vendor/rich/box.py
@@ -88,6 +88,16 @@ def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box":
box = ASCII
return box
+ def get_plain_headed_box(self) -> "Box":
+ """If this box uses special characters for the borders of the header, then
+ return the equivalent box that does not.
+
+ Returns:
+ Box: The most similar Box that doesn't use header-specific box characters.
+ If the current Box already satisfies this criterion, then it's returned.
+ """
+ return PLAIN_HEADED_SUBSTITUTIONS.get(self, self)
+
def get_top(self, widths: Iterable[int]) -> str:
"""Get the top of a simple box.
@@ -419,6 +429,20 @@ def get_bottom(self, widths: Iterable[int]) -> str:
"""
)
+MARKDOWN: Box = Box(
+ """\
+
+| ||
+|-||
+| ||
+|-||
+|-||
+| ||
+
+""",
+ ascii=True,
+)
+
# Map Boxes that don't render with raster fonts on to equivalent that do
LEGACY_WINDOWS_SUBSTITUTIONS = {
ROUNDED: SQUARE,
@@ -429,6 +453,15 @@ def get_bottom(self, widths: Iterable[int]) -> str:
HEAVY_HEAD: SQUARE,
}
+# Map headed boxes to their headerless equivalents
+PLAIN_HEADED_SUBSTITUTIONS = {
+ HEAVY_HEAD: SQUARE,
+ SQUARE_DOUBLE_HEAD: SQUARE,
+ MINIMAL_DOUBLE_HEAD: MINIMAL,
+ MINIMAL_HEAVY_HEAD: MINIMAL,
+ ASCII_DOUBLE_HEAD: ASCII2,
+}
+
if __name__ == "__main__": # pragma: no cover
@@ -461,6 +494,7 @@ def get_bottom(self, widths: Iterable[int]) -> str:
"HEAVY_HEAD",
"DOUBLE",
"DOUBLE_EDGE",
+ "MARKDOWN",
]
console.print(Panel("[bold green]Box Constants", style="green"), justify="center")
diff --git a/src/pip/_vendor/rich/cells.py b/src/pip/_vendor/rich/cells.py
index d7adf5a046d..139b949f7f2 100644
--- a/src/pip/_vendor/rich/cells.py
+++ b/src/pip/_vendor/rich/cells.py
@@ -1,31 +1,44 @@
import re
from functools import lru_cache
-from typing import Dict, List
+from typing import Callable, List
from ._cell_widths import CELL_WIDTHS
-from ._lru_cache import LRUCache
# Regex to match sequence of the most common character ranges
_is_single_cell_widths = re.compile("^[\u0020-\u006f\u00a0\u02ff\u0370-\u0482]*$").match
-def cell_len(text: str, _cache: Dict[str, int] = LRUCache(1024 * 4)) -> int:
+@lru_cache(4096)
+def cached_cell_len(text: str) -> int:
"""Get the number of cells required to display text.
+ This method always caches, which may use up a lot of memory. It is recommended to use
+ `cell_len` over this method.
+
Args:
text (str): Text to display.
Returns:
int: Get the number of cells required to display text.
"""
- cached_result = _cache.get(text, None)
- if cached_result is not None:
- return cached_result
+ _get_size = get_character_cell_size
+ total_size = sum(_get_size(character) for character in text)
+ return total_size
+
+
+def cell_len(text: str, _cell_len: Callable[[str], int] = cached_cell_len) -> int:
+ """Get the number of cells required to display text.
+ Args:
+ text (str): Text to display.
+
+ Returns:
+ int: Get the number of cells required to display text.
+ """
+ if len(text) < 512:
+ return _cell_len(text)
_get_size = get_character_cell_size
total_size = sum(_get_size(character) for character in text)
- if len(text) <= 512:
- _cache[text] = total_size
return total_size
@@ -80,7 +93,7 @@ def set_cell_size(text: str, total: int) -> str:
return text + " " * (total - size)
return text[:total]
- if not total:
+ if total <= 0:
return ""
cell_size = cell_len(text)
if cell_size == total:
@@ -109,7 +122,8 @@ def set_cell_size(text: str, total: int) -> str:
# TODO: This is inefficient
# TODO: This might not work with CWJ type characters
def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]:
- """Break text in to equal (cell) length strings."""
+ """Break text in to equal (cell) length strings, returning the characters in reverse
+ order"""
_get_character_cell_size = get_character_cell_size
characters = [
(character, _get_character_cell_size(character)) for character in text
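
A behavioral sketch of the reworked `cell_len`: strings under 512 characters go through the `functools.lru_cache`-backed helper, longer ones are computed directly and never cached:

from pip._vendor.rich.cells import cached_cell_len, cell_len

assert cell_len("hello") == 5
assert cell_len("💩") == 2  # emoji occupy two terminal cells
assert cell_len("x" * 600) == 600  # >= 512 characters: computed, not cached
cached_cell_len.cache_clear()  # standard functools.lru_cache API
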
diff --git a/src/pip/_vendor/rich/console.py b/src/pip/_vendor/rich/console.py
index 8c305712dc7..93a10b0b500 100644
--- a/src/pip/_vendor/rich/console.py
+++ b/src/pip/_vendor/rich/console.py
@@ -4,6 +4,7 @@
import platform
import sys
import threading
+import zlib
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
@@ -12,6 +13,7 @@
from html import escape
from inspect import isclass
from itertools import islice
+from math import ceil
from time import monotonic
from types import FrameType, ModuleType, TracebackType
from typing import (
@@ -43,9 +45,10 @@
from . import errors, themes
from ._emoji_replace import _emoji_replace
+from ._export_format import CONSOLE_HTML_FORMAT, CONSOLE_SVG_FORMAT
from ._log_render import FormatTimeCallable, LogRender
from .align import Align, AlignMethod
-from .color import ColorSystem
+from .color import ColorSystem, blend_rgb
from .control import Control
from .emoji import EmojiVariant
from .highlighter import NullHighlighter, ReprHighlighter
@@ -69,6 +72,8 @@
from .live import Live
from .status import Status
+JUPYTER_DEFAULT_COLUMNS = 115
+JUPYTER_DEFAULT_LINES = 100
WINDOWS = platform.system() == "Windows"
HighlighterType = Callable[[Union[str, "Text"]], "Text"]
@@ -82,159 +87,22 @@ class NoChange:
NO_CHANGE = NoChange()
+try:
+ _STDIN_FILENO = sys.__stdin__.fileno()
+except Exception:
+ _STDIN_FILENO = 0
try:
_STDOUT_FILENO = sys.__stdout__.fileno()
except Exception:
_STDOUT_FILENO = 1
-
try:
_STDERR_FILENO = sys.__stderr__.fileno()
except Exception:
_STDERR_FILENO = 2
-_STD_STREAMS = (_STDOUT_FILENO, _STDERR_FILENO)
-
-CONSOLE_HTML_FORMAT = """\
-
-
-
-
-
-
-
-
-
{code}
-
-
-
-"""
-
-CONSOLE_SVG_FORMAT = """\
-
-"""
+_STD_STREAMS = (_STDIN_FILENO, _STDOUT_FILENO, _STDERR_FILENO)
+_STD_STREAMS_OUTPUT = (_STDOUT_FILENO, _STDERR_FILENO)
+
_TERM_COLORS = {"256color": ColorSystem.EIGHT_BIT, "16color": ColorSystem.STANDARD}
@@ -790,12 +658,19 @@ def __init__(
self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter
if self.is_jupyter:
- width = width or 93
- height = height or 100
+ if width is None:
+ jupyter_columns = self._environ.get("JUPYTER_COLUMNS")
+ if jupyter_columns is not None and jupyter_columns.isdigit():
+ width = int(jupyter_columns)
+ else:
+ width = JUPYTER_DEFAULT_COLUMNS
+ if height is None:
+ jupyter_lines = self._environ.get("JUPYTER_LINES")
+ if jupyter_lines is not None and jupyter_lines.isdigit():
+ height = int(jupyter_lines)
+ else:
+ height = JUPYTER_DEFAULT_LINES
- self.soft_wrap = soft_wrap
- self._width = width
- self._height = height
self.tab_size = tab_size
self.record = record
self._markup = markup
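
A sketch of the new Jupyter sizing knobs (values here are examples, not defaults; `force_jupyter=True` stands in for a real notebook):

import os

os.environ["JUPYTER_COLUMNS"] = "120"
os.environ["JUPYTER_LINES"] = "40"

from pip._vendor.rich.console import Console

console = Console(force_jupyter=True)
assert console.width == 120  # env var overrides JUPYTER_DEFAULT_COLUMNS (115)
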
@@ -807,6 +682,7 @@ def __init__(
if legacy_windows is None
else legacy_windows
)
+
if width is None:
columns = self._environ.get("COLUMNS")
if columns is not None and columns.isdigit():
@@ -1045,6 +921,13 @@ def is_terminal(self) -> bool:
"""
if self._force_terminal is not None:
return self._force_terminal
+
+ if hasattr(sys.stdin, "__module__") and sys.stdin.__module__.startswith(
+ "idlelib"
+ ):
+ # Return False for Idle which claims to be a tty but can't handle ansi codes
+ return False
+
isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None)
try:
return False if isatty is None else isatty()
@@ -1099,16 +982,16 @@ def size(self) -> ConsoleDimensions:
if WINDOWS: # pragma: no cover
try:
width, height = os.get_terminal_size()
- except OSError: # Probably not a terminal
+ except (AttributeError, ValueError, OSError): # Probably not a terminal
pass
else:
- try:
- width, height = os.get_terminal_size(sys.__stdin__.fileno())
- except (AttributeError, ValueError, OSError):
+ for file_descriptor in _STD_STREAMS:
try:
- width, height = os.get_terminal_size(sys.__stdout__.fileno())
+ width, height = os.get_terminal_size(file_descriptor)
except (AttributeError, ValueError, OSError):
pass
+ else:
+ break
columns = self._environ.get("COLUMNS")
if columns is not None and columns.isdigit():
@@ -1311,6 +1194,38 @@ def is_alt_screen(self) -> bool:
"""
return self._is_alt_screen
+ def set_window_title(self, title: str) -> bool:
+ """Set the title of the console terminal window.
+
+ Warning: There is no means within Rich of "resetting" the window title to its
+ previous value, meaning the title you set will persist even after your application
+ exits.
+
+ ``fish`` shell resets the window title before and after each command by default,
+ negating this issue. Windows Terminal and command prompt will also reset the title for you.
+ Most other shells and terminals, however, do not do this.
+
+ Some terminals may require configuration changes before you can set the title.
+ Some terminals may not support setting the title at all.
+
+ Other software (including the terminal itself, the shell, custom prompts, plugins, etc.)
+ may also set the terminal window title. This could result in whatever value you write
+ using this method being overwritten.
+
+ Args:
+ title (str): The new title of the terminal window.
+
+ Returns:
+ bool: True if the control code to change the terminal title was
+ written, otherwise False. Note that a return value of True
+ does not guarantee that the window title has actually changed,
+ since the feature may be unsupported/disabled in some terminals.
+ """
+ if self.is_terminal:
+ self.control(Control.title(title))
+ return True
+ return False
+
def screen(
self, hide_cursor: bool = True, style: Optional[StyleType] = None
) -> "ScreenContext":
@@ -1422,6 +1337,11 @@ def render_lines(
_rendered = self.render(renderable, render_options)
if style:
_rendered = Segment.apply_style(_rendered, style)
+
+ render_height = render_options.height
+ if render_height is not None:
+ render_height = max(0, render_height)
+
lines = list(
islice(
Segment.split_and_crop_lines(
@@ -1429,9 +1349,10 @@ def render_lines(
render_options.max_width,
include_new_lines=new_lines,
pad=pad,
+ style=style,
),
None,
- render_options.height,
+ render_height,
)
)
if render_options.height is not None:
@@ -1901,7 +1822,7 @@ def print_exception(
"""Prints a rich render of the last exception and traceback.
Args:
- width (Optional[int], optional): Number of characters used to render code. Defaults to 88.
+ width (Optional[int], optional): Number of characters used to render code. Defaults to 100.
extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
theme (str, optional): Override pygments theme used in traceback
word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
@@ -1947,7 +1868,7 @@ def _caller_frame_info(
frame = currentframe()
if frame is not None:
# Use the faster currentframe where implemented
- while offset and frame:
+ while offset and frame is not None:
frame = frame.f_back
offset -= 1
assert frame is not None
@@ -2049,11 +1970,11 @@ def _check_buffer(self) -> None:
del self._buffer[:]
return
with self._lock:
- if self._buffer_index == 0:
+ if self.record:
+ with self._record_buffer_lock:
+ self._record_buffer.extend(self._buffer[:])
- if self.record:
- with self._record_buffer_lock:
- self._record_buffer.extend(self._buffer[:])
+ if self._buffer_index == 0:
if self.is_jupyter: # pragma: no cover
from .jupyter import display
@@ -2066,7 +1987,7 @@ def _check_buffer(self) -> None:
if self.legacy_windows:
try:
use_legacy_windows_render = (
- self.file.fileno() in _STD_STREAMS
+ self.file.fileno() in _STD_STREAMS_OUTPUT
)
except (ValueError, io.UnsupportedOperation):
pass
@@ -2318,135 +2239,240 @@ def export_svg(
clear: bool = True,
code_format: str = CONSOLE_SVG_FORMAT,
) -> str:
- """Generate an SVG string from the console contents (requires record=True in Console constructor)
+ """
+ Generate an SVG from the console contents (requires record=True in Console constructor).
Args:
title (str): The title of the tab in the output image
theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
code_format (str): Format string used to generate the SVG. Rich will inject a number of variables
into the string in order to form the final SVG output. The default template used and the variables
injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
-
- Returns:
- str: The string representation of the SVG. That is, the ``code_format`` template with content injected.
"""
- assert (
- self.record
- ), "To export console contents set record=True in the constructor or instance"
- _theme = theme or SVG_EXPORT_THEME
+ from pip._vendor.rich.cells import cell_len
- with self._record_buffer_lock:
- segments = Segment.simplify(self._record_buffer)
- segments = Segment.filter_control(segments)
- parts = [(text, style or Style.null()) for text, style, _ in segments]
- terminal_text = Text.assemble(*parts)
- lines = terminal_text.wrap(self, width=self.width, overflow="fold")
- segments = self.render(lines, options=self.options)
- segment_lines = list(
- Segment.split_and_crop_lines(
- segments, length=self.width, include_new_lines=False
- )
- )
-
- fragments: List[str] = []
- theme_foreground_color = _theme.foreground_color.hex
- theme_background_color = _theme.background_color.hex
-
- theme_foreground_css = f"color: {theme_foreground_color}; text-decoration-color: {theme_foreground_color};"
- theme_background_css = f"background-color: {theme_background_color};"
+ style_cache: Dict[Style, str] = {}
- theme_css = theme_foreground_css + theme_background_css
-
- styles: Dict[str, int] = {}
- styles[theme_css] = 1
+ def get_svg_style(style: Style) -> str:
+ """Convert a Style to CSS rules for SVG."""
+ if style in style_cache:
+ return style_cache[style]
+ css_rules = []
+ color = (
+ _theme.foreground_color
+ if (style.color is None or style.color.is_default)
+ else style.color.get_truecolor(_theme)
+ )
+ bgcolor = (
+ _theme.background_color
+ if (style.bgcolor is None or style.bgcolor.is_default)
+ else style.bgcolor.get_truecolor(_theme)
+ )
+ if style.reverse:
+ color, bgcolor = bgcolor, color
+ if style.dim:
+ color = blend_rgb(color, bgcolor, 0.4)
+ css_rules.append(f"fill: {color.hex}")
+ if style.bold:
+ css_rules.append("font-weight: bold")
+ if style.italic:
+ css_rules.append("font-style: italic;")
+ if style.underline:
+ css_rules.append("text-decoration: underline;")
+ if style.strike:
+ css_rules.append("text-decoration: line-through;")
+
+ css = ";".join(css_rules)
+ style_cache[style] = css
+ return css
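# Worked example of the reverse/dim handling in get_svg_style above: reverse
# swaps foreground and background, then dim blends the foreground 40% toward
# the background. blend() below is a stand-in with the same linear
# interpolation as rich's blend_rgb.
from typing import Tuple

RGB = Tuple[int, int, int]

def blend(c1: RGB, c2: RGB, t: float) -> RGB:
    r, g, b = (int(a + (b2 - a) * t) for a, b2 in zip(c1, c2))
    return (r, g, b)

fg: RGB = (255, 255, 255)
bg: RGB = (0, 0, 0)
fg, bg = bg, fg          # style.reverse
fg = blend(fg, bg, 0.4)  # style.dim
assert fg == (102, 102, 102)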
- for line in segment_lines:
- line_spans = []
- for segment in line:
- text, style, _ = segment
- text = escape(text)
- if style:
- rules = style.get_html_style(_theme)
- if style.link:
- text = f'<a href="{style.link}">{text}</a>'
+ _theme = theme or SVG_EXPORT_THEME
- if style.blink or style.blink2:
- text = f'<span class="blink">{text}</span>'
+ width = self.width
+ char_height = 20
+ char_width = char_height * 0.61
+ line_height = char_height * 1.22
+
+ margin_top = 1
+ margin_right = 1
+ margin_bottom = 1
+ margin_left = 1
+
+ padding_top = 40
+ padding_right = 8
+ padding_bottom = 8
+ padding_left = 8
+
+ padding_width = padding_left + padding_right
+ padding_height = padding_top + padding_bottom
+ margin_width = margin_left + margin_right
+ margin_height = margin_top + margin_bottom
+
+ text_backgrounds: List[str] = []
+ text_group: List[str] = []
+ classes: Dict[str, int] = {}
+ style_no = 1
+
+ def escape_text(text: str) -> str:
+ """HTML escape text and replace spaces with nbsp."""
+ return escape(text).replace(" ", " ")
+
+ def make_tag(
+ name: str, content: Optional[str] = None, **attribs: object
+ ) -> str:
+ """Make a tag from name, content, and attributes."""
+
+ def stringify(value: object) -> str:
+ if isinstance(value, float):
+ return format(value, "g")
+ return str(value)
+
+ tag_attribs = " ".join(
+ f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"'
+ for k, v in attribs.items()
+ )
+ return (
+ f"<{name} {tag_attribs}>{content}{name}>"
+ if content
+ else f"<{name} {tag_attribs}/>"
+ )
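# Worked example of make_tag's attribute munging (a standalone replica of the
# helper above, not the vendored function): a leading "_" is stripped so
# `_class` can stand in for the `class` keyword, remaining underscores become
# hyphens, and floats render via format(value, "g").
from typing import Optional

def make_tag_demo(name: str, content: Optional[str] = None, **attribs: object) -> str:
    def stringify(value: object) -> str:
        return format(value, "g") if isinstance(value, float) else str(value)

    tag_attribs = " ".join(
        f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"'
        for k, v in attribs.items()
    )
    return (
        f"<{name} {tag_attribs}>{content}</{name}>"
        if content
        else f"<{name} {tag_attribs}/>"
    )

assert (
    make_tag_demo("rect", fill="#000", x=8.0, stroke_width=1, _class="r1")
    == '<rect fill="#000" x="8" stroke-width="1" class="r1"/>'
)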
- # If the style doesn't contain a color, we still
- # need to make sure we output the default foreground color
- # from the TerminalTheme.
- if not style.reverse:
- foreground_css = theme_foreground_css
- background_css = theme_background_css
- else:
- foreground_css = f"color: {theme_background_color}; text-decoration-color: {theme_background_color};"
- background_css = (
- f"background-color: {theme_foreground_color};"
- )
+ with self._record_buffer_lock:
+ segments = list(Segment.filter_control(self._record_buffer))
+ if clear:
+ self._record_buffer.clear()
- if style.color is None:
- rules += f";{foreground_css}"
- if style.bgcolor is None:
- rules += f";{background_css}"
+ unique_id = "terminal-" + str(
+ zlib.adler32(
+ ("".join(segment.text for segment in segments)).encode(
+ "utf-8", "ignore"
+ )
+ + title.encode("utf-8", "ignore")
+ )
+ )
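# The id built above is deterministic: identical recorded text and title
# always produce the same zlib.adler32 value, so re-exports yield stable CSS
# class and clip-path names (terminal_id is an illustrative helper, not rich
# API):
import zlib

def terminal_id(text: str, title: str) -> str:
    return "terminal-" + str(
        zlib.adler32(text.encode("utf-8", "ignore") + title.encode("utf-8", "ignore"))
    )

assert terminal_id("hello", "Rich") == terminal_id("hello", "Rich")
assert terminal_id("hello", "Rich") != terminal_id("hello!", "Rich")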
+ y = 0
+ for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)):
+ x = 0
+ for text, style, _control in line:
+ style = style or Style()
+ rules = get_svg_style(style)
+ if rules not in classes:
+ classes[rules] = style_no
+ style_no += 1
+ class_name = f"r{classes[rules]}"
+
+ if style.reverse:
+ has_background = True
+ background = (
+ _theme.foreground_color.hex
+ if style.color is None
+ else style.color.get_truecolor(_theme).hex
+ )
+ else:
+ bgcolor = style.bgcolor
+ has_background = bgcolor is not None and not bgcolor.is_default
+ background = (
+ _theme.background_color.hex
+ if style.bgcolor is None
+ else style.bgcolor.get_truecolor(_theme).hex
+ )
- style_number = styles.setdefault(rules, len(styles) + 1)
- text = f'<span class="r{style_number}">{text}</span>'
- else:
- text = f'<span>{text}</span>'
- line_spans.append(text)
+ text_length = cell_len(text)
+ if has_background:
+ text_backgrounds.append(
+ make_tag(
+ "rect",
+ fill=background,
+ x=x * char_width,
+ y=y * line_height + 1.5,
+ width=char_width * text_length,
+ height=line_height + 0.25,
+ shape_rendering="crispEdges",
+ )
+ )
- fragments.append(f"
{''.join(line_spans)}
")
+ if text != " " * len(text):
+ text_group.append(
+ make_tag(
+ "text",
+ escape_text(text),
+ _class=f"{unique_id}-{class_name}",
+ x=x * char_width,
+ y=y * line_height + char_height,
+ textLength=char_width * len(text),
+ clip_path=f"url(#{unique_id}-line-{y})",
+ )
+ )
+ x += cell_len(text)
+
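# The x-cursor above advances by cell_len(text) rather than len(text):
# wide (e.g. CJK) characters occupy two terminal cells, and the SVG x
# offsets must line up with the terminal grid.
from pip._vendor.rich.cells import cell_len

assert cell_len("abc") == 3
assert cell_len("あ") == 2  # one character, two cells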
+ line_offsets = [line_no * line_height + 1.5 for line_no in range(y)]
+ lines = "\n".join(
+ f"""
+ {make_tag("rect", x=0, y=offset, width=char_width * width, height=line_height + 0.25)}
+ """
+ for line_no, offset in enumerate(line_offsets)
+ )
- stylesheet_rules = []
- for style_rule, style_number in styles.items():
- if style_rule:
- stylesheet_rules.append(f".r{style_number} {{{ style_rule }}}")
- stylesheet = "\n".join(stylesheet_rules)
+ styles = "\n".join(
+ f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items()
+ )
+ backgrounds = "".join(text_backgrounds)
+ matrix = "".join(text_group)
+
+ terminal_width = ceil(width * char_width + padding_width)
+ terminal_height = (y + 1) * line_height + padding_height
+ chrome = make_tag(
+ "rect",
+ fill=_theme.background_color.hex,
+ stroke="rgba(255,255,255,0.35)",
+ stroke_width="1",
+ x=margin_left,
+ y=margin_top,
+ width=terminal_width,
+ height=terminal_height,
+ rx=8,
+ )
- if clear:
- self._record_buffer.clear()
+ title_color = _theme.foreground_color.hex
+ if title:
+ chrome += make_tag(
+ "text",
+ escape_text(title),
+ _class=f"{unique_id}-title",
+ fill=title_color,
+ text_anchor="middle",
+ x=terminal_width // 2,
+ y=margin_top + char_height + 6,
+ )
+ chrome += f"""
+
+
+
+
+
+ """
- # These values are the ones that I found to work well after experimentation.
- # Many of them can be tweaked, but too much variation from these values could
- # result in visually broken output/clipping issues.
- terminal_padding = 12
- font_size = 18
- line_height = font_size + 4
- code_start_y = 60
- required_code_height = line_height * len(lines)
- margin = 140
-
- # Monospace fonts are generally around 0.5-0.55 width/height ratio, but I've
- # added extra width to ensure that the output SVG is big enough.
- monospace_font_width_scale = 0.60
-
- # This works out as a good heuristic for the final size of the drawn terminal.
- terminal_height = required_code_height + code_start_y
- terminal_width = (
- self.width * monospace_font_width_scale * font_size
- + 2 * terminal_padding
- + self.width
- )
- total_height = terminal_height + 2 * margin
- total_width = terminal_width + 2 * margin
-
- rendered_code = code_format.format(
- code="\n".join(fragments),
- total_height=total_height,
- total_width=total_width,
- theme_foreground_color=theme_foreground_color,
- theme_background_color=theme_background_color,
- margin=margin,
- font_size=font_size,
+ svg = code_format.format(
+ unique_id=unique_id,
+ char_width=char_width,
+ char_height=char_height,
line_height=line_height,
- title=title,
- stylesheet=stylesheet,
+ terminal_width=char_width * width - 1,
+ terminal_height=(y + 1) * line_height - 1,
+ width=terminal_width + margin_width,
+ height=terminal_height + margin_height,
+ terminal_x=margin_left + padding_left,
+ terminal_y=margin_top + padding_top,
+ styles=styles,
+ chrome=chrome,
+ backgrounds=backgrounds,
+ matrix=matrix,
+ lines=lines,
)
-
- return rendered_code
+ return svg
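# Shape of the template call above: any format string with the same named
# fields can be passed as code_format (this toy template is an assumption,
# not the shipped CONSOLE_SVG_FORMAT):
toy_template = (
    '<svg viewBox="0 0 {width} {height}" xmlns="http://www.w3.org/2000/svg">'
    "<style>{styles}</style>{chrome}{backgrounds}{matrix}</svg>"
)
svg = toy_template.format(
    width=860, height=400, styles="", chrome="", backgrounds="", matrix=""
)
assert svg.startswith('<svg viewBox="0 0 860 400"')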
def save_svg(
self,
@@ -2469,12 +2495,27 @@ def save_svg(
injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
"""
svg = self.export_svg(
- title=title, theme=theme, clear=clear, code_format=code_format
+ title=title,
+ theme=theme,
+ clear=clear,
+ code_format=code_format,
)
with open(path, "wt", encoding="utf-8") as write_file:
write_file.write(svg)
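# Hedged usage sketch for the export/save pair above (requires a recording
# console; "example.svg" is an illustrative path):
from pip._vendor.rich.console import Console

console = Console(record=True)
console.print("[bold magenta]hello[/] world")
console.save_svg("example.svg", title="Demo")  # export_svg + file write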
+def _svg_hash(svg_main_code: str) -> str:
+ """Returns a unique hash for the given SVG main code.
+
+ Args:
+ svg_main_code (str): The content we're going to inject in the SVG envelope.
+
+ Returns:
+ str: a hash of the given content
+ """
+ return str(zlib.adler32(svg_main_code.encode()))
+
+
if __name__ == "__main__": # pragma: no cover
console = Console(record=True)
diff --git a/src/pip/_vendor/rich/control.py b/src/pip/_vendor/rich/control.py
index e17b2c6349c..88fcb929516 100644
--- a/src/pip/_vendor/rich/control.py
+++ b/src/pip/_vendor/rich/control.py
@@ -1,18 +1,35 @@
-from typing import Callable, Dict, Iterable, List, TYPE_CHECKING, Union
+import sys
+import time
+from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union
+
+if sys.version_info >= (3, 8):
+ from typing import Final
+else:
+ from pip._vendor.typing_extensions import Final # pragma: no cover
from .segment import ControlCode, ControlType, Segment
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderResult
-STRIP_CONTROL_CODES = [
+STRIP_CONTROL_CODES: Final = [
+ 7, # Bell
8, # Backspace
11, # Vertical tab
12, # Form feed
13, # Carriage return
]
-_CONTROL_TRANSLATE = {_codepoint: None for _codepoint in STRIP_CONTROL_CODES}
+_CONTROL_STRIP_TRANSLATE: Final = {
+ _codepoint: None for _codepoint in STRIP_CONTROL_CODES
+}
+CONTROL_ESCAPE: Final = {
+ 7: "\\a",
+ 8: "\\b",
+ 11: "\\v",
+ 12: "\\f",
+ 13: "\\r",
+}
CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = {
ControlType.BELL: lambda: "\x07",
@@ -30,6 +47,7 @@
ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
+ ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07",
}
@@ -147,6 +165,15 @@ def alt_screen(cls, enable: bool) -> "Control":
else:
return cls(ControlType.DISABLE_ALT_SCREEN)
+ @classmethod
+ def title(cls, title: str) -> "Control":
+ """Set the terminal window title
+
+ Args:
+ title (str): The new terminal window title
+ """
+ return cls((ControlType.SET_WINDOW_TITLE, title))
+
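# Control.title above renders the xterm "OSC 0" sequence, ESC ] 0 ; title BEL,
# per the SET_WINDOW_TITLE entry in CONTROL_CODES_FORMAT; __str__ exposes the
# rendered text via the segment attribute:
from pip._vendor.rich.control import Control

assert str(Control.title("my app")) == "\x1b]0;my app\x07"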
def __str__(self) -> str:
return self.segment.text
@@ -158,7 +185,7 @@ def __rich_console__(
def strip_control_codes(
- text: str, _translate_table: Dict[int, None] = _CONTROL_TRANSLATE
+ text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
) -> str:
"""Remove control codes from text.
@@ -171,5 +198,28 @@ def strip_control_codes(
return text.translate(_translate_table)
+def escape_control_codes(
+ text: str,
+ _translate_table: Dict[int, str] = CONTROL_ESCAPE,
+) -> str:
+ """Replace control codes with their "escaped" equivalent in the given text.
+ (e.g. "\b" becomes "\\b")
+
+ Args:
+ text (str): A string possibly containing control codes.
+
+ Returns:
+ str: String with control codes replaced with their escaped version.
+ """
+ return text.translate(_translate_table)
+
+
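# Quick check of the two translation helpers above: strip_control_codes drops
# the codepoints in STRIP_CONTROL_CODES, while escape_control_codes rewrites
# them via CONTROL_ESCAPE (codepoint 13 is carriage return in both tables):
assert "hello\rWorld".translate({13: None}) == "helloWorld"
assert "hello\rWorld".translate({13: "\\r"}) == "hello\\rWorld"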
if __name__ == "__main__": # pragma: no cover
- print(strip_control_codes("hello\rWorld"))
+ from pip._vendor.rich.console import Console
+
+ console = Console()
+ console.print("Look at the title of your terminal window ^")
+ # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
+ for i in range(10):
+ console.set_window_title("🚀 Loading" + "." * i)
+ time.sleep(0.5)
diff --git a/src/pip/_vendor/rich/default_styles.py b/src/pip/_vendor/rich/default_styles.py
index cb7bfc19253..46e9ea52c54 100644
--- a/src/pip/_vendor/rich/default_styles.py
+++ b/src/pip/_vendor/rich/default_styles.py
@@ -39,6 +39,7 @@
"inspect.attr": Style(color="yellow", italic=True),
"inspect.attr.dunder": Style(color="yellow", italic=True, dim=True),
"inspect.callable": Style(bold=True, color="red"),
+ "inspect.async_def": Style(italic=True, color="bright_cyan"),
"inspect.def": Style(italic=True, color="bright_cyan"),
"inspect.class": Style(italic=True, color="bright_cyan"),
"inspect.error": Style(bold=True, color="red"),
@@ -78,6 +79,7 @@
"repr.attrib_equal": Style(bold=True),
"repr.attrib_value": Style(color="magenta", italic=False),
"repr.number": Style(color="cyan", bold=True, italic=False),
+ "repr.number_complex": Style(color="cyan", bold=True, italic=False), # same
"repr.bool_true": Style(color="bright_green", italic=True),
"repr.bool_false": Style(color="bright_red", italic=True),
"repr.none": Style(color="magenta", italic=True),
@@ -156,6 +158,9 @@
"markdown.h7": Style(italic=True, dim=True),
"markdown.link": Style(color="bright_blue"),
"markdown.link_url": Style(color="blue"),
+ "iso8601.date": Style(color="blue"),
+ "iso8601.time": Style(color="magenta"),
+ "iso8601.timezone": Style(color="yellow"),
}
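# The iso8601.* styles added above pair with the ISO8601Highlighter introduced
# in highlighter.py further down in this diff; a hedged usage sketch:
from pip._vendor.rich.console import Console
from pip._vendor.rich.highlighter import ISO8601Highlighter

console = Console(highlighter=ISO8601Highlighter())
console.print("20080830")  # calendar date tinted via the iso8601.* styles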
diff --git a/src/pip/_vendor/rich/diagnose.py b/src/pip/_vendor/rich/diagnose.py
index 518586ea876..ad36183898e 100644
--- a/src/pip/_vendor/rich/diagnose.py
+++ b/src/pip/_vendor/rich/diagnose.py
@@ -22,6 +22,8 @@ def report() -> None: # pragma: no cover
"TERM_PROGRAM",
"COLUMNS",
"LINES",
+ "JUPYTER_COLUMNS",
+ "JUPYTER_LINES",
"JPY_PARENT_PID",
"VSCODE_VERBOSE_LOGGING",
)
diff --git a/src/pip/_vendor/rich/file_proxy.py b/src/pip/_vendor/rich/file_proxy.py
index 3ec593a5a48..cc69f22f3cc 100644
--- a/src/pip/_vendor/rich/file_proxy.py
+++ b/src/pip/_vendor/rich/file_proxy.py
@@ -1,5 +1,5 @@
import io
-from typing import List, Any, IO, TYPE_CHECKING
+from typing import IO, TYPE_CHECKING, Any, List
from .ansi import AnsiDecoder
from .text import Text
@@ -48,7 +48,7 @@ def write(self, text: str) -> int:
return len(text)
def flush(self) -> None:
- buffer = self.__buffer
- if buffer:
- self.__console.print("".join(buffer))
- del buffer[:]
+ output = "".join(self.__buffer)
+ if output:
+ self.__console.print(output)
+ del self.__buffer[:]
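# Behavioural nuance of the rewritten flush above: joining before testing
# means a buffer holding only empty strings no longer reaches console.print.
buffer = ["", ""]
output = "".join(buffer)
if output:  # "" is falsy, so an all-empty buffer prints nothing
    print(output)
del buffer[:]
assert buffer == []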
diff --git a/src/pip/_vendor/rich/highlighter.py b/src/pip/_vendor/rich/highlighter.py
index 7bee4167e43..82293dffc49 100644
--- a/src/pip/_vendor/rich/highlighter.py
+++ b/src/pip/_vendor/rich/highlighter.py
@@ -94,6 +94,7 @@ class ReprHighlighter(RegexHighlighter):
r"(?P[\w.]*?)\(",
r"\b(?PTrue)\b|\b(?PFalse)\b|\b(?PNone)\b",
r"(?P\.\.\.)",
+ r"(?P(?(?\B(/[-\w._+]+)*\/)(?P[-\w._+]*)?",
r"(?b?'''.*?(? None:
break
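# Sanity check for the number_complex pattern added above (the pattern is
# copied from the reconstructed diff line, so treat it as an assumption):
import re

NUMBER_COMPLEX = re.compile(
    r"(?<!\w)(?:\-?[0-9]+\.?[0-9]*(?:e[-+]?\d+?)?)"
    r"(?:[-+](?:[0-9]+\.?[0-9]*(?:e[-+]?\d+)?))?j"
)
assert NUMBER_COMPLEX.fullmatch("3+4j")
assert NUMBER_COMPLEX.fullmatch("-1.5e-3j")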
+class ISO8601Highlighter(RegexHighlighter):
+ """Highlights the ISO8601 date time strings.
+ Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html
+ """
+
+ base_style = "iso8601."
+ highlights = [
+ #
+ # Dates
+ #
+ # Calendar month (e.g. 2008-08). The hyphen is required
+ r"^(?P[0-9]{4})-(?P1[0-2]|0[1-9])$",
+ # Calendar date w/o hyphens (e.g. 20080830)
+ r"^(?P(?P[0-9]{4})(?P1[0-2]|0[1-9])(?P3[01]|0[1-9]|[12][0-9]))$",
+ # Ordinal date (e.g. 2008-243). The hyphen is optional
+ r"^(?P(?P[0-9]{4})-?(?P36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$",
+ #
+ # Weeks
+ #
+ # Week of the year (e.g., 2008-W35). The hyphen is optional
+ r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9]))$",
+ # Week date (e.g., 2008-W35-6). The hyphens are optional
+ r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9])-?(?P[1-7]))$",
+ #
+ # Times
+ #
+ # Hours and minutes (e.g., 17:21). The colon is optional
+ r"^(?P