diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml
index 29fe3b65..3274d56a 100644
--- a/.github/workflows/label.yml
+++ b/.github/workflows/label.yml
@@ -1,6 +1,6 @@
name: "Pull Request Labeler"
on:
- pull_request:
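+  # pull_request_target runs in the context of the base repository, so the
+  # labeler gets the write-scoped token it needs to label PRs from forks.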
+ pull_request_target:
paths:
- '**/__init__.py'
jobs:
diff --git a/CODEOWNERS b/CODEOWNERS
index 978e1173..395d45fe 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -10,4 +10,4 @@
# For all file changes, github would automatically include the following people in the PRs.
#
-* @vrdmr @gavin-aguiar @YunchuWang @pdthummar @hallvictoria
+* @vrdmr @gavin-aguiar @hallvictoria
diff --git a/README.md b/README.md
index f83b7c35..e5b9c07e 100644
--- a/README.md
+++ b/README.md
@@ -7,20 +7,20 @@
## Overview
-Python support for Azure Functions is based on Python 3.7, 3.8, 3.9, 3.10, and 3.11, serverless hosting on Linux, and the Functions 2.x ([EOL](https://learn.microsoft.com/azure/azure-functions/functions-versions?#retired-versions)), 3.x ([EOL](https://learn.microsoft.com/azure/azure-functions/functions-versions?#retired-versions)) and 4.0 runtime.
+Python support for Azure Functions is based on Python 3.8, 3.9, 3.10, 3.11, and 3.12, serverless hosting on Linux, and the Functions 2.x ([EOL](https://learn.microsoft.com/azure/azure-functions/functions-versions?#retired-versions)), 3.x ([EOL](https://learn.microsoft.com/azure/azure-functions/functions-versions?#retired-versions)), and 4.0 runtimes.
Here is the current status of Python in Azure Functions:
_What are the supported Python versions?_
-| Azure Functions Runtime | Python 3.6 | Python 3.7 | Python 3.8 | Python 3.9 | Python 3.10 | Python 3.11 |
-|-------------------------|--------|-------|-------|--------|--------------|-------------|
-| Azure Functions 3.0 | [EOL](https://learn.microsoft.com/azure/azure-functions/migrate-version-3-version-4)|[EOL](https://learn.microsoft.com/azure/azure-functions/migrate-version-3-version-4)|[EOL](https://learn.microsoft.com/azure/azure-functions/migrate-version-3-version-4)| [EOL](https://learn.microsoft.com/azure/azure-functions/migrate-version-3-version-4)| - |- |
-| Azure Functions 4.0 | [EOL](https://azure.microsoft.com/en-au/updates/azure-functions-support-for-python-36-is-ending-on-30-september-2022/) | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Azure Functions Runtime | Python 3.8 | Python 3.9 | Python 3.10 | Python 3.11 | Python 3.12 |
+|-------------------------|--------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------|-------------|-------------|-------------|
+| Azure Functions 3.0 | [EOL](https://learn.microsoft.com/azure/azure-functions/migrate-version-3-version-4) | [EOL](https://learn.microsoft.com/azure/azure-functions/migrate-version-3-version-4) | - | - | - |
+| Azure Functions 4.0 | ✓ | ✓ | ✓ | ✓ | ✓ |
_What's available?_
- Build, test, debug and publish using Azure Functions Core Tools (CLI) or Visual Studio Code
-- Triggers / Bindings : HTTP, Blob, Queue, Timer, Cosmos DB, Event Grid, Event Hubs and Service Bus
+- Triggers / Bindings : Blob, Cosmos DB, Event Grid, Event Hubs, HTTP, Kafka, MySQL, Queue, Service Bus, SQL, Timer, and Warmup
- Create a Python Function on Linux using a custom docker image
- Triggers / Bindings : Custom binding support
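+
+For illustration, a minimal HTTP-triggered function using the v2 programming model might look like this (the route and function names are placeholders, not part of this change):
+
+```python
+import azure.functions as func
+
+app = func.FunctionApp()
+
+
+@app.route(route="hello")
+def hello(req: func.HttpRequest) -> func.HttpResponse:
+    # Echo an optional ?name= query parameter back to the caller.
+    name = req.params.get("name", "world")
+    return func.HttpResponse(f"Hello, {name}!")
+```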
diff --git a/azure/functions/__init__.py b/azure/functions/__init__.py
index 7de175dd..3b9288c7 100644
--- a/azure/functions/__init__.py
+++ b/azure/functions/__init__.py
@@ -23,6 +23,7 @@
from ._queue import QueueMessage
from ._servicebus import ServiceBusMessage
from ._sql import SqlRow, SqlRowList
+from ._mysql import MySqlRow, MySqlRowList
# Import binding implementations to register them
from . import blob # NoQA
@@ -37,6 +38,7 @@
from . import durable_functions # NoQA
from . import sql # NoQA
from . import warmup # NoQA
+from . import mysql # NoQA
__all__ = (
@@ -67,6 +69,8 @@
'SqlRowList',
'TimerRequest',
'WarmUpContext',
+ 'MySqlRow',
+ 'MySqlRowList',
# Middlewares
'WsgiMiddleware',
@@ -98,4 +102,4 @@
'BlobSource'
)
-__version__ = '1.21.0b3'
+__version__ = '1.23.0b1'
diff --git a/azure/functions/_abc.py b/azure/functions/_abc.py
index 17b4822c..5812787a 100644
--- a/azure/functions/_abc.py
+++ b/azure/functions/_abc.py
@@ -7,7 +7,7 @@
import threading
import typing
-from azure.functions._thirdparty.werkzeug.datastructures import Headers
+from werkzeug.datastructures import Headers
T = typing.TypeVar('T')
diff --git a/azure/functions/_http.py b/azure/functions/_http.py
index ce6ec812..7e349b1b 100644
--- a/azure/functions/_http.py
+++ b/azure/functions/_http.py
@@ -8,11 +8,12 @@
import types
import typing
+from werkzeug import formparser as _wk_parser
+from werkzeug import http as _wk_http
+from werkzeug.datastructures import (Headers, FileStorage, MultiDict,
+ ImmutableMultiDict)
+
from . import _abc
-from ._thirdparty.werkzeug import datastructures as _wk_datastructures
-from ._thirdparty.werkzeug import formparser as _wk_parser
-from ._thirdparty.werkzeug import http as _wk_http
-from ._thirdparty.werkzeug.datastructures import Headers
class BaseHeaders(collections.abc.Mapping):
@@ -174,8 +175,8 @@ def __init__(self,
self.__route_params = types.MappingProxyType(route_params or {})
self.__body_bytes = body
self.__form_parsed = False
- self.__form = None
- self.__files = None
+ self.__form: MultiDict[str, str]
+ self.__files: MultiDict[str, FileStorage]
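+        # Declared for typing only; _parse_form_data() assigns both fields
+        # before they are first read.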
@property
def url(self):
@@ -222,12 +223,9 @@ def _parse_form_data(self):
content_length = len(body)
mimetype, options = _wk_http.parse_options_header(content_type)
parser = _wk_parser.FormDataParser(
- _wk_parser.default_stream_factory,
- options.get('charset') or 'utf-8',
- 'replace',
- None,
- None,
- _wk_datastructures.ImmutableMultiDict,
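+            # werkzeug 3.x dropped the positional charset/errors arguments,
+            # so the remaining options are passed by keyword.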
+ _wk_parser.default_stream_factory, max_form_memory_size=None,
+ max_content_length=None,
+ cls=ImmutableMultiDict
)
body_stream = io.BytesIO(body)
diff --git a/azure/functions/_http_wsgi.py b/azure/functions/_http_wsgi.py
index 48d64733..f51317e1 100644
--- a/azure/functions/_http_wsgi.py
+++ b/azure/functions/_http_wsgi.py
@@ -1,15 +1,18 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
-from typing import Dict, List, Optional, Any
import logging
from io import BytesIO, StringIO
from os import linesep
+from typing import Dict, List, Optional, Any
from urllib.parse import ParseResult, urlparse, unquote_to_bytes
from wsgiref.headers import Headers
from ._abc import Context
from ._http import HttpRequest, HttpResponse
-from ._thirdparty.werkzeug._compat import string_types, wsgi_encoding_dance
+
+
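+# Stand-in for the helper that shipped with the vendored werkzeug._compat
+# module: encode the str as UTF-8 and reinterpret the bytes as latin-1 so
+# the value satisfies WSGI's native-string (ISO-8859-1) requirement.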
+def wsgi_encoding_dance(value):
+ return value.encode().decode("latin1")
class WsgiRequest:
@@ -98,7 +101,7 @@ def to_environ(self, errors_buffer: StringIO) -> Dict[str, Any]:
         # Ensure WSGI string fits in ISO-8859-1 code points
for k, v in environ.items():
- if isinstance(v, string_types):
+            if isinstance(v, str):
environ[k] = wsgi_encoding_dance(v)
# Remove None values
diff --git a/azure/functions/_mysql.py b/azure/functions/_mysql.py
new file mode 100644
index 00000000..9c7515d9
--- /dev/null
+++ b/azure/functions/_mysql.py
@@ -0,0 +1,71 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+import abc
+import collections
+import json
+
+
+class BaseMySqlRow(abc.ABC):
+
+ @classmethod
+ @abc.abstractmethod
+ def from_json(cls, json_data: str) -> 'BaseMySqlRow':
+ raise NotImplementedError
+
+ @classmethod
+ @abc.abstractmethod
+ def from_dict(cls, dct: dict) -> 'BaseMySqlRow':
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def __getitem__(self, key):
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def __setitem__(self, key, value):
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def to_json(self) -> str:
+ raise NotImplementedError
+
+
+class BaseMySqlRowList(abc.ABC):
+ pass
+
+
+class MySqlRow(BaseMySqlRow, collections.UserDict):
+ """A MySql Row.
+
+ MySqlRow objects are ''UserDict'' subclasses and behave like dicts.
+ """
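+    # Illustrative usage:
+    #     row = MySqlRow.from_json('{"id": 1, "name": "abc"}')
+    #     row["name"]   # -> 'abc'
+    #     row.to_json() # -> '{"id": 1, "name": "abc"}'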
+
+ @classmethod
+ def from_json(cls, json_data: str) -> 'BaseMySqlRow':
+ """Create a MySqlRow from a JSON string."""
+ return cls.from_dict(json.loads(json_data))
+
+ @classmethod
+ def from_dict(cls, dct: dict) -> 'BaseMySqlRow':
+ """Create a MySqlRow from a dict object"""
+ return cls({k: v for k, v in dct.items()})
+
+ def to_json(self) -> str:
+ """Return the JSON representation of the MySqlRow"""
+ return json.dumps(dict(self))
+
+ def __getitem__(self, key):
+ return collections.UserDict.__getitem__(self, key)
+
+ def __setitem__(self, key, value):
+ return collections.UserDict.__setitem__(self, key, value)
+
+ def __repr__(self) -> str:
+ return (
+            f'<MySqlRow at 0x{id(self):0x}>'
+        )
+
+
+class MySqlRowList(BaseMySqlRowList, collections.UserList):
+    """A ''UserList'' subclass with many MySqlRows."""
+    pass
diff --git a/azure/functions/_thirdparty/werkzeug/_internal.py b/azure/functions/_thirdparty/werkzeug/_internal.py
deleted file mode 100644
--- a/azure/functions/_thirdparty/werkzeug/_internal.py
+++ /dev/null
diff --git a/azure/functions/_thirdparty/werkzeug/datastructures.py b/azure/functions/_thirdparty/werkzeug/datastructures.py
deleted file mode 100644
index 4df573b7..00000000
--- a/azure/functions/_thirdparty/werkzeug/datastructures.py
+++ /dev/null
@@ -1,2846 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- werkzeug.datastructures
- ~~~~~~~~~~~~~~~~~~~~~~~
-
- This module provides mixins and classes with an immutable interface.
-
- :copyright: 2007 Pallets
- :license: BSD-3-Clause
-"""
-import codecs
-import mimetypes
-import re
-from copy import deepcopy
-from itertools import repeat
-
-from ._compat import BytesIO
-from ._compat import collections_abc
-from ._compat import integer_types
-from ._compat import iteritems
-from ._compat import iterkeys
-from ._compat import iterlists
-from ._compat import itervalues
-from ._compat import make_literal_wrapper
-from ._compat import PY2
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import to_native
-
-_locale_delim_re = re.compile(r"[_-]")
-
-
-class _Missing(object):
- def __repr__(self):
- return "no value"
-
- def __reduce__(self):
- return "_missing"
-
-
-_missing = _Missing()
-
-
-def is_immutable(self):
- raise TypeError("%r objects are immutable" % self.__class__.__name__)
-
-
-def iter_multi_items(mapping):
- """Iterates over the items of a mapping yielding keys and values
- without dropping any from more complex structures.
- """
- if isinstance(mapping, MultiDict):
- for item in iteritems(mapping, multi=True):
- yield item
- elif isinstance(mapping, dict):
- for key, value in iteritems(mapping):
- if isinstance(value, (tuple, list)):
- for value in value:
- yield key, value
- else:
- yield key, value
- else:
- for item in mapping:
- yield item
-
-
-def native_itermethods(names):
- if not PY2:
- return lambda x: x
-
- def setviewmethod(cls, name):
- viewmethod_name = "view%s" % name
- repr_name = "view_%s" % name
-
- def viewmethod(self, *a, **kw):
- return ViewItems(self, name, repr_name, *a, **kw)
-
- viewmethod.__name__ = viewmethod_name
- viewmethod.__doc__ = "`%s()` object providing a view on %s" % (
- viewmethod_name,
- name,
- )
- setattr(cls, viewmethod_name, viewmethod)
-
- def setitermethod(cls, name):
- itermethod = getattr(cls, name)
- setattr(cls, "iter%s" % name, itermethod)
-
- def listmethod(self, *a, **kw):
- return list(itermethod(self, *a, **kw))
-
- listmethod.__name__ = name
- listmethod.__doc__ = "Like :py:meth:`iter%s`, but returns a list." % name
- setattr(cls, name, listmethod)
-
- def wrap(cls):
- for name in names:
- setitermethod(cls, name)
- setviewmethod(cls, name)
- return cls
-
- return wrap
-
-
-class ImmutableListMixin(object):
- """Makes a :class:`list` immutable.
-
- .. versionadded:: 0.5
-
- :private:
- """
-
- _hash_cache = None
-
- def __hash__(self):
- if self._hash_cache is not None:
- return self._hash_cache
- rv = self._hash_cache = hash(tuple(self))
- return rv
-
- def __reduce_ex__(self, protocol):
- return type(self), (list(self),)
-
- def __delitem__(self, key):
- is_immutable(self)
-
- def __iadd__(self, other):
- is_immutable(self)
-
- __imul__ = __iadd__
-
- def __setitem__(self, key, value):
- is_immutable(self)
-
- def append(self, item):
- is_immutable(self)
-
- remove = append
-
- def extend(self, iterable):
- is_immutable(self)
-
- def insert(self, pos, value):
- is_immutable(self)
-
- def pop(self, index=-1):
- is_immutable(self)
-
- def reverse(self):
- is_immutable(self)
-
- def sort(self, cmp=None, key=None, reverse=None):
- is_immutable(self)
-
-
-class ImmutableList(ImmutableListMixin, list):
- """An immutable :class:`list`.
-
- .. versionadded:: 0.5
-
- :private:
- """
-
- def __repr__(self):
- return "%s(%s)" % (self.__class__.__name__, list.__repr__(self))
-
-
-class ImmutableDictMixin(object):
- """Makes a :class:`dict` immutable.
-
- .. versionadded:: 0.5
-
- :private:
- """
-
- _hash_cache = None
-
- @classmethod
- def fromkeys(cls, keys, value=None):
- instance = super(cls, cls).__new__(cls)
- instance.__init__(zip(keys, repeat(value)))
- return instance
-
- def __reduce_ex__(self, protocol):
- return type(self), (dict(self),)
-
- def _iter_hashitems(self):
- return iteritems(self)
-
- def __hash__(self):
- if self._hash_cache is not None:
- return self._hash_cache
- rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
- return rv
-
- def setdefault(self, key, default=None):
- is_immutable(self)
-
- def update(self, *args, **kwargs):
- is_immutable(self)
-
- def pop(self, key, default=None):
- is_immutable(self)
-
- def popitem(self):
- is_immutable(self)
-
- def __setitem__(self, key, value):
- is_immutable(self)
-
- def __delitem__(self, key):
- is_immutable(self)
-
- def clear(self):
- is_immutable(self)
-
-
-class ImmutableMultiDictMixin(ImmutableDictMixin):
- """Makes a :class:`MultiDict` immutable.
-
- .. versionadded:: 0.5
-
- :private:
- """
-
- def __reduce_ex__(self, protocol):
- return type(self), (list(iteritems(self, multi=True)),)
-
- def _iter_hashitems(self):
- return iteritems(self, multi=True)
-
- def add(self, key, value):
- is_immutable(self)
-
- def popitemlist(self):
- is_immutable(self)
-
- def poplist(self, key):
- is_immutable(self)
-
- def setlist(self, key, new_list):
- is_immutable(self)
-
- def setlistdefault(self, key, default_list=None):
- is_immutable(self)
-
-
-class UpdateDictMixin(object):
- """Makes dicts call `self.on_update` on modifications.
-
- .. versionadded:: 0.5
-
- :private:
- """
-
- on_update = None
-
- def calls_update(name): # noqa: B902
- def oncall(self, *args, **kw):
- rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
- if self.on_update is not None:
- self.on_update(self)
- return rv
-
- oncall.__name__ = name
- return oncall
-
- def setdefault(self, key, default=None):
- modified = key not in self
- rv = super(UpdateDictMixin, self).setdefault(key, default)
- if modified and self.on_update is not None:
- self.on_update(self)
- return rv
-
- def pop(self, key, default=_missing):
- modified = key in self
- if default is _missing:
- rv = super(UpdateDictMixin, self).pop(key)
- else:
- rv = super(UpdateDictMixin, self).pop(key, default)
- if modified and self.on_update is not None:
- self.on_update(self)
- return rv
-
- __setitem__ = calls_update("__setitem__")
- __delitem__ = calls_update("__delitem__")
- clear = calls_update("clear")
- popitem = calls_update("popitem")
- update = calls_update("update")
- del calls_update
-
-
-class TypeConversionDict(dict):
- """Works like a regular dict but the :meth:`get` method can perform
- type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
- are subclasses of this class and provide the same feature.
-
- .. versionadded:: 0.5
- """
-
- def get(self, key, default=None, type=None):
- """Return the default value if the requested data doesn't exist.
- If `type` is provided and is a callable it should convert the value,
- return it or raise a :exc:`ValueError` if that is not possible. In
- this case the function will return the default as if the value was not
- found:
-
- >>> d = TypeConversionDict(foo='42', bar='blub')
- >>> d.get('foo', type=int)
- 42
- >>> d.get('bar', -1, type=int)
- -1
-
- :param key: The key to be looked up.
- :param default: The default value to be returned if the key can't
- be looked up. If not further specified `None` is
- returned.
- :param type: A callable that is used to cast the value in the
- :class:`MultiDict`. If a :exc:`ValueError` is raised
- by this callable the default value is returned.
- """
- try:
- rv = self[key]
- except KeyError:
- return default
- if type is not None:
- try:
- rv = type(rv)
- except ValueError:
- rv = default
- return rv
-
-
-class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
- """Works like a :class:`TypeConversionDict` but does not support
- modifications.
-
- .. versionadded:: 0.5
- """
-
- def copy(self):
- """Return a shallow mutable copy of this object. Keep in mind that
- the standard library's :func:`copy` function is a no-op for this class
- like for any other python immutable type (eg: :class:`tuple`).
- """
- return TypeConversionDict(self)
-
- def __copy__(self):
- return self
-
-
-class ViewItems(object):
- def __init__(self, multi_dict, method, repr_name, *a, **kw):
- self.__multi_dict = multi_dict
- self.__method = method
- self.__repr_name = repr_name
- self.__a = a
- self.__kw = kw
-
- def __get_items(self):
- return getattr(self.__multi_dict, self.__method)(*self.__a, **self.__kw)
-
- def __repr__(self):
- return "%s(%r)" % (self.__repr_name, list(self.__get_items()))
-
- def __iter__(self):
- return iter(self.__get_items())
-
-
-@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
-class MultiDict(TypeConversionDict):
- """A :class:`MultiDict` is a dictionary subclass customized to deal with
- multiple values for the same key which is for example used by the parsing
- functions in the wrappers. This is necessary because some HTML form
- elements pass multiple values for the same key.
-
- :class:`MultiDict` implements all standard dictionary methods.
- Internally, it saves all values for a key as a list, but the standard dict
- access methods will only return the first value for a key. If you want to
- gain access to the other values, too, you have to use the `list` methods as
- explained below.
-
- Basic Usage:
-
- >>> d = MultiDict([('a', 'b'), ('a', 'c')])
- >>> d
- MultiDict([('a', 'b'), ('a', 'c')])
- >>> d['a']
- 'b'
- >>> d.getlist('a')
- ['b', 'c']
- >>> 'a' in d
- True
-
- It behaves like a normal dict thus all dict functions will only return the
- first value when multiple values for one key are found.
-
- From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
- subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
- render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
- exceptions.
-
- A :class:`MultiDict` can be constructed from an iterable of
- ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
- onwards some keyword parameters.
-
- :param mapping: the initial value for the :class:`MultiDict`. Either a
- regular dict, an iterable of ``(key, value)`` tuples
- or `None`.
- """
-
- def __init__(self, mapping=None):
- if isinstance(mapping, MultiDict):
- dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
- elif isinstance(mapping, dict):
- tmp = {}
- for key, value in iteritems(mapping):
- if isinstance(value, (tuple, list)):
- if len(value) == 0:
- continue
- value = list(value)
- else:
- value = [value]
- tmp[key] = value
- dict.__init__(self, tmp)
- else:
- tmp = {}
- for key, value in mapping or ():
- tmp.setdefault(key, []).append(value)
- dict.__init__(self, tmp)
-
- def __getstate__(self):
- return dict(self.lists())
-
- def __setstate__(self, value):
- dict.clear(self)
- dict.update(self, value)
-
- def __getitem__(self, key):
- """Return the first data value for this key;
- raises KeyError if not found.
-
- :param key: The key to be looked up.
- :raise KeyError: if the key does not exist.
- """
-
- if key in self:
- lst = dict.__getitem__(self, key)
- if len(lst) > 0:
- return lst[0]
- raise exceptions.BadRequestKeyError(key)
-
- def __setitem__(self, key, value):
- """Like :meth:`add` but removes an existing key first.
-
- :param key: the key for the value.
- :param value: the value to set.
- """
- dict.__setitem__(self, key, [value])
-
- def add(self, key, value):
- """Adds a new value for the key.
-
- .. versionadded:: 0.6
-
- :param key: the key for the value.
- :param value: the value to add.
- """
- dict.setdefault(self, key, []).append(value)
-
- def getlist(self, key, type=None):
- """Return the list of items for a given key. If that key is not in the
- `MultiDict`, the return value will be an empty list. Just as `get`
- `getlist` accepts a `type` parameter. All items will be converted
- with the callable defined there.
-
- :param key: The key to be looked up.
- :param type: A callable that is used to cast the value in the
- :class:`MultiDict`. If a :exc:`ValueError` is raised
- by this callable the value will be removed from the list.
- :return: a :class:`list` of all the values for the key.
- """
- try:
- rv = dict.__getitem__(self, key)
- except KeyError:
- return []
- if type is None:
- return list(rv)
- result = []
- for item in rv:
- try:
- result.append(type(item))
- except ValueError:
- pass
- return result
-
- def setlist(self, key, new_list):
- """Remove the old values for a key and add new ones. Note that the list
- you pass the values in will be shallow-copied before it is inserted in
- the dictionary.
-
- >>> d = MultiDict()
- >>> d.setlist('foo', ['1', '2'])
- >>> d['foo']
- '1'
- >>> d.getlist('foo')
- ['1', '2']
-
- :param key: The key for which the values are set.
- :param new_list: An iterable with the new values for the key. Old values
- are removed first.
- """
- dict.__setitem__(self, key, list(new_list))
-
- def setdefault(self, key, default=None):
- """Returns the value for the key if it is in the dict, otherwise it
- returns `default` and sets that value for `key`.
-
- :param key: The key to be looked up.
- :param default: The default value to be returned if the key is not
- in the dict. If not further specified it's `None`.
- """
- if key not in self:
- self[key] = default
- else:
- default = self[key]
- return default
-
- def setlistdefault(self, key, default_list=None):
- """Like `setdefault` but sets multiple values. The list returned
- is not a copy, but the list that is actually used internally. This
- means that you can put new values into the dict by appending items
- to the list:
-
- >>> d = MultiDict({"foo": 1})
- >>> d.setlistdefault("foo").extend([2, 3])
- >>> d.getlist("foo")
- [1, 2, 3]
-
- :param key: The key to be looked up.
- :param default_list: An iterable of default values. It is either copied
- (in case it was a list) or converted into a list
- before returned.
- :return: a :class:`list`
- """
- if key not in self:
- default_list = list(default_list or ())
- dict.__setitem__(self, key, default_list)
- else:
- default_list = dict.__getitem__(self, key)
- return default_list
-
- def items(self, multi=False):
- """Return an iterator of ``(key, value)`` pairs.
-
- :param multi: If set to `True` the iterator returned will have a pair
- for each value of each key. Otherwise it will only
- contain pairs for the first value of each key.
- """
-
- for key, values in iteritems(dict, self):
- if multi:
- for value in values:
- yield key, value
- else:
- yield key, values[0]
-
- def lists(self):
- """Return a iterator of ``(key, values)`` pairs, where values is the list
- of all values associated with the key."""
-
- for key, values in iteritems(dict, self):
- yield key, list(values)
-
- def keys(self):
- return iterkeys(dict, self)
-
- __iter__ = keys
-
- def values(self):
- """Returns an iterator of the first value on every key's value list."""
- for values in itervalues(dict, self):
- yield values[0]
-
- def listvalues(self):
- """Return an iterator of all values associated with a key. Zipping
- :meth:`keys` and this is the same as calling :meth:`lists`:
-
- >>> d = MultiDict({"foo": [1, 2, 3]})
- >>> zip(d.keys(), d.listvalues()) == d.lists()
- True
- """
-
- return itervalues(dict, self)
-
- def copy(self):
- """Return a shallow copy of this object."""
- return self.__class__(self)
-
- def deepcopy(self, memo=None):
- """Return a deep copy of this object."""
- return self.__class__(deepcopy(self.to_dict(flat=False), memo))
-
- def to_dict(self, flat=True):
- """Return the contents as regular dict. If `flat` is `True` the
- returned dict will only have the first item present, if `flat` is
- `False` all values will be returned as lists.
-
- :param flat: If set to `False` the dict returned will have lists
- with all the values in it. Otherwise it will only
- contain the first value for each key.
- :return: a :class:`dict`
- """
- if flat:
- return dict(iteritems(self))
- return dict(self.lists())
-
- def update(self, other_dict):
- """update() extends rather than replaces existing key lists:
-
- >>> a = MultiDict({'x': 1})
- >>> b = MultiDict({'x': 2, 'y': 3})
- >>> a.update(b)
- >>> a
- MultiDict([('y', 3), ('x', 1), ('x', 2)])
-
- If the value list for a key in ``other_dict`` is empty, no new values
- will be added to the dict and the key will not be created:
-
- >>> x = {'empty_list': []}
- >>> y = MultiDict()
- >>> y.update(x)
- >>> y
- MultiDict([])
- """
- for key, value in iter_multi_items(other_dict):
- MultiDict.add(self, key, value)
-
- def pop(self, key, default=_missing):
- """Pop the first item for a list on the dict. Afterwards the
- key is removed from the dict, so additional values are discarded:
-
- >>> d = MultiDict({"foo": [1, 2, 3]})
- >>> d.pop("foo")
- 1
- >>> "foo" in d
- False
-
- :param key: the key to pop.
- :param default: if provided the value to return if the key was
- not in the dictionary.
- """
- try:
- lst = dict.pop(self, key)
-
- if len(lst) == 0:
- raise exceptions.BadRequestKeyError(key)
-
- return lst[0]
- except KeyError:
- if default is not _missing:
- return default
- raise exceptions.BadRequestKeyError(key)
-
- def popitem(self):
- """Pop an item from the dict."""
- try:
- item = dict.popitem(self)
-
- if len(item[1]) == 0:
- raise exceptions.BadRequestKeyError(item)
-
- return (item[0], item[1][0])
- except KeyError as e:
- raise exceptions.BadRequestKeyError(e.args[0])
-
- def poplist(self, key):
- """Pop the list for a key from the dict. If the key is not in the dict
- an empty list is returned.
-
- .. versionchanged:: 0.5
- If the key does no longer exist a list is returned instead of
- raising an error.
- """
- return dict.pop(self, key, [])
-
- def popitemlist(self):
- """Pop a ``(key, list)`` tuple from the dict."""
- try:
- return dict.popitem(self)
- except KeyError as e:
- raise exceptions.BadRequestKeyError(e.args[0])
-
- def __copy__(self):
- return self.copy()
-
- def __deepcopy__(self, memo):
- return self.deepcopy(memo=memo)
-
- def __repr__(self):
- return "%s(%r)" % (self.__class__.__name__, list(iteritems(self, multi=True)))
-
-
-class _omd_bucket(object):
- """Wraps values in the :class:`OrderedMultiDict`. This makes it
- possible to keep an order over multiple different keys. It requires
- a lot of extra memory and slows down access a lot, but makes it
- possible to access elements in O(1) and iterate in O(n).
- """
-
- __slots__ = ("prev", "key", "value", "next")
-
- def __init__(self, omd, key, value):
- self.prev = omd._last_bucket
- self.key = key
- self.value = value
- self.next = None
-
- if omd._first_bucket is None:
- omd._first_bucket = self
- if omd._last_bucket is not None:
- omd._last_bucket.next = self
- omd._last_bucket = self
-
- def unlink(self, omd):
- if self.prev:
- self.prev.next = self.next
- if self.next:
- self.next.prev = self.prev
- if omd._first_bucket is self:
- omd._first_bucket = self.next
- if omd._last_bucket is self:
- omd._last_bucket = self.prev
-
-
-@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
-class OrderedMultiDict(MultiDict):
- """Works like a regular :class:`MultiDict` but preserves the
- order of the fields. To convert the ordered multi dict into a
- list you can use the :meth:`items` method and pass it ``multi=True``.
-
- In general an :class:`OrderedMultiDict` is an order of magnitude
- slower than a :class:`MultiDict`.
-
- .. admonition:: note
-
- Due to a limitation in Python you cannot convert an ordered
- multi dict into a regular dict by using ``dict(multidict)``.
- Instead you have to use the :meth:`to_dict` method, otherwise
- the internal bucket objects are exposed.
- """
-
- def __init__(self, mapping=None):
- dict.__init__(self)
- self._first_bucket = self._last_bucket = None
- if mapping is not None:
- OrderedMultiDict.update(self, mapping)
-
- def __eq__(self, other):
- if not isinstance(other, MultiDict):
- return NotImplemented
- if isinstance(other, OrderedMultiDict):
- iter1 = iteritems(self, multi=True)
- iter2 = iteritems(other, multi=True)
- try:
- for k1, v1 in iter1:
- k2, v2 = next(iter2)
- if k1 != k2 or v1 != v2:
- return False
- except StopIteration:
- return False
- try:
- next(iter2)
- except StopIteration:
- return True
- return False
- if len(self) != len(other):
- return False
- for key, values in iterlists(self):
- if other.getlist(key) != values:
- return False
- return True
-
- __hash__ = None
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __reduce_ex__(self, protocol):
- return type(self), (list(iteritems(self, multi=True)),)
-
- def __getstate__(self):
- return list(iteritems(self, multi=True))
-
- def __setstate__(self, values):
- dict.clear(self)
- for key, value in values:
- self.add(key, value)
-
- def __getitem__(self, key):
- if key in self:
- return dict.__getitem__(self, key)[0].value
- raise exceptions.BadRequestKeyError(key)
-
- def __setitem__(self, key, value):
- self.poplist(key)
- self.add(key, value)
-
- def __delitem__(self, key):
- self.pop(key)
-
- def keys(self):
- return (key for key, value in iteritems(self))
-
- __iter__ = keys
-
- def values(self):
- return (value for key, value in iteritems(self))
-
- def items(self, multi=False):
- ptr = self._first_bucket
- if multi:
- while ptr is not None:
- yield ptr.key, ptr.value
- ptr = ptr.next
- else:
- returned_keys = set()
- while ptr is not None:
- if ptr.key not in returned_keys:
- returned_keys.add(ptr.key)
- yield ptr.key, ptr.value
- ptr = ptr.next
-
- def lists(self):
- returned_keys = set()
- ptr = self._first_bucket
- while ptr is not None:
- if ptr.key not in returned_keys:
- yield ptr.key, self.getlist(ptr.key)
- returned_keys.add(ptr.key)
- ptr = ptr.next
-
- def listvalues(self):
- for _key, values in iterlists(self):
- yield values
-
- def add(self, key, value):
- dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
-
- def getlist(self, key, type=None):
- try:
- rv = dict.__getitem__(self, key)
- except KeyError:
- return []
- if type is None:
- return [x.value for x in rv]
- result = []
- for item in rv:
- try:
- result.append(type(item.value))
- except ValueError:
- pass
- return result
-
- def setlist(self, key, new_list):
- self.poplist(key)
- for value in new_list:
- self.add(key, value)
-
- def setlistdefault(self, key, default_list=None):
- raise TypeError("setlistdefault is unsupported for ordered multi dicts")
-
- def update(self, mapping):
- for key, value in iter_multi_items(mapping):
- OrderedMultiDict.add(self, key, value)
-
- def poplist(self, key):
- buckets = dict.pop(self, key, ())
- for bucket in buckets:
- bucket.unlink(self)
- return [x.value for x in buckets]
-
- def pop(self, key, default=_missing):
- try:
- buckets = dict.pop(self, key)
- except KeyError:
- if default is not _missing:
- return default
- raise exceptions.BadRequestKeyError(key)
- for bucket in buckets:
- bucket.unlink(self)
- return buckets[0].value
-
- def popitem(self):
- try:
- key, buckets = dict.popitem(self)
- except KeyError as e:
- raise exceptions.BadRequestKeyError(e.args[0])
- for bucket in buckets:
- bucket.unlink(self)
- return key, buckets[0].value
-
- def popitemlist(self):
- try:
- key, buckets = dict.popitem(self)
- except KeyError as e:
- raise exceptions.BadRequestKeyError(e.args[0])
- for bucket in buckets:
- bucket.unlink(self)
- return key, [x.value for x in buckets]
-
-
-def _options_header_vkw(value, kw):
- return dump_options_header(
- value, dict((k.replace("_", "-"), v) for k, v in kw.items())
- )
-
-
-def _unicodify_header_value(value):
- if isinstance(value, bytes):
- value = value.decode("latin-1")
- if not isinstance(value, text_type):
- value = text_type(value)
- return value
-
-
-@native_itermethods(["keys", "values", "items"])
-class Headers(object):
- """An object that stores some headers. It has a dict-like interface
- but is ordered and can store the same keys multiple times.
-
- This data structure is useful if you want a nicer way to handle WSGI
- headers which are stored as tuples in a list.
-
- From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
- also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
- and will render a page for a ``400 BAD REQUEST`` if caught in a
- catch-all for HTTP exceptions.
-
- Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
- class, with the exception of `__getitem__`. :mod:`wsgiref` will return
- `None` for ``headers['missing']``, whereas :class:`Headers` will raise
- a :class:`KeyError`.
-
- To create a new :class:`Headers` object pass it a list or dict of headers
- which are used as default values. This does not reuse the list passed
- to the constructor for internal usage.
-
- :param defaults: The list of default values for the :class:`Headers`.
-
- .. versionchanged:: 0.9
- This data structure now stores unicode values similar to how the
- multi dicts do it. The main difference is that bytes can be set as
- well which will automatically be latin1 decoded.
-
- .. versionchanged:: 0.9
- The :meth:`linked` function was removed without replacement as it
- was an API that does not support the changes to the encoding model.
- """
-
- def __init__(self, defaults=None):
- self._list = []
- if defaults is not None:
- if isinstance(defaults, (list, Headers)):
- self._list.extend(defaults)
- else:
- self.extend(defaults)
-
- def __getitem__(self, key, _get_mode=False):
- if not _get_mode:
- if isinstance(key, integer_types):
- return self._list[key]
- elif isinstance(key, slice):
- return self.__class__(self._list[key])
- if not isinstance(key, string_types):
- raise exceptions.BadRequestKeyError(key)
- ikey = key.lower()
- for k, v in self._list:
- if k.lower() == ikey:
- return v
- # micro optimization: if we are in get mode we will catch that
- # exception one stack level down so we can raise a standard
- # key error instead of our special one.
- if _get_mode:
- raise KeyError()
- raise exceptions.BadRequestKeyError(key)
-
- def __eq__(self, other):
- return other.__class__ is self.__class__ and set(other._list) == set(self._list)
-
- __hash__ = None
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def get(self, key, default=None, type=None, as_bytes=False):
- """Return the default value if the requested data doesn't exist.
- If `type` is provided and is a callable it should convert the value,
- return it or raise a :exc:`ValueError` if that is not possible. In
- this case the function will return the default as if the value was not
- found:
-
- >>> d = Headers([('Content-Length', '42')])
- >>> d.get('Content-Length', type=int)
- 42
-
- If a headers object is bound you must not add unicode strings
- because no encoding takes place.
-
- .. versionadded:: 0.9
- Added support for `as_bytes`.
-
- :param key: The key to be looked up.
- :param default: The default value to be returned if the key can't
- be looked up. If not further specified `None` is
- returned.
- :param type: A callable that is used to cast the value in the
- :class:`Headers`. If a :exc:`ValueError` is raised
- by this callable the default value is returned.
- :param as_bytes: return bytes instead of unicode strings.
- """
- try:
- rv = self.__getitem__(key, _get_mode=True)
- except KeyError:
- return default
- if as_bytes:
- rv = rv.encode("latin1")
- if type is None:
- return rv
- try:
- return type(rv)
- except ValueError:
- return default
-
- def getlist(self, key, type=None, as_bytes=False):
- """Return the list of items for a given key. If that key is not in the
- :class:`Headers`, the return value will be an empty list. Just as
- :meth:`get` :meth:`getlist` accepts a `type` parameter. All items will
- be converted with the callable defined there.
-
- .. versionadded:: 0.9
- Added support for `as_bytes`.
-
- :param key: The key to be looked up.
- :param type: A callable that is used to cast the value in the
- :class:`Headers`. If a :exc:`ValueError` is raised
- by this callable the value will be removed from the list.
- :return: a :class:`list` of all the values for the key.
- :param as_bytes: return bytes instead of unicode strings.
- """
- ikey = key.lower()
- result = []
- for k, v in self:
- if k.lower() == ikey:
- if as_bytes:
- v = v.encode("latin1")
- if type is not None:
- try:
- v = type(v)
- except ValueError:
- continue
- result.append(v)
- return result
-
- def get_all(self, name):
- """Return a list of all the values for the named field.
-
- This method is compatible with the :mod:`wsgiref`
- :meth:`~wsgiref.headers.Headers.get_all` method.
- """
- return self.getlist(name)
-
- def items(self, lower=False):
- for key, value in self:
- if lower:
- key = key.lower()
- yield key, value
-
- def keys(self, lower=False):
- for key, _ in iteritems(self, lower):
- yield key
-
- def values(self):
- for _, value in iteritems(self):
- yield value
-
- def extend(self, iterable):
- """Extend the headers with a dict or an iterable yielding keys and
- values.
- """
- if isinstance(iterable, dict):
- for key, value in iteritems(iterable):
- if isinstance(value, (tuple, list)):
- for v in value:
- self.add(key, v)
- else:
- self.add(key, value)
- else:
- for key, value in iterable:
- self.add(key, value)
-
- def __delitem__(self, key, _index_operation=True):
- if _index_operation and isinstance(key, (integer_types, slice)):
- del self._list[key]
- return
- key = key.lower()
- new = []
- for k, v in self._list:
- if k.lower() != key:
- new.append((k, v))
- self._list[:] = new
-
- def remove(self, key):
- """Remove a key.
-
- :param key: The key to be removed.
- """
- return self.__delitem__(key, _index_operation=False)
-
- def pop(self, key=None, default=_missing):
- """Removes and returns a key or index.
-
- :param key: The key to be popped. If this is an integer the item at
- that position is removed, if it's a string the value for
- that key is. If the key is omitted or `None` the last
- item is removed.
- :return: an item.
- """
- if key is None:
- return self._list.pop()
- if isinstance(key, integer_types):
- return self._list.pop(key)
- try:
- rv = self[key]
- self.remove(key)
- except KeyError:
- if default is not _missing:
- return default
- raise
- return rv
-
- def popitem(self):
- """Removes a key or index and returns a (key, value) item."""
- return self.pop()
-
- def __contains__(self, key):
- """Check if a key is present."""
- try:
- self.__getitem__(key, _get_mode=True)
- except KeyError:
- return False
- return True
-
- has_key = __contains__
-
- def __iter__(self):
- """Yield ``(key, value)`` tuples."""
- return iter(self._list)
-
- def __len__(self):
- return len(self._list)
-
- def add(self, _key, _value, **kw):
- """Add a new header tuple to the list.
-
- Keyword arguments can specify additional parameters for the header
- value, with underscores converted to dashes::
-
- >>> d = Headers()
- >>> d.add('Content-Type', 'text/plain')
- >>> d.add('Content-Disposition', 'attachment', filename='foo.png')
-
- The keyword argument dumping uses :func:`dump_options_header`
- behind the scenes.
-
- .. versionadded:: 0.4.1
- keyword arguments were added for :mod:`wsgiref` compatibility.
- """
- if kw:
- _value = _options_header_vkw(_value, kw)
- _key = _unicodify_header_value(_key)
- _value = _unicodify_header_value(_value)
- self._validate_value(_value)
- self._list.append((_key, _value))
-
- def _validate_value(self, value):
- if not isinstance(value, text_type):
- raise TypeError("Value should be unicode.")
- if u"\n" in value or u"\r" in value:
- raise ValueError(
- "Detected newline in header value. This is "
- "a potential security problem"
- )
-
- def add_header(self, _key, _value, **_kw):
- """Add a new header tuple to the list.
-
- An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
- :meth:`~wsgiref.headers.Headers.add_header` method.
- """
- self.add(_key, _value, **_kw)
-
- def clear(self):
- """Clears all headers."""
- del self._list[:]
-
- def set(self, _key, _value, **kw):
- """Remove all header tuples for `key` and add a new one. The newly
- added key either appears at the end of the list if there was no
- entry or replaces the first one.
-
- Keyword arguments can specify additional parameters for the header
- value, with underscores converted to dashes. See :meth:`add` for
- more information.
-
- .. versionchanged:: 0.6.1
- :meth:`set` now accepts the same arguments as :meth:`add`.
-
- :param key: The key to be inserted.
- :param value: The value to be inserted.
- """
- if kw:
- _value = _options_header_vkw(_value, kw)
- _key = _unicodify_header_value(_key)
- _value = _unicodify_header_value(_value)
- self._validate_value(_value)
- if not self._list:
- self._list.append((_key, _value))
- return
- listiter = iter(self._list)
- ikey = _key.lower()
- for idx, (old_key, _old_value) in enumerate(listiter):
- if old_key.lower() == ikey:
- # replace first ocurrence
- self._list[idx] = (_key, _value)
- break
- else:
- self._list.append((_key, _value))
- return
- self._list[idx + 1 :] = [t for t in listiter if t[0].lower() != ikey]
-
- def setdefault(self, key, default):
- """Returns the value for the key if it is in the dict, otherwise it
- returns `default` and sets that value for `key`.
-
- :param key: The key to be looked up.
- :param default: The default value to be returned if the key is not
- in the dict. If not further specified it's `None`.
- """
- if key in self:
- return self[key]
- self.set(key, default)
- return default
-
- def __setitem__(self, key, value):
- """Like :meth:`set` but also supports index/slice based setting."""
- if isinstance(key, (slice, integer_types)):
- if isinstance(key, integer_types):
- value = [value]
- value = [
- (_unicodify_header_value(k), _unicodify_header_value(v))
- for (k, v) in value
- ]
- [self._validate_value(v) for (k, v) in value]
- if isinstance(key, integer_types):
- self._list[key] = value[0]
- else:
- self._list[key] = value
- else:
- self.set(key, value)
-
- def to_wsgi_list(self):
- """Convert the headers into a list suitable for WSGI.
-
- The values are byte strings in Python 2 converted to latin1 and unicode
- strings in Python 3 for the WSGI server to encode.
-
- :return: list
- """
- if PY2:
- return [(to_native(k), v.encode("latin1")) for k, v in self]
- return list(self)
-
- def copy(self):
- return self.__class__(self._list)
-
- def __copy__(self):
- return self.copy()
-
- def __str__(self):
- """Returns formatted headers suitable for HTTP transmission."""
- strs = []
- for key, value in self.to_wsgi_list():
- strs.append("%s: %s" % (key, value))
- strs.append("\r\n")
- return "\r\n".join(strs)
-
- def __repr__(self):
- return "%s(%r)" % (self.__class__.__name__, list(self))
-
-
-class ImmutableHeadersMixin(object):
- """Makes a :class:`Headers` immutable. We do not mark them as
- hashable though since the only usecase for this datastructure
- in Werkzeug is a view on a mutable structure.
-
- .. versionadded:: 0.5
-
- :private:
- """
-
- def __delitem__(self, key, **kwargs):
- is_immutable(self)
-
- def __setitem__(self, key, value):
- is_immutable(self)
-
- set = __setitem__
-
- def add(self, item):
- is_immutable(self)
-
- remove = add_header = add
-
- def extend(self, iterable):
- is_immutable(self)
-
- def insert(self, pos, value):
- is_immutable(self)
-
- def pop(self, index=-1):
- is_immutable(self)
-
- def popitem(self):
- is_immutable(self)
-
- def setdefault(self, key, default):
- is_immutable(self)
-
-
-class EnvironHeaders(ImmutableHeadersMixin, Headers):
- """Read only version of the headers from a WSGI environment. This
- provides the same interface as `Headers` and is constructed from
- a WSGI environment.
-
- From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
- subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
- render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
- HTTP exceptions.
- """
-
- def __init__(self, environ):
- self.environ = environ
-
- def __eq__(self, other):
- return self.environ is other.environ
-
- __hash__ = None
-
- def __getitem__(self, key, _get_mode=False):
- # _get_mode is a no-op for this class as there is no index but
- # used because get() calls it.
- if not isinstance(key, string_types):
- raise KeyError(key)
- key = key.upper().replace("-", "_")
- if key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
- return _unicodify_header_value(self.environ[key])
- return _unicodify_header_value(self.environ["HTTP_" + key])
-
- def __len__(self):
- # the iter is necessary because otherwise list calls our
- # len which would call list again and so forth.
- return len(list(iter(self)))
-
- def __iter__(self):
- for key, value in iteritems(self.environ):
- if key.startswith("HTTP_") and key not in (
- "HTTP_CONTENT_TYPE",
- "HTTP_CONTENT_LENGTH",
- ):
- yield (
- key[5:].replace("_", "-").title(),
- _unicodify_header_value(value),
- )
- elif key in ("CONTENT_TYPE", "CONTENT_LENGTH") and value:
- yield (key.replace("_", "-").title(), _unicodify_header_value(value))
-
- def copy(self):
- raise TypeError("cannot create %r copies" % self.__class__.__name__)
-
-
-@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
-class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
- """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
- instances as sequence and it will combine the return values of all wrapped
- dicts:
-
- >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
- >>> post = MultiDict([('foo', 'bar')])
- >>> get = MultiDict([('blub', 'blah')])
- >>> combined = CombinedMultiDict([get, post])
- >>> combined['foo']
- 'bar'
- >>> combined['blub']
- 'blah'
-
- This works for all read operations and will raise a `TypeError` for
- methods that usually change data which isn't possible.
-
- From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
- subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
- render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
- exceptions.
- """
-
- def __reduce_ex__(self, protocol):
- return type(self), (self.dicts,)
-
- def __init__(self, dicts=None):
- self.dicts = dicts or []
-
- @classmethod
- def fromkeys(cls):
- raise TypeError("cannot create %r instances by fromkeys" % cls.__name__)
-
- def __getitem__(self, key):
- for d in self.dicts:
- if key in d:
- return d[key]
- raise exceptions.BadRequestKeyError(key)
-
- def get(self, key, default=None, type=None):
- for d in self.dicts:
- if key in d:
- if type is not None:
- try:
- return type(d[key])
- except ValueError:
- continue
- return d[key]
- return default
-
- def getlist(self, key, type=None):
- rv = []
- for d in self.dicts:
- rv.extend(d.getlist(key, type))
- return rv
-
- def _keys_impl(self):
- """This function exists so __len__ can be implemented more efficiently,
- saving one list creation from an iterator.
-
- Using this for Python 2's ``dict.keys`` behavior would be useless since
- `dict.keys` in Python 2 returns a list, while we have a set here.
- """
- rv = set()
- for d in self.dicts:
- rv.update(iterkeys(d))
- return rv
-
- def keys(self):
- return iter(self._keys_impl())
-
- __iter__ = keys
-
- def items(self, multi=False):
- found = set()
- for d in self.dicts:
- for key, value in iteritems(d, multi):
- if multi:
- yield key, value
- elif key not in found:
- found.add(key)
- yield key, value
-
- def values(self):
- for _key, value in iteritems(self):
- yield value
-
- def lists(self):
- rv = {}
- for d in self.dicts:
- for key, values in iterlists(d):
- rv.setdefault(key, []).extend(values)
- return iteritems(rv)
-
- def listvalues(self):
- return (x[1] for x in self.lists())
-
- def copy(self):
- """Return a shallow mutable copy of this object.
-
- This returns a :class:`MultiDict` representing the data at the
- time of copying. The copy will no longer reflect changes to the
- wrapped dicts.
-
- .. versionchanged:: 0.15
- Return a mutable :class:`MultiDict`.
- """
- return MultiDict(self)
-
- def to_dict(self, flat=True):
- """Return the contents as regular dict. If `flat` is `True` the
- returned dict will only have the first item present, if `flat` is
- `False` all values will be returned as lists.
-
- :param flat: If set to `False` the dict returned will have lists
- with all the values in it. Otherwise it will only
- contain the first item for each key.
- :return: a :class:`dict`
- """
- rv = {}
- for d in reversed(self.dicts):
- rv.update(d.to_dict(flat))
- return rv
-
- def __len__(self):
- return len(self._keys_impl())
-
- def __contains__(self, key):
- for d in self.dicts:
- if key in d:
- return True
- return False
-
- has_key = __contains__
-
- def __repr__(self):
- return "%s(%r)" % (self.__class__.__name__, self.dicts)
-
-
-class FileMultiDict(MultiDict):
- """A special :class:`MultiDict` that has convenience methods to add
- files to it. This is used for :class:`EnvironBuilder` and generally
- useful for unittesting.
-
- .. versionadded:: 0.5
- """
-
- def add_file(self, name, file, filename=None, content_type=None):
- """Adds a new file to the dict. `file` can be a file name or
- a :class:`file`-like or a :class:`FileStorage` object.
-
- :param name: the name of the field.
- :param file: a filename or :class:`file`-like object
- :param filename: an optional filename
- :param content_type: an optional content type
- """
- if isinstance(file, FileStorage):
- value = file
- else:
- if isinstance(file, string_types):
- if filename is None:
- filename = file
- file = open(file, "rb")
- if filename and content_type is None:
- content_type = (
- mimetypes.guess_type(filename)[0] or "application/octet-stream"
- )
- value = FileStorage(file, filename, name, content_type)
-
- self.add(name, value)
-
-
-class ImmutableDict(ImmutableDictMixin, dict):
- """An immutable :class:`dict`.
-
- .. versionadded:: 0.5
- """
-
- def __repr__(self):
- return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
-
- def copy(self):
- """Return a shallow mutable copy of this object. Keep in mind that
- the standard library's :func:`copy` function is a no-op for this class
- like for any other python immutable type (eg: :class:`tuple`).
- """
- return dict(self)
-
- def __copy__(self):
- return self
-
-
-class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
- """An immutable :class:`MultiDict`.
-
- .. versionadded:: 0.5
- """
-
- def copy(self):
- """Return a shallow mutable copy of this object. Keep in mind that
- the standard library's :func:`copy` function is a no-op for this class
- like for any other python immutable type (eg: :class:`tuple`).
- """
- return MultiDict(self)
-
- def __copy__(self):
- return self
-
-
-class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
- """An immutable :class:`OrderedMultiDict`.
-
- .. versionadded:: 0.6
- """
-
- def _iter_hashitems(self):
- return enumerate(iteritems(self, multi=True))
-
- def copy(self):
- """Return a shallow mutable copy of this object. Keep in mind that
- the standard library's :func:`copy` function is a no-op for this class
- like for any other python immutable type (eg: :class:`tuple`).
- """
- return OrderedMultiDict(self)
-
- def __copy__(self):
- return self
-
-
-@native_itermethods(["values"])
-class Accept(ImmutableList):
- """An :class:`Accept` object is just a list subclass for lists of
- ``(value, quality)`` tuples. It is automatically sorted by specificity
- and quality.
-
- All :class:`Accept` objects work similar to a list but provide extra
- functionality for working with the data. Containment checks are
- normalized to the rules of that header:
-
- >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
- >>> a.best
- 'ISO-8859-1'
- >>> 'iso-8859-1' in a
- True
- >>> 'UTF8' in a
- True
- >>> 'utf7' in a
- False
-
- To get the quality for an item you can use normal item lookup:
-
- >>> print a['utf-8']
- 0.7
- >>> a['utf7']
- 0
-
- .. versionchanged:: 0.5
- :class:`Accept` objects are forced immutable now.
- """
-
- def __init__(self, values=()):
- if values is None:
- list.__init__(self)
- self.provided = False
- elif isinstance(values, Accept):
- self.provided = values.provided
- list.__init__(self, values)
- else:
- self.provided = True
- values = sorted(
- values,
- key=lambda x: (self._specificity(x[0]), x[1], x[0]),
- reverse=True,
- )
- list.__init__(self, values)
-
- def _specificity(self, value):
- """Returns a tuple describing the value's specificity."""
- return (value != "*",)
-
- def _value_matches(self, value, item):
- """Check if a value matches a given accept item."""
- return item == "*" or item.lower() == value.lower()
-
- def __getitem__(self, key):
- """Besides index lookup (getting item n) you can also pass it a string
- to get the quality for the item. If the item is not in the list, the
- returned quality is ``0``.
- """
- if isinstance(key, string_types):
- return self.quality(key)
- return list.__getitem__(self, key)
-
- def quality(self, key):
- """Returns the quality of the key.
-
- .. versionadded:: 0.6
- In previous versions you had to use the item-lookup syntax
- (eg: ``obj[key]`` instead of ``obj.quality(key)``)
- """
- for item, quality in self:
- if self._value_matches(key, item):
- return quality
- return 0
-
- def __contains__(self, value):
- for item, _quality in self:
- if self._value_matches(value, item):
- return True
- return False
-
- def __repr__(self):
- return "%s([%s])" % (
- self.__class__.__name__,
- ", ".join("(%r, %s)" % (x, y) for x, y in self),
- )
-
- def index(self, key):
- """Get the position of an entry or raise :exc:`ValueError`.
-
- :param key: The key to be looked up.
-
- .. versionchanged:: 0.5
- This used to raise :exc:`IndexError`, which was inconsistent
- with the list API.
- """
- if isinstance(key, string_types):
- for idx, (item, _quality) in enumerate(self):
- if self._value_matches(key, item):
- return idx
- raise ValueError(key)
- return list.index(self, key)
-
- def find(self, key):
- """Get the position of an entry or return -1.
-
- :param key: The key to be looked up.
- """
- try:
- return self.index(key)
- except ValueError:
- return -1
-
- def values(self):
- """Iterate over all values."""
- for item in self:
- yield item[0]
-
- def to_header(self):
- """Convert the header set into an HTTP header string."""
- result = []
- for value, quality in self:
- if quality != 1:
- value = "%s;q=%s" % (value, quality)
- result.append(value)
- return ",".join(result)
-
- def __str__(self):
- return self.to_header()
-
- def _best_single_match(self, match):
- for client_item, quality in self:
- if self._value_matches(match, client_item):
- # self is sorted by specificity descending, we can exit
- return client_item, quality
-
- def best_match(self, matches, default=None):
- """Returns the best match from a list of possible matches based
- on the specificity and quality of the client. If two items have the
- same quality and specificity, the one is returned that comes first.
-
- :param matches: a list of matches to check for
- :param default: the value that is returned if none match
- """
- result = default
- best_quality = -1
- best_specificity = (-1,)
- for server_item in matches:
- match = self._best_single_match(server_item)
- if not match:
- continue
- client_item, quality = match
- specificity = self._specificity(client_item)
- if quality <= 0 or quality < best_quality:
- continue
- # better quality or same quality but more specific => better match
- if quality > best_quality or specificity > best_specificity:
- result = server_item
- best_quality = quality
- best_specificity = specificity
- return result
-
- @property
- def best(self):
- """The best match as value."""
- if self:
- return self[0][0]
-
-
-class MIMEAccept(Accept):
- """Like :class:`Accept` but with special methods and behavior for
- mimetypes.
- """
-
- def _specificity(self, value):
- return tuple(x != "*" for x in value.split("/", 1))
-
- def _value_matches(self, value, item):
- def _normalize(x):
- x = x.lower()
- return ("*", "*") if x == "*" else x.split("/", 1)
-
- # this is from the application which is trusted. to avoid developer
- # frustration we actually check these for valid values
- if "/" not in value:
- raise ValueError("invalid mimetype %r" % value)
- value_type, value_subtype = _normalize(value)
- if value_type == "*" and value_subtype != "*":
- raise ValueError("invalid mimetype %r" % value)
-
- if "/" not in item:
- return False
- item_type, item_subtype = _normalize(item)
- if item_type == "*" and item_subtype != "*":
- return False
- return (
- item_type == item_subtype == "*" or value_type == value_subtype == "*"
- ) or (
- item_type == value_type
- and (
- item_subtype == "*"
- or value_subtype == "*"
- or item_subtype == value_subtype
- )
- )
-
- @property
- def accept_html(self):
- """True if this object accepts HTML."""
- return (
- "text/html" in self or "application/xhtml+xml" in self or self.accept_xhtml
- )
-
- @property
- def accept_xhtml(self):
- """True if this object accepts XHTML."""
- return "application/xhtml+xml" in self or "application/xml" in self
-
- @property
- def accept_json(self):
- """True if this object accepts JSON."""
- return "application/json" in self
-
-
-class LanguageAccept(Accept):
- """Like :class:`Accept` but with normalization for languages."""
-
- def _value_matches(self, value, item):
- def _normalize(language):
- return _locale_delim_re.split(language.lower())
-
- return item == "*" or _normalize(value) == _normalize(item)
-
-
-class CharsetAccept(Accept):
- """Like :class:`Accept` but with normalization for charsets."""
-
- def _value_matches(self, value, item):
- def _normalize(name):
- try:
- return codecs.lookup(name).name
- except LookupError:
- return name.lower()
-
- return item == "*" or _normalize(value) == _normalize(item)
-
-
-def cache_property(key, empty, type):
- """Return a new property object for a cache header. Useful if you
- want to add support for a cache extension in a subclass."""
- return property(
- lambda x: x._get_cache_value(key, empty, type),
- lambda x, v: x._set_cache_value(key, v, type),
- lambda x: x._del_cache_value(key),
- "accessor for %r" % key,
- )
-
-
-class _CacheControl(UpdateDictMixin, dict):
- """Subclass of a dict that stores values for a Cache-Control header. It
- has accessors for all the cache-control directives specified in RFC 2616.
- The class does not differentiate between request and response directives.
-
- Because the cache-control directives in the HTTP header use dashes the
- python descriptors use underscores for that.
-
- To get a header of the :class:`CacheControl` object again you can convert
- the object into a string or call the :meth:`to_header` method. If you plan
- to subclass it and add your own items have a look at the sourcecode for
- that class.
-
- .. versionchanged:: 0.4
-
- Setting `no_cache` or `private` to boolean `True` will set the implicit
- none-value which is ``*``:
-
- >>> cc = ResponseCacheControl()
- >>> cc.no_cache = True
- >>> cc
-       <ResponseCacheControl 'no-cache'>
-
-
-class LengthRequired(HTTPException):
-    """*411* `Length Required`
-
-    Used if the browser submitted data but no ``Content-Length`` header which
-    is required for keep-alive TCP connections.
-    """
-
-    code = 411
-    description = (
-        "A request with this method requires a valid <code>Content-"
-        "Length</code> header."
-    )
-
-
-class PreconditionFailed(HTTPException):
- """*412* `Precondition Failed`
-
- Status code used in combination with ``If-Match``, ``If-None-Match``, or
- ``If-Unmodified-Since``.
- """
-
- code = 412
- description = (
- "The precondition on the request for the URL failed positive evaluation."
- )
-
-
-class RequestEntityTooLarge(HTTPException):
- """*413* `Request Entity Too Large`
-
- The status code one should return if the data submitted exceeded a given
- limit.
- """
-
- code = 413
- description = "The data value transmitted exceeds the capacity limit."
-
-
-class RequestURITooLarge(HTTPException):
- """*414* `Request URI Too Large`
-
- Like *413* but for too long URLs.
- """
-
- code = 414
- description = (
- "The length of the requested URL exceeds the capacity limit for"
- " this server. The request cannot be processed."
- )
-
-
-class UnsupportedMediaType(HTTPException):
- """*415* `Unsupported Media Type`
-
- The status code returned if the server is unable to handle the media type
- the client transmitted.
- """
-
- code = 415
- description = (
- "The server does not support the media type transmitted in the request."
- )
-
-
-class RequestedRangeNotSatisfiable(HTTPException):
- """*416* `Requested Range Not Satisfiable`
-
- The client asked for an invalid part of the file.
-
- .. versionadded:: 0.7
- """
-
- code = 416
- description = "The server cannot provide the requested range."
-
- def __init__(self, length=None, units="bytes", description=None):
- """Takes an optional `Content-Range` header value based on ``length``
- parameter.
- """
- HTTPException.__init__(self, description)
- self.length = length
- self.units = units
-
- def get_headers(self, environ=None):
- headers = HTTPException.get_headers(self, environ)
- if self.length is not None:
- headers.append(("Content-Range", "%s */%d" % (self.units, self.length)))
- return headers
-
-
-class ExpectationFailed(HTTPException):
- """*417* `Expectation Failed`
-
- The server cannot meet the requirements of the Expect request-header.
-
- .. versionadded:: 0.7
- """
-
- code = 417
- description = "The server could not meet the requirements of the Expect header"
-
-
-class ImATeapot(HTTPException):
- """*418* `I'm a teapot`
-
- The server should return this if it is a teapot and someone attempted
- to brew coffee with it.
-
- .. versionadded:: 0.7
- """
-
- code = 418
- description = "This server is a teapot, not a coffee machine"
-
-
-class UnprocessableEntity(HTTPException):
- """*422* `Unprocessable Entity`
-
- Used if the request is well formed, but the instructions are otherwise
- incorrect.
- """
-
- code = 422
- description = (
- "The request was well-formed but was unable to be followed due"
- " to semantic errors."
- )
-
-
-class Locked(HTTPException):
- """*423* `Locked`
-
- Used if the resource that is being accessed is locked.
- """
-
- code = 423
- description = "The resource that is being accessed is locked."
-
-
-class FailedDependency(HTTPException):
- """*424* `Failed Dependency`
-
- Used if the method could not be performed on the resource
- because the requested action depended on another action and that action failed.
- """
-
- code = 424
- description = (
- "The method could not be performed on the resource because the"
- " requested action depended on another action and that action"
- " failed."
- )
-
-
-class PreconditionRequired(HTTPException):
- """*428* `Precondition Required`
-
- The server requires this request to be conditional, typically to prevent
- the lost update problem, which is a race condition between two or more
- clients attempting to update a resource through PUT or DELETE. By requiring
- each client to include a conditional header ("If-Match" or "If-Unmodified-
- Since") with the proper value retained from a recent GET request, the
- server ensures that each client has at least seen the previous revision of
- the resource.
- """
-
- code = 428
- description = (
- "This request is required to be conditional; try using"
- ' "If-Match" or "If-Unmodified-Since".'
- )
-
-
-class TooManyRequests(HTTPException):
- """*429* `Too Many Requests`
-
- The server is limiting the rate at which this user receives responses, and
- this request exceeds that rate. (The server may use any convenient method
- to identify users and their request rates). The server may include a
- "Retry-After" header to indicate how long the user should wait before
- retrying.
- """
-
- code = 429
- description = "This user has exceeded an allotted request count. Try again later."
-
-
-class RequestHeaderFieldsTooLarge(HTTPException):
- """*431* `Request Header Fields Too Large`
-
- The server refuses to process the request because the header fields are too
- large. One or more individual fields may be too large, or the set of all
- headers is too large.
- """
-
- code = 431
- description = "One or more header fields exceeds the maximum size."
-
-
-class UnavailableForLegalReasons(HTTPException):
- """*451* `Unavailable For Legal Reasons`
-
- This status code indicates that the server is denying access to the
- resource as a consequence of a legal demand.
- """
-
- code = 451
- description = "Unavailable for legal reasons."
-
-
-class InternalServerError(HTTPException):
- """*500* `Internal Server Error`
-
- Raise if an internal server error occurred. This is a good fallback if an
- unknown error occurred in the dispatcher.
- """
-
- code = 500
- description = (
- "The server encountered an internal error and was unable to"
- " complete your request. Either the server is overloaded or"
- " there is an error in the application."
- )
-
-
-class NotImplemented(HTTPException):
- """*501* `Not Implemented`
-
- Raise if the application does not support the action requested by the
- browser.
- """
-
- code = 501
- description = "The server does not support the action requested by the browser."
-
-
-class BadGateway(HTTPException):
- """*502* `Bad Gateway`
-
- If you do proxying in your application you should return this status code
- if you received an invalid response from the upstream server it accessed
- in attempting to fulfill the request.
- """
-
- code = 502
- description = (
- "The proxy server received an invalid response from an upstream server."
- )
-
-
-class ServiceUnavailable(HTTPException):
- """*503* `Service Unavailable`
-
- Status code you should return if a service is temporarily unavailable.
- """
-
- code = 503
- description = (
- "The server is temporarily unable to service your request due"
- " to maintenance downtime or capacity problems. Please try"
- " again later."
- )
-
-
-class GatewayTimeout(HTTPException):
- """*504* `Gateway Timeout`
-
- Status code you should return if a connection to an upstream server
- times out.
- """
-
- code = 504
- description = "The connection to an upstream server timed out."
-
-
-class HTTPVersionNotSupported(HTTPException):
- """*505* `HTTP Version Not Supported`
-
- The server does not support the HTTP protocol version used in the request.
- """
-
- code = 505
- description = (
- "The server does not support the HTTP protocol version used in the request."
- )
-
-
-default_exceptions = {}
-__all__ = ["HTTPException"]
-
-
-def _find_exceptions():
- for _name, obj in iteritems(globals()):
- try:
- is_http_exception = issubclass(obj, HTTPException)
- except TypeError:
- is_http_exception = False
- if not is_http_exception or obj.code is None:
- continue
- __all__.append(obj.__name__)
- old_obj = default_exceptions.get(obj.code, None)
- if old_obj is not None and issubclass(obj, old_obj):
- continue
- default_exceptions[obj.code] = obj
-
-
-_find_exceptions()
-del _find_exceptions
-
-
-class Aborter(object):
- """When passed a dict of code -> exception items it can be used as
- callable that raises exceptions. If the first argument to the
- callable is an integer it will be looked up in the mapping, if it's
- a WSGI application it will be raised in a proxy exception.
-
- The rest of the arguments are forwarded to the exception constructor.
- """
-
- def __init__(self, mapping=None, extra=None):
- if mapping is None:
- mapping = default_exceptions
- self.mapping = dict(mapping)
- if extra is not None:
- self.mapping.update(extra)
-
- def __call__(self, code, *args, **kwargs):
- if not args and not kwargs and not isinstance(code, integer_types):
- raise HTTPException(response=code)
- if code not in self.mapping:
- raise LookupError("no exception for %r" % code)
- raise self.mapping[code](*args, **kwargs)
-
-
-def abort(status, *args, **kwargs):
- """Raises an :py:exc:`HTTPException` for the given status code or WSGI
- application::
-
- abort(404) # 404 Not Found
- abort(Response('Hello World'))
-
- Can be passed a WSGI application or a status code. If a status code is
- given it's looked up in the list of exceptions and will raise that
- exception, if passed a WSGI application it will wrap it in a proxy WSGI
- exception and raise that::
-
- abort(404)
- abort(Response('Hello World'))
-
- """
- return _aborter(status, *args, **kwargs)
-
-
-_aborter = Aborter()
-
-
-#: an exception that is used internally to signal both a key error and a
-#: bad request. Used by a lot of the datastructures.
-BadRequestKeyError = BadRequest.wrap(KeyError)
-
-# imported here because of circular dependencies of werkzeug.utils
-from .http import HTTP_STATUS_CODES
-from .utils import escape
diff --git a/azure/functions/_thirdparty/werkzeug/formparser.py b/azure/functions/_thirdparty/werkzeug/formparser.py
deleted file mode 100644
index 0ddc5c8f..00000000
--- a/azure/functions/_thirdparty/werkzeug/formparser.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- werkzeug.formparser
- ~~~~~~~~~~~~~~~~~~~
-
- This module implements the form parsing. It supports url-encoded forms
- as well as non-nested multipart uploads.
-
- :copyright: 2007 Pallets
- :license: BSD-3-Clause
-"""
-import codecs
-import re
-from functools import update_wrapper
-from itertools import chain
-from itertools import repeat
-from itertools import tee
-
-from ._compat import BytesIO
-from ._compat import text_type
-from ._compat import to_native
-from .datastructures import FileStorage
-from .datastructures import Headers
-from .datastructures import MultiDict
-from .http import parse_options_header
-from .urls import url_decode_stream
-from .wsgi import get_content_length
-from .wsgi import get_input_stream
-from .wsgi import make_line_iter
-
-# there are some platforms where SpooledTemporaryFile is not available.
-# In that case we need to provide a fallback.
-try:
- from tempfile import SpooledTemporaryFile
-except ImportError:
- from tempfile import TemporaryFile
-
- SpooledTemporaryFile = None
-
-
-#: an iterator that yields empty strings
-_empty_string_iter = repeat("")
-
-#: a regular expression for multipart boundaries
-_multipart_boundary_re = re.compile("^[ -~]{0,200}[!-~]$")
-
-#: supported http encodings that are also available in python we support
-#: for multipart messages.
-_supported_multipart_encodings = frozenset(["base64", "quoted-printable"])
-
-
-def default_stream_factory(
- total_content_length, filename, content_type, content_length=None
-):
- """The stream factory that is used per default."""
- max_size = 1024 * 500
- if SpooledTemporaryFile is not None:
- return SpooledTemporaryFile(max_size=max_size, mode="wb+")
- if total_content_length is None or total_content_length > max_size:
- return TemporaryFile("wb+")
- return BytesIO()
-
-
-def parse_form_data(
- environ,
- stream_factory=None,
- charset="utf-8",
- errors="replace",
- max_form_memory_size=None,
- max_content_length=None,
- cls=None,
- silent=True,
-):
- """Parse the form data in the environ and return it as tuple in the form
- ``(stream, form, files)``. You should only call this method if the
- transport method is `POST`, `PUT`, or `PATCH`.
-
- If the mimetype of the data transmitted is `multipart/form-data` the
- files multidict will be filled with `FileStorage` objects. If the
- mimetype is unknown the input stream is wrapped and returned as first
- argument, else the stream is empty.
-
- This is a shortcut for the common usage of :class:`FormDataParser`.
-
- Have a look at :ref:`dealing-with-request-data` for more details.
-
- .. versionadded:: 0.5
- The `max_form_memory_size`, `max_content_length` and
- `cls` parameters were added.
-
- .. versionadded:: 0.5.1
- The optional `silent` flag was added.
-
- :param environ: the WSGI environment to be used for parsing.
- :param stream_factory: An optional callable that returns a new read and
- writeable file descriptor. This callable works
- the same as :meth:`~BaseResponse._get_file_stream`.
- :param charset: The character set for URL and url encoded form data.
- :param errors: The encoding error behavior.
- :param max_form_memory_size: the maximum number of bytes to be accepted for
- in-memory stored form data. If the data
- exceeds the value specified an
- :exc:`~exceptions.RequestEntityTooLarge`
- exception is raised.
- :param max_content_length: If this is provided and the transmitted data
- is longer than this value an
- :exc:`~exceptions.RequestEntityTooLarge`
- exception is raised.
- :param cls: an optional dict class to use. If this is not specified
- or `None` the default :class:`MultiDict` is used.
- :param silent: If set to False parsing errors will not be caught.
- :return: A tuple in the form ``(stream, form, files)``.
- """
- return FormDataParser(
- stream_factory,
- charset,
- errors,
- max_form_memory_size,
- max_content_length,
- cls,
- silent,
- ).parse_from_environ(environ)
-
-
-def exhaust_stream(f):
- """Helper decorator for methods that exhausts the stream on return."""
-
- def wrapper(self, stream, *args, **kwargs):
- try:
- return f(self, stream, *args, **kwargs)
- finally:
- exhaust = getattr(stream, "exhaust", None)
- if exhaust is not None:
- exhaust()
- else:
- while 1:
- chunk = stream.read(1024 * 64)
- if not chunk:
- break
-
- return update_wrapper(wrapper, f)
-
-
-class FormDataParser(object):
- """This class implements parsing of form data for Werkzeug. By itself
- it can parse multipart and url encoded form data. It can be subclassed
- and extended but for most mimetypes it is a better idea to use the
- untouched stream and expose it as separate attributes on a request
- object.
-
- .. versionadded:: 0.8
-
- :param stream_factory: An optional callable that returns a new read and
- writeable file descriptor. This callable works
- the same as :meth:`~BaseResponse._get_file_stream`.
- :param charset: The character set for URL and url encoded form data.
- :param errors: The encoding error behavior.
- :param max_form_memory_size: the maximum number of bytes to be accepted for
- in-memory stored form data. If the data
- exceeds the value specified an
- :exc:`~exceptions.RequestEntityTooLarge`
- exception is raised.
- :param max_content_length: If this is provided and the transmitted data
- is longer than this value an
- :exc:`~exceptions.RequestEntityTooLarge`
- exception is raised.
- :param cls: an optional dict class to use. If this is not specified
- or `None` the default :class:`MultiDict` is used.
- :param silent: If set to False parsing errors will not be caught.
- """
-
- def __init__(
- self,
- stream_factory=None,
- charset="utf-8",
- errors="replace",
- max_form_memory_size=None,
- max_content_length=None,
- cls=None,
- silent=True,
- ):
- if stream_factory is None:
- stream_factory = default_stream_factory
- self.stream_factory = stream_factory
- self.charset = charset
- self.errors = errors
- self.max_form_memory_size = max_form_memory_size
- self.max_content_length = max_content_length
- if cls is None:
- cls = MultiDict
- self.cls = cls
- self.silent = silent
-
- def get_parse_func(self, mimetype, options):
- return self.parse_functions.get(mimetype)
-
- def parse_from_environ(self, environ):
- """Parses the information from the environment as form data.
-
- :param environ: the WSGI environment to be used for parsing.
- :return: A tuple in the form ``(stream, form, files)``.
- """
- content_type = environ.get("CONTENT_TYPE", "")
- content_length = get_content_length(environ)
- mimetype, options = parse_options_header(content_type)
- return self.parse(get_input_stream(environ), mimetype, content_length, options)
-
- def parse(self, stream, mimetype, content_length, options=None):
- """Parses the information from the given stream, mimetype,
- content length and mimetype parameters.
-
- :param stream: an input stream
- :param mimetype: the mimetype of the data
- :param content_length: the content length of the incoming data
- :param options: optional mimetype parameters (used for
- the multipart boundary for instance)
- :return: A tuple in the form ``(stream, form, files)``.
- """
- if (
- self.max_content_length is not None
- and content_length is not None
- and content_length > self.max_content_length
- ):
- raise exceptions.RequestEntityTooLarge()
- if options is None:
- options = {}
-
- parse_func = self.get_parse_func(mimetype, options)
- if parse_func is not None:
- try:
- return parse_func(self, stream, mimetype, content_length, options)
- except ValueError:
- if not self.silent:
- raise
-
- return stream, self.cls(), self.cls()
-
- @exhaust_stream
- def _parse_multipart(self, stream, mimetype, content_length, options):
- parser = MultiPartParser(
- self.stream_factory,
- self.charset,
- self.errors,
- max_form_memory_size=self.max_form_memory_size,
- cls=self.cls,
- )
- boundary = options.get("boundary")
- if boundary is None:
- raise ValueError("Missing boundary")
- if isinstance(boundary, text_type):
- boundary = boundary.encode("ascii")
- form, files = parser.parse(stream, boundary, content_length)
- return stream, form, files
-
- @exhaust_stream
- def _parse_urlencoded(self, stream, mimetype, content_length, options):
- if (
- self.max_form_memory_size is not None
- and content_length is not None
- and content_length > self.max_form_memory_size
- ):
- raise exceptions.RequestEntityTooLarge()
- form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
- return stream, form, self.cls()
-
- #: mapping of mimetypes to parsing functions
- parse_functions = {
- "multipart/form-data": _parse_multipart,
- "application/x-www-form-urlencoded": _parse_urlencoded,
- "application/x-url-encoded": _parse_urlencoded,
- }
-
-
-def is_valid_multipart_boundary(boundary):
- """Checks if the string given is a valid multipart boundary."""
- return _multipart_boundary_re.match(boundary) is not None
-
-
-def _line_parse(line):
- """Removes line ending characters and returns a tuple (`stripped_line`,
- `is_terminated`).
- """
- if line[-2:] in ["\r\n", b"\r\n"]:
- return line[:-2], True
- elif line[-1:] in ["\r", "\n", b"\r", b"\n"]:
- return line[:-1], True
- return line, False
-
-
-def parse_multipart_headers(iterable):
- """Parses multipart headers from an iterable that yields lines (including
- the trailing newline symbol). The iterable has to be newline terminated.
-
- The iterable will stop at the line where the headers ended so it can be
- further consumed.
-
- :param iterable: iterable of strings that are newline terminated
- """
- result = []
- for line in iterable:
- line = to_native(line)
- line, line_terminated = _line_parse(line)
- if not line_terminated:
- raise ValueError("unexpected end of line in multipart header")
- if not line:
- break
- elif line[0] in " \t" and result:
- key, value = result[-1]
- result[-1] = (key, value + "\n " + line[1:])
- else:
- parts = line.split(":", 1)
- if len(parts) == 2:
- result.append((parts[0].strip(), parts[1].strip()))
-
- # we link the list to the headers, no need to create a copy, the
- # list was not shared anyways.
- return Headers(result)
-
-
-_begin_form = "begin_form"
-_begin_file = "begin_file"
-_cont = "cont"
-_end = "end"
-
-
-class MultiPartParser(object):
- def __init__(
- self,
- stream_factory=None,
- charset="utf-8",
- errors="replace",
- max_form_memory_size=None,
- cls=None,
- buffer_size=64 * 1024,
- ):
- self.charset = charset
- self.errors = errors
- self.max_form_memory_size = max_form_memory_size
- self.stream_factory = (
- default_stream_factory if stream_factory is None else stream_factory
- )
- self.cls = MultiDict if cls is None else cls
-
- # make sure the buffer size is divisible by four so that we can base64
- # decode chunk by chunk
- assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
- # also the buffer size has to be at least 1024 bytes long or long headers
- # will freak out the system
- assert buffer_size >= 1024, "buffer size has to be at least 1KB"
-
- self.buffer_size = buffer_size
-
- def _fix_ie_filename(self, filename):
- """Internet Explorer 6 transmits the full file name if a file is
- uploaded. This function strips the full path if it thinks the
- filename is Windows-like absolute.
- """
- if filename[1:3] == ":\\" or filename[:2] == "\\\\":
- return filename.split("\\")[-1]
- return filename
-
- def _find_terminator(self, iterator):
- """The terminator might have some additional newlines before it.
- There is at least one application that sends additional newlines
- before headers (the python setuptools package).
- """
- for line in iterator:
- if not line:
- break
- line = line.strip()
- if line:
- return line
- return b""
-
- def fail(self, message):
- raise ValueError(message)
-
- def get_part_encoding(self, headers):
- transfer_encoding = headers.get("content-transfer-encoding")
- if (
- transfer_encoding is not None
- and transfer_encoding in _supported_multipart_encodings
- ):
- return transfer_encoding
-
- def get_part_charset(self, headers):
- # Figure out input charset for current part
- content_type = headers.get("content-type")
- if content_type:
- mimetype, ct_params = parse_options_header(content_type)
- return ct_params.get("charset", self.charset)
- return self.charset
-
- def start_file_streaming(self, filename, headers, total_content_length):
- if isinstance(filename, bytes):
- filename = filename.decode(self.charset, self.errors)
- filename = self._fix_ie_filename(filename)
- content_type = headers.get("content-type")
- try:
- content_length = int(headers["content-length"])
- except (KeyError, ValueError):
- content_length = 0
- container = self.stream_factory(
- total_content_length=total_content_length,
- filename=filename,
- content_type=content_type,
- content_length=content_length,
- )
- return filename, container
-
- def in_memory_threshold_reached(self, bytes):
- raise exceptions.RequestEntityTooLarge()
-
- def validate_boundary(self, boundary):
- if not boundary:
- self.fail("Missing boundary")
- if not is_valid_multipart_boundary(boundary):
- self.fail("Invalid boundary: %s" % boundary)
- if len(boundary) > self.buffer_size: # pragma: no cover
- # this should never happen because we check for a minimum size
- # of 1024 and boundaries may not be longer than 200. The only
- # situation when this happens is for non debug builds where
- # the assert is skipped.
- self.fail("Boundary longer than buffer size")
-
- def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
- """Generate parts of
- ``('begin_form', (headers, name))``
- ``('begin_file', (headers, name, filename))``
- ``('cont', bytestring)``
- ``('end', None)``
-
- Always obeys the grammar
- parts = ( begin_form cont* end |
- begin_file cont* end )*
- """
- next_part = b"--" + boundary
- last_part = next_part + b"--"
-
- iterator = chain(
- make_line_iter(
- file,
- limit=content_length,
- buffer_size=self.buffer_size,
- cap_at_buffer=cap_at_buffer,
- ),
- _empty_string_iter,
- )
-
- terminator = self._find_terminator(iterator)
-
- if terminator == last_part:
- return
- elif terminator != next_part:
- self.fail("Expected boundary at start of multipart data")
-
- while terminator != last_part:
- headers = parse_multipart_headers(iterator)
-
- disposition = headers.get("content-disposition")
- if disposition is None:
- self.fail("Missing Content-Disposition header")
- disposition, extra = parse_options_header(disposition)
- transfer_encoding = self.get_part_encoding(headers)
- name = extra.get("name")
- filename = extra.get("filename")
-
- # if no content type is given we stream into memory. A list is
- # used as a temporary container.
- if filename is None:
- yield _begin_form, (headers, name)
-
- # otherwise we parse the rest of the headers and ask the stream
- # factory for something we can write in.
- else:
- yield _begin_file, (headers, name, filename)
-
- buf = b""
- for line in iterator:
- if not line:
- self.fail("unexpected end of stream")
-
- if line[:2] == b"--":
- terminator = line.rstrip()
- if terminator in (next_part, last_part):
- break
-
- if transfer_encoding is not None:
- if transfer_encoding == "base64":
- transfer_encoding = "base64_codec"
- try:
- line = codecs.decode(line, transfer_encoding)
- except Exception:
- self.fail("could not decode transfer encoded chunk")
-
- # we have something in the buffer from the last iteration.
- # this is usually a newline delimiter.
- if buf:
- yield _cont, buf
- buf = b""
-
- # If the line ends with windows CRLF we write everything except
- # the last two bytes. In all other cases however we write
- # everything except the last byte. If it was a newline, that's
- # fine, otherwise it does not matter because we will write it
- # the next iteration. this ensures we do not write the
- # final newline into the stream. That way we do not have to
- # truncate the stream. However we do have to make sure that
- # if something else than a newline is in there we write it
- # out.
- if line[-2:] == b"\r\n":
- buf = b"\r\n"
- cutoff = -2
- else:
- buf = line[-1:]
- cutoff = -1
- yield _cont, line[:cutoff]
-
- else: # pragma: no cover
- raise ValueError("unexpected end of part")
-
- # if we have a leftover in the buffer that is not a newline
- # character we have to flush it, otherwise we will chop off
- # certain values.
- if buf not in (b"", b"\r", b"\n", b"\r\n"):
- yield _cont, buf
-
- yield _end, None
-
- def parse_parts(self, file, boundary, content_length):
- """Generate ``('file', (name, val))`` and
- ``('form', (name, val))`` parts.
- """
- in_memory = 0
-
- for ellt, ell in self.parse_lines(file, boundary, content_length):
- if ellt == _begin_file:
- headers, name, filename = ell
- is_file = True
- guard_memory = False
- filename, container = self.start_file_streaming(
- filename, headers, content_length
- )
- _write = container.write
-
- elif ellt == _begin_form:
- headers, name = ell
- is_file = False
- container = []
- _write = container.append
- guard_memory = self.max_form_memory_size is not None
-
- elif ellt == _cont:
- _write(ell)
- # if we write into memory and there is a memory size limit we
- # count the number of bytes in memory and raise an exception if
- # there is too much data in memory.
- if guard_memory:
- in_memory += len(ell)
- if in_memory > self.max_form_memory_size:
- self.in_memory_threshold_reached(in_memory)
-
- elif ellt == _end:
- if is_file:
- container.seek(0)
- yield (
- "file",
- (name, FileStorage(container, filename, name, headers=headers)),
- )
- else:
- part_charset = self.get_part_charset(headers)
- yield (
- "form",
- (name, b"".join(container).decode(part_charset, self.errors)),
- )
-
- def parse(self, file, boundary, content_length):
- formstream, filestream = tee(
- self.parse_parts(file, boundary, content_length), 2
- )
- form = (p[1] for p in formstream if p[0] == "form")
- files = (p[1] for p in filestream if p[0] == "file")
- return self.cls(form), self.cls(files)
-
-
-from . import exceptions
diff --git a/azure/functions/_thirdparty/werkzeug/http.py b/azure/functions/_thirdparty/werkzeug/http.py
deleted file mode 100644
index 3f40b308..00000000
--- a/azure/functions/_thirdparty/werkzeug/http.py
+++ /dev/null
@@ -1,1249 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- werkzeug.http
- ~~~~~~~~~~~~~
-
- Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
- HTTP data. Most of the classes and functions provided by this module are
- used by the wrappers, but they are useful on their own, too, especially if
- the response and request objects are not used.
-
- This covers some of the more HTTP centric features of WSGI, some other
- utilities such as cookie handling are documented in the `werkzeug.utils`
- module.
-
-
- :copyright: 2007 Pallets
- :license: BSD-3-Clause
-"""
-import base64
-import re
-import warnings
-from datetime import datetime
-from datetime import timedelta
-from hashlib import md5
-from time import gmtime
-from time import time
-
-from ._compat import integer_types
-from ._compat import iteritems
-from ._compat import PY2
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import to_bytes
-from ._compat import to_unicode
-from ._compat import try_coerce_native
-from ._internal import _cookie_parse_impl
-from ._internal import _cookie_quote
-from ._internal import _make_cookie_domain
-
-try:
- from email.utils import parsedate_tz
-except ImportError:
- from email.Utils import parsedate_tz
-
-try:
- from urllib.request import parse_http_list as _parse_list_header
- from urllib.parse import unquote_to_bytes as _unquote
-except ImportError:
- from urllib2 import parse_http_list as _parse_list_header
- from urllib2 import unquote as _unquote
-
-_cookie_charset = "latin1"
-_basic_auth_charset = "utf-8"
-# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
-_accept_re = re.compile(
- r"""
- ( # media-range capturing-parenthesis
- [^\s;,]+ # type/subtype
- (?:[ \t]*;[ \t]* # ";"
- (?: # parameter non-capturing-parenthesis
- [^\s;,q][^\s;,]* # token that doesn't start with "q"
- | # or
- q[^\s;,=][^\s;,]* # token that is more than just "q"
- )
- )* # zero or more parameters
- ) # end of media-range
- (?:[ \t]*;[ \t]*q= # weight is a "q" parameter
- (\d*(?:\.\d+)?) # qvalue capturing-parentheses
- [^,]* # "extension" accept params: who cares?
- )? # accept params are optional
- """,
- re.VERBOSE,
-)
-_token_chars = frozenset(
- "!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~"
-)
-_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
-_unsafe_header_chars = set('()<>@,;:"/[]?={} \t')
-_option_header_piece_re = re.compile(
- r"""
- ;\s*,?\s* # newlines were replaced with commas
- (?P<foo>
' - """ - - _entity_re = re.compile(r"&([^;]+);") - _entities = name2codepoint.copy() - _entities["apos"] = 39 - _empty_elements = { - "area", - "base", - "basefont", - "br", - "col", - "command", - "embed", - "frame", - "hr", - "img", - "input", - "keygen", - "isindex", - "link", - "meta", - "param", - "source", - "wbr", - } - _boolean_attributes = { - "selected", - "checked", - "compact", - "declare", - "defer", - "disabled", - "ismap", - "multiple", - "nohref", - "noresize", - "noshade", - "nowrap", - } - _plaintext_elements = {"textarea"} - _c_like_cdata = {"script", "style"} - - def __init__(self, dialect): - self._dialect = dialect - - def __call__(self, s): - return escape(s) - - def __getattr__(self, tag): - if tag[:2] == "__": - raise AttributeError(tag) - - def proxy(*children, **arguments): - buffer = "<" + tag - for key, value in iteritems(arguments): - if value is None: - continue - if key[-1] == "_": - key = key[:-1] - if key in self._boolean_attributes: - if not value: - continue - if self._dialect == "xhtml": - value = '="' + key + '"' - else: - value = "" - else: - value = '="' + escape(value) + '"' - buffer += " " + key + value - if not children and tag in self._empty_elements: - if self._dialect == "xhtml": - buffer += " />" - else: - buffer += ">" - return buffer - buffer += ">" - - children_as_string = "".join( - [text_type(x) for x in children if x is not None] - ) - - if children_as_string: - if tag in self._plaintext_elements: - children_as_string = escape(children_as_string) - elif tag in self._c_like_cdata and self._dialect == "xhtml": - children_as_string = ( - "/**/" - ) - buffer += children_as_string + "" + tag + ">" - return buffer - - return proxy - - def __repr__(self): - return "<%s for %r>" % (self.__class__.__name__, self._dialect) - - -html = HTMLBuilder("html") -xhtml = HTMLBuilder("xhtml") - -# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in -# https://www.iana.org/assignments/media-types/media-types.xhtml -# Types listed in the XDG mime info that have a charset in the IANA registration. -_charset_mimetypes = { - "application/ecmascript", - "application/javascript", - "application/sql", - "application/xml", - "application/xml-dtd", - "application/xml-external-parsed-entity", -} - - -def get_content_type(mimetype, charset): - """Returns the full content type string with charset for a mimetype. - - If the mimetype represents text, the charset parameter will be - appended, otherwise the mimetype is returned unchanged. - - :param mimetype: The mimetype to be used as content type. - :param charset: The charset to be appended for text mimetypes. - :return: The content type. - - .. verionchanged:: 0.15 - Any type that ends with ``+xml`` gets a charset, not just those - that start with ``application/``. Known text types such as - ``application/javascript`` are also given charsets. - """ - if ( - mimetype.startswith("text/") - or mimetype in _charset_mimetypes - or mimetype.endswith("+xml") - ): - mimetype += "; charset=" + charset - - return mimetype - - -def detect_utf_encoding(data): - """Detect which UTF encoding was used to encode the given bytes. - - The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is - accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big - or little endian. Some editors or libraries may prepend a BOM. - - :internal: - - :param data: Bytes in unknown UTF encoding. - :return: UTF encoding name - - .. 
versionadded:: 0.15 - """ - head = data[:4] - - if head[:3] == codecs.BOM_UTF8: - return "utf-8-sig" - - if b"\x00" not in head: - return "utf-8" - - if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE): - return "utf-32" - - if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE): - return "utf-16" - - if len(head) == 4: - if head[:3] == b"\x00\x00\x00": - return "utf-32-be" - - if head[::2] == b"\x00\x00": - return "utf-16-be" - - if head[1:] == b"\x00\x00\x00": - return "utf-32-le" - - if head[1::2] == b"\x00\x00": - return "utf-16-le" - - if len(head) == 2: - return "utf-16-be" if head.startswith(b"\x00") else "utf-16-le" - - return "utf-8" - - -def format_string(string, context): - """String-template format a string: - - >>> format_string('$foo and ${foo}s', dict(foo=42)) - '42 and 42s' - - This does not do any attribute lookup etc. For more advanced string - formattings have a look at the `werkzeug.template` module. - - :param string: the format string. - :param context: a dict with the variables to insert. - """ - - def lookup_arg(match): - x = context[match.group(1) or match.group(2)] - if not isinstance(x, string_types): - x = type(string)(x) - return x - - return _format_re.sub(lookup_arg, string) - - -def secure_filename(filename): - r"""Pass it a filename and it will return a secure version of it. This - filename can then safely be stored on a regular file system and passed - to :func:`os.path.join`. The filename returned is an ASCII only string - for maximum portability. - - On windows systems the function also makes sure that the file is not - named after one of the special device files. - - >>> secure_filename("My cool movie.mov") - 'My_cool_movie.mov' - >>> secure_filename("../../../etc/passwd") - 'etc_passwd' - >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt') - 'i_contain_cool_umlauts.txt' - - The function might return an empty filename. It's your responsibility - to ensure that the filename is unique and that you abort or - generate a random filename if the function returned an empty one. - - .. versionadded:: 0.5 - - :param filename: the filename to secure - """ - if isinstance(filename, text_type): - from unicodedata import normalize - - filename = normalize("NFKD", filename).encode("ascii", "ignore") - if not PY2: - filename = filename.decode("ascii") - for sep in os.path.sep, os.path.altsep: - if sep: - filename = filename.replace(sep, " ") - filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip( - "._" - ) - - # on nt a couple of special files are present in each folder. We - # have to ensure that the target file is not such a filename. In - # this case we prepend an underline - if ( - os.name == "nt" - and filename - and filename.split(".")[0].upper() in _windows_device_files - ): - filename = "_" + filename - - return filename - - -def escape(s): - """Replace special characters "&", "<", ">" and (") to HTML-safe sequences. - - There is a special handling for `None` which escapes to an empty string. - - .. versionchanged:: 0.9 - `quote` is now implicitly on. - - :param s: the string to escape. - :param quote: ignored. - """ - if s is None: - return "" - elif hasattr(s, "__html__"): - return text_type(s.__html__()) - - if not isinstance(s, string_types): - s = text_type(s) - - return ( - s.replace("&", "&") - .replace("<", "<") - .replace(">", ">") - .replace('"', """) - ) - - -def unescape(s): - """The reverse function of `escape`. This unescapes all the HTML - entities, not only the XML entities inserted by `escape`. 
- - :param s: the string to unescape. - """ - - def handle_match(m): - name = m.group(1) - if name in HTMLBuilder._entities: - return unichr(HTMLBuilder._entities[name]) - try: - if name[:2] in ("#x", "#X"): - return unichr(int(name[2:], 16)) - elif name.startswith("#"): - return unichr(int(name[1:])) - except ValueError: - pass - return u"" - - return _entity_re.sub(handle_match, s) - - -def redirect(location, code=302, Response=None): - """Returns a response object (a WSGI application) that, if called, - redirects the client to the target location. Supported codes are - 301, 302, 303, 305, 307, and 308. 300 is not supported because - it's not a real redirect and 304 because it's the answer for a - request with a request with defined If-Modified-Since headers. - - .. versionadded:: 0.6 - The location can now be a unicode string that is encoded using - the :func:`iri_to_uri` function. - - .. versionadded:: 0.10 - The class used for the Response object can now be passed in. - - :param location: the location the response should redirect to. - :param code: the redirect status code. defaults to 302. - :param class Response: a Response class to use when instantiating a - response. The default is :class:`werkzeug.wrappers.Response` if - unspecified. - """ - if Response is None: - from .wrappers import Response - - display_location = escape(location) - if isinstance(location, text_type): - # Safe conversion is necessary here as we might redirect - # to a broken URI scheme (for instance itms-services). - from .urls import iri_to_uri - - location = iri_to_uri(location, safe_conversion=True) - response = Response( - '\n' - "You should be redirected automatically to target URL: " - '%s. If not click the link.' - % (escape(location), display_location), - code, - mimetype="text/html", - ) - response.headers["Location"] = location - return response - - -def append_slash_redirect(environ, code=301): - """Redirects to the same URL but with a slash appended. The behavior - of this function is undefined if the path ends with a slash already. - - :param environ: the WSGI environment for the request that triggers - the redirect. - :param code: the status code for the redirect. - """ - new_path = environ["PATH_INFO"].strip("/") + "/" - query_string = environ.get("QUERY_STRING") - if query_string: - new_path += "?" + query_string - return redirect(new_path, code) - - -def import_string(import_name, silent=False): - """Imports an object based on a string. This is useful if you want to - use import paths as endpoints or something similar. An import path can - be specified either in dotted notation (``xml.sax.saxutils.escape``) - or with a colon as object delimiter (``xml.sax.saxutils:escape``). - - If `silent` is True the return value will be `None` if the import fails. - - :param import_name: the dotted name for the object to import. - :param silent: if set to `True` import errors are ignored and - `None` is returned instead. - :return: imported object - """ - # force the import name to automatically convert to strings - # __import__ is not able to handle unicode strings in the fromlist - # if the module is a package - import_name = str(import_name).replace(":", ".") - try: - try: - __import__(import_name) - except ImportError: - if "." 
not in import_name: - raise - else: - return sys.modules[import_name] - - module_name, obj_name = import_name.rsplit(".", 1) - module = __import__(module_name, globals(), locals(), [obj_name]) - try: - return getattr(module, obj_name) - except AttributeError as e: - raise ImportError(e) - - except ImportError as e: - if not silent: - reraise( - ImportStringError, ImportStringError(import_name, e), sys.exc_info()[2] - ) - - -def find_modules(import_path, include_packages=False, recursive=False): - """Finds all the modules below a package. This can be useful to - automatically import all views / controllers so that their metaclasses / - function decorators have a chance to register themselves on the - application. - - Packages are not returned unless `include_packages` is `True`. This can - also recursively list modules but in that case it will import all the - packages to get the correct load path of that module. - - :param import_path: the dotted name for the package to find child modules. - :param include_packages: set to `True` if packages should be returned, too. - :param recursive: set to `True` if recursion should happen. - :return: generator - """ - module = import_string(import_path) - path = getattr(module, "__path__", None) - if path is None: - raise ValueError("%r is not a package" % import_path) - basename = module.__name__ + "." - for _importer, modname, ispkg in pkgutil.iter_modules(path): - modname = basename + modname - if ispkg: - if include_packages: - yield modname - if recursive: - for item in find_modules(modname, include_packages, True): - yield item - else: - yield modname - - -def validate_arguments(func, args, kwargs, drop_extra=True): - """Checks if the function accepts the arguments and keyword arguments. - Returns a new ``(args, kwargs)`` tuple that can safely be passed to - the function without causing a `TypeError` because the function signature - is incompatible. If `drop_extra` is set to `True` (which is the default) - any extra positional or keyword arguments are dropped automatically. - - The exception raised provides three attributes: - - `missing` - A set of argument names that the function expected but where - missing. - - `extra` - A dict of keyword arguments that the function can not handle but - where provided. - - `extra_positional` - A list of values that where given by positional argument but the - function cannot accept. - - This can be useful for decorators that forward user submitted data to - a view function:: - - from werkzeug.utils import ArgumentValidationError, validate_arguments - - def sanitize(f): - def proxy(request): - data = request.values.to_dict() - try: - args, kwargs = validate_arguments(f, (request,), data) - except ArgumentValidationError: - raise BadRequest('The browser failed to transmit all ' - 'the data expected.') - return f(*args, **kwargs) - return proxy - - :param func: the function the validation is performed against. - :param args: a tuple of positional arguments. - :param kwargs: a dict of keyword arguments. - :param drop_extra: set to `False` if you don't want extra arguments - to be silently dropped. - :return: tuple in the form ``(args, kwargs)``. 
- """ - parser = _parse_signature(func) - args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5] - if missing: - raise ArgumentValidationError(tuple(missing)) - elif (extra or extra_positional) and not drop_extra: - raise ArgumentValidationError(None, extra, extra_positional) - return tuple(args), kwargs - - -def bind_arguments(func, args, kwargs): - """Bind the arguments provided into a dict. When passed a function, - a tuple of arguments and a dict of keyword arguments `bind_arguments` - returns a dict of names as the function would see it. This can be useful - to implement a cache decorator that uses the function arguments to build - the cache key based on the values of the arguments. - - :param func: the function the arguments should be bound for. - :param args: tuple of positional arguments. - :param kwargs: a dict of keyword arguments. - :return: a :class:`dict` of bound keyword arguments. - """ - ( - args, - kwargs, - missing, - extra, - extra_positional, - arg_spec, - vararg_var, - kwarg_var, - ) = _parse_signature(func)(args, kwargs) - values = {} - for (name, _has_default, _default), value in zip(arg_spec, args): - values[name] = value - if vararg_var is not None: - values[vararg_var] = tuple(extra_positional) - elif extra_positional: - raise TypeError("too many positional arguments") - if kwarg_var is not None: - multikw = set(extra) & set([x[0] for x in arg_spec]) - if multikw: - raise TypeError( - "got multiple values for keyword argument " + repr(next(iter(multikw))) - ) - values[kwarg_var] = extra - elif extra: - raise TypeError("got unexpected keyword argument " + repr(next(iter(extra)))) - return values - - -class ArgumentValidationError(ValueError): - - """Raised if :func:`validate_arguments` fails to validate""" - - def __init__(self, missing=None, extra=None, extra_positional=None): - self.missing = set(missing or ()) - self.extra = extra or {} - self.extra_positional = extra_positional or [] - ValueError.__init__( - self, - "function arguments invalid. (%d missing, %d additional)" - % (len(self.missing), len(self.extra) + len(self.extra_positional)), - ) - - -class ImportStringError(ImportError): - """Provides information about a failed :func:`import_string` attempt.""" - - #: String in dotted notation that failed to be imported. - import_name = None - #: Wrapped exception. - exception = None - - def __init__(self, import_name, exception): - self.import_name = import_name - self.exception = exception - - msg = ( - "import_string() failed for %r. Possible reasons are:\n\n" - "- missing __init__.py in a package;\n" - "- package or module path not included in sys.path;\n" - "- duplicated package or module name taking precedence in " - "sys.path;\n" - "- missing module, class, function or variable;\n\n" - "Debugged import:\n\n%s\n\n" - "Original exception:\n\n%s: %s" - ) - - name = "" - tracked = [] - for part in import_name.replace(":", ".").split("."): - name += (name and ".") + part - imported = import_string(name, silent=True) - if imported: - tracked.append((name, getattr(imported, "__file__", None))) - else: - track = ["- %r found in %r." % (n, i) for n, i in tracked] - track.append("- %r not found." 
% name) - msg = msg % ( - import_name, - "\n".join(track), - exception.__class__.__name__, - str(exception), - ) - break - - ImportError.__init__(self, msg) - - def __repr__(self): - return "<%s(%r, %r)>" % ( - self.__class__.__name__, - self.import_name, - self.exception, - ) diff --git a/azure/functions/_thirdparty/werkzeug/wsgi.py b/azure/functions/_thirdparty/werkzeug/wsgi.py deleted file mode 100644 index 807b462a..00000000 --- a/azure/functions/_thirdparty/werkzeug/wsgi.py +++ /dev/null @@ -1,1000 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.wsgi - ~~~~~~~~~~~~~ - - This module implements WSGI related helpers. - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import io -import re -from functools import partial -from functools import update_wrapper -from itertools import chain - -from ._compat import BytesIO -from ._compat import implements_iterator -from ._compat import make_literal_wrapper -from ._compat import string_types -from ._compat import text_type -from ._compat import to_bytes -from ._compat import to_unicode -from ._compat import try_coerce_native -from ._compat import wsgi_get_bytes -from ._internal import _encode_idna -from .urls import uri_to_iri -from .urls import url_join -from .urls import url_parse -from .urls import url_quote - - -def responder(f): - """Marks a function as responder. Decorate a function with it and it - will automatically call the return value as WSGI application. - - Example:: - - @responder - def application(environ, start_response): - return Response('Hello World!') - """ - return update_wrapper(lambda *a: f(*a)(*a[-2:]), f) - - -def get_current_url( - environ, - root_only=False, - strip_querystring=False, - host_only=False, - trusted_hosts=None, -): - """A handy helper function that recreates the full URL as IRI for the - current request or parts of it. Here's an example: - - >>> from werkzeug.test import create_environ - >>> env = create_environ("/?param=foo", "http://localhost/script") - >>> get_current_url(env) - 'http://localhost/script/?param=foo' - >>> get_current_url(env, root_only=True) - 'http://localhost/script/' - >>> get_current_url(env, host_only=True) - 'http://localhost/' - >>> get_current_url(env, strip_querystring=True) - 'http://localhost/script/' - - This optionally it verifies that the host is in a list of trusted hosts. - If the host is not in there it will raise a - :exc:`~werkzeug.exceptions.SecurityError`. - - Note that the string returned might contain unicode characters as the - representation is an IRI not an URI. If you need an ASCII only - representation you can use the :func:`~werkzeug.urls.iri_to_uri` - function: - - >>> from werkzeug.urls import iri_to_uri - >>> iri_to_uri(get_current_url(env)) - 'http://localhost/script/?param=foo' - - :param environ: the WSGI environment to get the current URL from. - :param root_only: set `True` if you only want the root URL. - :param strip_querystring: set to `True` if you don't want the querystring. - :param host_only: set to `True` if the host URL should be returned. - :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` - for more information. 
- """ - tmp = [environ["wsgi.url_scheme"], "://", get_host(environ, trusted_hosts)] - cat = tmp.append - if host_only: - return uri_to_iri("".join(tmp) + "/") - cat(url_quote(wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))).rstrip("/")) - cat("/") - if not root_only: - cat(url_quote(wsgi_get_bytes(environ.get("PATH_INFO", "")).lstrip(b"/"))) - if not strip_querystring: - qs = get_query_string(environ) - if qs: - cat("?" + qs) - return uri_to_iri("".join(tmp)) - - -def host_is_trusted(hostname, trusted_list): - """Checks if a host is trusted against a list. This also takes care - of port normalization. - - .. versionadded:: 0.9 - - :param hostname: the hostname to check - :param trusted_list: a list of hostnames to check against. If a - hostname starts with a dot it will match against - all subdomains as well. - """ - if not hostname: - return False - - if isinstance(trusted_list, string_types): - trusted_list = [trusted_list] - - def _normalize(hostname): - if ":" in hostname: - hostname = hostname.rsplit(":", 1)[0] - return _encode_idna(hostname) - - try: - hostname = _normalize(hostname) - except UnicodeError: - return False - for ref in trusted_list: - if ref.startswith("."): - ref = ref[1:] - suffix_match = True - else: - suffix_match = False - try: - ref = _normalize(ref) - except UnicodeError: - return False - if ref == hostname: - return True - if suffix_match and hostname.endswith(b"." + ref): - return True - return False - - -def get_host(environ, trusted_hosts=None): - """Return the host for the given WSGI environment. This first checks - the ``Host`` header. If it's not present, then ``SERVER_NAME`` and - ``SERVER_PORT`` are used. The host will only contain the port if it - is different than the standard port for the protocol. - - Optionally, verify that the host is trusted using - :func:`host_is_trusted` and raise a - :exc:`~werkzeug.exceptions.SecurityError` if it is not. - - :param environ: The WSGI environment to get the host from. - :param trusted_hosts: A list of trusted hosts. - :return: Host, with port if necessary. - :raise ~werkzeug.exceptions.SecurityError: If the host is not - trusted. - """ - if "HTTP_HOST" in environ: - rv = environ["HTTP_HOST"] - if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"): - rv = rv[:-3] - elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"): - rv = rv[:-4] - else: - rv = environ["SERVER_NAME"] - if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in ( - ("https", "443"), - ("http", "80"), - ): - rv += ":" + environ["SERVER_PORT"] - if trusted_hosts is not None: - if not host_is_trusted(rv, trusted_hosts): - from .exceptions import SecurityError - - raise SecurityError('Host "%s" is not trusted' % rv) - return rv - - -def get_content_length(environ): - """Returns the content length from the WSGI environment as - integer. If it's not available or chunked transfer encoding is used, - ``None`` is returned. - - .. versionadded:: 0.9 - - :param environ: the WSGI environ to fetch the content length from. - """ - if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked": - return None - - content_length = environ.get("CONTENT_LENGTH") - if content_length is not None: - try: - return max(0, int(content_length)) - except (ValueError, TypeError): - pass - - -def get_input_stream(environ, safe_fallback=True): - """Returns the input stream from the WSGI environment and wraps it - in the most sensible way possible. 
The stream returned is not the - raw WSGI stream in most cases but one that is safe to read from - without taking into account the content length. - - If content length is not set, the stream will be empty for safety reasons. - If the WSGI server supports chunked or infinite streams, it should set - the ``wsgi.input_terminated`` value in the WSGI environ to indicate that. - - .. versionadded:: 0.9 - - :param environ: the WSGI environ to fetch the stream from. - :param safe_fallback: use an empty stream as a safe fallback when the - content length is not set. Disabling this allows infinite streams, - which can be a denial-of-service risk. - """ - stream = environ["wsgi.input"] - content_length = get_content_length(environ) - - # A wsgi extension that tells us if the input is terminated. In - # that case we return the stream unchanged as we know we can safely - # read it until the end. - if environ.get("wsgi.input_terminated"): - return stream - - # If the request doesn't specify a content length, returning the stream is - # potentially dangerous because it could be infinite, malicious or not. If - # safe_fallback is true, return an empty stream instead for safety. - if content_length is None: - return BytesIO() if safe_fallback else stream - - # Otherwise limit the stream to the content length - return LimitedStream(stream, content_length) - - -def get_query_string(environ): - """Returns the `QUERY_STRING` from the WSGI environment. This also takes - care about the WSGI decoding dance on Python 3 environments as a - native string. The string returned will be restricted to ASCII - characters. - - .. versionadded:: 0.9 - - :param environ: the WSGI environment object to get the query string from. - """ - qs = wsgi_get_bytes(environ.get("QUERY_STRING", "")) - # QUERY_STRING really should be ascii safe but some browsers - # will send us some unicode stuff (I am looking at you IE). - # In that case we want to urllib quote it badly. - return try_coerce_native(url_quote(qs, safe=":&%=+$!*'(),")) - - -def get_path_info(environ, charset="utf-8", errors="replace"): - """Returns the `PATH_INFO` from the WSGI environment and properly - decodes it. This also takes care about the WSGI decoding dance - on Python 3 environments. if the `charset` is set to `None` a - bytestring is returned. - - .. versionadded:: 0.9 - - :param environ: the WSGI environment object to get the path from. - :param charset: the charset for the path info, or `None` if no - decoding should be performed. - :param errors: the decoding error handling. - """ - path = wsgi_get_bytes(environ.get("PATH_INFO", "")) - return to_unicode(path, charset, errors, allow_none_charset=True) - - -def get_script_name(environ, charset="utf-8", errors="replace"): - """Returns the `SCRIPT_NAME` from the WSGI environment and properly - decodes it. This also takes care about the WSGI decoding dance - on Python 3 environments. if the `charset` is set to `None` a - bytestring is returned. - - .. versionadded:: 0.9 - - :param environ: the WSGI environment object to get the path from. - :param charset: the charset for the path, or `None` if no - decoding should be performed. - :param errors: the decoding error handling. - """ - path = wsgi_get_bytes(environ.get("SCRIPT_NAME", "")) - return to_unicode(path, charset, errors, allow_none_charset=True) - - -def pop_path_info(environ, charset="utf-8", errors="replace"): - """Removes and returns the next segment of `PATH_INFO`, pushing it onto - `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. 
-
-    If the `charset` is set to `None` a bytestring is returned.
-
-    If there are empty segments (``'/foo//bar``) these are ignored but
-    properly pushed to the `SCRIPT_NAME`:
-
-    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
-    >>> pop_path_info(env)
-    'a'
-    >>> env['SCRIPT_NAME']
-    '/foo/a'
-    >>> pop_path_info(env)
-    'b'
-    >>> env['SCRIPT_NAME']
-    '/foo/a/b'
-
-    .. versionadded:: 0.5
-
-    .. versionchanged:: 0.9
-       The path is now decoded and a charset and encoding
-       parameter can be provided.
-
-    :param environ: the WSGI environment that is modified.
-    """
-    path = environ.get("PATH_INFO")
-    if not path:
-        return None
-
-    script_name = environ.get("SCRIPT_NAME", "")
-
-    # shift multiple leading slashes over
-    old_path = path
-    path = path.lstrip("/")
-    if path != old_path:
-        script_name += "/" * (len(old_path) - len(path))
-
-    if "/" not in path:
-        environ["PATH_INFO"] = ""
-        environ["SCRIPT_NAME"] = script_name + path
-        rv = wsgi_get_bytes(path)
-    else:
-        segment, path = path.split("/", 1)
-        environ["PATH_INFO"] = "/" + path
-        environ["SCRIPT_NAME"] = script_name + segment
-        rv = wsgi_get_bytes(segment)
-
-    return to_unicode(rv, charset, errors, allow_none_charset=True)
-
-
-def peek_path_info(environ, charset="utf-8", errors="replace"):
-    """Returns the next segment on the `PATH_INFO` or `None` if there
-    is none. Works like :func:`pop_path_info` without modifying the
-    environment:
-
-    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
-    >>> peek_path_info(env)
-    'a'
-    >>> peek_path_info(env)
-    'a'
-
-    If the `charset` is set to `None` a bytestring is returned.
-
-    .. versionadded:: 0.5
-
-    .. versionchanged:: 0.9
-       The path is now decoded and a charset and encoding
-       parameter can be provided.
-
-    :param environ: the WSGI environment that is checked.
-    """
-    segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1)
-    if segments:
-        return to_unicode(
-            wsgi_get_bytes(segments[0]), charset, errors, allow_none_charset=True
-        )
-
-
-def extract_path_info(
-    environ_or_baseurl,
-    path_or_url,
-    charset="utf-8",
-    errors="werkzeug.url_quote",
-    collapse_http_schemes=True,
-):
-    """Extracts the path info from the given URL (or WSGI environment) and
-    path. The path info returned is a unicode string, not a bytestring
-    suitable for a WSGI environment. The URLs might also be IRIs.
-
-    If the path info could not be determined, `None` is returned.
-
-    Some examples:
-
-    >>> extract_path_info('http://example.com/app', '/app/hello')
-    u'/hello'
-    >>> extract_path_info('http://example.com/app',
-    ...                   'https://example.com/app/hello')
-    u'/hello'
-    >>> extract_path_info('http://example.com/app',
-    ...                   'https://example.com/app/hello',
-    ...                   collapse_http_schemes=False) is None
-    True
-
-    Instead of providing a base URL you can also pass a WSGI environment.
-
-    :param environ_or_baseurl: a WSGI environment dict, a base URL or
-                               base IRI. This is the root of the
-                               application.
-    :param path_or_url: an absolute path from the server root, a
-                        relative path (in which case it's the path info)
-                        or a full URL. Also accepts IRIs and unicode
-                        parameters.
-    :param charset: the charset for byte data in URLs
-    :param errors: the error handling on decode
-    :param collapse_http_schemes: if set to `False` the algorithm does
-                                  not assume that http and https on the
-                                  same server point to the same
-                                  resource.
-
-    .. versionchanged:: 0.15
-        The ``errors`` parameter defaults to leaving invalid bytes
-        quoted instead of replacing them.
-
-    .. versionadded:: 0.6
-    """
-
-    def _normalize_netloc(scheme, netloc):
-        parts = netloc.split(u"@", 1)[-1].split(u":", 1)
-        if len(parts) == 2:
-            netloc, port = parts
-            if (scheme == u"http" and port == u"80") or (
-                scheme == u"https" and port == u"443"
-            ):
-                port = None
-        else:
-            netloc = parts[0]
-            port = None
-        if port is not None:
-            netloc += u":" + port
-        return netloc
-
-    # make sure whatever we are working on is a IRI and parse it
-    path = uri_to_iri(path_or_url, charset, errors)
-    if isinstance(environ_or_baseurl, dict):
-        environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
-    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
-    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
-    cur_scheme, cur_netloc, cur_path, = url_parse(url_join(base_iri, path))[:3]
-
-    # normalize the network location
-    base_netloc = _normalize_netloc(base_scheme, base_netloc)
-    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
-
-    # is that IRI even on a known HTTP scheme?
-    if collapse_http_schemes:
-        for scheme in base_scheme, cur_scheme:
-            if scheme not in (u"http", u"https"):
-                return None
-    else:
-        if not (base_scheme in (u"http", u"https") and base_scheme == cur_scheme):
-            return None
-
-    # are the netlocs compatible?
-    if base_netloc != cur_netloc:
-        return None
-
-    # are we below the application path?
-    base_path = base_path.rstrip(u"/")
-    if not cur_path.startswith(base_path):
-        return None
-
-    return u"/" + cur_path[len(base_path) :].lstrip(u"/")
-
-
-@implements_iterator
-class ClosingIterator(object):
-    """The WSGI specification requires that all middlewares and gateways
-    respect the `close` callback of the iterable returned by the application.
-    Because it is useful to add another close action to a returned iterable
-    and adding a custom iterable is a boring task this class can be used for
-    that::
-
-        return ClosingIterator(app(environ, start_response), [cleanup_session,
-                                                              cleanup_locals])
-
-    If there is just one close function it can be passed instead of the list.
-
-    A closing iterator is not needed if the application uses response objects
-    and finishes the processing if the response is started::
-
-        try:
-            return response(environ, start_response)
-        finally:
-            cleanup_session()
-            cleanup_locals()
-    """
-
-    def __init__(self, iterable, callbacks=None):
-        iterator = iter(iterable)
-        self._next = partial(next, iterator)
-        if callbacks is None:
-            callbacks = []
-        elif callable(callbacks):
-            callbacks = [callbacks]
-        else:
-            callbacks = list(callbacks)
-        iterable_close = getattr(iterable, "close", None)
-        if iterable_close:
-            callbacks.insert(0, iterable_close)
-        self._callbacks = callbacks
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        return self._next()
-
-    def close(self):
-        for callback in self._callbacks:
-            callback()
-
-
-def wrap_file(environ, file, buffer_size=8192):
-    """Wraps a file. This uses the WSGI server's file wrapper if available
-    or otherwise the generic :class:`FileWrapper`.
-
-    .. versionadded:: 0.5
-
-    If the file wrapper from the WSGI server is used it's important to not
-    iterate over it from inside the application but to pass it through
-    unchanged. If you want to pass out a file wrapper inside a response
-    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
-
-    More information about file wrappers are available in :pep:`333`.
-
-    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
-    :param buffer_size: number of bytes for one iteration.
- """ - return environ.get("wsgi.file_wrapper", FileWrapper)(file, buffer_size) - - -@implements_iterator -class FileWrapper(object): - """This class can be used to convert a :class:`file`-like object into - an iterable. It yields `buffer_size` blocks until the file is fully - read. - - You should not use this class directly but rather use the - :func:`wrap_file` function that uses the WSGI server's file wrapper - support if it's available. - - .. versionadded:: 0.5 - - If you're using this object together with a :class:`BaseResponse` you have - to use the `direct_passthrough` mode. - - :param file: a :class:`file`-like object with a :meth:`~file.read` method. - :param buffer_size: number of bytes for one iteration. - """ - - def __init__(self, file, buffer_size=8192): - self.file = file - self.buffer_size = buffer_size - - def close(self): - if hasattr(self.file, "close"): - self.file.close() - - def seekable(self): - if hasattr(self.file, "seekable"): - return self.file.seekable() - if hasattr(self.file, "seek"): - return True - return False - - def seek(self, *args): - if hasattr(self.file, "seek"): - self.file.seek(*args) - - def tell(self): - if hasattr(self.file, "tell"): - return self.file.tell() - return None - - def __iter__(self): - return self - - def __next__(self): - data = self.file.read(self.buffer_size) - if data: - return data - raise StopIteration() - - -@implements_iterator -class _RangeWrapper(object): - # private for now, but should we make it public in the future ? - - """This class can be used to convert an iterable object into - an iterable that will only yield a piece of the underlying content. - It yields blocks until the underlying stream range is fully read. - The yielded blocks will have a size that can't exceed the original - iterator defined block size, but that can be smaller. - - If you're using this object together with a :class:`BaseResponse` you have - to use the `direct_passthrough` mode. - - :param iterable: an iterable object with a :meth:`__next__` method. - :param start_byte: byte from which read will start. - :param byte_range: how many bytes to read. 
- """ - - def __init__(self, iterable, start_byte=0, byte_range=None): - self.iterable = iter(iterable) - self.byte_range = byte_range - self.start_byte = start_byte - self.end_byte = None - if byte_range is not None: - self.end_byte = self.start_byte + self.byte_range - self.read_length = 0 - self.seekable = hasattr(iterable, "seekable") and iterable.seekable() - self.end_reached = False - - def __iter__(self): - return self - - def _next_chunk(self): - try: - chunk = next(self.iterable) - self.read_length += len(chunk) - return chunk - except StopIteration: - self.end_reached = True - raise - - def _first_iteration(self): - chunk = None - if self.seekable: - self.iterable.seek(self.start_byte) - self.read_length = self.iterable.tell() - contextual_read_length = self.read_length - else: - while self.read_length <= self.start_byte: - chunk = self._next_chunk() - if chunk is not None: - chunk = chunk[self.start_byte - self.read_length :] - contextual_read_length = self.start_byte - return chunk, contextual_read_length - - def _next(self): - if self.end_reached: - raise StopIteration() - chunk = None - contextual_read_length = self.read_length - if self.read_length == 0: - chunk, contextual_read_length = self._first_iteration() - if chunk is None: - chunk = self._next_chunk() - if self.end_byte is not None and self.read_length >= self.end_byte: - self.end_reached = True - return chunk[: self.end_byte - contextual_read_length] - return chunk - - def __next__(self): - chunk = self._next() - if chunk: - return chunk - self.end_reached = True - raise StopIteration() - - def close(self): - if hasattr(self.iterable, "close"): - self.iterable.close() - - -def _make_chunk_iter(stream, limit, buffer_size): - """Helper for the line and chunk iter functions.""" - if isinstance(stream, (bytes, bytearray, text_type)): - raise TypeError( - "Passed a string or byte object instead of true iterator or stream." - ) - if not hasattr(stream, "read"): - for item in stream: - if item: - yield item - return - if not isinstance(stream, LimitedStream) and limit is not None: - stream = LimitedStream(stream, limit) - _read = stream.read - while 1: - item = _read(buffer_size) - if not item: - break - yield item - - -def make_line_iter(stream, limit=None, buffer_size=10 * 1024, cap_at_buffer=False): - """Safely iterates line-based over an input stream. If the input stream - is not a :class:`LimitedStream` the `limit` parameter is mandatory. - - This uses the stream's :meth:`~file.read` method internally as opposite - to the :meth:`~file.readline` method that is unsafe and can only be used - in violation of the WSGI specification. The same problem applies to the - `__iter__` function of the input stream which calls :meth:`~file.readline` - without arguments. - - If you need line-by-line processing it's strongly recommended to iterate - over the input stream using this helper function. - - .. versionchanged:: 0.8 - This function now ensures that the limit was reached. - - .. versionadded:: 0.9 - added support for iterators as input stream. - - .. versionadded:: 0.11.10 - added support for the `cap_at_buffer` parameter. - - :param stream: the stream or iterate to iterate over. - :param limit: the limit in bytes for the stream. (Usually - content length. Not necessary if the `stream` - is a :class:`LimitedStream`. - :param buffer_size: The optional buffer size. - :param cap_at_buffer: if this is set chunks are split if they are longer - than the buffer size. 
-                          than the buffer size. Internally this is implemented
-                          that the buffer size might be exhausted by a factor
-                          of two however.
-    """
-    _iter = _make_chunk_iter(stream, limit, buffer_size)
-
-    first_item = next(_iter, "")
-    if not first_item:
-        return
-
-    s = make_literal_wrapper(first_item)
-    empty = s("")
-    cr = s("\r")
-    lf = s("\n")
-    crlf = s("\r\n")
-
-    _iter = chain((first_item,), _iter)
-
-    def _iter_basic_lines():
-        _join = empty.join
-        buffer = []
-        while 1:
-            new_data = next(_iter, "")
-            if not new_data:
-                break
-            new_buf = []
-            buf_size = 0
-            for item in chain(buffer, new_data.splitlines(True)):
-                new_buf.append(item)
-                buf_size += len(item)
-                if item and item[-1:] in crlf:
-                    yield _join(new_buf)
-                    new_buf = []
-                elif cap_at_buffer and buf_size >= buffer_size:
-                    rv = _join(new_buf)
-                    while len(rv) >= buffer_size:
-                        yield rv[:buffer_size]
-                        rv = rv[buffer_size:]
-                    new_buf = [rv]
-            buffer = new_buf
-        if buffer:
-            yield _join(buffer)
-
-    # This hackery is necessary to merge 'foo\r' and '\n' into one item
-    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
-    previous = empty
-    for item in _iter_basic_lines():
-        if item == lf and previous[-1:] == cr:
-            previous += item
-            item = empty
-        if previous:
-            yield previous
-        previous = item
-    if previous:
-        yield previous
-
-
-def make_chunk_iter(
-    stream, separator, limit=None, buffer_size=10 * 1024, cap_at_buffer=False
-):
-    """Works like :func:`make_line_iter` but accepts a separator
-    which divides chunks. If you want newline based processing
-    you should use :func:`make_line_iter` instead as it
-    supports arbitrary newline markers.
-
-    .. versionadded:: 0.8
-
-    .. versionadded:: 0.9
-       added support for iterators as input stream.
-
-    .. versionadded:: 0.11.10
-       added support for the `cap_at_buffer` parameter.
-
-    :param stream: the stream or iterate to iterate over.
-    :param separator: the separator that divides chunks.
-    :param limit: the limit in bytes for the stream. (Usually
-                  content length. Not necessary if the `stream`
-                  is otherwise already limited).
-    :param buffer_size: The optional buffer size.
-    :param cap_at_buffer: if this is set chunks are split if they are longer
-                          than the buffer size. Internally this is implemented
-                          that the buffer size might be exhausted by a factor
-                          of two however.
-    """
-    _iter = _make_chunk_iter(stream, limit, buffer_size)
-
-    first_item = next(_iter, "")
-    if not first_item:
-        return
-
-    _iter = chain((first_item,), _iter)
-    if isinstance(first_item, text_type):
-        separator = to_unicode(separator)
-        _split = re.compile(r"(%s)" % re.escape(separator)).split
-        _join = u"".join
-    else:
-        separator = to_bytes(separator)
-        _split = re.compile(b"(" + re.escape(separator) + b")").split
-        _join = b"".join
-
-    buffer = []
-    while 1:
-        new_data = next(_iter, "")
-        if not new_data:
-            break
-        chunks = _split(new_data)
-        new_buf = []
-        buf_size = 0
-        for item in chain(buffer, chunks):
-            if item == separator:
-                yield _join(new_buf)
-                new_buf = []
-                buf_size = 0
-            else:
-                buf_size += len(item)
-                new_buf.append(item)
-
-                if cap_at_buffer and buf_size >= buffer_size:
-                    rv = _join(new_buf)
-                    while len(rv) >= buffer_size:
-                        yield rv[:buffer_size]
-                        rv = rv[buffer_size:]
-                    new_buf = [rv]
-                    buf_size = len(rv)
-
-        buffer = new_buf
-    if buffer:
-        yield _join(buffer)
-
-
-@implements_iterator
-class LimitedStream(io.IOBase):
-    """Wraps a stream so that it doesn't read more than n bytes. If the
-    stream is exhausted and the caller tries to get more bytes from it
-    :func:`on_exhausted` is called which by default returns an empty
-    string. The return value of that function is forwarded
-    to the reader function. So if it returns an empty string
-    :meth:`read` will return an empty string as well.
-
-    The limit however must never be higher than what the stream can
-    output. Otherwise :meth:`readlines` will try to read past the
-    limit.
-
-    .. admonition:: Note on WSGI compliance
-
-       calls to :meth:`readline` and :meth:`readlines` are not
-       WSGI compliant because it passes a size argument to the
-       readline methods. Unfortunately the WSGI PEP is not safely
-       implementable without a size argument to :meth:`readline`
-       because there is no EOF marker in the stream. As a result
-       of that the use of :meth:`readline` is discouraged.
-
-       For the same reason iterating over the :class:`LimitedStream`
-       is not portable. It internally calls :meth:`readline`.
-
-       We strongly suggest using :meth:`read` only or using the
-       :func:`make_line_iter` which safely iterates line-based
-       over a WSGI input stream.
-
-    :param stream: the stream to wrap.
-    :param limit: the limit for the stream, must not be longer than
-                  what the string can provide if the stream does not
-                  end with `EOF` (like `wsgi.input`)
-    """
-
-    def __init__(self, stream, limit):
-        self._read = stream.read
-        self._readline = stream.readline
-        self._pos = 0
-        self.limit = limit
-
-    def __iter__(self):
-        return self
-
-    @property
-    def is_exhausted(self):
-        """If the stream is exhausted this attribute is `True`."""
-        return self._pos >= self.limit
-
-    def on_exhausted(self):
-        """This is called when the stream tries to read past the limit.
-        The return value of this function is returned from the reading
-        function.
-        """
-        # Read null bytes from the stream so that we get the
-        # correct end of stream marker.
-        return self._read(0)
-
-    def on_disconnect(self):
-        """What should happen if a disconnect is detected? The return
-        value of this function is returned from read functions in case
-        the client went away. By default a
-        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
-        """
-        from .exceptions import ClientDisconnected
-
-        raise ClientDisconnected()
-
-    def exhaust(self, chunk_size=1024 * 64):
-        """Exhaust the stream. This consumes all the data left until the
-        limit is reached.
-
-        :param chunk_size: the size for a chunk. It will read the chunk
-                           until the stream is exhausted and throw away
-                           the results.
-        """
-        to_read = self.limit - self._pos
-        chunk = chunk_size
-        while to_read > 0:
-            chunk = min(to_read, chunk)
-            self.read(chunk)
-            to_read -= chunk
-
-    def read(self, size=None):
-        """Read `size` bytes or if size is not provided everything is read.
-
-        :param size: the number of bytes read.
-        """
-        if self._pos >= self.limit:
-            return self.on_exhausted()
-        if size is None or size == -1:  # -1 is for consistence with file
-            size = self.limit
-        to_read = min(self.limit - self._pos, size)
-        try:
-            read = self._read(to_read)
-        except (IOError, ValueError):
-            return self.on_disconnect()
-        if to_read and len(read) != to_read:
-            return self.on_disconnect()
-        self._pos += len(read)
-        return read
-
-    def readline(self, size=None):
-        """Reads one line from the stream."""
-        if self._pos >= self.limit:
-            return self.on_exhausted()
-        if size is None:
-            size = self.limit - self._pos
-        else:
-            size = min(size, self.limit - self._pos)
-        try:
-            line = self._readline(size)
-        except (ValueError, IOError):
-            return self.on_disconnect()
-        if size and not line:
-            return self.on_disconnect()
-        self._pos += len(line)
-        return line
-
-    def readlines(self, size=None):
-        """Reads a file into a list of strings. It calls :meth:`readline`
-        until the file is read to the end. It does support the optional
-        `size` argument if the underlaying stream supports it for
-        `readline`.
-        """
-        last_pos = self._pos
-        result = []
-        if size is not None:
-            end = min(self.limit, last_pos + size)
-        else:
-            end = self.limit
-        while 1:
-            if size is not None:
-                size -= last_pos - self._pos
-            if self._pos >= end:
-                break
-            result.append(self.readline(size))
-            if size is not None:
-                last_pos = self._pos
-        return result
-
-    def tell(self):
-        """Returns the position of the stream.
-
-        .. versionadded:: 0.9
-        """
-        return self._pos
-
-    def __next__(self):
-        line = self.readline()
-        if not line:
-            raise StopIteration()
-        return line
-
-    def readable(self):
-        return True
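With the vendored `_thirdparty.werkzeug` module deleted above, the same bounded-read guarantee now comes from the werkzeug dependency that the new pyproject.toml declares later in this diff. A minimal sketch, assuming the upstream werkzeug 3.x `LimitedStream` API:

```python
# Illustrative only: the vendored class above is removed because the SDK now
# depends on werkzeug from PyPI, whose LimitedStream offers the same behavior.
import io

from werkzeug.wsgi import LimitedStream

raw = io.BytesIO(b"hello world, plus trailing bytes the app must not see")
stream = LimitedStream(raw, 11)  # cap reads at the declared Content-Length

assert stream.read() == b"hello world"
assert stream.read() == b""  # exhausted: further reads return empty bytes
```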
- """ - if self._pos >= self.limit: - return self.on_exhausted() - if size is None or size == -1: # -1 is for consistence with file - size = self.limit - to_read = min(self.limit - self._pos, size) - try: - read = self._read(to_read) - except (IOError, ValueError): - return self.on_disconnect() - if to_read and len(read) != to_read: - return self.on_disconnect() - self._pos += len(read) - return read - - def readline(self, size=None): - """Reads one line from the stream.""" - if self._pos >= self.limit: - return self.on_exhausted() - if size is None: - size = self.limit - self._pos - else: - size = min(size, self.limit - self._pos) - try: - line = self._readline(size) - except (ValueError, IOError): - return self.on_disconnect() - if size and not line: - return self.on_disconnect() - self._pos += len(line) - return line - - def readlines(self, size=None): - """Reads a file into a list of strings. It calls :meth:`readline` - until the file is read to the end. It does support the optional - `size` argument if the underlaying stream supports it for - `readline`. - """ - last_pos = self._pos - result = [] - if size is not None: - end = min(self.limit, last_pos + size) - else: - end = self.limit - while 1: - if size is not None: - size -= last_pos - self._pos - if self._pos >= end: - break - result.append(self.readline(size)) - if size is not None: - last_pos = self._pos - return result - - def tell(self): - """Returns the position of the stream. - - .. versionadded:: 0.9 - """ - return self._pos - - def __next__(self): - line = self.readline() - if not line: - raise StopIteration() - return line - - def readable(self): - return True diff --git a/azure/functions/blob.py b/azure/functions/blob.py index b75500ab..7c34f1a9 100644 --- a/azure/functions/blob.py +++ b/azure/functions/blob.py @@ -113,7 +113,8 @@ def decode(cls, data: meta.Datum, *, trigger_metadata) -> Any: trigger_metadata, 'Properties', python_type=dict) if properties: blob_properties = properties - length = properties.get('Length') + length = properties.get('ContentLength') or \ + properties.get('Length') length = int(length) if length else None else: blob_properties = None diff --git a/azure/functions/decorators/blob.py b/azure/functions/decorators/blob.py index 1a2d4122..9ed4ce2f 100644 --- a/azure/functions/decorators/blob.py +++ b/azure/functions/decorators/blob.py @@ -17,7 +17,10 @@ def __init__(self, **kwargs): self.path = path self.connection = connection - self.source = source + if isinstance(source, BlobSource): + self.source = source.value + else: + self.source = source # type: ignore super().__init__(name=name, data_type=data_type) @staticmethod diff --git a/azure/functions/decorators/function_app.py b/azure/functions/decorators/function_app.py index 773bf5da..9c0b99f6 100644 --- a/azure/functions/decorators/function_app.py +++ b/azure/functions/decorators/function_app.py @@ -80,8 +80,9 @@ def __str__(self): return self.get_function_json() def __call__(self, *args, **kwargs): - """This would allow the Function object to be directly callable and runnable - directly using the interpreter locally. + """This would allow the Function object to be directly + callable and runnable directly using the interpreter + locally. Example: @app.route(route="http_trigger") @@ -332,8 +333,8 @@ def decorator(): return wrap def _get_durable_blueprint(self): - """Attempt to import the Durable Functions SDK from which DF decorators are - implemented. + """Attempt to import the Durable Functions SDK from which DF + decorators are implemented. 
""" try: @@ -3266,6 +3267,8 @@ def assistant_query_input(self, arg_name: str, id: str, timestamp_utc: str, + chat_storage_connection_setting: Optional[str] = "AzureWebJobsStorage", # noqa: E501 + collection_name: Optional[str] = "ChatState", # noqa: E501 data_type: Optional[ Union[DataType, str]] = None, **kwargs) \ @@ -3278,6 +3281,11 @@ def assistant_query_input(self, :param timestamp_utc: the timestamp of the earliest message in the chat history to fetch. The timestamp should be in ISO 8601 format - for example, 2023-08-01T00:00:00Z. + :param chat_storage_connection_setting: The configuration section name + for the table settings for assistant chat storage. The default value is + "AzureWebJobsStorage". + :param collection_name: The table collection name for assistant chat + storage. The default value is "ChatState". :param id: The ID of the Assistant to query. :param data_type: Defines how Functions runtime should treat the parameter value @@ -3295,6 +3303,8 @@ def decorator(): name=arg_name, id=id, timestamp_utc=timestamp_utc, + chat_storage_connection_setting=chat_storage_connection_setting, # noqa: E501 + collection_name=collection_name, data_type=parse_singular_param_to_enum(data_type, DataType), **kwargs)) @@ -3308,6 +3318,8 @@ def assistant_post_input(self, arg_name: str, id: str, user_message: str, model: Optional[str] = None, + chat_storage_connection_setting: Optional[str] = "AzureWebJobsStorage", # noqa: E501 + collection_name: Optional[str] = "ChatState", # noqa: E501 data_type: Optional[ Union[DataType, str]] = None, **kwargs) \ @@ -3321,6 +3333,11 @@ def assistant_post_input(self, arg_name: str, :param user_message: The user message that user has entered for assistant to respond to. :param model: The OpenAI chat model to use. + :param chat_storage_connection_setting: The configuration section name + for the table settings for assistant chat storage. The default value is + "AzureWebJobsStorage". + :param collection_name: The table collection name for assistant chat + storage. The default value is "ChatState". 
diff --git a/azure/functions/decorators/openai.py b/azure/functions/decorators/openai.py
index df459c1c..2563a78e 100644
--- a/azure/functions/decorators/openai.py
+++ b/azure/functions/decorators/openai.py
@@ -77,10 +77,14 @@ def __init__(self,
                  name: str,
                  id: str,
                  timestamp_utc: str,
+                 chat_storage_connection_setting: Optional[str] = "AzureWebJobsStorage",  # noqa: E501
+                 collection_name: Optional[str] = "ChatState",
                  data_type: Optional[DataType] = None,
                  **kwargs):
         self.id = id
         self.timestamp_utc = timestamp_utc
+        self.chat_storage_connection_setting = chat_storage_connection_setting
+        self.collection_name = collection_name
         super().__init__(name=name, data_type=data_type)
 
@@ -165,12 +169,16 @@ def __init__(self,
                  name: str,
                  id: str,
                  user_message: str,
                  model: Optional[str] = None,
+                 chat_storage_connection_setting: Optional[str] = "AzureWebJobsStorage",  # noqa: E501
+                 collection_name: Optional[str] = "ChatState",
                  data_type: Optional[DataType] = None,
                  **kwargs):
         self.name = name
         self.id = id
         self.user_message = user_message
         self.model = model
+        self.chat_storage_connection_setting = chat_storage_connection_setting
+        self.collection_name = collection_name
         super().__init__(name=name, data_type=data_type)
diff --git a/azure/functions/http.py b/azure/functions/http.py
index 61b303ec..734f43eb 100644
--- a/azure/functions/http.py
+++ b/azure/functions/http.py
@@ -10,7 +10,7 @@
 from azure.functions import _abc as azf_abc
 from azure.functions import _http as azf_http
 from . import meta
-from ._thirdparty.werkzeug.datastructures import Headers
+from werkzeug.datastructures import Headers
 
 
 class HttpRequest(azf_http.HttpRequest):
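The import swap is intended to be behavior-preserving for callers: werkzeug's public `Headers` is the same case-insensitive, multi-value mapping the vendored copy provided. A quick sanity check against the upstream API:

```python
from werkzeug.datastructures import Headers

headers = Headers()
headers.add("Set-Cookie", "a=1")
headers.add("Set-Cookie", "b=2")

assert headers.get("set-cookie") == "a=1"  # lookups ignore case
assert headers.get_all("Set-Cookie") == ["a=1", "b=2"]  # duplicates kept
```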
diff --git a/azure/functions/mysql.py b/azure/functions/mysql.py
new file mode 100644
index 00000000..06a04a56
--- /dev/null
+++ b/azure/functions/mysql.py
@@ -0,0 +1,78 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+import collections.abc
+import json
+import typing
+
+from azure.functions import _mysql as mysql
+
+from . import meta
+
+
+class MySqlConverter(meta.InConverter, meta.OutConverter,
+                     binding='mysql'):
+
+    @classmethod
+    def check_input_type_annotation(cls, pytype: type) -> bool:
+        return issubclass(pytype, mysql.BaseMySqlRowList)
+
+    @classmethod
+    def check_output_type_annotation(cls, pytype: type) -> bool:
+        return issubclass(pytype, (mysql.BaseMySqlRowList, mysql.BaseMySqlRow))
+
+    @classmethod
+    def decode(cls,
+               data: meta.Datum,
+               *,
+               trigger_metadata) -> typing.Optional[mysql.MySqlRowList]:
+        if data is None or data.type is None:
+            return None
+
+        data_type = data.type
+
+        if data_type in ['string', 'json']:
+            body = data.value
+
+        elif data_type == 'bytes':
+            body = data.value.decode('utf-8')
+
+        else:
+            raise NotImplementedError(
+                f'Unsupported payload type: {data_type}')
+
+        rows = json.loads(body)
+        if not isinstance(rows, list):
+            rows = [rows]
+
+        return mysql.MySqlRowList(
+            (None if row is None else mysql.MySqlRow.from_dict(row))
+            for row in rows)
+
+    @classmethod
+    def encode(cls, obj: typing.Any, *,
+               expected_type: typing.Optional[type]) -> meta.Datum:
+        if isinstance(obj, mysql.MySqlRow):
+            data = mysql.MySqlRowList([obj])
+
+        elif isinstance(obj, mysql.MySqlRowList):
+            data = obj
+
+        elif isinstance(obj, collections.abc.Iterable):
+            data = mysql.MySqlRowList()
+
+            for row in obj:
+                if not isinstance(row, mysql.MySqlRow):
+                    raise NotImplementedError(
+                        f'Unsupported list type: {type(obj)}, \
+                            lists must contain MySqlRow objects')
+                else:
+                    data.append(row)
+
+        else:
+            raise NotImplementedError(f'Unsupported type: {type(obj)}')
+
+        return meta.Datum(
+            type='json',
+            value=json.dumps([dict(d) for d in data])
+        )
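A round-trip sketch of the new converter, mirroring the assertions in tests/test_mysql.py later in this diff: `decode` turns the runtime's JSON `Datum` into a `MySqlRowList`, and `encode` serializes rows back into a JSON `Datum` for the output binding.

```python
import azure.functions as func
import azure.functions.mysql as mysql
from azure.functions.meta import Datum

# Decode a single JSON object into a one-element row list.
datum = Datum('{"id": "1", "name": "test"}', "json")
rows = mysql.MySqlConverter.decode(data=datum, trigger_metadata=None)
assert rows[0]["name"] == "test"

# Encode the rows back; the converter always emits a JSON-typed Datum.
encoded = mysql.MySqlConverter.encode(obj=rows, expected_type=None)
assert encoded.type == "json"
```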
diff --git a/eng/templates/build.yml b/eng/templates/build.yml
deleted file mode 100644
index 44603de8..00000000
--- a/eng/templates/build.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-jobs:
-    - job: "Build"
-      displayName: 'Build Python SDK'
-
-      pool:
-        name: 1es-pool-azfunc
-        image: 1es-ubuntu-22.04
-        os: linux
-
-      steps:
-        - task: UsePythonVersion@0
-          inputs:
-            versionSpec: "3.11"
-        - bash: |
-            python --version
-          displayName: 'Check python version'
-        - bash: |
-            python -m pip install -U pip
-            pip install twine wheel
-            python setup.py sdist bdist_wheel
-          displayName: 'Build Python SDK'
diff --git a/eng/templates/ci-tests.yml b/eng/templates/ci-tests.yml
deleted file mode 100644
index 2bee12f8..00000000
--- a/eng/templates/ci-tests.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-jobs:
-    - job: "TestPython"
-      displayName: "Run Python SDK Unit Tests"
-
-      strategy:
-        matrix:
-          python-37:
-            PYTHON_VERSION: '3.7'
-          python-38:
-            PYTHON_VERSION: '3.8'
-          python-39:
-            PYTHON_VERSION: '3.9'
-          python-310:
-            PYTHON_VERSION: '3.10'
-          python-311:
-            PYTHON_VERSION: '3.11'
-      steps:
-        - task: UsePythonVersion@0
-          inputs:
-            versionSpec: $(PYTHON_VERSION)
-        - bash: |
-            python -m pip install --upgrade pip
-            python -m pip install -U -e .[dev]
-          displayName: 'Install dependencies'
-        - bash: |
-            python -m pytest --cache-clear --cov=./azure --cov-report=xml --cov-branch tests
-          displayName: 'Test with pytest'
\ No newline at end of file
diff --git a/eng/templates/jobs/build.yml b/eng/templates/jobs/build.yml
index faf01484..00bdae5f 100644
--- a/eng/templates/jobs/build.yml
+++ b/eng/templates/jobs/build.yml
@@ -2,15 +2,38 @@ jobs:
     - job: "Build"
       displayName: 'Build Python SDK'
 
+      strategy:
+        matrix:
+          Python37:
+            PYTHON_VERSION: '3.7'
+          Python38:
+            PYTHON_VERSION: '3.8'
+          Python39:
+            PYTHON_VERSION: '3.9'
+          Python310:
+            PYTHON_VERSION: '3.10'
+          Python311:
+            PYTHON_VERSION: '3.11'
+          Python312:
+            PYTHON_VERSION: '3.12'
+
       steps:
         - task: UsePythonVersion@0
          inputs:
-            versionSpec: "3.11"
+            versionSpec: $(PYTHON_VERSION)
        - bash: |
            python --version
          displayName: 'Check python version'
        - bash: |
            python -m pip install -U pip
-            pip install twine wheel
-            python setup.py sdist bdist_wheel
-          displayName: 'Build Python SDK'
+            python -m pip install build
+            if [[ $(PYTHON_VERSION) == "3.7" ]]; then
+              python -m pip install importlib_metadata
+            fi
+            python -m build
+          displayName: 'Build Python SDK for $(PYTHON_VERSION)'
+        - bash: |
+            pip install pip-audit
+            pip-audit .
+          displayName: 'Run vulnerability scan'
+          condition: ne(variables['PYTHON_VERSION'], '3.7')
diff --git a/eng/templates/jobs/ci-tests.yml b/eng/templates/jobs/ci-tests.yml
index 2bee12f8..883486ff 100644
--- a/eng/templates/jobs/ci-tests.yml
+++ b/eng/templates/jobs/ci-tests.yml
@@ -14,6 +14,8 @@ jobs:
             PYTHON_VERSION: '3.10'
           python-311:
             PYTHON_VERSION: '3.11'
+          python-312:
+            PYTHON_VERSION: '3.12'
       steps:
         - task: UsePythonVersion@0
           inputs:
diff --git a/eng/templates/official/jobs/build-artifacts.yml b/eng/templates/official/jobs/build-artifacts.yml
index bb2171e6..37817855 100644
--- a/eng/templates/official/jobs/build-artifacts.yml
+++ b/eng/templates/official/jobs/build-artifacts.yml
@@ -23,6 +23,10 @@ jobs:
           displayName: 'Check python version'
         - bash: |
             python -m pip install -U pip
-            pip install twine wheel
-            python setup.py sdist bdist_wheel
+            python -m pip install build
+            python -m build
           displayName: 'Build Python SDK'
+        - bash: |
+            pip install pip-audit
+            pip-audit .
+          displayName: 'Run vulnerability scan'
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..68187961
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,51 @@
+[build-system]
+requires = ["setuptools >= 61.0", "wheel", "build"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "azure-functions"
+dynamic = ["version"]
+requires-python = ">=3.7"
+authors = [{ name = "Azure Functions team at Microsoft Corp.", email = "azurefunctions@microsoft.com" }]
+description = "Python library for Azure Functions."
+readme = "README.md"
+license = { text = "MIT License" }
+classifiers = [
+    'License :: OSI Approved :: MIT License',
+    'Intended Audience :: Developers',
+    'Programming Language :: Python :: 3',
+    'Programming Language :: Python :: 3.7',
+    'Programming Language :: Python :: 3.8',
+    'Programming Language :: Python :: 3.9',
+    'Programming Language :: Python :: 3.10',
+    'Programming Language :: Python :: 3.11',
+    'Operating System :: Microsoft :: Windows',
+    'Operating System :: POSIX',
+    'Operating System :: MacOS :: MacOS X',
+    'Environment :: Web Environment',
+    'Development Status :: 5 - Production/Stable',
+]
+dependencies = [
+    'werkzeug~=3.1.3; python_version >= "3.9"',
+    'werkzeug~=3.0.6; python_version == "3.8"',
+    'werkzeug; python_version < "3.8"'
+]
+[project.optional-dependencies]
+dev = [
+    'pytest',
+    'pytest-cov',
+    'coverage',
+    'pytest-instafail',
+    'pre-commit',
+    'azure-functions-durable',
+    'flake8~=4.0.1; python_version < "3.11"',
+    'flake8~=7.1.1; python_version >= "3.11"'
+]
+
+[tool.setuptools.packages.find]
+exclude = [
+    'azure', 'eng', 'docs', 'tests'
+]
+
+[tool.setuptools.dynamic]
+version = {attr = "azure.functions.__version__"}
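With the metadata moved into pyproject.toml and setup.py deleted just below, the single source of truth for the version is `azure.functions.__version__`, which `[tool.setuptools.dynamic]` points at. A quick consistency check for an installed wheel (on Python 3.7 the `importlib_metadata` backport that CI installs plays the same role):

```python
# Sketch: verifies the dynamic version wiring once the package is installed.
import importlib.metadata

import azure.functions

assert azure.functions.__version__ == importlib.metadata.version(
    "azure-functions")
```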
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 704dc48a..00000000
--- a/setup.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License.
-
-from setuptools import find_packages, setup
-from azure.functions import __version__
-
-EXTRA_REQUIRES = {
-    'dev': [
-        'flake8~=4.0.1',
-        'flake8-logging-format',
-        'mypy',
-        'pytest',
-        'pytest-cov',
-        'requests==2.*',
-        'coverage',
-        'azure-functions-durable'
-    ]
-}
-
-with open("README.md") as readme:
-    long_description = readme.read()
-
-setup(
-    name='azure-functions',
-    version=__version__,
-    description='Azure Functions for Python',
-    long_description=long_description,
-    long_description_content_type='text/markdown',
-    author='Microsoft Corporation',
-    author_email='azpysdkhelp@microsoft.com',
-    classifiers=[
-        'License :: OSI Approved :: MIT License',
-        'Intended Audience :: Developers',
-        'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
-        'Programming Language :: Python :: 3.8',
-        'Programming Language :: Python :: 3.9',
-        'Operating System :: Microsoft :: Windows',
-        'Operating System :: POSIX',
-        'Operating System :: MacOS :: MacOS X',
-        'Environment :: Web Environment',
-        'Development Status :: 5 - Production/Stable',
-    ],
-    license='MIT',
-    packages=find_packages(exclude=[
-        'azure', 'tests'
-    ]),
-    package_data={
-        'azure.functions': ['py.typed']
-    },
-    extras_require=EXTRA_REQUIRES,
-    include_package_data=True,
-    test_suite='tests'
-)
diff --git a/tests/decorators/test_blob.py b/tests/decorators/test_blob.py
index f8712b9f..bad8f99b 100644
--- a/tests/decorators/test_blob.py
+++ b/tests/decorators/test_blob.py
@@ -42,7 +42,7 @@ def test_blob_trigger_creation_with_default_specified_source(self):
             "name": "req",
             "dataType": DataType.UNDEFINED,
             "path": "dummy_path",
-            'source': BlobSource.LOGS_AND_CONTAINER_SCAN,
+            'source': 'LogsAndContainerScan',
             "connection": "dummy_connection"
         })
 
@@ -50,7 +50,7 @@ def test_blob_trigger_creation_with_source_as_string(self):
         trigger = BlobTrigger(name="req",
                               path="dummy_path",
                               connection="dummy_connection",
-                              source=BlobSource.EVENT_GRID,
+                              source="EventGrid",
                               data_type=DataType.UNDEFINED,
                               dummy_field="dummy")
 
@@ -62,7 +62,7 @@ def test_blob_trigger_creation_with_source_as_string(self):
             "name": "req",
             "dataType": DataType.UNDEFINED,
             "path": "dummy_path",
-            'source': BlobSource.EVENT_GRID,
+            'source': 'EventGrid',
             "connection": "dummy_connection"
         })
 
@@ -82,7 +82,7 @@ def test_blob_trigger_creation_with_source_as_enum(self):
             "name": "req",
             "dataType": DataType.UNDEFINED,
             "path": "dummy_path",
-            'source': BlobSource.EVENT_GRID,
+            'source': 'EventGrid',
             "connection": "dummy_connection"
         })
diff --git a/tests/decorators/test_decorators.py b/tests/decorators/test_decorators.py
index 82973ba3..acdd5ccd 100644
--- a/tests/decorators/test_decorators.py
+++ b/tests/decorators/test_decorators.py
@@ -1628,7 +1628,7 @@ def test_blob_input_binding():
                 "type": BLOB_TRIGGER,
                 "name": "req",
                 "path": "dummy_path",
-                "source": BlobSource.EVENT_GRID,
+                "source": 'EventGrid',
                 "connection": "dummy_conn"
             })
diff --git a/tests/decorators/test_eventgrid.py b/tests/decorators/test_eventgrid.py
index 15800002..170ca573 100644
--- a/tests/decorators/test_eventgrid.py
+++ b/tests/decorators/test_eventgrid.py
@@ -5,8 +5,9 @@
 from azure.functions.decorators.constants import EVENT_GRID_TRIGGER, EVENT_GRID
 from azure.functions.decorators.core import BindingDirection, \
     DataType
-from azure.functions.decorators.eventgrid import EventGridTrigger,\
-    EventGridOutput
+from azure.functions.decorators.eventgrid import (
+    EventGridTrigger,
+    EventGridOutput)
 
 
 class TestEventGrid(unittest.TestCase):
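The openai decorator tests that follow depend on the binding serialization rule that snake_case constructor arguments surface as camelCase keys in `get_dict_repr()`. A minimal sketch of that mapping; `to_camel_case` is defined here only for illustration (the SDK implements the conversion internally):

```python
def to_camel_case(snake: str) -> str:
    # 'chat_storage_connection_setting' -> 'chatStorageConnectionSetting'
    first, *rest = snake.split('_')
    return first + ''.join(part.title() for part in rest)


assert to_camel_case('collection_name') == 'collectionName'
assert (to_camel_case('chat_storage_connection_setting')
        == 'chatStorageConnectionSetting')
```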
diff --git a/tests/decorators/test_openai.py b/tests/decorators/test_openai.py
index f2ebdaca..c2009c72 100644
--- a/tests/decorators/test_openai.py
+++ b/tests/decorators/test_openai.py
@@ -57,6 +57,8 @@ def test_text_completion_input_valid_creation(self):
     def test_assistant_query_input_valid_creation(self):
         input = AssistantQueryInput(name="test",
                                     timestamp_utc="timestamp_utc",
+                                    chat_storage_connection_setting="AzureWebJobsStorage",  # noqa: E501
+                                    collection_name="ChatState",
                                     data_type=DataType.UNDEFINED,
                                     id="test_id",
                                     type="assistantQueryInput",
@@ -66,6 +68,8 @@ def test_assistant_query_input_valid_creation(self):
         self.assertEqual(input.get_dict_repr(),
                          {"name": "test",
                           "timestampUtc": "timestamp_utc",
+                          "chatStorageConnectionSetting": "AzureWebJobsStorage",  # noqa: E501
+                          "collectionName": "ChatState",
                           "dataType": DataType.UNDEFINED,
                           "direction": BindingDirection.IN,
                           "type": "assistantQuery",
@@ -111,6 +115,8 @@ def test_assistant_post_input_valid_creation(self):
         input = AssistantPostInput(name="test",
                                    id="test_id",
                                    model="test_model",
+                                   chat_storage_connection_setting="AzureWebJobsStorage",  # noqa: E501
+                                   collection_name="ChatState",
                                    user_message="test_message",
                                    data_type=DataType.UNDEFINED,
                                    dummy_field="dummy")
@@ -120,6 +126,8 @@ def test_assistant_post_input_valid_creation(self):
                          {"name": "test",
                           "id": "test_id",
                           "model": "test_model",
+                          "chatStorageConnectionSetting": "AzureWebJobsStorage",  # noqa: E501
+                          "collectionName": "ChatState",
                           "userMessage": "test_message",
                           "dataType": DataType.UNDEFINED,
                           "direction": BindingDirection.IN,
diff --git a/tests/test_blob.py b/tests/test_blob.py
index 3d9ed846..adb1f16a 100644
--- a/tests/test_blob.py
+++ b/tests/test_blob.py
@@ -84,7 +84,7 @@ def test_blob_input_with_metadata_no_blob_properties(self):
         self.assertEqual(result.metadata, None)
 
     def test_blob_input_with_metadata_no_trigger_metadata(self):
-        sample_blob_properties = '{"Length": "12"}'
+        sample_blob_properties = '{"ContentLength": "12"}'
         datum: Datum = Datum(value=b'blob_content', type='bytes')
         trigger_metadata: Dict[str, Any] = {
             'Properties': Datum(sample_blob_properties, 'json'),
@@ -97,7 +97,7 @@ def test_blob_input_with_metadata_no_trigger_metadata(self):
         # Verify result metadata
         self.assertIsInstance(result, InputStream)
         self.assertEqual(result.name, 'blob_trigger_name')
-        self.assertEqual(result.length, len(b'blob_content'))
+        self.assertEqual(result.length, 12)
         self.assertEqual(result.uri, 'https://test.io/blob_trigger')
         self.assertEqual(result.blob_properties,
                          json.loads(sample_blob_properties))
@@ -115,7 +115,7 @@ def test_blob_input_with_metadata_with_trigger_metadata(self):
             "LeaseStatus": 2,
             "LeaseState": 1,
             "LeaseDuration": 0,
-            "Length": "12"
+            "ContentLength": "12"
         }'''
         datum: Datum = Datum(value=b'blob_content', type='bytes')
         trigger_metadata: Dict[str, Any] = {
@@ -130,7 +130,7 @@ def test_blob_input_with_metadata_with_trigger_metadata(self):
         # Verify result metadata
         self.assertIsInstance(result, InputStream)
         self.assertEqual(result.name, 'blob_trigger_name')
-        self.assertEqual(result.length, len(b'blob_content'))
+        self.assertEqual(result.length, 12)
         self.assertEqual(result.uri, 'https://test.io/blob_trigger')
         self.assertEqual(result.blob_properties,
                          json.loads(sample_blob_properties))
@@ -139,7 +139,7 @@ def test_blob_input_with_metadata_with_trigger_metadata(self):
 
     def test_blob_input_with_metadata_with_incorrect_trigger_metadata(self):
         sample_metadata = 'Hello World'
-        sample_blob_properties = '''{"Length": "12"}'''
+        sample_blob_properties = '''{"ContentLength": "12"}'''
         datum: Datum = Datum(value=b'blob_content', type='bytes')
         trigger_metadata: Dict[str, Any] = {
             'Metadata': Datum(sample_metadata, 'string'),
@@ -153,7 +153,7 @@ def test_blob_input_with_metadata_with_incorrect_trigger_metadata(self):
         # Verify result metadata
         self.assertIsInstance(result, InputStream)
         self.assertEqual(result.name, 'blob_trigger_name')
-        self.assertEqual(result.length, len(b'blob_content'))
+        self.assertEqual(result.length, 12)
         self.assertEqual(result.uri, 'https://test.io/blob_trigger')
         self.assertEqual(result.blob_properties,
                          json.loads(sample_blob_properties))
@@ -228,3 +228,46 @@ def read(self) -> Datum:
 
         check_output_type = afb.BlobConverter.check_output_type_annotation
         self.assertTrue(check_output_type(CustomOutput))
+
+    def test_blob_input_with_metadata_with_length(self):
+        sample_blob_properties = '{"Length": "12"}'
+        datum: Datum = Datum(value=b'blob_content', type='bytes')
+        trigger_metadata: Dict[str, Any] = {
+            'Properties': Datum(sample_blob_properties, 'json')
+        }
+        result: InputStream = afb. \
+            BlobConverter.decode(data=datum, trigger_metadata=trigger_metadata)
+
+        # Verify result metadata
+        self.assertIsInstance(result, InputStream)
+        self.assertEqual(result.length, 12)
+
+    def test_blob_input_with_metadata_with_both_length(self):
+        sample_blob_properties = '''{
+            "ContentLength": "12",
+            "Length": "10"
+        }'''
+        datum: Datum = Datum(value=b'blob_content', type='bytes')
+        trigger_metadata: Dict[str, Any] = {
+            'Properties': Datum(sample_blob_properties, 'json')
+        }
+        result: InputStream = afb. \
+            BlobConverter.decode(data=datum, trigger_metadata=trigger_metadata)
+
+        # Verify result metadata.
+        # This should be 12, since we check for ContentLength first
+        self.assertIsInstance(result, InputStream)
+        self.assertEqual(result.length, 12)
+
+    def test_blob_input_with_metadata_with_no_length(self):
+        sample_blob_properties = '''{}'''
+        datum: Datum = Datum(value=b'blob_content', type='bytes')
+        trigger_metadata: Dict[str, Any] = {
+            'Properties': Datum(sample_blob_properties, 'json')
+        }
+        result: InputStream = afb. \
+            BlobConverter.decode(data=datum, trigger_metadata=trigger_metadata)
+
+        # Verify result metadata.
+        self.assertIsInstance(result, InputStream)
+        self.assertEqual(result.length, None)
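These tests pin down the precedence introduced by the blob.py change earlier in this diff. Restated as a standalone sketch: `ContentLength` wins when both keys are present, and a missing value yields `None` rather than a fabricated length.

```python
# Mirrors the resolution logic in BlobConverter.decode above.
def resolve_length(properties: dict):
    length = properties.get('ContentLength') or properties.get('Length')
    return int(length) if length else None


assert resolve_length({'ContentLength': '12', 'Length': '10'}) == 12
assert resolve_length({'Length': '12'}) == 12  # legacy key still honored
assert resolve_length({}) is None
```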
"12"}''' datum: Datum = Datum(value=b'blob_content', type='bytes') trigger_metadata: Dict[str, Any] = { 'Metadata': Datum(sample_metadata, 'string'), @@ -153,7 +153,7 @@ def test_blob_input_with_metadata_with_incorrect_trigger_metadata(self): # Verify result metadata self.assertIsInstance(result, InputStream) self.assertEqual(result.name, 'blob_trigger_name') - self.assertEqual(result.length, len(b'blob_content')) + self.assertEqual(result.length, 12) self.assertEqual(result.uri, 'https://test.io/blob_trigger') self.assertEqual(result.blob_properties, json.loads(sample_blob_properties)) @@ -228,3 +228,46 @@ def read(self) -> Datum: check_output_type = afb.BlobConverter.check_output_type_annotation self.assertTrue(check_output_type(CustomOutput)) + + def test_blob_input_with_metadata_with_length(self): + sample_blob_properties = '{"Length": "12"}' + datum: Datum = Datum(value=b'blob_content', type='bytes') + trigger_metadata: Dict[str, Any] = { + 'Properties': Datum(sample_blob_properties, 'json') + } + result: InputStream = afb. \ + BlobConverter.decode(data=datum, trigger_metadata=trigger_metadata) + + # Verify result metadata + self.assertIsInstance(result, InputStream) + self.assertEqual(result.length, 12) + + def test_blob_input_with_metadata_with_both_length(self): + sample_blob_properties = '''{ + "ContentLength": "12", + "Length": "10" + }''' + datum: Datum = Datum(value=b'blob_content', type='bytes') + trigger_metadata: Dict[str, Any] = { + 'Properties': Datum(sample_blob_properties, 'json') + } + result: InputStream = afb. \ + BlobConverter.decode(data=datum, trigger_metadata=trigger_metadata) + + # Verify result metadata. + # This should be 12, since we check for ContentLength first + self.assertIsInstance(result, InputStream) + self.assertEqual(result.length, 12) + + def test_blob_input_with_metadata_with_no_length(self): + sample_blob_properties = '''{}''' + datum: Datum = Datum(value=b'blob_content', type='bytes') + trigger_metadata: Dict[str, Any] = { + 'Properties': Datum(sample_blob_properties, 'json') + } + result: InputStream = afb. \ + BlobConverter.decode(data=datum, trigger_metadata=trigger_metadata) + + # Verify result metadata. + self.assertIsInstance(result, InputStream) + self.assertEqual(result.length, None) diff --git a/tests/test_mysql.py b/tests/test_mysql.py new file mode 100644 index 00000000..514c066a --- /dev/null +++ b/tests/test_mysql.py @@ -0,0 +1,293 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+
+import json
+import unittest
+
+import azure.functions as func
+import azure.functions.mysql as mysql
+from azure.functions.meta import Datum
+
+
+class TestMySql(unittest.TestCase):
+    def test_mysql_decode_none(self):
+        result: func.MySqlRowList = mysql.MySqlConverter.decode(
+            data=None, trigger_metadata=None)
+        self.assertIsNone(result)
+
+    def test_mysql_decode_string(self):
+        datum: Datum = Datum("""
+        {
+            "id": "1",
+            "name": "test"
+        }
+        """, "string")
+        result: func.MySqlRowList = mysql.MySqlConverter.decode(
+            data=datum, trigger_metadata=None)
+        self.assertIsNotNone(result,
+                             'MySqlRowList should be non-None')
+        self.assertEqual(len(result),
+                         1,
+                         'MySqlRowList should have exactly 1 item')
+        self.assertEqual(result[0]['id'],
+                         '1',
+                         'MySqlRow item should have id 1')
+        self.assertEqual(result[0]['name'],
+                         'test',
+                         'MySqlRow item should have name test')
+
+    def test_mysql_decode_bytes(self):
+        datum: Datum = Datum("""
+        {
+            "id": "1",
+            "name": "test"
+        }
+        """.encode(), "bytes")
+        result: func.MySqlRowList = mysql.MySqlConverter.decode(
+            data=datum, trigger_metadata=None)
+        self.assertIsNotNone(result,
+                             'MySqlRowList should be non-None')
+        self.assertEqual(len(result),
+                         1,
+                         'MySqlRowList should have exactly 1 item')
+        self.assertEqual(result[0]['id'],
+                         '1',
+                         'MySqlRow item should have id 1')
+        self.assertEqual(result[0]['name'],
+                         'test',
+                         'MySqlRow item should have name test')
+
+    def test_mysql_decode_json(self):
+        datum: Datum = Datum("""
+        {
+            "id": "1",
+            "name": "test"
+        }
+        """, "json")
+        result: func.MySqlRowList = mysql.MySqlConverter.decode(
+            data=datum, trigger_metadata=None)
+        self.assertIsNotNone(result,
+                             'MySqlRowList should be non-None')
+        self.assertEqual(len(result),
+                         1,
+                         'MySqlRowList should have exactly 1 item')
+        self.assertEqual(result[0]['id'],
+                         '1',
+                         'MySqlRow item should have id 1')
+        self.assertEqual(result[0]['name'],
+                         'test',
+                         'MySqlRow item should have name test')
+
+    def test_mysql_decode_json_name_is_null(self):
+        datum: Datum = Datum("""
+        {
+            "id": "1",
+            "name": null
+        }
+        """, "json")
+        result: func.MySqlRowList = mysql.MySqlConverter.decode(
+            data=datum, trigger_metadata=None)
+        self.assertIsNotNone(result,
+                             'MySqlRowList itself should be non-None')
+        self.assertEqual(len(result),
+                         1,
+                         'MySqlRowList should have exactly 1 item')
+        self.assertEqual(result[0]['name'],
+                         None,
+                         'Item in MySqlRowList should be None')
+
+    def test_mysql_decode_json_multiple_entries(self):
+        datum: Datum = Datum("""
+        [
+            {
+                "id": "1",
+                "name": "test1"
+            },
+            {
+                "id": "2",
+                "name": "test2"
+            }
+        ]
+        """, "json")
+        result: func.MySqlRowList = mysql.MySqlConverter.decode(
+            data=datum, trigger_metadata=None)
+        self.assertIsNotNone(result)
+        self.assertEqual(len(result),
+                         2,
+                         'MySqlRowList should have exactly 2 items')
+        self.assertEqual(result[0]['id'],
+                         '1',
+                         'First MySqlRowList item should have id 1')
+        self.assertEqual(result[0]['name'],
+                         'test1',
+                         'First MySqlRowList item should have name test1')
+        self.assertEqual(result[1]['id'],
+                         '2',
+                         'First MySqlRowList item should have id 2')
+        self.assertEqual(result[1]['name'],
+                         'test2',
+                         'Second MySqlRowList item should have name test2')
+
+    def test_mysql_decode_json_multiple_nulls(self):
+        datum: Datum = Datum("[null]", "json")
+        result: func.MySqlRowList = mysql.MySqlConverter.decode(
+            data=datum, trigger_metadata=None)
+        self.assertIsNotNone(result)
+        self.assertEqual(len(result),
+                         1,
+                         'MySqlRowList should have exactly 1 item')
+        self.assertEqual(result[0],
+                         None,
+                         'MySqlRow item should be None')
+
+    def test_mysql_encode_mysqlrow(self):
+        mysqlRow = func.MySqlRow.from_json("""
+        {
+            "id": "1",
+            "name": "test"
+        }
+        """)
+        datum = mysql.MySqlConverter.encode(obj=mysqlRow, expected_type=None)
+        self.assertEqual(datum.type,
+                         'json',
+                         'Datum type should be JSON')
+        self.assertEqual(len(datum.python_value),
+                         1,
+                         'Encoded value should be list of length 1')
+        self.assertEqual(datum.python_value[0]['id'],
+                         '1',
+                         'id should be 1')
+        self.assertEqual(datum.python_value[0]['name'],
+                         'test',
+                         'name should be test')
+
+    def test_mysql_encode_mysqlrowlist(self):
+        datum: Datum = Datum("""
+        {
+            "id": "1",
+            "name": "test"
+        }
+        """, "json")
+        mysqlRowList: func.MySqlRowList = mysql.MySqlConverter.decode(
+            data=datum, trigger_metadata=None)
+        datum = mysql.MySqlConverter.encode(
+            obj=mysqlRowList, expected_type=None)
+        self.assertEqual(datum.type,
+                         'json',
+                         'Datum type should be JSON')
+        self.assertEqual(len(datum.python_value),
+                         1,
+                         'Encoded value should be list of length 1')
+        self.assertEqual(datum.python_value[0]['id'],
+                         '1',
+                         'id should be 1')
+        self.assertEqual(datum.python_value[0]['name'],
+                         'test',
+                         'name should be test')
+
+    def test_mysql_encode_list_of_mysqlrows(self):
+        mysqlRows = [
+            func.MySqlRow.from_json("""
+            {
+                "id": "1",
+                "name": "test"
+            }
+            """),
+            func.MySqlRow.from_json("""
+            {
+                "id": "2",
+                "name": "test2"
+            }
+            """)
+        ]
+        datum = mysql.MySqlConverter.encode(obj=mysqlRows, expected_type=None)
+        self.assertEqual(datum.type,
+                         'json',
+                         'Datum type should be JSON')
+        self.assertEqual(len(datum.python_value),
+                         2,
+                         'Encoded value should be list of length 2')
+        self.assertEqual(datum.python_value[0]['id'],
+                         '1',
+                         'id should be 1')
+        self.assertEqual(datum.python_value[0]['name'],
+                         'test',
+                         'name should be test')
+        self.assertEqual(datum.python_value[1]['id'],
+                         '2',
+                         'id should be 2')
+        self.assertEqual(datum.python_value[1]['name'],
+                         'test2',
+                         'name should be test2')
+
+    def test_mysql_encode_list_of_str_raises(self):
+        strList = [
+            """
+            {
+                "id": "1",
+                "name": "test"
+            }
+            """
+        ]
+        self.assertRaises(NotImplementedError,
+                          mysql.MySqlConverter.encode,
+                          obj=strList,
+                          expected_type=None)
+
+    def test_mysql_encode_list_of_mysqlrowlist_raises(self):
+        datum: Datum = Datum("""
+        {
+            "id": "1",
+            "name": "test"
+        }
+        """, "json")
+        mysqlRowListList = [
+            mysql.MySqlConverter.decode(
+                data=datum, trigger_metadata=None)
+        ]
+        self.assertRaises(NotImplementedError,
+                          mysql.MySqlConverter.encode,
+                          obj=mysqlRowListList,
+                          expected_type=None)
+
+    def test_mysql_input_type(self):
+        check_input_type = mysql.MySqlConverter.check_input_type_annotation
+        self.assertTrue(check_input_type(func.MySqlRowList),
+                        'MySqlRowList should be accepted')
+        self.assertFalse(check_input_type(func.MySqlRow),
+                         'MySqlRow should not be accepted')
+        self.assertFalse(check_input_type(str),
+                         'str should not be accepted')
+
+    def test_mysql_output_type(self):
+        check_output_type = mysql.MySqlConverter.check_output_type_annotation
+        self.assertTrue(check_output_type(func.MySqlRowList),
+                        'MySqlRowList should be accepted')
+        self.assertTrue(check_output_type(func.MySqlRow),
+                        'MySqlRow should be accepted')
+        self.assertFalse(check_output_type(str),
+                         'str should not be accepted')
+
+    def test_mysqlrow_json(self):
+        # Parse MySqlRow from JSON
+        mysqlRow = func.MySqlRow.from_json("""
+        {
+            "id": "1",
+            "name": "test"
+        }
+        """)
+        self.assertEqual(mysqlRow['id'],
+                         '1',
+                         'Parsed MySqlRow id should be 1')
+        self.assertEqual(mysqlRow['name'],
+                         'test',
+                         'Parsed MySqlRow name should be test')
+
+        # Parse JSON from MySqlRow
+        mysqlRowJson = json.loads(func.MySqlRow.to_json(mysqlRow))
+        self.assertEqual(mysqlRowJson['id'],
+                         '1',
+                         'Parsed JSON id should be 1')
+        self.assertEqual(mysqlRowJson['name'],
+                         'test',
+                         'Parsed JSON name should be test')
diff --git a/tests/test_servicebus.py b/tests/test_servicebus.py
index b02635de..88592f7f 100644
--- a/tests/test_servicebus.py
+++ b/tests/test_servicebus.py
@@ -648,11 +648,12 @@ def _generate_single_trigger_metadata(self) -> Dict[str, meta.Datum]:
                 "ServerBusyExceptionMessage": null
             }
         }''')
-        mocked_metadata['ApplicationProperties'] = meta.Datum(type='json', value='''
+        mocked_metadata['ApplicationProperties'] = (
+            meta.Datum(type='json', value='''
         {
             "application": "value"
         }
-        ''')
+        '''))
 
         mocked_metadata['UserProperties'] = meta.Datum(type='json', value='''
         {
             "$AzureWebJobsParentId": "6ceef68b-0794-45dd-bb2e-630748515552",