mirror of
https://github.com/datafolklabs/cement.git
synced 2026-02-06 11:37:06 +00:00
remove cli/contrib
This commit is contained in:
parent
5d1e32bbf6
commit
92cf147a64
@ -1,28 +0,0 @@
|
|||||||
Copyright 2007 Pallets
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
1. Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
|
|
||||||
2. Redistributions in binary form must reproduce the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer in the
|
|
||||||
documentation and/or other materials provided with the distribution.
|
|
||||||
|
|
||||||
3. Neither the name of the copyright holder nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
|
||||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
||||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
||||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
||||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
||||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
@ -1,37 +0,0 @@
|
|||||||
"""Jinja is a template engine written in pure Python. It provides a
|
|
||||||
non-XML syntax that supports inline expressions and an optional
|
|
||||||
sandboxed environment.
|
|
||||||
"""
|
|
||||||
from .bccache import BytecodeCache as BytecodeCache
|
|
||||||
from .bccache import FileSystemBytecodeCache as FileSystemBytecodeCache
|
|
||||||
from .bccache import MemcachedBytecodeCache as MemcachedBytecodeCache
|
|
||||||
from .environment import Environment as Environment
|
|
||||||
from .environment import Template as Template
|
|
||||||
from .exceptions import TemplateAssertionError as TemplateAssertionError
|
|
||||||
from .exceptions import TemplateError as TemplateError
|
|
||||||
from .exceptions import TemplateNotFound as TemplateNotFound
|
|
||||||
from .exceptions import TemplateRuntimeError as TemplateRuntimeError
|
|
||||||
from .exceptions import TemplatesNotFound as TemplatesNotFound
|
|
||||||
from .exceptions import TemplateSyntaxError as TemplateSyntaxError
|
|
||||||
from .exceptions import UndefinedError as UndefinedError
|
|
||||||
from .loaders import BaseLoader as BaseLoader
|
|
||||||
from .loaders import ChoiceLoader as ChoiceLoader
|
|
||||||
from .loaders import DictLoader as DictLoader
|
|
||||||
from .loaders import FileSystemLoader as FileSystemLoader
|
|
||||||
from .loaders import FunctionLoader as FunctionLoader
|
|
||||||
from .loaders import ModuleLoader as ModuleLoader
|
|
||||||
from .loaders import PackageLoader as PackageLoader
|
|
||||||
from .loaders import PrefixLoader as PrefixLoader
|
|
||||||
from .runtime import ChainableUndefined as ChainableUndefined
|
|
||||||
from .runtime import DebugUndefined as DebugUndefined
|
|
||||||
from .runtime import make_logging_undefined as make_logging_undefined
|
|
||||||
from .runtime import StrictUndefined as StrictUndefined
|
|
||||||
from .runtime import Undefined as Undefined
|
|
||||||
from .utils import clear_caches as clear_caches
|
|
||||||
from .utils import is_undefined as is_undefined
|
|
||||||
from .utils import pass_context as pass_context
|
|
||||||
from .utils import pass_environment as pass_environment
|
|
||||||
from .utils import pass_eval_context as pass_eval_context
|
|
||||||
from .utils import select_autoescape as select_autoescape
|
|
||||||
|
|
||||||
__version__ = "3.1.2"
|
|
||||||
@ -1,6 +0,0 @@
|
|||||||
import re

# Pattern matching runs of identifier characters, including the non-ASCII
# combining marks and connector characters that Python allows in names.
# Auto-generated — do not edit by hand:
# generated by scripts/generate_identifier_pattern.py
pattern = re.compile(
    r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߽߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛࣓-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣ৾ਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣૺ-૿ଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఄా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഀ-ഃ഻഼ാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳷-᳹᷀-᷹᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꣿꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𐴤-𐽆𐴧-𐽐𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑄴𑅅𑅆𑅳𑆀-𑆂𑆳-𑇀𑇉-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌻𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑑞𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑠬-𑠺𑨁-𑨊𑨳-𑨹𑨻-𑨾𑩇𑩑-𑩛𑪊-𑪙𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𑴱-𑴶𑴺𑴼𑴽𑴿-𑵅𑵇𑶊-𑶎𑶐𑶑𑶓-𑶗𑻳-𑻶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+"  # noqa: B950
)
|
|
||||||
@ -1,84 +0,0 @@
|
|||||||
import inspect
|
|
||||||
import typing as t
|
|
||||||
from functools import WRAPPER_ASSIGNMENTS
|
|
||||||
from functools import wraps
|
|
||||||
|
|
||||||
from .utils import _PassArg
|
|
||||||
from .utils import pass_eval_context
|
|
||||||
|
|
||||||
V = t.TypeVar("V")
|
|
||||||
|
|
||||||
|
|
||||||
def async_variant(normal_func):  # type: ignore
    """Decorator factory pairing a sync implementation with an async one.

    ``async_variant(sync_func)(async_func)`` returns a wrapper that picks
    the async implementation when the environment is in async mode and the
    sync implementation otherwise.
    """

    def decorator(async_func):  # type: ignore
        pass_arg = _PassArg.from_obj(normal_func)
        need_eval_context = pass_arg is None

        # The async flag lives on the environment; depending on what the
        # wrapped function receives first, reach it directly or via the
        # ``environment`` attribute of the first argument.
        if pass_arg is _PassArg.environment:

            def is_async(args: t.Any) -> bool:
                return t.cast(bool, args[0].is_async)

        else:

            def is_async(args: t.Any) -> bool:
                return t.cast(bool, args[0].environment.is_async)

        # Take the doc and annotations from the sync function, but the
        # name from the async function. Pallets-Sphinx-Themes
        # build_function_directive expects __wrapped__ to point to the
        # sync function.
        async_func_attrs = ("__module__", "__name__", "__qualname__")
        normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs))

        @wraps(normal_func, assigned=normal_func_attrs)
        @wraps(async_func, assigned=async_func_attrs, updated=())
        def wrapper(*args, **kwargs):  # type: ignore
            use_async = is_async(args)

            # The eval context was only needed to answer is_async();
            # strip it before delegating.
            if need_eval_context:
                args = args[1:]

            if use_async:
                return async_func(*args, **kwargs)

            return normal_func(*args, **kwargs)

        if need_eval_context:
            wrapper = pass_eval_context(wrapper)

        wrapper.jinja_async_variant = True
        return wrapper

    return decorator
|
|
||||||
# Built-in types that can never be awaitable; lets auto_await skip the
# comparatively expensive inspect.isawaitable() call for common values.
_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)}


async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V":
    """Await *value* if it is awaitable, otherwise return it unchanged."""
    if type(value) not in _common_primitives and inspect.isawaitable(value):
        return await t.cast("t.Awaitable[V]", value)

    return t.cast("V", value)
|
|
||||||
async def auto_aiter(
    iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> "t.AsyncIterator[V]":
    """Adapt a sync *or* async iterable into an async iterator."""
    if not hasattr(iterable, "__aiter__"):
        # Plain sync iterable: re-yield each item through the async generator.
        for element in t.cast("t.Iterable[V]", iterable):
            yield element
    else:
        async for element in t.cast("t.AsyncIterable[V]", iterable):
            yield element


async def auto_to_list(
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> t.List["V"]:
    """Fully consume a sync or async iterable into a list."""
    return [element async for element in auto_aiter(value)]
@ -1,406 +0,0 @@
|
|||||||
"""The optional bytecode cache system. This is useful if you have very
|
|
||||||
complex template situations and the compilation of all those templates
|
|
||||||
slows down your application too much.
|
|
||||||
|
|
||||||
Situations where this is useful are often forking web applications that
|
|
||||||
are initialized on the first request.
|
|
||||||
"""
|
|
||||||
import errno
|
|
||||||
import fnmatch
|
|
||||||
import marshal
|
|
||||||
import os
|
|
||||||
import pickle
|
|
||||||
import stat
|
|
||||||
import sys
|
|
||||||
import tempfile
|
|
||||||
import typing as t
|
|
||||||
from hashlib import sha1
|
|
||||||
from io import BytesIO
|
|
||||||
from types import CodeType
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
    import typing_extensions as te

    from .environment import Environment

    class _MemcachedClient(te.Protocol):
        """Structural type for the minimal memcached client interface
        accepted by :class:`MemcachedBytecodeCache` (``get``/``set``).
        Only used for static type checking, never at runtime.
        """

        def get(self, key: str) -> bytes:
            ...

        def set(self, key: str, value: bytes, timeout: t.Optional[int] = None) -> None:
            ...
|
|
||||||
|
|
||||||
# Version of the bytecode cache format; bump to invalidate older caches.
bc_version = 5

# Magic bytes to identify Jinja bytecode cache files. Contains the
# Python major and minor version to avoid loading incompatible bytecode
# if a project upgrades its Python version.
_python_version_tag = (sys.version_info[0] << 24) | sys.version_info[1]
bc_magic = b"j2" + pickle.dumps(bc_version, 2) + pickle.dumps(_python_version_tag, 2)
|
|
||||||
|
|
||||||
|
|
||||||
class Bucket:
    """Container for the compiled bytecode of a single template.

    Buckets are created and initialized by the bytecode cache and handed
    to the loading functions.  Each bucket carries the checksum assigned
    by the cache and uses it to reject outdated cache material, so
    individual bytecode cache subclasses never have to deal with cache
    invalidation themselves.
    """

    def __init__(self, environment: "Environment", key: str, checksum: str) -> None:
        self.environment = environment
        self.key = key
        self.checksum = checksum
        self.reset()

    def reset(self) -> None:
        """Forget any loaded bytecode."""
        self.code: t.Optional[CodeType] = None

    def load_bytecode(self, f: t.BinaryIO) -> None:
        """Load bytecode from a file or file-like object."""
        # Anything without our magic header is not (compatible) cache data.
        if f.read(len(bc_magic)) != bc_magic:
            self.reset()
            return

        # A checksum mismatch means the template source has changed.
        if pickle.load(f) != self.checksum:
            self.reset()
            return

        # A failed marshal load simply invalidates the bucket so the
        # template gets recompiled.
        try:
            self.code = marshal.load(f)
        except (EOFError, ValueError, TypeError):
            self.reset()

    def write_bytecode(self, f: t.IO[bytes]) -> None:
        """Dump the bytecode into the given binary file-like object."""
        if self.code is None:
            raise TypeError("can't write empty bucket")

        f.write(bc_magic)
        pickle.dump(self.checksum, f, 2)
        marshal.dump(self.code, f)

    def bytecode_from_string(self, string: bytes) -> None:
        """Load bytecode from bytes."""
        self.load_bytecode(BytesIO(string))

    def bytecode_to_string(self) -> bytes:
        """Return the bytecode as bytes."""
        buffer = BytesIO()
        self.write_bytecode(buffer)
        return buffer.getvalue()
|
|
||||||
|
|
||||||
|
|
||||||
class BytecodeCache:
    """Abstract base class for bytecode caches.

    To implement your own bytecode cache you have to subclass this class
    and override :meth:`load_bytecode` and :meth:`dump_bytecode`.  Both of
    these methods are passed a :class:`~jinja2.bccache.Bucket`.

    A very basic bytecode cache that saves the bytecode on the file system::

        from os import path

        class MyCache(BytecodeCache):

            def __init__(self, directory):
                self.directory = directory

            def load_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                if path.exists(filename):
                    with open(filename, 'rb') as f:
                        bucket.load_bytecode(f)

            def dump_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                with open(filename, 'wb') as f:
                    bucket.write_bytecode(f)

    A more advanced version of a filesystem based bytecode cache is part of
    Jinja.
    """

    def load_bytecode(self, bucket: "Bucket") -> None:
        """Subclasses have to override this method to load bytecode into a
        bucket.  If they are not able to find code in the cache for the
        bucket, it must not do anything.
        """
        raise NotImplementedError()

    def dump_bytecode(self, bucket: "Bucket") -> None:
        """Subclasses have to override this method to write the bytecode
        from a bucket back to the cache.  If it is unable to do so it must
        not fail silently but raise an exception.
        """
        raise NotImplementedError()

    def clear(self) -> None:
        """Clears the cache.  This method is not used by Jinja but should be
        implemented to allow applications to clear the bytecode cache used
        by a particular environment.  The default implementation is a no-op.
        """

    def get_cache_key(self, name: str, filename: t.Optional[str] = None) -> str:
        """Return the unique hash key for this template name and filename.

        :param name: The template name.
        :param filename: Optional template filename; when given it is
            folded into the key so that two templates sharing a name but
            living in different files get distinct cache entries.
        """
        hash = sha1(name.encode("utf-8"))

        if filename is not None:
            # Bug fix: previously a literal placeholder string was hashed
            # here instead of the filename, so all filenames produced the
            # same key and distinct templates collided in the cache.
            hash.update(f"|{filename}".encode())

        return hash.hexdigest()

    def get_source_checksum(self, source: str) -> str:
        """Return a checksum for the template source."""
        return sha1(source.encode("utf-8")).hexdigest()

    def get_bucket(
        self,
        environment: "Environment",
        name: str,
        filename: t.Optional[str],
        source: str,
    ) -> "Bucket":
        """Return a cache bucket for the given template.  All arguments are
        mandatory but filename may be `None`.
        """
        key = self.get_cache_key(name, filename)
        checksum = self.get_source_checksum(source)
        bucket = Bucket(environment, key, checksum)
        # Populate the bucket from the cache backend (no-op on a miss).
        self.load_bytecode(bucket)
        return bucket

    def set_bucket(self, bucket: "Bucket") -> None:
        """Put the bucket into the cache."""
        self.dump_bytecode(bucket)
|
|
||||||
|
|
||||||
|
|
||||||
class FileSystemBytecodeCache(BytecodeCache):
    """A bytecode cache that stores bytecode on the filesystem.  It accepts
    two arguments: The directory where the cache items are stored and a
    pattern string that is used to build the filename.

    If no directory is specified a default cache directory is selected.  On
    Windows the user's temp directory is used, on UNIX systems a directory
    is created for the user in the system temp directory.

    The pattern can be used to have multiple separate caches operate on the
    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
    is replaced with the cache key.

    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')

    This bytecode cache supports clearing of the cache using the clear method.
    """

    def __init__(
        self, directory: t.Optional[str] = None, pattern: str = "__jinja2_%s.cache"
    ) -> None:
        if directory is None:
            directory = self._get_default_cache_dir()
        self.directory = directory
        self.pattern = pattern

    def _get_default_cache_dir(self) -> str:
        # Pick (or create) a per-user cache directory that no other user
        # can read or write; refuse when that cannot be guaranteed.
        def _unsafe_dir() -> "te.NoReturn":
            raise RuntimeError(
                "Cannot determine safe temp directory. You "
                "need to explicitly provide one."
            )

        tmpdir = tempfile.gettempdir()

        # On Windows the temp directory is already per-user unless
        # explicitly forced otherwise, so it can be used directly.
        if os.name == "nt":
            return tmpdir
        if not hasattr(os, "getuid"):
            # No way to distinguish users -> cannot build a safe path.
            _unsafe_dir()

        dirname = f"_jinja2-cache-{os.getuid()}"
        actual_dir = os.path.join(tmpdir, dirname)

        try:
            # 0700: only the owning user may access the cache directory.
            os.mkdir(actual_dir, stat.S_IRWXU)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        try:
            os.chmod(actual_dir, stat.S_IRWXU)
            actual_dir_stat = os.lstat(actual_dir)
            # The directory must be ours, be a real directory (not a
            # symlink) and stay private, otherwise it is unsafe to use.
            if (
                actual_dir_stat.st_uid != os.getuid()
                or not stat.S_ISDIR(actual_dir_stat.st_mode)
                or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
            ):
                _unsafe_dir()
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        # Re-check after the chmod attempt to guard against races.
        actual_dir_stat = os.lstat(actual_dir)
        if (
            actual_dir_stat.st_uid != os.getuid()
            or not stat.S_ISDIR(actual_dir_stat.st_mode)
            or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
        ):
            _unsafe_dir()

        return actual_dir

    def _get_cache_filename(self, bucket: Bucket) -> str:
        # Substitute the cache key into the filename pattern.
        return os.path.join(self.directory, self.pattern % (bucket.key,))

    def load_bytecode(self, bucket: Bucket) -> None:
        filename = self._get_cache_filename(bucket)

        # Don't test for existence before opening the file, since the
        # file could disappear after the test before the open.
        try:
            f = open(filename, "rb")
        except (FileNotFoundError, IsADirectoryError, PermissionError):
            # PermissionError can occur on Windows when an operation is
            # in progress, such as calling clear().
            return

        with f:
            bucket.load_bytecode(f)

    def dump_bytecode(self, bucket: Bucket) -> None:
        # Write to a temporary file, then rename to the real name after
        # writing.  This avoids another process reading the file before
        # it is fully written.
        name = self._get_cache_filename(bucket)
        f = tempfile.NamedTemporaryFile(
            mode="wb",
            dir=os.path.dirname(name),
            prefix=os.path.basename(name),
            suffix=".tmp",
            delete=False,
        )

        def remove_silent() -> None:
            # Best-effort cleanup of the temp file.
            try:
                os.remove(f.name)
            except OSError:
                # Another process may have called clear().  On Windows,
                # another program may be holding the file open.
                pass

        try:
            with f:
                bucket.write_bytecode(f)
        except BaseException:
            remove_silent()
            raise

        try:
            os.replace(f.name, name)
        except OSError:
            # Another process may have called clear().  On Windows,
            # another program may be holding the file open.
            remove_silent()
        except BaseException:
            remove_silent()
            raise

    def clear(self) -> None:
        # imported lazily here because google app-engine doesn't support
        # write access on the file system and the function does not exist
        # normally.
        from os import remove

        files = fnmatch.filter(os.listdir(self.directory), self.pattern % ("*",))
        for filename in files:
            try:
                remove(os.path.join(self.directory, filename))
            except OSError:
                # Best-effort: a file vanishing or being locked is fine.
                pass
|
|
||||||
|
|
||||||
|
|
||||||
class MemcachedBytecodeCache(BytecodeCache):
    """This class implements a bytecode cache that uses a memcache cache for
    storing the information.  It does not enforce a specific memcache library
    (tummy's memcache or cmemcache) but will accept any class that provides
    the minimal interface required.

    Libraries compatible with this class:

    -   `cachelib <https://github.com/pallets/cachelib>`_
    -   `python-memcached <https://pypi.org/project/python-memcached/>`_

    (Unfortunately the django cache interface is not compatible because it
    does not support storing binary data, only text.  You can however pass
    the underlying cache client to the bytecode cache which is available
    as `django.core.cache.cache._client`.)

    The minimal interface for the client passed to the constructor is this:

    .. class:: MinimalClientInterface

        .. method:: set(key, value[, timeout])

            Stores the bytecode in the cache.  `value` is a string and
            `timeout` the timeout of the key.  If timeout is not provided
            a default timeout or no timeout should be assumed, if it's
            provided it's an integer with the number of seconds the cache
            item should exist.

        .. method:: get(key)

            Returns the value for the cache key.  If the item does not
            exist in the cache the return value must be `None`.

    The other arguments to the constructor are the prefix for all keys that
    is added before the actual cache key and the timeout for the bytecode in
    the cache system.  We recommend a high (or no) timeout.

    This bytecode cache does not support clearing of used items in the cache.
    The clear method is a no-operation function.

    .. versionadded:: 2.7
        Added support for ignoring memcache errors through the
        `ignore_memcache_errors` parameter.
    """

    def __init__(
        self,
        client: "_MemcachedClient",
        prefix: str = "jinja2/bytecode/",
        timeout: t.Optional[int] = None,
        ignore_memcache_errors: bool = True,
    ):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors

    def load_bytecode(self, bucket: "Bucket") -> None:
        code: t.Optional[bytes] = None

        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            if not self.ignore_memcache_errors:
                raise

        # Bug fix: the client returns None for a missing key (see the
        # interface docs above); previously that None was fed straight to
        # bytecode_from_string, which only worked by accident of
        # BytesIO(None) producing an empty buffer.  Skip explicitly.
        if code is not None:
            bucket.bytecode_from_string(code)

    def dump_bytecode(self, bucket: "Bucket") -> None:
        key = self.prefix + bucket.key
        value = bucket.bytecode_to_string()

        try:
            # Only pass the timeout through when one was configured, so
            # clients with a two-argument set() keep working.
            if self.timeout is not None:
                self.client.set(key, value, self.timeout)
            else:
                self.client.set(key, value)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
|
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,20 +0,0 @@
|
|||||||
#: list of lorem ipsum words used by the lipsum() helper function;
#: a single space-separated string (split on whitespace to get the words)
LOREM_IPSUM_WORDS = """\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate"""
|
|
||||||
@ -1,191 +0,0 @@
|
|||||||
import sys
|
|
||||||
import typing as t
|
|
||||||
from types import CodeType
|
|
||||||
from types import TracebackType
|
|
||||||
|
|
||||||
from .exceptions import TemplateSyntaxError
|
|
||||||
from .utils import internal_code
|
|
||||||
from .utils import missing
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
from .runtime import Context
|
|
||||||
|
|
||||||
|
|
||||||
def rewrite_traceback_stack(source: t.Optional[str] = None) -> BaseException:
    """Rewrite the current exception to replace any tracebacks from
    within compiled template code with tracebacks that look like they
    came from the template source.

    This must be called within an ``except`` block.

    :param source: For ``TemplateSyntaxError``, the original source if
        known.
    :return: The original exception with the rewritten traceback.
    """
    _, exc_value, tb = sys.exc_info()
    exc_value = t.cast(BaseException, exc_value)
    tb = t.cast(TracebackType, tb)

    if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
        # Mark as translated so a re-entrant call doesn't rewrite again.
        exc_value.translated = True
        exc_value.source = source
        # Remove the old traceback, otherwise the frames from the
        # compiler still show up.
        exc_value.with_traceback(None)
        # Outside of runtime, so the frame isn't executing template
        # code, but it still needs to point at the template.
        tb = fake_traceback(
            exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
        )
    else:
        # Skip the frame for the render function.
        tb = tb.tb_next

    stack = []

    # Build the stack of traceback object, replacing any in template
    # code with the source file and line information.
    while tb is not None:
        # Skip frames decorated with @internalcode. These are internal
        # calls that aren't useful in template debugging output.
        if tb.tb_frame.f_code in internal_code:
            tb = tb.tb_next
            continue

        template = tb.tb_frame.f_globals.get("__jinja_template__")

        if template is not None:
            # Frame belongs to compiled template code: substitute a fake
            # traceback pointing at the template source line instead.
            lineno = template.get_corresponding_lineno(tb.tb_lineno)
            fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
            stack.append(fake_tb)
        else:
            stack.append(tb)

        tb = tb.tb_next

    tb_next = None

    # Assign tb_next in reverse to avoid circular references.
    for tb in reversed(stack):
        tb.tb_next = tb_next
        tb_next = tb

    return exc_value.with_traceback(tb_next)
|
|
||||||
|
|
||||||
|
|
||||||
def fake_traceback(  # type: ignore
    exc_value: BaseException, tb: t.Optional[TracebackType], filename: str, lineno: int
) -> TracebackType:
    """Produce a new traceback object that looks like it came from the
    template source instead of the compiled code.  The filename, line
    number, and location name will point to the template, and the local
    variables will be the current template context.

    :param exc_value: The original exception to be re-raised to create
        the new traceback.
    :param tb: The original traceback to get the local variables and
        code info from.
    :param filename: The template filename.
    :param lineno: The line number in the template source.
    """
    if tb is not None:
        # Replace the real locals with the context that would be
        # available at that point in the template.
        frame_locals = get_template_locals(tb.tb_frame.f_locals)
        frame_locals.pop("__jinja_exception__", None)
    else:
        frame_locals = {}

    frame_globals = {
        "__name__": filename,
        "__file__": filename,
        "__jinja_exception__": exc_value,
    }
    # Pad with blank lines so the raise happens on the desired line.
    code: CodeType = compile(
        "\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec"
    )

    # Build a new code object that points to the template file and
    # replaces the location with a block name.
    location = "template"

    if tb is not None:
        function = tb.tb_frame.f_code.co_name

        if function == "root":
            location = "top-level template code"
        elif function.startswith("block_"):
            location = f"block {function[6:]!r}"

    if sys.version_info >= (3, 8):
        code = code.replace(co_name=location)
    else:
        # Pre-3.8 has no CodeType.replace(); rebuild the code object
        # field by field with only co_name changed.
        code = CodeType(
            code.co_argcount,
            code.co_kwonlyargcount,
            code.co_nlocals,
            code.co_stacksize,
            code.co_flags,
            code.co_code,
            code.co_consts,
            code.co_names,
            code.co_varnames,
            code.co_filename,
            location,
            code.co_firstlineno,
            code.co_lnotab,
            code.co_freevars,
            code.co_cellvars,
        )

    # Execute the new code, which is guaranteed to raise, and return
    # the new traceback without this frame.
    try:
        exec(code, frame_globals, frame_locals)
    except BaseException:
        return sys.exc_info()[2].tb_next  # type: ignore
|
|
||||||
|
|
||||||
|
|
||||||
def get_template_locals(real_locals: t.Mapping[str, t.Any]) -> t.Dict[str, t.Any]:
|
|
||||||
"""Based on the runtime locals, get the context that would be
|
|
||||||
available at that point in the template.
|
|
||||||
"""
|
|
||||||
# Start with the current template context.
|
|
||||||
ctx: "t.Optional[Context]" = real_locals.get("context")
|
|
||||||
|
|
||||||
if ctx is not None:
|
|
||||||
data: t.Dict[str, t.Any] = ctx.get_all().copy()
|
|
||||||
else:
|
|
||||||
data = {}
|
|
||||||
|
|
||||||
# Might be in a derived context that only sets local variables
|
|
||||||
# rather than pushing a context. Local variables follow the scheme
|
|
||||||
# l_depth_name. Find the highest-depth local that has a value for
|
|
||||||
# each name.
|
|
||||||
local_overrides: t.Dict[str, t.Tuple[int, t.Any]] = {}
|
|
||||||
|
|
||||||
for name, value in real_locals.items():
|
|
||||||
if not name.startswith("l_") or value is missing:
|
|
||||||
# Not a template variable, or no longer relevant.
|
|
||||||
continue
|
|
||||||
|
|
||||||
try:
|
|
||||||
_, depth_str, name = name.split("_", 2)
|
|
||||||
depth = int(depth_str)
|
|
||||||
except ValueError:
|
|
||||||
continue
|
|
||||||
|
|
||||||
cur_depth = local_overrides.get(name, (-1,))[0]
|
|
||||||
|
|
||||||
if cur_depth < depth:
|
|
||||||
local_overrides[name] = (depth, value)
|
|
||||||
|
|
||||||
# Modify the context with any derived context.
|
|
||||||
for name, (_, value) in local_overrides.items():
|
|
||||||
if value is missing:
|
|
||||||
data.pop(name, None)
|
|
||||||
else:
|
|
||||||
data[name] = value
|
|
||||||
|
|
||||||
return data
|
|
||||||
@ -1,48 +0,0 @@
|
|||||||
import typing as t
|
|
||||||
|
|
||||||
from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
|
|
||||||
from .tests import TESTS as DEFAULT_TESTS # noqa: F401
|
|
||||||
from .utils import Cycler
|
|
||||||
from .utils import generate_lorem_ipsum
|
|
||||||
from .utils import Joiner
|
|
||||||
from .utils import Namespace
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
import typing_extensions as te
|
|
||||||
|
|
||||||
# defaults for the parser / lexer
|
|
||||||
BLOCK_START_STRING = "{%"
|
|
||||||
BLOCK_END_STRING = "%}"
|
|
||||||
VARIABLE_START_STRING = "{{"
|
|
||||||
VARIABLE_END_STRING = "}}"
|
|
||||||
COMMENT_START_STRING = "{#"
|
|
||||||
COMMENT_END_STRING = "#}"
|
|
||||||
LINE_STATEMENT_PREFIX: t.Optional[str] = None
|
|
||||||
LINE_COMMENT_PREFIX: t.Optional[str] = None
|
|
||||||
TRIM_BLOCKS = False
|
|
||||||
LSTRIP_BLOCKS = False
|
|
||||||
NEWLINE_SEQUENCE: "te.Literal['\\n', '\\r\\n', '\\r']" = "\n"
|
|
||||||
KEEP_TRAILING_NEWLINE = False
|
|
||||||
|
|
||||||
# default filters, tests and namespace
|
|
||||||
|
|
||||||
DEFAULT_NAMESPACE = {
|
|
||||||
"range": range,
|
|
||||||
"dict": dict,
|
|
||||||
"lipsum": generate_lorem_ipsum,
|
|
||||||
"cycler": Cycler,
|
|
||||||
"joiner": Joiner,
|
|
||||||
"namespace": Namespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
# default policies
|
|
||||||
DEFAULT_POLICIES: t.Dict[str, t.Any] = {
|
|
||||||
"compiler.ascii_str": True,
|
|
||||||
"urlize.rel": "noopener",
|
|
||||||
"urlize.target": None,
|
|
||||||
"urlize.extra_schemes": None,
|
|
||||||
"truncate.leeway": 5,
|
|
||||||
"json.dumps_function": None,
|
|
||||||
"json.dumps_kwargs": {"sort_keys": True},
|
|
||||||
"ext.i18n.trimmed": False,
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,166 +0,0 @@
|
|||||||
import typing as t
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
from .runtime import Undefined
|
|
||||||
|
|
||||||
|
|
||||||
class TemplateError(Exception):
|
|
||||||
"""Baseclass for all template errors."""
|
|
||||||
|
|
||||||
def __init__(self, message: t.Optional[str] = None) -> None:
|
|
||||||
super().__init__(message)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def message(self) -> t.Optional[str]:
|
|
||||||
return self.args[0] if self.args else None
|
|
||||||
|
|
||||||
|
|
||||||
class TemplateNotFound(IOError, LookupError, TemplateError):
|
|
||||||
"""Raised if a template does not exist.
|
|
||||||
|
|
||||||
.. versionchanged:: 2.11
|
|
||||||
If the given name is :class:`Undefined` and no message was
|
|
||||||
provided, an :exc:`UndefinedError` is raised.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Silence the Python warning about message being deprecated since
|
|
||||||
# it's not valid here.
|
|
||||||
message: t.Optional[str] = None
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
name: t.Optional[t.Union[str, "Undefined"]],
|
|
||||||
message: t.Optional[str] = None,
|
|
||||||
) -> None:
|
|
||||||
IOError.__init__(self, name)
|
|
||||||
|
|
||||||
if message is None:
|
|
||||||
from .runtime import Undefined
|
|
||||||
|
|
||||||
if isinstance(name, Undefined):
|
|
||||||
name._fail_with_undefined_error()
|
|
||||||
|
|
||||||
message = name
|
|
||||||
|
|
||||||
self.message = message
|
|
||||||
self.name = name
|
|
||||||
self.templates = [name]
|
|
||||||
|
|
||||||
def __str__(self) -> str:
|
|
||||||
return str(self.message)
|
|
||||||
|
|
||||||
|
|
||||||
class TemplatesNotFound(TemplateNotFound):
|
|
||||||
"""Like :class:`TemplateNotFound` but raised if multiple templates
|
|
||||||
are selected. This is a subclass of :class:`TemplateNotFound`
|
|
||||||
exception, so just catching the base exception will catch both.
|
|
||||||
|
|
||||||
.. versionchanged:: 2.11
|
|
||||||
If a name in the list of names is :class:`Undefined`, a message
|
|
||||||
about it being undefined is shown rather than the empty string.
|
|
||||||
|
|
||||||
.. versionadded:: 2.2
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
names: t.Sequence[t.Union[str, "Undefined"]] = (),
|
|
||||||
message: t.Optional[str] = None,
|
|
||||||
) -> None:
|
|
||||||
if message is None:
|
|
||||||
from .runtime import Undefined
|
|
||||||
|
|
||||||
parts = []
|
|
||||||
|
|
||||||
for name in names:
|
|
||||||
if isinstance(name, Undefined):
|
|
||||||
parts.append(name._undefined_message)
|
|
||||||
else:
|
|
||||||
parts.append(name)
|
|
||||||
|
|
||||||
parts_str = ", ".join(map(str, parts))
|
|
||||||
message = f"none of the templates given were found: {parts_str}"
|
|
||||||
|
|
||||||
super().__init__(names[-1] if names else None, message)
|
|
||||||
self.templates = list(names)
|
|
||||||
|
|
||||||
|
|
||||||
class TemplateSyntaxError(TemplateError):
|
|
||||||
"""Raised to tell the user that there is a problem with the template."""
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
message: str,
|
|
||||||
lineno: int,
|
|
||||||
name: t.Optional[str] = None,
|
|
||||||
filename: t.Optional[str] = None,
|
|
||||||
) -> None:
|
|
||||||
super().__init__(message)
|
|
||||||
self.lineno = lineno
|
|
||||||
self.name = name
|
|
||||||
self.filename = filename
|
|
||||||
self.source: t.Optional[str] = None
|
|
||||||
|
|
||||||
# this is set to True if the debug.translate_syntax_error
|
|
||||||
# function translated the syntax error into a new traceback
|
|
||||||
self.translated = False
|
|
||||||
|
|
||||||
def __str__(self) -> str:
|
|
||||||
# for translated errors we only return the message
|
|
||||||
if self.translated:
|
|
||||||
return t.cast(str, self.message)
|
|
||||||
|
|
||||||
# otherwise attach some stuff
|
|
||||||
location = f"line {self.lineno}"
|
|
||||||
name = self.filename or self.name
|
|
||||||
if name:
|
|
||||||
location = f'File "{name}", {location}'
|
|
||||||
lines = [t.cast(str, self.message), " " + location]
|
|
||||||
|
|
||||||
# if the source is set, add the line to the output
|
|
||||||
if self.source is not None:
|
|
||||||
try:
|
|
||||||
line = self.source.splitlines()[self.lineno - 1]
|
|
||||||
except IndexError:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
lines.append(" " + line.strip())
|
|
||||||
|
|
||||||
return "\n".join(lines)
|
|
||||||
|
|
||||||
def __reduce__(self): # type: ignore
|
|
||||||
# https://bugs.python.org/issue1692335 Exceptions that take
|
|
||||||
# multiple required arguments have problems with pickling.
|
|
||||||
# Without this, raises TypeError: __init__() missing 1 required
|
|
||||||
# positional argument: 'lineno'
|
|
||||||
return self.__class__, (self.message, self.lineno, self.name, self.filename)
|
|
||||||
|
|
||||||
|
|
||||||
class TemplateAssertionError(TemplateSyntaxError):
|
|
||||||
"""Like a template syntax error, but covers cases where something in the
|
|
||||||
template caused an error at compile time that wasn't necessarily caused
|
|
||||||
by a syntax error. However it's a direct subclass of
|
|
||||||
:exc:`TemplateSyntaxError` and has the same attributes.
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
class TemplateRuntimeError(TemplateError):
|
|
||||||
"""A generic runtime error in the template engine. Under some situations
|
|
||||||
Jinja may raise this exception.
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
class UndefinedError(TemplateRuntimeError):
|
|
||||||
"""Raised if a template tries to operate on :class:`Undefined`."""
|
|
||||||
|
|
||||||
|
|
||||||
class SecurityError(TemplateRuntimeError):
|
|
||||||
"""Raised if a template tries to do something insecure if the
|
|
||||||
sandbox is enabled.
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
class FilterArgumentError(TemplateRuntimeError):
|
|
||||||
"""This error is raised if a filter was called with inappropriate
|
|
||||||
arguments
|
|
||||||
"""
|
|
||||||
@ -1,859 +0,0 @@
|
|||||||
"""Extension API for adding custom tags and behavior."""
|
|
||||||
import pprint
|
|
||||||
import re
|
|
||||||
import typing as t
|
|
||||||
|
|
||||||
from markupsafe import Markup
|
|
||||||
|
|
||||||
from . import defaults
|
|
||||||
from . import nodes
|
|
||||||
from .environment import Environment
|
|
||||||
from .exceptions import TemplateAssertionError
|
|
||||||
from .exceptions import TemplateSyntaxError
|
|
||||||
from .runtime import concat # type: ignore
|
|
||||||
from .runtime import Context
|
|
||||||
from .runtime import Undefined
|
|
||||||
from .utils import import_string
|
|
||||||
from .utils import pass_context
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
import typing_extensions as te
|
|
||||||
from .lexer import Token
|
|
||||||
from .lexer import TokenStream
|
|
||||||
from .parser import Parser
|
|
||||||
|
|
||||||
class _TranslationsBasic(te.Protocol):
|
|
||||||
def gettext(self, message: str) -> str:
|
|
||||||
...
|
|
||||||
|
|
||||||
def ngettext(self, singular: str, plural: str, n: int) -> str:
|
|
||||||
pass
|
|
||||||
|
|
||||||
class _TranslationsContext(_TranslationsBasic):
|
|
||||||
def pgettext(self, context: str, message: str) -> str:
|
|
||||||
...
|
|
||||||
|
|
||||||
def npgettext(self, context: str, singular: str, plural: str, n: int) -> str:
|
|
||||||
...
|
|
||||||
|
|
||||||
_SupportedTranslations = t.Union[_TranslationsBasic, _TranslationsContext]
|
|
||||||
|
|
||||||
|
|
||||||
# I18N functions available in Jinja templates. If the I18N library
|
|
||||||
# provides ugettext, it will be assigned to gettext.
|
|
||||||
GETTEXT_FUNCTIONS: t.Tuple[str, ...] = (
|
|
||||||
"_",
|
|
||||||
"gettext",
|
|
||||||
"ngettext",
|
|
||||||
"pgettext",
|
|
||||||
"npgettext",
|
|
||||||
)
|
|
||||||
_ws_re = re.compile(r"\s*\n\s*")
|
|
||||||
|
|
||||||
|
|
||||||
class Extension:
|
|
||||||
"""Extensions can be used to add extra functionality to the Jinja template
|
|
||||||
system at the parser level. Custom extensions are bound to an environment
|
|
||||||
but may not store environment specific data on `self`. The reason for
|
|
||||||
this is that an extension can be bound to another environment (for
|
|
||||||
overlays) by creating a copy and reassigning the `environment` attribute.
|
|
||||||
|
|
||||||
As extensions are created by the environment they cannot accept any
|
|
||||||
arguments for configuration. One may want to work around that by using
|
|
||||||
a factory function, but that is not possible as extensions are identified
|
|
||||||
by their import name. The correct way to configure the extension is
|
|
||||||
storing the configuration values on the environment. Because this way the
|
|
||||||
environment ends up acting as central configuration storage the
|
|
||||||
attributes may clash which is why extensions have to ensure that the names
|
|
||||||
they choose for configuration are not too generic. ``prefix`` for example
|
|
||||||
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
|
|
||||||
name as includes the name of the extension (fragment cache).
|
|
||||||
"""
|
|
||||||
|
|
||||||
identifier: t.ClassVar[str]
|
|
||||||
|
|
||||||
def __init_subclass__(cls) -> None:
|
|
||||||
cls.identifier = f"{cls.__module__}.{cls.__name__}"
|
|
||||||
|
|
||||||
#: if this extension parses this is the list of tags it's listening to.
|
|
||||||
tags: t.Set[str] = set()
|
|
||||||
|
|
||||||
#: the priority of that extension. This is especially useful for
|
|
||||||
#: extensions that preprocess values. A lower value means higher
|
|
||||||
#: priority.
|
|
||||||
#:
|
|
||||||
#: .. versionadded:: 2.4
|
|
||||||
priority = 100
|
|
||||||
|
|
||||||
def __init__(self, environment: Environment) -> None:
|
|
||||||
self.environment = environment
|
|
||||||
|
|
||||||
def bind(self, environment: Environment) -> "Extension":
|
|
||||||
"""Create a copy of this extension bound to another environment."""
|
|
||||||
rv = object.__new__(self.__class__)
|
|
||||||
rv.__dict__.update(self.__dict__)
|
|
||||||
rv.environment = environment
|
|
||||||
return rv
|
|
||||||
|
|
||||||
def preprocess(
|
|
||||||
self, source: str, name: t.Optional[str], filename: t.Optional[str] = None
|
|
||||||
) -> str:
|
|
||||||
"""This method is called before the actual lexing and can be used to
|
|
||||||
preprocess the source. The `filename` is optional. The return value
|
|
||||||
must be the preprocessed source.
|
|
||||||
"""
|
|
||||||
return source
|
|
||||||
|
|
||||||
def filter_stream(
|
|
||||||
self, stream: "TokenStream"
|
|
||||||
) -> t.Union["TokenStream", t.Iterable["Token"]]:
|
|
||||||
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
|
|
||||||
to filter tokens returned. This method has to return an iterable of
|
|
||||||
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
|
|
||||||
:class:`~jinja2.lexer.TokenStream`.
|
|
||||||
"""
|
|
||||||
return stream
|
|
||||||
|
|
||||||
def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
|
|
||||||
"""If any of the :attr:`tags` matched this method is called with the
|
|
||||||
parser as first argument. The token the parser stream is pointing at
|
|
||||||
is the name token that matched. This method has to return one or a
|
|
||||||
list of multiple nodes.
|
|
||||||
"""
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
def attr(
|
|
||||||
self, name: str, lineno: t.Optional[int] = None
|
|
||||||
) -> nodes.ExtensionAttribute:
|
|
||||||
"""Return an attribute node for the current extension. This is useful
|
|
||||||
to pass constants on extensions to generated template code.
|
|
||||||
|
|
||||||
::
|
|
||||||
|
|
||||||
self.attr('_my_attribute', lineno=lineno)
|
|
||||||
"""
|
|
||||||
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
|
|
||||||
|
|
||||||
def call_method(
|
|
||||||
self,
|
|
||||||
name: str,
|
|
||||||
args: t.Optional[t.List[nodes.Expr]] = None,
|
|
||||||
kwargs: t.Optional[t.List[nodes.Keyword]] = None,
|
|
||||||
dyn_args: t.Optional[nodes.Expr] = None,
|
|
||||||
dyn_kwargs: t.Optional[nodes.Expr] = None,
|
|
||||||
lineno: t.Optional[int] = None,
|
|
||||||
) -> nodes.Call:
|
|
||||||
"""Call a method of the extension. This is a shortcut for
|
|
||||||
:meth:`attr` + :class:`jinja2.nodes.Call`.
|
|
||||||
"""
|
|
||||||
if args is None:
|
|
||||||
args = []
|
|
||||||
if kwargs is None:
|
|
||||||
kwargs = []
|
|
||||||
return nodes.Call(
|
|
||||||
self.attr(name, lineno=lineno),
|
|
||||||
args,
|
|
||||||
kwargs,
|
|
||||||
dyn_args,
|
|
||||||
dyn_kwargs,
|
|
||||||
lineno=lineno,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pass_context
|
|
||||||
def _gettext_alias(
|
|
||||||
__context: Context, *args: t.Any, **kwargs: t.Any
|
|
||||||
) -> t.Union[t.Any, Undefined]:
|
|
||||||
return __context.call(__context.resolve("gettext"), *args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]:
|
|
||||||
@pass_context
|
|
||||||
def gettext(__context: Context, __string: str, **variables: t.Any) -> str:
|
|
||||||
rv = __context.call(func, __string)
|
|
||||||
if __context.eval_ctx.autoescape:
|
|
||||||
rv = Markup(rv)
|
|
||||||
# Always treat as a format string, even if there are no
|
|
||||||
# variables. This makes translation strings more consistent
|
|
||||||
# and predictable. This requires escaping
|
|
||||||
return rv % variables # type: ignore
|
|
||||||
|
|
||||||
return gettext
|
|
||||||
|
|
||||||
|
|
||||||
def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]:
|
|
||||||
@pass_context
|
|
||||||
def ngettext(
|
|
||||||
__context: Context,
|
|
||||||
__singular: str,
|
|
||||||
__plural: str,
|
|
||||||
__num: int,
|
|
||||||
**variables: t.Any,
|
|
||||||
) -> str:
|
|
||||||
variables.setdefault("num", __num)
|
|
||||||
rv = __context.call(func, __singular, __plural, __num)
|
|
||||||
if __context.eval_ctx.autoescape:
|
|
||||||
rv = Markup(rv)
|
|
||||||
# Always treat as a format string, see gettext comment above.
|
|
||||||
return rv % variables # type: ignore
|
|
||||||
|
|
||||||
return ngettext
|
|
||||||
|
|
||||||
|
|
||||||
def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]:
|
|
||||||
@pass_context
|
|
||||||
def pgettext(
|
|
||||||
__context: Context, __string_ctx: str, __string: str, **variables: t.Any
|
|
||||||
) -> str:
|
|
||||||
variables.setdefault("context", __string_ctx)
|
|
||||||
rv = __context.call(func, __string_ctx, __string)
|
|
||||||
|
|
||||||
if __context.eval_ctx.autoescape:
|
|
||||||
rv = Markup(rv)
|
|
||||||
|
|
||||||
# Always treat as a format string, see gettext comment above.
|
|
||||||
return rv % variables # type: ignore
|
|
||||||
|
|
||||||
return pgettext
|
|
||||||
|
|
||||||
|
|
||||||
def _make_new_npgettext(
|
|
||||||
func: t.Callable[[str, str, str, int], str]
|
|
||||||
) -> t.Callable[..., str]:
|
|
||||||
@pass_context
|
|
||||||
def npgettext(
|
|
||||||
__context: Context,
|
|
||||||
__string_ctx: str,
|
|
||||||
__singular: str,
|
|
||||||
__plural: str,
|
|
||||||
__num: int,
|
|
||||||
**variables: t.Any,
|
|
||||||
) -> str:
|
|
||||||
variables.setdefault("context", __string_ctx)
|
|
||||||
variables.setdefault("num", __num)
|
|
||||||
rv = __context.call(func, __string_ctx, __singular, __plural, __num)
|
|
||||||
|
|
||||||
if __context.eval_ctx.autoescape:
|
|
||||||
rv = Markup(rv)
|
|
||||||
|
|
||||||
# Always treat as a format string, see gettext comment above.
|
|
||||||
return rv % variables # type: ignore
|
|
||||||
|
|
||||||
return npgettext
|
|
||||||
|
|
||||||
|
|
||||||
class InternationalizationExtension(Extension):
|
|
||||||
"""This extension adds gettext support to Jinja."""
|
|
||||||
|
|
||||||
tags = {"trans"}
|
|
||||||
|
|
||||||
# TODO: the i18n extension is currently reevaluating values in a few
|
|
||||||
# situations. Take this example:
|
|
||||||
# {% trans count=something() %}{{ count }} foo{% pluralize
|
|
||||||
# %}{{ count }} fooss{% endtrans %}
|
|
||||||
# something is called twice here. One time for the gettext value and
|
|
||||||
# the other time for the n-parameter of the ngettext function.
|
|
||||||
|
|
||||||
def __init__(self, environment: Environment) -> None:
|
|
||||||
super().__init__(environment)
|
|
||||||
environment.globals["_"] = _gettext_alias
|
|
||||||
environment.extend(
|
|
||||||
install_gettext_translations=self._install,
|
|
||||||
install_null_translations=self._install_null,
|
|
||||||
install_gettext_callables=self._install_callables,
|
|
||||||
uninstall_gettext_translations=self._uninstall,
|
|
||||||
extract_translations=self._extract,
|
|
||||||
newstyle_gettext=False,
|
|
||||||
)
|
|
||||||
|
|
||||||
def _install(
|
|
||||||
self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None
|
|
||||||
) -> None:
|
|
||||||
# ugettext and ungettext are preferred in case the I18N library
|
|
||||||
# is providing compatibility with older Python versions.
|
|
||||||
gettext = getattr(translations, "ugettext", None)
|
|
||||||
if gettext is None:
|
|
||||||
gettext = translations.gettext
|
|
||||||
ngettext = getattr(translations, "ungettext", None)
|
|
||||||
if ngettext is None:
|
|
||||||
ngettext = translations.ngettext
|
|
||||||
|
|
||||||
pgettext = getattr(translations, "pgettext", None)
|
|
||||||
npgettext = getattr(translations, "npgettext", None)
|
|
||||||
self._install_callables(
|
|
||||||
gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext
|
|
||||||
)
|
|
||||||
|
|
||||||
def _install_null(self, newstyle: t.Optional[bool] = None) -> None:
|
|
||||||
import gettext
|
|
||||||
|
|
||||||
translations = gettext.NullTranslations()
|
|
||||||
|
|
||||||
if hasattr(translations, "pgettext"):
|
|
||||||
# Python < 3.8
|
|
||||||
pgettext = translations.pgettext # type: ignore
|
|
||||||
else:
|
|
||||||
|
|
||||||
def pgettext(c: str, s: str) -> str:
|
|
||||||
return s
|
|
||||||
|
|
||||||
if hasattr(translations, "npgettext"):
|
|
||||||
npgettext = translations.npgettext # type: ignore
|
|
||||||
else:
|
|
||||||
|
|
||||||
def npgettext(c: str, s: str, p: str, n: int) -> str:
|
|
||||||
return s if n == 1 else p
|
|
||||||
|
|
||||||
self._install_callables(
|
|
||||||
gettext=translations.gettext,
|
|
||||||
ngettext=translations.ngettext,
|
|
||||||
newstyle=newstyle,
|
|
||||||
pgettext=pgettext,
|
|
||||||
npgettext=npgettext,
|
|
||||||
)
|
|
||||||
|
|
||||||
def _install_callables(
|
|
||||||
self,
|
|
||||||
gettext: t.Callable[[str], str],
|
|
||||||
ngettext: t.Callable[[str, str, int], str],
|
|
||||||
newstyle: t.Optional[bool] = None,
|
|
||||||
pgettext: t.Optional[t.Callable[[str, str], str]] = None,
|
|
||||||
npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None,
|
|
||||||
) -> None:
|
|
||||||
if newstyle is not None:
|
|
||||||
self.environment.newstyle_gettext = newstyle # type: ignore
|
|
||||||
if self.environment.newstyle_gettext: # type: ignore
|
|
||||||
gettext = _make_new_gettext(gettext)
|
|
||||||
ngettext = _make_new_ngettext(ngettext)
|
|
||||||
|
|
||||||
if pgettext is not None:
|
|
||||||
pgettext = _make_new_pgettext(pgettext)
|
|
||||||
|
|
||||||
if npgettext is not None:
|
|
||||||
npgettext = _make_new_npgettext(npgettext)
|
|
||||||
|
|
||||||
self.environment.globals.update(
|
|
||||||
gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
|
|
||||||
)
|
|
||||||
|
|
||||||
def _uninstall(self, translations: "_SupportedTranslations") -> None:
|
|
||||||
for key in ("gettext", "ngettext", "pgettext", "npgettext"):
|
|
||||||
self.environment.globals.pop(key, None)
|
|
||||||
|
|
||||||
def _extract(
|
|
||||||
self,
|
|
||||||
source: t.Union[str, nodes.Template],
|
|
||||||
gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
|
|
||||||
) -> t.Iterator[
|
|
||||||
t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
|
|
||||||
]:
|
|
||||||
if isinstance(source, str):
|
|
||||||
source = self.environment.parse(source)
|
|
||||||
return extract_from_ast(source, gettext_functions)
|
|
||||||
|
|
||||||
def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
|
|
||||||
"""Parse a translatable tag."""
|
|
||||||
lineno = next(parser.stream).lineno
|
|
||||||
|
|
||||||
context = None
|
|
||||||
context_token = parser.stream.next_if("string")
|
|
||||||
|
|
||||||
if context_token is not None:
|
|
||||||
context = context_token.value
|
|
||||||
|
|
||||||
# find all the variables referenced. Additionally a variable can be
|
|
||||||
# defined in the body of the trans block too, but this is checked at
|
|
||||||
# a later state.
|
|
||||||
plural_expr: t.Optional[nodes.Expr] = None
|
|
||||||
plural_expr_assignment: t.Optional[nodes.Assign] = None
|
|
||||||
num_called_num = False
|
|
||||||
variables: t.Dict[str, nodes.Expr] = {}
|
|
||||||
trimmed = None
|
|
||||||
while parser.stream.current.type != "block_end":
|
|
||||||
if variables:
|
|
||||||
parser.stream.expect("comma")
|
|
||||||
|
|
||||||
# skip colon for python compatibility
|
|
||||||
if parser.stream.skip_if("colon"):
|
|
||||||
break
|
|
||||||
|
|
||||||
token = parser.stream.expect("name")
|
|
||||||
if token.value in variables:
|
|
||||||
parser.fail(
|
|
||||||
f"translatable variable {token.value!r} defined twice.",
|
|
||||||
token.lineno,
|
|
||||||
exc=TemplateAssertionError,
|
|
||||||
)
|
|
||||||
|
|
||||||
# expressions
|
|
||||||
if parser.stream.current.type == "assign":
|
|
||||||
next(parser.stream)
|
|
||||||
variables[token.value] = var = parser.parse_expression()
|
|
||||||
elif trimmed is None and token.value in ("trimmed", "notrimmed"):
|
|
||||||
trimmed = token.value == "trimmed"
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
variables[token.value] = var = nodes.Name(token.value, "load")
|
|
||||||
|
|
||||||
if plural_expr is None:
|
|
||||||
if isinstance(var, nodes.Call):
|
|
||||||
plural_expr = nodes.Name("_trans", "load")
|
|
||||||
variables[token.value] = plural_expr
|
|
||||||
plural_expr_assignment = nodes.Assign(
|
|
||||||
nodes.Name("_trans", "store"), var
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
plural_expr = var
|
|
||||||
num_called_num = token.value == "num"
|
|
||||||
|
|
||||||
parser.stream.expect("block_end")
|
|
||||||
|
|
||||||
plural = None
|
|
||||||
have_plural = False
|
|
||||||
referenced = set()
|
|
||||||
|
|
||||||
# now parse until endtrans or pluralize
|
|
||||||
singular_names, singular = self._parse_block(parser, True)
|
|
||||||
if singular_names:
|
|
||||||
referenced.update(singular_names)
|
|
||||||
if plural_expr is None:
|
|
||||||
plural_expr = nodes.Name(singular_names[0], "load")
|
|
||||||
num_called_num = singular_names[0] == "num"
|
|
||||||
|
|
||||||
# if we have a pluralize block, we parse that too
|
|
||||||
if parser.stream.current.test("name:pluralize"):
|
|
||||||
have_plural = True
|
|
||||||
next(parser.stream)
|
|
||||||
if parser.stream.current.type != "block_end":
|
|
||||||
token = parser.stream.expect("name")
|
|
||||||
if token.value not in variables:
|
|
||||||
parser.fail(
|
|
||||||
f"unknown variable {token.value!r} for pluralization",
|
|
||||||
token.lineno,
|
|
||||||
exc=TemplateAssertionError,
|
|
||||||
)
|
|
||||||
plural_expr = variables[token.value]
|
|
||||||
num_called_num = token.value == "num"
|
|
||||||
parser.stream.expect("block_end")
|
|
||||||
plural_names, plural = self._parse_block(parser, False)
|
|
||||||
next(parser.stream)
|
|
||||||
referenced.update(plural_names)
|
|
||||||
else:
|
|
||||||
next(parser.stream)
|
|
||||||
|
|
||||||
# register free names as simple name expressions
|
|
||||||
for name in referenced:
|
|
||||||
if name not in variables:
|
|
||||||
variables[name] = nodes.Name(name, "load")
|
|
||||||
|
|
||||||
if not have_plural:
|
|
||||||
plural_expr = None
|
|
||||||
elif plural_expr is None:
|
|
||||||
parser.fail("pluralize without variables", lineno)
|
|
||||||
|
|
||||||
if trimmed is None:
|
|
||||||
trimmed = self.environment.policies["ext.i18n.trimmed"]
|
|
||||||
if trimmed:
|
|
||||||
singular = self._trim_whitespace(singular)
|
|
||||||
if plural:
|
|
||||||
plural = self._trim_whitespace(plural)
|
|
||||||
|
|
||||||
node = self._make_node(
|
|
||||||
singular,
|
|
||||||
plural,
|
|
||||||
context,
|
|
||||||
variables,
|
|
||||||
plural_expr,
|
|
||||||
bool(referenced),
|
|
||||||
num_called_num and have_plural,
|
|
||||||
)
|
|
||||||
node.set_lineno(lineno)
|
|
||||||
if plural_expr_assignment is not None:
|
|
||||||
return [plural_expr_assignment, node]
|
|
||||||
else:
|
|
||||||
return node
|
|
||||||
|
|
||||||
def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str:
|
|
||||||
return _ws_re.sub(" ", string.strip())
|
|
||||||
|
|
||||||
def _parse_block(
|
|
||||||
self, parser: "Parser", allow_pluralize: bool
|
|
||||||
) -> t.Tuple[t.List[str], str]:
|
|
||||||
"""Parse until the next block tag with a given name."""
|
|
||||||
referenced = []
|
|
||||||
buf = []
|
|
||||||
|
|
||||||
while True:
|
|
||||||
if parser.stream.current.type == "data":
|
|
||||||
buf.append(parser.stream.current.value.replace("%", "%%"))
|
|
||||||
next(parser.stream)
|
|
||||||
elif parser.stream.current.type == "variable_begin":
|
|
||||||
next(parser.stream)
|
|
||||||
name = parser.stream.expect("name").value
|
|
||||||
referenced.append(name)
|
|
||||||
buf.append(f"%({name})s")
|
|
||||||
parser.stream.expect("variable_end")
|
|
||||||
elif parser.stream.current.type == "block_begin":
|
|
||||||
next(parser.stream)
|
|
||||||
if parser.stream.current.test("name:endtrans"):
|
|
||||||
break
|
|
||||||
elif parser.stream.current.test("name:pluralize"):
|
|
||||||
if allow_pluralize:
|
|
||||||
break
|
|
||||||
parser.fail(
|
|
||||||
"a translatable section can have only one pluralize section"
|
|
||||||
)
|
|
||||||
parser.fail(
|
|
||||||
"control structures in translatable sections are not allowed"
|
|
||||||
)
|
|
||||||
elif parser.stream.eos:
|
|
||||||
parser.fail("unclosed translation block")
|
|
||||||
else:
|
|
||||||
raise RuntimeError("internal parser error")
|
|
||||||
|
|
||||||
return referenced, concat(buf)
|
|
||||||
|
|
||||||
def _make_node(
|
|
||||||
self,
|
|
||||||
singular: str,
|
|
||||||
plural: t.Optional[str],
|
|
||||||
context: t.Optional[str],
|
|
||||||
variables: t.Dict[str, nodes.Expr],
|
|
||||||
plural_expr: t.Optional[nodes.Expr],
|
|
||||||
vars_referenced: bool,
|
|
||||||
num_called_num: bool,
|
|
||||||
) -> nodes.Output:
|
|
||||||
"""Generates a useful node from the data provided."""
|
|
||||||
newstyle = self.environment.newstyle_gettext # type: ignore
|
|
||||||
node: nodes.Expr
|
|
||||||
|
|
||||||
# no variables referenced? no need to escape for old style
|
|
||||||
# gettext invocations only if there are vars.
|
|
||||||
if not vars_referenced and not newstyle:
|
|
||||||
singular = singular.replace("%%", "%")
|
|
||||||
if plural:
|
|
||||||
plural = plural.replace("%%", "%")
|
|
||||||
|
|
||||||
func_name = "gettext"
|
|
||||||
func_args: t.List[nodes.Expr] = [nodes.Const(singular)]
|
|
||||||
|
|
||||||
if context is not None:
|
|
||||||
func_args.insert(0, nodes.Const(context))
|
|
||||||
func_name = f"p{func_name}"
|
|
||||||
|
|
||||||
if plural_expr is not None:
|
|
||||||
func_name = f"n{func_name}"
|
|
||||||
func_args.extend((nodes.Const(plural), plural_expr))
|
|
||||||
|
|
||||||
node = nodes.Call(nodes.Name(func_name, "load"), func_args, [], None, None)
|
|
||||||
|
|
||||||
# in case newstyle gettext is used, the method is powerful
|
|
||||||
# enough to handle the variable expansion and autoescape
|
|
||||||
# handling itself
|
|
||||||
if newstyle:
|
|
||||||
for key, value in variables.items():
|
|
||||||
# the function adds that later anyways in case num was
|
|
||||||
# called num, so just skip it.
|
|
||||||
if num_called_num and key == "num":
|
|
||||||
continue
|
|
||||||
node.kwargs.append(nodes.Keyword(key, value))
|
|
||||||
|
|
||||||
# otherwise do that here
|
|
||||||
else:
|
|
||||||
# mark the return value as safe if we are in an
|
|
||||||
# environment with autoescaping turned on
|
|
||||||
node = nodes.MarkSafeIfAutoescape(node)
|
|
||||||
if variables:
|
|
||||||
node = nodes.Mod(
|
|
||||||
node,
|
|
||||||
nodes.Dict(
|
|
||||||
[
|
|
||||||
nodes.Pair(nodes.Const(key), value)
|
|
||||||
for key, value in variables.items()
|
|
||||||
]
|
|
||||||
),
|
|
||||||
)
|
|
||||||
return nodes.Output([node])
|
|
||||||
|
|
||||||
|
|
||||||
class ExprStmtExtension(Extension):
    """Adds a ``do`` tag to Jinja that evaluates an expression exactly
    like the print statement would, but discards the result instead of
    emitting it.
    """

    tags = {"do"}

    def parse(self, parser: "Parser") -> nodes.ExprStmt:
        # Consume the "do" token, remembering its line for error reporting,
        # then parse the expression tuple that follows it.
        tag = next(parser.stream)
        stmt = nodes.ExprStmt(lineno=tag.lineno)
        stmt.node = parser.parse_tuple()
        return stmt
|
|
||||||
|
|
||||||
|
|
||||||
class LoopControlExtension(Extension):
    """Adds break and continue to the template engine."""

    tags = {"break", "continue"}

    def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
        # The tag name itself decides which loop-control node to emit.
        tag = next(parser.stream)
        node_cls = nodes.Break if tag.value == "break" else nodes.Continue
        return node_cls(lineno=tag.lineno)
|
|
||||||
|
|
||||||
|
|
||||||
class DebugExtension(Extension):
    """A ``{% debug %}`` tag that dumps the available variables,
    filters, and tests.

    .. code-block:: html+jinja

        <pre>{% debug %}</pre>

    .. code-block:: text

        {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
                     ...,
                     'namespace': <class 'jinja2.utils.Namespace'>},
         'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
                     ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
         'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
                   ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}

    .. versionadded:: 2.11.0
    """

    tags = {"debug"}

    def parse(self, parser: "Parser") -> nodes.Output:
        # Anchor every generated node on the line of the "debug" name token.
        lineno = parser.stream.expect("name:debug").lineno
        ctx_ref = nodes.ContextReference()
        rendered = self.call_method("_render", [ctx_ref], lineno=lineno)
        return nodes.Output([rendered], lineno=lineno)

    def _render(self, context: Context) -> str:
        # Collect the full template context plus the registered filter and
        # test names, sorted for deterministic output.
        info = {
            "context": context.get_all(),
            "filters": sorted(self.environment.filters),
            "tests": sorted(self.environment.tests),
        }

        # Limit the depth since the intent is to show only the top few names.
        return pprint.pformat(info, depth=3, compact=True)
|
|
||||||
|
|
||||||
|
|
||||||
def extract_from_ast(
    ast: nodes.Template,
    gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
    babel_style: bool = True,
) -> t.Iterator[
    t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
]:
    """Extract localizable strings from the given template node. Per
    default this function returns matches in babel style that means non string
    parameters as well as keyword arguments are returned as `None`. This
    allows Babel to figure out what you really meant if you are using
    gettext functions that allow keyword arguments for placeholder expansion.
    If you don't want that behavior set the `babel_style` parameter to `False`
    which causes only strings to be returned and parameters are always stored
    in tuples. As a consequence invalid gettext calls (calls without a single
    string parameter or string parameters after non-string parameters) are
    skipped.

    This example explains the behavior:

    >>> from jinja2 import Environment
    >>> env = Environment()
    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
    >>> list(extract_from_ast(node))
    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
    >>> list(extract_from_ast(node, babel_style=False))
    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]

    For every string found this function yields a ``(lineno, function,
    message)`` tuple, where:

    * ``lineno`` is the number of the line on which the string was found,
    * ``function`` is the name of the ``gettext`` function used (if the
      string was extracted from embedded Python code), and
    * ``message`` is the string, or a tuple of strings for functions
      with multiple string arguments.

    This extraction function operates on the AST and is because of that unable
    to extract any comments. For comment support you have to use the babel
    extraction interface or extract comments yourself.
    """
    out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]

    for call in ast.find_all(nodes.Call):
        target = call.node

        # Only direct calls to one of the known gettext names count.
        if not isinstance(target, nodes.Name) or target.name not in gettext_functions:
            continue

        # One slot per positional argument: the literal value for constant
        # strings, ``None`` for anything dynamic.
        strings: t.List[t.Optional[str]] = [
            arg.value
            if isinstance(arg, nodes.Const) and isinstance(arg.value, str)
            else None
            for arg in call.args
        ]

        # Keyword and splat arguments can never be literal message strings.
        strings.extend(None for _ in call.kwargs)
        if call.dyn_args is not None:
            strings.append(None)
        if call.dyn_kwargs is not None:
            strings.append(None)

        if babel_style:
            out = strings[0] if len(strings) == 1 else tuple(strings)
        else:
            # Non-babel style drops the placeholders entirely and skips
            # calls that carry no literal string at all.
            out = tuple(s for s in strings if s is not None)
            if not out:
                continue

        yield call.lineno, target.name, out
|
|
||||||
|
|
||||||
|
|
||||||
class _CommentFinder:
|
|
||||||
"""Helper class to find comments in a token stream. Can only
|
|
||||||
find comments for gettext calls forwards. Once the comment
|
|
||||||
from line 4 is found, a comment for line 1 will not return a
|
|
||||||
usable value.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self, tokens: t.Sequence[t.Tuple[int, str, str]], comment_tags: t.Sequence[str]
|
|
||||||
) -> None:
|
|
||||||
self.tokens = tokens
|
|
||||||
self.comment_tags = comment_tags
|
|
||||||
self.offset = 0
|
|
||||||
self.last_lineno = 0
|
|
||||||
|
|
||||||
def find_backwards(self, offset: int) -> t.List[str]:
|
|
||||||
try:
|
|
||||||
for _, token_type, token_value in reversed(
|
|
||||||
self.tokens[self.offset : offset]
|
|
||||||
):
|
|
||||||
if token_type in ("comment", "linecomment"):
|
|
||||||
try:
|
|
||||||
prefix, comment = token_value.split(None, 1)
|
|
||||||
except ValueError:
|
|
||||||
continue
|
|
||||||
if prefix in self.comment_tags:
|
|
||||||
return [comment.rstrip()]
|
|
||||||
return []
|
|
||||||
finally:
|
|
||||||
self.offset = offset
|
|
||||||
|
|
||||||
def find_comments(self, lineno: int) -> t.List[str]:
|
|
||||||
if not self.comment_tags or self.last_lineno > lineno:
|
|
||||||
return []
|
|
||||||
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
|
|
||||||
if token_lineno > lineno:
|
|
||||||
return self.find_backwards(self.offset + idx)
|
|
||||||
return self.find_backwards(len(self.tokens))
|
|
||||||
|
|
||||||
|
|
||||||
def babel_extract(
    fileobj: t.BinaryIO,
    keywords: t.Sequence[str],
    comment_tags: t.Sequence[str],
    options: t.Dict[str, t.Any],
) -> t.Iterator[
    t.Tuple[
        int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]], t.List[str]
    ]
]:
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added.  If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
       keywords.  For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    .. versionchanged:: 2.5.1
       The `newstyle_gettext` flag can be set to `True` to enable newstyle
       gettext calls.

    .. versionchanged:: 2.7
       A `silent` option can now be provided.  If set to `False` template
       syntax errors are propagated instead of being ignored.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
             (comments will be empty currently)
    """

    def as_bool(opts: t.Mapping[str, str], key: str, default: bool = False) -> bool:
        # Babel passes every option as a string; accept the usual truthy
        # spellings.
        return opts.get(key, str(default)).lower() in {"1", "on", "yes", "true"}

    # Build the ordered set of extensions to enable; i18n is always on.
    extensions: t.Dict[t.Type[Extension], None] = {}
    for extension_name in options.get("extensions", "").split(","):
        extension_name = extension_name.strip()
        if extension_name:
            extensions[import_string(extension_name)] = None
    extensions.setdefault(InternationalizationExtension, None)

    silent = as_bool(options, "silent", True)
    # The positional arguments below must stay in Environment's signature
    # order.
    environment = Environment(
        options.get("block_start_string", defaults.BLOCK_START_STRING),
        options.get("block_end_string", defaults.BLOCK_END_STRING),
        options.get("variable_start_string", defaults.VARIABLE_START_STRING),
        options.get("variable_end_string", defaults.VARIABLE_END_STRING),
        options.get("comment_start_string", defaults.COMMENT_START_STRING),
        options.get("comment_end_string", defaults.COMMENT_END_STRING),
        options.get("line_statement_prefix") or defaults.LINE_STATEMENT_PREFIX,
        options.get("line_comment_prefix") or defaults.LINE_COMMENT_PREFIX,
        as_bool(options, "trim_blocks", defaults.TRIM_BLOCKS),
        as_bool(options, "lstrip_blocks", defaults.LSTRIP_BLOCKS),
        defaults.NEWLINE_SEQUENCE,
        as_bool(options, "keep_trailing_newline", defaults.KEEP_TRAILING_NEWLINE),
        tuple(extensions),
        cache_size=0,
        auto_reload=False,
    )

    if as_bool(options, "trimmed"):
        environment.policies["ext.i18n.trimmed"] = True
    if as_bool(options, "newstyle_gettext"):
        environment.newstyle_gettext = True  # type: ignore

    source = fileobj.read().decode(options.get("encoding", "utf-8"))
    try:
        node = environment.parse(source)
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError:
        if not silent:
            raise
        # skip templates with syntax errors
        return

    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
|
|
||||||
|
|
||||||
|
|
||||||
#: Nicer, short import names for the extension classes defined above, so
#: they can be referenced by a compact dotted path.
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
debug = DebugExtension
|
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,318 +0,0 @@
|
|||||||
import typing as t
|
|
||||||
|
|
||||||
from . import nodes
|
|
||||||
from .visitor import NodeVisitor
|
|
||||||
|
|
||||||
VAR_LOAD_PARAMETER = "param"
|
|
||||||
VAR_LOAD_RESOLVE = "resolve"
|
|
||||||
VAR_LOAD_ALIAS = "alias"
|
|
||||||
VAR_LOAD_UNDEFINED = "undefined"
|
|
||||||
|
|
||||||
|
|
||||||
def find_symbols(
    nodes: t.Iterable[nodes.Node], parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
    """Collect the symbols used by *nodes* into a fresh ``Symbols`` table
    nested under *parent_symbols*.
    """
    symbols = Symbols(parent=parent_symbols)
    node_visitor = FrameSymbolVisitor(symbols)

    for node in nodes:
        node_visitor.visit(node)

    return symbols
|
|
||||||
|
|
||||||
|
|
||||||
def symbols_for_node(
    node: nodes.Node, parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
    """Return a ``Symbols`` table for *node*, analyzed as a scope root."""
    symbols = Symbols(parent=parent_symbols)
    symbols.analyze_node(node)
    return symbols
|
|
||||||
|
|
||||||
|
|
||||||
class Symbols:
    """Symbol table for one compiler frame/scope.

    ``refs`` maps a template name to the unique identifier
    (``l_<level>_<name>``) it compiles to, ``loads`` records how each
    identifier obtains its value (parameter, resolve from context, alias
    of an outer identifier, or undefined), and ``stores`` holds the names
    assigned in this scope. Tables nest: lookups fall back to ``parent``
    so inner scopes can see outer names.
    """

    def __init__(
        self, parent: t.Optional["Symbols"] = None, level: t.Optional[int] = None
    ) -> None:
        # The nesting level namespaces the generated identifiers; derive
        # it from the parent chain unless it is given explicitly.
        if level is None:
            if parent is None:
                level = 0
            else:
                level = parent.level + 1

        self.level: int = level
        self.parent = parent
        # template name -> generated identifier
        self.refs: t.Dict[str, str] = {}
        # generated identifier -> (load instruction, argument)
        self.loads: t.Dict[str, t.Any] = {}
        # names assigned in this scope
        self.stores: t.Set[str] = set()

    def analyze_node(self, node: nodes.Node, **kwargs: t.Any) -> None:
        """Populate this table from *node*, treating it as a scope root."""
        visitor = RootVisitor(self)
        visitor.visit(node, **kwargs)

    def _define_ref(
        self, name: str, load: t.Optional[t.Tuple[str, t.Optional[str]]] = None
    ) -> str:
        """Create (or overwrite) the identifier for *name* in this scope,
        optionally recording its load instruction. Returns the identifier.
        """
        ident = f"l_{self.level}_{name}"
        self.refs[name] = ident
        if load is not None:
            self.loads[ident] = load
        return ident

    def find_load(self, target: str) -> t.Optional[t.Any]:
        """Return the load instruction for identifier *target*, searching
        parent scopes, or ``None`` if unknown.
        """
        if target in self.loads:
            return self.loads[target]

        if self.parent is not None:
            return self.parent.find_load(target)

        return None

    def find_ref(self, name: str) -> t.Optional[str]:
        """Return the identifier for *name*, searching parent scopes, or
        ``None`` if the name is unknown.
        """
        if name in self.refs:
            return self.refs[name]

        if self.parent is not None:
            return self.parent.find_ref(name)

        return None

    def ref(self, name: str) -> str:
        """Like :meth:`find_ref` but raises for unknown names."""
        rv = self.find_ref(name)
        if rv is None:
            raise AssertionError(
                "Tried to resolve a name to a reference that was"
                f" unknown to the frame ({name!r})"
            )
        return rv

    def copy(self) -> "Symbols":
        """Return a copy with independent ref/load/store tables (the
        parent link is shared).
        """
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.refs = self.refs.copy()
        rv.loads = self.loads.copy()
        rv.stores = self.stores.copy()
        return rv

    def store(self, name: str) -> None:
        """Record an assignment to *name* in this scope."""
        self.stores.add(name)

        # If we have not seen the name referenced yet, we need to figure
        # out what to set it to.
        if name not in self.refs:
            # If there is a parent scope we check if the name has a
            # reference there. If it does it means we might have to alias
            # to a variable there.
            if self.parent is not None:
                outer_ref = self.parent.find_ref(name)
                if outer_ref is not None:
                    self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
                    return

            # Otherwise we can just set it to undefined.
            self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))

    def declare_parameter(self, name: str) -> str:
        """Record *name* as a parameter of this scope and return the
        identifier it compiles to.
        """
        self.stores.add(name)
        return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))

    def load(self, name: str) -> None:
        """Record a read of *name*; names unknown to the whole chain are
        resolved from the template context.
        """
        if self.find_ref(name) is None:
            self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))

    def branch_update(self, branch_symbols: t.Sequence["Symbols"]) -> None:
        """Merge the tables of conditional branches into this one.

        Names stored in only some branches are re-loaded (aliased to an
        outer identifier or resolved from context) so they are defined no
        matter which branch actually ran.
        """
        # Count in how many branches each newly stored name appears.
        stores: t.Dict[str, int] = {}
        for branch in branch_symbols:
            for target in branch.stores:
                if target in self.stores:
                    continue
                stores[target] = stores.get(target, 0) + 1

        for sym in branch_symbols:
            self.refs.update(sym.refs)
            self.loads.update(sym.loads)
            self.stores.update(sym.stores)

        for name, branch_count in stores.items():
            if branch_count == len(branch_symbols):
                # Assigned on every branch; nothing to patch up.
                continue

            target = self.find_ref(name)  # type: ignore
            assert target is not None, "should not happen"

            if self.parent is not None:
                outer_target = self.parent.find_ref(name)
                if outer_target is not None:
                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
                    continue
            self.loads[target] = (VAR_LOAD_RESOLVE, name)

    def dump_stores(self) -> t.Dict[str, str]:
        """Map every name stored anywhere in this scope chain to the
        identifier visible from this scope.
        """
        rv: t.Dict[str, str] = {}
        node: t.Optional["Symbols"] = self

        while node is not None:
            for name in sorted(node.stores):
                if name not in rv:
                    rv[name] = self.find_ref(name)  # type: ignore

            node = node.parent

        return rv

    def dump_param_targets(self) -> t.Set[str]:
        """Return the identifiers of all parameter loads in this scope
        chain.
        """
        rv = set()
        node: t.Optional["Symbols"] = self

        while node is not None:
            # Bug fix: read the loads of the scope currently being walked
            # (``node``), not always ``self`` — mirroring ``dump_stores``.
            # Previously the parent walk never contributed any parameter
            # targets because ``self.loads`` was re-read each iteration.
            for target, (instr, _) in node.loads.items():
                if instr == VAR_LOAD_PARAMETER:
                    rv.add(target)

            node = node.parent

        return rv
|
|
||||||
|
|
||||||
|
|
||||||
class RootVisitor(NodeVisitor):
    """Entry-point visitor: hands the children of a scope-introducing node
    to a :class:`FrameSymbolVisitor` without recursing into nodes that get
    their own frame.
    """

    def __init__(self, symbols: "Symbols") -> None:
        self.sym_visitor = FrameSymbolVisitor(symbols)

    def _simple_visit(self, node: nodes.Node, **kwargs: t.Any) -> None:
        # Forward every direct child; the symbol visitor decides which
        # nodes actually matter.
        for child in node.iter_child_nodes():
            self.sym_visitor.visit(child)

    visit_Template = _simple_visit
    visit_Block = _simple_visit
    visit_Macro = _simple_visit
    visit_FilterBlock = _simple_visit
    visit_Scope = _simple_visit
    visit_If = _simple_visit
    visit_ScopedEvalContextModifier = _simple_visit

    def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
        # Only the body statements are part of the new frame.
        for stmt in node.body:
            self.sym_visitor.visit(stmt)

    # Overlay scopes expose exactly their body, like assign blocks.
    visit_OverlayScope = visit_AssignBlock

    def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
        # The "call" expression is evaluated in the outer frame, so it is
        # excluded here.
        for child in node.iter_child_nodes(exclude=("call",)):
            self.sym_visitor.visit(child)

    def visit_For(
        self, node: nodes.For, for_branch: str = "body", **kwargs: t.Any
    ) -> None:
        # A loop is analyzed one branch at a time.
        if for_branch == "test":
            self.sym_visitor.visit(node.target, store_as_param=True)
            if node.test is not None:
                self.sym_visitor.visit(node.test)
            return

        if for_branch == "body":
            self.sym_visitor.visit(node.target, store_as_param=True)
            branch = node.body
        elif for_branch == "else":
            branch = node.else_
        else:
            raise RuntimeError("Unknown for branch")

        for stmt in branch or ():
            self.sym_visitor.visit(stmt)

    def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
        for target in node.targets:
            self.sym_visitor.visit(target)
        for stmt in node.body:
            self.sym_visitor.visit(stmt)

    def generic_visit(self, node: nodes.Node, *args: t.Any, **kwargs: t.Any) -> None:
        raise NotImplementedError(f"Cannot find symbols for {type(node).__name__!r}")
|
|
||||||
|
|
||||||
|
|
||||||
class FrameSymbolVisitor(NodeVisitor):
    """A visitor for `Frame.inspect`: feeds name definitions and reads
    into a :class:`Symbols` table without descending into nested scopes.
    """

    def __init__(self, symbols: "Symbols") -> None:
        self.symbols = symbols

    def visit_Name(
        self, node: nodes.Name, store_as_param: bool = False, **kwargs: t.Any
    ) -> None:
        """All assignments to names go through this function."""
        # ``store_as_param`` forces parameter semantics regardless of the
        # context recorded on the node itself.
        ctx = "param" if store_as_param else node.ctx
        if ctx == "param":
            self.symbols.declare_parameter(node.name)
        elif ctx == "store":
            self.symbols.store(node.name)
        elif ctx == "load":
            self.symbols.load(node.name)

    def visit_NSRef(self, node: nodes.NSRef, **kwargs: t.Any) -> None:
        # Assigning to a namespace attribute reads the namespace object.
        self.symbols.load(node.name)

    def visit_If(self, node: nodes.If, **kwargs: t.Any) -> None:
        self.visit(node.test, **kwargs)
        outer = self.symbols

        def branch_table(branch: t.Iterable[nodes.Node]) -> "Symbols":
            # Each branch is analyzed against its own copy of the outer
            # table so sibling branches cannot contaminate each other.
            self.symbols = table = outer.copy()

            for subnode in branch:
                self.visit(subnode, **kwargs)

            self.symbols = outer
            return table

        outer.branch_update(
            [
                branch_table(node.body),
                branch_table(node.elif_),
                branch_table(node.else_ or ()),
            ]
        )

    def visit_Macro(self, node: nodes.Macro, **kwargs: t.Any) -> None:
        self.symbols.store(node.name)

    def visit_Import(self, node: nodes.Import, **kwargs: t.Any) -> None:
        self.generic_visit(node, **kwargs)
        self.symbols.store(node.target)

    def visit_FromImport(self, node: nodes.FromImport, **kwargs: t.Any) -> None:
        self.generic_visit(node, **kwargs)

        for name in node.names:
            # A tuple is ``(name, alias)``; only the alias becomes visible.
            self.symbols.store(name[1] if isinstance(name, tuple) else name)

    def visit_Assign(self, node: nodes.Assign, **kwargs: t.Any) -> None:
        """Visit assignments in the correct order."""
        # The right-hand side is evaluated before the target is bound.
        self.visit(node.node, **kwargs)
        self.visit(node.target, **kwargs)

    def visit_For(self, node: nodes.For, **kwargs: t.Any) -> None:
        """Visiting stops at for blocks. However the block sequence
        is visited as part of the outer scope.
        """
        self.visit(node.iter, **kwargs)

    def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
        self.visit(node.call, **kwargs)

    def visit_FilterBlock(self, node: nodes.FilterBlock, **kwargs: t.Any) -> None:
        self.visit(node.filter, **kwargs)

    def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
        # Only the assigned values belong to the outer scope.
        for value in node.values:
            self.visit(value)

    def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
        """Stop visiting at block assigns."""
        self.visit(node.target, **kwargs)

    def visit_Scope(self, node: nodes.Scope, **kwargs: t.Any) -> None:
        """Stop visiting at scopes."""

    def visit_Block(self, node: nodes.Block, **kwargs: t.Any) -> None:
        """Stop visiting at blocks."""

    def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
        """Do not visit into overlay scopes."""
|
|
||||||
@ -1,866 +0,0 @@
|
|||||||
"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
|
|
||||||
is used to do some preprocessing. It filters out invalid operators like
|
|
||||||
the bitshift operators we don't allow in templates. It separates
|
|
||||||
template code and python code in expressions.
|
|
||||||
"""
|
|
||||||
import re
|
|
||||||
import typing as t
|
|
||||||
from ast import literal_eval
|
|
||||||
from collections import deque
|
|
||||||
from sys import intern
|
|
||||||
|
|
||||||
from ._identifier import pattern as name_re
|
|
||||||
from .exceptions import TemplateSyntaxError
|
|
||||||
from .utils import LRUCache
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
import typing_extensions as te
|
|
||||||
from .environment import Environment
|
|
||||||
|
|
||||||
# cache for the lexers. Exists in order to be able to have multiple
|
|
||||||
# environments with the same lexer
|
|
||||||
_lexer_cache: t.MutableMapping[t.Tuple, "Lexer"] = LRUCache(50) # type: ignore
|
|
||||||
|
|
||||||
# static regular expressions
|
|
||||||
whitespace_re = re.compile(r"\s+")
|
|
||||||
newline_re = re.compile(r"(\r\n|\r|\n)")
|
|
||||||
string_re = re.compile(
|
|
||||||
r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
|
|
||||||
)
|
|
||||||
integer_re = re.compile(
|
|
||||||
r"""
|
|
||||||
(
|
|
||||||
0b(_?[0-1])+ # binary
|
|
||||||
|
|
|
||||||
0o(_?[0-7])+ # octal
|
|
||||||
|
|
|
||||||
0x(_?[\da-f])+ # hex
|
|
||||||
|
|
|
||||||
[1-9](_?\d)* # decimal
|
|
||||||
|
|
|
||||||
0(_?0)* # decimal zero
|
|
||||||
)
|
|
||||||
""",
|
|
||||||
re.IGNORECASE | re.VERBOSE,
|
|
||||||
)
|
|
||||||
float_re = re.compile(
|
|
||||||
r"""
|
|
||||||
(?<!\.) # doesn't start with a .
|
|
||||||
(\d+_)*\d+ # digits, possibly _ separated
|
|
||||||
(
|
|
||||||
(\.(\d+_)*\d+)? # optional fractional part
|
|
||||||
e[+\-]?(\d+_)*\d+ # exponent part
|
|
||||||
|
|
|
||||||
\.(\d+_)*\d+ # required fractional part
|
|
||||||
)
|
|
||||||
""",
|
|
||||||
re.IGNORECASE | re.VERBOSE,
|
|
||||||
)
|
|
||||||
|
|
||||||
# internal the tokens and keep references to them
|
|
||||||
TOKEN_ADD = intern("add")
|
|
||||||
TOKEN_ASSIGN = intern("assign")
|
|
||||||
TOKEN_COLON = intern("colon")
|
|
||||||
TOKEN_COMMA = intern("comma")
|
|
||||||
TOKEN_DIV = intern("div")
|
|
||||||
TOKEN_DOT = intern("dot")
|
|
||||||
TOKEN_EQ = intern("eq")
|
|
||||||
TOKEN_FLOORDIV = intern("floordiv")
|
|
||||||
TOKEN_GT = intern("gt")
|
|
||||||
TOKEN_GTEQ = intern("gteq")
|
|
||||||
TOKEN_LBRACE = intern("lbrace")
|
|
||||||
TOKEN_LBRACKET = intern("lbracket")
|
|
||||||
TOKEN_LPAREN = intern("lparen")
|
|
||||||
TOKEN_LT = intern("lt")
|
|
||||||
TOKEN_LTEQ = intern("lteq")
|
|
||||||
TOKEN_MOD = intern("mod")
|
|
||||||
TOKEN_MUL = intern("mul")
|
|
||||||
TOKEN_NE = intern("ne")
|
|
||||||
TOKEN_PIPE = intern("pipe")
|
|
||||||
TOKEN_POW = intern("pow")
|
|
||||||
TOKEN_RBRACE = intern("rbrace")
|
|
||||||
TOKEN_RBRACKET = intern("rbracket")
|
|
||||||
TOKEN_RPAREN = intern("rparen")
|
|
||||||
TOKEN_SEMICOLON = intern("semicolon")
|
|
||||||
TOKEN_SUB = intern("sub")
|
|
||||||
TOKEN_TILDE = intern("tilde")
|
|
||||||
TOKEN_WHITESPACE = intern("whitespace")
|
|
||||||
TOKEN_FLOAT = intern("float")
|
|
||||||
TOKEN_INTEGER = intern("integer")
|
|
||||||
TOKEN_NAME = intern("name")
|
|
||||||
TOKEN_STRING = intern("string")
|
|
||||||
TOKEN_OPERATOR = intern("operator")
|
|
||||||
TOKEN_BLOCK_BEGIN = intern("block_begin")
|
|
||||||
TOKEN_BLOCK_END = intern("block_end")
|
|
||||||
TOKEN_VARIABLE_BEGIN = intern("variable_begin")
|
|
||||||
TOKEN_VARIABLE_END = intern("variable_end")
|
|
||||||
TOKEN_RAW_BEGIN = intern("raw_begin")
|
|
||||||
TOKEN_RAW_END = intern("raw_end")
|
|
||||||
TOKEN_COMMENT_BEGIN = intern("comment_begin")
|
|
||||||
TOKEN_COMMENT_END = intern("comment_end")
|
|
||||||
TOKEN_COMMENT = intern("comment")
|
|
||||||
TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
|
|
||||||
TOKEN_LINESTATEMENT_END = intern("linestatement_end")
|
|
||||||
TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
|
|
||||||
TOKEN_LINECOMMENT_END = intern("linecomment_end")
|
|
||||||
TOKEN_LINECOMMENT = intern("linecomment")
|
|
||||||
TOKEN_DATA = intern("data")
|
|
||||||
TOKEN_INITIAL = intern("initial")
|
|
||||||
TOKEN_EOF = intern("eof")
|
|
||||||
|
|
||||||
# bind operators to token types
|
|
||||||
operators = {
|
|
||||||
"+": TOKEN_ADD,
|
|
||||||
"-": TOKEN_SUB,
|
|
||||||
"/": TOKEN_DIV,
|
|
||||||
"//": TOKEN_FLOORDIV,
|
|
||||||
"*": TOKEN_MUL,
|
|
||||||
"%": TOKEN_MOD,
|
|
||||||
"**": TOKEN_POW,
|
|
||||||
"~": TOKEN_TILDE,
|
|
||||||
"[": TOKEN_LBRACKET,
|
|
||||||
"]": TOKEN_RBRACKET,
|
|
||||||
"(": TOKEN_LPAREN,
|
|
||||||
")": TOKEN_RPAREN,
|
|
||||||
"{": TOKEN_LBRACE,
|
|
||||||
"}": TOKEN_RBRACE,
|
|
||||||
"==": TOKEN_EQ,
|
|
||||||
"!=": TOKEN_NE,
|
|
||||||
">": TOKEN_GT,
|
|
||||||
">=": TOKEN_GTEQ,
|
|
||||||
"<": TOKEN_LT,
|
|
||||||
"<=": TOKEN_LTEQ,
|
|
||||||
"=": TOKEN_ASSIGN,
|
|
||||||
".": TOKEN_DOT,
|
|
||||||
":": TOKEN_COLON,
|
|
||||||
"|": TOKEN_PIPE,
|
|
||||||
",": TOKEN_COMMA,
|
|
||||||
";": TOKEN_SEMICOLON,
|
|
||||||
}
|
|
||||||
|
|
||||||
reverse_operators = {v: k for k, v in operators.items()}
|
|
||||||
assert len(operators) == len(reverse_operators), "operators dropped"
|
|
||||||
operator_re = re.compile(
|
|
||||||
f"({'|'.join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))})"
|
|
||||||
)
|
|
||||||
|
|
||||||
ignored_tokens = frozenset(
|
|
||||||
[
|
|
||||||
TOKEN_COMMENT_BEGIN,
|
|
||||||
TOKEN_COMMENT,
|
|
||||||
TOKEN_COMMENT_END,
|
|
||||||
TOKEN_WHITESPACE,
|
|
||||||
TOKEN_LINECOMMENT_BEGIN,
|
|
||||||
TOKEN_LINECOMMENT_END,
|
|
||||||
TOKEN_LINECOMMENT,
|
|
||||||
]
|
|
||||||
)
|
|
||||||
ignore_if_empty = frozenset(
|
|
||||||
[TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def _describe_token_type(token_type: str) -> str:
    """Return a human readable description of a token type."""
    # Operator tokens are described by their literal source text.
    op = reverse_operators.get(token_type)
    if op is not None:
        return op

    # Structural tokens get a spelled-out name; anything else describes
    # itself.
    return {
        TOKEN_COMMENT_BEGIN: "begin of comment",
        TOKEN_COMMENT_END: "end of comment",
        TOKEN_COMMENT: "comment",
        TOKEN_LINECOMMENT: "comment",
        TOKEN_BLOCK_BEGIN: "begin of statement block",
        TOKEN_BLOCK_END: "end of statement block",
        TOKEN_VARIABLE_BEGIN: "begin of print statement",
        TOKEN_VARIABLE_END: "end of print statement",
        TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
        TOKEN_LINESTATEMENT_END: "end of line statement",
        TOKEN_DATA: "template data / text",
        TOKEN_EOF: "end of template",
    }.get(token_type, token_type)
|
|
||||||
|
|
||||||
|
|
||||||
def describe_token(token: "Token") -> str:
    """Returns a description of the token."""
    # Name tokens describe themselves by their value (e.g. "for").
    if token.type == TOKEN_NAME:
        return token.value
    return _describe_token_type(token.type)
|
|
||||||
|
|
||||||
|
|
||||||
def describe_token_expr(expr: str) -> str:
    """Like `describe_token` but for token expressions."""
    # An expression is either a bare type ("integer") or "type:value"
    # ("name:for"). With no colon, partition leaves the whole string in
    # token_type, matching the bare-type case.
    token_type, sep, value = expr.partition(":")

    if sep and token_type == TOKEN_NAME:
        return value

    return _describe_token_type(token_type)
|
|
||||||
|
|
||||||
|
|
||||||
def count_newlines(value: str) -> int:
    """Count the number of newline characters in the string. This is
    useful for extensions that filter a stream.
    """
    # newline_re treats \r\n, \r and \n each as one newline.
    return sum(1 for _ in newline_re.finditer(value))
|
|
||||||
|
|
||||||
|
|
||||||
def compile_rules(environment: "Environment") -> t.List[t.Tuple[str, str]]:
    """Compiles all the rules from the environment into a list of rules."""
    escape = re.escape

    # Each entry is (priority, token type, pattern). The priority is the
    # delimiter length so that longer delimiters win when several could
    # match at the same position.
    rules = [
        (
            len(environment.comment_start_string),
            TOKEN_COMMENT_BEGIN,
            escape(environment.comment_start_string),
        ),
        (
            len(environment.block_start_string),
            TOKEN_BLOCK_BEGIN,
            escape(environment.block_start_string),
        ),
        (
            len(environment.variable_start_string),
            TOKEN_VARIABLE_BEGIN,
            escape(environment.variable_start_string),
        ),
    ]

    if environment.line_statement_prefix is not None:
        prefix = environment.line_statement_prefix
        # Line statements only start after optional leading whitespace.
        rules.append(
            (len(prefix), TOKEN_LINESTATEMENT_BEGIN, r"^[ \t\v]*" + escape(prefix))
        )

    if environment.line_comment_prefix is not None:
        prefix = environment.line_comment_prefix
        # Line comments may start at line begin or after non-whitespace,
        # separated by horizontal whitespace only.
        rules.append(
            (
                len(prefix),
                TOKEN_LINECOMMENT_BEGIN,
                r"(?:^|(?<=\S))[^\S\r\n]*" + escape(prefix),
            )
        )

    # Longest delimiter first; the priority element is dropped from the result.
    rules.sort(reverse=True)
    return [(token, pattern) for _, token, pattern in rules]
class Failure:
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(
        self, message: str, cls: t.Type[TemplateSyntaxError] = TemplateSyntaxError
    ) -> None:
        # The message and exception class are stored so the error can be
        # raised lazily, only when the lexer actually reaches this rule.
        self.message = message
        self.error_class = cls

    def __call__(self, lineno: int, filename: str) -> "te.NoReturn":
        # Called by the lexer with the position information of the failure.
        raise self.error_class(self.message, lineno, filename)
class Token(t.NamedTuple):
    """A single token emitted by the lexer: line number, token type and
    the matched string value.
    """

    lineno: int
    type: str
    value: str

    def __str__(self) -> str:
        return describe_token(self)

    def test(self, expr: str) -> bool:
        """Test a token against a token expression. This can either be a
        token type or ``'token_type:token_value'``. This can only test
        against string values and types.
        """
        # Plain string comparison: test_any is usually passed an iterable
        # of not-interned strings, so identity checks would not be reliable.
        if expr == self.type:
            return True

        if ":" not in expr:
            return False

        return expr.split(":", 1) == [self.type, self.value]

    def test_any(self, *iterable: str) -> bool:
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True

        return False
class TokenStreamIterator:
    """The iterator for tokenstreams. Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream: "TokenStream") -> None:
        self.stream = stream

    def __iter__(self) -> "TokenStreamIterator":
        return self

    def __next__(self) -> "Token":
        current = self.stream.current

        if current.type is not TOKEN_EOF:
            # Advance the underlying stream and hand out the token that
            # was current before the advance.
            next(self.stream)
            return current

        # Reaching EOF closes the stream and ends iteration.
        self.stream.close()
        raise StopIteration
class TokenStream:
    """A token stream is an iterable that yields :class:`Token`\\s. The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead. The current active token is stored as :attr:`current`.
    """

    def __init__(
        self,
        generator: t.Iterable[Token],
        name: t.Optional[str],
        filename: t.Optional[str],
    ):
        self._iter = iter(generator)
        # Tokens pushed back via :meth:`push`; consumed before the iterator.
        self._pushed: "te.Deque[Token]" = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        # Start with a synthetic initial token, then advance immediately so
        # that ``current`` is the first real token of the stream.
        self.current = Token(1, TOKEN_INITIAL, "")
        next(self)

    def __iter__(self) -> TokenStreamIterator:
        return TokenStreamIterator(self)

    def __bool__(self) -> bool:
        # True while anything is left to consume; pushed-back tokens count,
        # the EOF sentinel does not.
        return bool(self._pushed) or self.current.type is not TOKEN_EOF

    @property
    def eos(self) -> bool:
        """Are we at the end of the stream?"""
        return not self

    def push(self, token: Token) -> None:
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self) -> Token:
        """Look at the next token."""
        # Advance, remember the token after the old current one, push it
        # back, then restore the old current token so the stream position
        # is unchanged for the caller.
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n: int = 1) -> None:
        """Got n tokens ahead."""
        for _ in range(n):
            next(self)

    def next_if(self, expr: str) -> t.Optional[Token]:
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

        return None

    def skip_if(self, expr: str) -> bool:
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self) -> Token:
        """Go one token ahead and return the old one.

        Use the built-in :func:`next` instead of calling this directly.
        """
        rv = self.current

        # Pushed-back tokens win over the underlying iterator; once EOF is
        # reached the stream stays on the EOF sentinel forever.
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()

        return rv

    def close(self) -> None:
        """Close the stream."""
        # Replace the current token with an EOF sentinel on the same line
        # and detach the source iterator.
        self.current = Token(self.current.lineno, TOKEN_EOF, "")
        self._iter = iter(())
        self.closed = True

    def expect(self, expr: str) -> Token:
        """Expect a given token type and return it. This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)

            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError(
                    f"unexpected end of template, expected {expr!r}.",
                    self.current.lineno,
                    self.name,
                    self.filename,
                )

            raise TemplateSyntaxError(
                f"expected token {expr!r}, got {describe_token(self.current)!r}",
                self.current.lineno,
                self.name,
                self.filename,
            )

        return next(self)
def get_lexer(environment: "Environment") -> "Lexer":
    """Return a lexer which is probably cached."""
    # Every environment setting that influences lexing participates in the
    # cache key, so environments with identical syntax share one lexer.
    key = (
        environment.block_start_string,
        environment.block_end_string,
        environment.variable_start_string,
        environment.variable_end_string,
        environment.comment_start_string,
        environment.comment_end_string,
        environment.line_statement_prefix,
        environment.line_comment_prefix,
        environment.trim_blocks,
        environment.lstrip_blocks,
        environment.newline_sequence,
        environment.keep_trailing_newline,
    )
    cached = _lexer_cache.get(key)

    if cached is not None:
        return cached

    lexer = Lexer(environment)
    _lexer_cache[key] = lexer
    return lexer
class OptionalLStrip(tuple):
    """A special tuple for marking a point in the state that can have
    lstrip applied.
    """

    __slots__ = ()

    # Even though it looks like a no-op, creating instances fails
    # without this: plain ``tuple(*members)`` would reject multiple
    # positional arguments.
    def __new__(cls, *members, **kwargs):  # type: ignore
        return tuple.__new__(cls, members)
class _Rule(t.NamedTuple):
    """One entry of a lexer state table."""

    pattern: t.Pattern[str]  # compiled regex tried at the current position
    tokens: t.Union[str, t.Tuple[str, ...], t.Tuple[Failure]]  # token type(s) emitted, or a Failure
    command: t.Optional[str]  # state change: "#pop", "#bygroup", a state name, or None
class Lexer:
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment: "Environment") -> None:
        # shortcuts
        e = re.escape

        # All rule patterns are compiled with MULTILINE and DOTALL so "."
        # spans newlines and "^"/"$" match at every line boundary.
        def c(x: str) -> t.Pattern[str]:
            return re.compile(x, re.M | re.S)

        # lexing rules for tags
        tag_rules: t.List[_Rule] = [
            _Rule(whitespace_re, TOKEN_WHITESPACE, None),
            _Rule(float_re, TOKEN_FLOAT, None),
            _Rule(integer_re, TOKEN_INTEGER, None),
            _Rule(name_re, TOKEN_NAME, None),
            _Rule(string_re, TOKEN_STRING, None),
            _Rule(operator_re, TOKEN_OPERATOR, None),
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        block_start_re = e(environment.block_start_string)
        block_end_re = e(environment.block_end_string)
        comment_end_re = e(environment.comment_end_string)
        variable_end_re = e(environment.variable_end_string)

        # block suffix if trimming is enabled
        block_suffix_re = "\\n?" if environment.trim_blocks else ""

        self.lstrip_blocks = environment.lstrip_blocks

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # {% raw %} is matched as part of the root rule so its content can
        # be skipped without entering the normal block state.
        root_raw_re = (
            rf"(?P<raw_begin>{block_start_re}(\-|\+|)\s*raw\s*"
            rf"(?:\-{block_end_re}\s*|{block_end_re}))"
        )
        root_parts_re = "|".join(
            [root_raw_re] + [rf"(?P<{n}>{r}(\-|\+|))" for n, r in root_tag_rules]
        )

        # global lexing rules
        self.rules: t.Dict[str, t.List[_Rule]] = {
            "root": [
                # directives
                _Rule(
                    c(rf"(.*?)(?:{root_parts_re})"),
                    OptionalLStrip(TOKEN_DATA, "#bygroup"),  # type: ignore
                    "#bygroup",
                ),
                # data
                _Rule(c(".+"), TOKEN_DATA, None),
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                _Rule(
                    c(
                        rf"(.*?)((?:\+{comment_end_re}|\-{comment_end_re}\s*"
                        rf"|{comment_end_re}{block_suffix_re}))"
                    ),
                    (TOKEN_COMMENT, TOKEN_COMMENT_END),
                    "#pop",
                ),
                _Rule(c(r"(.)"), (Failure("Missing end of comment tag"),), None),
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                _Rule(
                    c(
                        rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
                        rf"|{block_end_re}{block_suffix_re})"
                    ),
                    TOKEN_BLOCK_END,
                    "#pop",
                ),
            ]
            + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                _Rule(
                    c(rf"\-{variable_end_re}\s*|{variable_end_re}"),
                    TOKEN_VARIABLE_END,
                    "#pop",
                )
            ]
            + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                _Rule(
                    c(
                        rf"(.*?)((?:{block_start_re}(\-|\+|))\s*endraw\s*"
                        rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
                        rf"|{block_end_re}{block_suffix_re}))"
                    ),
                    OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),  # type: ignore
                    "#pop",
                ),
                _Rule(c(r"(.)"), (Failure("Missing end of raw directive"),), None),
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                _Rule(c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
            ]
            + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                _Rule(
                    c(r"(.*?)()(?=\n|$)"),
                    (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
                    "#pop",
                )
            ],
        }

    def _normalize_newlines(self, value: str) -> str:
        """Replace all newlines with the configured sequence in strings
        and template data.
        """
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(
        self,
        source: str,
        name: t.Optional[str] = None,
        filename: t.Optional[str] = None,
        state: t.Optional[str] = None,
    ) -> TokenStream:
        """Calls tokeniter + tokenize and wraps it in a token stream."""
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(
        self,
        stream: t.Iterable[t.Tuple[int, str, str]],
        name: t.Optional[str] = None,
        filename: t.Optional[str] = None,
    ) -> t.Iterator[Token]:
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value_str in stream:
            if token in ignored_tokens:
                continue

            value: t.Any = value_str

            if token == TOKEN_LINESTATEMENT_BEGIN:
                token = TOKEN_BLOCK_BEGIN
            elif token == TOKEN_LINESTATEMENT_END:
                token = TOKEN_BLOCK_END
            # we are not interested in those tokens in the parser
            elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
                continue
            elif token == TOKEN_DATA:
                value = self._normalize_newlines(value_str)
            elif token == "keyword":
                token = value_str
            elif token == TOKEN_NAME:
                value = value_str

                if not value.isidentifier():
                    raise TemplateSyntaxError(
                        "Invalid character in identifier", lineno, name, filename
                    )
            elif token == TOKEN_STRING:
                # try to unescape string
                try:
                    value = (
                        self._normalize_newlines(value_str[1:-1])
                        .encode("ascii", "backslashreplace")
                        .decode("unicode-escape")
                    )
                except Exception as e:
                    msg = str(e).split(":")[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename) from e
            elif token == TOKEN_INTEGER:
                # base 0 supports 0x/0o/0b prefixes; "_" separators removed first
                value = int(value_str.replace("_", ""), 0)
            elif token == TOKEN_FLOAT:
                # remove all "_" first to support more Python versions
                value = literal_eval(value_str.replace("_", ""))
            elif token == TOKEN_OPERATOR:
                token = operators[value_str]

            yield Token(lineno, token, value)

    def tokeniter(
        self,
        source: str,
        name: t.Optional[str],
        filename: t.Optional[str] = None,
        state: t.Optional[str] = None,
    ) -> t.Iterator[t.Tuple[int, str, str]]:
        """This method tokenizes the text and returns the tokens in a
        generator. Use this method if you just want to tokenize a template.

        .. versionchanged:: 3.0
            Only ``\\n``, ``\\r\\n`` and ``\\r`` are treated as line
            breaks.
        """
        # newline_re.split alternates text and separator groups; [::2]
        # keeps only the text between line breaks.
        lines = newline_re.split(source)[::2]

        if not self.keep_trailing_newline and lines[-1] == "":
            del lines[-1]

        source = "\n".join(lines)
        pos = 0
        lineno = 1
        stack = ["root"]

        if state is not None and state != "root":
            assert state in ("variable", "block"), "invalid state"
            stack.append(state + "_begin")

        statetokens = self.rules[stack[-1]]
        source_length = len(source)
        balancing_stack: t.List[str] = []
        newlines_stripped = 0
        line_starting = True

        while True:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)

                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and tokens in (
                    TOKEN_VARIABLE_END,
                    TOKEN_BLOCK_END,
                    TOKEN_LINESTATEMENT_END,
                ):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    groups: t.Sequence[str] = m.groups()

                    if isinstance(tokens, OptionalLStrip):
                        # Rule supports lstrip. Match will look like
                        # text, block type, whitespace control, type, control, ...
                        text = groups[0]
                        # Skipping the text and first type, every other group is the
                        # whitespace control for each type. One of the groups will be
                        # -, +, or empty string instead of None.
                        strip_sign = next(g for g in groups[2::2] if g is not None)

                        if strip_sign == "-":
                            # Strip all whitespace between the text and the tag.
                            stripped = text.rstrip()
                            newlines_stripped = text[len(stripped) :].count("\n")
                            groups = [stripped, *groups[1:]]
                        elif (
                            # Not marked for preserving whitespace.
                            strip_sign != "+"
                            # lstrip is enabled.
                            and self.lstrip_blocks
                            # Not a variable expression.
                            and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
                        ):
                            # The start of text between the last newline and the tag.
                            l_pos = text.rfind("\n") + 1

                            if l_pos > 0 or line_starting:
                                # If there's only whitespace between the newline and the
                                # tag, strip it.
                                if whitespace_re.fullmatch(text, l_pos):
                                    groups = [text[:l_pos], *groups[1:]]

                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == "#bygroup":
                            for key, value in m.groupdict().items():
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count("\n")
                                    break
                            else:
                                raise RuntimeError(
                                    f"{regex!r} wanted to resolve the token dynamically"
                                    " but no group matched"
                                )
                        # normal group
                        else:
                            data = groups[idx]

                            if data or token not in ignore_if_empty:
                                yield lineno, token, data

                            lineno += data.count("\n") + newlines_stripped
                            newlines_stripped = 0

                # strings as token just are yielded as it.
                else:
                    data = m.group()

                    # update brace/parentheses balance
                    if tokens == TOKEN_OPERATOR:
                        if data == "{":
                            balancing_stack.append("}")
                        elif data == "(":
                            balancing_stack.append(")")
                        elif data == "[":
                            balancing_stack.append("]")
                        elif data in ("}", ")", "]"):
                            if not balancing_stack:
                                raise TemplateSyntaxError(
                                    f"unexpected '{data}'", lineno, name, filename
                                )

                            expected_op = balancing_stack.pop()

                            if expected_op != data:
                                raise TemplateSyntaxError(
                                    f"unexpected '{data}', expected '{expected_op}'",
                                    lineno,
                                    name,
                                    filename,
                                )

                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data

                    lineno += data.count("\n")

                line_starting = m.group()[-1:] == "\n"
                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == "#pop":
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == "#bygroup":
                        for key, value in m.groupdict().items():
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError(
                                f"{regex!r} wanted to resolve the new state dynamically"
                                f" but no group matched"
                            )
                    # direct state name given
                    else:
                        stack.append(new_state)

                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError(
                        f"{regex!r} yielded empty string without stack change"
                    )

                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return

                # something went wrong
                raise TemplateSyntaxError(
                    f"unexpected char {source[pos]!r} at {pos}", lineno, name, filename
                )
@ -1,661 +0,0 @@
|
|||||||
"""API and implementations for loading templates from different data
|
|
||||||
sources.
|
|
||||||
"""
|
|
||||||
import importlib.util
|
|
||||||
import os
|
|
||||||
import posixpath
|
|
||||||
import sys
|
|
||||||
import typing as t
|
|
||||||
import weakref
|
|
||||||
import zipimport
|
|
||||||
from collections import abc
|
|
||||||
from hashlib import sha1
|
|
||||||
from importlib import import_module
|
|
||||||
from types import ModuleType
|
|
||||||
|
|
||||||
from .exceptions import TemplateNotFound
|
|
||||||
from .utils import internalcode
|
|
||||||
from .utils import open_if_exists
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
from .environment import Environment
|
|
||||||
from .environment import Template
|
|
||||||
|
|
||||||
|
|
||||||
def split_template_path(template: str) -> t.List[str]:
    """Split a path into segments and perform a sanity check. If it detects
    '..' in the path it will raise a `TemplateNotFound` error.
    """
    segments: t.List[str] = []

    for segment in template.split("/"):
        # Reject anything that could escape the search path: OS path
        # separators smuggled inside a segment, or a parent reference.
        unsafe = (
            os.path.sep in segment
            or (os.path.altsep and os.path.altsep in segment)
            or segment == os.path.pardir
        )

        if unsafe:
            raise TemplateNotFound(template)

        # Empty segments ("//") and "." are simply dropped.
        if segment and segment != ".":
            segments.append(segment)

    return segments
class BaseLoader:
    """Baseclass for all loaders. Subclass this and override `get_source` to
    implement a custom loading mechanism. The environment provides a
    `get_template` method that calls the loader's `load` method to get the
    :class:`Template` object.

    A very basic example for a loader that looks up templates on the file
    system could look like this::

        from jinja2 import BaseLoader, TemplateNotFound
        from os.path import join, exists, getmtime

        class MyLoader(BaseLoader):

            def __init__(self, path):
                self.path = path

            def get_source(self, environment, template):
                path = join(self.path, template)
                if not exists(path):
                    raise TemplateNotFound(template)
                mtime = getmtime(path)
                with open(path) as f:
                    source = f.read()
                return source, path, lambda: mtime == getmtime(path)
    """

    #: if set to `False` it indicates that the loader cannot provide access
    #: to the source of templates.
    #:
    #: .. versionadded:: 2.4
    has_source_access = True

    def get_source(
        self, environment: "Environment", template: str
    ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
        """Get the template source, filename and reload helper for a template.
        It's passed the environment and template name and has to return a
        tuple in the form ``(source, filename, uptodate)`` or raise a
        `TemplateNotFound` error if it can't locate the template.

        The source part of the returned tuple must be the source of the
        template as a string. The filename should be the name of the
        file on the filesystem if it was loaded from there, otherwise
        ``None``. The filename is used by Python for the tracebacks
        if no loader extension is used.

        The last item in the tuple is the `uptodate` function. If auto
        reloading is enabled it's always called to check if the template
        changed. No arguments are passed so the function must store the
        old state somewhere (for example in a closure). If it returns `False`
        the template will be reloaded.
        """
        # Default implementation always fails: subclasses either override
        # this or declare that they cannot provide source access at all.
        if not self.has_source_access:
            raise RuntimeError(
                f"{type(self).__name__} cannot provide access to the source"
            )
        raise TemplateNotFound(template)

    def list_templates(self) -> t.List[str]:
        """Iterates over all templates. If the loader does not support that
        it should raise a :exc:`TypeError` which is the default behavior.
        """
        raise TypeError("this loader cannot iterate over all templates")

    @internalcode
    def load(
        self,
        environment: "Environment",
        name: str,
        globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
    ) -> "Template":
        """Loads a template. This method looks up the template in the cache
        or loads one by calling :meth:`get_source`. Subclasses should not
        override this method as loaders working on collections of other
        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
        will not call this method but `get_source` directly.
        """
        code = None
        if globals is None:
            globals = {}

        # first we try to get the source for this template together
        # with the filename and the uptodate function.
        source, filename, uptodate = self.get_source(environment, name)

        # try to load the code from the bytecode cache if there is a
        # bytecode cache configured.
        bcc = environment.bytecode_cache
        if bcc is not None:
            bucket = bcc.get_bucket(environment, name, filename, source)
            code = bucket.code

        # if we don't have code so far (not cached, no longer up to
        # date) etc. we compile the template
        if code is None:
            code = environment.compile(source, name, filename)

            # if the bytecode cache is available and the bucket doesn't
            # have a code so far, we give the bucket the new code and put
            # it back to the bytecode cache.
            if bcc is not None and bucket.code is None:
                bucket.code = code
                bcc.set_bucket(bucket)

        return environment.template_class.from_code(
            environment, code, globals, uptodate
        )
class FileSystemLoader(BaseLoader):
    """Load templates from a directory in the file system.

    The path can be relative or absolute. Relative paths are relative to
    the current working directory.

    .. code-block:: python

        loader = FileSystemLoader("templates")

    A list of paths can be given. The directories will be searched in
    order, stopping at the first matching template.

    .. code-block:: python

        loader = FileSystemLoader(["/override/templates", "/default/templates"])

    :param searchpath: A path, or list of paths, to the directory that
        contains the templates.
    :param encoding: Use this encoding to read the text from template
        files.
    :param followlinks: Follow symbolic links in the path.

    .. versionchanged:: 2.8
        Added the ``followlinks`` parameter.
    """

    def __init__(
        self,
        searchpath: t.Union[str, os.PathLike, t.Sequence[t.Union[str, os.PathLike]]],
        encoding: str = "utf-8",
        followlinks: bool = False,
    ) -> None:
        # A single path (string or path-like) becomes a one-element list.
        if isinstance(searchpath, str) or not isinstance(searchpath, abc.Iterable):
            searchpath = [searchpath]

        self.searchpath = [os.fspath(entry) for entry in searchpath]
        self.encoding = encoding
        self.followlinks = followlinks

    def get_source(
        self, environment: "Environment", template: str
    ) -> t.Tuple[str, str, t.Callable[[], bool]]:
        pieces = split_template_path(template)

        for root in self.searchpath:
            # Use posixpath even on Windows to avoid "drive:" or UNC
            # segments breaking out of the search directory.
            filename = posixpath.join(root, *pieces)
            handle = open_if_exists(filename)

            if handle is None:
                continue

            try:
                contents = handle.read().decode(self.encoding)
            finally:
                handle.close()

            mtime = os.path.getmtime(filename)

            def uptodate() -> bool:
                # Closure over the mtime observed at load time; a missing
                # file counts as out of date.
                try:
                    return os.path.getmtime(filename) == mtime
                except OSError:
                    return False

            # Use normpath to convert Windows altsep to sep.
            return contents, os.path.normpath(filename), uptodate

        raise TemplateNotFound(template)

    def list_templates(self) -> t.List[str]:
        found = set()

        for root in self.searchpath:
            for dirpath, _, filenames in os.walk(root, followlinks=self.followlinks):
                for fname in filenames:
                    # Make the path relative to the search root and use "/"
                    # separators regardless of platform.
                    relative = os.path.join(dirpath, fname)[len(root) :]
                    template = relative.strip(os.path.sep).replace(os.path.sep, "/")

                    if template[:2] == "./":
                        template = template[2:]

                    found.add(template)

        return sorted(found)
class PackageLoader(BaseLoader):
    """Load templates from a directory in a Python package.

    :param package_name: Import name of the package that contains the
        template directory.
    :param package_path: Directory within the imported package that
        contains the templates.
    :param encoding: Encoding of template files.

    The following example looks up templates in the ``pages`` directory
    within the ``project.ui`` package.

    .. code-block:: python

        loader = PackageLoader("project.ui", "pages")

    Only packages installed as directories (standard pip behavior) or
    zip/egg files (less common) are supported. The Python API for
    introspecting data in packages is too limited to support other
    installation methods the way this loader requires.

    There is limited support for :pep:`420` namespace packages. The
    template directory is assumed to only be in one namespace
    contributor. Zip files contributing to a namespace are not
    supported.

    .. versionchanged:: 3.0
        No longer uses ``setuptools`` as a dependency.

    .. versionchanged:: 3.0
        Limited PEP 420 namespace package support.
    """

    def __init__(
        self,
        package_name: str,
        package_path: "str" = "templates",
        encoding: str = "utf-8",
    ) -> None:
        """Import the package and locate the template directory on disk
        or inside the package's zip archive."""
        package_path = os.path.normpath(package_path).rstrip(os.path.sep)

        # normpath preserves ".", which isn't valid in zip paths.
        if package_path == os.path.curdir:
            package_path = ""
        elif package_path[:2] == os.path.curdir + os.path.sep:
            package_path = package_path[2:]

        self.package_path = package_path
        self.package_name = package_name
        self.encoding = encoding

        # Make sure the package exists. This also makes namespace
        # packages work, otherwise get_loader returns None.
        import_module(package_name)
        spec = importlib.util.find_spec(package_name)
        assert spec is not None, "An import spec was not found for the package."
        loader = spec.loader
        assert loader is not None, "A loader was not found for the package."
        self._loader = loader
        # Non-None only when the package lives inside a zip archive.
        self._archive = None
        template_root = None

        if isinstance(loader, zipimport.zipimporter):
            self._archive = loader.archive
            pkgdir = next(iter(spec.submodule_search_locations))  # type: ignore
            template_root = os.path.join(pkgdir, package_path).rstrip(os.path.sep)
        else:
            roots: t.List[str] = []

            # One element for regular packages, multiple for namespace
            # packages, or None for single module file.
            if spec.submodule_search_locations:
                roots.extend(spec.submodule_search_locations)
            # A single module file, use the parent directory instead.
            elif spec.origin is not None:
                roots.append(os.path.dirname(spec.origin))

            # The first candidate that actually contains the template
            # directory wins.
            for root in roots:
                root = os.path.join(root, package_path)

                if os.path.isdir(root):
                    template_root = root
                    break

        if template_root is None:
            raise ValueError(
                f"The {package_name!r} package was not installed in a"
                " way that PackageLoader understands."
            )

        self._template_root = template_root

    def get_source(
        self, environment: "Environment", template: str
    ) -> t.Tuple[str, str, t.Optional[t.Callable[[], bool]]]:
        """Return ``(source, filename, uptodate)`` for ``template``,
        reading from the filesystem or the zip archive as appropriate.

        :raises TemplateNotFound: if the file does not exist under the
            template root.
        """
        # Use posixpath even on Windows to avoid "drive:" or UNC
        # segments breaking out of the search directory. Use normpath to
        # convert Windows altsep to sep.
        p = os.path.normpath(
            posixpath.join(self._template_root, *split_template_path(template))
        )
        up_to_date: t.Optional[t.Callable[[], bool]]

        if self._archive is None:
            # Package is a directory.
            if not os.path.isfile(p):
                raise TemplateNotFound(template)

            with open(p, "rb") as f:
                source = f.read()

            mtime = os.path.getmtime(p)

            def up_to_date() -> bool:
                return os.path.isfile(p) and os.path.getmtime(p) == mtime

        else:
            # Package is a zip file.
            try:
                source = self._loader.get_data(p)  # type: ignore
            except OSError as e:
                raise TemplateNotFound(template) from e

            # Could use the zip's mtime for all template mtimes, but
            # would need to safely reload the module if it's out of
            # date, so just report it as always current.
            up_to_date = None

        return source.decode(self.encoding), p, up_to_date

    def list_templates(self) -> t.List[str]:
        """Return the sorted names of all templates under the template
        root, using ``/`` as separator.

        :raises TypeError: for zip imports that lack the private
            ``_files`` metadata needed to enumerate entries.
        """
        results: t.List[str] = []

        if self._archive is None:
            # Package is a directory.
            offset = len(self._template_root)

            for dirpath, _, filenames in os.walk(self._template_root):
                dirpath = dirpath[offset:].lstrip(os.path.sep)
                results.extend(
                    os.path.join(dirpath, name).replace(os.path.sep, "/")
                    for name in filenames
                )
        else:
            if not hasattr(self._loader, "_files"):
                raise TypeError(
                    "This zip import does not have the required"
                    " metadata to list templates."
                )

            # Package is a zip file.
            prefix = (
                self._template_root[len(self._archive) :].lstrip(os.path.sep)
                + os.path.sep
            )
            offset = len(prefix)

            for name in self._loader._files.keys():  # type: ignore
                # Find names under the templates directory that aren't directories.
                if name.startswith(prefix) and name[-1] != os.path.sep:
                    results.append(name[offset:].replace(os.path.sep, "/"))

        results.sort()
        return results
|
|
||||||
|
|
||||||
|
|
||||||
class DictLoader(BaseLoader):
    """Load templates from a plain ``dict`` mapping template names to
    their source strings. Handy for unit testing:

    >>> loader = DictLoader({'index.html': 'source here'})

    Because auto reloading is rarely useful this is disabled per default.
    """

    def __init__(self, mapping: t.Mapping[str, str]) -> None:
        self.mapping = mapping

    def get_source(
        self, environment: "Environment", template: str
    ) -> t.Tuple[str, None, t.Callable[[], bool]]:
        """Return ``(source, None, uptodate)`` from the mapping.

        The uptodate callable reports True as long as the mapping still
        contains the same source for this name.
        """
        if template not in self.mapping:
            raise TemplateNotFound(template)

        source = self.mapping[template]
        return source, None, lambda: source == self.mapping.get(template)

    def list_templates(self) -> t.List[str]:
        return sorted(self.mapping)
|
|
||||||
|
|
||||||
|
|
||||||
class FunctionLoader(BaseLoader):
    """A loader that delegates the actual lookup to a callable.

    The callable receives the template name and must return one of:

    - a string with the template source,
    - a ``(source, filename, uptodatefunc)`` tuple, or
    - ``None`` if the template does not exist.

    >>> def load_template(name):
    ...     if name == 'index.html':
    ...         return '...'
    ...
    >>> loader = FunctionLoader(load_template)

    The ``uptodatefunc`` is called when auto reloading is enabled and
    must return ``True`` while the template is still current. For more
    details have a look at :meth:`BaseLoader.get_source`, which has the
    same return value.
    """

    def __init__(
        self,
        load_func: t.Callable[
            [str],
            t.Optional[
                t.Union[
                    str, t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]
                ]
            ],
        ],
    ) -> None:
        self.load_func = load_func

    def get_source(
        self, environment: "Environment", template: str
    ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
        """Invoke the load function and normalize its result."""
        result = self.load_func(template)

        if result is None:
            raise TemplateNotFound(template)

        # A bare string means "source only": no filename, no reload check.
        if isinstance(result, str):
            return result, None, None

        return result
|
|
||||||
|
|
||||||
|
|
||||||
class PrefixLoader(BaseLoader):
    """Dispatch template lookups to other loaders keyed by a name prefix.

    The prefix is separated from the template name by ``delimiter``
    (a slash by default)::

        loader = PrefixLoader({
            'app1': PackageLoader('mypackage.app1'),
            'app2': PackageLoader('mypackage.app2')
        })

    Loading ``'app1/index.html'`` pulls the file from the app1 package,
    loading ``'app2/index.html'`` the file from the second.
    """

    def __init__(
        self, mapping: t.Mapping[str, BaseLoader], delimiter: str = "/"
    ) -> None:
        self.mapping = mapping
        self.delimiter = delimiter

    def get_loader(self, template: str) -> t.Tuple[BaseLoader, str]:
        """Split off the prefix and return ``(loader, remaining_name)``.

        :raises TemplateNotFound: if the name contains no delimiter or
            no loader is registered for the prefix.
        """
        try:
            prefix, local_name = template.split(self.delimiter, 1)
            return self.mapping[prefix], local_name
        except (ValueError, KeyError) as exc:
            raise TemplateNotFound(template) from exc

    def get_source(
        self, environment: "Environment", template: str
    ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
        delegate, local_name = self.get_loader(template)
        try:
            return delegate.get_source(environment, local_name)
        except TemplateNotFound as exc:
            # Re-raise under the prefixed name so callers see the name
            # they actually asked for.
            raise TemplateNotFound(template) from exc

    @internalcode
    def load(
        self,
        environment: "Environment",
        name: str,
        globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
    ) -> "Template":
        delegate, local_name = self.get_loader(name)
        try:
            return delegate.load(environment, local_name, globals)
        except TemplateNotFound as exc:
            # Re-raise under the prefixed name so callers see the name
            # they actually asked for.
            raise TemplateNotFound(name) from exc

    def list_templates(self) -> t.List[str]:
        """Return every delegate's templates with this loader's prefix
        and delimiter prepended."""
        return [
            prefix + self.delimiter + name
            for prefix, delegate in self.mapping.items()
            for name in delegate.list_templates()
        ]
|
|
||||||
|
|
||||||
|
|
||||||
class ChoiceLoader(BaseLoader):
    """Try a sequence of loaders in order and use the first that
    succeeds. Works like :class:`PrefixLoader`, just without a prefix.

    >>> loader = ChoiceLoader([
    ...     FileSystemLoader('/path/to/user/templates'),
    ...     FileSystemLoader('/path/to/system/templates')
    ... ])

    This is useful if you want to allow users to override builtin
    templates from a different location.
    """

    def __init__(self, loaders: t.Sequence[BaseLoader]) -> None:
        self.loaders = loaders

    def get_source(
        self, environment: "Environment", template: str
    ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
        for candidate in self.loaders:
            try:
                return candidate.get_source(environment, template)
            except TemplateNotFound:
                # This loader doesn't know the template; ask the next one.
                continue

        raise TemplateNotFound(template)

    @internalcode
    def load(
        self,
        environment: "Environment",
        name: str,
        globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
    ) -> "Template":
        for candidate in self.loaders:
            try:
                return candidate.load(environment, name, globals)
            except TemplateNotFound:
                continue

        raise TemplateNotFound(name)

    def list_templates(self) -> t.List[str]:
        """Union of all delegate loaders' template names, sorted."""
        names: t.Set[str] = set()

        for candidate in self.loaders:
            names.update(candidate.list_templates())

        return sorted(names)
|
|
||||||
|
|
||||||
|
|
||||||
class _TemplateModule(ModuleType):
    """Like a normal module but with support for weak references.

    Used by :class:`ModuleLoader`, which keeps only a weak proxy in
    ``sys.modules`` so its fake package can be garbage collected along
    with the loader.
    """
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleLoader(BaseLoader):
    """This loader loads templates from precompiled templates.

    Example usage:

    >>> loader = ChoiceLoader([
    ...     ModuleLoader('/path/to/compiled/templates'),
    ...     FileSystemLoader('/path/to/templates')
    ... ])

    Templates can be precompiled with :meth:`Environment.compile_templates`.
    """

    # Precompiled modules carry no original template source.
    has_source_access = False

    def __init__(
        self, path: t.Union[str, os.PathLike, t.Sequence[t.Union[str, os.PathLike]]]
    ) -> None:
        """Register a fake package whose ``__path__`` points at the
        directories containing the precompiled template modules."""
        # Unique per-loader package name; id() keeps parallel loaders apart.
        package_name = f"_jinja2_module_templates_{id(self):x}"

        # create a fake module that looks for the templates in the
        # path given.
        mod = _TemplateModule(package_name)

        if not isinstance(path, abc.Iterable) or isinstance(path, str):
            path = [path]

        mod.__path__ = [os.fspath(p) for p in path]

        sys.modules[package_name] = weakref.proxy(
            mod, lambda x: sys.modules.pop(package_name, None)
        )

        # the only strong reference, the sys.modules entry is weak
        # so that the garbage collector can remove it once the
        # loader that created it goes out of business.
        self.module = mod
        self.package_name = package_name

    @staticmethod
    def get_template_key(name: str) -> str:
        """Hash the template name into a stable module-safe identifier."""
        return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()

    @staticmethod
    def get_module_filename(name: str) -> str:
        """Filename under which a compiled template is stored on disk."""
        return ModuleLoader.get_template_key(name) + ".py"

    @internalcode
    def load(
        self,
        environment: "Environment",
        name: str,
        globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
    ) -> "Template":
        """Import the precompiled module for ``name`` and build a
        template from its namespace.

        :raises TemplateNotFound: if no compiled module exists for the name.
        """
        key = self.get_template_key(name)
        module = f"{self.package_name}.{key}"
        # Reuse a previously imported module if this loader already has it.
        mod = getattr(self.module, module, None)

        if mod is None:
            try:
                mod = __import__(module, None, None, ["root"])
            except ImportError as e:
                raise TemplateNotFound(name) from e

            # remove the entry from sys.modules, we only want the attribute
            # on the module object we have stored on the loader.
            sys.modules.pop(module, None)

        if globals is None:
            globals = {}

        return environment.template_class.from_module_dict(
            environment, mod.__dict__, globals
        )
|
|
||||||
@ -1,111 +0,0 @@
|
|||||||
"""Functions that expose information about templates that might be
|
|
||||||
interesting for introspection.
|
|
||||||
"""
|
|
||||||
import typing as t
|
|
||||||
|
|
||||||
from . import nodes
|
|
||||||
from .compiler import CodeGenerator
|
|
||||||
from .compiler import Frame
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
from .environment import Environment
|
|
||||||
|
|
||||||
|
|
||||||
class TrackingCodeGenerator(CodeGenerator):
    """We abuse the code generator for introspection.

    Instead of emitting code, this subclass records which names each
    frame resolves from the runtime context.
    """

    def __init__(self, environment: "Environment") -> None:
        # Name and filename are dummies; no code is ever written.
        super().__init__(environment, "<introspection>", "<introspection>")
        # Names that will be looked up in the context at render time.
        self.undeclared_identifiers: t.Set[str] = set()

    def write(self, x: str) -> None:
        """Don't write."""

    def enter_frame(self, frame: Frame) -> None:
        """Remember all undeclared identifiers."""
        super().enter_frame(frame)

        # "resolve" loads are context lookups; anything not satisfied by
        # the environment globals counts as undeclared.
        for _, (action, param) in frame.symbols.loads.items():
            if action == "resolve" and param not in self.environment.globals:
                self.undeclared_identifiers.add(param)
|
|
||||||
|
|
||||||
|
|
||||||
def find_undeclared_variables(ast: nodes.Template) -> t.Set[str]:
    """Return the names of all variables the template will look up from
    the context at runtime.

    Compile-time analysis cannot know which branches will execute, so
    every name that *might* be resolved from the context is included.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
    >>> meta.find_undeclared_variables(ast) == {'bar'}
    True

    .. admonition:: Implementation

       The code generator is used under the hood, so this function can
       raise a :exc:`TemplateAssertionError` during compilation just as
       the code generator can.
    """
    tracker = TrackingCodeGenerator(ast.environment)  # type: ignore
    tracker.visit(ast)
    return tracker.undeclared_identifiers
|
|
||||||
|
|
||||||
|
|
||||||
#: Node types that reference another template; consumed by
#: :func:`find_referenced_templates`.
_ref_types = (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
# Matching static type for the tuple above.
_RefType = t.Union[nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include]
|
|
||||||
|
|
||||||
|
|
||||||
def find_referenced_templates(ast: nodes.Template) -> t.Iterator[t.Optional[str]]:
    """Finds all the referenced templates from the AST. This will return an
    iterator over all the hardcoded template extensions, inclusions and
    imports. If dynamic inheritance or inclusion is used, `None` will be
    yielded.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
    >>> list(meta.find_referenced_templates(ast))
    ['layout.html', None]

    This function is useful for dependency tracking. For example if you want
    to rebuild parts of the website after a layout template has changed.
    """
    template_name: t.Any

    # Walk every extends/import/from-import/include node in the AST.
    for node in ast.find_all(_ref_types):
        template: nodes.Expr = node.template  # type: ignore

        if not isinstance(template, nodes.Const):
            # a tuple with some non consts in there
            if isinstance(template, (nodes.Tuple, nodes.List)):
                for template_name in template.items:
                    # something const, only yield the strings and ignore
                    # non-string consts that really just make no sense
                    if isinstance(template_name, nodes.Const):
                        if isinstance(template_name.value, str):
                            yield template_name.value
                    # something dynamic in there
                    else:
                        yield None
            # something dynamic we don't know about here
            else:
                yield None
            continue
        # constant is a basestring, direct template name
        if isinstance(template.value, str):
            yield template.value
        # a tuple or list (latter *should* not happen) made of consts,
        # yield the consts that are strings. We could warn here for
        # non string values
        elif isinstance(node, nodes.Include) and isinstance(
            template.value, (tuple, list)
        ):
            for template_name in template.value:
                if isinstance(template_name, str):
                    yield template_name
        # something else we don't care about, we could warn here
        else:
            yield None
|
|
||||||
@ -1,130 +0,0 @@
|
|||||||
import typing as t
|
|
||||||
from ast import literal_eval
|
|
||||||
from ast import parse
|
|
||||||
from itertools import chain
|
|
||||||
from itertools import islice
|
|
||||||
from types import GeneratorType
|
|
||||||
|
|
||||||
from . import nodes
|
|
||||||
from .compiler import CodeGenerator
|
|
||||||
from .compiler import Frame
|
|
||||||
from .compiler import has_safe_repr
|
|
||||||
from .environment import Environment
|
|
||||||
from .environment import Template
|
|
||||||
|
|
||||||
|
|
||||||
def native_concat(values: t.Iterable[t.Any]) -> t.Optional[t.Any]:
    """Collapse rendered chunks into a native Python value.

    ``None`` is returned for an empty iterable. A single non-string
    chunk is passed through unchanged. Anything else is joined into one
    string, which is then handed to :func:`ast.literal_eval`; if that
    fails, the joined string itself is the result.

    :param values: Iterable of outputs to concatenate.
    """
    first_two = list(islice(values, 2))

    if not first_two:
        return None

    if len(first_two) == 1:
        text = first_two[0]

        # A lone non-string node keeps its native type.
        if not isinstance(text, str):
            return text
    else:
        # A generator was partially consumed by islice above, so the
        # captured head must be stitched back on before joining.
        if isinstance(values, GeneratorType):
            values = chain(first_two, values)
        text = "".join(map(str, values))

    try:
        # In Python 3.10+ ast.literal_eval removes leading spaces/tabs
        # from the given string. For backwards compatibility we need to
        # parse the string ourselves without removing leading spaces/tabs.
        return literal_eval(parse(text, mode="eval"))
    except (ValueError, SyntaxError, MemoryError):
        return text
|
|
||||||
|
|
||||||
|
|
||||||
class NativeCodeGenerator(CodeGenerator):
    """A code generator which renders Python types by not adding
    ``str()`` around output nodes.
    """

    @staticmethod
    def _default_finalize(value: t.Any) -> t.Any:
        # Identity: values keep their native type instead of being
        # coerced to str.
        return value

    def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
        # Constant groups are still joined as strings for code emission.
        return repr("".join([str(v) for v in group]))

    def _output_child_to_const(
        self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
    ) -> t.Any:
        """Fold an output child to its constant value, or raise
        ``Impossible`` when the value has no safe repr."""
        const = node.as_const(frame.eval_ctx)

        if not has_safe_repr(const):
            raise nodes.Impossible()

        # Literal template data bypasses the finalize callable.
        if isinstance(node, nodes.TemplateData):
            return const

        return finalize.const(const)  # type: ignore

    def _output_child_pre(
        self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
    ) -> None:
        # Open the finalize call around the child expression, if any.
        if finalize.src is not None:
            self.write(finalize.src)

    def _output_child_post(
        self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
    ) -> None:
        # Close the finalize call opened in ``_output_child_pre``.
        if finalize.src is not None:
            self.write(")")
|
|
||||||
|
|
||||||
|
|
||||||
class NativeEnvironment(Environment):
    """An environment that renders templates to native Python types."""

    # Generator that skips the str() coercion on output nodes.
    code_generator_class = NativeCodeGenerator
    # Concatenation that literal-evals the joined output when possible.
    concat = staticmethod(native_concat)  # type: ignore
|
|
||||||
|
|
||||||
|
|
||||||
class NativeTemplate(Template):
    environment_class = NativeEnvironment

    def render(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Render the template to produce a native Python type. If the
        result is a single node, its value is returned. Otherwise, the
        nodes are concatenated as strings. If the result can be parsed
        with :func:`ast.literal_eval`, the parsed value is returned.
        Otherwise, the string is returned.
        """
        ctx = self.new_context(dict(*args, **kwargs))

        try:
            return self.environment_class.concat(  # type: ignore
                self.root_render_func(ctx)  # type: ignore
            )
        except Exception:
            # Delegate formatting/re-raising to the environment's
            # exception handler.
            return self.environment.handle_exception()

    async def render_async(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Async variant of :meth:`render`; requires an environment
        created with ``enable_async=True``."""
        if not self.environment.is_async:
            raise RuntimeError(
                "The environment was not created with async mode enabled."
            )

        ctx = self.new_context(dict(*args, **kwargs))

        try:
            # Drain the async render generator before concatenating.
            return self.environment_class.concat(  # type: ignore
                [n async for n in self.root_render_func(ctx)]  # type: ignore
            )
        except Exception:
            return self.environment.handle_exception()
|
|
||||||
|
|
||||||
|
|
||||||
# Assigned after both classes exist: NativeTemplate's class body refers
# to NativeEnvironment, so this cannot be set inside the class body.
NativeEnvironment.template_class = NativeTemplate
|
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,47 +0,0 @@
|
|||||||
"""The optimizer tries to constant fold expressions and modify the AST
|
|
||||||
in place so that it should be faster to evaluate.
|
|
||||||
|
|
||||||
Because the AST does not contain all the scoping information and the
|
|
||||||
compiler has to find that out, we cannot do all the optimizations we
|
|
||||||
want. For example, loop unrolling doesn't work because unrolled loops
|
|
||||||
would have a different scope. The solution would be a second syntax tree
|
|
||||||
that stored the scoping rules.
|
|
||||||
"""
|
|
||||||
import typing as t
|
|
||||||
|
|
||||||
from . import nodes
|
|
||||||
from .visitor import NodeTransformer
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
from .environment import Environment
|
|
||||||
|
|
||||||
|
|
||||||
def optimize(node: nodes.Node, environment: "Environment") -> nodes.Node:
    """Run the constant-folding :class:`Optimizer` over ``node`` and
    return the (possibly replaced) node.

    The context hint can be used to perform a static optimization
    based on the context given.
    """
    return t.cast(nodes.Node, Optimizer(environment).visit(node))
|
|
||||||
|
|
||||||
|
|
||||||
class Optimizer(NodeTransformer):
    """AST transformer that replaces constant expressions with
    :class:`~jinja2.nodes.Const` nodes where possible."""

    def __init__(self, environment: "t.Optional[Environment]") -> None:
        self.environment = environment

    def generic_visit(
        self, node: nodes.Node, *args: t.Any, **kwargs: t.Any
    ) -> nodes.Node:
        """Visit children first, then try to fold ``node`` itself."""
        node = super().generic_visit(node, *args, **kwargs)

        # Only expressions are folded. Some other node types also define
        # ``as_const``, but replacing them causes errors later on.
        if not isinstance(node, nodes.Expr):
            return node

        try:
            # Both the evaluation and the Const conversion may signal
            # Impossible, in which case the node is left untouched.
            return nodes.Const.from_untrusted(
                node.as_const(args[0] if args else None),
                lineno=node.lineno,
                environment=self.environment,
            )
        except nodes.Impossible:
            return node
|
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,428 +0,0 @@
|
|||||||
"""A sandbox layer that ensures unsafe operations cannot be performed.
|
|
||||||
Useful when the template itself comes from an untrusted source.
|
|
||||||
"""
|
|
||||||
import operator
|
|
||||||
import types
|
|
||||||
import typing as t
|
|
||||||
from _string import formatter_field_name_split # type: ignore
|
|
||||||
from collections import abc
|
|
||||||
from collections import deque
|
|
||||||
from string import Formatter
|
|
||||||
|
|
||||||
from markupsafe import EscapeFormatter
|
|
||||||
from markupsafe import Markup
|
|
||||||
|
|
||||||
from .environment import Environment
|
|
||||||
from .exceptions import SecurityError
|
|
||||||
from .runtime import Context
|
|
||||||
from .runtime import Undefined
|
|
||||||
|
|
||||||
# TypeVar so decorators like ``unsafe`` preserve the wrapped callable's
# static type.
F = t.TypeVar("F", bound=t.Callable[..., t.Any])

#: maximum number of items a range may produce
MAX_RANGE = 100000

#: Unsafe function attributes.
UNSAFE_FUNCTION_ATTRIBUTES: t.Set[str] = set()

#: Unsafe method attributes. Function attributes are unsafe for methods too.
UNSAFE_METHOD_ATTRIBUTES: t.Set[str] = set()

#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}

#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}

#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}

# (container type, frozenset of method names) pairs mapping each mutable
# container ABC — plus deque, which has extra mutators — to the methods
# that modify it in place.
_mutable_spec: t.Tuple[t.Tuple[t.Type, t.FrozenSet[str]], ...] = (
    (
        abc.MutableSet,
        frozenset(
            [
                "add",
                "clear",
                "difference_update",
                "discard",
                "pop",
                "remove",
                "symmetric_difference_update",
                "update",
            ]
        ),
    ),
    (
        abc.MutableMapping,
        frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
    ),
    (
        abc.MutableSequence,
        frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
    ),
    (
        deque,
        frozenset(
            [
                "append",
                "appendleft",
                "clear",
                "extend",
                "extendleft",
                "pop",
                "popleft",
                "remove",
                "rotate",
            ]
        ),
    ),
)
|
|
||||||
|
|
||||||
|
|
||||||
def inspect_format_method(callable: t.Callable) -> t.Optional[str]:
    """If ``callable`` is the bound ``format``/``format_map`` method of
    a string, return that string; otherwise return ``None``.

    Lets the sandbox detect ``str.format`` calls so they can be handled
    specially.
    """
    if not isinstance(callable, (types.MethodType, types.BuiltinMethodType)):
        return None

    if callable.__name__ not in ("format", "format_map"):
        return None

    owner = callable.__self__
    return owner if isinstance(owner, str) else None
|
|
||||||
|
|
||||||
|
|
||||||
def safe_range(*args: int) -> range:
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.

    :raises OverflowError: if the resulting range would be too long.
    """
    result = range(*args)

    if len(result) > MAX_RANGE:
        raise OverflowError(
            "Range too big. The sandbox blocks ranges larger than"
            f" MAX_RANGE ({MAX_RANGE})."
        )

    return result
|
|
||||||
|
|
||||||
|
|
||||||
def unsafe(f: F) -> F:
    """Decorator that flags a function or method as unsafe so the
    sandbox will refuse to call it.

    .. code-block: python

        @unsafe
        def delete(self):
            pass
    """
    f.unsafe_callable = True  # type: ignore
    return f
|
|
||||||
|
|
||||||
|
|
||||||
def is_internal_attribute(obj: t.Any, attr: str) -> bool:
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # The isinstance checks below are mutually exclusive, so a flat
    # sequence of guards behaves the same as the original elif chain.
    if isinstance(obj, types.FunctionType) and attr in UNSAFE_FUNCTION_ATTRIBUTES:
        return True

    if isinstance(obj, types.MethodType) and (
        attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES
    ):
        return True

    if isinstance(obj, type) and attr == "mro":
        return True

    # Code, traceback, and frame objects are always internal.
    if isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        return True

    if isinstance(obj, types.GeneratorType) and attr in UNSAFE_GENERATOR_ATTRIBUTES:
        return True

    if (
        hasattr(types, "CoroutineType")
        and isinstance(obj, types.CoroutineType)
        and attr in UNSAFE_COROUTINE_ATTRIBUTES
    ):
        return True

    if (
        hasattr(types, "AsyncGeneratorType")
        and isinstance(obj, types.AsyncGeneratorType)
        and attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES
    ):
        return True

    # Everything dunder-prefixed is considered internal as a fallback.
    return attr.startswith("__")
|
|
||||||
|
|
||||||
|
|
||||||
def modifies_known_mutable(obj: t.Any, attr: str) -> bool:
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) or the corresponding ABCs would modify it
    if called.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object, ``False`` is returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # Find the attribute set for the first matching known mutable type.
    unsafe_attrs = next(
        (
            attrs
            for typespec, attrs in _mutable_spec
            if isinstance(obj, typespec)
        ),
        None,
    )
    return unsafe_attrs is not None and attr in unsafe_attrs
|
|
||||||
|
|
||||||
|
|
||||||
class SandboxedEnvironment(Environment):
    """The sandboxed environment. It works like the regular environment but
    tells the compiler to generate sandboxed code. Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised. However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """

    # Checked by the compiler/runtime to enable the sandboxed code paths.
    sandboxed = True

    #: default callback table for the binary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
        "//": operator.floordiv,
        "**": operator.pow,
        "%": operator.mod,
    }

    #: default callback table for the unary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
        "+": operator.pos,
        "-": operator.neg,
    }

    #: a set of binary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops: t.FrozenSet[str] = frozenset()

    #: a set of unary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops: t.FrozenSet[str] = frozenset()

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)
        # Replace the builtin range with the length-limited safe_range.
        self.globals["range"] = safe_range
        # Per-instance copies so subclasses/instances can customize the
        # operator tables without mutating the class-level defaults.
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access. Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith("_") or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj: t.Any) -> bool:
        """Check if an object is safely callable. By default callables
        are considered safe unless decorated with :func:`unsafe`.

        This also recognizes the Django convention of setting
        ``func.alters_data = True``.
        """
        return not (
            getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
        )

    def call_binop(
        self, context: Context, operator: str, left: t.Any, right: t.Any
    ) -> t.Any:
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any:
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(
        self, obj: t.Any, argument: t.Union[str, t.Any]
    ) -> t.Union[t.Any, Undefined]:
        """Subscribe an object from sandboxed code.

        Tries item access first; if that fails and the argument is a
        string, falls back to (safety-checked) attribute access.
        """
        try:
            return obj[argument]
        except (TypeError, LookupError):
            if isinstance(argument, str):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        # Attribute exists: return it only if it passes the
                        # safety check, otherwise an unsafe-undefined.
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj: t.Any, attribute: str) -> t.Union[t.Any, Undefined]:
        """Subscribe an object from sandboxed code and prefer the
        attribute. The attribute passed *must* be a bytestring.

        Mirror image of :meth:`getitem`: attribute access first, then a
        fallback to item access.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined:
        """Return an undefined object for unsafe attributes.

        Accessing it in a template raises :exc:`SecurityError`.
        """
        return self.undefined(
            f"access to attribute {attribute!r} of"
            f" {type(obj).__name__!r} object is unsafe.",
            name=attribute,
            obj=obj,
            exc=SecurityError,
        )

    def format_string(
        self,
        s: str,
        args: t.Tuple[t.Any, ...],
        kwargs: t.Dict[str, t.Any],
        format_func: t.Optional[t.Callable] = None,
    ) -> str:
        """If a format call is detected, then this is routed through this
        method so that our safety sandbox can be used for it.
        """
        formatter: SandboxedFormatter
        if isinstance(s, Markup):
            formatter = SandboxedEscapeFormatter(self, escape=s.escape)
        else:
            formatter = SandboxedFormatter(self)

        if format_func is not None and format_func.__name__ == "format_map":
            if len(args) != 1 or kwargs:
                # NOTE(review): ``kwargs is not None`` is always True here
                # (kwargs is a dict), so the reported count is off by one
                # when kwargs is empty — confirm intended message.
                raise TypeError(
                    "format_map() takes exactly one argument"
                    f" {len(args) + (kwargs is not None)} given"
                )

            # format_map semantics: the single positional arg is the mapping.
            kwargs = args[0]
            args = ()

        rv = formatter.vformat(s, args, kwargs)
        # Preserve the input type (e.g. Markup stays Markup).
        return type(s)(rv)

    def call(
        __self,  # noqa: B902
        __context: Context,
        __obj: t.Any,
        *args: t.Any,
        **kwargs: t.Any,
    ) -> t.Any:
        """Call an object from sandboxed code."""
        # str.format / str.format_map get special sandboxed handling.
        fmt = inspect_format_method(__obj)
        if fmt is not None:
            return __self.format_string(fmt, args, kwargs, __obj)

        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError(f"{__obj!r} is not safely callable")
        return __context.call(__obj, *args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """A :class:`SandboxedEnvironment` that additionally rejects access to
    attributes that would modify the builtin mutable containers (`list`,
    `set`, and `dict`), as determined by :func:`modifies_known_mutable`.
    """

    def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
        """Safe only if the base sandbox allows the attribute *and* it
        does not mutate a known mutable container."""
        allowed = super().is_safe_attribute(obj, attr, value)
        return allowed and not modifies_known_mutable(obj, attr)
|
|
||||||
|
|
||||||
|
|
||||||
class SandboxedFormatter(Formatter):
    """A :class:`string.Formatter` that routes attribute and item lookups
    in format fields through the sandboxed environment's safe getters."""

    def __init__(self, env: Environment, **kwargs: t.Any) -> None:
        self._env = env
        super().__init__(**kwargs)

    def get_field(
        self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
    ) -> t.Tuple[t.Any, str]:
        """Resolve the leading name, then walk the remaining attribute
        and item accesses through the environment."""
        root, accessors = formatter_field_name_split(field_name)
        current = self.get_value(root, args, kwargs)

        for is_attr, part in accessors:
            getter = self._env.getattr if is_attr else self._env.getitem
            current = getter(current, part)

        return current, root
|
|
||||||
|
|
||||||
|
|
||||||
class SandboxedEscapeFormatter(SandboxedFormatter, EscapeFormatter):
    """Combines sandboxed field lookup with markup escaping; used when the
    format string is a :class:`~markupsafe.Markup` instance."""

    pass
|
|
||||||
@ -1,255 +0,0 @@
|
|||||||
"""Built-in template tests used with the ``is`` operator."""
|
|
||||||
import operator
|
|
||||||
import typing as t
|
|
||||||
from collections import abc
|
|
||||||
from numbers import Number
|
|
||||||
|
|
||||||
from .runtime import Undefined
|
|
||||||
from .utils import pass_environment
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
from .environment import Environment
|
|
||||||
|
|
||||||
|
|
||||||
def test_odd(value: int) -> bool:
|
|
||||||
"""Return true if the variable is odd."""
|
|
||||||
return value % 2 == 1
|
|
||||||
|
|
||||||
|
|
||||||
def test_even(value: int) -> bool:
|
|
||||||
"""Return true if the variable is even."""
|
|
||||||
return value % 2 == 0
|
|
||||||
|
|
||||||
|
|
||||||
def test_divisibleby(value: int, num: int) -> bool:
|
|
||||||
"""Check if a variable is divisible by a number."""
|
|
||||||
return value % num == 0
|
|
||||||
|
|
||||||
|
|
||||||
def test_defined(value: t.Any) -> bool:
    """Return true if the variable is defined:

    .. sourcecode:: jinja

        {% if variable is defined %}
            value of variable: {{ variable }}
        {% else %}
            variable is not defined
        {% endif %}

    See the :func:`default` filter for a simple way to set undefined
    variables.
    """
    if isinstance(value, Undefined):
        return False

    return True
|
|
||||||
|
|
||||||
|
|
||||||
def test_undefined(value: t.Any) -> bool:
    """The inverse of :func:`defined`: true only for undefined values."""
    return isinstance(value, Undefined)
|
|
||||||
|
|
||||||
|
|
||||||
@pass_environment
def test_filter(env: "Environment", value: str) -> bool:
    """Check whether a filter with the given name is registered on the
    environment.  Useful when a filter may only be optionally available.

    .. code-block:: jinja

        {% if 'markdown' is filter %}
            {{ value | markdown }}
        {% else %}
            {{ value }}
        {% endif %}

    .. versionadded:: 3.0
    """
    return value in env.filters
|
|
||||||
|
|
||||||
|
|
||||||
@pass_environment
def test_test(env: "Environment", value: str) -> bool:
    """Check whether a test with the given name is registered on the
    environment.  Useful when a test may only be optionally available.

    .. code-block:: jinja

        {% if 'loud' is test %}
            {% if value is loud %}
                {{ value|upper }}
            {% else %}
                {{ value|lower }}
            {% endif %}
        {% else %}
            {{ value }}
        {% endif %}

    .. versionadded:: 3.0
    """
    return value in env.tests
|
|
||||||
|
|
||||||
|
|
||||||
def test_none(value: t.Any) -> bool:
|
|
||||||
"""Return true if the variable is none."""
|
|
||||||
return value is None
|
|
||||||
|
|
||||||
|
|
||||||
def test_boolean(value: t.Any) -> bool:
|
|
||||||
"""Return true if the object is a boolean value.
|
|
||||||
|
|
||||||
.. versionadded:: 2.11
|
|
||||||
"""
|
|
||||||
return value is True or value is False
|
|
||||||
|
|
||||||
|
|
||||||
def test_false(value: t.Any) -> bool:
|
|
||||||
"""Return true if the object is False.
|
|
||||||
|
|
||||||
.. versionadded:: 2.11
|
|
||||||
"""
|
|
||||||
return value is False
|
|
||||||
|
|
||||||
|
|
||||||
def test_true(value: t.Any) -> bool:
|
|
||||||
"""Return true if the object is True.
|
|
||||||
|
|
||||||
.. versionadded:: 2.11
|
|
||||||
"""
|
|
||||||
return value is True
|
|
||||||
|
|
||||||
|
|
||||||
# NOTE: The existing 'number' test matches booleans and floats
|
|
||||||
def test_integer(value: t.Any) -> bool:
|
|
||||||
"""Return true if the object is an integer.
|
|
||||||
|
|
||||||
.. versionadded:: 2.11
|
|
||||||
"""
|
|
||||||
return isinstance(value, int) and value is not True and value is not False
|
|
||||||
|
|
||||||
|
|
||||||
# NOTE: The existing 'number' test matches booleans and integers
|
|
||||||
def test_float(value: t.Any) -> bool:
|
|
||||||
"""Return true if the object is a float.
|
|
||||||
|
|
||||||
.. versionadded:: 2.11
|
|
||||||
"""
|
|
||||||
return isinstance(value, float)
|
|
||||||
|
|
||||||
|
|
||||||
def test_lower(value: str) -> bool:
|
|
||||||
"""Return true if the variable is lowercased."""
|
|
||||||
return str(value).islower()
|
|
||||||
|
|
||||||
|
|
||||||
def test_upper(value: str) -> bool:
|
|
||||||
"""Return true if the variable is uppercased."""
|
|
||||||
return str(value).isupper()
|
|
||||||
|
|
||||||
|
|
||||||
def test_string(value: t.Any) -> bool:
|
|
||||||
"""Return true if the object is a string."""
|
|
||||||
return isinstance(value, str)
|
|
||||||
|
|
||||||
|
|
||||||
def test_mapping(value: t.Any) -> bool:
|
|
||||||
"""Return true if the object is a mapping (dict etc.).
|
|
||||||
|
|
||||||
.. versionadded:: 2.6
|
|
||||||
"""
|
|
||||||
return isinstance(value, abc.Mapping)
|
|
||||||
|
|
||||||
|
|
||||||
def test_number(value: t.Any) -> bool:
|
|
||||||
"""Return true if the variable is a number."""
|
|
||||||
return isinstance(value, Number)
|
|
||||||
|
|
||||||
|
|
||||||
def test_sequence(value: t.Any) -> bool:
|
|
||||||
"""Return true if the variable is a sequence. Sequences are variables
|
|
||||||
that are iterable.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
len(value)
|
|
||||||
value.__getitem__
|
|
||||||
except Exception:
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def test_sameas(value: t.Any, other: t.Any) -> bool:
|
|
||||||
"""Check if an object points to the same memory address than another
|
|
||||||
object:
|
|
||||||
|
|
||||||
.. sourcecode:: jinja
|
|
||||||
|
|
||||||
{% if foo.attribute is sameas false %}
|
|
||||||
the foo attribute really is the `False` singleton
|
|
||||||
{% endif %}
|
|
||||||
"""
|
|
||||||
return value is other
|
|
||||||
|
|
||||||
|
|
||||||
def test_iterable(value: t.Any) -> bool:
|
|
||||||
"""Check if it's possible to iterate over an object."""
|
|
||||||
try:
|
|
||||||
iter(value)
|
|
||||||
except TypeError:
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def test_escaped(value: t.Any) -> bool:
|
|
||||||
"""Check if the value is escaped."""
|
|
||||||
return hasattr(value, "__html__")
|
|
||||||
|
|
||||||
|
|
||||||
def test_in(value: t.Any, seq: t.Container) -> bool:
|
|
||||||
"""Check if value is in seq.
|
|
||||||
|
|
||||||
.. versionadded:: 2.10
|
|
||||||
"""
|
|
||||||
return value in seq
|
|
||||||
|
|
||||||
|
|
||||||
#: Mapping of template test names (used as ``value is <name>``) to their
#: implementations.  Comparison operators are registered under symbolic,
#: short, and long-word aliases.
TESTS = {
    "odd": test_odd,
    "even": test_even,
    "divisibleby": test_divisibleby,
    "defined": test_defined,
    "undefined": test_undefined,
    "filter": test_filter,
    "test": test_test,
    "none": test_none,
    "boolean": test_boolean,
    "false": test_false,
    "true": test_true,
    "integer": test_integer,
    "float": test_float,
    "lower": test_lower,
    "upper": test_upper,
    "string": test_string,
    "mapping": test_mapping,
    "number": test_number,
    "sequence": test_sequence,
    "iterable": test_iterable,
    # The builtin callable() is usable directly as a test.
    "callable": callable,
    "sameas": test_sameas,
    "escaped": test_escaped,
    "in": test_in,
    # Comparison operators and their aliases.
    "==": operator.eq,
    "eq": operator.eq,
    "equalto": operator.eq,
    "!=": operator.ne,
    "ne": operator.ne,
    ">": operator.gt,
    "gt": operator.gt,
    "greaterthan": operator.gt,
    "ge": operator.ge,
    ">=": operator.ge,
    "<": operator.lt,
    "lt": operator.lt,
    "lessthan": operator.lt,
    "<=": operator.le,
    "le": operator.le,
}
|
|
||||||
@ -1,755 +0,0 @@
|
|||||||
import enum
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import typing as t
|
|
||||||
from collections import abc
|
|
||||||
from collections import deque
|
|
||||||
from random import choice
|
|
||||||
from random import randrange
|
|
||||||
from threading import Lock
|
|
||||||
from types import CodeType
|
|
||||||
from urllib.parse import quote_from_bytes
|
|
||||||
|
|
||||||
import markupsafe
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
import typing_extensions as te
|
|
||||||
|
|
||||||
# Type variable for decorators that return their argument unchanged.
F = t.TypeVar("F", bound=t.Callable[..., t.Any])

# special singleton representing missing values for the runtime
missing: t.Any = type("MissingType", (), {"__repr__": lambda x: "missing"})()

# Code objects registered here (via ``internalcode``) are treated as
# internal machinery rather than user code.
internal_code: t.MutableSet[CodeType] = set()

# Fast string concatenation helper.
concat = "".join
|
|
||||||
|
|
||||||
|
|
||||||
def pass_context(f: F) -> F:
    """Decorator: pass the :class:`~jinja2.runtime.Context` as the first
    argument to the decorated function when it is called while rendering
    a template.

    Can be used on functions, filters, and tests.

    If only ``Context.eval_context`` is needed, use
    :func:`pass_eval_context`. If only ``Context.environment`` is
    needed, use :func:`pass_environment`.

    .. versionadded:: 3.0.0
        Replaces ``contextfunction`` and ``contextfilter``.
    """
    f.jinja_pass_arg = _PassArg.context  # type: ignore
    return f
|
|
||||||
|
|
||||||
|
|
||||||
def pass_eval_context(f: F) -> F:
    """Decorator: pass the :class:`~jinja2.nodes.EvalContext` as the
    first argument to the decorated function when it is called while
    rendering a template. See :ref:`eval-context`.

    Can be used on functions, filters, and tests.

    If only ``EvalContext.environment`` is needed, use
    :func:`pass_environment`.

    .. versionadded:: 3.0.0
        Replaces ``evalcontextfunction`` and ``evalcontextfilter``.
    """
    f.jinja_pass_arg = _PassArg.eval_context  # type: ignore
    return f
|
|
||||||
|
|
||||||
|
|
||||||
def pass_environment(f: F) -> F:
    """Decorator: pass the :class:`~jinja2.Environment` as the first
    argument to the decorated function when it is called while rendering
    a template.

    Can be used on functions, filters, and tests.

    .. versionadded:: 3.0.0
        Replaces ``environmentfunction`` and ``environmentfilter``.
    """
    f.jinja_pass_arg = _PassArg.environment  # type: ignore
    return f
|
|
||||||
|
|
||||||
|
|
||||||
class _PassArg(enum.Enum):
    """Which special object a decorated callable wants as its first
    argument, as attached by the ``pass_*`` decorators."""

    context = enum.auto()
    eval_context = enum.auto()
    environment = enum.auto()

    @classmethod
    def from_obj(cls, obj: F) -> t.Optional["_PassArg"]:
        """Return the marker attached to ``obj``, or ``None`` if absent."""
        return getattr(obj, "jinja_pass_arg", None)
|
|
||||||
|
|
||||||
|
|
||||||
def internalcode(f: F) -> F:
    """Decorator that registers ``f``'s code object as internal."""
    internal_code.add(f.__code__)
    return f
|
|
||||||
|
|
||||||
|
|
||||||
def is_undefined(obj: t.Any) -> bool:
    """Check if the object passed is undefined.  This does nothing more
    than an instance check against :class:`Undefined`, but reads nicer
    and saves the caller an import.  Handy in custom filters or tests::

        def default(var, default=''):
            if is_undefined(var):
                return default
            return var
    """
    # Imported locally to avoid a circular import at module load time.
    from .runtime import Undefined

    return isinstance(obj, Undefined)
|
|
||||||
|
|
||||||
|
|
||||||
def consume(iterable: t.Iterable[t.Any]) -> None:
    """Exhaust an iterable, discarding every item."""
    # A zero-length deque drains the iterator at C speed.
    deque(iterable, maxlen=0)
|
|
||||||
|
|
||||||
|
|
||||||
def clear_caches() -> None:
    """Jinja keeps internal caches for environments and lexers. These are
    used so that Jinja doesn't have to recreate environments and lexers all
    the time. Normally you don't have to care about that but if you are
    measuring memory consumption you may want to clean the caches.
    """
    # Imported here to avoid circular imports at module load time.
    from .environment import get_spontaneous_environment
    from .lexer import _lexer_cache

    get_spontaneous_environment.cache_clear()
    _lexer_cache.clear()
|
|
||||||
|
|
||||||
|
|
||||||
def import_string(import_name: str, silent: bool = False) -> t.Any:
    """Imports an object based on a string. This is useful if you want to
    use import paths as endpoints or something similar. An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If the `silent` is True the return value will be `None` if the import
    fails.

    :return: imported object
    """
    try:
        if ":" in import_name:
            module_name, _, attr = import_name.partition(":")
        elif "." in import_name:
            module_name, _, attr = import_name.rpartition(".")
        else:
            # A bare module name: import and return the module itself.
            return __import__(import_name)

        module = __import__(module_name, None, None, [attr])
        return getattr(module, attr)
    except (ImportError, AttributeError):
        if not silent:
            raise
|
|
||||||
|
|
||||||
|
|
||||||
def open_if_exists(filename: str, mode: str = "rb") -> t.Optional[t.IO]:
    """Returns a file descriptor for the filename if that file exists,
    otherwise ``None``.

    Opens eagerly instead of checking ``os.path.isfile`` first, which
    avoids the check-then-open race (the file could appear or vanish
    between the two calls).

    :param filename: path to open.
    :param mode: mode passed to :func:`open` (binary read by default).
    """
    try:
        return open(filename, mode)
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # Same outcome as the old isfile() pre-check: missing files,
        # directories, and unreadable paths yield None.
        return None
|
|
||||||
|
|
||||||
|
|
||||||
def object_type_repr(obj: t.Any) -> str:
    """Returns the name of the object's type. For some recognized
    singletons the name of the object is returned instead. (For
    example for `None` and `Ellipsis`).
    """
    if obj is None:
        return "None"

    if obj is Ellipsis:
        return "Ellipsis"

    cls = type(obj)
    # Builtins read better without the module prefix.
    prefix = "" if cls.__module__ == "builtins" else f"{cls.__module__}."
    return f"{prefix}{cls.__name__} object"
|
|
||||||
|
|
||||||
|
|
||||||
def pformat(obj: t.Any) -> str:
    """Format an object using :func:`pprint.pformat`."""
    from pprint import pformat as _pformat  # type: ignore

    return _pformat(obj)
|
|
||||||
|
|
||||||
|
|
||||||
# Matches a full http(s)/www URL for ``urlize``: either a scheme/www
# prefix with a named host (known TLD or idna TLD), a bare dotted domain
# with a classic TLD, or an explicit scheme with an IPv4/IPv6 literal —
# each optionally followed by a port, path, query, and fragment.
_http_re = re.compile(
    r"""
    ^
    (
        (https?://|www\.)  # scheme or www
        (([\w%-]+\.)+)?  # subdomain
        (
            [a-z]{2,63}  # basic tld
        |
            xn--[\w%]{2,59}  # idna tld
        )
    |
        ([\w%-]{2,63}\.)+  # basic domain
        (com|net|int|edu|gov|org|info|mil)  # basic tld
    |
        (https?://)  # scheme
        (
            (([\d]{1,3})(\.[\d]{1,3}){3})  # IPv4
        |
            (\[([\da-f]{0,4}:){2}([\da-f]{0,4}:?){1,6}])  # IPv6
        )
    )
    (?::[\d]{1,5})?  # port
    (?:[/?#]\S*)?  # path, query, and fragment
    $
    """,
    re.IGNORECASE | re.VERBOSE,
)
# Loose email matcher for ``urlize``: local part, then a dotted domain.
_email_re = re.compile(r"^\S+@\w[\w.-]*\.\w+$")
|
|
||||||
|
|
||||||
|
|
||||||
def urlize(
|
|
||||||
text: str,
|
|
||||||
trim_url_limit: t.Optional[int] = None,
|
|
||||||
rel: t.Optional[str] = None,
|
|
||||||
target: t.Optional[str] = None,
|
|
||||||
extra_schemes: t.Optional[t.Iterable[str]] = None,
|
|
||||||
) -> str:
|
|
||||||
"""Convert URLs in text into clickable links.
|
|
||||||
|
|
||||||
This may not recognize links in some situations. Usually, a more
|
|
||||||
comprehensive formatter, such as a Markdown library, is a better
|
|
||||||
choice.
|
|
||||||
|
|
||||||
Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
|
|
||||||
addresses. Links with trailing punctuation (periods, commas, closing
|
|
||||||
parentheses) and leading punctuation (opening parentheses) are
|
|
||||||
recognized excluding the punctuation. Email addresses that include
|
|
||||||
header fields are not recognized (for example,
|
|
||||||
``mailto:address@example.com?cc=copy@example.com``).
|
|
||||||
|
|
||||||
:param text: Original text containing URLs to link.
|
|
||||||
:param trim_url_limit: Shorten displayed URL values to this length.
|
|
||||||
:param target: Add the ``target`` attribute to links.
|
|
||||||
:param rel: Add the ``rel`` attribute to links.
|
|
||||||
:param extra_schemes: Recognize URLs that start with these schemes
|
|
||||||
in addition to the default behavior.
|
|
||||||
|
|
||||||
.. versionchanged:: 3.0
|
|
||||||
The ``extra_schemes`` parameter was added.
|
|
||||||
|
|
||||||
.. versionchanged:: 3.0
|
|
||||||
Generate ``https://`` links for URLs without a scheme.
|
|
||||||
|
|
||||||
.. versionchanged:: 3.0
|
|
||||||
The parsing rules were updated. Recognize email addresses with
|
|
||||||
or without the ``mailto:`` scheme. Validate IP addresses. Ignore
|
|
||||||
parentheses and brackets in more cases.
|
|
||||||
"""
|
|
||||||
if trim_url_limit is not None:
|
|
||||||
|
|
||||||
def trim_url(x: str) -> str:
|
|
||||||
if len(x) > trim_url_limit: # type: ignore
|
|
||||||
return f"{x[:trim_url_limit]}..."
|
|
||||||
|
|
||||||
return x
|
|
||||||
|
|
||||||
else:
|
|
||||||
|
|
||||||
def trim_url(x: str) -> str:
|
|
||||||
return x
|
|
||||||
|
|
||||||
words = re.split(r"(\s+)", str(markupsafe.escape(text)))
|
|
||||||
rel_attr = f' rel="{markupsafe.escape(rel)}"' if rel else ""
|
|
||||||
target_attr = f' target="{markupsafe.escape(target)}"' if target else ""
|
|
||||||
|
|
||||||
for i, word in enumerate(words):
|
|
||||||
head, middle, tail = "", word, ""
|
|
||||||
match = re.match(r"^([(<]|<)+", middle)
|
|
||||||
|
|
||||||
if match:
|
|
||||||
head = match.group()
|
|
||||||
middle = middle[match.end() :]
|
|
||||||
|
|
||||||
# Unlike lead, which is anchored to the start of the string,
|
|
||||||
# need to check that the string ends with any of the characters
|
|
||||||
# before trying to match all of them, to avoid backtracking.
|
|
||||||
if middle.endswith((")", ">", ".", ",", "\n", ">")):
|
|
||||||
match = re.search(r"([)>.,\n]|>)+$", middle)
|
|
||||||
|
|
||||||
if match:
|
|
||||||
tail = match.group()
|
|
||||||
middle = middle[: match.start()]
|
|
||||||
|
|
||||||
# Prefer balancing parentheses in URLs instead of ignoring a
|
|
||||||
# trailing character.
|
|
||||||
for start_char, end_char in ("(", ")"), ("<", ">"), ("<", ">"):
|
|
||||||
start_count = middle.count(start_char)
|
|
||||||
|
|
||||||
if start_count <= middle.count(end_char):
|
|
||||||
# Balanced, or lighter on the left
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Move as many as possible from the tail to balance
|
|
||||||
for _ in range(min(start_count, tail.count(end_char))):
|
|
||||||
end_index = tail.index(end_char) + len(end_char)
|
|
||||||
# Move anything in the tail before the end char too
|
|
||||||
middle += tail[:end_index]
|
|
||||||
tail = tail[end_index:]
|
|
||||||
|
|
||||||
if _http_re.match(middle):
|
|
||||||
if middle.startswith("https://") or middle.startswith("http://"):
|
|
||||||
middle = (
|
|
||||||
f'<a href="{middle}"{rel_attr}{target_attr}>{trim_url(middle)}</a>'
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
middle = (
|
|
||||||
f'<a href="https://{middle}"{rel_attr}{target_attr}>'
|
|
||||||
f"{trim_url(middle)}</a>"
|
|
||||||
)
|
|
||||||
|
|
||||||
elif middle.startswith("mailto:") and _email_re.match(middle[7:]):
|
|
||||||
middle = f'<a href="{middle}">{middle[7:]}</a>'
|
|
||||||
|
|
||||||
elif (
|
|
||||||
"@" in middle
|
|
||||||
and not middle.startswith("www.")
|
|
||||||
and ":" not in middle
|
|
||||||
and _email_re.match(middle)
|
|
||||||
):
|
|
||||||
middle = f'<a href="mailto:{middle}">{middle}</a>'
|
|
||||||
|
|
||||||
elif extra_schemes is not None:
|
|
||||||
for scheme in extra_schemes:
|
|
||||||
if middle != scheme and middle.startswith(scheme):
|
|
||||||
middle = f'<a href="{middle}"{rel_attr}{target_attr}>{middle}</a>'
|
|
||||||
|
|
||||||
words[i] = f"{head}{middle}{tail}"
|
|
||||||
|
|
||||||
return "".join(words)
|
|
||||||
|
|
||||||
|
|
||||||
def generate_lorem_ipsum(
    n: int = 5, html: bool = True, min: int = 20, max: int = 100
) -> str:
    """Generate some lorem ipsum for the template.

    :param n: Number of paragraphs to generate.
    :param html: If true, return escaped markup with each paragraph
        wrapped in ``<p>`` tags; otherwise return plain text paragraphs
        separated by blank lines.
    :param min: Minimum number of words per paragraph.
    :param max: Maximum (exclusive) number of words per paragraph.
    """
    from .constants import LOREM_IPSUM_WORDS

    words = LOREM_IPSUM_WORDS.split()
    result = []

    for _ in range(n):
        next_capitalized = True
        last_comma = last_fullstop = 0
        word = None
        last = None
        p = []

        # each paragraph contains out of 20 to 100 words.
        for idx, _ in enumerate(range(randrange(min, max))):
            # avoid picking the same word twice in a row
            while True:
                word = choice(words)
                if word != last:
                    last = word
                    break
            if next_capitalized:
                word = word.capitalize()
                next_capitalized = False
            # add commas
            if idx - randrange(3, 8) > last_comma:
                last_comma = idx
                # push the next full stop further out so a comma and a
                # period don't land right next to each other
                last_fullstop += 2
                word += ","
            # add end of sentences
            if idx - randrange(10, 20) > last_fullstop:
                last_comma = last_fullstop = idx
                word += "."
                next_capitalized = True
            p.append(word)

        # ensure that the paragraph ends with a dot.
        p_str = " ".join(p)

        if p_str.endswith(","):
            p_str = p_str[:-1] + "."
        elif not p_str.endswith("."):
            p_str += "."

        result.append(p_str)

    if not html:
        return "\n\n".join(result)
    return markupsafe.Markup(
        "\n".join(f"<p>{markupsafe.escape(x)}</p>" for x in result)
    )
|
|
||||||
|
|
||||||
|
|
||||||
def url_quote(obj: t.Any, charset: str = "utf-8", for_qs: bool = False) -> str:
    """Quote a string for use in a URL using the given charset.

    :param obj: String or bytes to quote. Other types are converted to
        string then encoded to bytes using the given charset.
    :param charset: Encode text to bytes using this charset.
    :param for_qs: Quote "/" and use "+" for spaces.
    """
    if isinstance(obj, bytes):
        data = obj
    else:
        # non-string objects are stringified first, then encoded
        text = obj if isinstance(obj, str) else str(obj)
        data = text.encode(charset)

    # "/" stays unquoted in path mode but is quoted in query-string mode
    quoted = quote_from_bytes(data, b"" if for_qs else b"/")

    if for_qs:
        quoted = quoted.replace("%20", "+")

    return quoted
|
|
||||||
|
|
||||||
|
|
||||||
@abc.MutableMapping.register
class LRUCache:
    """A simple LRU Cache implementation."""

    # this is fast for small capacities (something below 1000) but doesn't
    # scale. But as long as it's only used as storage for templates this
    # won't do any harm.

    def __init__(self, capacity: int) -> None:
        # maximum number of entries before the least recently used one
        # is evicted
        self.capacity = capacity
        # key -> value storage
        self._mapping: t.Dict[t.Any, t.Any] = {}
        # usage order; the most recently used key is at the right end
        self._queue: "te.Deque[t.Any]" = deque()
        self._postinit()

    def _postinit(self) -> None:
        # alias all queue methods for faster lookup
        self._popleft = self._queue.popleft
        self._pop = self._queue.pop
        self._remove = self._queue.remove
        # guards all mutating operations; re-created here because locks
        # cannot be pickled
        self._wlock = Lock()
        self._append = self._queue.append

    def __getstate__(self) -> t.Mapping[str, t.Any]:
        # exclude the lock and the bound-method aliases; both are
        # rebuilt by _postinit on unpickling
        return {
            "capacity": self.capacity,
            "_mapping": self._mapping,
            "_queue": self._queue,
        }

    def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
        self.__dict__.update(d)
        self._postinit()

    def __getnewargs__(self) -> t.Tuple:
        return (self.capacity,)

    def copy(self) -> "LRUCache":
        """Return a shallow copy of the instance."""
        rv = self.__class__(self.capacity)
        rv._mapping.update(self._mapping)
        rv._queue.extend(self._queue)
        return rv

    def get(self, key: t.Any, default: t.Any = None) -> t.Any:
        """Return an item from the cache dict or `default`"""
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
        """Set `default` if the key is not in the cache otherwise
        leave unchanged. Return the value of this key.
        """
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def clear(self) -> None:
        """Clear the cache."""
        with self._wlock:
            self._mapping.clear()
            self._queue.clear()

    def __contains__(self, key: t.Any) -> bool:
        """Check if a key exists in this cache."""
        return key in self._mapping

    def __len__(self) -> int:
        """Return the current size of the cache."""
        return len(self._mapping)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self._mapping!r}>"

    def __getitem__(self, key: t.Any) -> t.Any:
        """Get an item from the cache. Moves the item up so that it has the
        highest priority then.

        Raise a `KeyError` if it does not exist.
        """
        with self._wlock:
            rv = self._mapping[key]

            # move the key to the most-recently-used end unless it is
            # already there
            if self._queue[-1] != key:
                try:
                    self._remove(key)
                except ValueError:
                    # if something removed the key from the container
                    # when we read, ignore the ValueError that we would
                    # get otherwise.
                    pass

                self._append(key)

            return rv

    def __setitem__(self, key: t.Any, value: t.Any) -> None:
        """Sets the value for an item. Moves the item up so that it
        has the highest priority then.
        """
        with self._wlock:
            if key in self._mapping:
                # re-inserting an existing key only refreshes its position
                self._remove(key)
            elif len(self._mapping) == self.capacity:
                # cache is full: evict the least recently used key
                del self._mapping[self._popleft()]

            self._append(key)
            self._mapping[key] = value

    def __delitem__(self, key: t.Any) -> None:
        """Remove an item from the cache dict.
        Raise a `KeyError` if it does not exist.
        """
        with self._wlock:
            del self._mapping[key]

            try:
                self._remove(key)
            except ValueError:
                # the queue entry may already be gone; see __getitem__
                pass

    def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
        """Return a list of items."""
        # most recently used first
        result = [(key, self._mapping[key]) for key in list(self._queue)]
        result.reverse()
        return result

    def values(self) -> t.Iterable[t.Any]:
        """Return a list of all values."""
        return [x[1] for x in self.items()]

    def keys(self) -> t.Iterable[t.Any]:
        """Return a list of all keys ordered by most recent usage."""
        return list(self)

    def __iter__(self) -> t.Iterator[t.Any]:
        # snapshot the queue so concurrent mutation can't break iteration
        return reversed(tuple(self._queue))

    def __reversed__(self) -> t.Iterator[t.Any]:
        """Iterate over the keys in the cache dict, oldest items
        coming first.
        """
        return iter(tuple(self._queue))

    __copy__ = copy
|
|
||||||
|
|
||||||
|
|
||||||
def select_autoescape(
    enabled_extensions: t.Collection[str] = ("html", "htm", "xml"),
    disabled_extensions: t.Collection[str] = (),
    default_for_string: bool = True,
    default: bool = False,
) -> t.Callable[[t.Optional[str]], bool]:
    """Intelligently sets the initial value of autoescaping based on the
    filename of the template. This is the recommended way to configure
    autoescaping if you do not want to write a custom function yourself.

    If you want to enable it for all templates created from strings or
    for all templates with `.html` and `.xml` extensions::

        from jinja2 import Environment, select_autoescape
        env = Environment(autoescape=select_autoescape(
            enabled_extensions=('html', 'xml'),
            default_for_string=True,
        ))

    Example configuration to turn it on at all times except if the template
    ends with `.txt`::

        from jinja2 import Environment, select_autoescape
        env = Environment(autoescape=select_autoescape(
            disabled_extensions=('txt',),
            default_for_string=True,
            default=True,
        ))

    The `enabled_extensions` is an iterable of all the extensions that
    autoescaping should be enabled for. Likewise `disabled_extensions` is
    a list of all templates it should be disabled for. If a template is
    loaded from a string then the default from `default_for_string` is used.
    If nothing matches then the initial value of autoescaping is set to the
    value of `default`.

    For security reasons this function operates case insensitive.

    .. versionadded:: 2.9
    """

    def normalize(extensions: t.Collection[str]) -> t.Tuple[str, ...]:
        # lower-case, dot-prefixed suffixes suitable for str.endswith
        return tuple(f".{ext.lstrip('.').lower()}" for ext in extensions)

    enabled_patterns = normalize(enabled_extensions)
    disabled_patterns = normalize(disabled_extensions)

    def autoescape(template_name: t.Optional[str]) -> bool:
        # string templates have no name at all
        if template_name is None:
            return default_for_string

        name = template_name.lower()

        if name.endswith(enabled_patterns):
            return True

        if name.endswith(disabled_patterns):
            return False

        return default

    return autoescape
|
|
||||||
|
|
||||||
|
|
||||||
def htmlsafe_json_dumps(
    obj: t.Any, dumps: t.Optional[t.Callable[..., str]] = None, **kwargs: t.Any
) -> markupsafe.Markup:
    """Serialize an object to a string of JSON with :func:`json.dumps`,
    then replace HTML-unsafe characters with Unicode escapes and mark
    the result safe with :class:`~markupsafe.Markup`.

    This is available in templates as the ``|tojson`` filter.

    The following characters are escaped: ``<``, ``>``, ``&``, ``'``.

    The returned string is safe to render in HTML documents and
    ``<script>`` tags. The exception is in HTML attributes that are
    double quoted; either use single quotes or the ``|forceescape``
    filter.

    :param obj: The object to serialize to JSON.
    :param dumps: The ``dumps`` function to use. Defaults to
        ``env.policies["json.dumps_function"]``, which defaults to
        :func:`json.dumps`.
    :param kwargs: Extra arguments to pass to ``dumps``. Merged onto
        ``env.policies["json.dumps_kwargs"]``.

    .. versionchanged:: 3.0
        The ``dumper`` parameter is renamed to ``dumps``.

    .. versionadded:: 2.9
    """
    if dumps is None:
        dumps = json.dumps

    rendered = dumps(obj, **kwargs)

    # Replace characters that could terminate a <script> block or break
    # out of a single-quoted attribute with their Unicode escapes.
    for char, replacement in (
        ("<", "\\u003c"),
        (">", "\\u003e"),
        ("&", "\\u0026"),
        ("'", "\\u0027"),
    ):
        rendered = rendered.replace(char, replacement)

    return markupsafe.Markup(rendered)
|
|
||||||
|
|
||||||
|
|
||||||
class Cycler:
    """Cycle through values, yielding them one at a time and restarting
    once the end is reached. Available as ``cycler`` in templates.

    Similar to ``loop.cycle``, but can be used outside loops or across
    multiple loops. For example, render a list of folders and files in a
    list, alternating giving them "odd" and "even" classes.

    .. code-block:: html+jinja

        {% set row_class = cycler("odd", "even") %}
        <ul class="browser">
        {% for folder in folders %}
          <li class="folder {{ row_class.next() }}">{{ folder }}
        {% endfor %}
        {% for file in files %}
          <li class="file {{ row_class.next() }}">{{ file }}
        {% endfor %}
        </ul>

    :param items: Each positional argument will be yielded in the order
        given for each cycle.

    .. versionadded:: 2.1
    """

    def __init__(self, *items: t.Any) -> None:
        # an empty cycle would make current/next meaningless
        if not items:
            raise RuntimeError("at least one item has to be provided")

        self.items = items
        self.pos = 0

    def reset(self) -> None:
        """Resets the current item to the first item."""
        self.pos = 0

    @property
    def current(self) -> t.Any:
        """Return the current item. Equivalent to the item that will be
        returned next time :meth:`next` is called.
        """
        return self.items[self.pos]

    def next(self) -> t.Any:
        """Return the current item, then advance :attr:`current` to the
        next item.
        """
        item = self.items[self.pos]
        # wrap around once the last item has been yielded
        self.pos = (self.pos + 1) % len(self.items)
        return item

    __next__ = next
|
|
||||||
|
|
||||||
|
|
||||||
class Joiner:
    """A joining helper for templates.

    Each call returns the separator, except for the very first call,
    which returns the empty string. This lets a template emit a
    separator *between* items without special-casing the first one.
    """

    def __init__(self, sep: str = ", ") -> None:
        self.sep = sep
        self.used = False

    def __call__(self) -> str:
        if self.used:
            return self.sep

        # first call: remember it and emit nothing
        self.used = True
        return ""
|
|
||||||
|
|
||||||
|
|
||||||
class Namespace:
    """A namespace object that can hold arbitrary attributes. It may be
    initialized from a dictionary or with keyword arguments."""

    def __init__(*args: t.Any, **kwargs: t.Any) -> None:  # noqa: B902
        # ``self`` is deliberately not a named parameter so that callers
        # may use ``self`` as a keyword attribute, e.g. Namespace(self=1).
        self, args = args[0], args[1:]
        # name-mangled to _Namespace__attrs; set via normal attribute
        # assignment, which __getattribute__ below does not intercept
        self.__attrs = dict(*args, **kwargs)

    def __getattribute__(self, name: str) -> t.Any:
        # __class__ is needed for the awaitable check in async mode
        if name in {"_Namespace__attrs", "__class__"}:
            return object.__getattribute__(self, name)
        try:
            # every other attribute read is routed to the backing dict
            return self.__attrs[name]
        except KeyError:
            raise AttributeError(name) from None

    def __setitem__(self, name: str, value: t.Any) -> None:
        # item assignment writes straight into the backing dict
        self.__attrs[name] = value

    def __repr__(self) -> str:
        return f"<Namespace {self.__attrs!r}>"
|
|
||||||
@ -1,92 +0,0 @@
|
|||||||
"""API for traversing the AST nodes. Implemented by the compiler and
|
|
||||||
meta introspection.
|
|
||||||
"""
|
|
||||||
import typing as t
|
|
||||||
|
|
||||||
from .nodes import Node
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
import typing_extensions as te
|
|
||||||
|
|
||||||
class VisitCallable(te.Protocol):
|
|
||||||
def __call__(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
|
|
||||||
...
|
|
||||||
|
|
||||||
|
|
||||||
class NodeVisitor:
    """Walks the abstract syntax tree and calls visitor functions for
    every node found. The visitor functions may return values which will
    be forwarded by the `visit` method.

    By default the visitor function for a node is ``'visit_'`` plus the
    class name of the node, so a `TryFinally` node is dispatched to
    `visit_TryFinally`. This behavior can be changed by overriding the
    `get_visitor` function. If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.
    """

    def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
        """Return the visitor function for this node or `None` if no visitor
        exists for this node. In that case the generic visit function is
        used instead.
        """
        method_name = f"visit_{type(node).__name__}"
        return getattr(self, method_name, None)

    def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Visit a node."""
        handler = self.get_visitor(node)

        if handler is None:
            return self.generic_visit(node, *args, **kwargs)

        return handler(node, *args, **kwargs)

    def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Called if no explicit visitor function exists for a node."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node. If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value. The return value may be the original node in which case no
    replacement takes place.
    """

    def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                # rebuild the list from the visitor results
                new_values = []
                for value in old_value:
                    if isinstance(value, Node):
                        value = self.visit(value, *args, **kwargs)
                        if value is None:
                            # None removes the node from the list
                            continue
                        elif not isinstance(value, Node):
                            # a non-Node result is treated as an iterable
                            # of nodes and spliced into the list
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # replace the list contents in place
                old_value[:] = new_values
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    # None removes the attribute entirely
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        rv = self.visit(node, *args, **kwargs)

        if not isinstance(rv, list):
            return [rv]

        return rv
|
|
||||||
@ -1,28 +0,0 @@
|
|||||||
Copyright 2010 Pallets
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
1. Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
|
|
||||||
2. Redistributions in binary form must reproduce the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer in the
|
|
||||||
documentation and/or other materials provided with the distribution.
|
|
||||||
|
|
||||||
3. Neither the name of the copyright holder nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
|
||||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
||||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
||||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
||||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
||||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
@ -1,295 +0,0 @@
|
|||||||
import functools
|
|
||||||
import re
|
|
||||||
import string
|
|
||||||
import typing as t
|
|
||||||
|
|
||||||
if t.TYPE_CHECKING:
|
|
||||||
import typing_extensions as te
|
|
||||||
|
|
||||||
class HasHTML(te.Protocol):
|
|
||||||
def __html__(self) -> str:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
__version__ = "2.1.1"
|
|
||||||
|
|
||||||
_strip_comments_re = re.compile(r"<!--.*?-->")
|
|
||||||
_strip_tags_re = re.compile(r"<.*?>")
|
|
||||||
|
|
||||||
|
|
||||||
def _simple_escaping_wrapper(name: str) -> t.Callable[..., "Markup"]:
    """Wrap the ``str`` method *name* so that all string arguments are
    escaped before the call and the result is wrapped in :class:`Markup`.
    """
    unbound = getattr(str, name)

    @functools.wraps(unbound)
    def wrapped(self: "Markup", *args: t.Any, **kwargs: t.Any) -> "Markup":
        # escape positional arguments into a copy, keyword arguments in place
        escaped_args = _escape_argspec(list(args), enumerate(args), self.escape)  # type: ignore
        _escape_argspec(kwargs, kwargs.items(), self.escape)
        return self.__class__(unbound(self, *escaped_args, **kwargs))

    return wrapped
|
|
||||||
|
|
||||||
|
|
||||||
class Markup(str):
    """A string that is ready to be safely inserted into an HTML or XML
    document, either because it was escaped or because it was marked
    safe.

    Passing an object to the constructor converts it to text and wraps
    it to mark it safe without escaping. To escape the text, use the
    :meth:`escape` class method instead.

    >>> Markup("Hello, <em>World</em>!")
    Markup('Hello, <em>World</em>!')
    >>> Markup(42)
    Markup('42')
    >>> Markup.escape("Hello, <em>World</em>!")
    Markup('Hello, &lt;em&gt;World&lt;/em&gt;!')

    This implements the ``__html__()`` interface that some frameworks
    use. Passing an object that implements ``__html__()`` will wrap the
    output of that method, marking it safe.

    >>> class Foo:
    ...     def __html__(self):
    ...         return '<a href="/foo">foo</a>'
    ...
    >>> Markup(Foo())
    Markup('<a href="/foo">foo</a>')

    This is a subclass of :class:`str`. It has the same methods, but
    escapes their arguments and returns a ``Markup`` instance.

    >>> Markup("<em>%s</em>") % ("foo & bar",)
    Markup('<em>foo &amp; bar</em>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup('<em>Hello</em> &lt;foo&gt;')
    """

    __slots__ = ()

    def __new__(
        cls, base: t.Any = "", encoding: t.Optional[str] = None, errors: str = "strict"
    ) -> "Markup":
        # an object with __html__ already produces safe markup; use it as-is
        if hasattr(base, "__html__"):
            base = base.__html__()

        if encoding is None:
            return super().__new__(cls, base)

        return super().__new__(cls, base, encoding, errors)

    def __html__(self) -> "Markup":
        # already safe markup; return unchanged
        return self

    def __add__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
        # the other operand is escaped before concatenation
        if isinstance(other, str) or hasattr(other, "__html__"):
            return self.__class__(super().__add__(self.escape(other)))

        return NotImplemented

    def __radd__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
        if isinstance(other, str) or hasattr(other, "__html__"):
            return self.escape(other).__add__(self)

        return NotImplemented

    def __mul__(self, num: "te.SupportsIndex") -> "Markup":
        if isinstance(num, int):
            return self.__class__(super().__mul__(num))

        return NotImplemented

    __rmul__ = __mul__

    def __mod__(self, arg: t.Any) -> "Markup":
        # %-formatting: every substituted value is escaped via the helper
        if isinstance(arg, tuple):
            # a tuple of arguments, each wrapped
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        elif hasattr(type(arg), "__getitem__") and not isinstance(arg, str):
            # a mapping of arguments, wrapped
            arg = _MarkupEscapeHelper(arg, self.escape)
        else:
            # a single argument, wrapped with the helper and a tuple
            arg = (_MarkupEscapeHelper(arg, self.escape),)

        return self.__class__(super().__mod__(arg))

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({super().__repr__()})"

    def join(self, seq: t.Iterable[t.Union[str, "HasHTML"]]) -> "Markup":
        return self.__class__(super().join(map(self.escape, seq)))

    join.__doc__ = str.join.__doc__

    def split(  # type: ignore
        self, sep: t.Optional[str] = None, maxsplit: int = -1
    ) -> t.List["Markup"]:
        return [self.__class__(v) for v in super().split(sep, maxsplit)]

    split.__doc__ = str.split.__doc__

    def rsplit(  # type: ignore
        self, sep: t.Optional[str] = None, maxsplit: int = -1
    ) -> t.List["Markup"]:
        return [self.__class__(v) for v in super().rsplit(sep, maxsplit)]

    rsplit.__doc__ = str.rsplit.__doc__

    def splitlines(self, keepends: bool = False) -> t.List["Markup"]:  # type: ignore
        return [self.__class__(v) for v in super().splitlines(keepends)]

    splitlines.__doc__ = str.splitlines.__doc__

    def unescape(self) -> str:
        """Convert escaped markup back into a text string. This replaces
        HTML entities with the characters they represent.

        >>> Markup("Main &raquo; <em>About</em>").unescape()
        'Main » <em>About</em>'
        """
        from html import unescape

        return unescape(str(self))

    def striptags(self) -> str:
        """:meth:`unescape` the markup, remove tags, and normalize
        whitespace to single spaces.

        >>> Markup("Main &raquo;\t<em>About</em>").striptags()
        'Main » About'
        """
        # Use two regexes to avoid ambiguous matches.
        value = _strip_comments_re.sub("", self)
        value = _strip_tags_re.sub("", value)
        value = " ".join(value.split())
        return Markup(value).unescape()

    @classmethod
    def escape(cls, s: t.Any) -> "Markup":
        """Escape a string. Calls :func:`escape` and ensures that for
        subclasses the correct type is returned.
        """
        rv = escape(s)

        if rv.__class__ is not cls:
            return cls(rv)

        return rv

    # Generate argument-escaping wrappers for these str methods at class
    # creation time; each escapes its arguments and returns a Markup.
    for method in (
        "__getitem__",
        "capitalize",
        "title",
        "lower",
        "upper",
        "replace",
        "ljust",
        "rjust",
        "lstrip",
        "rstrip",
        "center",
        "strip",
        "translate",
        "expandtabs",
        "swapcase",
        "zfill",
    ):
        locals()[method] = _simple_escaping_wrapper(method)

    del method

    def partition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
        l, s, r = super().partition(self.escape(sep))
        cls = self.__class__
        return cls(l), cls(s), cls(r)

    def rpartition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
        l, s, r = super().rpartition(self.escape(sep))
        cls = self.__class__
        return cls(l), cls(s), cls(r)

    def format(self, *args: t.Any, **kwargs: t.Any) -> "Markup":
        formatter = EscapeFormatter(self.escape)
        return self.__class__(formatter.vformat(self, args, kwargs))

    def __html_format__(self, format_spec: str) -> "Markup":
        if format_spec:
            raise ValueError("Unsupported format specification for Markup.")

        return self
|
|
||||||
|
|
||||||
|
|
||||||
class EscapeFormatter(string.Formatter):
    """A :class:`string.Formatter` that escapes every substituted field
    value with the callable given at construction time.
    """

    __slots__ = ("escape",)

    def __init__(self, escape: t.Callable[[t.Any], Markup]) -> None:
        self.escape = escape
        super().__init__()

    def format_field(self, value: t.Any, format_spec: str) -> str:
        """Format one field, honouring ``__html_format__``/``__html__``."""
        if hasattr(value, "__html_format__"):
            formatted = value.__html_format__(format_spec)
        elif hasattr(value, "__html__"):
            # __html__ alone cannot interpret a format spec — refuse
            # rather than silently ignore it.
            if format_spec:
                raise ValueError(
                    f"Format specifier {format_spec} given, but {type(value)} does not"
                    " define __html_format__. A class that defines __html__ must define"
                    " __html_format__ to work with format specifiers."
                )
            formatted = value.__html__()
        else:
            # We need to make sure the format spec is str here as
            # otherwise the wrong callback methods are invoked.
            formatted = string.Formatter.format_field(self, value, str(format_spec))
        return str(self.escape(formatted))
|
|
||||||
|
|
||||||
|
|
||||||
# Constrained to exactly list or dict: helpers that mutate a container in
# place can declare "returns the same container type it was given".
_ListOrDict = t.TypeVar("_ListOrDict", list, dict)
|
|
||||||
|
|
||||||
|
|
||||||
def _escape_argspec(
    obj: _ListOrDict, iterable: t.Iterable[t.Any], escape: t.Callable[[t.Any], Markup]
) -> _ListOrDict:
    """Helper for various string-wrapped functions.

    Escapes, in place, every value of *obj* (addressed through the
    key/value pairs of *iterable*) that is a string or provides
    ``__html__``; other values are left untouched.
    """
    for key, item in iterable:
        if isinstance(item, str) or hasattr(item, "__html__"):
            obj[key] = escape(item)

    return obj
|
|
||||||
|
|
||||||
|
|
||||||
class _MarkupEscapeHelper:
    """Helper for :meth:`Markup.__mod__`.

    Wraps one %-formatting argument so that any textual substitution
    escapes the underlying object first.
    """

    __slots__ = ("obj", "escape")

    def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
        self.obj = obj
        self.escape = escape

    def __getitem__(self, item: t.Any) -> "_MarkupEscapeHelper":
        # Keep subscripted values wrapped too ("%(key)s"-style formats).
        return _MarkupEscapeHelper(self.obj[item], self.escape)

    def __str__(self) -> str:
        # %s — escape the stringified object.
        return str(self.escape(self.obj))

    def __repr__(self) -> str:
        # %r — escape the repr text itself.
        return str(self.escape(repr(self.obj)))

    def __int__(self) -> int:
        # %d and friends need the raw numeric value, no escaping.
        return int(self.obj)

    def __float__(self) -> float:
        return float(self.obj)
|
|
||||||
|
|
||||||
|
|
||||||
# circular import
# Prefer the compiled C implementations when the optional _speedups
# extension built from _speedups.c is importable; otherwise fall back to
# the pure-Python versions in _native. The "x as x" form re-exports the
# names explicitly.
try:
    from ._speedups import escape as escape
    from ._speedups import escape_silent as escape_silent
    from ._speedups import soft_str as soft_str
except ImportError:
    from ._native import escape as escape
    from ._native import escape_silent as escape_silent  # noqa: F401
    from ._native import soft_str as soft_str  # noqa: F401
|
|
||||||
@ -1,63 +0,0 @@
|
|||||||
import typing as t
|
|
||||||
|
|
||||||
from . import Markup
|
|
||||||
|
|
||||||
|
|
||||||
def escape(s: t.Any) -> Markup:
    """Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
    the string with HTML-safe sequences. Use this if you need to display
    text that might contain such characters in HTML.

    If the object has an ``__html__`` method, it is called and the
    return value is assumed to already be safe for HTML.

    :param s: An object to be converted to a string and escaped.
    :return: A :class:`Markup` string with the escaped text.
    """
    if hasattr(s, "__html__"):
        return Markup(s.__html__())

    # BUG FIX: the replacement strings had been entity-decoded into
    # no-op self-replacements (e.g. ``.replace("&", "&")``); restore the
    # HTML entities. ``&`` must be replaced first so the other entities'
    # ampersands are not double-escaped.
    return Markup(
        str(s)
        .replace("&", "&amp;")
        .replace(">", "&gt;")
        .replace("<", "&lt;")
        .replace("'", "&#39;")
        .replace('"', "&#34;")
    )
|
|
||||||
|
|
||||||
|
|
||||||
def escape_silent(s: t.Optional[t.Any]) -> Markup:
    """Like :func:`escape` but treats ``None`` as the empty string.
    Useful with optional values, as otherwise you get the string
    ``'None'`` when the value is ``None``.

    >>> escape(None)
    Markup('None')
    >>> escape_silent(None)
    Markup('')
    """
    return Markup() if s is None else escape(s)
|
|
||||||
|
|
||||||
|
|
||||||
def soft_str(s: t.Any) -> str:
    """Convert an object to a string if it isn't already. This preserves
    a :class:`Markup` string rather than converting it back to a basic
    string, so it will still be marked as safe and won't be escaped
    again.

    >>> value = escape("<User 1>")
    >>> value
    Markup('&lt;User 1&gt;')
    >>> escape(str(value))
    Markup('&amp;lt;User 1&amp;gt;')
    >>> escape(soft_str(value))
    Markup('&lt;User 1&gt;')
    """
    if isinstance(s, str):
        # Already a str (or str subclass such as Markup): return as-is.
        return s

    return str(s)
|
|
||||||
@ -1,320 +0,0 @@
|
|||||||
#include <Python.h>
|
|
||||||
|
|
||||||
/* Handle to the markupsafe.Markup class; set once by init_constants(). */
static PyObject* markup;

/* Import markupsafe.Markup so the functions below can wrap their results.
 * Returns 1 on success, 0 on failure (with the Python error set). */
static int
init_constants(void)
{
    PyObject *module;

    /* import markup type so that we can mark the return value */
    module = PyImport_ImportModule("markupsafe");
    if (!module)
        return 0;
    markup = PyObject_GetAttrString(module, "Markup");
    Py_DECREF(module);

    return 1;
}
|
|
||||||
|
|
||||||
/* First pass over [inp, inp_end): count how many EXTRA characters the
 * escaped output needs. '"', '\'' and '&' become 5-char entities
 * ("&#34;", "&#39;", "&amp;" → +4 each); '<' and '>' become 4-char
 * entities ("&lt;", "&gt;" → +3 each). Advances inp to inp_end.
 * Width-generic: works for Py_UCS1/2/4 pointers alike. */
#define GET_DELTA(inp, inp_end, delta) \
    while (inp < inp_end) { \
        switch (*inp++) { \
        case '"': \
        case '\'': \
        case '&': \
            delta += 4; \
            break; \
        case '<': \
        case '>': \
            delta += 3; \
            break; \
        } \
    }

/* Second pass: copy [inp, inp_end) into outp (pre-sized using the delta
 * from GET_DELTA), replacing the five special characters with their
 * entities. Runs of ordinary characters are counted in ncopy and flushed
 * with a single memcpy before each replacement and once at the end. */
#define DO_ESCAPE(inp, inp_end, outp) \
    { \
        Py_ssize_t ncopy = 0; \
        while (inp < inp_end) { \
            switch (*inp) { \
            case '"': \
                memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
                outp += ncopy; ncopy = 0; \
                *outp++ = '&'; \
                *outp++ = '#'; \
                *outp++ = '3'; \
                *outp++ = '4'; \
                *outp++ = ';'; \
                break; \
            case '\'': \
                memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
                outp += ncopy; ncopy = 0; \
                *outp++ = '&'; \
                *outp++ = '#'; \
                *outp++ = '3'; \
                *outp++ = '9'; \
                *outp++ = ';'; \
                break; \
            case '&': \
                memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
                outp += ncopy; ncopy = 0; \
                *outp++ = '&'; \
                *outp++ = 'a'; \
                *outp++ = 'm'; \
                *outp++ = 'p'; \
                *outp++ = ';'; \
                break; \
            case '<': \
                memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
                outp += ncopy; ncopy = 0; \
                *outp++ = '&'; \
                *outp++ = 'l'; \
                *outp++ = 't'; \
                *outp++ = ';'; \
                break; \
            case '>': \
                memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
                outp += ncopy; ncopy = 0; \
                *outp++ = '&'; \
                *outp++ = 'g'; \
                *outp++ = 't'; \
                *outp++ = ';'; \
                break; \
            default: \
                ncopy++; \
            } \
            inp++; \
        } \
        memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
    }
|
|
||||||
|
|
||||||
/* Escape a unicode object stored with 1 byte per character. Returns the
 * input itself (new reference) when nothing needs escaping, otherwise a
 * new exactly-sized string. Returns NULL on allocation failure. */
static PyObject*
escape_unicode_kind1(PyUnicodeObject *in)
{
    Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in);
    Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in);
    Py_UCS1 *outp;
    PyObject *out;
    Py_ssize_t delta = 0;

    GET_DELTA(inp, inp_end, delta);
    if (!delta) {
        /* Nothing to escape: share the original string. */
        Py_INCREF(in);
        return (PyObject*)in;
    }

    /* Preserve ASCII-ness of the result (maxchar 127 vs 255). */
    out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta,
                        PyUnicode_IS_ASCII(in) ? 127 : 255);
    if (!out)
        return NULL;

    /* GET_DELTA advanced inp to the end; reset it for the copy pass. */
    inp = PyUnicode_1BYTE_DATA(in);
    outp = PyUnicode_1BYTE_DATA(out);
    DO_ESCAPE(inp, inp_end, outp);
    return out;
}
|
|
||||||
|
|
||||||
/* Escape a unicode object stored with 2 bytes per character. Same
 * two-pass structure as escape_unicode_kind1; the output is allocated
 * with maxchar 65535 (full UCS-2 range). */
static PyObject*
escape_unicode_kind2(PyUnicodeObject *in)
{
    Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in);
    Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in);
    Py_UCS2 *outp;
    PyObject *out;
    Py_ssize_t delta = 0;

    GET_DELTA(inp, inp_end, delta);
    if (!delta) {
        /* Nothing to escape: share the original string. */
        Py_INCREF(in);
        return (PyObject*)in;
    }

    out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535);
    if (!out)
        return NULL;

    /* GET_DELTA advanced inp; reset it for the copy pass. */
    inp = PyUnicode_2BYTE_DATA(in);
    outp = PyUnicode_2BYTE_DATA(out);
    DO_ESCAPE(inp, inp_end, outp);
    return out;
}
|
|
||||||
|
|
||||||
|
|
||||||
/* Escape a unicode object stored with 4 bytes per character. Same
 * two-pass structure; the output is allocated with maxchar 1114111
 * (U+10FFFF, the full Unicode range). */
static PyObject*
escape_unicode_kind4(PyUnicodeObject *in)
{
    Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in);
    Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in);
    Py_UCS4 *outp;
    PyObject *out;
    Py_ssize_t delta = 0;

    GET_DELTA(inp, inp_end, delta);
    if (!delta) {
        /* Nothing to escape: share the original string. */
        Py_INCREF(in);
        return (PyObject*)in;
    }

    out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111);
    if (!out)
        return NULL;

    /* GET_DELTA advanced inp; reset it for the copy pass. */
    inp = PyUnicode_4BYTE_DATA(in);
    outp = PyUnicode_4BYTE_DATA(out);
    DO_ESCAPE(inp, inp_end, outp);
    return out;
}
|
|
||||||
|
|
||||||
/* Escape any unicode object: make sure the canonical (PEP 393)
 * representation is ready, then dispatch on its character width. */
static PyObject*
escape_unicode(PyUnicodeObject *in)
{
    if (PyUnicode_READY(in))
        return NULL;

    switch (PyUnicode_KIND(in)) {
    case PyUnicode_1BYTE_KIND:
        return escape_unicode_kind1(in);
    case PyUnicode_2BYTE_KIND:
        return escape_unicode_kind2(in);
    case PyUnicode_4BYTE_KIND:
        return escape_unicode_kind4(in);
    }
    assert(0);  /* shouldn't happen */
    return NULL;
}
|
|
||||||
|
|
||||||
/* METH_O implementation of escape(): wrap the escaped text in Markup.
 * Mirrors the pure-Python _native.escape. */
static PyObject*
escape(PyObject *self, PyObject *text)
{
    /* Interned "__html__" attribute name, created lazily on first call
     * and cached for the life of the process. */
    static PyObject *id_html;
    PyObject *s = NULL, *rv = NULL, *html;

    if (id_html == NULL) {
        id_html = PyUnicode_InternFromString("__html__");
        if (id_html == NULL) {
            return NULL;
        }
    }

    /* we don't have to escape integers, bools or floats */
    if (PyLong_CheckExact(text) ||
        PyFloat_CheckExact(text) || PyBool_Check(text) ||
        text == Py_None)
        return PyObject_CallFunctionObjArgs(markup, text, NULL);

    /* if the object has an __html__ method that performs the escaping */
    html = PyObject_GetAttr(text ,id_html);
    if (html) {
        s = PyObject_CallObject(html, NULL);
        Py_DECREF(html);
        if (s == NULL) {
            return NULL;
        }
        /* Convert to Markup object */
        rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
        Py_DECREF(s);
        return rv;
    }

    /* otherwise make the object unicode if it isn't, then escape */
    /* Discard the AttributeError raised by the failed __html__ lookup. */
    PyErr_Clear();
    if (!PyUnicode_Check(text)) {
        PyObject *unicode = PyObject_Str(text);
        if (!unicode)
            return NULL;
        s = escape_unicode((PyUnicodeObject*)unicode);
        Py_DECREF(unicode);
    }
    else
        s = escape_unicode((PyUnicodeObject*)text);

    /* convert the unicode string into a markup object. */
    rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
    Py_DECREF(s);
    return rv;
}
|
|
||||||
|
|
||||||
|
|
||||||
/* METH_O: like escape(), but None maps to an empty Markup string
 * (markup called with no arguments). */
static PyObject*
escape_silent(PyObject *self, PyObject *text)
{
    if (text != Py_None)
        return escape(self, text);
    return PyObject_CallFunctionObjArgs(markup, NULL);
}
|
|
||||||
|
|
||||||
|
|
||||||
/* METH_O: str(s) unless s is already a str (or subclass such as Markup),
 * in which case it is returned unchanged with a new reference. */
static PyObject*
soft_str(PyObject *self, PyObject *s)
{
    if (!PyUnicode_Check(s))
        return PyObject_Str(s);
    Py_INCREF(s);
    return s;
}
|
|
||||||
|
|
||||||
|
|
||||||
/* Method table for markupsafe._speedups; the docstrings mirror the
 * pure-Python implementations in _native.py.
 * NOTE(review): the doctest text below looks entity-decoded by whatever
 * produced this copy (e.g. "Markup('<User 1>')" where the escaped form
 * "Markup('&lt;User 1&gt;')" would be expected) — verify against
 * upstream before relying on these docstrings. */
static PyMethodDef module_methods[] = {
    {
        "escape",
        (PyCFunction)escape,
        METH_O,
        "Replace the characters ``&``, ``<``, ``>``, ``'``, and ``\"`` in"
        " the string with HTML-safe sequences. Use this if you need to display"
        " text that might contain such characters in HTML.\n\n"
        "If the object has an ``__html__`` method, it is called and the"
        " return value is assumed to already be safe for HTML.\n\n"
        ":param s: An object to be converted to a string and escaped.\n"
        ":return: A :class:`Markup` string with the escaped text.\n"
    },
    {
        "escape_silent",
        (PyCFunction)escape_silent,
        METH_O,
        "Like :func:`escape` but treats ``None`` as the empty string."
        " Useful with optional values, as otherwise you get the string"
        " ``'None'`` when the value is ``None``.\n\n"
        ">>> escape(None)\n"
        "Markup('None')\n"
        ">>> escape_silent(None)\n"
        "Markup('')\n"
    },
    {
        "soft_str",
        (PyCFunction)soft_str,
        METH_O,
        "Convert an object to a string if it isn't already. This preserves"
        " a :class:`Markup` string rather than converting it back to a basic"
        " string, so it will still be marked as safe and won't be escaped"
        " again.\n\n"
        ">>> value = escape(\"<User 1>\")\n"
        ">>> value\n"
        "Markup('<User 1>')\n"
        ">>> escape(str(value))\n"
        "Markup('&lt;User 1&gt;')\n"
        ">>> escape(soft_str(value))\n"
        "Markup('<User 1>')\n"
    },
    {NULL, NULL, 0, NULL}  /* Sentinel */
};
|
|
||||||
|
|
||||||
/* Module definition: no module docstring (NULL), no per-module state
 * (m_size = -1), no slots/traverse/clear/free hooks. */
static struct PyModuleDef module_definition = {
    PyModuleDef_HEAD_INIT,
    "markupsafe._speedups",
    NULL,
    -1,
    module_methods,
    NULL,
    NULL,
    NULL,
    NULL
};
|
|
||||||
|
|
||||||
/* Module initialization: resolve markupsafe.Markup first so the
 * functions above can rely on the `markup` global being set. */
PyMODINIT_FUNC
PyInit__speedups(void)
{
    if (!init_constants())
        return NULL;

    return PyModule_Create(&module_definition);
}
|
|
||||||
@ -1,9 +0,0 @@
|
|||||||
# Type stubs for the C-accelerated markupsafe._speedups extension.
from typing import Any
from typing import Optional

from . import Markup

def escape(s: Any) -> Markup: ...
def escape_silent(s: Optional[Any]) -> Markup: ...
def soft_str(s: Any) -> str: ...
# NOTE(review): soft_unicode is stubbed here but does not appear in the
# _speedups.c method table in this copy — confirm it still exists.
def soft_unicode(s: Any) -> str: ...
|
|
||||||
@ -1,20 +0,0 @@
|
|||||||
Copyright (c) 2017-2021 Ingy döt Net
|
|
||||||
Copyright (c) 2006-2016 Kirill Simonov
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|
||||||
this software and associated documentation files (the "Software"), to deal in
|
|
||||||
the Software without restriction, including without limitation the rights to
|
|
||||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
|
||||||
of the Software, and to permit persons to whom the Software is furnished to do
|
|
||||||
so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
@ -1,390 +0,0 @@
|
|||||||
|
|
||||||
from .error import *
|
|
||||||
|
|
||||||
from .tokens import *
|
|
||||||
from .events import *
|
|
||||||
from .nodes import *
|
|
||||||
|
|
||||||
from .loader import *
|
|
||||||
from .dumper import *
|
|
||||||
|
|
||||||
__version__ = '6.0'
# Use the LibYAML-backed C classes when the optional compiled extension
# is installed; __with_libyaml__ records whether they are available.
try:
    from .cyaml import *
    __with_libyaml__ = True
except ImportError:
    __with_libyaml__ = False
|
|
||||||
|
|
||||||
import io
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# XXX "Warnings control" is now deprecated. Leaving in the API function to not
|
|
||||||
# break code that uses it.
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
def warnings(settings=None):
    """Deprecated "warnings control" stub kept only for API
    compatibility; returns an empty dict when queried with no settings,
    otherwise None.
    """
    if settings is not None:
        return None
    return {}
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
def scan(stream, Loader=Loader):
    """
    Scan a YAML stream and produce scanning tokens.
    """
    loader = Loader(stream)
    try:
        while loader.check_token():
            yield loader.get_token()
    finally:
        # Release parser resources even if the caller abandons the
        # generator early (finally runs on generator close too).
        loader.dispose()
|
|
||||||
|
|
||||||
def parse(stream, Loader=Loader):
    """
    Parse a YAML stream and produce parsing events.
    """
    loader = Loader(stream)
    try:
        while loader.check_event():
            yield loader.get_event()
    finally:
        # Release parser resources even on early generator close.
        loader.dispose()
|
|
||||||
|
|
||||||
def compose(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding representation tree.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_node()
    finally:
        # Release parser resources regardless of outcome.
        loader.dispose()
|
|
||||||
|
|
||||||
def compose_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding representation trees.
    """
    loader = Loader(stream)
    try:
        while loader.check_node():
            yield loader.get_node()
    finally:
        # Release parser resources even on early generator close.
        loader.dispose()
|
|
||||||
|
|
||||||
def load(stream, Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.

    The Loader class is required; pick SafeLoader/FullLoader/etc. by
    trust level, or use the safe_/full_/unsafe_ convenience wrappers.
    """
    ldr = Loader(stream)
    try:
        return ldr.get_single_data()
    finally:
        # Always release parser resources.
        ldr.dispose()
|
|
||||||
|
|
||||||
def load_all(stream, Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.

    This is a generator; parser resources are released when it is
    exhausted or closed.
    """
    ldr = Loader(stream)
    try:
        while ldr.check_data():
            yield ldr.get_data()
    finally:
        ldr.dispose()
|
|
||||||
|
|
||||||
def full_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.

    Resolve all tags except those known to be
    unsafe on untrusted input.
    """
    # Convenience wrapper: load() with the FullLoader policy.
    return load(stream, FullLoader)
|
|
||||||
|
|
||||||
def full_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.

    Resolve all tags except those known to be
    unsafe on untrusted input.
    """
    # Convenience wrapper: load_all() with the FullLoader policy.
    return load_all(stream, FullLoader)
|
|
||||||
|
|
||||||
def safe_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.

    Resolve only basic YAML tags. This is known
    to be safe for untrusted input.
    """
    # Convenience wrapper: load() with the SafeLoader policy.
    return load(stream, SafeLoader)
|
|
||||||
|
|
||||||
def safe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.

    Resolve only basic YAML tags. This is known
    to be safe for untrusted input.
    """
    # Convenience wrapper: load_all() with the SafeLoader policy.
    return load_all(stream, SafeLoader)
|
|
||||||
|
|
||||||
def unsafe_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.

    Resolve all tags, even those known to be
    unsafe on untrusted input.
    """
    # Convenience wrapper: load() with the UnsafeLoader policy.
    # Never use this on untrusted input.
    return load(stream, UnsafeLoader)
|
|
||||||
|
|
||||||
def unsafe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.

    Resolve all tags, even those known to be
    unsafe on untrusted input.
    """
    # Convenience wrapper: load_all() with the UnsafeLoader policy.
    # Never use this on untrusted input.
    return load_all(stream, UnsafeLoader)
|
|
||||||
|
|
||||||
def emit(events, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # No target stream: emit into an in-memory buffer and remember
        # how to read it back.
        stream = io.StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break)
    try:
        for event in events:
            dumper.emit(event)
    finally:
        # Always release emitter resources.
        dumper.dispose()
    if getvalue:
        return getvalue()
|
|
||||||
|
|
||||||
def serialize_all(nodes, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # Buffer in memory: text unless a byte encoding was requested.
        if encoding is None:
            stream = io.StringIO()
        else:
            stream = io.BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for node in nodes:
            dumper.serialize(node)
        dumper.close()
    finally:
        # Always release serializer resources.
        dumper.dispose()
    if getvalue:
        return getvalue()
|
|
||||||
|
|
||||||
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    # Single-node convenience wrapper around serialize_all().
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
|
|
||||||
|
|
||||||
def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=False,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None, sort_keys=True):
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.

    Block style (``default_flow_style=False``) and key sorting
    (``sort_keys=True``) are the defaults.
    """
    getvalue = None
    if stream is None:
        # Buffer in memory: text unless a byte encoding was requested.
        if encoding is None:
            stream = io.StringIO()
        else:
            stream = io.BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
    try:
        dumper.open()
        for data in documents:
            dumper.represent(data)
        dumper.close()
    finally:
        # Always release dumper resources.
        dumper.dispose()
    if getvalue:
        return getvalue()
|
|
||||||
|
|
||||||
def dump(data, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.
    """
    # Single-document convenience wrapper around dump_all().
    return dump_all([data], stream, Dumper=Dumper, **kwds)
|
|
||||||
|
|
||||||
def safe_dump_all(documents, stream=None, **kwds):
    """
    Serialize a sequence of Python objects into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    # dump_all() pinned to SafeDumper.
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
|
|
||||||
|
|
||||||
def safe_dump(data, stream=None, **kwds):
    """
    Serialize a Python object into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    # dump_all() pinned to SafeDumper, single document.
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
|
|
||||||
|
|
||||||
def add_implicit_resolver(tag, regexp, first=None,
        Loader=None, Dumper=Dumper):
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    if Loader is None:
        # Loader=None registers on every standard loader. The bare name
        # `loader` resolves because importing the yaml.loader submodule
        # binds it as an attribute of the package, i.e. in this module's
        # globals.
        loader.Loader.add_implicit_resolver(tag, regexp, first)
        loader.FullLoader.add_implicit_resolver(tag, regexp, first)
        loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
    else:
        Loader.add_implicit_resolver(tag, regexp, first)
    Dumper.add_implicit_resolver(tag, regexp, first)
|
|
||||||
|
|
||||||
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    if Loader is None:
        # Loader=None registers on every standard loader; the bare name
        # `loader` is the yaml.loader submodule bound on the package.
        loader.Loader.add_path_resolver(tag, path, kind)
        loader.FullLoader.add_path_resolver(tag, path, kind)
        loader.UnsafeLoader.add_path_resolver(tag, path, kind)
    else:
        Loader.add_path_resolver(tag, path, kind)
    Dumper.add_path_resolver(tag, path, kind)
|
|
||||||
|
|
||||||
def add_constructor(tag, constructor, Loader=None):
    """
    Add a constructor for the given tag.
    Constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        # Loader=None registers on every standard loader; the bare name
        # `loader` is the yaml.loader submodule bound on the package.
        loader.Loader.add_constructor(tag, constructor)
        loader.FullLoader.add_constructor(tag, constructor)
        loader.UnsafeLoader.add_constructor(tag, constructor)
    else:
        Loader.add_constructor(tag, constructor)
|
|
||||||
|
|
||||||
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        # Loader=None registers on every standard loader; the bare name
        # `loader` is the yaml.loader submodule bound on the package.
        loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
        loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
        loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
    else:
        Loader.add_multi_constructor(tag_prefix, multi_constructor)
|
|
||||||
|
|
||||||
def add_representer(data_type, representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    # Exact-type registration; see add_multi_representer for subtypes.
    Dumper.add_representer(data_type, representer)
|
|
||||||
|
|
||||||
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Multi-representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    # Unlike add_representer, this also matches subtypes of data_type.
    Dumper.add_multi_representer(data_type, multi_representer)
|
|
||||||
|
|
||||||
class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.

    When a subclass defines a non-None ``yaml_tag``, its
    ``from_yaml``/``to_yaml`` hooks are registered with the configured
    loader(s) and dumper automatically at class-creation time.
    """
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # Only register classes that declare a tag themselves.
        if kwds.get('yaml_tag') is None:
            return

        # yaml_loader may be a single loader class or a list of them.
        loaders = cls.yaml_loader
        if not isinstance(loaders, list):
            loaders = [loaders]
        for yaml_loader in loaders:
            yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)

        cls.yaml_dumper.add_representer(cls, cls.to_yaml)
|
|
||||||
|
|
||||||
class YAMLObject(metaclass=YAMLObjectMetaclass):
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.

    Subclasses set ``yaml_tag`` (and optionally the other class
    attributes below); the metaclass then registers from_yaml/to_yaml
    with the listed loaders and the dumper automatically.
    """

    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # Loader class(es) and dumper the metaclass registers this class with.
    yaml_loader = [Loader, FullLoader, UnsafeLoader]
    yaml_dumper = Dumper

    # The YAML tag for this class; None disables registration.
    yaml_tag = None
    # Flow style passed to represent_yaml_object (None = unspecified).
    yaml_flow_style = None

    @classmethod
    def from_yaml(cls, loader, node):
        """
        Convert a representation node to a Python object.
        """
        return loader.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, dumper, data):
        """
        Convert a Python object to a representation node.
        """
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                flow_style=cls.yaml_flow_style)
|
|
||||||
@ -1,139 +0,0 @@
|
|||||||
|
|
||||||
__all__ = ['Composer', 'ComposerError']
|
|
||||||
|
|
||||||
from .error import MarkedYAMLError
|
|
||||||
from .events import *
|
|
||||||
from .nodes import *
|
|
||||||
|
|
||||||
class ComposerError(MarkedYAMLError):
    # Raised for structural problems while composing node trees, e.g. an
    # undefined alias or multiple documents where one was expected.
    pass
|
|
||||||
|
|
||||||
class Composer:
    """Build a node tree from the parser's event stream.

    Mixed into loader classes; relies on the parser mixin for
    ``check_event``/``peek_event``/``get_event`` and on the resolver mixin
    for ``resolve``/``descend_resolver``/``ascend_resolver``.
    """

    def __init__(self):
        # Maps anchor names to already-composed nodes so aliases can refer
        # back to them.  Reset after every document.
        self.anchors = {}

    def check_node(self):
        """Return True if another document can be composed."""
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()
        return not self.check_event(StreamEndEvent)

    def get_node(self):
        """Compose and return the root node of the next document, if any."""
        if not self.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self):
        """Compose the only document in the stream (or None if empty).

        Raises ComposerError if the stream contains more than one document.
        """
        # Drop the STREAM-START event.
        self.get_event()

        # Compose a document if the stream is not empty.
        root = None
        if not self.check_event(StreamEndEvent):
            root = self.compose_document()

        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            extra = self.get_event()
            raise ComposerError(
                "expected a single document in the stream", root.start_mark,
                "but found another document", extra.start_mark)

        # Drop the STREAM-END event.
        self.get_event()

        return root

    def compose_document(self):
        """Compose one document and return its root node."""
        # Drop the DOCUMENT-START event.
        self.get_event()

        # Compose the root node.
        root = self.compose_node(None, None)

        # Drop the DOCUMENT-END event.
        self.get_event()

        # Anchors are scoped to a single document.
        self.anchors = {}
        return root

    def compose_node(self, parent, index):
        """Compose a single node (scalar, sequence, or mapping)."""
        if self.check_event(AliasEvent):
            event = self.get_event()
            alias = event.anchor
            if alias not in self.anchors:
                raise ComposerError(
                    None, None, "found undefined alias %r" % alias,
                    event.start_mark)
            return self.anchors[alias]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None and anchor in self.anchors:
            raise ComposerError(
                "found duplicate anchor %r; first occurrence" % anchor,
                self.anchors[anchor].start_mark,
                "second occurrence", event.start_mark)
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor):
        event = self.get_event()
        tag = event.tag
        if tag is None or tag == '!':
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value, event.start_mark, event.end_mark,
                          style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [], start_event.start_mark, None,
                            flow_style=start_event.flow_style)
        # Register the anchor before composing children so that aliases
        # inside the sequence can refer back to it (recursive nodes).
        if anchor is not None:
            self.anchors[anchor] = node
        position = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, position))
            position += 1
        node.end_mark = self.get_event().end_mark
        return node

    def compose_mapping_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [], start_event.start_mark, None,
                           flow_style=start_event.flow_style)
        # Register the anchor before composing children (recursive nodes).
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            # Duplicate keys are deliberately not rejected here.
            item_key = self.compose_node(node, None)
            item_value = self.compose_node(node, item_key)
            node.value.append((item_key, item_value))
        node.end_mark = self.get_event().end_mark
        return node
@ -1,748 +0,0 @@
|
|||||||
|
|
||||||
__all__ = [
|
|
||||||
'BaseConstructor',
|
|
||||||
'SafeConstructor',
|
|
||||||
'FullConstructor',
|
|
||||||
'UnsafeConstructor',
|
|
||||||
'Constructor',
|
|
||||||
'ConstructorError'
|
|
||||||
]
|
|
||||||
|
|
||||||
from .error import *
|
|
||||||
from .nodes import *
|
|
||||||
|
|
||||||
import collections.abc, datetime, base64, binascii, re, sys, types
|
|
||||||
|
|
||||||
class ConstructorError(MarkedYAMLError):
    """Raised when a node tree cannot be converted into native objects."""
class BaseConstructor:
    """Turn a composed node tree into native Python objects.

    Mixed into loader classes; relies on the composer mixin for
    ``check_node``/``get_node``/``get_single_node``.  Tag-to-constructor
    registries are kept per class: ``add_constructor`` copies the registry
    on first write so subclasses do not mutate their bases.
    """

    yaml_constructors = {}
    yaml_multi_constructors = {}

    def __init__(self):
        self.constructed_objects = {}  # node -> finished object cache
        self.recursive_objects = {}    # nodes currently under construction
        self.state_generators = []     # paused two-step (generator) constructors
        self.deep_construct = False

    def check_data(self):
        # If there are more documents available?
        return self.check_node()

    def check_state_key(self, key):
        """Block special attributes/methods from being set in a newly created
        object, to prevent user-controlled methods from being called during
        deserialization"""
        if self.get_state_keys_blacklist_regexp().match(key):
            raise ConstructorError(
                None, None,
                "blacklisted key '%s' in instance state found" % (key,), None)

    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        # Ensure that the stream contains a single document and construct it.
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None

    def construct_document(self, node):
        data = self.construct_object(node)
        # Drain generator-based constructors until no new ones are queued;
        # draining one generator may schedule more.
        while self.state_generators:
            pending = self.state_generators
            self.state_generators = []
            for generator in pending:
                for dummy in generator:
                    pass
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data

    def construct_object(self, node, deep=False):
        """Construct an object for *node*, dispatching on its tag.

        With ``deep=True`` generator-based constructors are drained
        immediately instead of being deferred.
        """
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.recursive_objects:
            raise ConstructorError(
                None, None, "found unconstructable recursive node",
                node.start_mark)
        self.recursive_objects[node] = None
        constructor = None
        tag_suffix = None
        if node.tag in self.yaml_constructors:
            # Exact tag match wins.
            constructor = self.yaml_constructors[node.tag]
        else:
            # Then prefix (multi) constructors, then wildcard fallbacks,
            # then the structural default for the node kind.
            for tag_prefix in self.yaml_multi_constructors:
                if tag_prefix is not None and node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        if isinstance(data, types.GeneratorType):
            # Two-step constructor: first yield is the (possibly empty)
            # object, the rest fills it in — enables recursive structures.
            generator = data
            data = next(generator)
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data

    def construct_scalar(self, node):
        if not isinstance(node, ScalarNode):
            raise ConstructorError(
                None, None,
                "expected a scalar node, but found %s" % node.id,
                node.start_mark)
        return node.value

    def construct_sequence(self, node, deep=False):
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                None, None,
                "expected a sequence node, but found %s" % node.id,
                node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]

    def construct_mapping(self, node, deep=False):
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None,
                "expected a mapping node, but found %s" % node.id,
                node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            if not isinstance(key, collections.abc.Hashable):
                raise ConstructorError(
                    "while constructing a mapping", node.start_mark,
                    "found unhashable key", key_node.start_mark)
            mapping[key] = self.construct_object(value_node, deep=deep)
        return mapping

    def construct_pairs(self, node, deep=False):
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None,
                "expected a mapping node, but found %s" % node.id,
                node.start_mark)
        return [(self.construct_object(key_node, deep=deep),
                 self.construct_object(value_node, deep=deep))
                for key_node, value_node in node.value]

    @classmethod
    def add_constructor(cls, tag, constructor):
        """Register *constructor* for an exact *tag* on this class only."""
        if 'yaml_constructors' not in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor

    @classmethod
    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        """Register *multi_constructor* for all tags starting with *tag_prefix*."""
        if 'yaml_multi_constructors' not in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
class SafeConstructor(BaseConstructor):
    """Construct only the standard YAML tags; safe for untrusted input."""

    def construct_scalar(self, node):
        # A mapping may stand in for a scalar through the '=' (value) key.
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == 'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return super().construct_scalar(node)

    def flatten_mapping(self, node):
        """Resolve '<<' merge keys in place, prepending merged entries."""
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == 'tag:yaml.org,2002:merge':
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    # Later mappings in the sequence take lower precedence,
                    # hence the reverse before extending.
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError(
                                "while constructing a mapping",
                                node.start_mark,
                                "expected a mapping for merging, but found %s"
                                % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError(
                        "while constructing a mapping", node.start_mark,
                        "expected a mapping or list of mappings for merging, but found %s"
                        % value_node.id, value_node.start_mark)
            elif key_node.tag == 'tag:yaml.org,2002:value':
                key_node.tag = 'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            node.value = merge + node.value

    def construct_mapping(self, node, deep=False):
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return super().construct_mapping(node, deep=deep)

    def construct_yaml_null(self, node):
        self.construct_scalar(node)
        return None

    # Scalar spellings recognized as booleans (YAML 1.1).
    bool_values = {
        'yes':      True,
        'no':       False,
        'true':     True,
        'false':    False,
        'on':       True,
        'off':      False,
    }

    def construct_yaml_bool(self, node):
        text = self.construct_scalar(node)
        return self.bool_values[text.lower()]

    def construct_yaml_int(self, node):
        text = self.construct_scalar(node).replace('_', '')
        sign = -1 if text[0] == '-' else +1
        if text[0] in '+-':
            text = text[1:]
        if text == '0':
            return 0
        if text.startswith('0b'):
            return sign * int(text[2:], 2)
        if text.startswith('0x'):
            return sign * int(text[2:], 16)
        if text[0] == '0':
            return sign * int(text, 8)
        if ':' in text:
            # Sexagesimal (base-60) notation, e.g. "1:30" == 90.
            total = 0
            for part in text.split(':'):
                total = total * 60 + int(part)
            return sign * total
        return sign * int(text)

    # Compute +inf by repeated squaring so it works on any float width.
    inf_value = 1e300
    while inf_value != inf_value * inf_value:
        inf_value *= inf_value
    nan_value = -inf_value / inf_value  # Trying to make a quiet NaN (like C99).

    def construct_yaml_float(self, node):
        text = self.construct_scalar(node).replace('_', '').lower()
        sign = -1 if text[0] == '-' else +1
        if text[0] in '+-':
            text = text[1:]
        if text == '.inf':
            return sign * self.inf_value
        if text == '.nan':
            return self.nan_value
        if ':' in text:
            # Sexagesimal (base-60) notation.
            digits = [float(part) for part in text.split(':')]
            digits.reverse()
            base = 1
            total = 0.0
            for digit in digits:
                total += digit * base
                base *= 60
            return sign * total
        return sign * float(text)

    def construct_yaml_binary(self, node):
        try:
            data = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(
                None, None,
                "failed to convert base64 data into ascii: %s" % exc,
                node.start_mark)
        try:
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(data)
            else:
                # Fallback name on very old Pythons.
                return base64.decodestring(data)
        except binascii.Error as exc:
            raise ConstructorError(
                None, None,
                "failed to decode base64 data: %s" % exc, node.start_mark)

    # ISO 8601-ish timestamp per the YAML 1.1 timestamp type.
    timestamp_regexp = re.compile(
            r'''^(?P<year>[0-9][0-9][0-9][0-9])
                -(?P<month>[0-9][0-9]?)
                -(?P<day>[0-9][0-9]?)
                (?:(?:[Tt]|[ \t]+)
                (?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                :(?P<second>[0-9][0-9])
                (?:\.(?P<fraction>[0-9]*))?
                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)

    def construct_yaml_timestamp(self, node):
        # The construct_scalar call validates that the node is scalar-like;
        # the regexp then works on the raw value.
        self.construct_scalar(node)
        values = self.timestamp_regexp.match(node.value).groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        if not values['hour']:
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        tzinfo = None
        if values['fraction']:
            # Truncate/pad the fraction to microseconds.
            fraction = int(values['fraction'][:6].ljust(6, '0'))
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            tz_minute = int(values['tz_minute'] or 0)
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
            tzinfo = datetime.timezone(delta)
        elif values['tz']:
            tzinfo = datetime.timezone.utc
        return datetime.datetime(year, month, day, hour, minute, second,
                                 fraction, tzinfo=tzinfo)

    def _construct_flat_pairs(self, node, label):
        # Shared by construct_yaml_omap/construct_yaml_pairs: iterate a
        # sequence of single-entry mappings as (key, value) tuples.
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                "while constructing %s" % label, node.start_mark,
                "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError(
                    "while constructing %s" % label, node.start_mark,
                    "expected a mapping of length 1, but found %s" % subnode.id,
                    subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError(
                    "while constructing %s" % label, node.start_mark,
                    "expected a single mapping item, but found %d items" % len(subnode.value),
                    subnode.start_mark)
            key_node, value_node = subnode.value[0]
            yield (self.construct_object(key_node),
                   self.construct_object(value_node))

    def construct_yaml_omap(self, node):
        # Note: we do not check for duplicate keys, because it's too
        # CPU-expensive.
        omap = []
        yield omap
        omap.extend(self._construct_flat_pairs(node, 'an ordered map'))

    def construct_yaml_pairs(self, node):
        pairs = []
        yield pairs
        pairs.extend(self._construct_flat_pairs(node, 'pairs'))

    def construct_yaml_set(self, node):
        data = set()
        yield data
        data.update(self.construct_mapping(node))

    def construct_yaml_str(self, node):
        return self.construct_scalar(node)

    def construct_yaml_seq(self, node):
        data = []
        yield data
        data.extend(self.construct_sequence(node))

    def construct_yaml_map(self, node):
        data = {}
        yield data
        data.update(self.construct_mapping(node))

    def construct_yaml_object(self, node, cls):
        # Two-step construction so recursive references to the object work.
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            data.__setstate__(self.construct_mapping(node, deep=True))
        else:
            data.__dict__.update(self.construct_mapping(node))

    def construct_undefined(self, node):
        raise ConstructorError(
            None, None,
            "could not determine a constructor for the tag %r" % node.tag,
            node.start_mark)
# Register constructors for the standard YAML 1.1 tags; the None entry is
# the fallback for unrecognized tags.
for _tag, _constructor in [
        ('tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null),
        ('tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool),
        ('tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int),
        ('tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float),
        ('tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary),
        ('tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp),
        ('tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap),
        ('tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs),
        ('tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set),
        ('tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str),
        ('tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq),
        ('tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map),
        (None, SafeConstructor.construct_undefined),
]:
    SafeConstructor.add_constructor(_tag, _constructor)
del _tag, _constructor
class FullConstructor(SafeConstructor):
    """Construct most Python objects, but refuse anything that would import
    code or call user-controlled methods during deserialization.
    """

    # 'extend' is blacklisted because it is used by
    # construct_python_object_apply to add `listitems` to a newly generated
    # python instance
    def get_state_keys_blacklist(self):
        return ['^extend$', '^__.*__$']

    def get_state_keys_blacklist_regexp(self):
        # Compile lazily and cache on the instance.
        if not hasattr(self, 'state_keys_blacklist_regexp'):
            self.state_keys_blacklist_regexp = re.compile(
                '(' + '|'.join(self.get_state_keys_blacklist()) + ')')
        return self.state_keys_blacklist_regexp

    def construct_python_str(self, node):
        return self.construct_scalar(node)

    def construct_python_unicode(self, node):
        return self.construct_scalar(node)

    def construct_python_bytes(self, node):
        try:
            encoded = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(
                None, None,
                "failed to convert base64 data into ascii: %s" % exc,
                node.start_mark)
        try:
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(encoded)
            else:
                # Fallback name on very old Pythons.
                return base64.decodestring(encoded)
        except binascii.Error as exc:
            raise ConstructorError(
                None, None,
                "failed to decode base64 data: %s" % exc, node.start_mark)

    def construct_python_long(self, node):
        return self.construct_yaml_int(node)

    def construct_python_complex(self, node):
        return complex(self.construct_scalar(node))

    def construct_python_tuple(self, node):
        return tuple(self.construct_sequence(node))

    def find_python_module(self, name, mark, unsafe=False):
        """Return an already-imported module; import it first only if *unsafe*."""
        if not name:
            raise ConstructorError(
                "while constructing a Python module", mark,
                "expected non-empty name appended to the tag", mark)
        if unsafe:
            try:
                __import__(name)
            except ImportError as exc:
                raise ConstructorError(
                    "while constructing a Python module", mark,
                    "cannot find module %r (%s)" % (name, exc), mark)
        if name not in sys.modules:
            raise ConstructorError(
                "while constructing a Python module", mark,
                "module %r is not imported" % name, mark)
        return sys.modules[name]

    def find_python_name(self, name, mark, unsafe=False):
        """Resolve a dotted name to an attribute of an imported module."""
        if not name:
            raise ConstructorError(
                "while constructing a Python object", mark,
                "expected non-empty name appended to the tag", mark)
        if '.' in name:
            module_name, object_name = name.rsplit('.', 1)
        else:
            module_name = 'builtins'
            object_name = name
        if unsafe:
            try:
                __import__(module_name)
            except ImportError as exc:
                raise ConstructorError(
                    "while constructing a Python object", mark,
                    "cannot find module %r (%s)" % (module_name, exc), mark)
        if module_name not in sys.modules:
            raise ConstructorError(
                "while constructing a Python object", mark,
                "module %r is not imported" % module_name, mark)
        module = sys.modules[module_name]
        if not hasattr(module, object_name):
            raise ConstructorError(
                "while constructing a Python object", mark,
                "cannot find %r in the module %r"
                % (object_name, module.__name__), mark)
        return getattr(module, object_name)

    def construct_python_name(self, suffix, node):
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError(
                "while constructing a Python name", node.start_mark,
                "expected the empty value, but found %r" % value,
                node.start_mark)
        return self.find_python_name(suffix, node.start_mark)

    def construct_python_module(self, suffix, node):
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError(
                "while constructing a Python module", node.start_mark,
                "expected the empty value, but found %r" % value,
                node.start_mark)
        return self.find_python_module(suffix, node.start_mark)

    def make_python_instance(self, suffix, node,
            args=None, kwds=None, newobj=False, unsafe=False):
        """Instantiate the class named by *suffix*, via __new__ or __call__."""
        args = args or []
        kwds = kwds or {}
        cls = self.find_python_name(suffix, node.start_mark)
        if not (unsafe or isinstance(cls, type)):
            raise ConstructorError(
                "while constructing a Python instance", node.start_mark,
                "expected a class, but found %r" % type(cls),
                node.start_mark)
        if newobj and isinstance(cls, type):
            return cls.__new__(cls, *args, **kwds)
        else:
            return cls(*args, **kwds)

    def set_python_instance_state(self, instance, state, unsafe=False):
        """Apply pickled-style *state* to *instance*, filtering risky keys."""
        if hasattr(instance, '__setstate__'):
            instance.__setstate__(state)
        else:
            slotstate = {}
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            if hasattr(instance, '__dict__'):
                if not unsafe and state:
                    for key in state.keys():
                        self.check_state_key(key)
                instance.__dict__.update(state)
            elif state:
                slotstate.update(state)
            for key, value in slotstate.items():
                if not unsafe:
                    self.check_state_key(key)
                setattr(instance, key, value)

    def construct_python_object(self, suffix, node):
        # Format:
        #   !!python/object:module.name { ... state ... }
        instance = self.make_python_instance(suffix, node, newobj=True)
        yield instance
        deep = hasattr(instance, '__setstate__')
        state = self.construct_mapping(node, deep=deep)
        self.set_python_instance_state(instance, state)

    def construct_python_object_apply(self, suffix, node, newobj=False):
        # Format:
        #   !!python/object/apply       # (or !!python/object/new)
        #   args: [ ... arguments ... ]
        #   kwds: { ... keywords ... }
        #   state: ... state ...
        #   listitems: [ ... listitems ... ]
        #   dictitems: { ... dictitems ... }
        # or short format:
        #   !!python/object/apply [ ... arguments ... ]
        # The difference between !!python/object/apply and !!python/object/new
        # is how an object is created, check make_python_instance for details.
        if isinstance(node, SequenceNode):
            args = self.construct_sequence(node, deep=True)
            kwds = {}
            state = {}
            listitems = []
            dictitems = {}
        else:
            value = self.construct_mapping(node, deep=True)
            args = value.get('args', [])
            kwds = value.get('kwds', {})
            state = value.get('state', {})
            listitems = value.get('listitems', [])
            dictitems = value.get('dictitems', {})
        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
        if state:
            self.set_python_instance_state(instance, state)
        if listitems:
            instance.extend(listitems)
        if dictitems:
            for key in dictitems:
                instance[key] = dictitems[key]
        return instance

    def construct_python_object_new(self, suffix, node):
        return self.construct_python_object_apply(suffix, node, newobj=True)
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/none',
|
|
||||||
FullConstructor.construct_yaml_null)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/bool',
|
|
||||||
FullConstructor.construct_yaml_bool)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/str',
|
|
||||||
FullConstructor.construct_python_str)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/unicode',
|
|
||||||
FullConstructor.construct_python_unicode)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/bytes',
|
|
||||||
FullConstructor.construct_python_bytes)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/int',
|
|
||||||
FullConstructor.construct_yaml_int)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/long',
|
|
||||||
FullConstructor.construct_python_long)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/float',
|
|
||||||
FullConstructor.construct_yaml_float)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/complex',
|
|
||||||
FullConstructor.construct_python_complex)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/list',
|
|
||||||
FullConstructor.construct_yaml_seq)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/tuple',
|
|
||||||
FullConstructor.construct_python_tuple)
|
|
||||||
|
|
||||||
FullConstructor.add_constructor(
|
|
||||||
'tag:yaml.org,2002:python/dict',
|
|
||||||
FullConstructor.construct_yaml_map)
|
|
||||||
|
|
||||||
FullConstructor.add_multi_constructor(
|
|
||||||
'tag:yaml.org,2002:python/name:',
|
|
||||||
FullConstructor.construct_python_name)
|
|
||||||
|
|
||||||
class UnsafeConstructor(FullConstructor):
|
|
||||||
|
|
||||||
def find_python_module(self, name, mark):
|
|
||||||
return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True)
|
|
||||||
|
|
||||||
def find_python_name(self, name, mark):
|
|
||||||
return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True)
|
|
||||||
|
|
||||||
def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
|
|
||||||
return super(UnsafeConstructor, self).make_python_instance(
|
|
||||||
suffix, node, args, kwds, newobj, unsafe=True)
|
|
||||||
|
|
||||||
def set_python_instance_state(self, instance, state):
|
|
||||||
return super(UnsafeConstructor, self).set_python_instance_state(
|
|
||||||
instance, state, unsafe=True)
|
|
||||||
|
|
||||||
UnsafeConstructor.add_multi_constructor(
|
|
||||||
'tag:yaml.org,2002:python/module:',
|
|
||||||
UnsafeConstructor.construct_python_module)
|
|
||||||
|
|
||||||
UnsafeConstructor.add_multi_constructor(
|
|
||||||
'tag:yaml.org,2002:python/object:',
|
|
||||||
UnsafeConstructor.construct_python_object)
|
|
||||||
|
|
||||||
UnsafeConstructor.add_multi_constructor(
|
|
||||||
'tag:yaml.org,2002:python/object/new:',
|
|
||||||
UnsafeConstructor.construct_python_object_new)
|
|
||||||
|
|
||||||
UnsafeConstructor.add_multi_constructor(
|
|
||||||
'tag:yaml.org,2002:python/object/apply:',
|
|
||||||
UnsafeConstructor.construct_python_object_apply)
|
|
||||||
|
|
||||||
# Constructor is same as UnsafeConstructor. Need to leave this in place in case
|
|
||||||
# people have extended it directly.
|
|
||||||
class Constructor(UnsafeConstructor):
|
|
||||||
pass
|
|
||||||
@ -1,101 +0,0 @@
|
|||||||
|
|
||||||
__all__ = [
|
|
||||||
'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader',
|
|
||||||
'CBaseDumper', 'CSafeDumper', 'CDumper'
|
|
||||||
]
|
|
||||||
|
|
||||||
from yaml._yaml import CParser, CEmitter
|
|
||||||
|
|
||||||
from .constructor import *
|
|
||||||
|
|
||||||
from .serializer import *
|
|
||||||
from .representer import *
|
|
||||||
|
|
||||||
from .resolver import *
|
|
||||||
|
|
||||||
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
CParser.__init__(self, stream)
|
|
||||||
BaseConstructor.__init__(self)
|
|
||||||
BaseResolver.__init__(self)
|
|
||||||
|
|
||||||
class CSafeLoader(CParser, SafeConstructor, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
CParser.__init__(self, stream)
|
|
||||||
SafeConstructor.__init__(self)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class CFullLoader(CParser, FullConstructor, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
CParser.__init__(self, stream)
|
|
||||||
FullConstructor.__init__(self)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class CUnsafeLoader(CParser, UnsafeConstructor, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
CParser.__init__(self, stream)
|
|
||||||
UnsafeConstructor.__init__(self)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class CLoader(CParser, Constructor, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
CParser.__init__(self, stream)
|
|
||||||
Constructor.__init__(self)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
|
|
||||||
|
|
||||||
def __init__(self, stream,
|
|
||||||
default_style=None, default_flow_style=False,
|
|
||||||
canonical=None, indent=None, width=None,
|
|
||||||
allow_unicode=None, line_break=None,
|
|
||||||
encoding=None, explicit_start=None, explicit_end=None,
|
|
||||||
version=None, tags=None, sort_keys=True):
|
|
||||||
CEmitter.__init__(self, stream, canonical=canonical,
|
|
||||||
indent=indent, width=width, encoding=encoding,
|
|
||||||
allow_unicode=allow_unicode, line_break=line_break,
|
|
||||||
explicit_start=explicit_start, explicit_end=explicit_end,
|
|
||||||
version=version, tags=tags)
|
|
||||||
Representer.__init__(self, default_style=default_style,
|
|
||||||
default_flow_style=default_flow_style, sort_keys=sort_keys)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream,
|
|
||||||
default_style=None, default_flow_style=False,
|
|
||||||
canonical=None, indent=None, width=None,
|
|
||||||
allow_unicode=None, line_break=None,
|
|
||||||
encoding=None, explicit_start=None, explicit_end=None,
|
|
||||||
version=None, tags=None, sort_keys=True):
|
|
||||||
CEmitter.__init__(self, stream, canonical=canonical,
|
|
||||||
indent=indent, width=width, encoding=encoding,
|
|
||||||
allow_unicode=allow_unicode, line_break=line_break,
|
|
||||||
explicit_start=explicit_start, explicit_end=explicit_end,
|
|
||||||
version=version, tags=tags)
|
|
||||||
SafeRepresenter.__init__(self, default_style=default_style,
|
|
||||||
default_flow_style=default_flow_style, sort_keys=sort_keys)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class CDumper(CEmitter, Serializer, Representer, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream,
|
|
||||||
default_style=None, default_flow_style=False,
|
|
||||||
canonical=None, indent=None, width=None,
|
|
||||||
allow_unicode=None, line_break=None,
|
|
||||||
encoding=None, explicit_start=None, explicit_end=None,
|
|
||||||
version=None, tags=None, sort_keys=True):
|
|
||||||
CEmitter.__init__(self, stream, canonical=canonical,
|
|
||||||
indent=indent, width=width, encoding=encoding,
|
|
||||||
allow_unicode=allow_unicode, line_break=line_break,
|
|
||||||
explicit_start=explicit_start, explicit_end=explicit_end,
|
|
||||||
version=version, tags=tags)
|
|
||||||
Representer.__init__(self, default_style=default_style,
|
|
||||||
default_flow_style=default_flow_style, sort_keys=sort_keys)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
@ -1,62 +0,0 @@
|
|||||||
|
|
||||||
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
|
|
||||||
|
|
||||||
from .emitter import *
|
|
||||||
from .serializer import *
|
|
||||||
from .representer import *
|
|
||||||
from .resolver import *
|
|
||||||
|
|
||||||
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
|
|
||||||
|
|
||||||
def __init__(self, stream,
|
|
||||||
default_style=None, default_flow_style=False,
|
|
||||||
canonical=None, indent=None, width=None,
|
|
||||||
allow_unicode=None, line_break=None,
|
|
||||||
encoding=None, explicit_start=None, explicit_end=None,
|
|
||||||
version=None, tags=None, sort_keys=True):
|
|
||||||
Emitter.__init__(self, stream, canonical=canonical,
|
|
||||||
indent=indent, width=width,
|
|
||||||
allow_unicode=allow_unicode, line_break=line_break)
|
|
||||||
Serializer.__init__(self, encoding=encoding,
|
|
||||||
explicit_start=explicit_start, explicit_end=explicit_end,
|
|
||||||
version=version, tags=tags)
|
|
||||||
Representer.__init__(self, default_style=default_style,
|
|
||||||
default_flow_style=default_flow_style, sort_keys=sort_keys)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream,
|
|
||||||
default_style=None, default_flow_style=False,
|
|
||||||
canonical=None, indent=None, width=None,
|
|
||||||
allow_unicode=None, line_break=None,
|
|
||||||
encoding=None, explicit_start=None, explicit_end=None,
|
|
||||||
version=None, tags=None, sort_keys=True):
|
|
||||||
Emitter.__init__(self, stream, canonical=canonical,
|
|
||||||
indent=indent, width=width,
|
|
||||||
allow_unicode=allow_unicode, line_break=line_break)
|
|
||||||
Serializer.__init__(self, encoding=encoding,
|
|
||||||
explicit_start=explicit_start, explicit_end=explicit_end,
|
|
||||||
version=version, tags=tags)
|
|
||||||
SafeRepresenter.__init__(self, default_style=default_style,
|
|
||||||
default_flow_style=default_flow_style, sort_keys=sort_keys)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class Dumper(Emitter, Serializer, Representer, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream,
|
|
||||||
default_style=None, default_flow_style=False,
|
|
||||||
canonical=None, indent=None, width=None,
|
|
||||||
allow_unicode=None, line_break=None,
|
|
||||||
encoding=None, explicit_start=None, explicit_end=None,
|
|
||||||
version=None, tags=None, sort_keys=True):
|
|
||||||
Emitter.__init__(self, stream, canonical=canonical,
|
|
||||||
indent=indent, width=width,
|
|
||||||
allow_unicode=allow_unicode, line_break=line_break)
|
|
||||||
Serializer.__init__(self, encoding=encoding,
|
|
||||||
explicit_start=explicit_start, explicit_end=explicit_end,
|
|
||||||
version=version, tags=tags)
|
|
||||||
Representer.__init__(self, default_style=default_style,
|
|
||||||
default_flow_style=default_flow_style, sort_keys=sort_keys)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,75 +0,0 @@
|
|||||||
|
|
||||||
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
|
|
||||||
|
|
||||||
class Mark:
|
|
||||||
|
|
||||||
def __init__(self, name, index, line, column, buffer, pointer):
|
|
||||||
self.name = name
|
|
||||||
self.index = index
|
|
||||||
self.line = line
|
|
||||||
self.column = column
|
|
||||||
self.buffer = buffer
|
|
||||||
self.pointer = pointer
|
|
||||||
|
|
||||||
def get_snippet(self, indent=4, max_length=75):
|
|
||||||
if self.buffer is None:
|
|
||||||
return None
|
|
||||||
head = ''
|
|
||||||
start = self.pointer
|
|
||||||
while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
|
|
||||||
start -= 1
|
|
||||||
if self.pointer-start > max_length/2-1:
|
|
||||||
head = ' ... '
|
|
||||||
start += 5
|
|
||||||
break
|
|
||||||
tail = ''
|
|
||||||
end = self.pointer
|
|
||||||
while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
|
|
||||||
end += 1
|
|
||||||
if end-self.pointer > max_length/2-1:
|
|
||||||
tail = ' ... '
|
|
||||||
end -= 5
|
|
||||||
break
|
|
||||||
snippet = self.buffer[start:end]
|
|
||||||
return ' '*indent + head + snippet + tail + '\n' \
|
|
||||||
+ ' '*(indent+self.pointer-start+len(head)) + '^'
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
snippet = self.get_snippet()
|
|
||||||
where = " in \"%s\", line %d, column %d" \
|
|
||||||
% (self.name, self.line+1, self.column+1)
|
|
||||||
if snippet is not None:
|
|
||||||
where += ":\n"+snippet
|
|
||||||
return where
|
|
||||||
|
|
||||||
class YAMLError(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class MarkedYAMLError(YAMLError):
|
|
||||||
|
|
||||||
def __init__(self, context=None, context_mark=None,
|
|
||||||
problem=None, problem_mark=None, note=None):
|
|
||||||
self.context = context
|
|
||||||
self.context_mark = context_mark
|
|
||||||
self.problem = problem
|
|
||||||
self.problem_mark = problem_mark
|
|
||||||
self.note = note
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
lines = []
|
|
||||||
if self.context is not None:
|
|
||||||
lines.append(self.context)
|
|
||||||
if self.context_mark is not None \
|
|
||||||
and (self.problem is None or self.problem_mark is None
|
|
||||||
or self.context_mark.name != self.problem_mark.name
|
|
||||||
or self.context_mark.line != self.problem_mark.line
|
|
||||||
or self.context_mark.column != self.problem_mark.column):
|
|
||||||
lines.append(str(self.context_mark))
|
|
||||||
if self.problem is not None:
|
|
||||||
lines.append(self.problem)
|
|
||||||
if self.problem_mark is not None:
|
|
||||||
lines.append(str(self.problem_mark))
|
|
||||||
if self.note is not None:
|
|
||||||
lines.append(self.note)
|
|
||||||
return '\n'.join(lines)
|
|
||||||
|
|
||||||
@ -1,86 +0,0 @@
|
|||||||
|
|
||||||
# Abstract classes.
|
|
||||||
|
|
||||||
class Event(object):
|
|
||||||
def __init__(self, start_mark=None, end_mark=None):
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
def __repr__(self):
|
|
||||||
attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
|
|
||||||
if hasattr(self, key)]
|
|
||||||
arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
|
|
||||||
for key in attributes])
|
|
||||||
return '%s(%s)' % (self.__class__.__name__, arguments)
|
|
||||||
|
|
||||||
class NodeEvent(Event):
|
|
||||||
def __init__(self, anchor, start_mark=None, end_mark=None):
|
|
||||||
self.anchor = anchor
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
|
|
||||||
class CollectionStartEvent(NodeEvent):
|
|
||||||
def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
|
|
||||||
flow_style=None):
|
|
||||||
self.anchor = anchor
|
|
||||||
self.tag = tag
|
|
||||||
self.implicit = implicit
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
self.flow_style = flow_style
|
|
||||||
|
|
||||||
class CollectionEndEvent(Event):
|
|
||||||
pass
|
|
||||||
|
|
||||||
# Implementations.
|
|
||||||
|
|
||||||
class StreamStartEvent(Event):
|
|
||||||
def __init__(self, start_mark=None, end_mark=None, encoding=None):
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
self.encoding = encoding
|
|
||||||
|
|
||||||
class StreamEndEvent(Event):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class DocumentStartEvent(Event):
|
|
||||||
def __init__(self, start_mark=None, end_mark=None,
|
|
||||||
explicit=None, version=None, tags=None):
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
self.explicit = explicit
|
|
||||||
self.version = version
|
|
||||||
self.tags = tags
|
|
||||||
|
|
||||||
class DocumentEndEvent(Event):
|
|
||||||
def __init__(self, start_mark=None, end_mark=None,
|
|
||||||
explicit=None):
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
self.explicit = explicit
|
|
||||||
|
|
||||||
class AliasEvent(NodeEvent):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class ScalarEvent(NodeEvent):
|
|
||||||
def __init__(self, anchor, tag, implicit, value,
|
|
||||||
start_mark=None, end_mark=None, style=None):
|
|
||||||
self.anchor = anchor
|
|
||||||
self.tag = tag
|
|
||||||
self.implicit = implicit
|
|
||||||
self.value = value
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
self.style = style
|
|
||||||
|
|
||||||
class SequenceStartEvent(CollectionStartEvent):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class SequenceEndEvent(CollectionEndEvent):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class MappingStartEvent(CollectionStartEvent):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class MappingEndEvent(CollectionEndEvent):
|
|
||||||
pass
|
|
||||||
|
|
||||||
@ -1,63 +0,0 @@
|
|||||||
|
|
||||||
__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
|
|
||||||
|
|
||||||
from .reader import *
|
|
||||||
from .scanner import *
|
|
||||||
from .parser import *
|
|
||||||
from .composer import *
|
|
||||||
from .constructor import *
|
|
||||||
from .resolver import *
|
|
||||||
|
|
||||||
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
Reader.__init__(self, stream)
|
|
||||||
Scanner.__init__(self)
|
|
||||||
Parser.__init__(self)
|
|
||||||
Composer.__init__(self)
|
|
||||||
BaseConstructor.__init__(self)
|
|
||||||
BaseResolver.__init__(self)
|
|
||||||
|
|
||||||
class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
Reader.__init__(self, stream)
|
|
||||||
Scanner.__init__(self)
|
|
||||||
Parser.__init__(self)
|
|
||||||
Composer.__init__(self)
|
|
||||||
FullConstructor.__init__(self)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
Reader.__init__(self, stream)
|
|
||||||
Scanner.__init__(self)
|
|
||||||
Parser.__init__(self)
|
|
||||||
Composer.__init__(self)
|
|
||||||
SafeConstructor.__init__(self)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
Reader.__init__(self, stream)
|
|
||||||
Scanner.__init__(self)
|
|
||||||
Parser.__init__(self)
|
|
||||||
Composer.__init__(self)
|
|
||||||
Constructor.__init__(self)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
|
|
||||||
# UnsafeLoader is the same as Loader (which is and was always unsafe on
|
|
||||||
# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
|
|
||||||
# FullLoad should be able to load almost all YAML safely. Loader is left intact
|
|
||||||
# to ensure backwards compatibility.
|
|
||||||
class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
|
|
||||||
|
|
||||||
def __init__(self, stream):
|
|
||||||
Reader.__init__(self, stream)
|
|
||||||
Scanner.__init__(self)
|
|
||||||
Parser.__init__(self)
|
|
||||||
Composer.__init__(self)
|
|
||||||
Constructor.__init__(self)
|
|
||||||
Resolver.__init__(self)
|
|
||||||
@ -1,49 +0,0 @@
|
|||||||
|
|
||||||
class Node(object):
|
|
||||||
def __init__(self, tag, value, start_mark, end_mark):
|
|
||||||
self.tag = tag
|
|
||||||
self.value = value
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
def __repr__(self):
|
|
||||||
value = self.value
|
|
||||||
#if isinstance(value, list):
|
|
||||||
# if len(value) == 0:
|
|
||||||
# value = '<empty>'
|
|
||||||
# elif len(value) == 1:
|
|
||||||
# value = '<1 item>'
|
|
||||||
# else:
|
|
||||||
# value = '<%d items>' % len(value)
|
|
||||||
#else:
|
|
||||||
# if len(value) > 75:
|
|
||||||
# value = repr(value[:70]+u' ... ')
|
|
||||||
# else:
|
|
||||||
# value = repr(value)
|
|
||||||
value = repr(value)
|
|
||||||
return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
|
|
||||||
|
|
||||||
class ScalarNode(Node):
|
|
||||||
id = 'scalar'
|
|
||||||
def __init__(self, tag, value,
|
|
||||||
start_mark=None, end_mark=None, style=None):
|
|
||||||
self.tag = tag
|
|
||||||
self.value = value
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
self.style = style
|
|
||||||
|
|
||||||
class CollectionNode(Node):
|
|
||||||
def __init__(self, tag, value,
|
|
||||||
start_mark=None, end_mark=None, flow_style=None):
|
|
||||||
self.tag = tag
|
|
||||||
self.value = value
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
self.flow_style = flow_style
|
|
||||||
|
|
||||||
class SequenceNode(CollectionNode):
|
|
||||||
id = 'sequence'
|
|
||||||
|
|
||||||
class MappingNode(CollectionNode):
|
|
||||||
id = 'mapping'
|
|
||||||
|
|
||||||
@ -1,589 +0,0 @@
|
|||||||
|
|
||||||
# The following YAML grammar is LL(1) and is parsed by a recursive descent
|
|
||||||
# parser.
|
|
||||||
#
|
|
||||||
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
|
|
||||||
# implicit_document ::= block_node DOCUMENT-END*
|
|
||||||
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
|
|
||||||
# block_node_or_indentless_sequence ::=
|
|
||||||
# ALIAS
|
|
||||||
# | properties (block_content | indentless_block_sequence)?
|
|
||||||
# | block_content
|
|
||||||
# | indentless_block_sequence
|
|
||||||
# block_node ::= ALIAS
|
|
||||||
# | properties block_content?
|
|
||||||
# | block_content
|
|
||||||
# flow_node ::= ALIAS
|
|
||||||
# | properties flow_content?
|
|
||||||
# | flow_content
|
|
||||||
# properties ::= TAG ANCHOR? | ANCHOR TAG?
|
|
||||||
# block_content ::= block_collection | flow_collection | SCALAR
|
|
||||||
# flow_content ::= flow_collection | SCALAR
|
|
||||||
# block_collection ::= block_sequence | block_mapping
|
|
||||||
# flow_collection ::= flow_sequence | flow_mapping
|
|
||||||
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
|
|
||||||
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
|
|
||||||
# block_mapping ::= BLOCK-MAPPING_START
|
|
||||||
# ((KEY block_node_or_indentless_sequence?)?
|
|
||||||
# (VALUE block_node_or_indentless_sequence?)?)*
|
|
||||||
# BLOCK-END
|
|
||||||
# flow_sequence ::= FLOW-SEQUENCE-START
|
|
||||||
# (flow_sequence_entry FLOW-ENTRY)*
|
|
||||||
# flow_sequence_entry?
|
|
||||||
# FLOW-SEQUENCE-END
|
|
||||||
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
|
||||||
# flow_mapping ::= FLOW-MAPPING-START
|
|
||||||
# (flow_mapping_entry FLOW-ENTRY)*
|
|
||||||
# flow_mapping_entry?
|
|
||||||
# FLOW-MAPPING-END
|
|
||||||
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
|
||||||
#
|
|
||||||
# FIRST sets:
|
|
||||||
#
|
|
||||||
# stream: { STREAM-START }
|
|
||||||
# explicit_document: { DIRECTIVE DOCUMENT-START }
|
|
||||||
# implicit_document: FIRST(block_node)
|
|
||||||
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
|
|
||||||
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
|
|
||||||
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
|
|
||||||
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
|
|
||||||
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
|
|
||||||
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
|
|
||||||
# block_sequence: { BLOCK-SEQUENCE-START }
|
|
||||||
# block_mapping: { BLOCK-MAPPING-START }
|
|
||||||
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
|
|
||||||
# indentless_sequence: { ENTRY }
|
|
||||||
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
|
|
||||||
# flow_sequence: { FLOW-SEQUENCE-START }
|
|
||||||
# flow_mapping: { FLOW-MAPPING-START }
|
|
||||||
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
|
|
||||||
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
|
|
||||||
|
|
||||||
__all__ = ['Parser', 'ParserError']
|
|
||||||
|
|
||||||
from .error import MarkedYAMLError
|
|
||||||
from .tokens import *
|
|
||||||
from .events import *
|
|
||||||
from .scanner import *
|
|
||||||
|
|
||||||
class ParserError(MarkedYAMLError):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class Parser:
|
|
||||||
# Since writing a recursive-descendant parser is a straightforward task, we
|
|
||||||
# do not give many comments here.
|
|
||||||
|
|
||||||
DEFAULT_TAGS = {
|
|
||||||
'!': '!',
|
|
||||||
'!!': 'tag:yaml.org,2002:',
|
|
||||||
}
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.current_event = None
|
|
||||||
self.yaml_version = None
|
|
||||||
self.tag_handles = {}
|
|
||||||
self.states = []
|
|
||||||
self.marks = []
|
|
||||||
self.state = self.parse_stream_start
|
|
||||||
|
|
||||||
def dispose(self):
|
|
||||||
# Reset the state attributes (to clear self-references)
|
|
||||||
self.states = []
|
|
||||||
self.state = None
|
|
||||||
|
|
||||||
def check_event(self, *choices):
|
|
||||||
# Check the type of the next event.
|
|
||||||
if self.current_event is None:
|
|
||||||
if self.state:
|
|
||||||
self.current_event = self.state()
|
|
||||||
if self.current_event is not None:
|
|
||||||
if not choices:
|
|
||||||
return True
|
|
||||||
for choice in choices:
|
|
||||||
if isinstance(self.current_event, choice):
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def peek_event(self):
|
|
||||||
# Get the next event.
|
|
||||||
if self.current_event is None:
|
|
||||||
if self.state:
|
|
||||||
self.current_event = self.state()
|
|
||||||
return self.current_event
|
|
||||||
|
|
||||||
def get_event(self):
|
|
||||||
# Get the next event and proceed further.
|
|
||||||
if self.current_event is None:
|
|
||||||
if self.state:
|
|
||||||
self.current_event = self.state()
|
|
||||||
value = self.current_event
|
|
||||||
self.current_event = None
|
|
||||||
return value
|
|
||||||
|
|
||||||
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
|
|
||||||
# implicit_document ::= block_node DOCUMENT-END*
|
|
||||||
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
|
|
||||||
|
|
||||||
def parse_stream_start(self):
|
|
||||||
|
|
||||||
# Parse the stream start.
|
|
||||||
token = self.get_token()
|
|
||||||
event = StreamStartEvent(token.start_mark, token.end_mark,
|
|
||||||
encoding=token.encoding)
|
|
||||||
|
|
||||||
# Prepare the next state.
|
|
||||||
self.state = self.parse_implicit_document_start
|
|
||||||
|
|
||||||
return event
|
|
||||||
|
|
||||||
def parse_implicit_document_start(self):
|
|
||||||
|
|
||||||
# Parse an implicit document.
|
|
||||||
if not self.check_token(DirectiveToken, DocumentStartToken,
|
|
||||||
StreamEndToken):
|
|
||||||
self.tag_handles = self.DEFAULT_TAGS
|
|
||||||
token = self.peek_token()
|
|
||||||
start_mark = end_mark = token.start_mark
|
|
||||||
event = DocumentStartEvent(start_mark, end_mark,
|
|
||||||
explicit=False)
|
|
||||||
|
|
||||||
# Prepare the next state.
|
|
||||||
self.states.append(self.parse_document_end)
|
|
||||||
self.state = self.parse_block_node
|
|
||||||
|
|
||||||
return event
|
|
||||||
|
|
||||||
else:
|
|
||||||
return self.parse_document_start()
|
|
||||||
|
|
||||||
def parse_document_start(self):
|
|
||||||
|
|
||||||
# Parse any extra document end indicators.
|
|
||||||
while self.check_token(DocumentEndToken):
|
|
||||||
self.get_token()
|
|
||||||
|
|
||||||
# Parse an explicit document.
|
|
||||||
if not self.check_token(StreamEndToken):
|
|
||||||
token = self.peek_token()
|
|
||||||
start_mark = token.start_mark
|
|
||||||
version, tags = self.process_directives()
|
|
||||||
if not self.check_token(DocumentStartToken):
|
|
||||||
raise ParserError(None, None,
|
|
||||||
"expected '<document start>', but found %r"
|
|
||||||
% self.peek_token().id,
|
|
||||||
self.peek_token().start_mark)
|
|
||||||
token = self.get_token()
|
|
||||||
end_mark = token.end_mark
|
|
||||||
event = DocumentStartEvent(start_mark, end_mark,
|
|
||||||
explicit=True, version=version, tags=tags)
|
|
||||||
self.states.append(self.parse_document_end)
|
|
||||||
self.state = self.parse_document_content
|
|
||||||
else:
|
|
||||||
# Parse the end of the stream.
|
|
||||||
token = self.get_token()
|
|
||||||
event = StreamEndEvent(token.start_mark, token.end_mark)
|
|
||||||
assert not self.states
|
|
||||||
assert not self.marks
|
|
||||||
self.state = None
|
|
||||||
return event
|
|
||||||
|
|
||||||
def parse_document_end(self):
|
|
||||||
|
|
||||||
# Parse the document end.
|
|
||||||
token = self.peek_token()
|
|
||||||
start_mark = end_mark = token.start_mark
|
|
||||||
explicit = False
|
|
||||||
if self.check_token(DocumentEndToken):
|
|
||||||
token = self.get_token()
|
|
||||||
end_mark = token.end_mark
|
|
||||||
explicit = True
|
|
||||||
event = DocumentEndEvent(start_mark, end_mark,
|
|
||||||
explicit=explicit)
|
|
||||||
|
|
||||||
# Prepare the next state.
|
|
||||||
self.state = self.parse_document_start
|
|
||||||
|
|
||||||
return event
|
|
||||||
|
|
||||||
def parse_document_content(self):
|
|
||||||
if self.check_token(DirectiveToken,
|
|
||||||
DocumentStartToken, DocumentEndToken, StreamEndToken):
|
|
||||||
event = self.process_empty_scalar(self.peek_token().start_mark)
|
|
||||||
self.state = self.states.pop()
|
|
||||||
return event
|
|
||||||
else:
|
|
||||||
return self.parse_block_node()
|
|
||||||
|
|
||||||
def process_directives(self):
|
|
||||||
self.yaml_version = None
|
|
||||||
self.tag_handles = {}
|
|
||||||
while self.check_token(DirectiveToken):
|
|
||||||
token = self.get_token()
|
|
||||||
if token.name == 'YAML':
|
|
||||||
if self.yaml_version is not None:
|
|
||||||
raise ParserError(None, None,
|
|
||||||
"found duplicate YAML directive", token.start_mark)
|
|
||||||
major, minor = token.value
|
|
||||||
if major != 1:
|
|
||||||
raise ParserError(None, None,
|
|
||||||
"found incompatible YAML document (version 1.* is required)",
|
|
||||||
token.start_mark)
|
|
||||||
self.yaml_version = token.value
|
|
||||||
elif token.name == 'TAG':
|
|
||||||
handle, prefix = token.value
|
|
||||||
if handle in self.tag_handles:
|
|
||||||
raise ParserError(None, None,
|
|
||||||
"duplicate tag handle %r" % handle,
|
|
||||||
token.start_mark)
|
|
||||||
self.tag_handles[handle] = prefix
|
|
||||||
if self.tag_handles:
|
|
||||||
value = self.yaml_version, self.tag_handles.copy()
|
|
||||||
else:
|
|
||||||
value = self.yaml_version, None
|
|
||||||
for key in self.DEFAULT_TAGS:
|
|
||||||
if key not in self.tag_handles:
|
|
||||||
self.tag_handles[key] = self.DEFAULT_TAGS[key]
|
|
||||||
return value
|
|
||||||
|
|
||||||
# block_node_or_indentless_sequence ::= ALIAS
|
|
||||||
# | properties (block_content | indentless_block_sequence)?
|
|
||||||
# | block_content
|
|
||||||
# | indentless_block_sequence
|
|
||||||
# block_node ::= ALIAS
|
|
||||||
# | properties block_content?
|
|
||||||
# | block_content
|
|
||||||
# flow_node ::= ALIAS
|
|
||||||
# | properties flow_content?
|
|
||||||
# | flow_content
|
|
||||||
# properties ::= TAG ANCHOR? | ANCHOR TAG?
|
|
||||||
# block_content ::= block_collection | flow_collection | SCALAR
|
|
||||||
# flow_content ::= flow_collection | SCALAR
|
|
||||||
# block_collection ::= block_sequence | block_mapping
|
|
||||||
# flow_collection ::= flow_sequence | flow_mapping
|
|
||||||
|
|
||||||
def parse_block_node(self):
|
|
||||||
return self.parse_node(block=True)
|
|
||||||
|
|
||||||
def parse_flow_node(self):
|
|
||||||
return self.parse_node()
|
|
||||||
|
|
||||||
def parse_block_node_or_indentless_sequence(self):
|
|
||||||
return self.parse_node(block=True, indentless_sequence=True)
|
|
||||||
|
|
||||||
    def parse_node(self, block=False, indentless_sequence=False):
        # Parse a single node: either an alias, or optional anchor/tag
        # properties followed by the node content (scalar, sequence or
        # mapping).  `block` enables block-style collections;
        # `indentless_sequence` additionally allows a sequence without
        # extra indentation (used for block mapping values).
        if self.check_token(AliasToken):
            token = self.get_token()
            event = AliasEvent(token.value, token.start_mark, token.end_mark)
            self.state = self.states.pop()
        else:
            anchor = None
            tag = None
            start_mark = end_mark = tag_mark = None
            # Properties may appear in either order: ANCHOR then TAG,
            # or TAG then ANCHOR.
            if self.check_token(AnchorToken):
                token = self.get_token()
                start_mark = token.start_mark
                end_mark = token.end_mark
                anchor = token.value
                if self.check_token(TagToken):
                    token = self.get_token()
                    tag_mark = token.start_mark
                    end_mark = token.end_mark
                    tag = token.value
            elif self.check_token(TagToken):
                token = self.get_token()
                start_mark = tag_mark = token.start_mark
                end_mark = token.end_mark
                tag = token.value
                if self.check_token(AnchorToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    anchor = token.value
            if tag is not None:
                # Resolve the (handle, suffix) pair into a full tag.
                handle, suffix = tag
                if handle is not None:
                    if handle not in self.tag_handles:
                        raise ParserError("while parsing a node", start_mark,
                                "found undefined tag handle %r" % handle,
                                tag_mark)
                    tag = self.tag_handles[handle]+suffix
                else:
                    tag = suffix
            #if tag == '!':
            #    raise ParserError("while parsing a node", start_mark,
            #            "found non-specific tag '!'", tag_mark,
            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
            if start_mark is None:
                start_mark = end_mark = self.peek_token().start_mark
            event = None
            # A missing tag (or the non-specific '!') means the resolver
            # may pick a tag implicitly.
            implicit = (tag is None or tag == '!')
            if indentless_sequence and self.check_token(BlockEntryToken):
                end_mark = self.peek_token().end_mark
                event = SequenceStartEvent(anchor, tag, implicit,
                        start_mark, end_mark)
                self.state = self.parse_indentless_sequence_entry
            else:
                if self.check_token(ScalarToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    # implicit is a pair: (plain-resolvable, quoted-resolvable).
                    if (token.plain and tag is None) or tag == '!':
                        implicit = (True, False)
                    elif tag is None:
                        implicit = (False, True)
                    else:
                        implicit = (False, False)
                    event = ScalarEvent(anchor, tag, implicit, token.value,
                            start_mark, end_mark, style=token.style)
                    self.state = self.states.pop()
                elif self.check_token(FlowSequenceStartToken):
                    end_mark = self.peek_token().end_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_sequence_first_entry
                elif self.check_token(FlowMappingStartToken):
                    end_mark = self.peek_token().end_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_mapping_first_key
                elif block and self.check_token(BlockSequenceStartToken):
                    end_mark = self.peek_token().start_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_sequence_first_entry
                elif block and self.check_token(BlockMappingStartToken):
                    end_mark = self.peek_token().start_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_mapping_first_key
                elif anchor is not None or tag is not None:
                    # Empty scalars are allowed even if a tag or an anchor is
                    # specified.
                    event = ScalarEvent(anchor, tag, (implicit, False), '',
                            start_mark, end_mark)
                    self.state = self.states.pop()
                else:
                    if block:
                        node = 'block'
                    else:
                        node = 'flow'
                    token = self.peek_token()
                    raise ParserError("while parsing a %s node" % node, start_mark,
                            "expected the node content, but found %r" % token.id,
                            token.start_mark)
        return event
|
|
||||||
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
|
|
||||||
|
|
||||||
def parse_block_sequence_first_entry(self):
|
|
||||||
token = self.get_token()
|
|
||||||
self.marks.append(token.start_mark)
|
|
||||||
return self.parse_block_sequence_entry()
|
|
||||||
|
|
||||||
    def parse_block_sequence_entry(self):
        # BLOCK-ENTRY ('-') starts an item; if the next token is another
        # entry or the block end, the item is an empty scalar.
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken, BlockEndToken):
                self.states.append(self.parse_block_sequence_entry)
                return self.parse_block_node()
            else:
                self.state = self.parse_block_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        # No more entries: the sequence must be closed by BLOCK-END.
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while parsing a block collection", self.marks[-1],
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
|
|
||||||
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
|
|
||||||
|
|
||||||
    def parse_indentless_sequence_entry(self):
        # An indentless sequence has no closing BLOCK-END token: it ends
        # at the first token that is not a BLOCK-ENTRY.
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken,
                    KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_indentless_sequence_entry)
                return self.parse_block_node()
            else:
                self.state = self.parse_indentless_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        # End of the sequence; the end event is zero-width (same mark twice).
        token = self.peek_token()
        event = SequenceEndEvent(token.start_mark, token.start_mark)
        self.state = self.states.pop()
        return event
|
|
||||||
# block_mapping ::= BLOCK-MAPPING_START
|
|
||||||
# ((KEY block_node_or_indentless_sequence?)?
|
|
||||||
# (VALUE block_node_or_indentless_sequence?)?)*
|
|
||||||
# BLOCK-END
|
|
||||||
|
|
||||||
def parse_block_mapping_first_key(self):
|
|
||||||
token = self.get_token()
|
|
||||||
self.marks.append(token.start_mark)
|
|
||||||
return self.parse_block_mapping_key()
|
|
||||||
|
|
||||||
    def parse_block_mapping_key(self):
        # KEY ('?') starts an entry; a key/value/end token right after it
        # means the key itself is an empty scalar.
        if self.check_token(KeyToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_value)
                return self.parse_block_node_or_indentless_sequence()
            else:
                self.state = self.parse_block_mapping_value
                return self.process_empty_scalar(token.end_mark)
        # No more keys: the mapping must be closed by BLOCK-END.
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while parsing a block mapping", self.marks[-1],
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
|
|
||||||
    def parse_block_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_key)
                return self.parse_block_node_or_indentless_sequence()
            else:
                # ':' followed directly by the next entry/end: empty value.
                self.state = self.parse_block_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            # No VALUE token at all: the value is an empty scalar.
            self.state = self.parse_block_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
|
|
||||||
# flow_sequence ::= FLOW-SEQUENCE-START
|
|
||||||
# (flow_sequence_entry FLOW-ENTRY)*
|
|
||||||
# flow_sequence_entry?
|
|
||||||
# FLOW-SEQUENCE-END
|
|
||||||
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
|
||||||
#
|
|
||||||
# Note that while production rules for both flow_sequence_entry and
|
|
||||||
# flow_mapping_entry are equal, their interpretations are different.
|
|
||||||
# For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
|
|
||||||
# generate an inline mapping (set syntax).
|
|
||||||
|
|
||||||
def parse_flow_sequence_first_entry(self):
|
|
||||||
token = self.get_token()
|
|
||||||
self.marks.append(token.start_mark)
|
|
||||||
return self.parse_flow_sequence_entry(first=True)
|
|
||||||
|
|
||||||
    def parse_flow_sequence_entry(self, first=False):
        # Entries after the first must be separated by ','.  A KEY token
        # starts an implicit single-pair mapping inside the sequence.
        if not self.check_token(FlowSequenceEndToken):
            if not first:
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow sequence", self.marks[-1],
                            "expected ',' or ']', but got %r" % token.id, token.start_mark)

            if self.check_token(KeyToken):
                # Inline mapping entry: '[ key: value, ... ]'.
                token = self.peek_token()
                event = MappingStartEvent(None, None, True,
                        token.start_mark, token.end_mark,
                        flow_style=True)
                self.state = self.parse_flow_sequence_entry_mapping_key
                return event
            elif not self.check_token(FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry)
                return self.parse_flow_node()
        # ']' reached: close the sequence.
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
|
|
||||||
    def parse_flow_sequence_entry_mapping_key(self):
        # Consume the KEY token; an immediately following value/entry/end
        # means the key is an empty scalar.
        token = self.get_token()
        if not self.check_token(ValueToken,
                FlowEntryToken, FlowSequenceEndToken):
            self.states.append(self.parse_flow_sequence_entry_mapping_value)
            return self.parse_flow_node()
        else:
            self.state = self.parse_flow_sequence_entry_mapping_value
            return self.process_empty_scalar(token.end_mark)
|
|
||||||
    def parse_flow_sequence_entry_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry_mapping_end)
                return self.parse_flow_node()
            else:
                # ':' followed directly by ',' or ']': empty value.
                self.state = self.parse_flow_sequence_entry_mapping_end
                return self.process_empty_scalar(token.end_mark)
        else:
            # No VALUE token: the value is an empty scalar.
            self.state = self.parse_flow_sequence_entry_mapping_end
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
|
|
||||||
def parse_flow_sequence_entry_mapping_end(self):
|
|
||||||
self.state = self.parse_flow_sequence_entry
|
|
||||||
token = self.peek_token()
|
|
||||||
return MappingEndEvent(token.start_mark, token.start_mark)
|
|
||||||
|
|
||||||
# flow_mapping ::= FLOW-MAPPING-START
|
|
||||||
# (flow_mapping_entry FLOW-ENTRY)*
|
|
||||||
# flow_mapping_entry?
|
|
||||||
# FLOW-MAPPING-END
|
|
||||||
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
|
||||||
|
|
||||||
def parse_flow_mapping_first_key(self):
|
|
||||||
token = self.get_token()
|
|
||||||
self.marks.append(token.start_mark)
|
|
||||||
return self.parse_flow_mapping_key(first=True)
|
|
||||||
|
|
||||||
    def parse_flow_mapping_key(self, first=False):
        # Entries after the first must be separated by ','.
        if not self.check_token(FlowMappingEndToken):
            if not first:
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow mapping", self.marks[-1],
                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
            if self.check_token(KeyToken):
                token = self.get_token()
                if not self.check_token(ValueToken,
                        FlowEntryToken, FlowMappingEndToken):
                    self.states.append(self.parse_flow_mapping_value)
                    return self.parse_flow_node()
                else:
                    # '?' followed directly by value/entry/end: empty key.
                    self.state = self.parse_flow_mapping_value
                    return self.process_empty_scalar(token.end_mark)
            elif not self.check_token(FlowMappingEndToken):
                # A bare node used as a key; its value slot stays empty.
                self.states.append(self.parse_flow_mapping_empty_value)
                return self.parse_flow_node()
        # '}' reached: close the mapping.
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
|
|
||||||
    def parse_flow_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_key)
                return self.parse_flow_node()
            else:
                # ':' followed directly by ',' or '}': empty value.
                self.state = self.parse_flow_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            # No VALUE token: the value is an empty scalar.
            self.state = self.parse_flow_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
|
|
||||||
def parse_flow_mapping_empty_value(self):
|
|
||||||
self.state = self.parse_flow_mapping_key
|
|
||||||
return self.process_empty_scalar(self.peek_token().start_mark)
|
|
||||||
|
|
||||||
def process_empty_scalar(self, mark):
|
|
||||||
return ScalarEvent(None, None, (True, False), '', mark, mark)
|
|
||||||
|
|
||||||
@ -1,185 +0,0 @@
|
|||||||
# This module contains abstractions for the input stream. You don't have to
# look further; the code here is not pretty.
#
|
|
||||||
# We define two classes here.
|
|
||||||
#
|
|
||||||
# Mark(source, line, column)
|
|
||||||
# It's just a record and its only use is producing nice error messages.
|
|
||||||
# Parser does not use it for any other purposes.
|
|
||||||
#
|
|
||||||
# Reader(source, data)
|
|
||||||
# Reader determines the encoding of `data` and converts it to unicode.
|
|
||||||
# Reader provides the following methods and attributes:
|
|
||||||
# reader.peek(length=1) - return the next `length` characters
|
|
||||||
# reader.forward(length=1) - move the current position to `length` characters.
|
|
||||||
# reader.index - the number of the current character.
|
|
||||||
# reader.line, stream.column - the line and the column of the current character.
|
|
||||||
|
|
||||||
__all__ = ['Reader', 'ReaderError']
|
|
||||||
|
|
||||||
from .error import YAMLError, Mark
|
|
||||||
|
|
||||||
import codecs, re
|
|
||||||
|
|
||||||
class ReaderError(YAMLError):
    # Raised when the input stream contains undecodable bytes or a
    # character outside the printable YAML range.

    def __init__(self, name, position, character, encoding, reason):
        self.name = name
        self.character = character
        self.position = position
        self.encoding = encoding
        self.reason = reason

    def __str__(self):
        # Bytes mean a decoding failure; otherwise an unprintable character.
        if isinstance(self.character, bytes):
            template = ("'%s' codec can't decode byte #x%02x: %s\n"
                        " in \"%s\", position %d")
            details = (self.encoding, ord(self.character), self.reason,
                       self.name, self.position)
        else:
            template = ("unacceptable character #x%04x: %s\n"
                        " in \"%s\", position %d")
            details = (self.character, self.reason,
                       self.name, self.position)
        return template % details
|
|
||||||
class Reader(object):
    # Reader:
    # - determines the data encoding and converts it to a unicode string,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.

    # Reader accepts
    #  - a `bytes` object,
    #  - a `str` object,
    #  - a file-like object with its `read` method returning `str`,
    #  - a file-like object with its `read` method returning `unicode`.

    # Yeah, it's ugly and slow.

    def __init__(self, stream):
        self.name = None
        self.stream = None
        self.stream_pointer = 0   # total bytes read from the stream so far
        self.eof = True
        self.buffer = ''          # decoded characters not yet consumed
        self.pointer = 0          # index of the current character in buffer
        self.raw_buffer = None    # raw bytes read but not yet decoded
        self.raw_decode = None    # codec decode function chosen by BOM sniffing
        self.encoding = None
        self.index = 0            # absolute character index in the input
        self.line = 0
        self.column = 0
        if isinstance(stream, str):
            # Already decoded: validate and terminate with NUL.
            self.name = "<unicode string>"
            self.check_printable(stream)
            self.buffer = stream+'\0'
        elif isinstance(stream, bytes):
            self.name = "<byte string>"
            self.raw_buffer = stream
            self.determine_encoding()
        else:
            # Assume a file-like object with a read() method.
            self.stream = stream
            self.name = getattr(stream, 'name', "<file>")
            self.eof = False
            self.raw_buffer = None
            self.determine_encoding()

    def peek(self, index=0):
        # Return the character `index` positions ahead without consuming it.
        try:
            return self.buffer[self.pointer+index]
        except IndexError:
            self.update(index+1)
            return self.buffer[self.pointer+index]

    def prefix(self, length=1):
        # Return the next `length` characters without consuming them.
        if self.pointer+length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer:self.pointer+length]

    def forward(self, length=1):
        # Consume `length` characters, updating index/line/column.
        if self.pointer+length+1 >= len(self.buffer):
            self.update(length+1)
        while length:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            # Line breaks: LF, NEL, LS, PS, or a CR not followed by LF
            # (CRLF counts as one break, attributed to the LF).
            if ch in '\n\x85\u2028\u2029' \
                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':
                # The BOM does not advance the column.
                self.column += 1
            length -= 1

    def get_mark(self):
        # Snapshot the current position for error messages.
        if self.stream is None:
            return Mark(self.name, self.index, self.line, self.column,
                    self.buffer, self.pointer)
        else:
            # Streamed input: no snippet context is available.
            return Mark(self.name, self.index, self.line, self.column,
                    None, None)

    def determine_encoding(self):
        # Sniff a UTF-16 BOM in the first two bytes; default to UTF-8.
        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
            self.update_raw()
        if isinstance(self.raw_buffer, bytes):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode
                self.encoding = 'utf-16-be'
            else:
                self.raw_decode = codecs.utf_8_decode
                self.encoding = 'utf-8'
        self.update(1)

    # Characters that may not appear in a YAML stream.
    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
    def check_printable(self, data):
        match = self.NON_PRINTABLE.search(data)
        if match:
            character = match.group()
            # Absolute position of the offending character in the input.
            position = self.index+(len(self.buffer)-self.pointer)+match.start()
            raise ReaderError(self.name, position, ord(character),
                    'unicode', "special characters are not allowed")

    def update(self, length):
        # Ensure at least `length` decoded characters are available in
        # self.buffer (past self.pointer), decoding raw bytes as needed.
        if self.raw_buffer is None:
            return
        self.buffer = self.buffer[self.pointer:]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    # Incremental decode: incomplete trailing sequences are
                    # left in raw_buffer unless we are at EOF.
                    data, converted = self.raw_decode(self.raw_buffer,
                            'strict', self.eof)
                except UnicodeDecodeError as exc:
                    character = self.raw_buffer[exc.start]
                    if self.stream is not None:
                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character,
                            exc.encoding, exc.reason)
            else:
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                # Terminate the stream with NUL so the scanner never
                # reads past the end.
                self.buffer += '\0'
                self.raw_buffer = None
                break

    def update_raw(self, size=4096):
        # Read another chunk of raw bytes; an empty read marks EOF.
        data = self.stream.read(size)
        if self.raw_buffer is None:
            self.raw_buffer = data
        else:
            self.raw_buffer += data
        self.stream_pointer += len(data)
        if not data:
            self.eof = True
@ -1,389 +0,0 @@
|
|||||||
|
|
||||||
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
|
|
||||||
'RepresenterError']
|
|
||||||
|
|
||||||
from .error import *
|
|
||||||
from .nodes import *
|
|
||||||
|
|
||||||
import datetime, copyreg, types, base64, collections
|
|
||||||
|
|
||||||
class RepresenterError(YAMLError):
    # Raised when an object cannot be converted to a representation node.
    pass
|
|
||||||
class BaseRepresenter:
    # Converts Python objects into YAML nodes.  Dispatch is by exact type
    # first (yaml_representers), then by an MRO walk over
    # yaml_multi_representers, with None as a catch-all key.

    yaml_representers = {}
    yaml_multi_representers = {}

    def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
        # default_style: forced scalar style (None = pick automatically).
        # default_flow_style: flow/block preference for collections.
        # sort_keys: whether dict keys are sorted when represented.
        self.default_style = default_style
        self.sort_keys = sort_keys
        self.default_flow_style = default_flow_style
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None

    def represent(self, data):
        # Represent a single document, then reset per-document state.
        node = self.represent_data(data)
        self.serialize(node)
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None

    def represent_data(self, data):
        # Convert one object to a node.  Objects seen before in this
        # document are returned as the same node (which becomes an alias).
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            if self.alias_key in self.represented_objects:
                node = self.represented_objects[self.alias_key]
                #if node is None:
                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
                return node
            #self.represented_objects[alias_key] = None
            # Keep the object alive so its id() stays unique for the document.
            self.object_keeper.append(data)
        data_types = type(data).__mro__
        if data_types[0] in self.yaml_representers:
            # Exact-type representer wins.
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            # Walk the MRO looking for a multi-representer, then fall back
            # to the None (catch-all) entries, then to a bare str() scalar.
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    node = ScalarNode(None, str(data))
        #if alias_key is not None:
        #    self.represented_objects[alias_key] = node
        return node

    @classmethod
    def add_representer(cls, data_type, representer):
        # Copy-on-write: give this class its own registry so subclasses do
        # not mutate the parent's table.
        if not 'yaml_representers' in cls.__dict__:
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer

    @classmethod
    def add_multi_representer(cls, data_type, representer):
        # Same copy-on-write discipline as add_representer.
        if not 'yaml_multi_representers' in cls.__dict__:
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer

    def represent_scalar(self, tag, value, style=None):
        if style is None:
            style = self.default_style
        node = ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node

    def represent_sequence(self, tag, sequence, flow_style=None):
        value = []
        node = SequenceNode(tag, value, flow_style=flow_style)
        # Register the node before recursing so self-references alias back.
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        # best_style stays True only if every item is a plain scalar.
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def represent_mapping(self, tag, mapping, flow_style=None):
        value = []
        node = MappingNode(tag, value, flow_style=flow_style)
        # Register the node before recursing so self-references alias back.
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            mapping = list(mapping.items())
            if self.sort_keys:
                try:
                    mapping = sorted(mapping)
                except TypeError:
                    # Keys are not orderable; keep original order.
                    pass
        for item_key, item_value in mapping:
            node_key = self.represent_data(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def ignore_aliases(self, data):
        # Subclasses override this to skip anchors/aliases for atomic values.
        return False
|
|
||||||
class SafeRepresenter(BaseRepresenter):
    # Representer limited to standard, safe Python types.

    def ignore_aliases(self, data):
        # Immutable atomic values never need anchors/aliases.
        # (Falls through returning None — falsy — for everything else.)
        if data is None:
            return True
        if isinstance(data, tuple) and data == ():
            return True
        if isinstance(data, (str, bytes, bool, int, float)):
            return True

    def represent_none(self, data):
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')

    def represent_str(self, data):
        return self.represent_scalar('tag:yaml.org,2002:str', data)

    def represent_binary(self, data):
        # base64-encode bytes; encodestring is the pre-3.1 spelling kept
        # for compatibility with old interpreters.
        if hasattr(base64, 'encodebytes'):
            data = base64.encodebytes(data).decode('ascii')
        else:
            data = base64.encodestring(data).decode('ascii')
        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')

    def represent_bool(self, data):
        if data:
            value = 'true'
        else:
            value = 'false'
        return self.represent_scalar('tag:yaml.org,2002:bool', value)

    def represent_int(self, data):
        return self.represent_scalar('tag:yaml.org,2002:int', str(data))

    # Compute a float infinity by repeated squaring (portable even on
    # platforms whose repr of overflowing floats differs).
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value

    def represent_float(self, data):
        # NaN compares unequal to itself; the second clause catches broken
        # platforms where NaN compares equal to everything.
        if data != data or (data == 0.0 and data == 1.0):
            value = '.nan'
        elif data == self.inf_value:
            value = '.inf'
        elif data == -self.inf_value:
            value = '-.inf'
        else:
            value = repr(data).lower()
            # Note that in some cases `repr(data)` represents a float number
            # without the decimal parts.  For instance:
            #   >>> repr(1e17)
            #   '1e17'
            # Unfortunately, this is not a valid float representation according
            # to the definition of the `!!float` tag.  We fix this by adding
            # '.0' before the 'e' symbol.
            if '.' not in value and 'e' in value:
                value = value.replace('e', '.0e', 1)
        return self.represent_scalar('tag:yaml.org,2002:float', value)

    def represent_list(self, data):
        #pairs = (len(data) > 0 and isinstance(data, list))
        #if pairs:
        #    for item in data:
        #        if not isinstance(item, tuple) or len(item) != 2:
        #            pairs = False
        #            break
        #if not pairs:
            return self.represent_sequence('tag:yaml.org,2002:seq', data)
        #value = []
        #for item_key, item_value in data:
        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
        #        [(item_key, item_value)]))
        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)

    def represent_dict(self, data):
        return self.represent_mapping('tag:yaml.org,2002:map', data)

    def represent_set(self, data):
        # A set is represented as a mapping with all-null values.
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping('tag:yaml.org,2002:set', value)

    def represent_date(self, data):
        value = data.isoformat()
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_datetime(self, data):
        value = data.isoformat(' ')
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        # Represent an object's state (via __getstate__ or __dict__)
        # as a mapping under the given tag.
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)

    def represent_undefined(self, data):
        raise RepresenterError("cannot represent an object", data)
|
|
||||||
# Register the representers for the standard Python types handled by the
# safe dumper.  The None key is the catch-all for unregistered types.
SafeRepresenter.add_representer(type(None),
        SafeRepresenter.represent_none)

SafeRepresenter.add_representer(str,
        SafeRepresenter.represent_str)

SafeRepresenter.add_representer(bytes,
        SafeRepresenter.represent_binary)

SafeRepresenter.add_representer(bool,
        SafeRepresenter.represent_bool)

SafeRepresenter.add_representer(int,
        SafeRepresenter.represent_int)

SafeRepresenter.add_representer(float,
        SafeRepresenter.represent_float)

SafeRepresenter.add_representer(list,
        SafeRepresenter.represent_list)

# Tuples are represented like lists by the safe representer.
SafeRepresenter.add_representer(tuple,
        SafeRepresenter.represent_list)

SafeRepresenter.add_representer(dict,
        SafeRepresenter.represent_dict)

SafeRepresenter.add_representer(set,
        SafeRepresenter.represent_set)

SafeRepresenter.add_representer(datetime.date,
        SafeRepresenter.represent_date)

SafeRepresenter.add_representer(datetime.datetime,
        SafeRepresenter.represent_datetime)

SafeRepresenter.add_representer(None,
        SafeRepresenter.represent_undefined)
|
|
||||||
class Representer(SafeRepresenter):
|
|
||||||
|
|
||||||
def represent_complex(self, data):
|
|
||||||
if data.imag == 0.0:
|
|
||||||
data = '%r' % data.real
|
|
||||||
elif data.real == 0.0:
|
|
||||||
data = '%rj' % data.imag
|
|
||||||
elif data.imag > 0:
|
|
||||||
data = '%r+%rj' % (data.real, data.imag)
|
|
||||||
else:
|
|
||||||
data = '%r%rj' % (data.real, data.imag)
|
|
||||||
return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
|
|
||||||
|
|
||||||
def represent_tuple(self, data):
|
|
||||||
return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
|
|
||||||
|
|
||||||
def represent_name(self, data):
|
|
||||||
name = '%s.%s' % (data.__module__, data.__name__)
|
|
||||||
return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
|
|
||||||
|
|
||||||
def represent_module(self, data):
|
|
||||||
return self.represent_scalar(
|
|
||||||
'tag:yaml.org,2002:python/module:'+data.__name__, '')
|
|
||||||
|
|
||||||
def represent_object(self, data):
|
|
||||||
# We use __reduce__ API to save the data. data.__reduce__ returns
|
|
||||||
# a tuple of length 2-5:
|
|
||||||
# (function, args, state, listitems, dictitems)
|
|
||||||
|
|
||||||
# For reconstructing, we calls function(*args), then set its state,
|
|
||||||
# listitems, and dictitems if they are not None.
|
|
||||||
|
|
||||||
# A special case is when function.__name__ == '__newobj__'. In this
|
|
||||||
# case we create the object with args[0].__new__(*args).
|
|
||||||
|
|
||||||
# Another special case is when __reduce__ returns a string - we don't
|
|
||||||
# support it.
|
|
||||||
|
|
||||||
# We produce a !!python/object, !!python/object/new or
|
|
||||||
# !!python/object/apply node.
|
|
||||||
|
|
||||||
cls = type(data)
|
|
||||||
if cls in copyreg.dispatch_table:
|
|
||||||
reduce = copyreg.dispatch_table[cls](data)
|
|
||||||
elif hasattr(data, '__reduce_ex__'):
|
|
||||||
reduce = data.__reduce_ex__(2)
|
|
||||||
elif hasattr(data, '__reduce__'):
|
|
||||||
reduce = data.__reduce__()
|
|
||||||
else:
|
|
||||||
raise RepresenterError("cannot represent an object", data)
|
|
||||||
reduce = (list(reduce)+[None]*5)[:5]
|
|
||||||
function, args, state, listitems, dictitems = reduce
|
|
||||||
args = list(args)
|
|
||||||
if state is None:
|
|
||||||
state = {}
|
|
||||||
if listitems is not None:
|
|
||||||
listitems = list(listitems)
|
|
||||||
if dictitems is not None:
|
|
||||||
dictitems = dict(dictitems)
|
|
||||||
if function.__name__ == '__newobj__':
|
|
||||||
function = args[0]
|
|
||||||
args = args[1:]
|
|
||||||
tag = 'tag:yaml.org,2002:python/object/new:'
|
|
||||||
newobj = True
|
|
||||||
else:
|
|
||||||
tag = 'tag:yaml.org,2002:python/object/apply:'
|
|
||||||
newobj = False
|
|
||||||
function_name = '%s.%s' % (function.__module__, function.__name__)
|
|
||||||
if not args and not listitems and not dictitems \
|
|
||||||
and isinstance(state, dict) and newobj:
|
|
||||||
return self.represent_mapping(
|
|
||||||
'tag:yaml.org,2002:python/object:'+function_name, state)
|
|
||||||
if not listitems and not dictitems \
|
|
||||||
and isinstance(state, dict) and not state:
|
|
||||||
return self.represent_sequence(tag+function_name, args)
|
|
||||||
value = {}
|
|
||||||
if args:
|
|
||||||
value['args'] = args
|
|
||||||
if state or not isinstance(state, dict):
|
|
||||||
value['state'] = state
|
|
||||||
if listitems:
|
|
||||||
value['listitems'] = listitems
|
|
||||||
if dictitems:
|
|
||||||
value['dictitems'] = dictitems
|
|
||||||
return self.represent_mapping(tag+function_name, value)
|
|
||||||
|
|
||||||
def represent_ordered_dict(self, data):
|
|
||||||
# Provide uniform representation across different Python versions.
|
|
||||||
data_type = type(data)
|
|
||||||
tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
|
|
||||||
% (data_type.__module__, data_type.__name__)
|
|
||||||
items = [[key, value] for key, value in data.items()]
|
|
||||||
return self.represent_sequence(tag, [items])
|
|
||||||
|
|
||||||
Representer.add_representer(complex,
|
|
||||||
Representer.represent_complex)
|
|
||||||
|
|
||||||
Representer.add_representer(tuple,
|
|
||||||
Representer.represent_tuple)
|
|
||||||
|
|
||||||
Representer.add_multi_representer(type,
|
|
||||||
Representer.represent_name)
|
|
||||||
|
|
||||||
Representer.add_representer(collections.OrderedDict,
|
|
||||||
Representer.represent_ordered_dict)
|
|
||||||
|
|
||||||
Representer.add_representer(types.FunctionType,
|
|
||||||
Representer.represent_name)
|
|
||||||
|
|
||||||
Representer.add_representer(types.BuiltinFunctionType,
|
|
||||||
Representer.represent_name)
|
|
||||||
|
|
||||||
Representer.add_representer(types.ModuleType,
|
|
||||||
Representer.represent_module)
|
|
||||||
|
|
||||||
Representer.add_multi_representer(object,
|
|
||||||
Representer.represent_object)
|
|
||||||
|
|
||||||
@ -1,227 +0,0 @@
|
|||||||
|
|
||||||
__all__ = ['BaseResolver', 'Resolver']
|
|
||||||
|
|
||||||
from .error import *
|
|
||||||
from .nodes import *
|
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
class ResolverError(YAMLError):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class BaseResolver:
|
|
||||||
|
|
||||||
DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
|
|
||||||
DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
|
|
||||||
DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
|
|
||||||
|
|
||||||
yaml_implicit_resolvers = {}
|
|
||||||
yaml_path_resolvers = {}
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.resolver_exact_paths = []
|
|
||||||
self.resolver_prefix_paths = []
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def add_implicit_resolver(cls, tag, regexp, first):
|
|
||||||
if not 'yaml_implicit_resolvers' in cls.__dict__:
|
|
||||||
implicit_resolvers = {}
|
|
||||||
for key in cls.yaml_implicit_resolvers:
|
|
||||||
implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
|
|
||||||
cls.yaml_implicit_resolvers = implicit_resolvers
|
|
||||||
if first is None:
|
|
||||||
first = [None]
|
|
||||||
for ch in first:
|
|
||||||
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def add_path_resolver(cls, tag, path, kind=None):
|
|
||||||
# Note: `add_path_resolver` is experimental. The API could be changed.
|
|
||||||
# `new_path` is a pattern that is matched against the path from the
|
|
||||||
# root to the node that is being considered. `node_path` elements are
|
|
||||||
# tuples `(node_check, index_check)`. `node_check` is a node class:
|
|
||||||
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
|
|
||||||
# matches any kind of a node. `index_check` could be `None`, a boolean
|
|
||||||
# value, a string value, or a number. `None` and `False` match against
|
|
||||||
# any _value_ of sequence and mapping nodes. `True` matches against
|
|
||||||
# any _key_ of a mapping node. A string `index_check` matches against
|
|
||||||
# a mapping value that corresponds to a scalar key which content is
|
|
||||||
# equal to the `index_check` value. An integer `index_check` matches
|
|
||||||
# against a sequence value with the index equal to `index_check`.
|
|
||||||
if not 'yaml_path_resolvers' in cls.__dict__:
|
|
||||||
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
|
|
||||||
new_path = []
|
|
||||||
for element in path:
|
|
||||||
if isinstance(element, (list, tuple)):
|
|
||||||
if len(element) == 2:
|
|
||||||
node_check, index_check = element
|
|
||||||
elif len(element) == 1:
|
|
||||||
node_check = element[0]
|
|
||||||
index_check = True
|
|
||||||
else:
|
|
||||||
raise ResolverError("Invalid path element: %s" % element)
|
|
||||||
else:
|
|
||||||
node_check = None
|
|
||||||
index_check = element
|
|
||||||
if node_check is str:
|
|
||||||
node_check = ScalarNode
|
|
||||||
elif node_check is list:
|
|
||||||
node_check = SequenceNode
|
|
||||||
elif node_check is dict:
|
|
||||||
node_check = MappingNode
|
|
||||||
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
|
|
||||||
and not isinstance(node_check, str) \
|
|
||||||
and node_check is not None:
|
|
||||||
raise ResolverError("Invalid node checker: %s" % node_check)
|
|
||||||
if not isinstance(index_check, (str, int)) \
|
|
||||||
and index_check is not None:
|
|
||||||
raise ResolverError("Invalid index checker: %s" % index_check)
|
|
||||||
new_path.append((node_check, index_check))
|
|
||||||
if kind is str:
|
|
||||||
kind = ScalarNode
|
|
||||||
elif kind is list:
|
|
||||||
kind = SequenceNode
|
|
||||||
elif kind is dict:
|
|
||||||
kind = MappingNode
|
|
||||||
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
|
|
||||||
and kind is not None:
|
|
||||||
raise ResolverError("Invalid node kind: %s" % kind)
|
|
||||||
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
|
|
||||||
|
|
||||||
def descend_resolver(self, current_node, current_index):
|
|
||||||
if not self.yaml_path_resolvers:
|
|
||||||
return
|
|
||||||
exact_paths = {}
|
|
||||||
prefix_paths = []
|
|
||||||
if current_node:
|
|
||||||
depth = len(self.resolver_prefix_paths)
|
|
||||||
for path, kind in self.resolver_prefix_paths[-1]:
|
|
||||||
if self.check_resolver_prefix(depth, path, kind,
|
|
||||||
current_node, current_index):
|
|
||||||
if len(path) > depth:
|
|
||||||
prefix_paths.append((path, kind))
|
|
||||||
else:
|
|
||||||
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
|
|
||||||
else:
|
|
||||||
for path, kind in self.yaml_path_resolvers:
|
|
||||||
if not path:
|
|
||||||
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
|
|
||||||
else:
|
|
||||||
prefix_paths.append((path, kind))
|
|
||||||
self.resolver_exact_paths.append(exact_paths)
|
|
||||||
self.resolver_prefix_paths.append(prefix_paths)
|
|
||||||
|
|
||||||
def ascend_resolver(self):
|
|
||||||
if not self.yaml_path_resolvers:
|
|
||||||
return
|
|
||||||
self.resolver_exact_paths.pop()
|
|
||||||
self.resolver_prefix_paths.pop()
|
|
||||||
|
|
||||||
def check_resolver_prefix(self, depth, path, kind,
|
|
||||||
current_node, current_index):
|
|
||||||
node_check, index_check = path[depth-1]
|
|
||||||
if isinstance(node_check, str):
|
|
||||||
if current_node.tag != node_check:
|
|
||||||
return
|
|
||||||
elif node_check is not None:
|
|
||||||
if not isinstance(current_node, node_check):
|
|
||||||
return
|
|
||||||
if index_check is True and current_index is not None:
|
|
||||||
return
|
|
||||||
if (index_check is False or index_check is None) \
|
|
||||||
and current_index is None:
|
|
||||||
return
|
|
||||||
if isinstance(index_check, str):
|
|
||||||
if not (isinstance(current_index, ScalarNode)
|
|
||||||
and index_check == current_index.value):
|
|
||||||
return
|
|
||||||
elif isinstance(index_check, int) and not isinstance(index_check, bool):
|
|
||||||
if index_check != current_index:
|
|
||||||
return
|
|
||||||
return True
|
|
||||||
|
|
||||||
def resolve(self, kind, value, implicit):
|
|
||||||
if kind is ScalarNode and implicit[0]:
|
|
||||||
if value == '':
|
|
||||||
resolvers = self.yaml_implicit_resolvers.get('', [])
|
|
||||||
else:
|
|
||||||
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
|
|
||||||
wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
|
|
||||||
for tag, regexp in resolvers + wildcard_resolvers:
|
|
||||||
if regexp.match(value):
|
|
||||||
return tag
|
|
||||||
implicit = implicit[1]
|
|
||||||
if self.yaml_path_resolvers:
|
|
||||||
exact_paths = self.resolver_exact_paths[-1]
|
|
||||||
if kind in exact_paths:
|
|
||||||
return exact_paths[kind]
|
|
||||||
if None in exact_paths:
|
|
||||||
return exact_paths[None]
|
|
||||||
if kind is ScalarNode:
|
|
||||||
return self.DEFAULT_SCALAR_TAG
|
|
||||||
elif kind is SequenceNode:
|
|
||||||
return self.DEFAULT_SEQUENCE_TAG
|
|
||||||
elif kind is MappingNode:
|
|
||||||
return self.DEFAULT_MAPPING_TAG
|
|
||||||
|
|
||||||
class Resolver(BaseResolver):
|
|
||||||
pass
|
|
||||||
|
|
||||||
Resolver.add_implicit_resolver(
|
|
||||||
'tag:yaml.org,2002:bool',
|
|
||||||
re.compile(r'''^(?:yes|Yes|YES|no|No|NO
|
|
||||||
|true|True|TRUE|false|False|FALSE
|
|
||||||
|on|On|ON|off|Off|OFF)$''', re.X),
|
|
||||||
list('yYnNtTfFoO'))
|
|
||||||
|
|
||||||
Resolver.add_implicit_resolver(
|
|
||||||
'tag:yaml.org,2002:float',
|
|
||||||
re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
|
|
||||||
|\.[0-9][0-9_]*(?:[eE][-+][0-9]+)?
|
|
||||||
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|
|
||||||
|[-+]?\.(?:inf|Inf|INF)
|
|
||||||
|\.(?:nan|NaN|NAN))$''', re.X),
|
|
||||||
list('-+0123456789.'))
|
|
||||||
|
|
||||||
Resolver.add_implicit_resolver(
|
|
||||||
'tag:yaml.org,2002:int',
|
|
||||||
re.compile(r'''^(?:[-+]?0b[0-1_]+
|
|
||||||
|[-+]?0[0-7_]+
|
|
||||||
|[-+]?(?:0|[1-9][0-9_]*)
|
|
||||||
|[-+]?0x[0-9a-fA-F_]+
|
|
||||||
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
|
|
||||||
list('-+0123456789'))
|
|
||||||
|
|
||||||
Resolver.add_implicit_resolver(
|
|
||||||
'tag:yaml.org,2002:merge',
|
|
||||||
re.compile(r'^(?:<<)$'),
|
|
||||||
['<'])
|
|
||||||
|
|
||||||
Resolver.add_implicit_resolver(
|
|
||||||
'tag:yaml.org,2002:null',
|
|
||||||
re.compile(r'''^(?: ~
|
|
||||||
|null|Null|NULL
|
|
||||||
| )$''', re.X),
|
|
||||||
['~', 'n', 'N', ''])
|
|
||||||
|
|
||||||
Resolver.add_implicit_resolver(
|
|
||||||
'tag:yaml.org,2002:timestamp',
|
|
||||||
re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|
|
||||||
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
|
|
||||||
(?:[Tt]|[ \t]+)[0-9][0-9]?
|
|
||||||
:[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
|
|
||||||
(?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
|
|
||||||
list('0123456789'))
|
|
||||||
|
|
||||||
Resolver.add_implicit_resolver(
|
|
||||||
'tag:yaml.org,2002:value',
|
|
||||||
re.compile(r'^(?:=)$'),
|
|
||||||
['='])
|
|
||||||
|
|
||||||
# The following resolver is only for documentation purposes. It cannot work
|
|
||||||
# because plain scalars cannot start with '!', '&', or '*'.
|
|
||||||
Resolver.add_implicit_resolver(
|
|
||||||
'tag:yaml.org,2002:yaml',
|
|
||||||
re.compile(r'^(?:!|&|\*)$'),
|
|
||||||
list('!&*'))
|
|
||||||
|
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,111 +0,0 @@
|
|||||||
|
|
||||||
__all__ = ['Serializer', 'SerializerError']
|
|
||||||
|
|
||||||
from .error import YAMLError
|
|
||||||
from .events import *
|
|
||||||
from .nodes import *
|
|
||||||
|
|
||||||
class SerializerError(YAMLError):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class Serializer:
|
|
||||||
|
|
||||||
ANCHOR_TEMPLATE = 'id%03d'
|
|
||||||
|
|
||||||
def __init__(self, encoding=None,
|
|
||||||
explicit_start=None, explicit_end=None, version=None, tags=None):
|
|
||||||
self.use_encoding = encoding
|
|
||||||
self.use_explicit_start = explicit_start
|
|
||||||
self.use_explicit_end = explicit_end
|
|
||||||
self.use_version = version
|
|
||||||
self.use_tags = tags
|
|
||||||
self.serialized_nodes = {}
|
|
||||||
self.anchors = {}
|
|
||||||
self.last_anchor_id = 0
|
|
||||||
self.closed = None
|
|
||||||
|
|
||||||
def open(self):
|
|
||||||
if self.closed is None:
|
|
||||||
self.emit(StreamStartEvent(encoding=self.use_encoding))
|
|
||||||
self.closed = False
|
|
||||||
elif self.closed:
|
|
||||||
raise SerializerError("serializer is closed")
|
|
||||||
else:
|
|
||||||
raise SerializerError("serializer is already opened")
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
if self.closed is None:
|
|
||||||
raise SerializerError("serializer is not opened")
|
|
||||||
elif not self.closed:
|
|
||||||
self.emit(StreamEndEvent())
|
|
||||||
self.closed = True
|
|
||||||
|
|
||||||
#def __del__(self):
|
|
||||||
# self.close()
|
|
||||||
|
|
||||||
def serialize(self, node):
|
|
||||||
if self.closed is None:
|
|
||||||
raise SerializerError("serializer is not opened")
|
|
||||||
elif self.closed:
|
|
||||||
raise SerializerError("serializer is closed")
|
|
||||||
self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
|
|
||||||
version=self.use_version, tags=self.use_tags))
|
|
||||||
self.anchor_node(node)
|
|
||||||
self.serialize_node(node, None, None)
|
|
||||||
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
|
|
||||||
self.serialized_nodes = {}
|
|
||||||
self.anchors = {}
|
|
||||||
self.last_anchor_id = 0
|
|
||||||
|
|
||||||
def anchor_node(self, node):
|
|
||||||
if node in self.anchors:
|
|
||||||
if self.anchors[node] is None:
|
|
||||||
self.anchors[node] = self.generate_anchor(node)
|
|
||||||
else:
|
|
||||||
self.anchors[node] = None
|
|
||||||
if isinstance(node, SequenceNode):
|
|
||||||
for item in node.value:
|
|
||||||
self.anchor_node(item)
|
|
||||||
elif isinstance(node, MappingNode):
|
|
||||||
for key, value in node.value:
|
|
||||||
self.anchor_node(key)
|
|
||||||
self.anchor_node(value)
|
|
||||||
|
|
||||||
def generate_anchor(self, node):
|
|
||||||
self.last_anchor_id += 1
|
|
||||||
return self.ANCHOR_TEMPLATE % self.last_anchor_id
|
|
||||||
|
|
||||||
def serialize_node(self, node, parent, index):
|
|
||||||
alias = self.anchors[node]
|
|
||||||
if node in self.serialized_nodes:
|
|
||||||
self.emit(AliasEvent(alias))
|
|
||||||
else:
|
|
||||||
self.serialized_nodes[node] = True
|
|
||||||
self.descend_resolver(parent, index)
|
|
||||||
if isinstance(node, ScalarNode):
|
|
||||||
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
|
|
||||||
default_tag = self.resolve(ScalarNode, node.value, (False, True))
|
|
||||||
implicit = (node.tag == detected_tag), (node.tag == default_tag)
|
|
||||||
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
|
|
||||||
style=node.style))
|
|
||||||
elif isinstance(node, SequenceNode):
|
|
||||||
implicit = (node.tag
|
|
||||||
== self.resolve(SequenceNode, node.value, True))
|
|
||||||
self.emit(SequenceStartEvent(alias, node.tag, implicit,
|
|
||||||
flow_style=node.flow_style))
|
|
||||||
index = 0
|
|
||||||
for item in node.value:
|
|
||||||
self.serialize_node(item, node, index)
|
|
||||||
index += 1
|
|
||||||
self.emit(SequenceEndEvent())
|
|
||||||
elif isinstance(node, MappingNode):
|
|
||||||
implicit = (node.tag
|
|
||||||
== self.resolve(MappingNode, node.value, True))
|
|
||||||
self.emit(MappingStartEvent(alias, node.tag, implicit,
|
|
||||||
flow_style=node.flow_style))
|
|
||||||
for key, value in node.value:
|
|
||||||
self.serialize_node(key, node, None)
|
|
||||||
self.serialize_node(value, node, key)
|
|
||||||
self.emit(MappingEndEvent())
|
|
||||||
self.ascend_resolver()
|
|
||||||
|
|
||||||
@ -1,104 +0,0 @@
|
|||||||
|
|
||||||
class Token(object):
|
|
||||||
def __init__(self, start_mark, end_mark):
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
def __repr__(self):
|
|
||||||
attributes = [key for key in self.__dict__
|
|
||||||
if not key.endswith('_mark')]
|
|
||||||
attributes.sort()
|
|
||||||
arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
|
|
||||||
for key in attributes])
|
|
||||||
return '%s(%s)' % (self.__class__.__name__, arguments)
|
|
||||||
|
|
||||||
#class BOMToken(Token):
|
|
||||||
# id = '<byte order mark>'
|
|
||||||
|
|
||||||
class DirectiveToken(Token):
|
|
||||||
id = '<directive>'
|
|
||||||
def __init__(self, name, value, start_mark, end_mark):
|
|
||||||
self.name = name
|
|
||||||
self.value = value
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
|
|
||||||
class DocumentStartToken(Token):
|
|
||||||
id = '<document start>'
|
|
||||||
|
|
||||||
class DocumentEndToken(Token):
|
|
||||||
id = '<document end>'
|
|
||||||
|
|
||||||
class StreamStartToken(Token):
|
|
||||||
id = '<stream start>'
|
|
||||||
def __init__(self, start_mark=None, end_mark=None,
|
|
||||||
encoding=None):
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
self.encoding = encoding
|
|
||||||
|
|
||||||
class StreamEndToken(Token):
|
|
||||||
id = '<stream end>'
|
|
||||||
|
|
||||||
class BlockSequenceStartToken(Token):
|
|
||||||
id = '<block sequence start>'
|
|
||||||
|
|
||||||
class BlockMappingStartToken(Token):
|
|
||||||
id = '<block mapping start>'
|
|
||||||
|
|
||||||
class BlockEndToken(Token):
|
|
||||||
id = '<block end>'
|
|
||||||
|
|
||||||
class FlowSequenceStartToken(Token):
|
|
||||||
id = '['
|
|
||||||
|
|
||||||
class FlowMappingStartToken(Token):
|
|
||||||
id = '{'
|
|
||||||
|
|
||||||
class FlowSequenceEndToken(Token):
|
|
||||||
id = ']'
|
|
||||||
|
|
||||||
class FlowMappingEndToken(Token):
|
|
||||||
id = '}'
|
|
||||||
|
|
||||||
class KeyToken(Token):
|
|
||||||
id = '?'
|
|
||||||
|
|
||||||
class ValueToken(Token):
|
|
||||||
id = ':'
|
|
||||||
|
|
||||||
class BlockEntryToken(Token):
|
|
||||||
id = '-'
|
|
||||||
|
|
||||||
class FlowEntryToken(Token):
|
|
||||||
id = ','
|
|
||||||
|
|
||||||
class AliasToken(Token):
|
|
||||||
id = '<alias>'
|
|
||||||
def __init__(self, value, start_mark, end_mark):
|
|
||||||
self.value = value
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
|
|
||||||
class AnchorToken(Token):
|
|
||||||
id = '<anchor>'
|
|
||||||
def __init__(self, value, start_mark, end_mark):
|
|
||||||
self.value = value
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
|
|
||||||
class TagToken(Token):
|
|
||||||
id = '<tag>'
|
|
||||||
def __init__(self, value, start_mark, end_mark):
|
|
||||||
self.value = value
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
|
|
||||||
class ScalarToken(Token):
|
|
||||||
id = '<scalar>'
|
|
||||||
def __init__(self, value, plain, start_mark, end_mark, style=None):
|
|
||||||
self.value = value
|
|
||||||
self.plain = plain
|
|
||||||
self.start_mark = start_mark
|
|
||||||
self.end_mark = end_mark
|
|
||||||
self.style = style
|
|
||||||
|
|
||||||
@ -1,7 +1,6 @@
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
sys.path.append(os.path.join(os.path.dirname(__file__), 'contrib'))
|
|
||||||
|
|
||||||
from cement import App, CaughtSignal # noqa: E402
|
from cement import App, CaughtSignal # noqa: E402
|
||||||
from .controllers.base import Base # noqa: E402
|
from .controllers.base import Base # noqa: E402
|
||||||
|
|||||||
2
lgtm.yml
2
lgtm.yml
@ -1,6 +1,4 @@
|
|||||||
path_classifiers:
|
path_classifiers:
|
||||||
library:
|
|
||||||
- cli/contrib/jinja2
|
|
||||||
template:
|
template:
|
||||||
- cli/templates
|
- cli/templates
|
||||||
|
|
||||||
|
|||||||
16
pdm.lock
16
pdm.lock
@ -2,10 +2,10 @@
|
|||||||
# It is not intended for manual editing.
|
# It is not intended for manual editing.
|
||||||
|
|
||||||
[metadata]
|
[metadata]
|
||||||
groups = ["default", "alarm", "argparse", "colorlog", "configparser", "daemon", "dev", "docs", "dummy", "generate", "jinja2", "json", "logging", "memcached", "mustache", "plugin", "print", "redis", "scrub", "smtp", "tabulate", "watchdog", "yaml"]
|
groups = ["default", "alarm", "argparse", "colorlog", "configparser", "daemon", "dev", "docs", "dummy", "generate", "jinja2", "json", "logging", "memcached", "mustache", "plugin", "print", "redis", "scrub", "smtp", "tabulate", "watchdog", "yaml", "cli"]
|
||||||
strategy = ["cross_platform", "inherit_metadata"]
|
strategy = ["cross_platform", "inherit_metadata"]
|
||||||
lock_version = "4.4.1"
|
lock_version = "4.4.1"
|
||||||
content_hash = "sha256:b3e19108389b63d5ab60634d76e82d6ef472cc06a5f8a81e98c57d30bf92c2b3"
|
content_hash = "sha256:93956382ffe7f8e0c7376e940245a61ca8e9336b8801d69aa41069b84ab4faae"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "alabaster"
|
name = "alabaster"
|
||||||
@ -379,16 +379,16 @@ files = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "jinja2"
|
name = "jinja2"
|
||||||
version = "3.1.3"
|
version = "3.1.4"
|
||||||
requires_python = ">=3.7"
|
requires_python = ">=3.7"
|
||||||
summary = "A very fast and expressive template engine."
|
summary = "A very fast and expressive template engine."
|
||||||
groups = ["docs", "jinja2"]
|
groups = ["cli", "docs", "jinja2"]
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"MarkupSafe>=2.0",
|
"MarkupSafe>=2.0",
|
||||||
]
|
]
|
||||||
files = [
|
files = [
|
||||||
{file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"},
|
{file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"},
|
||||||
{file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"},
|
{file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -396,7 +396,7 @@ name = "markupsafe"
|
|||||||
version = "2.1.5"
|
version = "2.1.5"
|
||||||
requires_python = ">=3.7"
|
requires_python = ">=3.7"
|
||||||
summary = "Safely add untrusted strings to HTML/XML markup."
|
summary = "Safely add untrusted strings to HTML/XML markup."
|
||||||
groups = ["docs", "jinja2"]
|
groups = ["cli", "docs", "jinja2"]
|
||||||
files = [
|
files = [
|
||||||
{file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"},
|
{file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"},
|
||||||
{file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"},
|
{file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"},
|
||||||
@ -650,7 +650,7 @@ name = "pyyaml"
|
|||||||
version = "6.0.1"
|
version = "6.0.1"
|
||||||
requires_python = ">=3.6"
|
requires_python = ">=3.6"
|
||||||
summary = "YAML parser and emitter for Python"
|
summary = "YAML parser and emitter for Python"
|
||||||
groups = ["generate", "yaml"]
|
groups = ["cli", "generate", "yaml"]
|
||||||
files = [
|
files = [
|
||||||
{file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
|
{file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
|
||||||
{file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
|
{file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
|
||||||
|
|||||||
@ -40,6 +40,11 @@ smtp = []
|
|||||||
tabulate = ["tabulate"]
|
tabulate = ["tabulate"]
|
||||||
watchdog = ["watchdog"]
|
watchdog = ["watchdog"]
|
||||||
yaml = ["pyYaml"]
|
yaml = ["pyYaml"]
|
||||||
|
cli = [
|
||||||
|
"jinja2>=3.1.4",
|
||||||
|
"markupsafe>=2.1.5",
|
||||||
|
"pyyaml>=6.0.1",
|
||||||
|
]
|
||||||
|
|
||||||
[tool.pdm.scripts]
|
[tool.pdm.scripts]
|
||||||
cement = {call = "cement.cli.main:main"}
|
cement = {call = "cement.cli.main:main"}
|
||||||
@ -65,11 +70,6 @@ testpaths = ["tests"]
|
|||||||
addopts = "-v --cov-report=term --cov-report=html:coverage-report --capture=sys tests/"
|
addopts = "-v --cov-report=term --cov-report=html:coverage-report --capture=sys tests/"
|
||||||
python_files= "test_*.py"
|
python_files= "test_*.py"
|
||||||
|
|
||||||
[tool.coverage.run]
|
|
||||||
omit = [
|
|
||||||
"cement/cli/contrib/*"
|
|
||||||
]
|
|
||||||
|
|
||||||
[tool.coverage.report]
|
[tool.coverage.report]
|
||||||
precision = 2
|
precision = 2
|
||||||
|
|
||||||
@ -150,7 +150,6 @@ files = [
|
|||||||
]
|
]
|
||||||
exclude = """(?x)(
|
exclude = """(?x)(
|
||||||
^cement/cli/templates |
|
^cement/cli/templates |
|
||||||
^cement/cli/contrib |
|
|
||||||
^.git/ |
|
^.git/ |
|
||||||
^tests
|
^tests
|
||||||
)"""
|
)"""
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user