mirror of
https://github.com/opencv/opencv.git
synced 2024-11-25 11:40:44 +08:00
Merge remote-tracking branch 'upstream/master'
This commit is contained in:
commit
8e8ff4dd33
1
.gitattributes
vendored
1
.gitattributes
vendored
@ -23,6 +23,7 @@
|
||||
*.idl text
|
||||
*.java text
|
||||
*.js text
|
||||
*.m text
|
||||
*.mk text
|
||||
*.mm text
|
||||
*.plist text
|
||||
|
2
.gitignore
vendored
2
.gitignore
vendored
@ -7,5 +7,5 @@ tegra/
|
||||
.sw[a-z]
|
||||
.*.swp
|
||||
tags
|
||||
build/
|
||||
Thumbs.db
|
||||
*.autosave
|
||||
|
33
3rdparty/jinja2/AUTHORS
vendored
Normal file
33
3rdparty/jinja2/AUTHORS
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
Jinja is written and maintained by the Jinja Team and various
|
||||
contributors:
|
||||
|
||||
Lead Developer:
|
||||
|
||||
- Armin Ronacher <armin.ronacher@active-4.com>
|
||||
|
||||
Developers:
|
||||
|
||||
- Christoph Hack
|
||||
- Georg Brandl
|
||||
|
||||
Contributors:
|
||||
|
||||
- Bryan McLemore
|
||||
- Mickaël Guérin <kael@crocobox.org>
|
||||
- Cameron Knight
|
||||
- Lawrence Journal-World.
|
||||
- David Cramer
|
||||
|
||||
Patches and suggestions:
|
||||
|
||||
- Ronny Pfannschmidt
|
||||
- Axel Böhm
|
||||
- Alexey Melchakov
|
||||
- Bryan McLemore
|
||||
- Clovis Fabricio (nosklo)
|
||||
- Cameron Knight
|
||||
- Peter van Dijk (Habbie)
|
||||
- Stefan Ebner
|
||||
- Rene Leonhardt
|
||||
- Thomas Waldmann
|
||||
- Cory Benfield (Lukasa)
|
31
3rdparty/jinja2/LICENSE
vendored
Normal file
31
3rdparty/jinja2/LICENSE
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
|
||||
|
||||
Some rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following
|
||||
disclaimer in the documentation and/or other materials provided
|
||||
with the distribution.
|
||||
|
||||
* The names of the contributors may not be used to endorse or
|
||||
promote products derived from this software without specific
|
||||
prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
69
3rdparty/jinja2/__init__.py
vendored
Normal file
69
3rdparty/jinja2/__init__.py
vendored
Normal file
@ -0,0 +1,69 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2
|
||||
~~~~~~
|
||||
|
||||
Jinja2 is a template engine written in pure Python. It provides a
|
||||
Django inspired non-XML syntax but supports inline expressions and
|
||||
an optional sandboxed environment.
|
||||
|
||||
Nutshell
|
||||
--------
|
||||
|
||||
Here a small example of a Jinja2 template::
|
||||
|
||||
{% extends 'base.html' %}
|
||||
{% block title %}Memberlist{% endblock %}
|
||||
{% block content %}
|
||||
<ul>
|
||||
{% for user in users %}
|
||||
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endblock %}
|
||||
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
__docformat__ = 'restructuredtext en'
|
||||
__version__ = '2.7.1'
|
||||
|
||||
# high level interface
|
||||
from jinja2.environment import Environment, Template
|
||||
|
||||
# loaders
|
||||
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
|
||||
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
|
||||
ModuleLoader
|
||||
|
||||
# bytecode caches
|
||||
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
|
||||
MemcachedBytecodeCache
|
||||
|
||||
# undefined types
|
||||
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
|
||||
|
||||
# exceptions
|
||||
from jinja2.exceptions import TemplateError, UndefinedError, \
|
||||
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
|
||||
TemplateAssertionError
|
||||
|
||||
# decorators and public utilities
|
||||
from jinja2.filters import environmentfilter, contextfilter, \
|
||||
evalcontextfilter
|
||||
from jinja2.utils import Markup, escape, clear_caches, \
|
||||
environmentfunction, evalcontextfunction, contextfunction, \
|
||||
is_undefined
|
||||
|
||||
__all__ = [
|
||||
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
|
||||
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
|
||||
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
|
||||
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
|
||||
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
|
||||
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
|
||||
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
|
||||
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
|
||||
'evalcontextfilter', 'evalcontextfunction'
|
||||
]
|
150
3rdparty/jinja2/_compat.py
vendored
Normal file
150
3rdparty/jinja2/_compat.py
vendored
Normal file
@ -0,0 +1,150 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2._compat
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Some py2/py3 compatibility support based on a stripped down
|
||||
version of six so we don't have to depend on a specific version
|
||||
of it.
|
||||
|
||||
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
import sys
|
||||
|
||||
PY2 = sys.version_info[0] == 2
|
||||
PYPY = hasattr(sys, 'pypy_translation_info')
|
||||
_identity = lambda x: x
|
||||
|
||||
|
||||
if not PY2:
|
||||
unichr = chr
|
||||
range_type = range
|
||||
text_type = str
|
||||
string_types = (str,)
|
||||
|
||||
iterkeys = lambda d: iter(d.keys())
|
||||
itervalues = lambda d: iter(d.values())
|
||||
iteritems = lambda d: iter(d.items())
|
||||
|
||||
import pickle
|
||||
from io import BytesIO, StringIO
|
||||
NativeStringIO = StringIO
|
||||
|
||||
def reraise(tp, value, tb=None):
|
||||
if value.__traceback__ is not tb:
|
||||
raise value.with_traceback(tb)
|
||||
raise value
|
||||
|
||||
ifilter = filter
|
||||
imap = map
|
||||
izip = zip
|
||||
intern = sys.intern
|
||||
|
||||
implements_iterator = _identity
|
||||
implements_to_string = _identity
|
||||
encode_filename = _identity
|
||||
get_next = lambda x: x.__next__
|
||||
|
||||
else:
|
||||
unichr = unichr
|
||||
text_type = unicode
|
||||
range_type = xrange
|
||||
string_types = (str, unicode)
|
||||
|
||||
iterkeys = lambda d: d.iterkeys()
|
||||
itervalues = lambda d: d.itervalues()
|
||||
iteritems = lambda d: d.iteritems()
|
||||
|
||||
import cPickle as pickle
|
||||
from cStringIO import StringIO as BytesIO, StringIO
|
||||
NativeStringIO = BytesIO
|
||||
|
||||
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
|
||||
|
||||
from itertools import imap, izip, ifilter
|
||||
intern = intern
|
||||
|
||||
def implements_iterator(cls):
|
||||
cls.next = cls.__next__
|
||||
del cls.__next__
|
||||
return cls
|
||||
|
||||
def implements_to_string(cls):
|
||||
cls.__unicode__ = cls.__str__
|
||||
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
|
||||
return cls
|
||||
|
||||
get_next = lambda x: x.next
|
||||
|
||||
def encode_filename(filename):
|
||||
if isinstance(filename, unicode):
|
||||
return filename.encode('utf-8')
|
||||
return filename
|
||||
|
||||
try:
|
||||
next = next
|
||||
except NameError:
|
||||
def next(it):
|
||||
return it.next()
|
||||
|
||||
|
||||
def with_metaclass(meta, *bases):
|
||||
# This requires a bit of explanation: the basic idea is to make a
|
||||
# dummy metaclass for one level of class instanciation that replaces
|
||||
# itself with the actual metaclass. Because of internal type checks
|
||||
# we also need to make sure that we downgrade the custom metaclass
|
||||
# for one level to something closer to type (that's why __call__ and
|
||||
# __init__ comes back from type etc.).
|
||||
#
|
||||
# This has the advantage over six.with_metaclass in that it does not
|
||||
# introduce dummy classes into the final MRO.
|
||||
class metaclass(meta):
|
||||
__call__ = type.__call__
|
||||
__init__ = type.__init__
|
||||
def __new__(cls, name, this_bases, d):
|
||||
if this_bases is None:
|
||||
return type.__new__(cls, name, (), d)
|
||||
return meta(name, bases, d)
|
||||
return metaclass('temporary_class', None, {})
|
||||
|
||||
|
||||
try:
|
||||
from collections import Mapping as mapping_types
|
||||
except ImportError:
|
||||
import UserDict
|
||||
mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)
|
||||
|
||||
|
||||
# common types. These do exist in the special types module too which however
|
||||
# does not exist in IronPython out of the box. Also that way we don't have
|
||||
# to deal with implementation specific stuff here
|
||||
class _C(object):
|
||||
def method(self): pass
|
||||
def _func():
|
||||
yield None
|
||||
function_type = type(_func)
|
||||
generator_type = type(_func())
|
||||
method_type = type(_C().method)
|
||||
code_type = type(_C.method.__code__)
|
||||
try:
|
||||
raise TypeError()
|
||||
except TypeError:
|
||||
_tb = sys.exc_info()[2]
|
||||
traceback_type = type(_tb)
|
||||
frame_type = type(_tb.tb_frame)
|
||||
|
||||
|
||||
try:
|
||||
from urllib.parse import quote_from_bytes as url_quote
|
||||
except ImportError:
|
||||
from urllib import quote as url_quote
|
||||
|
||||
|
||||
try:
|
||||
from thread import allocate_lock
|
||||
except ImportError:
|
||||
try:
|
||||
from threading import Lock as allocate_lock
|
||||
except ImportError:
|
||||
from dummy_thread import allocate_lock
|
132
3rdparty/jinja2/_stringdefs.py
vendored
Normal file
132
3rdparty/jinja2/_stringdefs.py
vendored
Normal file
File diff suppressed because one or more lines are too long
311
3rdparty/jinja2/bccache.py
vendored
Normal file
311
3rdparty/jinja2/bccache.py
vendored
Normal file
@ -0,0 +1,311 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.bccache
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
This module implements the bytecode cache system Jinja is optionally
|
||||
using. This is useful if you have very complex template situations and
|
||||
the compiliation of all those templates slow down your application too
|
||||
much.
|
||||
|
||||
Situations where this is useful are often forking web applications that
|
||||
are initialized on the first request.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD.
|
||||
"""
|
||||
from os import path, listdir
|
||||
import sys
|
||||
import marshal
|
||||
import tempfile
|
||||
import fnmatch
|
||||
from hashlib import sha1
|
||||
from jinja2.utils import open_if_exists
|
||||
from jinja2._compat import BytesIO, pickle, PY2, text_type
|
||||
|
||||
|
||||
# marshal works better on 3.x, one hack less required
|
||||
if not PY2:
|
||||
marshal_dump = marshal.dump
|
||||
marshal_load = marshal.load
|
||||
else:
|
||||
|
||||
def marshal_dump(code, f):
|
||||
if isinstance(f, file):
|
||||
marshal.dump(code, f)
|
||||
else:
|
||||
f.write(marshal.dumps(code))
|
||||
|
||||
def marshal_load(f):
|
||||
if isinstance(f, file):
|
||||
return marshal.load(f)
|
||||
return marshal.loads(f.read())
|
||||
|
||||
|
||||
bc_version = 2
|
||||
|
||||
# magic version used to only change with new jinja versions. With 2.6
|
||||
# we change this to also take Python version changes into account. The
|
||||
# reason for this is that Python tends to segfault if fed earlier bytecode
|
||||
# versions because someone thought it would be a good idea to reuse opcodes
|
||||
# or make Python incompatible with earlier versions.
|
||||
bc_magic = 'j2'.encode('ascii') + \
|
||||
pickle.dumps(bc_version, 2) + \
|
||||
pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
|
||||
|
||||
|
||||
class Bucket(object):
|
||||
"""Buckets are used to store the bytecode for one template. It's created
|
||||
and initialized by the bytecode cache and passed to the loading functions.
|
||||
|
||||
The buckets get an internal checksum from the cache assigned and use this
|
||||
to automatically reject outdated cache material. Individual bytecode
|
||||
cache subclasses don't have to care about cache invalidation.
|
||||
"""
|
||||
|
||||
def __init__(self, environment, key, checksum):
|
||||
self.environment = environment
|
||||
self.key = key
|
||||
self.checksum = checksum
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
"""Resets the bucket (unloads the bytecode)."""
|
||||
self.code = None
|
||||
|
||||
def load_bytecode(self, f):
|
||||
"""Loads bytecode from a file or file like object."""
|
||||
# make sure the magic header is correct
|
||||
magic = f.read(len(bc_magic))
|
||||
if magic != bc_magic:
|
||||
self.reset()
|
||||
return
|
||||
# the source code of the file changed, we need to reload
|
||||
checksum = pickle.load(f)
|
||||
if self.checksum != checksum:
|
||||
self.reset()
|
||||
return
|
||||
self.code = marshal_load(f)
|
||||
|
||||
def write_bytecode(self, f):
|
||||
"""Dump the bytecode into the file or file like object passed."""
|
||||
if self.code is None:
|
||||
raise TypeError('can\'t write empty bucket')
|
||||
f.write(bc_magic)
|
||||
pickle.dump(self.checksum, f, 2)
|
||||
marshal_dump(self.code, f)
|
||||
|
||||
def bytecode_from_string(self, string):
|
||||
"""Load bytecode from a string."""
|
||||
self.load_bytecode(BytesIO(string))
|
||||
|
||||
def bytecode_to_string(self):
|
||||
"""Return the bytecode as string."""
|
||||
out = BytesIO()
|
||||
self.write_bytecode(out)
|
||||
return out.getvalue()
|
||||
|
||||
|
||||
class BytecodeCache(object):
|
||||
"""To implement your own bytecode cache you have to subclass this class
|
||||
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
|
||||
these methods are passed a :class:`~jinja2.bccache.Bucket`.
|
||||
|
||||
A very basic bytecode cache that saves the bytecode on the file system::
|
||||
|
||||
from os import path
|
||||
|
||||
class MyCache(BytecodeCache):
|
||||
|
||||
def __init__(self, directory):
|
||||
self.directory = directory
|
||||
|
||||
def load_bytecode(self, bucket):
|
||||
filename = path.join(self.directory, bucket.key)
|
||||
if path.exists(filename):
|
||||
with open(filename, 'rb') as f:
|
||||
bucket.load_bytecode(f)
|
||||
|
||||
def dump_bytecode(self, bucket):
|
||||
filename = path.join(self.directory, bucket.key)
|
||||
with open(filename, 'wb') as f:
|
||||
bucket.write_bytecode(f)
|
||||
|
||||
A more advanced version of a filesystem based bytecode cache is part of
|
||||
Jinja2.
|
||||
"""
|
||||
|
||||
def load_bytecode(self, bucket):
|
||||
"""Subclasses have to override this method to load bytecode into a
|
||||
bucket. If they are not able to find code in the cache for the
|
||||
bucket, it must not do anything.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def dump_bytecode(self, bucket):
|
||||
"""Subclasses have to override this method to write the bytecode
|
||||
from a bucket back to the cache. If it unable to do so it must not
|
||||
fail silently but raise an exception.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def clear(self):
|
||||
"""Clears the cache. This method is not used by Jinja2 but should be
|
||||
implemented to allow applications to clear the bytecode cache used
|
||||
by a particular environment.
|
||||
"""
|
||||
|
||||
def get_cache_key(self, name, filename=None):
|
||||
"""Returns the unique hash key for this template name."""
|
||||
hash = sha1(name.encode('utf-8'))
|
||||
if filename is not None:
|
||||
filename = '|' + filename
|
||||
if isinstance(filename, text_type):
|
||||
filename = filename.encode('utf-8')
|
||||
hash.update(filename)
|
||||
return hash.hexdigest()
|
||||
|
||||
def get_source_checksum(self, source):
|
||||
"""Returns a checksum for the source."""
|
||||
return sha1(source.encode('utf-8')).hexdigest()
|
||||
|
||||
def get_bucket(self, environment, name, filename, source):
|
||||
"""Return a cache bucket for the given template. All arguments are
|
||||
mandatory but filename may be `None`.
|
||||
"""
|
||||
key = self.get_cache_key(name, filename)
|
||||
checksum = self.get_source_checksum(source)
|
||||
bucket = Bucket(environment, key, checksum)
|
||||
self.load_bytecode(bucket)
|
||||
return bucket
|
||||
|
||||
def set_bucket(self, bucket):
|
||||
"""Put the bucket into the cache."""
|
||||
self.dump_bytecode(bucket)
|
||||
|
||||
|
||||
class FileSystemBytecodeCache(BytecodeCache):
|
||||
"""A bytecode cache that stores bytecode on the filesystem. It accepts
|
||||
two arguments: The directory where the cache items are stored and a
|
||||
pattern string that is used to build the filename.
|
||||
|
||||
If no directory is specified the system temporary items folder is used.
|
||||
|
||||
The pattern can be used to have multiple separate caches operate on the
|
||||
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
|
||||
is replaced with the cache key.
|
||||
|
||||
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
|
||||
|
||||
This bytecode cache supports clearing of the cache using the clear method.
|
||||
"""
|
||||
|
||||
def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
|
||||
if directory is None:
|
||||
directory = tempfile.gettempdir()
|
||||
self.directory = directory
|
||||
self.pattern = pattern
|
||||
|
||||
def _get_cache_filename(self, bucket):
|
||||
return path.join(self.directory, self.pattern % bucket.key)
|
||||
|
||||
def load_bytecode(self, bucket):
|
||||
f = open_if_exists(self._get_cache_filename(bucket), 'rb')
|
||||
if f is not None:
|
||||
try:
|
||||
bucket.load_bytecode(f)
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def dump_bytecode(self, bucket):
|
||||
f = open(self._get_cache_filename(bucket), 'wb')
|
||||
try:
|
||||
bucket.write_bytecode(f)
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def clear(self):
|
||||
# imported lazily here because google app-engine doesn't support
|
||||
# write access on the file system and the function does not exist
|
||||
# normally.
|
||||
from os import remove
|
||||
files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
|
||||
for filename in files:
|
||||
try:
|
||||
remove(path.join(self.directory, filename))
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
|
||||
class MemcachedBytecodeCache(BytecodeCache):
|
||||
"""This class implements a bytecode cache that uses a memcache cache for
|
||||
storing the information. It does not enforce a specific memcache library
|
||||
(tummy's memcache or cmemcache) but will accept any class that provides
|
||||
the minimal interface required.
|
||||
|
||||
Libraries compatible with this class:
|
||||
|
||||
- `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
|
||||
- `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
|
||||
- `cmemcache <http://gijsbert.org/cmemcache/>`_
|
||||
|
||||
(Unfortunately the django cache interface is not compatible because it
|
||||
does not support storing binary data, only unicode. You can however pass
|
||||
the underlying cache client to the bytecode cache which is available
|
||||
as `django.core.cache.cache._client`.)
|
||||
|
||||
The minimal interface for the client passed to the constructor is this:
|
||||
|
||||
.. class:: MinimalClientInterface
|
||||
|
||||
.. method:: set(key, value[, timeout])
|
||||
|
||||
Stores the bytecode in the cache. `value` is a string and
|
||||
`timeout` the timeout of the key. If timeout is not provided
|
||||
a default timeout or no timeout should be assumed, if it's
|
||||
provided it's an integer with the number of seconds the cache
|
||||
item should exist.
|
||||
|
||||
.. method:: get(key)
|
||||
|
||||
Returns the value for the cache key. If the item does not
|
||||
exist in the cache the return value must be `None`.
|
||||
|
||||
The other arguments to the constructor are the prefix for all keys that
|
||||
is added before the actual cache key and the timeout for the bytecode in
|
||||
the cache system. We recommend a high (or no) timeout.
|
||||
|
||||
This bytecode cache does not support clearing of used items in the cache.
|
||||
The clear method is a no-operation function.
|
||||
|
||||
.. versionadded:: 2.7
|
||||
Added support for ignoring memcache errors through the
|
||||
`ignore_memcache_errors` parameter.
|
||||
"""
|
||||
|
||||
def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
|
||||
ignore_memcache_errors=True):
|
||||
self.client = client
|
||||
self.prefix = prefix
|
||||
self.timeout = timeout
|
||||
self.ignore_memcache_errors = ignore_memcache_errors
|
||||
|
||||
def load_bytecode(self, bucket):
|
||||
try:
|
||||
code = self.client.get(self.prefix + bucket.key)
|
||||
except Exception:
|
||||
if not self.ignore_memcache_errors:
|
||||
raise
|
||||
code = None
|
||||
if code is not None:
|
||||
bucket.bytecode_from_string(code)
|
||||
|
||||
def dump_bytecode(self, bucket):
|
||||
args = (self.prefix + bucket.key, bucket.bytecode_to_string())
|
||||
if self.timeout is not None:
|
||||
args += (self.timeout,)
|
||||
try:
|
||||
self.client.set(*args)
|
||||
except Exception:
|
||||
if not self.ignore_memcache_errors:
|
||||
raise
|
1640
3rdparty/jinja2/compiler.py
vendored
Normal file
1640
3rdparty/jinja2/compiler.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
32
3rdparty/jinja2/constants.py
vendored
Normal file
32
3rdparty/jinja2/constants.py
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja.constants
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Various constants.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
|
||||
#: list of lorem ipsum words used by the lipsum() helper function
|
||||
LOREM_IPSUM_WORDS = u'''\
|
||||
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
|
||||
auctor augue bibendum blandit class commodo condimentum congue consectetuer
|
||||
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
|
||||
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
|
||||
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
|
||||
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
|
||||
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
|
||||
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
|
||||
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
|
||||
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
|
||||
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
|
||||
penatibus per pharetra phasellus placerat platea porta porttitor posuere
|
||||
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
|
||||
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
|
||||
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
|
||||
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
|
||||
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
|
||||
viverra volutpat vulputate'''
|
337
3rdparty/jinja2/debug.py
vendored
Normal file
337
3rdparty/jinja2/debug.py
vendored
Normal file
@ -0,0 +1,337 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.debug
|
||||
~~~~~~~~~~~~
|
||||
|
||||
Implements the debug interface for Jinja. This module does some pretty
|
||||
ugly stuff with the Python traceback system in order to achieve tracebacks
|
||||
with correct line numbers, locals and contents.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import sys
|
||||
import traceback
|
||||
from types import TracebackType
|
||||
from jinja2.utils import missing, internal_code
|
||||
from jinja2.exceptions import TemplateSyntaxError
|
||||
from jinja2._compat import iteritems, reraise, code_type
|
||||
|
||||
# on pypy we can take advantage of transparent proxies
|
||||
try:
|
||||
from __pypy__ import tproxy
|
||||
except ImportError:
|
||||
tproxy = None
|
||||
|
||||
|
||||
# how does the raise helper look like?
|
||||
try:
|
||||
exec("raise TypeError, 'foo'")
|
||||
except SyntaxError:
|
||||
raise_helper = 'raise __jinja_exception__[1]'
|
||||
except TypeError:
|
||||
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
|
||||
|
||||
|
||||
class TracebackFrameProxy(object):
|
||||
"""Proxies a traceback frame."""
|
||||
|
||||
def __init__(self, tb):
|
||||
self.tb = tb
|
||||
self._tb_next = None
|
||||
|
||||
@property
|
||||
def tb_next(self):
|
||||
return self._tb_next
|
||||
|
||||
def set_next(self, next):
|
||||
if tb_set_next is not None:
|
||||
try:
|
||||
tb_set_next(self.tb, next and next.tb or None)
|
||||
except Exception:
|
||||
# this function can fail due to all the hackery it does
|
||||
# on various python implementations. We just catch errors
|
||||
# down and ignore them if necessary.
|
||||
pass
|
||||
self._tb_next = next
|
||||
|
||||
@property
|
||||
def is_jinja_frame(self):
|
||||
return '__jinja_template__' in self.tb.tb_frame.f_globals
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self.tb, name)
|
||||
|
||||
|
||||
def make_frame_proxy(frame):
|
||||
proxy = TracebackFrameProxy(frame)
|
||||
if tproxy is None:
|
||||
return proxy
|
||||
def operation_handler(operation, *args, **kwargs):
|
||||
if operation in ('__getattribute__', '__getattr__'):
|
||||
return getattr(proxy, args[0])
|
||||
elif operation == '__setattr__':
|
||||
proxy.__setattr__(*args, **kwargs)
|
||||
else:
|
||||
return getattr(proxy, operation)(*args, **kwargs)
|
||||
return tproxy(TracebackType, operation_handler)
|
||||
|
||||
|
||||
class ProcessedTraceback(object):
|
||||
"""Holds a Jinja preprocessed traceback for printing or reraising."""
|
||||
|
||||
def __init__(self, exc_type, exc_value, frames):
|
||||
assert frames, 'no frames for this traceback?'
|
||||
self.exc_type = exc_type
|
||||
self.exc_value = exc_value
|
||||
self.frames = frames
|
||||
|
||||
# newly concatenate the frames (which are proxies)
|
||||
prev_tb = None
|
||||
for tb in self.frames:
|
||||
if prev_tb is not None:
|
||||
prev_tb.set_next(tb)
|
||||
prev_tb = tb
|
||||
prev_tb.set_next(None)
|
||||
|
||||
def render_as_text(self, limit=None):
|
||||
"""Return a string with the traceback."""
|
||||
lines = traceback.format_exception(self.exc_type, self.exc_value,
|
||||
self.frames[0], limit=limit)
|
||||
return ''.join(lines).rstrip()
|
||||
|
||||
def render_as_html(self, full=False):
|
||||
"""Return a unicode string with the traceback as rendered HTML."""
|
||||
from jinja2.debugrenderer import render_traceback
|
||||
return u'%s\n\n<!--\n%s\n-->' % (
|
||||
render_traceback(self, full=full),
|
||||
self.render_as_text().decode('utf-8', 'replace')
|
||||
)
|
||||
|
||||
@property
|
||||
def is_template_syntax_error(self):
|
||||
"""`True` if this is a template syntax error."""
|
||||
return isinstance(self.exc_value, TemplateSyntaxError)
|
||||
|
||||
@property
|
||||
def exc_info(self):
|
||||
"""Exception info tuple with a proxy around the frame objects."""
|
||||
return self.exc_type, self.exc_value, self.frames[0]
|
||||
|
||||
@property
|
||||
def standard_exc_info(self):
|
||||
"""Standard python exc_info for re-raising"""
|
||||
tb = self.frames[0]
|
||||
# the frame will be an actual traceback (or transparent proxy) if
|
||||
# we are on pypy or a python implementation with support for tproxy
|
||||
if type(tb) is not TracebackType:
|
||||
tb = tb.tb
|
||||
return self.exc_type, self.exc_value, tb
|
||||
|
||||
|
||||
def make_traceback(exc_info, source_hint=None):
|
||||
"""Creates a processed traceback object from the exc_info."""
|
||||
exc_type, exc_value, tb = exc_info
|
||||
if isinstance(exc_value, TemplateSyntaxError):
|
||||
exc_info = translate_syntax_error(exc_value, source_hint)
|
||||
initial_skip = 0
|
||||
else:
|
||||
initial_skip = 1
|
||||
return translate_exception(exc_info, initial_skip)
|
||||
|
||||
|
||||
def translate_syntax_error(error, source=None):
|
||||
"""Rewrites a syntax error to please traceback systems."""
|
||||
error.source = source
|
||||
error.translated = True
|
||||
exc_info = (error.__class__, error, None)
|
||||
filename = error.filename
|
||||
if filename is None:
|
||||
filename = '<unknown>'
|
||||
return fake_exc_info(exc_info, filename, error.lineno)
|
||||
|
||||
|
||||
def translate_exception(exc_info, initial_skip=0):
|
||||
"""If passed an exc_info it will automatically rewrite the exceptions
|
||||
all the way down to the correct line numbers and frames.
|
||||
"""
|
||||
tb = exc_info[2]
|
||||
frames = []
|
||||
|
||||
# skip some internal frames if wanted
|
||||
for x in range(initial_skip):
|
||||
if tb is not None:
|
||||
tb = tb.tb_next
|
||||
initial_tb = tb
|
||||
|
||||
while tb is not None:
|
||||
# skip frames decorated with @internalcode. These are internal
|
||||
# calls we can't avoid and that are useless in template debugging
|
||||
# output.
|
||||
if tb.tb_frame.f_code in internal_code:
|
||||
tb = tb.tb_next
|
||||
continue
|
||||
|
||||
# save a reference to the next frame if we override the current
|
||||
# one with a faked one.
|
||||
next = tb.tb_next
|
||||
|
||||
# fake template exceptions
|
||||
template = tb.tb_frame.f_globals.get('__jinja_template__')
|
||||
if template is not None:
|
||||
lineno = template.get_corresponding_lineno(tb.tb_lineno)
|
||||
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
|
||||
lineno)[2]
|
||||
|
||||
frames.append(make_frame_proxy(tb))
|
||||
tb = next
|
||||
|
||||
# if we don't have any exceptions in the frames left, we have to
|
||||
# reraise it unchanged.
|
||||
# XXX: can we backup here? when could this happen?
|
||||
if not frames:
|
||||
reraise(exc_info[0], exc_info[1], exc_info[2])
|
||||
|
||||
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
|
||||
|
||||
|
||||
def fake_exc_info(exc_info, filename, lineno):
|
||||
"""Helper for `translate_exception`."""
|
||||
exc_type, exc_value, tb = exc_info
|
||||
|
||||
# figure the real context out
|
||||
if tb is not None:
|
||||
real_locals = tb.tb_frame.f_locals.copy()
|
||||
ctx = real_locals.get('context')
|
||||
if ctx:
|
||||
locals = ctx.get_all()
|
||||
else:
|
||||
locals = {}
|
||||
for name, value in iteritems(real_locals):
|
||||
if name.startswith('l_') and value is not missing:
|
||||
locals[name[2:]] = value
|
||||
|
||||
# if there is a local called __jinja_exception__, we get
|
||||
# rid of it to not break the debug functionality.
|
||||
locals.pop('__jinja_exception__', None)
|
||||
else:
|
||||
locals = {}
|
||||
|
||||
# assamble fake globals we need
|
||||
globals = {
|
||||
'__name__': filename,
|
||||
'__file__': filename,
|
||||
'__jinja_exception__': exc_info[:2],
|
||||
|
||||
# we don't want to keep the reference to the template around
|
||||
# to not cause circular dependencies, but we mark it as Jinja
|
||||
# frame for the ProcessedTraceback
|
||||
'__jinja_template__': None
|
||||
}
|
||||
|
||||
# and fake the exception
|
||||
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
|
||||
|
||||
# if it's possible, change the name of the code. This won't work
|
||||
# on some python environments such as google appengine
|
||||
try:
|
||||
if tb is None:
|
||||
location = 'template'
|
||||
else:
|
||||
function = tb.tb_frame.f_code.co_name
|
||||
if function == 'root':
|
||||
location = 'top-level template code'
|
||||
elif function.startswith('block_'):
|
||||
location = 'block "%s"' % function[6:]
|
||||
else:
|
||||
location = 'template'
|
||||
code = code_type(0, code.co_nlocals, code.co_stacksize,
|
||||
code.co_flags, code.co_code, code.co_consts,
|
||||
code.co_names, code.co_varnames, filename,
|
||||
location, code.co_firstlineno,
|
||||
code.co_lnotab, (), ())
|
||||
except:
|
||||
pass
|
||||
|
||||
# execute the code and catch the new traceback
|
||||
try:
|
||||
exec(code, globals, locals)
|
||||
except:
|
||||
exc_info = sys.exc_info()
|
||||
new_tb = exc_info[2].tb_next
|
||||
|
||||
# return without this frame
|
||||
return exc_info[:2] + (new_tb,)
|
||||
|
||||
|
||||
def _init_ugly_crap():
|
||||
"""This function implements a few ugly things so that we can patch the
|
||||
traceback objects. The function returned allows resetting `tb_next` on
|
||||
any python traceback object. Do not attempt to use this on non cpython
|
||||
interpreters
|
||||
"""
|
||||
import ctypes
|
||||
from types import TracebackType
|
||||
|
||||
# figure out side of _Py_ssize_t
|
||||
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
|
||||
_Py_ssize_t = ctypes.c_int64
|
||||
else:
|
||||
_Py_ssize_t = ctypes.c_int
|
||||
|
||||
# regular python
|
||||
class _PyObject(ctypes.Structure):
|
||||
pass
|
||||
_PyObject._fields_ = [
|
||||
('ob_refcnt', _Py_ssize_t),
|
||||
('ob_type', ctypes.POINTER(_PyObject))
|
||||
]
|
||||
|
||||
# python with trace
|
||||
if hasattr(sys, 'getobjects'):
|
||||
class _PyObject(ctypes.Structure):
|
||||
pass
|
||||
_PyObject._fields_ = [
|
||||
('_ob_next', ctypes.POINTER(_PyObject)),
|
||||
('_ob_prev', ctypes.POINTER(_PyObject)),
|
||||
('ob_refcnt', _Py_ssize_t),
|
||||
('ob_type', ctypes.POINTER(_PyObject))
|
||||
]
|
||||
|
||||
class _Traceback(_PyObject):
|
||||
pass
|
||||
_Traceback._fields_ = [
|
||||
('tb_next', ctypes.POINTER(_Traceback)),
|
||||
('tb_frame', ctypes.POINTER(_PyObject)),
|
||||
('tb_lasti', ctypes.c_int),
|
||||
('tb_lineno', ctypes.c_int)
|
||||
]
|
||||
|
||||
def tb_set_next(tb, next):
|
||||
"""Set the tb_next attribute of a traceback object."""
|
||||
if not (isinstance(tb, TracebackType) and
|
||||
(next is None or isinstance(next, TracebackType))):
|
||||
raise TypeError('tb_set_next arguments must be traceback objects')
|
||||
obj = _Traceback.from_address(id(tb))
|
||||
if tb.tb_next is not None:
|
||||
old = _Traceback.from_address(id(tb.tb_next))
|
||||
old.ob_refcnt -= 1
|
||||
if next is None:
|
||||
obj.tb_next = ctypes.POINTER(_Traceback)()
|
||||
else:
|
||||
next = _Traceback.from_address(id(next))
|
||||
next.ob_refcnt += 1
|
||||
obj.tb_next = ctypes.pointer(next)
|
||||
|
||||
return tb_set_next
|
||||
|
||||
|
||||
# try to get a tb_set_next implementation if we don't have transparent
|
||||
# proxies.
|
||||
tb_set_next = None
|
||||
if tproxy is None:
|
||||
try:
|
||||
tb_set_next = _init_ugly_crap()
|
||||
except:
|
||||
pass
|
||||
del _init_ugly_crap
|
43
3rdparty/jinja2/defaults.py
vendored
Normal file
43
3rdparty/jinja2/defaults.py
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.defaults
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Jinja default filters and tags.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
from jinja2._compat import range_type
|
||||
from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner
|
||||
|
||||
|
||||
# defaults for the parser / lexer
|
||||
BLOCK_START_STRING = '{%'
|
||||
BLOCK_END_STRING = '%}'
|
||||
VARIABLE_START_STRING = '{{'
|
||||
VARIABLE_END_STRING = '}}'
|
||||
COMMENT_START_STRING = '{#'
|
||||
COMMENT_END_STRING = '#}'
|
||||
LINE_STATEMENT_PREFIX = None
|
||||
LINE_COMMENT_PREFIX = None
|
||||
TRIM_BLOCKS = False
|
||||
LSTRIP_BLOCKS = False
|
||||
NEWLINE_SEQUENCE = '\n'
|
||||
KEEP_TRAILING_NEWLINE = False
|
||||
|
||||
|
||||
# default filters, tests and namespace
|
||||
from jinja2.filters import FILTERS as DEFAULT_FILTERS
|
||||
from jinja2.tests import TESTS as DEFAULT_TESTS
|
||||
DEFAULT_NAMESPACE = {
|
||||
'range': range_type,
|
||||
'dict': lambda **kw: kw,
|
||||
'lipsum': generate_lorem_ipsum,
|
||||
'cycler': Cycler,
|
||||
'joiner': Joiner
|
||||
}
|
||||
|
||||
|
||||
# export all constants
|
||||
__all__ = tuple(x for x in locals().keys() if x.isupper())
|
1191
3rdparty/jinja2/environment.py
vendored
Normal file
1191
3rdparty/jinja2/environment.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
146
3rdparty/jinja2/exceptions.py
vendored
Normal file
146
3rdparty/jinja2/exceptions.py
vendored
Normal file
@ -0,0 +1,146 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.exceptions
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
Jinja exceptions.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
from jinja2._compat import imap, text_type, PY2, implements_to_string
|
||||
|
||||
|
||||
class TemplateError(Exception):
|
||||
"""Baseclass for all template errors."""
|
||||
|
||||
if PY2:
|
||||
def __init__(self, message=None):
|
||||
if message is not None:
|
||||
message = text_type(message).encode('utf-8')
|
||||
Exception.__init__(self, message)
|
||||
|
||||
@property
|
||||
def message(self):
|
||||
if self.args:
|
||||
message = self.args[0]
|
||||
if message is not None:
|
||||
return message.decode('utf-8', 'replace')
|
||||
|
||||
def __unicode__(self):
|
||||
return self.message or u''
|
||||
else:
|
||||
def __init__(self, message=None):
|
||||
Exception.__init__(self, message)
|
||||
|
||||
@property
|
||||
def message(self):
|
||||
if self.args:
|
||||
message = self.args[0]
|
||||
if message is not None:
|
||||
return message
|
||||
|
||||
|
||||
@implements_to_string
|
||||
class TemplateNotFound(IOError, LookupError, TemplateError):
|
||||
"""Raised if a template does not exist."""
|
||||
|
||||
# looks weird, but removes the warning descriptor that just
|
||||
# bogusly warns us about message being deprecated
|
||||
message = None
|
||||
|
||||
def __init__(self, name, message=None):
|
||||
IOError.__init__(self)
|
||||
if message is None:
|
||||
message = name
|
||||
self.message = message
|
||||
self.name = name
|
||||
self.templates = [name]
|
||||
|
||||
def __str__(self):
|
||||
return self.message
|
||||
|
||||
|
||||
class TemplatesNotFound(TemplateNotFound):
|
||||
"""Like :class:`TemplateNotFound` but raised if multiple templates
|
||||
are selected. This is a subclass of :class:`TemplateNotFound`
|
||||
exception, so just catching the base exception will catch both.
|
||||
|
||||
.. versionadded:: 2.2
|
||||
"""
|
||||
|
||||
def __init__(self, names=(), message=None):
|
||||
if message is None:
|
||||
message = u'none of the templates given were found: ' + \
|
||||
u', '.join(imap(text_type, names))
|
||||
TemplateNotFound.__init__(self, names and names[-1] or None, message)
|
||||
self.templates = list(names)
|
||||
|
||||
|
||||
@implements_to_string
|
||||
class TemplateSyntaxError(TemplateError):
|
||||
"""Raised to tell the user that there is a problem with the template."""
|
||||
|
||||
def __init__(self, message, lineno, name=None, filename=None):
|
||||
TemplateError.__init__(self, message)
|
||||
self.lineno = lineno
|
||||
self.name = name
|
||||
self.filename = filename
|
||||
self.source = None
|
||||
|
||||
# this is set to True if the debug.translate_syntax_error
|
||||
# function translated the syntax error into a new traceback
|
||||
self.translated = False
|
||||
|
||||
def __str__(self):
|
||||
# for translated errors we only return the message
|
||||
if self.translated:
|
||||
return self.message
|
||||
|
||||
# otherwise attach some stuff
|
||||
location = 'line %d' % self.lineno
|
||||
name = self.filename or self.name
|
||||
if name:
|
||||
location = 'File "%s", %s' % (name, location)
|
||||
lines = [self.message, ' ' + location]
|
||||
|
||||
# if the source is set, add the line to the output
|
||||
if self.source is not None:
|
||||
try:
|
||||
line = self.source.splitlines()[self.lineno - 1]
|
||||
except IndexError:
|
||||
line = None
|
||||
if line:
|
||||
lines.append(' ' + line.strip())
|
||||
|
||||
return u'\n'.join(lines)
|
||||
|
||||
|
||||
class TemplateAssertionError(TemplateSyntaxError):
|
||||
"""Like a template syntax error, but covers cases where something in the
|
||||
template caused an error at compile time that wasn't necessarily caused
|
||||
by a syntax error. However it's a direct subclass of
|
||||
:exc:`TemplateSyntaxError` and has the same attributes.
|
||||
"""
|
||||
|
||||
|
||||
class TemplateRuntimeError(TemplateError):
|
||||
"""A generic runtime error in the template engine. Under some situations
|
||||
Jinja may raise this exception.
|
||||
"""
|
||||
|
||||
|
||||
class UndefinedError(TemplateRuntimeError):
|
||||
"""Raised if a template tries to operate on :class:`Undefined`."""
|
||||
|
||||
|
||||
class SecurityError(TemplateRuntimeError):
|
||||
"""Raised if a template tries to do something insecure if the
|
||||
sandbox is enabled.
|
||||
"""
|
||||
|
||||
|
||||
class FilterArgumentError(TemplateRuntimeError):
|
||||
"""This error is raised if a filter was called with inappropriate
|
||||
arguments
|
||||
"""
|
636
3rdparty/jinja2/ext.py
vendored
Normal file
636
3rdparty/jinja2/ext.py
vendored
Normal file
@ -0,0 +1,636 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.ext
|
||||
~~~~~~~~~~
|
||||
|
||||
Jinja extensions allow to add custom tags similar to the way django custom
|
||||
tags work. By default two example extensions exist: an i18n and a cache
|
||||
extension.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD.
|
||||
"""
|
||||
from jinja2 import nodes
|
||||
from jinja2.defaults import BLOCK_START_STRING, \
|
||||
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
|
||||
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
|
||||
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
|
||||
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
|
||||
from jinja2.environment import Environment
|
||||
from jinja2.runtime import concat
|
||||
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
|
||||
from jinja2.utils import contextfunction, import_string, Markup
|
||||
from jinja2._compat import next, with_metaclass, string_types, iteritems
|
||||
|
||||
|
||||
# the only real useful gettext functions for a Jinja template. Note
|
||||
# that ugettext must be assigned to gettext as Jinja doesn't support
|
||||
# non unicode strings.
|
||||
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
|
||||
|
||||
|
||||
class ExtensionRegistry(type):
|
||||
"""Gives the extension an unique identifier."""
|
||||
|
||||
def __new__(cls, name, bases, d):
|
||||
rv = type.__new__(cls, name, bases, d)
|
||||
rv.identifier = rv.__module__ + '.' + rv.__name__
|
||||
return rv
|
||||
|
||||
|
||||
class Extension(with_metaclass(ExtensionRegistry, object)):
|
||||
"""Extensions can be used to add extra functionality to the Jinja template
|
||||
system at the parser level. Custom extensions are bound to an environment
|
||||
but may not store environment specific data on `self`. The reason for
|
||||
this is that an extension can be bound to another environment (for
|
||||
overlays) by creating a copy and reassigning the `environment` attribute.
|
||||
|
||||
As extensions are created by the environment they cannot accept any
|
||||
arguments for configuration. One may want to work around that by using
|
||||
a factory function, but that is not possible as extensions are identified
|
||||
by their import name. The correct way to configure the extension is
|
||||
storing the configuration values on the environment. Because this way the
|
||||
environment ends up acting as central configuration storage the
|
||||
attributes may clash which is why extensions have to ensure that the names
|
||||
they choose for configuration are not too generic. ``prefix`` for example
|
||||
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
|
||||
name as includes the name of the extension (fragment cache).
|
||||
"""
|
||||
|
||||
#: if this extension parses this is the list of tags it's listening to.
|
||||
tags = set()
|
||||
|
||||
#: the priority of that extension. This is especially useful for
|
||||
#: extensions that preprocess values. A lower value means higher
|
||||
#: priority.
|
||||
#:
|
||||
#: .. versionadded:: 2.4
|
||||
priority = 100
|
||||
|
||||
def __init__(self, environment):
|
||||
self.environment = environment
|
||||
|
||||
def bind(self, environment):
|
||||
"""Create a copy of this extension bound to another environment."""
|
||||
rv = object.__new__(self.__class__)
|
||||
rv.__dict__.update(self.__dict__)
|
||||
rv.environment = environment
|
||||
return rv
|
||||
|
||||
def preprocess(self, source, name, filename=None):
|
||||
"""This method is called before the actual lexing and can be used to
|
||||
preprocess the source. The `filename` is optional. The return value
|
||||
must be the preprocessed source.
|
||||
"""
|
||||
return source
|
||||
|
||||
def filter_stream(self, stream):
|
||||
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
|
||||
to filter tokens returned. This method has to return an iterable of
|
||||
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
|
||||
:class:`~jinja2.lexer.TokenStream`.
|
||||
|
||||
In the `ext` folder of the Jinja2 source distribution there is a file
|
||||
called `inlinegettext.py` which implements a filter that utilizes this
|
||||
method.
|
||||
"""
|
||||
return stream
|
||||
|
||||
def parse(self, parser):
|
||||
"""If any of the :attr:`tags` matched this method is called with the
|
||||
parser as first argument. The token the parser stream is pointing at
|
||||
is the name token that matched. This method has to return one or a
|
||||
list of multiple nodes.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def attr(self, name, lineno=None):
|
||||
"""Return an attribute node for the current extension. This is useful
|
||||
to pass constants on extensions to generated template code.
|
||||
|
||||
::
|
||||
|
||||
self.attr('_my_attribute', lineno=lineno)
|
||||
"""
|
||||
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
|
||||
|
||||
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
|
||||
dyn_kwargs=None, lineno=None):
|
||||
"""Call a method of the extension. This is a shortcut for
|
||||
:meth:`attr` + :class:`jinja2.nodes.Call`.
|
||||
"""
|
||||
if args is None:
|
||||
args = []
|
||||
if kwargs is None:
|
||||
kwargs = []
|
||||
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
|
||||
dyn_args, dyn_kwargs, lineno=lineno)
|
||||
|
||||
|
||||
@contextfunction
|
||||
def _gettext_alias(__context, *args, **kwargs):
|
||||
return __context.call(__context.resolve('gettext'), *args, **kwargs)
|
||||
|
||||
|
||||
def _make_new_gettext(func):
|
||||
@contextfunction
|
||||
def gettext(__context, __string, **variables):
|
||||
rv = __context.call(func, __string)
|
||||
if __context.eval_ctx.autoescape:
|
||||
rv = Markup(rv)
|
||||
return rv % variables
|
||||
return gettext
|
||||
|
||||
|
||||
def _make_new_ngettext(func):
|
||||
@contextfunction
|
||||
def ngettext(__context, __singular, __plural, __num, **variables):
|
||||
variables.setdefault('num', __num)
|
||||
rv = __context.call(func, __singular, __plural, __num)
|
||||
if __context.eval_ctx.autoescape:
|
||||
rv = Markup(rv)
|
||||
return rv % variables
|
||||
return ngettext
|
||||
|
||||
|
||||
class InternationalizationExtension(Extension):
|
||||
"""This extension adds gettext support to Jinja2."""
|
||||
tags = set(['trans'])
|
||||
|
||||
# TODO: the i18n extension is currently reevaluating values in a few
|
||||
# situations. Take this example:
|
||||
# {% trans count=something() %}{{ count }} foo{% pluralize
|
||||
# %}{{ count }} fooss{% endtrans %}
|
||||
# something is called twice here. One time for the gettext value and
|
||||
# the other time for the n-parameter of the ngettext function.
|
||||
|
||||
def __init__(self, environment):
|
||||
Extension.__init__(self, environment)
|
||||
environment.globals['_'] = _gettext_alias
|
||||
environment.extend(
|
||||
install_gettext_translations=self._install,
|
||||
install_null_translations=self._install_null,
|
||||
install_gettext_callables=self._install_callables,
|
||||
uninstall_gettext_translations=self._uninstall,
|
||||
extract_translations=self._extract,
|
||||
newstyle_gettext=False
|
||||
)
|
||||
|
||||
def _install(self, translations, newstyle=None):
|
||||
gettext = getattr(translations, 'ugettext', None)
|
||||
if gettext is None:
|
||||
gettext = translations.gettext
|
||||
ngettext = getattr(translations, 'ungettext', None)
|
||||
if ngettext is None:
|
||||
ngettext = translations.ngettext
|
||||
self._install_callables(gettext, ngettext, newstyle)
|
||||
|
||||
def _install_null(self, newstyle=None):
|
||||
self._install_callables(
|
||||
lambda x: x,
|
||||
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
|
||||
newstyle
|
||||
)
|
||||
|
||||
def _install_callables(self, gettext, ngettext, newstyle=None):
|
||||
if newstyle is not None:
|
||||
self.environment.newstyle_gettext = newstyle
|
||||
if self.environment.newstyle_gettext:
|
||||
gettext = _make_new_gettext(gettext)
|
||||
ngettext = _make_new_ngettext(ngettext)
|
||||
self.environment.globals.update(
|
||||
gettext=gettext,
|
||||
ngettext=ngettext
|
||||
)
|
||||
|
||||
def _uninstall(self, translations):
|
||||
for key in 'gettext', 'ngettext':
|
||||
self.environment.globals.pop(key, None)
|
||||
|
||||
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
|
||||
if isinstance(source, string_types):
|
||||
source = self.environment.parse(source)
|
||||
return extract_from_ast(source, gettext_functions)
|
||||
|
||||
def parse(self, parser):
|
||||
"""Parse a translatable tag."""
|
||||
lineno = next(parser.stream).lineno
|
||||
num_called_num = False
|
||||
|
||||
# find all the variables referenced. Additionally a variable can be
|
||||
# defined in the body of the trans block too, but this is checked at
|
||||
# a later state.
|
||||
plural_expr = None
|
||||
plural_expr_assignment = None
|
||||
variables = {}
|
||||
while parser.stream.current.type != 'block_end':
|
||||
if variables:
|
||||
parser.stream.expect('comma')
|
||||
|
||||
# skip colon for python compatibility
|
||||
if parser.stream.skip_if('colon'):
|
||||
break
|
||||
|
||||
name = parser.stream.expect('name')
|
||||
if name.value in variables:
|
||||
parser.fail('translatable variable %r defined twice.' %
|
||||
name.value, name.lineno,
|
||||
exc=TemplateAssertionError)
|
||||
|
||||
# expressions
|
||||
if parser.stream.current.type == 'assign':
|
||||
next(parser.stream)
|
||||
variables[name.value] = var = parser.parse_expression()
|
||||
else:
|
||||
variables[name.value] = var = nodes.Name(name.value, 'load')
|
||||
|
||||
if plural_expr is None:
|
||||
if isinstance(var, nodes.Call):
|
||||
plural_expr = nodes.Name('_trans', 'load')
|
||||
variables[name.value] = plural_expr
|
||||
plural_expr_assignment = nodes.Assign(
|
||||
nodes.Name('_trans', 'store'), var)
|
||||
else:
|
||||
plural_expr = var
|
||||
num_called_num = name.value == 'num'
|
||||
|
||||
parser.stream.expect('block_end')
|
||||
|
||||
plural = plural_names = None
|
||||
have_plural = False
|
||||
referenced = set()
|
||||
|
||||
# now parse until endtrans or pluralize
|
||||
singular_names, singular = self._parse_block(parser, True)
|
||||
if singular_names:
|
||||
referenced.update(singular_names)
|
||||
if plural_expr is None:
|
||||
plural_expr = nodes.Name(singular_names[0], 'load')
|
||||
num_called_num = singular_names[0] == 'num'
|
||||
|
||||
# if we have a pluralize block, we parse that too
|
||||
if parser.stream.current.test('name:pluralize'):
|
||||
have_plural = True
|
||||
next(parser.stream)
|
||||
if parser.stream.current.type != 'block_end':
|
||||
name = parser.stream.expect('name')
|
||||
if name.value not in variables:
|
||||
parser.fail('unknown variable %r for pluralization' %
|
||||
name.value, name.lineno,
|
||||
exc=TemplateAssertionError)
|
||||
plural_expr = variables[name.value]
|
||||
num_called_num = name.value == 'num'
|
||||
parser.stream.expect('block_end')
|
||||
plural_names, plural = self._parse_block(parser, False)
|
||||
next(parser.stream)
|
||||
referenced.update(plural_names)
|
||||
else:
|
||||
next(parser.stream)
|
||||
|
||||
# register free names as simple name expressions
|
||||
for var in referenced:
|
||||
if var not in variables:
|
||||
variables[var] = nodes.Name(var, 'load')
|
||||
|
||||
if not have_plural:
|
||||
plural_expr = None
|
||||
elif plural_expr is None:
|
||||
parser.fail('pluralize without variables', lineno)
|
||||
|
||||
node = self._make_node(singular, plural, variables, plural_expr,
|
||||
bool(referenced),
|
||||
num_called_num and have_plural)
|
||||
node.set_lineno(lineno)
|
||||
if plural_expr_assignment is not None:
|
||||
return [plural_expr_assignment, node]
|
||||
else:
|
||||
return node
|
||||
|
||||
def _parse_block(self, parser, allow_pluralize):
|
||||
"""Parse until the next block tag with a given name."""
|
||||
referenced = []
|
||||
buf = []
|
||||
while 1:
|
||||
if parser.stream.current.type == 'data':
|
||||
buf.append(parser.stream.current.value.replace('%', '%%'))
|
||||
next(parser.stream)
|
||||
elif parser.stream.current.type == 'variable_begin':
|
||||
next(parser.stream)
|
||||
name = parser.stream.expect('name').value
|
||||
referenced.append(name)
|
||||
buf.append('%%(%s)s' % name)
|
||||
parser.stream.expect('variable_end')
|
||||
elif parser.stream.current.type == 'block_begin':
|
||||
next(parser.stream)
|
||||
if parser.stream.current.test('name:endtrans'):
|
||||
break
|
||||
elif parser.stream.current.test('name:pluralize'):
|
||||
if allow_pluralize:
|
||||
break
|
||||
parser.fail('a translatable section can have only one '
|
||||
'pluralize section')
|
||||
parser.fail('control structures in translatable sections are '
|
||||
'not allowed')
|
||||
elif parser.stream.eos:
|
||||
parser.fail('unclosed translation block')
|
||||
else:
|
||||
assert False, 'internal parser error'
|
||||
|
||||
return referenced, concat(buf)
|
||||
|
||||
def _make_node(self, singular, plural, variables, plural_expr,
|
||||
vars_referenced, num_called_num):
|
||||
"""Generates a useful node from the data provided."""
|
||||
# no variables referenced? no need to escape for old style
|
||||
# gettext invocations only if there are vars.
|
||||
if not vars_referenced and not self.environment.newstyle_gettext:
|
||||
singular = singular.replace('%%', '%')
|
||||
if plural:
|
||||
plural = plural.replace('%%', '%')
|
||||
|
||||
# singular only:
|
||||
if plural_expr is None:
|
||||
gettext = nodes.Name('gettext', 'load')
|
||||
node = nodes.Call(gettext, [nodes.Const(singular)],
|
||||
[], None, None)
|
||||
|
||||
# singular and plural
|
||||
else:
|
||||
ngettext = nodes.Name('ngettext', 'load')
|
||||
node = nodes.Call(ngettext, [
|
||||
nodes.Const(singular),
|
||||
nodes.Const(plural),
|
||||
plural_expr
|
||||
], [], None, None)
|
||||
|
||||
# in case newstyle gettext is used, the method is powerful
|
||||
# enough to handle the variable expansion and autoescape
|
||||
# handling itself
|
||||
if self.environment.newstyle_gettext:
|
||||
for key, value in iteritems(variables):
|
||||
# the function adds that later anyways in case num was
|
||||
# called num, so just skip it.
|
||||
if num_called_num and key == 'num':
|
||||
continue
|
||||
node.kwargs.append(nodes.Keyword(key, value))
|
||||
|
||||
# otherwise do that here
|
||||
else:
|
||||
# mark the return value as safe if we are in an
|
||||
# environment with autoescaping turned on
|
||||
node = nodes.MarkSafeIfAutoescape(node)
|
||||
if variables:
|
||||
node = nodes.Mod(node, nodes.Dict([
|
||||
nodes.Pair(nodes.Const(key), value)
|
||||
for key, value in variables.items()
|
||||
]))
|
||||
return nodes.Output([node])
|
||||
|
||||
|
||||
class ExprStmtExtension(Extension):
|
||||
"""Adds a `do` tag to Jinja2 that works like the print statement just
|
||||
that it doesn't print the return value.
|
||||
"""
|
||||
tags = set(['do'])
|
||||
|
||||
def parse(self, parser):
|
||||
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
|
||||
node.node = parser.parse_tuple()
|
||||
return node
|
||||
|
||||
|
||||
class LoopControlExtension(Extension):
|
||||
"""Adds break and continue to the template engine."""
|
||||
tags = set(['break', 'continue'])
|
||||
|
||||
def parse(self, parser):
|
||||
token = next(parser.stream)
|
||||
if token.value == 'break':
|
||||
return nodes.Break(lineno=token.lineno)
|
||||
return nodes.Continue(lineno=token.lineno)
|
||||
|
||||
|
||||
class WithExtension(Extension):
|
||||
"""Adds support for a django-like with block."""
|
||||
tags = set(['with'])
|
||||
|
||||
def parse(self, parser):
|
||||
node = nodes.Scope(lineno=next(parser.stream).lineno)
|
||||
assignments = []
|
||||
while parser.stream.current.type != 'block_end':
|
||||
lineno = parser.stream.current.lineno
|
||||
if assignments:
|
||||
parser.stream.expect('comma')
|
||||
target = parser.parse_assign_target()
|
||||
parser.stream.expect('assign')
|
||||
expr = parser.parse_expression()
|
||||
assignments.append(nodes.Assign(target, expr, lineno=lineno))
|
||||
node.body = assignments + \
|
||||
list(parser.parse_statements(('name:endwith',),
|
||||
drop_needle=True))
|
||||
return node
|
||||
|
||||
|
||||
class AutoEscapeExtension(Extension):
|
||||
"""Changes auto escape rules for a scope."""
|
||||
tags = set(['autoescape'])
|
||||
|
||||
def parse(self, parser):
|
||||
node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
|
||||
node.options = [
|
||||
nodes.Keyword('autoescape', parser.parse_expression())
|
||||
]
|
||||
node.body = parser.parse_statements(('name:endautoescape',),
|
||||
drop_needle=True)
|
||||
return nodes.Scope([node])
|
||||
|
||||
|
||||
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
|
||||
babel_style=True):
|
||||
"""Extract localizable strings from the given template node. Per
|
||||
default this function returns matches in babel style that means non string
|
||||
parameters as well as keyword arguments are returned as `None`. This
|
||||
allows Babel to figure out what you really meant if you are using
|
||||
gettext functions that allow keyword arguments for placeholder expansion.
|
||||
If you don't want that behavior set the `babel_style` parameter to `False`
|
||||
which causes only strings to be returned and parameters are always stored
|
||||
in tuples. As a consequence invalid gettext calls (calls without a single
|
||||
string parameter or string parameters after non-string parameters) are
|
||||
skipped.
|
||||
|
||||
This example explains the behavior:
|
||||
|
||||
>>> from jinja2 import Environment
|
||||
>>> env = Environment()
|
||||
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
|
||||
>>> list(extract_from_ast(node))
|
||||
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
|
||||
>>> list(extract_from_ast(node, babel_style=False))
|
||||
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
|
||||
|
||||
For every string found this function yields a ``(lineno, function,
|
||||
message)`` tuple, where:
|
||||
|
||||
* ``lineno`` is the number of the line on which the string was found,
|
||||
* ``function`` is the name of the ``gettext`` function used (if the
|
||||
string was extracted from embedded Python code), and
|
||||
* ``message`` is the string itself (a ``unicode`` object, or a tuple
|
||||
of ``unicode`` objects for functions with multiple string arguments).
|
||||
|
||||
This extraction function operates on the AST and is because of that unable
|
||||
to extract any comments. For comment support you have to use the babel
|
||||
extraction interface or extract comments yourself.
|
||||
"""
|
||||
for node in node.find_all(nodes.Call):
|
||||
if not isinstance(node.node, nodes.Name) or \
|
||||
node.node.name not in gettext_functions:
|
||||
continue
|
||||
|
||||
strings = []
|
||||
for arg in node.args:
|
||||
if isinstance(arg, nodes.Const) and \
|
||||
isinstance(arg.value, string_types):
|
||||
strings.append(arg.value)
|
||||
else:
|
||||
strings.append(None)
|
||||
|
||||
for arg in node.kwargs:
|
||||
strings.append(None)
|
||||
if node.dyn_args is not None:
|
||||
strings.append(None)
|
||||
if node.dyn_kwargs is not None:
|
||||
strings.append(None)
|
||||
|
||||
if not babel_style:
|
||||
strings = tuple(x for x in strings if x is not None)
|
||||
if not strings:
|
||||
continue
|
||||
else:
|
||||
if len(strings) == 1:
|
||||
strings = strings[0]
|
||||
else:
|
||||
strings = tuple(strings)
|
||||
yield node.lineno, node.node.name, strings
|
||||
|
||||
|
||||
class _CommentFinder(object):
|
||||
"""Helper class to find comments in a token stream. Can only
|
||||
find comments for gettext calls forwards. Once the comment
|
||||
from line 4 is found, a comment for line 1 will not return a
|
||||
usable value.
|
||||
"""
|
||||
|
||||
def __init__(self, tokens, comment_tags):
|
||||
self.tokens = tokens
|
||||
self.comment_tags = comment_tags
|
||||
self.offset = 0
|
||||
self.last_lineno = 0
|
||||
|
||||
def find_backwards(self, offset):
|
||||
try:
|
||||
for _, token_type, token_value in \
|
||||
reversed(self.tokens[self.offset:offset]):
|
||||
if token_type in ('comment', 'linecomment'):
|
||||
try:
|
||||
prefix, comment = token_value.split(None, 1)
|
||||
except ValueError:
|
||||
continue
|
||||
if prefix in self.comment_tags:
|
||||
return [comment.rstrip()]
|
||||
return []
|
||||
finally:
|
||||
self.offset = offset
|
||||
|
||||
def find_comments(self, lineno):
|
||||
if not self.comment_tags or self.last_lineno > lineno:
|
||||
return []
|
||||
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
|
||||
if token_lineno > lineno:
|
||||
return self.find_backwards(self.offset + idx)
|
||||
return self.find_backwards(len(self.tokens))
|
||||
|
||||
|
||||
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added.  If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceeding comment that begins with one of the
       keywords.  For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    .. versionchanged:: 2.5.1
       The `newstyle_gettext` flag can be set to `True` to enable newstyle
       gettext calls.

    .. versionchanged:: 2.7
       A `silent` option can now be provided.  If set to `False` template
       syntax errors are propagated instead of being ignored.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
             (comments will be empty currently)
    """
    # Collect the extension classes named in the (comma separated)
    # ``extensions`` option; i18n support is always forced on since it is
    # what produces the gettext nodes we extract from.
    extensions = set()
    for extension in options.get('extensions', '').split(','):
        extension = extension.strip()
        if not extension:
            continue
        extensions.add(import_string(extension))
    if InternationalizationExtension not in extensions:
        extensions.add(InternationalizationExtension)

    # Babel passes all options as strings; interpret the usual truthy forms.
    def getbool(options, key, default=False):
        return options.get(key, str(default)).lower() in \
            ('1', 'on', 'yes', 'true')

    silent = getbool(options, 'silent', True)
    # Positional Environment arguments — order must match the Environment
    # constructor signature exactly.
    environment = Environment(
        options.get('block_start_string', BLOCK_START_STRING),
        options.get('block_end_string', BLOCK_END_STRING),
        options.get('variable_start_string', VARIABLE_START_STRING),
        options.get('variable_end_string', VARIABLE_END_STRING),
        options.get('comment_start_string', COMMENT_START_STRING),
        options.get('comment_end_string', COMMENT_END_STRING),
        options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
        options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
        getbool(options, 'trim_blocks', TRIM_BLOCKS),
        getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
        NEWLINE_SEQUENCE,
        getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
        frozenset(extensions),
        cache_size=0,
        auto_reload=False
    )

    if getbool(options, 'newstyle_gettext'):
        environment.newstyle_gettext = True

    source = fileobj.read().decode(options.get('encoding', 'utf-8'))
    try:
        node = environment.parse(source)
        # The raw token stream is lexed separately so translator comments
        # (which the AST does not preserve) can be located by line number.
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError as e:
        if not silent:
            raise
        # skip templates with syntax errors
        return

    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
|
||||
|
||||
|
||||
#: nicer import names — short aliases so extensions can be referenced as
#: e.g. ``jinja2.ext.i18n`` instead of the full class name.
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
|
987
3rdparty/jinja2/filters.py
vendored
Normal file
987
3rdparty/jinja2/filters.py
vendored
Normal file
@ -0,0 +1,987 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.filters
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Bundled jinja filters.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import re
|
||||
import math
|
||||
|
||||
from random import choice
|
||||
from operator import itemgetter
|
||||
from itertools import groupby
|
||||
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
|
||||
unicode_urlencode
|
||||
from jinja2.runtime import Undefined
|
||||
from jinja2.exceptions import FilterArgumentError
|
||||
from jinja2._compat import next, imap, string_types, text_type, iteritems
|
||||
|
||||
|
||||
_word_re = re.compile(r'\w+(?u)')
|
||||
|
||||
|
||||
def contextfilter(f):
    """Mark a filter as context dependent.  The active :class:`Context`
    is then passed to the filter as its first argument.
    """
    f.contextfilter = True
    return f
|
||||
|
||||
|
||||
def evalcontextfilter(f):
    """Mark a filter as eval-context dependent.  The eval context object
    is then passed as the filter's first argument; see :ref:`eval-context`
    for details.

    .. versionadded:: 2.4
    """
    f.evalcontextfilter = True
    return f
|
||||
|
||||
|
||||
def environmentfilter(f):
    """Mark a filter as environment dependent.  The active
    :class:`Environment` is then passed as the filter's first argument.
    """
    f.environmentfilter = True
    return f
|
||||
|
||||
|
||||
def make_attrgetter(environment, attribute):
    """Build a callable that fetches ``attribute`` from an object using
    the environment's subscription rules.  Dotted paths descend through
    nested attributes; purely numeric path segments are looked up as
    integer indices.
    """
    # Fast path: not a string, or a flat name with no dots and no digits —
    # a single environment lookup suffices.
    if not isinstance(attribute, string_types) \
            or ('.' not in attribute and not attribute.isdigit()):
        return lambda x: environment.getitem(x, attribute)
    parts = attribute.split('.')

    def attrgetter(item):
        for piece in parts:
            key = int(piece) if piece.isdigit() else piece
            item = environment.getitem(item, key)
        return item
    return attrgetter
|
||||
|
||||
|
||||
def do_forceescape(value):
    """Enforce HTML escaping.  Values that are already markup-safe are
    unwrapped first, so this will probably double escape variables.
    """
    if hasattr(value, '__html__'):
        value = value.__html__()
    text = text_type(value)
    return escape(text)
|
||||
|
||||
|
||||
def do_urlencode(value):
    """Escape strings for use in URLs (uses UTF-8 encoding).  Accepts
    dictionaries, plain strings, and pairwise iterables.

    .. versionadded:: 2.7
    """
    if isinstance(value, dict):
        pairs = iteritems(value)
    elif isinstance(value, string_types):
        pairs = None
    else:
        # Non-string, non-dict: treat as an iterable of (key, value)
        # pairs when possible.
        try:
            pairs = iter(value)
        except TypeError:
            pairs = None
    if pairs is None:
        return unicode_urlencode(value)
    return u'&'.join(unicode_urlencode(k) + '=' +
                     unicode_urlencode(v) for k, v in pairs)
|
||||
|
||||
|
||||
@evalcontextfilter
def do_replace(eval_ctx, s, old, new, count=None):
    """Return a copy of the value with all occurrences of a substring
    replaced with a new one.  The first argument is the substring
    that should be replaced, the second is the replacement string.
    If the optional third argument ``count`` is given, only the first
    ``count`` occurrences are replaced:

    .. sourcecode:: jinja

        {{ "Hello World"|replace("Hello", "Goodbye") }}
            -> Goodbye World

        {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
            -> d'oh, d'oh, aaargh
    """
    if count is None:
        count = -1
    if not eval_ctx.autoescape:
        return text_type(s).replace(text_type(old), text_type(new), count)
    # BUG FIX: the original condition read ``hasattr(old, '__html__') or
    # hasattr(new, '__html__') and not hasattr(s, '__html__')`` which, by
    # operator precedence, parses as ``A or (B and not C)``.  The intent
    # is: escape ``s`` whenever either operand is markup but ``s`` itself
    # is not already markup-safe.
    if (hasattr(old, '__html__') or hasattr(new, '__html__')) and \
            not hasattr(s, '__html__'):
        s = escape(s)
    else:
        s = soft_unicode(s)
    return s.replace(soft_unicode(old), soft_unicode(new), count)
|
||||
|
||||
|
||||
def do_upper(s):
    """Convert a value to uppercase."""
    text = soft_unicode(s)
    return text.upper()
|
||||
|
||||
|
||||
def do_lower(s):
    """Convert a value to lowercase."""
    text = soft_unicode(s)
    return text.lower()
|
||||
|
||||
|
||||
@evalcontextfilter
def do_xmlattr(_eval_ctx, d, autospace=True):
    """Create an SGML/XML attribute string based on the items in a dict.
    All values that are neither `none` nor `undefined` are automatically
    escaped:

    .. sourcecode:: html+jinja

        <ul{{ {'class': 'my_list', 'missing': none,
                'id': 'list-%d'|format(variable)}|xmlattr }}>
        ...
        </ul>

    Results in something like this:

    .. sourcecode:: html

        <ul class="my_list" id="list-42">
        ...
        </ul>

    As you can see it automatically prepends a space in front of the item
    if the filter returned something unless the second parameter is false.
    """
    pieces = []
    for key, value in iteritems(d):
        # Skip attributes with no usable value.
        if value is None or isinstance(value, Undefined):
            continue
        pieces.append(u'%s="%s"' % (escape(key), escape(value)))
    rv = u' '.join(pieces)
    if autospace and rv:
        rv = u' ' + rv
    if _eval_ctx.autoescape:
        rv = Markup(rv)
    return rv
|
||||
|
||||
|
||||
def do_capitalize(s):
    """Capitalize a value: the first character becomes uppercase, all
    others lowercase.
    """
    text = soft_unicode(s)
    return text.capitalize()
|
||||
|
||||
|
||||
def do_title(s):
    """Return a titlecased version of the value: words start with an
    uppercase letter, all remaining characters are lowercase.  Words are
    delimited by whitespace or hyphens, and the delimiters are preserved.
    """
    rv = []
    # The global ``(?u)`` flag is placed at the start of the pattern:
    # Python 3.11+ raises an error for global inline flags that appear
    # anywhere else in the pattern.
    for item in re.compile(r'(?u)([-\s]+)').split(s):
        if not item:
            continue
        rv.append(item[0].upper() + item[1:].lower())
    return ''.join(rv)
|
||||
|
||||
|
||||
def do_dictsort(value, case_sensitive=False, by='key'):
    """Sort a dict and yield (key, value) pairs.  Because python dicts are
    unsorted you may want to use this function to order them by either
    key or value:

    .. sourcecode:: jinja

        {% for item in mydict|dictsort %}
            sort the dict by key, case insensitive

        {% for item in mydict|dictsort(true) %}
            sort the dict by key, case sensitive

        {% for item in mydict|dictsort(false, 'value') %}
            sort the dict by key, case insensitive, sorted
            normally and ordered by value.
    """
    if by == 'key':
        pos = 0
    elif by == 'value':
        pos = 1
    else:
        raise FilterArgumentError('You can only sort by either '
                                  '"key" or "value"')

    def sort_func(item):
        entry = item[pos]
        # Normalize strings for case-insensitive comparison.
        if not case_sensitive and isinstance(entry, string_types):
            entry = entry.lower()
        return entry

    return sorted(value.items(), key=sort_func)
|
||||
|
||||
|
||||
@environmentfilter
def do_sort(environment, value, reverse=False, case_sensitive=False,
            attribute=None):
    """Sort an iterable.  Per default it sorts ascending; pass `true` as
    the first argument to reverse the sorting.

    If the iterable is made of strings the third parameter can be used to
    control the case sensitiveness of the comparison which is disabled by
    default.

    .. sourcecode:: jinja

        {% for item in iterable|sort %}
            ...
        {% endfor %}

    It is also possible to sort by an attribute (for example to sort
    by the date of an object) by specifying the `attribute` parameter:

    .. sourcecode:: jinja

        {% for item in iterable|sort(attribute='date') %}
            ...
        {% endfor %}

    .. versionchanged:: 2.6
       The `attribute` parameter was added.
    """
    if case_sensitive:
        sort_func = None
    else:
        # Lowercase string keys so comparison ignores case.
        def sort_func(item):
            return item.lower() if isinstance(item, string_types) else item
    if attribute is not None:
        getter = make_attrgetter(environment, attribute)

        # Compose the attribute lookup with the (optional) normalizer.
        def sort_func(item, processor=sort_func or (lambda x: x)):
            return processor(getter(item))
    return sorted(value, key=sort_func, reverse=reverse)
|
||||
|
||||
|
||||
def do_default(value, default_value=u'', boolean=False):
    """If the value is undefined it will return the passed default value,
    otherwise the value of the variable:

    .. sourcecode:: jinja

        {{ my_variable|default('my_variable is not defined') }}

    This will output the value of ``my_variable`` if the variable was
    defined, otherwise ``'my_variable is not defined'``.  If you want
    to use default with variables that evaluate to false you have to
    set the second parameter to `true`:

    .. sourcecode:: jinja

        {{ ''|default('the string was empty', true) }}
    """
    missing = isinstance(value, Undefined)
    if missing or (boolean and not value):
        return default_value
    return value
|
||||
|
||||
|
||||
@evalcontextfilter
def do_join(eval_ctx, value, d=u'', attribute=None):
    """Return a string which is the concatenation of the strings in the
    sequence.  The separator between elements is an empty string per
    default, you can define it with the optional parameter:

    .. sourcecode:: jinja

        {{ [1, 2, 3]|join('|') }}
            -> 1|2|3

        {{ [1, 2, 3]|join }}
            -> 123

    It is also possible to join certain attributes of an object:

    .. sourcecode:: jinja

        {{ users|join(', ', attribute='username') }}

    .. versionadded:: 2.6
       The `attribute` parameter was added.
    """
    if attribute is not None:
        value = imap(make_attrgetter(eval_ctx.environment, attribute), value)

    # No automatic escaping?  Joining is a lot easier then — just stringify.
    if not eval_ctx.autoescape:
        return text_type(d).join(imap(text_type, value))

    # If the delimiter doesn't have an html representation we check
    # if any of the items has.  If yes we do a coercion to Markup.
    if not hasattr(d, '__html__'):
        value = list(value)
        do_escape = False
        for idx, item in enumerate(value):
            if hasattr(item, '__html__'):
                do_escape = True
            else:
                value[idx] = text_type(item)
        # Escape the delimiter only when at least one item was markup,
        # so mixing safe and unsafe strings still escapes consistently.
        if do_escape:
            d = escape(d)
        else:
            d = text_type(d)
        return d.join(value)

    # Markup delimiter: soft_unicode preserves each item's safety.
    return soft_unicode(d).join(imap(soft_unicode, value))
|
||||
|
||||
|
||||
def do_center(value, width=80):
    """Center the value in a field of the given width."""
    text = text_type(value)
    return text.center(width)
|
||||
|
||||
|
||||
@environmentfilter
def do_first(environment, seq):
    """Return the first item of a sequence, or an undefined value when
    the sequence is empty.
    """
    iterator = iter(seq)
    try:
        return next(iterator)
    except StopIteration:
        return environment.undefined('No first item, sequence was empty.')
|
||||
|
||||
|
||||
@environmentfilter
def do_last(environment, seq):
    """Return the last item of a sequence, or an undefined value when
    the sequence is empty.
    """
    iterator = iter(reversed(seq))
    try:
        return next(iterator)
    except StopIteration:
        return environment.undefined('No last item, sequence was empty.')
|
||||
|
||||
|
||||
@environmentfilter
def do_random(environment, seq):
    """Return a random item from the sequence, or an undefined value when
    the sequence is empty.
    """
    try:
        return choice(seq)
    except IndexError:
        return environment.undefined('No random item, sequence was empty.')
|
||||
|
||||
|
||||
def do_filesizeformat(value, binary=False):
    """Format the value like a 'human-readable' file size (i.e. 13 kB,
    4.1 MB, 102 Bytes, etc).  Decimal prefixes (Mega, Giga, ...) are used
    by default; set the second parameter to `True` for binary prefixes
    (Mebi, Gibi, ...).
    """
    bytes = float(value)
    base = binary and 1024 or 1000
    if binary:
        prefixes = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    else:
        prefixes = ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    if bytes == 1:
        return '1 Byte'
    if bytes < base:
        return '%d Bytes' % bytes
    # Find the first unit bigger than the value; the prefix at index i
    # corresponds to base**(i+1), hence the (i + 2) exponent bound.
    for i, prefix in enumerate(prefixes):
        unit = base ** (i + 2)
        if bytes < unit:
            return '%.1f %s' % ((base * bytes / unit), prefix)
    # Fall through for astronomically large values: use the largest prefix.
    return '%.1f %s' % ((base * bytes / unit), prefix)
|
||||
|
||||
|
||||
def do_pprint(value, verbose=False):
    """Pretty print a variable — useful for debugging.

    A truthy ``verbose`` makes the output more verbose (this requires
    `pretty`).
    """
    return pformat(value, verbose=verbose)
|
||||
|
||||
|
||||
@evalcontextfilter
def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False):
    """Convert URLs in plain text into clickable links.

    Pass an additional integer to shorten the displayed urls to that many
    characters; a truthy third argument adds ``rel="nofollow"``:

    .. sourcecode:: jinja

        {{ mytext|urlize(40, true) }}
            links are shortened to 40 chars and defined with rel="nofollow"
    """
    result = urlize(value, trim_url_limit, nofollow)
    if eval_ctx.autoescape:
        result = Markup(result)
    return result
|
||||
|
||||
|
||||
def do_indent(s, width=4, indentfirst=False):
    """Return a copy of the string with each line indented by ``width``
    spaces (default 4).  The first line is left alone unless
    ``indentfirst`` is true:

    .. sourcecode:: jinja

        {{ mytext|indent(2, true) }}
            indent by two spaces and indent the first line too.
    """
    pad = u' ' * width
    body = (u'\n' + pad).join(s.splitlines())
    if indentfirst:
        return pad + body
    return body
|
||||
|
||||
|
||||
def do_truncate(s, length=255, killwords=False, end='...'):
    """Return a truncated copy of the string.  ``length`` (default 255)
    caps the size; with ``killwords`` true the text is cut exactly at
    ``length``, otherwise the last partial word is discarded.  Truncated
    text gets ``end`` (default ``"..."``) appended.

    .. sourcecode:: jinja

        {{ "foo bar"|truncate(5) }}
            -> "foo ..."
        {{ "foo bar"|truncate(5, True) }}
            -> "foo b..."
    """
    if len(s) <= length:
        return s
    if killwords:
        return s[:length] + end
    # Keep whole words while the running width (word + separator)
    # stays within the limit.
    kept = []
    used = 0
    for word in s.split(' '):
        used += len(word) + 1
        if used > length:
            break
        kept.append(word)
    kept.append(end)
    return u' '.join(kept)
|
||||
|
||||
@environmentfilter
def do_wordwrap(environment, s, width=79, break_long_words=True,
                wrapstring=None):
    """Return a copy of the string wrapped after ``width`` characters
    (default 79).  Set ``break_long_words`` to `false` to keep words
    longer than ``width`` intact.  Lines are joined with ``wrapstring``
    when given, otherwise with the environment's newline sequence.

    .. versionadded:: 2.7
       Added support for the `wrapstring` parameter.
    """
    joiner = wrapstring or environment.newline_sequence
    import textwrap
    wrapped = textwrap.wrap(s, width=width, expand_tabs=False,
                            replace_whitespace=False,
                            break_long_words=break_long_words)
    return joiner.join(wrapped)
|
||||
|
||||
|
||||
def do_wordcount(s):
    """Count the words in the string."""
    matches = _word_re.findall(s)
    return len(matches)
|
||||
|
||||
|
||||
def do_int(value, default=0):
    """Convert the value into an integer.  If the conversion fails the
    ``default`` (``0`` unless overridden) is returned instead.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        pass
    # this quirk is necessary so that "42.23"|int gives 42.
    try:
        return int(float(value))
    except (TypeError, ValueError):
        return default
|
||||
|
||||
|
||||
def do_float(value, default=0.0):
    """Convert the value into a floating point number.  If the conversion
    fails the ``default`` (``0.0`` unless overridden) is returned instead.
    """
    try:
        return float(value)
    except (TypeError, ValueError):
        return default
|
||||
|
||||
|
||||
def do_format(value, *args, **kwargs):
    """Apply python string formatting on an object:

    .. sourcecode:: jinja

        {{ "%s - %s"|format("Hello?", "Foo!") }}
            -> Hello? - Foo!
    """
    if args and kwargs:
        raise FilterArgumentError('can\'t handle positional and keyword '
                                  'arguments at the same time')
    # %-formatting accepts either a mapping or a tuple of positionals.
    params = kwargs or args
    return soft_unicode(value) % params
|
||||
|
||||
|
||||
def do_trim(value):
    """Strip leading and trailing whitespace."""
    text = soft_unicode(value)
    return text.strip()
|
||||
|
||||
|
||||
def do_striptags(value):
    """Strip SGML/XML tags and replace adjacent whitespace with a single
    space.
    """
    if hasattr(value, '__html__'):
        value = value.__html__()
    text = text_type(value)
    return Markup(text).striptags()
|
||||
|
||||
|
||||
def do_slice(value, slices, fill_with=None):
    """Slice an iterator into ``slices`` lists of items.  Useful for
    creating a div with three ul tags representing columns:

    .. sourcecode:: html+jinja

        <div class="columwrapper">
          {%- for column in items|slice(3) %}
            <ul class="column-{{ loop.index }}">
            {%- for item in column %}
              <li>{{ item }}</li>
            {%- endfor %}
            </ul>
          {%- endfor %}
        </div>

    The optional second argument fills up missing values on the last
    iteration.
    """
    seq = list(value)
    length = len(seq)
    per_slice = length // slices
    with_extra = length % slices
    offset = 0
    for n in range(slices):
        start = offset + n * per_slice
        # The first ``with_extra`` slices each absorb one leftover item.
        if n < with_extra:
            offset += 1
        end = offset + (n + 1) * per_slice
        column = seq[start:end]
        if fill_with is not None and n >= with_extra:
            column.append(fill_with)
        yield column
|
||||
|
||||
|
||||
def do_batch(value, linecount, fill_with=None):
    """Batch items: yields lists of at most ``linecount`` items, working
    like `slice` the other way round.  An optional second parameter fills
    up missing items in the final batch:

    .. sourcecode:: html+jinja

        <table>
        {%- for row in items|batch(3, '&nbsp;') %}
          <tr>
          {%- for column in row %}
            <td>{{ column }}</td>
          {%- endfor %}
          </tr>
        {%- endfor %}
        </table>
    """
    # NOTE: the original kept an unused local ``result = []``; it was dead
    # code (this function is a generator) and has been removed.
    tmp = []
    for item in value:
        if len(tmp) == linecount:
            yield tmp
            tmp = []
        tmp.append(item)
    if tmp:
        # Pad the final, partial batch when a filler was requested.
        if fill_with is not None and len(tmp) < linecount:
            tmp += [fill_with] * (linecount - len(tmp))
        yield tmp
|
||||
|
||||
|
||||
def do_round(value, precision=0, method='common'):
    """Round the number to a given precision.  The first parameter
    specifies the precision (default ``0``), the second the rounding
    method:

    - ``'common'`` rounds either up or down
    - ``'ceil'`` always rounds up
    - ``'floor'`` always rounds down

    If you don't specify a method ``'common'`` is used.

    .. sourcecode:: jinja

        {{ 42.55|round }}
            -> 43.0
        {{ 42.55|round(1, 'floor') }}
            -> 42.5

    Note that even if rounded to 0 precision, a float is returned.  If
    you need a real integer, pipe it through `int`:

    .. sourcecode:: jinja

        {{ 42.55|round|int }}
            -> 43
    """
    # Idiom fix: ``method not in (...)`` instead of ``not method in (...)``.
    if method not in ('common', 'ceil', 'floor'):
        raise FilterArgumentError('method must be common, ceil or floor')
    if method == 'common':
        return round(value, precision)
    # ceil/floor at a precision: scale up, apply, scale back down.
    func = getattr(math, method)
    return func(value * (10 ** precision)) / (10 ** precision)
|
||||
|
||||
|
||||
@environmentfilter
def do_groupby(environment, value, attribute):
    """Group a sequence of objects by a common attribute.

    If you for example have a list of dicts or objects that represent persons
    with `gender`, `first_name` and `last_name` attributes and you want to
    group all users by genders you can do something like the following
    snippet:

    .. sourcecode:: html+jinja

        <ul>
        {% for group in persons|groupby('gender') %}
            <li>{{ group.grouper }}<ul>
            {% for person in group.list %}
                <li>{{ person.first_name }} {{ person.last_name }}</li>
            {% endfor %}</ul></li>
        {% endfor %}
        </ul>

    Additionally it's possible to use tuple unpacking for the grouper and
    list:

    .. sourcecode:: html+jinja

        <ul>
        {% for grouper, list in persons|groupby('gender') %}
            ...
        {% endfor %}
        </ul>

    As you can see the item we're grouping by is stored in the `grouper`
    attribute and the `list` contains all the objects that have this grouper
    in common.

    .. versionchanged:: 2.6
       It's now possible to use dotted notation to group by the child
       attribute of another attribute.
    """
    expr = make_attrgetter(environment, attribute)
    # itertools.groupby only merges *adjacent* equal keys, so the input is
    # sorted by the same key expression first; the outer sorted() orders
    # the resulting (grouper, list) tuples.
    return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
|
||||
|
||||
|
||||
class _GroupTuple(tuple):
|
||||
__slots__ = ()
|
||||
grouper = property(itemgetter(0))
|
||||
list = property(itemgetter(1))
|
||||
|
||||
def __new__(cls, xxx_todo_changeme):
|
||||
(key, value) = xxx_todo_changeme
|
||||
return tuple.__new__(cls, (key, list(value)))
|
||||
|
||||
|
||||
@environmentfilter
def do_sum(environment, iterable, attribute=None, start=0):
    """Return the sum of a sequence of numbers plus the value of
    ``start`` (default 0).  For an empty sequence ``start`` is returned.

    It is also possible to sum up only certain attributes:

    .. sourcecode:: jinja

        Total: {{ items|sum(attribute='price') }}

    .. versionchanged:: 2.6
       The `attribute` parameter was added to allow suming up over
       attributes.  Also the `start` parameter was moved on to the right.
    """
    values = iterable
    if attribute is not None:
        values = imap(make_attrgetter(environment, attribute), values)
    return sum(values, start)
|
||||
|
||||
|
||||
def do_list(value):
    """Convert the value into a list.  A string becomes a list of its
    characters.
    """
    return [item for item in value]
|
||||
|
||||
|
||||
def do_mark_safe(value):
    """Mark the value as safe: with automatic escaping enabled this
    variable will not be escaped.
    """
    return Markup(value)
|
||||
|
||||
|
||||
def do_mark_unsafe(value):
    """Mark a value as unsafe — the reverse operation of :func:`safe`."""
    return text_type(value)
|
||||
|
||||
|
||||
def do_reverse(value):
    """Reverse the object, or return an iterator that iterates over it
    the other way round.
    """
    if isinstance(value, string_types):
        return value[::-1]
    try:
        return reversed(value)
    except TypeError:
        # No __reversed__/__len__: materialize and reverse in place.
        try:
            items = list(value)
            items.reverse()
            return items
        except TypeError:
            raise FilterArgumentError('argument must be iterable')
|
||||
|
||||
|
||||
@environmentfilter
def do_attr(environment, obj, name):
    """Get an attribute of an object.  ``foo|attr("bar")`` works like
    ``foo["bar"]`` just that always an attribute is returned and items are
    not looked up.

    See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more
    details.
    """
    # Attribute names must be native strings; non-encodable names fall
    # through to the undefined result below.
    try:
        name = str(name)
    except UnicodeError:
        pass
    else:
        try:
            value = getattr(obj, name)
        except AttributeError:
            pass
        else:
            # In a sandboxed environment, unsafe attributes yield a
            # special undefined instead of the raw value.
            if environment.sandboxed and not \
               environment.is_safe_attribute(obj, name, value):
                return environment.unsafe_undefined(obj, name)
            return value
    return environment.undefined(obj=obj, name=name)
|
||||
|
||||
|
||||
@contextfilter
def do_map(*args, **kwargs):
    """Applies a filter on a sequence of objects or looks up an attribute.
    This is useful when dealing with lists of objects but you are really
    only interested in a certain value of it.

    The basic usage is mapping on an attribute.  Imagine you have a list
    of users but you are only interested in a list of usernames:

    .. sourcecode:: jinja

        Users on this page: {{ users|map(attribute='username')|join(', ') }}

    Alternatively you can let it invoke a filter by passing the name of the
    filter and the arguments afterwards.  A good example would be applying a
    text conversion filter on a sequence:

    .. sourcecode:: jinja

        Users on this page: {{ titles|map('lower')|join(', ') }}

    .. versionadded:: 2.7
    """
    # *args arrives as (context, sequence, [filter name, filter args...]).
    context = args[0]
    seq = args[1]

    # Attribute mode: map(attribute='x') with no positional filter name.
    if len(args) == 2 and 'attribute' in kwargs:
        attribute = kwargs.pop('attribute')
        if kwargs:
            raise FilterArgumentError('Unexpected keyword argument %r' %
                                      next(iter(kwargs)))
        func = make_attrgetter(context.environment, attribute)
    else:
        # Filter mode: the third positional argument names the filter and
        # any remaining positionals/keywords are forwarded to it.
        try:
            name = args[2]
            args = args[3:]
        except LookupError:
            raise FilterArgumentError('map requires a filter argument')
        func = lambda item: context.environment.call_filter(
            name, item, args, kwargs, context=context)

    if seq:
        for item in seq:
            yield func(item)
|
||||
|
||||
|
||||
@contextfilter
def do_select(*args, **kwargs):
    """Filter a sequence of objects by applying a test to each object and
    keeping only those for which the test succeeds.

    Example usage:

    .. sourcecode:: jinja

        {{ numbers|select("odd") }}

    .. versionadded:: 2.7
    """
    # No attribute lookup; keep items whose test result is truthy.
    return _select_or_reject(args, kwargs, lambda x: x, False)
|
||||
|
||||
|
||||
@contextfilter
def do_reject(*args, **kwargs):
    """Filter a sequence of objects by applying a test to each object and
    discarding those for which the test succeeds.

    Example usage:

    .. sourcecode:: jinja

        {{ numbers|reject("odd") }}

    .. versionadded:: 2.7
    """
    # No attribute lookup; keep items whose test result is falsy.
    return _select_or_reject(args, kwargs, lambda x: not x, False)
|
||||
|
||||
|
||||
@contextfilter
def do_selectattr(*args, **kwargs):
    """Filter a sequence of objects by applying a test to an attribute of
    each object and keeping only those for which the test succeeds.

    Example usage:

    .. sourcecode:: jinja

        {{ users|selectattr("is_active") }}
        {{ users|selectattr("email", "none") }}

    .. versionadded:: 2.7
    """
    # Attribute lookup enabled; keep items whose test result is truthy.
    return _select_or_reject(args, kwargs, lambda x: x, True)
|
||||
|
||||
|
||||
@contextfilter
def do_rejectattr(*args, **kwargs):
    """Filter a sequence of objects by applying a test to an attribute of
    each object and discarding those for which the test succeeds.

    .. sourcecode:: jinja

        {{ users|rejectattr("is_active") }}
        {{ users|rejectattr("email", "none") }}

    .. versionadded:: 2.7
    """
    # Attribute lookup enabled; keep items whose test result is falsy.
    return _select_or_reject(args, kwargs, lambda x: not x, True)
|
||||
|
||||
|
||||
def _select_or_reject(args, kwargs, modfunc, lookup_attr):
    """Shared implementation for select/reject/selectattr/rejectattr.

    ``args`` is the raw positional tuple (context, sequence, [attribute,]
    [test name, test args...]); ``modfunc`` turns a test result into a
    keep/discard decision, and ``lookup_attr`` selects whether the test is
    applied to an attribute of each item or to the item itself.
    """
    context = args[0]
    seq = args[1]
    if lookup_attr:
        try:
            attr = args[2]
        except LookupError:
            raise FilterArgumentError('Missing parameter for attribute name')
        transfunc = make_attrgetter(context.environment, attr)
        # The attribute name consumed one positional slot.
        off = 1
    else:
        off = 0
        transfunc = lambda x: x

    # Optional test name plus its arguments; without one, plain truthiness
    # of the (transformed) item is tested.
    try:
        name = args[2 + off]
        args = args[3 + off:]
        func = lambda item: context.environment.call_test(
            name, item, args, kwargs)
    except LookupError:
        func = bool

    if seq:
        for item in seq:
            if modfunc(func(transfunc(item))):
                yield item
|
||||
|
||||
|
||||
#: Registry of all built-in filters, mapping the name usable in
#: templates to the implementing callable.  Plain builtins (``len``,
#: ``abs``) and library helpers (``escape``, ``soft_unicode``) are
#: registered directly.  Note: the duplicate ``'title'`` and
#: ``'capitalize'`` entries present previously were redundant (same
#: key, same value) and have been removed.
FILTERS = {
    'attr': do_attr,
    'replace': do_replace,
    'upper': do_upper,
    'lower': do_lower,
    'escape': escape,
    'e': escape,
    'forceescape': do_forceescape,
    'capitalize': do_capitalize,
    'title': do_title,
    'default': do_default,
    'd': do_default,
    'join': do_join,
    'count': len,
    'dictsort': do_dictsort,
    'sort': do_sort,
    'length': len,
    'reverse': do_reverse,
    'center': do_center,
    'indent': do_indent,
    'first': do_first,
    'last': do_last,
    'map': do_map,
    'random': do_random,
    'reject': do_reject,
    'rejectattr': do_rejectattr,
    'filesizeformat': do_filesizeformat,
    'pprint': do_pprint,
    'truncate': do_truncate,
    'wordwrap': do_wordwrap,
    'wordcount': do_wordcount,
    'int': do_int,
    'float': do_float,
    'string': soft_unicode,
    'list': do_list,
    'urlize': do_urlize,
    'format': do_format,
    'trim': do_trim,
    'striptags': do_striptags,
    'select': do_select,
    'selectattr': do_selectattr,
    'slice': do_slice,
    'batch': do_batch,
    'sum': do_sum,
    'abs': abs,
    'round': do_round,
    'groupby': do_groupby,
    'safe': do_mark_safe,
    'xmlattr': do_xmlattr,
    'urlencode': do_urlencode,
}
|
733
3rdparty/jinja2/lexer.py
vendored
Normal file
733
3rdparty/jinja2/lexer.py
vendored
Normal file
@ -0,0 +1,733 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.lexer
|
||||
~~~~~~~~~~~~
|
||||
|
||||
This module implements a Jinja / Python combination lexer. The
|
||||
`Lexer` class provided by this module is used to do some preprocessing
|
||||
for Jinja.
|
||||
|
||||
On the one hand it filters out invalid operators like the bitshift
|
||||
operators we don't allow in templates. On the other hand it separates
|
||||
template code and python code in expressions.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import re
|
||||
|
||||
from operator import itemgetter
|
||||
from collections import deque
|
||||
from jinja2.exceptions import TemplateSyntaxError
|
||||
from jinja2.utils import LRUCache
|
||||
from jinja2._compat import next, iteritems, implements_iterator, text_type, \
|
||||
intern
|
||||
|
||||
|
||||
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)

# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
# single- or double-quoted string with backslash escapes
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                       r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')

# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
try:
    compile('föö', '<unknown>', 'eval')
except SyntaxError:
    name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
else:
    from jinja2 import _stringdefs
    name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
                                         _stringdefs.xid_continue))

# floats need digits on both sides of the dot; the negative lookbehind
# keeps the fractional part of attribute access from matching
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')

# intern the token type strings and keep references to them so the
# rest of the lexer can compare token types with ``is``
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')

# bind operators to token types
operators = {
    '+': TOKEN_ADD,
    '-': TOKEN_SUB,
    '/': TOKEN_DIV,
    '//': TOKEN_FLOORDIV,
    '*': TOKEN_MUL,
    '%': TOKEN_MOD,
    '**': TOKEN_POW,
    '~': TOKEN_TILDE,
    '[': TOKEN_LBRACKET,
    ']': TOKEN_RBRACKET,
    '(': TOKEN_LPAREN,
    ')': TOKEN_RPAREN,
    '{': TOKEN_LBRACE,
    '}': TOKEN_RBRACE,
    '==': TOKEN_EQ,
    '!=': TOKEN_NE,
    '>': TOKEN_GT,
    '>=': TOKEN_GTEQ,
    '<': TOKEN_LT,
    '<=': TOKEN_LTEQ,
    '=': TOKEN_ASSIGN,
    '.': TOKEN_DOT,
    ':': TOKEN_COLON,
    '|': TOKEN_PIPE,
    ',': TOKEN_COMMA,
    ';': TOKEN_SEMICOLON
}

reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
# longest operators first so e.g. '**' is preferred over '*'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
                         sorted(operators, key=lambda x: -len(x))))

# token types the parser never sees and the wrapper drops entirely
# (previously TOKEN_WHITESPACE was listed twice; frozenset deduplicates
# anyway, so removing the repeat is purely cosmetic)
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
                            TOKEN_COMMENT_END, TOKEN_WHITESPACE,
                            TOKEN_LINECOMMENT_BEGIN,
                            TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT])
# token types that are dropped when their matched text is empty
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
                             TOKEN_COMMENT, TOKEN_LINECOMMENT])
|
||||
|
||||
|
||||
def _describe_token_type(token_type):
    """Return a human readable description for a token type."""
    # operator tokens are best described by their literal spelling
    try:
        return reverse_operators[token_type]
    except KeyError:
        pass
    descriptions = {
        TOKEN_COMMENT_BEGIN: 'begin of comment',
        TOKEN_COMMENT_END: 'end of comment',
        TOKEN_COMMENT: 'comment',
        TOKEN_LINECOMMENT: 'comment',
        TOKEN_BLOCK_BEGIN: 'begin of statement block',
        TOKEN_BLOCK_END: 'end of statement block',
        TOKEN_VARIABLE_BEGIN: 'begin of print statement',
        TOKEN_VARIABLE_END: 'end of print statement',
        TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
        TOKEN_LINESTATEMENT_END: 'end of line statement',
        TOKEN_DATA: 'template data / text',
        TOKEN_EOF: 'end of template',
    }
    # unknown types describe themselves
    return descriptions.get(token_type, token_type)
|
||||
|
||||
|
||||
def describe_token(token):
    """Returns a description of the token."""
    # name tokens are self-describing via their value; everything else
    # goes through the generic type description
    if token.type != 'name':
        return _describe_token_type(token.type)
    return token.value
|
||||
|
||||
|
||||
def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    # plain expressions are just a token type
    if ':' not in expr:
        return _describe_token_type(expr)
    # ``type:value`` form: a name expression is described by its value
    type, value = expr.split(':', 1)
    if type == 'name':
        return value
    return _describe_token_type(type)
|
||||
|
||||
|
||||
def count_newlines(value):
    """Count the number of newline characters in the string.  This is
    useful for extensions that filter a stream.
    """
    # one match per newline sequence (\r\n, \r or \n)
    return sum(1 for _ in newline_re.finditer(value))
|
||||
|
||||
|
||||
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    escape = re.escape
    # collect (priority, name, pattern) triples; the priority is the
    # delimiter length so longer delimiters are tried first
    rules = []
    rules.append((len(environment.comment_start_string), 'comment',
                  escape(environment.comment_start_string)))
    rules.append((len(environment.block_start_string), 'block',
                  escape(environment.block_start_string)))
    rules.append((len(environment.variable_start_string), 'variable',
                  escape(environment.variable_start_string)))

    if environment.line_statement_prefix is not None:
        # line statements only match at the start of a line
        rules.append((len(environment.line_statement_prefix), 'linestatement',
                      r'^[ \t\v]*' + escape(environment.line_statement_prefix)))
    if environment.line_comment_prefix is not None:
        # line comments may also follow non-whitespace on the same line
        rules.append((len(environment.line_comment_prefix), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*' +
                      escape(environment.line_comment_prefix)))

    rules.sort(reverse=True)
    # drop the priority component again
    return [rule[1:] for rule in rules]
|
||||
|
||||
|
||||
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        # error message and exception class to raise when triggered
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        # invoked by the tokenizer when the failure rule matches
        raise self.error_class(self.message, lineno, filename)
|
||||
|
||||
|
||||
class Token(tuple):
    """Token class."""
    __slots__ = ()
    # expose the three tuple fields under readable names
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        # intern the type so token types can be compared with ``is``
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        # operators render as their literal spelling, names as their
        # value, everything else as the bare type
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == 'name':
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression.  This can either be a
        token type or ``'token_type:token_value'``.  This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ':' in expr:
            return expr.split(':', 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return 'Token(%r, %r, %r)' % (
            self.lineno,
            self.type,
            self.value
        )
|
||||
|
||||
|
||||
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams.  Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        token = self.stream.current
        # stop (and close the stream) once EOF is the current token
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        # advance the underlying stream and hand out the old token
        next(self.stream)
        return token
|
||||
|
||||
|
||||
@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\s.  The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead.  The current active token is stored as :attr:`current`.
    """

    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        # tokens pushed back via :meth:`push`; served before the
        # underlying iterator is consulted again
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        self.current = Token(1, TOKEN_INITIAL, '')
        # advance once so ``current`` holds the first real token
        next(self)

    def __iter__(self):
        return TokenStreamIterator(self)

    def __bool__(self):
        # truthy while there is anything left to consume
        return bool(self._pushed) or self.current.type is not TOKEN_EOF
    __nonzero__ = __bool__  # py2

    eos = property(lambda x: not x, doc="Are we at the end of the stream?")

    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self):
        """Look at the next token."""
        # advance, remember the new current token, then rewind by
        # pushing it back while restoring the previous current token
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n=1):
        """Got n tokens ahead."""
        for x in range(n):
            next(self)

    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self):
        """Go one token ahead and return the old one"""
        rv = self.current
        if self._pushed:
            # pushed-back tokens win over the generator
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                # generator exhausted: pin ``current`` to EOF
                self.close()
        return rv

    def close(self):
        """Close the stream."""
        self.current = Token(self.current.lineno, TOKEN_EOF, '')
        self._iter = None
        self.closed = True

    def expect(self, expr):
        """Expect a given token type and return it.  This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError('unexpected end of template, '
                                          'expected %r.' % expr,
                                          self.current.lineno,
                                          self.name, self.filename)
            raise TemplateSyntaxError("expected token %r, got %r" %
                                      (expr, describe_token(self.current)),
                                      self.current.lineno,
                                      self.name, self.filename)
        # return the matched token, advancing the stream afterwards
        try:
            return self.current
        finally:
            next(self)
|
||||
|
||||
|
||||
def get_lexer(environment):
    """Return a lexer which is probably cached."""
    # every environment setting that influences lexing is part of the
    # cache key, so environments with identical syntax share a lexer
    key = tuple(getattr(environment, name) for name in (
        'block_start_string',
        'block_end_string',
        'variable_start_string',
        'variable_end_string',
        'comment_start_string',
        'comment_end_string',
        'line_statement_prefix',
        'line_comment_prefix',
        'trim_blocks',
        'lstrip_blocks',
        'newline_sequence',
        'keep_trailing_newline',
    ))
    cached = _lexer_cache.get(key)
    if cached is not None:
        return cached
    lexer = Lexer(environment)
    _lexer_cache[key] = lexer
    return lexer
|
||||
|
||||
|
||||
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment):
        # shortcuts
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape

        # lexing rules for tags
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None)
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and '\\n?' or ''

        # strip leading spaces if lstrip_blocks is enabled
        prefix_re = {}
        if environment.lstrip_blocks:
            # use '{%+' to manually disable lstrip_blocks behavior
            no_lstrip_re = e('+')
            # detect overlap between block and variable or comment strings
            block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
            # make sure we don't mistake a block for a variable or a comment
            m = block_diff.match(environment.comment_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
            m = block_diff.match(environment.variable_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''

            # detect overlap between comment and variable strings
            comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
            m = comment_diff.match(environment.variable_start_string)
            no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''

            lstrip_re = r'^[ \t]*'
            block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
                    lstrip_re,
                    e(environment.block_start_string),
                    no_lstrip_re,
                    e(environment.block_start_string),
                    )
            comment_prefix_re = r'%s%s%s|%s\+?' % (
                    lstrip_re,
                    e(environment.comment_start_string),
                    no_variable_re,
                    e(environment.comment_start_string),
                    )
            prefix_re['block'] = block_prefix_re
            prefix_re['comment'] = comment_prefix_re
        else:
            block_prefix_re = '%s' % e(environment.block_start_string)

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # global lexing rules: state name -> list of
        # (compiled regex, token type(s), new state) triples
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
                        e(environment.block_start_string),
                        block_prefix_re,
                        e(environment.block_end_string),
                        e(environment.block_end_string)
                    )] + [
                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
                        for n, r in root_tag_rules
                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                # data
                (c('.+'), TOKEN_DATA, None)
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (c('(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), TOKEN_BLOCK_END, '#pop'),
            ] + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (c('\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), TOKEN_VARIABLE_END, '#pop')
            ] + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    block_prefix_re,
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
            ] + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
                 TOKEN_LINECOMMENT_END), '#pop')
            ]
        }

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream.
        """
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == 'linestatement_begin':
                token = 'block_begin'
            elif token == 'linestatement_end':
                token = 'block_end'
            # we are not interested in those tokens in the parser
            elif token in ('raw_begin', 'raw_end'):
                continue
            elif token == 'data':
                value = self._normalize_newlines(value)
            elif token == 'keyword':
                token = value
            elif token == 'name':
                value = str(value)
            elif token == 'string':
                # try to unescape string
                try:
                    value = self._normalize_newlines(value[1:-1]) \
                        .encode('ascii', 'backslashreplace') \
                        .decode('unicode-escape')
                except Exception as e:
                    msg = str(e).split(':')[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
                # if we can express it as bytestring (ascii only)
                # we do that for support of semi broken APIs
                # as datetime.datetime.strftime.  On python 3 this
                # call becomes a noop thanks to 2to3
                try:
                    value = str(value)
                except UnicodeError:
                    pass
            elif token == 'integer':
                value = int(value)
            elif token == 'float':
                value = float(value)
            elif token == 'operator':
                token = operators[value]
            yield Token(lineno, token, value)

    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator.  Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()
        if self.keep_trailing_newline and source:
            # splitlines() drops a trailing newline; re-append an empty
            # entry so it survives the join below
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']
        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'
        statetokens = self.rules[stack[-1]]
        source_length = len(source)

        balancing_stack = []

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                        tokens in ('variable_end', 'block_end',
                                   'linestatement_end'):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # strings as token just are yielded as it.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
|
471
3rdparty/jinja2/loaders.py
vendored
Normal file
471
3rdparty/jinja2/loaders.py
vendored
Normal file
@ -0,0 +1,471 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.loaders
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Jinja loader classes.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import weakref
|
||||
from types import ModuleType
|
||||
from os import path
|
||||
from hashlib import sha1
|
||||
from jinja2.exceptions import TemplateNotFound
|
||||
from jinja2.utils import open_if_exists, internalcode
|
||||
from jinja2._compat import string_types, iteritems
|
||||
|
||||
|
||||
def split_template_path(template):
    """Split a path into segments and perform a sanity check.  If it detects
    '..' in the path it will raise a `TemplateNotFound` error.
    """
    pieces = []
    for piece in template.split('/'):
        # reject OS path separators and parent references so lookups
        # cannot escape the search path
        unsafe = (path.sep in piece
                  or (path.altsep and path.altsep in piece)
                  or piece == path.pardir)
        if unsafe:
            raise TemplateNotFound(template)
        # drop empty segments and current-directory markers
        if piece and piece != '.':
            pieces.append(piece)
    return pieces
|
||||
|
||||
|
||||
class BaseLoader(object):
    """Baseclass for all loaders.  Subclass this and override `get_source` to
    implement a custom loading mechanism.  The environment provides a
    `get_template` method that calls the loader's `load` method to get the
    :class:`Template` object.

    A very basic example for a loader that looks up templates on the file
    system could look like this::

        from jinja2 import BaseLoader, TemplateNotFound
        from os.path import join, exists, getmtime

        class MyLoader(BaseLoader):

            def __init__(self, path):
                self.path = path

            def get_source(self, environment, template):
                path = join(self.path, template)
                if not exists(path):
                    raise TemplateNotFound(template)
                mtime = getmtime(path)
                with file(path) as f:
                    source = f.read().decode('utf-8')
                return source, path, lambda: mtime == getmtime(path)
    """

    #: if set to `False` it indicates that the loader cannot provide access
    #: to the source of templates.
    #:
    #: .. versionadded:: 2.4
    has_source_access = True

    def get_source(self, environment, template):
        """Get the template source, filename and reload helper for a template.
        It's passed the environment and template name and has to return a
        tuple in the form ``(source, filename, uptodate)`` or raise a
        `TemplateNotFound` error if it can't locate the template.

        The source part of the returned tuple must be the source of the
        template as unicode string or a ASCII bytestring.  The filename should
        be the name of the file on the filesystem if it was loaded from there,
        otherwise `None`.  The filename is used by python for the tracebacks
        if no loader extension is used.

        The last item in the tuple is the `uptodate` function.  If auto
        reloading is enabled it's always called to check if the template
        changed.  No arguments are passed so the function must store the
        old state somewhere (for example in a closure).  If it returns `False`
        the template will be reloaded.
        """
        if not self.has_source_access:
            raise RuntimeError('%s cannot provide access to the source' %
                               self.__class__.__name__)
        # default implementation: the template cannot be found
        raise TemplateNotFound(template)

    def list_templates(self):
        """Iterates over all templates.  If the loader does not support that
        it should raise a :exc:`TypeError` which is the default behavior.
        """
        raise TypeError('this loader cannot iterate over all templates')

    @internalcode
    def load(self, environment, name, globals=None):
        """Loads a template.  This method looks up the template in the cache
        or loads one by calling :meth:`get_source`.  Subclasses should not
        override this method as loaders working on collections of other
        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
        will not call this method but `get_source` directly.
        """
        code = None
        if globals is None:
            globals = {}

        # first we try to get the source for this template together
        # with the filename and the uptodate function.
        source, filename, uptodate = self.get_source(environment, name)

        # try to load the code from the bytecode cache if there is a
        # bytecode cache configured.
        bcc = environment.bytecode_cache
        if bcc is not None:
            bucket = bcc.get_bucket(environment, name, filename, source)
            code = bucket.code

        # if we don't have code so far (not cached, no longer up to
        # date) etc. we compile the template
        if code is None:
            code = environment.compile(source, name, filename)

        # if the bytecode cache is available and the bucket doesn't
        # have a code so far, we give the bucket the new code and put
        # it back to the bytecode cache.
        if bcc is not None and bucket.code is None:
            bucket.code = code
            bcc.set_bucket(bucket)

        return environment.template_class.from_code(environment, code,
                                                    globals, uptodate)
|
||||
|
||||
|
||||
class FileSystemLoader(BaseLoader):
|
||||
"""Loads templates from the file system. This loader can find templates
|
||||
in folders on the file system and is the preferred way to load them.
|
||||
|
||||
The loader takes the path to the templates as string, or if multiple
|
||||
locations are wanted a list of them which is then looked up in the
|
||||
given order:
|
||||
|
||||
>>> loader = FileSystemLoader('/path/to/templates')
|
||||
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
|
||||
|
||||
Per default the template encoding is ``'utf-8'`` which can be changed
|
||||
by setting the `encoding` parameter to something else.
|
||||
"""
|
||||
|
||||
def __init__(self, searchpath, encoding='utf-8'):
|
||||
if isinstance(searchpath, string_types):
|
||||
searchpath = [searchpath]
|
||||
self.searchpath = list(searchpath)
|
||||
self.encoding = encoding
|
||||
|
||||
def get_source(self, environment, template):
|
||||
pieces = split_template_path(template)
|
||||
for searchpath in self.searchpath:
|
||||
filename = path.join(searchpath, *pieces)
|
||||
f = open_if_exists(filename)
|
||||
if f is None:
|
||||
continue
|
||||
try:
|
||||
contents = f.read().decode(self.encoding)
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
mtime = path.getmtime(filename)
|
||||
def uptodate():
|
||||
try:
|
||||
return path.getmtime(filename) == mtime
|
||||
except OSError:
|
||||
return False
|
||||
return contents, filename, uptodate
|
||||
raise TemplateNotFound(template)
|
||||
|
||||
def list_templates(self):
|
||||
found = set()
|
||||
for searchpath in self.searchpath:
|
||||
for dirpath, dirnames, filenames in os.walk(searchpath):
|
||||
for filename in filenames:
|
||||
template = os.path.join(dirpath, filename) \
|
||||
[len(searchpath):].strip(os.path.sep) \
|
||||
.replace(os.path.sep, '/')
|
||||
if template[:2] == './':
|
||||
template = template[2:]
|
||||
if template not in found:
|
||||
found.add(template)
|
||||
return sorted(found)
|
||||
|
||||
|
||||
class PackageLoader(BaseLoader):
|
||||
"""Load templates from python eggs or packages. It is constructed with
|
||||
the name of the python package and the path to the templates in that
|
||||
package::
|
||||
|
||||
loader = PackageLoader('mypackage', 'views')
|
||||
|
||||
If the package path is not given, ``'templates'`` is assumed.
|
||||
|
||||
Per default the template encoding is ``'utf-8'`` which can be changed
|
||||
by setting the `encoding` parameter to something else. Due to the nature
|
||||
of eggs it's only possible to reload templates if the package was loaded
|
||||
from the file system and not a zip file.
|
||||
"""
|
||||
|
||||
def __init__(self, package_name, package_path='templates',
|
||||
encoding='utf-8'):
|
||||
from pkg_resources import DefaultProvider, ResourceManager, \
|
||||
get_provider
|
||||
provider = get_provider(package_name)
|
||||
self.encoding = encoding
|
||||
self.manager = ResourceManager()
|
||||
self.filesystem_bound = isinstance(provider, DefaultProvider)
|
||||
self.provider = provider
|
||||
self.package_path = package_path
|
||||
|
||||
def get_source(self, environment, template):
|
||||
pieces = split_template_path(template)
|
||||
p = '/'.join((self.package_path,) + tuple(pieces))
|
||||
if not self.provider.has_resource(p):
|
||||
raise TemplateNotFound(template)
|
||||
|
||||
filename = uptodate = None
|
||||
if self.filesystem_bound:
|
||||
filename = self.provider.get_resource_filename(self.manager, p)
|
||||
mtime = path.getmtime(filename)
|
||||
def uptodate():
|
||||
try:
|
||||
return path.getmtime(filename) == mtime
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
source = self.provider.get_resource_string(self.manager, p)
|
||||
return source.decode(self.encoding), filename, uptodate
|
||||
|
||||
def list_templates(self):
|
||||
path = self.package_path
|
||||
if path[:2] == './':
|
||||
path = path[2:]
|
||||
elif path == '.':
|
||||
path = ''
|
||||
offset = len(path)
|
||||
results = []
|
||||
def _walk(path):
|
||||
for filename in self.provider.resource_listdir(path):
|
||||
fullname = path + '/' + filename
|
||||
if self.provider.resource_isdir(fullname):
|
||||
_walk(fullname)
|
||||
else:
|
||||
results.append(fullname[offset:].lstrip('/'))
|
||||
_walk(path)
|
||||
results.sort()
|
||||
return results
|
||||
|
||||
|
||||
class DictLoader(BaseLoader):
|
||||
"""Loads a template from a python dict. It's passed a dict of unicode
|
||||
strings bound to template names. This loader is useful for unittesting:
|
||||
|
||||
>>> loader = DictLoader({'index.html': 'source here'})
|
||||
|
||||
Because auto reloading is rarely useful this is disabled per default.
|
||||
"""
|
||||
|
||||
def __init__(self, mapping):
|
||||
self.mapping = mapping
|
||||
|
||||
def get_source(self, environment, template):
|
||||
if template in self.mapping:
|
||||
source = self.mapping[template]
|
||||
return source, None, lambda: source == self.mapping.get(template)
|
||||
raise TemplateNotFound(template)
|
||||
|
||||
def list_templates(self):
|
||||
return sorted(self.mapping)
|
||||
|
||||
|
||||
class FunctionLoader(BaseLoader):
|
||||
"""A loader that is passed a function which does the loading. The
|
||||
function becomes the name of the template passed and has to return either
|
||||
an unicode string with the template source, a tuple in the form ``(source,
|
||||
filename, uptodatefunc)`` or `None` if the template does not exist.
|
||||
|
||||
>>> def load_template(name):
|
||||
... if name == 'index.html':
|
||||
... return '...'
|
||||
...
|
||||
>>> loader = FunctionLoader(load_template)
|
||||
|
||||
The `uptodatefunc` is a function that is called if autoreload is enabled
|
||||
and has to return `True` if the template is still up to date. For more
|
||||
details have a look at :meth:`BaseLoader.get_source` which has the same
|
||||
return value.
|
||||
"""
|
||||
|
||||
def __init__(self, load_func):
|
||||
self.load_func = load_func
|
||||
|
||||
def get_source(self, environment, template):
|
||||
rv = self.load_func(template)
|
||||
if rv is None:
|
||||
raise TemplateNotFound(template)
|
||||
elif isinstance(rv, string_types):
|
||||
return rv, None, None
|
||||
return rv
|
||||
|
||||
|
||||
class PrefixLoader(BaseLoader):
|
||||
"""A loader that is passed a dict of loaders where each loader is bound
|
||||
to a prefix. The prefix is delimited from the template by a slash per
|
||||
default, which can be changed by setting the `delimiter` argument to
|
||||
something else::
|
||||
|
||||
loader = PrefixLoader({
|
||||
'app1': PackageLoader('mypackage.app1'),
|
||||
'app2': PackageLoader('mypackage.app2')
|
||||
})
|
||||
|
||||
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
|
||||
by loading ``'app2/index.html'`` the file from the second.
|
||||
"""
|
||||
|
||||
def __init__(self, mapping, delimiter='/'):
|
||||
self.mapping = mapping
|
||||
self.delimiter = delimiter
|
||||
|
||||
def get_loader(self, template):
|
||||
try:
|
||||
prefix, name = template.split(self.delimiter, 1)
|
||||
loader = self.mapping[prefix]
|
||||
except (ValueError, KeyError):
|
||||
raise TemplateNotFound(template)
|
||||
return loader, name
|
||||
|
||||
def get_source(self, environment, template):
|
||||
loader, name = self.get_loader(template)
|
||||
try:
|
||||
return loader.get_source(environment, name)
|
||||
except TemplateNotFound:
|
||||
# re-raise the exception with the correct fileame here.
|
||||
# (the one that includes the prefix)
|
||||
raise TemplateNotFound(template)
|
||||
|
||||
@internalcode
|
||||
def load(self, environment, name, globals=None):
|
||||
loader, local_name = self.get_loader(name)
|
||||
try:
|
||||
return loader.load(environment, local_name)
|
||||
except TemplateNotFound:
|
||||
# re-raise the exception with the correct fileame here.
|
||||
# (the one that includes the prefix)
|
||||
raise TemplateNotFound(name)
|
||||
|
||||
def list_templates(self):
|
||||
result = []
|
||||
for prefix, loader in iteritems(self.mapping):
|
||||
for template in loader.list_templates():
|
||||
result.append(prefix + self.delimiter + template)
|
||||
return result
|
||||
|
||||
|
||||
class ChoiceLoader(BaseLoader):
|
||||
"""This loader works like the `PrefixLoader` just that no prefix is
|
||||
specified. If a template could not be found by one loader the next one
|
||||
is tried.
|
||||
|
||||
>>> loader = ChoiceLoader([
|
||||
... FileSystemLoader('/path/to/user/templates'),
|
||||
... FileSystemLoader('/path/to/system/templates')
|
||||
... ])
|
||||
|
||||
This is useful if you want to allow users to override builtin templates
|
||||
from a different location.
|
||||
"""
|
||||
|
||||
def __init__(self, loaders):
|
||||
self.loaders = loaders
|
||||
|
||||
def get_source(self, environment, template):
|
||||
for loader in self.loaders:
|
||||
try:
|
||||
return loader.get_source(environment, template)
|
||||
except TemplateNotFound:
|
||||
pass
|
||||
raise TemplateNotFound(template)
|
||||
|
||||
@internalcode
|
||||
def load(self, environment, name, globals=None):
|
||||
for loader in self.loaders:
|
||||
try:
|
||||
return loader.load(environment, name, globals)
|
||||
except TemplateNotFound:
|
||||
pass
|
||||
raise TemplateNotFound(name)
|
||||
|
||||
def list_templates(self):
|
||||
found = set()
|
||||
for loader in self.loaders:
|
||||
found.update(loader.list_templates())
|
||||
return sorted(found)
|
||||
|
||||
|
||||
class _TemplateModule(ModuleType):
|
||||
"""Like a normal module but with support for weak references"""
|
||||
|
||||
|
||||
class ModuleLoader(BaseLoader):
|
||||
"""This loader loads templates from precompiled templates.
|
||||
|
||||
Example usage:
|
||||
|
||||
>>> loader = ChoiceLoader([
|
||||
... ModuleLoader('/path/to/compiled/templates'),
|
||||
... FileSystemLoader('/path/to/templates')
|
||||
... ])
|
||||
|
||||
Templates can be precompiled with :meth:`Environment.compile_templates`.
|
||||
"""
|
||||
|
||||
has_source_access = False
|
||||
|
||||
def __init__(self, path):
|
||||
package_name = '_jinja2_module_templates_%x' % id(self)
|
||||
|
||||
# create a fake module that looks for the templates in the
|
||||
# path given.
|
||||
mod = _TemplateModule(package_name)
|
||||
if isinstance(path, string_types):
|
||||
path = [path]
|
||||
else:
|
||||
path = list(path)
|
||||
mod.__path__ = path
|
||||
|
||||
sys.modules[package_name] = weakref.proxy(mod,
|
||||
lambda x: sys.modules.pop(package_name, None))
|
||||
|
||||
# the only strong reference, the sys.modules entry is weak
|
||||
# so that the garbage collector can remove it once the
|
||||
# loader that created it goes out of business.
|
||||
self.module = mod
|
||||
self.package_name = package_name
|
||||
|
||||
@staticmethod
|
||||
def get_template_key(name):
|
||||
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
|
||||
|
||||
@staticmethod
|
||||
def get_module_filename(name):
|
||||
return ModuleLoader.get_template_key(name) + '.py'
|
||||
|
||||
@internalcode
|
||||
def load(self, environment, name, globals=None):
|
||||
key = self.get_template_key(name)
|
||||
module = '%s.%s' % (self.package_name, key)
|
||||
mod = getattr(self.module, module, None)
|
||||
if mod is None:
|
||||
try:
|
||||
mod = __import__(module, None, None, ['root'])
|
||||
except ImportError:
|
||||
raise TemplateNotFound(name)
|
||||
|
||||
# remove the entry from sys.modules, we only want the attribute
|
||||
# on the module object we have stored on the loader.
|
||||
sys.modules.pop(module, None)
|
||||
|
||||
return environment.template_class.from_module_dict(
|
||||
environment, mod.__dict__, globals)
|
234
3rdparty/jinja2/markupsafe/__init__.py
vendored
Normal file
234
3rdparty/jinja2/markupsafe/__init__.py
vendored
Normal file
@ -0,0 +1,234 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
markupsafe
|
||||
~~~~~~~~~~
|
||||
|
||||
Implements a Markup string.
|
||||
|
||||
:copyright: (c) 2010 by Armin Ronacher.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import re
|
||||
from _compat import text_type, string_types, int_types, \
|
||||
unichr, PY2
|
||||
|
||||
|
||||
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
|
||||
|
||||
|
||||
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
|
||||
_entity_re = re.compile(r'&([^;]+);')
|
||||
|
||||
|
||||
class Markup(text_type):
|
||||
r"""Marks a string as being safe for inclusion in HTML/XML output without
|
||||
needing to be escaped. This implements the `__html__` interface a couple
|
||||
of frameworks and web applications use. :class:`Markup` is a direct
|
||||
subclass of `unicode` and provides all the methods of `unicode` just that
|
||||
it escapes arguments passed and always returns `Markup`.
|
||||
|
||||
The `escape` function returns markup objects so that double escaping can't
|
||||
happen.
|
||||
|
||||
The constructor of the :class:`Markup` class can be used for three
|
||||
different things: When passed an unicode object it's assumed to be safe,
|
||||
when passed an object with an HTML representation (has an `__html__`
|
||||
method) that representation is used, otherwise the object passed is
|
||||
converted into a unicode string and then assumed to be safe:
|
||||
|
||||
>>> Markup("Hello <em>World</em>!")
|
||||
Markup(u'Hello <em>World</em>!')
|
||||
>>> class Foo(object):
|
||||
... def __html__(self):
|
||||
... return '<a href="#">foo</a>'
|
||||
...
|
||||
>>> Markup(Foo())
|
||||
Markup(u'<a href="#">foo</a>')
|
||||
|
||||
If you want object passed being always treated as unsafe you can use the
|
||||
:meth:`escape` classmethod to create a :class:`Markup` object:
|
||||
|
||||
>>> Markup.escape("Hello <em>World</em>!")
|
||||
Markup(u'Hello <em>World</em>!')
|
||||
|
||||
Operations on a markup string are markup aware which means that all
|
||||
arguments are passed through the :func:`escape` function:
|
||||
|
||||
>>> em = Markup("<em>%s</em>")
|
||||
>>> em % "foo & bar"
|
||||
Markup(u'<em>foo & bar</em>')
|
||||
>>> strong = Markup("<strong>%(text)s</strong>")
|
||||
>>> strong % {'text': '<blink>hacker here</blink>'}
|
||||
Markup(u'<strong><blink>hacker here</blink></strong>')
|
||||
>>> Markup("<em>Hello</em> ") + "<foo>"
|
||||
Markup(u'<em>Hello</em> <foo>')
|
||||
"""
|
||||
__slots__ = ()
|
||||
|
||||
def __new__(cls, base=u'', encoding=None, errors='strict'):
|
||||
if hasattr(base, '__html__'):
|
||||
base = base.__html__()
|
||||
if encoding is None:
|
||||
return text_type.__new__(cls, base)
|
||||
return text_type.__new__(cls, base, encoding, errors)
|
||||
|
||||
def __html__(self):
|
||||
return self
|
||||
|
||||
def __add__(self, other):
|
||||
if isinstance(other, string_types) or hasattr(other, '__html__'):
|
||||
return self.__class__(super(Markup, self).__add__(self.escape(other)))
|
||||
return NotImplemented
|
||||
|
||||
def __radd__(self, other):
|
||||
if hasattr(other, '__html__') or isinstance(other, string_types):
|
||||
return self.escape(other).__add__(self)
|
||||
return NotImplemented
|
||||
|
||||
def __mul__(self, num):
|
||||
if isinstance(num, int_types):
|
||||
return self.__class__(text_type.__mul__(self, num))
|
||||
return NotImplemented
|
||||
__rmul__ = __mul__
|
||||
|
||||
def __mod__(self, arg):
|
||||
if isinstance(arg, tuple):
|
||||
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
|
||||
else:
|
||||
arg = _MarkupEscapeHelper(arg, self.escape)
|
||||
return self.__class__(text_type.__mod__(self, arg))
|
||||
|
||||
def __repr__(self):
|
||||
return '%s(%s)' % (
|
||||
self.__class__.__name__,
|
||||
text_type.__repr__(self)
|
||||
)
|
||||
|
||||
def join(self, seq):
|
||||
return self.__class__(text_type.join(self, map(self.escape, seq)))
|
||||
join.__doc__ = text_type.join.__doc__
|
||||
|
||||
def split(self, *args, **kwargs):
|
||||
return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
|
||||
split.__doc__ = text_type.split.__doc__
|
||||
|
||||
def rsplit(self, *args, **kwargs):
|
||||
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
|
||||
rsplit.__doc__ = text_type.rsplit.__doc__
|
||||
|
||||
def splitlines(self, *args, **kwargs):
|
||||
return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
|
||||
splitlines.__doc__ = text_type.splitlines.__doc__
|
||||
|
||||
def unescape(self):
|
||||
r"""Unescape markup again into an text_type string. This also resolves
|
||||
known HTML4 and XHTML entities:
|
||||
|
||||
>>> Markup("Main » <em>About</em>").unescape()
|
||||
u'Main \xbb <em>About</em>'
|
||||
"""
|
||||
from _constants import HTML_ENTITIES
|
||||
def handle_match(m):
|
||||
name = m.group(1)
|
||||
if name in HTML_ENTITIES:
|
||||
return unichr(HTML_ENTITIES[name])
|
||||
try:
|
||||
if name[:2] in ('#x', '#X'):
|
||||
return unichr(int(name[2:], 16))
|
||||
elif name.startswith('#'):
|
||||
return unichr(int(name[1:]))
|
||||
except ValueError:
|
||||
pass
|
||||
return u''
|
||||
return _entity_re.sub(handle_match, text_type(self))
|
||||
|
||||
def striptags(self):
|
||||
r"""Unescape markup into an text_type string and strip all tags. This
|
||||
also resolves known HTML4 and XHTML entities. Whitespace is
|
||||
normalized to one:
|
||||
|
||||
>>> Markup("Main » <em>About</em>").striptags()
|
||||
u'Main \xbb About'
|
||||
"""
|
||||
stripped = u' '.join(_striptags_re.sub('', self).split())
|
||||
return Markup(stripped).unescape()
|
||||
|
||||
@classmethod
|
||||
def escape(cls, s):
|
||||
"""Escape the string. Works like :func:`escape` with the difference
|
||||
that for subclasses of :class:`Markup` this function would return the
|
||||
correct subclass.
|
||||
"""
|
||||
rv = escape(s)
|
||||
if rv.__class__ is not cls:
|
||||
return cls(rv)
|
||||
return rv
|
||||
|
||||
def make_wrapper(name):
|
||||
orig = getattr(text_type, name)
|
||||
def func(self, *args, **kwargs):
|
||||
args = _escape_argspec(list(args), enumerate(args), self.escape)
|
||||
#_escape_argspec(kwargs, kwargs.iteritems(), None)
|
||||
return self.__class__(orig(self, *args, **kwargs))
|
||||
func.__name__ = orig.__name__
|
||||
func.__doc__ = orig.__doc__
|
||||
return func
|
||||
|
||||
for method in '__getitem__', 'capitalize', \
|
||||
'title', 'lower', 'upper', 'replace', 'ljust', \
|
||||
'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
|
||||
'translate', 'expandtabs', 'swapcase', 'zfill':
|
||||
locals()[method] = make_wrapper(method)
|
||||
|
||||
# new in python 2.5
|
||||
if hasattr(text_type, 'partition'):
|
||||
def partition(self, sep):
|
||||
return tuple(map(self.__class__,
|
||||
text_type.partition(self, self.escape(sep))))
|
||||
def rpartition(self, sep):
|
||||
return tuple(map(self.__class__,
|
||||
text_type.rpartition(self, self.escape(sep))))
|
||||
|
||||
# new in python 2.6
|
||||
if hasattr(text_type, 'format'):
|
||||
format = make_wrapper('format')
|
||||
|
||||
# not in python 3
|
||||
if hasattr(text_type, '__getslice__'):
|
||||
__getslice__ = make_wrapper('__getslice__')
|
||||
|
||||
del method, make_wrapper
|
||||
|
||||
|
||||
def _escape_argspec(obj, iterable, escape):
|
||||
"""Helper for various string-wrapped functions."""
|
||||
for key, value in iterable:
|
||||
if hasattr(value, '__html__') or isinstance(value, string_types):
|
||||
obj[key] = escape(value)
|
||||
return obj
|
||||
|
||||
|
||||
class _MarkupEscapeHelper(object):
|
||||
"""Helper for Markup.__mod__"""
|
||||
|
||||
def __init__(self, obj, escape):
|
||||
self.obj = obj
|
||||
self.escape = escape
|
||||
|
||||
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
|
||||
__unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
|
||||
__repr__ = lambda s: str(s.escape(repr(s.obj)))
|
||||
__int__ = lambda s: int(s.obj)
|
||||
__float__ = lambda s: float(s.obj)
|
||||
|
||||
|
||||
# we have to import it down here as the speedups and native
|
||||
# modules imports the markup type which is define above.
|
||||
try:
|
||||
from _speedups import escape, escape_silent, soft_unicode
|
||||
except ImportError:
|
||||
from _native import escape, escape_silent, soft_unicode
|
||||
|
||||
if not PY2:
|
||||
soft_str = soft_unicode
|
||||
__all__.append('soft_str')
|
24
3rdparty/jinja2/markupsafe/_compat.py
vendored
Normal file
24
3rdparty/jinja2/markupsafe/_compat.py
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
markupsafe._compat
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Compatibility module for different Python versions.
|
||||
|
||||
:copyright: (c) 2013 by Armin Ronacher.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import sys
|
||||
|
||||
PY2 = sys.version_info[0] == 2
|
||||
|
||||
if not PY2:
|
||||
text_type = str
|
||||
string_types = (str,)
|
||||
unichr = chr
|
||||
int_types = (int,)
|
||||
else:
|
||||
text_type = unicode
|
||||
string_types = (str, unicode)
|
||||
unichr = unichr
|
||||
int_types = (int, long)
|
267
3rdparty/jinja2/markupsafe/_constants.py
vendored
Normal file
267
3rdparty/jinja2/markupsafe/_constants.py
vendored
Normal file
@ -0,0 +1,267 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
markupsafe._constants
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Highlevel implementation of the Markup string.
|
||||
|
||||
:copyright: (c) 2010 by Armin Ronacher.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
|
||||
HTML_ENTITIES = {
|
||||
'AElig': 198,
|
||||
'Aacute': 193,
|
||||
'Acirc': 194,
|
||||
'Agrave': 192,
|
||||
'Alpha': 913,
|
||||
'Aring': 197,
|
||||
'Atilde': 195,
|
||||
'Auml': 196,
|
||||
'Beta': 914,
|
||||
'Ccedil': 199,
|
||||
'Chi': 935,
|
||||
'Dagger': 8225,
|
||||
'Delta': 916,
|
||||
'ETH': 208,
|
||||
'Eacute': 201,
|
||||
'Ecirc': 202,
|
||||
'Egrave': 200,
|
||||
'Epsilon': 917,
|
||||
'Eta': 919,
|
||||
'Euml': 203,
|
||||
'Gamma': 915,
|
||||
'Iacute': 205,
|
||||
'Icirc': 206,
|
||||
'Igrave': 204,
|
||||
'Iota': 921,
|
||||
'Iuml': 207,
|
||||
'Kappa': 922,
|
||||
'Lambda': 923,
|
||||
'Mu': 924,
|
||||
'Ntilde': 209,
|
||||
'Nu': 925,
|
||||
'OElig': 338,
|
||||
'Oacute': 211,
|
||||
'Ocirc': 212,
|
||||
'Ograve': 210,
|
||||
'Omega': 937,
|
||||
'Omicron': 927,
|
||||
'Oslash': 216,
|
||||
'Otilde': 213,
|
||||
'Ouml': 214,
|
||||
'Phi': 934,
|
||||
'Pi': 928,
|
||||
'Prime': 8243,
|
||||
'Psi': 936,
|
||||
'Rho': 929,
|
||||
'Scaron': 352,
|
||||
'Sigma': 931,
|
||||
'THORN': 222,
|
||||
'Tau': 932,
|
||||
'Theta': 920,
|
||||
'Uacute': 218,
|
||||
'Ucirc': 219,
|
||||
'Ugrave': 217,
|
||||
'Upsilon': 933,
|
||||
'Uuml': 220,
|
||||
'Xi': 926,
|
||||
'Yacute': 221,
|
||||
'Yuml': 376,
|
||||
'Zeta': 918,
|
||||
'aacute': 225,
|
||||
'acirc': 226,
|
||||
'acute': 180,
|
||||
'aelig': 230,
|
||||
'agrave': 224,
|
||||
'alefsym': 8501,
|
||||
'alpha': 945,
|
||||
'amp': 38,
|
||||
'and': 8743,
|
||||
'ang': 8736,
|
||||
'apos': 39,
|
||||
'aring': 229,
|
||||
'asymp': 8776,
|
||||
'atilde': 227,
|
||||
'auml': 228,
|
||||
'bdquo': 8222,
|
||||
'beta': 946,
|
||||
'brvbar': 166,
|
||||
'bull': 8226,
|
||||
'cap': 8745,
|
||||
'ccedil': 231,
|
||||
'cedil': 184,
|
||||
'cent': 162,
|
||||
'chi': 967,
|
||||
'circ': 710,
|
||||
'clubs': 9827,
|
||||
'cong': 8773,
|
||||
'copy': 169,
|
||||
'crarr': 8629,
|
||||
'cup': 8746,
|
||||
'curren': 164,
|
||||
'dArr': 8659,
|
||||
'dagger': 8224,
|
||||
'darr': 8595,
|
||||
'deg': 176,
|
||||
'delta': 948,
|
||||
'diams': 9830,
|
||||
'divide': 247,
|
||||
'eacute': 233,
|
||||
'ecirc': 234,
|
||||
'egrave': 232,
|
||||
'empty': 8709,
|
||||
'emsp': 8195,
|
||||
'ensp': 8194,
|
||||
'epsilon': 949,
|
||||
'equiv': 8801,
|
||||
'eta': 951,
|
||||
'eth': 240,
|
||||
'euml': 235,
|
||||
'euro': 8364,
|
||||
'exist': 8707,
|
||||
'fnof': 402,
|
||||
'forall': 8704,
|
||||
'frac12': 189,
|
||||
'frac14': 188,
|
||||
'frac34': 190,
|
||||
'frasl': 8260,
|
||||
'gamma': 947,
|
||||
'ge': 8805,
|
||||
'gt': 62,
|
||||
'hArr': 8660,
|
||||
'harr': 8596,
|
||||
'hearts': 9829,
|
||||
'hellip': 8230,
|
||||
'iacute': 237,
|
||||
'icirc': 238,
|
||||
'iexcl': 161,
|
||||
'igrave': 236,
|
||||
'image': 8465,
|
||||
'infin': 8734,
|
||||
'int': 8747,
|
||||
'iota': 953,
|
||||
'iquest': 191,
|
||||
'isin': 8712,
|
||||
'iuml': 239,
|
||||
'kappa': 954,
|
||||
'lArr': 8656,
|
||||
'lambda': 955,
|
||||
'lang': 9001,
|
||||
'laquo': 171,
|
||||
'larr': 8592,
|
||||
'lceil': 8968,
|
||||
'ldquo': 8220,
|
||||
'le': 8804,
|
||||
'lfloor': 8970,
|
||||
'lowast': 8727,
|
||||
'loz': 9674,
|
||||
'lrm': 8206,
|
||||
'lsaquo': 8249,
|
||||
'lsquo': 8216,
|
||||
'lt': 60,
|
||||
'macr': 175,
|
||||
'mdash': 8212,
|
||||
'micro': 181,
|
||||
'middot': 183,
|
||||
'minus': 8722,
|
||||
'mu': 956,
|
||||
'nabla': 8711,
|
||||
'nbsp': 160,
|
||||
'ndash': 8211,
|
||||
'ne': 8800,
|
||||
'ni': 8715,
|
||||
'not': 172,
|
||||
'notin': 8713,
|
||||
'nsub': 8836,
|
||||
'ntilde': 241,
|
||||
'nu': 957,
|
||||
'oacute': 243,
|
||||
'ocirc': 244,
|
||||
'oelig': 339,
|
||||
'ograve': 242,
|
||||
'oline': 8254,
|
||||
'omega': 969,
|
||||
'omicron': 959,
|
||||
'oplus': 8853,
|
||||
'or': 8744,
|
||||
'ordf': 170,
|
||||
'ordm': 186,
|
||||
'oslash': 248,
|
||||
'otilde': 245,
|
||||
'otimes': 8855,
|
||||
'ouml': 246,
|
||||
'para': 182,
|
||||
'part': 8706,
|
||||
'permil': 8240,
|
||||
'perp': 8869,
|
||||
'phi': 966,
|
||||
'pi': 960,
|
||||
'piv': 982,
|
||||
'plusmn': 177,
|
||||
'pound': 163,
|
||||
'prime': 8242,
|
||||
'prod': 8719,
|
||||
'prop': 8733,
|
||||
'psi': 968,
|
||||
'quot': 34,
|
||||
'rArr': 8658,
|
||||
'radic': 8730,
|
||||
'rang': 9002,
|
||||
'raquo': 187,
|
||||
'rarr': 8594,
|
||||
'rceil': 8969,
|
||||
'rdquo': 8221,
|
||||
'real': 8476,
|
||||
'reg': 174,
|
||||
'rfloor': 8971,
|
||||
'rho': 961,
|
||||
'rlm': 8207,
|
||||
'rsaquo': 8250,
|
||||
'rsquo': 8217,
|
||||
'sbquo': 8218,
|
||||
'scaron': 353,
|
||||
'sdot': 8901,
|
||||
'sect': 167,
|
||||
'shy': 173,
|
||||
'sigma': 963,
|
||||
'sigmaf': 962,
|
||||
'sim': 8764,
|
||||
'spades': 9824,
|
||||
'sub': 8834,
|
||||
'sube': 8838,
|
||||
'sum': 8721,
|
||||
'sup': 8835,
|
||||
'sup1': 185,
|
||||
'sup2': 178,
|
||||
'sup3': 179,
|
||||
'supe': 8839,
|
||||
'szlig': 223,
|
||||
'tau': 964,
|
||||
'there4': 8756,
|
||||
'theta': 952,
|
||||
'thetasym': 977,
|
||||
'thinsp': 8201,
|
||||
'thorn': 254,
|
||||
'tilde': 732,
|
||||
'times': 215,
|
||||
'trade': 8482,
|
||||
'uArr': 8657,
|
||||
'uacute': 250,
|
||||
'uarr': 8593,
|
||||
'ucirc': 251,
|
||||
'ugrave': 249,
|
||||
'uml': 168,
|
||||
'upsih': 978,
|
||||
'upsilon': 965,
|
||||
'uuml': 252,
|
||||
'weierp': 8472,
|
||||
'xi': 958,
|
||||
'yacute': 253,
|
||||
'yen': 165,
|
||||
'yuml': 255,
|
||||
'zeta': 950,
|
||||
'zwj': 8205,
|
||||
'zwnj': 8204
|
||||
}
|
45
3rdparty/jinja2/markupsafe/_native.py
vendored
Normal file
45
3rdparty/jinja2/markupsafe/_native.py
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
markupsafe._native
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Native Python implementation the C module is not compiled.
|
||||
|
||||
:copyright: (c) 2010 by Armin Ronacher.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
from _compat import text_type
|
||||
|
||||
|
||||
def escape(s):
|
||||
"""Convert the characters &, <, >, ' and " in string s to HTML-safe
|
||||
sequences. Use this if you need to display text that might contain
|
||||
such characters in HTML. Marks return value as markup string.
|
||||
"""
|
||||
if hasattr(s, '__html__'):
|
||||
return s.__html__()
|
||||
return Markup(text_type(s)
|
||||
.replace('&', '&')
|
||||
.replace('>', '>')
|
||||
.replace('<', '<')
|
||||
.replace("'", ''')
|
||||
.replace('"', '"')
|
||||
)
|
||||
|
||||
|
||||
def escape_silent(s):
|
||||
"""Like :func:`escape` but converts `None` into an empty
|
||||
markup string.
|
||||
"""
|
||||
if s is None:
|
||||
return Markup()
|
||||
return escape(s)
|
||||
|
||||
|
||||
def soft_unicode(s):
|
||||
"""Make a string unicode if it isn't already. That way a markup
|
||||
string is not converted back to unicode.
|
||||
"""
|
||||
if not isinstance(s, text_type):
|
||||
s = text_type(s)
|
||||
return s
|
103
3rdparty/jinja2/meta.py
vendored
Normal file
103
3rdparty/jinja2/meta.py
vendored
Normal file
@ -0,0 +1,103 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.meta
|
||||
~~~~~~~~~~~
|
||||
|
||||
This module implements various functions that exposes information about
|
||||
templates that might be interesting for various kinds of applications.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
from jinja2 import nodes
|
||||
from jinja2.compiler import CodeGenerator
|
||||
from jinja2._compat import string_types
|
||||
|
||||
|
||||
class TrackingCodeGenerator(CodeGenerator):
|
||||
"""We abuse the code generator for introspection."""
|
||||
|
||||
def __init__(self, environment):
|
||||
CodeGenerator.__init__(self, environment, '<introspection>',
|
||||
'<introspection>')
|
||||
self.undeclared_identifiers = set()
|
||||
|
||||
def write(self, x):
|
||||
"""Don't write."""
|
||||
|
||||
def pull_locals(self, frame):
|
||||
"""Remember all undeclared identifiers."""
|
||||
self.undeclared_identifiers.update(frame.identifiers.undeclared)
|
||||
|
||||
|
||||
def find_undeclared_variables(ast):
|
||||
"""Returns a set of all variables in the AST that will be looked up from
|
||||
the context at runtime. Because at compile time it's not known which
|
||||
variables will be used depending on the path the execution takes at
|
||||
runtime, all variables are returned.
|
||||
|
||||
>>> from jinja2 import Environment, meta
|
||||
>>> env = Environment()
|
||||
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
|
||||
>>> meta.find_undeclared_variables(ast)
|
||||
set(['bar'])
|
||||
|
||||
.. admonition:: Implementation
|
||||
|
||||
Internally the code generator is used for finding undeclared variables.
|
||||
This is good to know because the code generator might raise a
|
||||
:exc:`TemplateAssertionError` during compilation and as a matter of
|
||||
fact this function can currently raise that exception as well.
|
||||
"""
|
||||
codegen = TrackingCodeGenerator(ast.environment)
|
||||
codegen.visit(ast)
|
||||
return codegen.undeclared_identifiers
|
||||
|
||||
|
||||
def find_referenced_templates(ast):
|
||||
"""Finds all the referenced templates from the AST. This will return an
|
||||
iterator over all the hardcoded template extensions, inclusions and
|
||||
imports. If dynamic inheritance or inclusion is used, `None` will be
|
||||
yielded.
|
||||
|
||||
>>> from jinja2 import Environment, meta
|
||||
>>> env = Environment()
|
||||
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
|
||||
>>> list(meta.find_referenced_templates(ast))
|
||||
['layout.html', None]
|
||||
|
||||
This function is useful for dependency tracking. For example if you want
|
||||
to rebuild parts of the website after a layout template has changed.
|
||||
"""
|
||||
for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
|
||||
nodes.Include)):
|
||||
if not isinstance(node.template, nodes.Const):
|
||||
# a tuple with some non consts in there
|
||||
if isinstance(node.template, (nodes.Tuple, nodes.List)):
|
||||
for template_name in node.template.items:
|
||||
# something const, only yield the strings and ignore
|
||||
# non-string consts that really just make no sense
|
||||
if isinstance(template_name, nodes.Const):
|
||||
if isinstance(template_name.value, string_types):
|
||||
yield template_name.value
|
||||
# something dynamic in there
|
||||
else:
|
||||
yield None
|
||||
# something dynamic we don't know about here
|
||||
else:
|
||||
yield None
|
||||
continue
|
||||
# constant is a basestring, direct template name
|
||||
if isinstance(node.template.value, string_types):
|
||||
yield node.template.value
|
||||
# a tuple or list (latter *should* not happen) made of consts,
|
||||
# yield the consts that are strings. We could warn here for
|
||||
# non string values
|
||||
elif isinstance(node, nodes.Include) and \
|
||||
isinstance(node.template.value, (tuple, list)):
|
||||
for template_name in node.template.value:
|
||||
if isinstance(template_name, string_types):
|
||||
yield template_name
|
||||
# something else we don't care about, we could warn here
|
||||
else:
|
||||
yield None
|
914
3rdparty/jinja2/nodes.py
vendored
Normal file
914
3rdparty/jinja2/nodes.py
vendored
Normal file
@ -0,0 +1,914 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.nodes
|
||||
~~~~~~~~~~~~
|
||||
|
||||
This module implements additional nodes derived from the ast base node.
|
||||
|
||||
It also provides some node tree helper functions like `in_lineno` and
|
||||
`get_nodes` used by the parser and translator in order to normalize
|
||||
python and jinja nodes.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import operator
|
||||
|
||||
from collections import deque
|
||||
from jinja2.utils import Markup
|
||||
from jinja2._compat import next, izip, with_metaclass, text_type, \
|
||||
method_type, function_type
|
||||
|
||||
|
||||
#: the types we support for context functions
_context_function_types = (function_type, method_type)


#: maps binary operator tokens to the Python callables used for
#: compile-time constant folding (see ``BinExpr.as_const``)
_binop_to_func = {
    '*': operator.mul,
    '/': operator.truediv,
    '//': operator.floordiv,
    '**': operator.pow,
    '%': operator.mod,
    '+': operator.add,
    '-': operator.sub
}

#: maps unary operator tokens to their constant-folding callables
#: (see ``UnaryExpr.as_const``)
_uaop_to_func = {
    'not': operator.not_,
    '+': operator.pos,
    '-': operator.neg
}

#: maps comparison operator names to their constant-folding callables;
#: ``in``/``notin`` have no ``operator`` module counterpart, hence lambdas
_cmpop_to_func = {
    'eq': operator.eq,
    'ne': operator.ne,
    'gt': operator.gt,
    'gteq': operator.ge,
    'lt': operator.lt,
    'lteq': operator.le,
    'in': lambda a, b: a in b,
    'notin': lambda a, b: a not in b
}
|
||||
|
||||
|
||||
class Impossible(Exception):
    """Raised if the node could not perform a requested action.

    Typically raised by ``as_const`` implementations when an expression
    cannot be folded into a constant at compile time.
    """
|
||||
|
||||
|
||||
class NodeType(type):
    """A metaclass for nodes that handles the field and attribute
    inheritance.  fields and attributes from the parent class are
    automatically forwarded to the child."""

    def __new__(cls, name, bases, d):
        # node classes are single-inheritance only; the layout merge
        # below relies on exactly one base class
        assert len(bases) == 1, 'multiple inheritance not allowed'
        for attr in ('fields', 'attributes'):
            merged = list(getattr(bases[0], attr, ()))
            merged.extend(d.get(attr, ()))
            # a name declared both on the base and the subclass would
            # produce an ambiguous layout
            assert len(merged) == len(set(merged)), 'layout conflict'
            d[attr] = tuple(merged)
        d.setdefault('abstract', False)
        return type.__new__(cls, name, bases, d)
|
||||
|
||||
|
||||
class EvalContext(object):
    """Holds evaluation time information.  Custom attributes can be attached
    to it in extensions.
    """

    def __init__(self, environment, template_name=None):
        self.environment = environment
        # the environment's autoescape setting may be a plain flag or a
        # callable deciding per template name
        autoescape = environment.autoescape
        if callable(autoescape):
            self.autoescape = autoescape(template_name)
        else:
            self.autoescape = autoescape
        self.volatile = False

    def save(self):
        """Snapshot the current state so it can be restored later."""
        return dict(self.__dict__)

    def revert(self, old):
        """Restore a state previously returned by :meth:`save`."""
        self.__dict__.clear()
        self.__dict__.update(old)
|
||||
|
||||
|
||||
def get_eval_context(node, ctx):
    """Return *ctx* if one was given, otherwise build a fresh
    :class:`EvalContext` from the node's attached environment.

    Raises a :exc:`RuntimeError` if no context is passed and the node
    has no environment attached either.
    """
    if ctx is not None:
        return ctx
    if node.environment is None:
        raise RuntimeError('if no eval context is passed, the '
                           'node must have an attached '
                           'environment.')
    return EvalContext(node.environment)
|
||||
|
||||
|
||||
class Node(with_metaclass(NodeType, object)):
    """Baseclass for all Jinja2 nodes. There are a number of nodes available
    of different types. There are four major types:

    - :class:`Stmt`: statements
    - :class:`Expr`: expressions
    - :class:`Helper`: helper nodes
    - :class:`Template`: the outermost wrapper node

    All nodes have fields and attributes. Fields may be other nodes, lists,
    or arbitrary values. Fields are passed to the constructor as regular
    positional arguments, attributes as keyword arguments. Each node has
    two attributes: `lineno` (the line number of the node) and `environment`.
    The `environment` attribute is set at the end of the parsing process for
    all nodes automatically.
    """
    fields = ()
    attributes = ('lineno', 'environment')
    abstract = True

    def __init__(self, *fields, **attributes):
        if self.abstract:
            raise TypeError('abstract nodes are not instanciable')
        if fields:
            # positional arguments must cover *all* declared fields or
            # none at all
            if len(fields) != len(self.fields):
                if not self.fields:
                    raise TypeError('%r takes 0 arguments' %
                                    self.__class__.__name__)
                raise TypeError('%r takes 0 or %d argument%s' % (
                    self.__class__.__name__,
                    len(self.fields),
                    len(self.fields) != 1 and 's' or ''
                ))
            for name, arg in izip(self.fields, fields):
                setattr(self, name, arg)
        # attributes not supplied as keyword arguments default to None
        for attr in self.attributes:
            setattr(self, attr, attributes.pop(attr, None))
        if attributes:
            raise TypeError('unknown attribute %r' %
                            next(iter(attributes)))

    def iter_fields(self, exclude=None, only=None):
        """This method iterates over all fields that are defined and yields
        ``(key, value)`` tuples. Per default all fields are returned, but
        it's possible to limit that to some fields by providing the `only`
        parameter or to exclude some using the `exclude` parameter. Both
        should be sets or tuples of field names.
        """
        for name in self.fields:
            if (exclude is only is None) or \
               (exclude is not None and name not in exclude) or \
               (only is not None and name in only):
                try:
                    yield name, getattr(self, name)
                except AttributeError:
                    # a field may legitimately be unset (zero-argument
                    # construction); simply skip it
                    pass

    def iter_child_nodes(self, exclude=None, only=None):
        """Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values of they are nodes. If the value
        of a field is a list all the nodes in that list are returned.
        """
        for field, item in self.iter_fields(exclude, only):
            if isinstance(item, list):
                for n in item:
                    if isinstance(n, Node):
                        yield n
            elif isinstance(item, Node):
                yield item

    def find(self, node_type):
        """Find the first node of a given type. If no such node exists the
        return value is `None`.
        """
        for result in self.find_all(node_type):
            return result

    def find_all(self, node_type):
        """Find all the nodes of a given type. If the type is a tuple,
        the check is performed for any of the tuple items.

        Performs a depth-first traversal of the subtree; the node itself
        is not tested, only its descendants.
        """
        for child in self.iter_child_nodes():
            if isinstance(child, node_type):
                yield child
            for result in child.find_all(node_type):
                yield result

    def set_ctx(self, ctx):
        """Reset the context of a node and all child nodes. Per default the
        parser will all generate nodes that have a 'load' context as it's the
        most common one. This method is used in the parser to set assignment
        targets and other nodes to a store context.
        """
        # breadth-first, iterative traversal to avoid recursion limits
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'ctx' in node.fields:
                node.ctx = ctx
            todo.extend(node.iter_child_nodes())
        return self

    def set_lineno(self, lineno, override=False):
        """Set the line numbers of the node and children.

        By default only nodes that have no line number yet are updated;
        pass ``override=True`` to overwrite existing line numbers too.
        """
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'lineno' in node.attributes:
                if node.lineno is None or override:
                    node.lineno = lineno
            todo.extend(node.iter_child_nodes())
        return self

    def set_environment(self, environment):
        """Set the environment for all nodes."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            node.environment = environment
            todo.extend(node.iter_child_nodes())
        return self

    def __eq__(self, other):
        # nodes are equal when they are of the same type and all their
        # fields compare equal (attributes such as lineno are ignored)
        return type(self) is type(other) and \
               tuple(self.iter_fields()) == tuple(other.iter_fields())

    def __ne__(self, other):
        return not self.__eq__(other)

    # Restore Python 2 hashing behavior on Python 3
    __hash__ = object.__hash__

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
                      arg in self.fields)
        )
|
||||
|
||||
|
||||
class Stmt(Node):
    """Base node for all statements."""
    abstract = True


class Helper(Node):
    """Nodes that exist in a specific context only."""
    abstract = True


class Template(Node):
    """Node that represents a template. This must be the outermost node that
    is passed to the compiler.
    """
    fields = ('body',)


class Output(Stmt):
    """A node that holds multiple expressions which are then printed out.
    This is used both for the `print` statement and the regular template data.
    """
    fields = ('nodes',)


class Extends(Stmt):
    """Represents an extends statement."""
    fields = ('template',)


class For(Stmt):
    """The for loop. `target` is the target for the iteration (usually a
    :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
    of nodes that are used as loop-body, and `else_` a list of nodes for the
    `else` block. If no else node exists it has to be an empty list.

    For filtered nodes an expression can be stored as `test`, otherwise `None`.
    """
    fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')


class If(Stmt):
    """If `test` is true, `body` is rendered, else `else_`."""
    fields = ('test', 'body', 'else_')


class Macro(Stmt):
    """A macro definition. `name` is the name of the macro, `args` a list of
    arguments and `defaults` a list of defaults if there are any. `body` is
    a list of nodes for the macro body.
    """
    fields = ('name', 'args', 'defaults', 'body')


class CallBlock(Stmt):
    """Like a macro without a name but a call instead. `call` is called with
    the unnamed macro as `caller` argument this node holds.
    """
    fields = ('call', 'args', 'defaults', 'body')


class FilterBlock(Stmt):
    """Node for filter sections."""
    fields = ('body', 'filter')


class Block(Stmt):
    """A node that represents a block."""
    fields = ('name', 'body', 'scoped')


class Include(Stmt):
    """A node that represents the include tag."""
    fields = ('template', 'with_context', 'ignore_missing')


class Import(Stmt):
    """A node that represents the import tag."""
    fields = ('template', 'target', 'with_context')


class FromImport(Stmt):
    """A node that represents the from import tag. It's important to not
    pass unsafe names to the name attribute. The compiler translates the
    attribute lookups directly into getattr calls and does *not* use the
    subscript callback of the interface. As exported variables may not
    start with double underscores (which the parser asserts) this is not a
    problem for regular Jinja code, but if this node is used in an extension
    extra care must be taken.

    The list of names may contain tuples if aliases are wanted.
    """
    fields = ('template', 'names', 'with_context')


class ExprStmt(Stmt):
    """A statement that evaluates an expression and discards the result."""
    fields = ('node',)


class Assign(Stmt):
    """Assigns an expression to a target."""
    fields = ('target', 'node')
|
||||
|
||||
|
||||
class Expr(Node):
    """Baseclass for all expressions."""
    abstract = True

    def as_const(self, eval_ctx=None):
        """Return the value of the expression as constant or raise
        :exc:`Impossible` if this was not possible.

        An :class:`EvalContext` can be provided, if none is given
        a default context is created which requires the nodes to have
        an attached environment.

        .. versionchanged:: 2.4
           the `eval_ctx` parameter was added.
        """
        # base implementation: an arbitrary expression is never constant
        raise Impossible()

    def can_assign(self):
        """Check if it's possible to assign something to this node."""
        return False
||||
class BinExpr(Expr):
    """Baseclass for all binary expressions.

    Subclasses set :attr:`operator` to one of the keys of
    ``_binop_to_func`` so constant folding can look up the matching
    Python callable.
    """
    fields = ('left', 'right')
    operator = None
    abstract = True

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_binops:
            raise Impossible()
        f = _binop_to_func[self.operator]
        try:
            return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
        except Exception:
            # any runtime failure (TypeError, ZeroDivisionError, ...)
            # simply means the fold is not possible
            raise Impossible()


class UnaryExpr(Expr):
    """Baseclass for all unary expressions.

    Subclasses set :attr:`operator` to one of the keys of
    ``_uaop_to_func``.
    """
    fields = ('node',)
    operator = None
    abstract = True

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_unops:
            raise Impossible()
        f = _uaop_to_func[self.operator]
        try:
            return f(self.node.as_const(eval_ctx))
        except Exception:
            raise Impossible()
|
||||
|
||||
|
||||
class Name(Expr):
    """Looks up a name or stores a value in a name.
    The `ctx` of the node can be one of the following values:

    -   `store`: store a value in the name
    -   `load`: load that name
    -   `param`: like `store` but if the name was defined as function parameter.
    """
    fields = ('name', 'ctx')

    def can_assign(self):
        # the boolean/none keywords of both Jinja syntaxes are reserved
        # and may never be assignment targets
        return self.name not in ('true', 'false', 'none',
                                 'True', 'False', 'None')
|
||||
|
||||
|
||||
class Literal(Expr):
    """Baseclass for literals."""
    abstract = True


class Const(Literal):
    """All constant values. The parser will return this node for simple
    constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true).
    """
    fields = ('value',)

    def as_const(self, eval_ctx=None):
        # a constant is trivially its own constant value
        return self.value

    @classmethod
    def from_untrusted(cls, value, lineno=None, environment=None):
        """Return a const object if the value is representable as
        constant value in the generated code, otherwise it will raise
        an `Impossible` exception.
        """
        from .compiler import has_safe_repr
        if not has_safe_repr(value):
            raise Impossible()
        return cls(value, lineno=lineno, environment=environment)


class TemplateData(Literal):
    """A constant template string."""
    fields = ('data',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # in a volatile context the autoescape state is unknown, so the
        # markup decision cannot be made at compile time
        if eval_ctx.volatile:
            raise Impossible()
        if eval_ctx.autoescape:
            return Markup(self.data)
        return self.data
|
||||
|
||||
|
||||
class Tuple(Literal):
    """For loop unpacking and some other things like multiple arguments
    for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
    is used for loading the names or storing.
    """
    fields = ('items', 'ctx')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return tuple(x.as_const(eval_ctx) for x in self.items)

    def can_assign(self):
        # a tuple is assignable only if every element is
        for item in self.items:
            if not item.can_assign():
                return False
        return True


class List(Literal):
    """Any list literal such as ``[1, 2, 3]``"""
    fields = ('items',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return [x.as_const(eval_ctx) for x in self.items]


class Dict(Literal):
    """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
    :class:`Pair` nodes.
    """
    fields = ('items',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # each Pair folds to a (key, value) tuple
        return dict(x.as_const(eval_ctx) for x in self.items)


class Pair(Helper):
    """A key, value pair for dicts."""
    fields = ('key', 'value')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)


class Keyword(Helper):
    """A key, value pair for keyword arguments where key is a string."""
    fields = ('key', 'value')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # the key is a plain string, only the value needs folding
        return self.key, self.value.as_const(eval_ctx)
|
||||
|
||||
|
||||
class CondExpr(Expr):
    """A conditional expression (inline if expression). (``{{
    foo if bar else baz }}``)
    """
    fields = ('test', 'expr1', 'expr2')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if self.test.as_const(eval_ctx):
            return self.expr1.as_const(eval_ctx)

        # if we evaluate to an undefined object, we better do that at runtime
        if self.expr2 is None:
            raise Impossible()

        return self.expr2.as_const(eval_ctx)
|
||||
|
||||
|
||||
class Filter(Expr):
    """This node applies a filter on an expression. `name` is the name of
    the filter, the rest of the fields are the same as for :class:`Call`.

    If the `node` of a filter is `None` the contents of the last buffer are
    filtered. Buffers are created by macros and filter blocks.
    """
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # buffer filters (node is None) depend on runtime state and can
        # never be folded; neither can anything in a volatile context
        if eval_ctx.volatile or self.node is None:
            raise Impossible()
        # we have to be careful here because we call filter_ below.
        # if this variable would be called filter, 2to3 would wrap the
        # call in a list beause it is assuming we are talking about the
        # builtin filter function here which no longer returns a list in
        # python 3. because of that, do not rename filter_ to filter!
        filter_ = self.environment.filters.get(self.name)
        # context filters need the render context which does not exist
        # at compile time
        if filter_ is None or getattr(filter_, 'contextfilter', False):
            raise Impossible()
        obj = self.node.as_const(eval_ctx)
        args = [x.as_const(eval_ctx) for x in self.args]
        # eval-context and environment filters receive their extra first
        # argument explicitly
        if getattr(filter_, 'evalcontextfilter', False):
            args.insert(0, eval_ctx)
        elif getattr(filter_, 'environmentfilter', False):
            args.insert(0, self.environment)
        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return filter_(obj, *args, **kwargs)
        except Exception:
            raise Impossible()
|
||||
|
||||
|
||||
class Test(Expr):
    """Applies a test on an expression. `name` is the name of the test, the
    rest of the fields are the same as for :class:`Call`.
    """
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')


class Call(Expr):
    """Calls an expression. `args` is a list of arguments, `kwargs` a list
    of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
    and `dyn_kwargs` has to be either `None` or a node that is used as
    node for dynamic positional (``*args``) or keyword (``**kwargs``)
    arguments.
    """
    fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        obj = self.node.as_const(eval_ctx)

        # don't evaluate context functions
        args = [x.as_const(eval_ctx) for x in self.args]
        if isinstance(obj, _context_function_types):
            # context functions need the render context, which is only
            # available at runtime
            if getattr(obj, 'contextfunction', False):
                raise Impossible()
            elif getattr(obj, 'evalcontextfunction', False):
                args.insert(0, eval_ctx)
            elif getattr(obj, 'environmentfunction', False):
                args.insert(0, self.environment)

        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return obj(*args, **kwargs)
        except Exception:
            raise Impossible()
|
||||
|
||||
|
||||
class Getitem(Expr):
    """Get an attribute or item from an expression and prefer the item."""
    fields = ('node', 'arg', 'ctx')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # only loads can be folded; stores have runtime side effects
        if self.ctx != 'load':
            raise Impossible()
        try:
            return self.environment.getitem(self.node.as_const(eval_ctx),
                                            self.arg.as_const(eval_ctx))
        except Exception:
            raise Impossible()

    def can_assign(self):
        return False


class Getattr(Expr):
    """Get an attribute or item from an expression that is a ascii-only
    bytestring and prefer the attribute.
    """
    fields = ('node', 'attr', 'ctx')

    def as_const(self, eval_ctx=None):
        if self.ctx != 'load':
            raise Impossible()
        try:
            eval_ctx = get_eval_context(self, eval_ctx)
            return self.environment.getattr(self.node.as_const(eval_ctx),
                                            self.attr)
        except Exception:
            raise Impossible()

    def can_assign(self):
        return False
|
||||
|
||||
|
||||
class Slice(Expr):
    """Represents a slice object. This must only be used as argument for
    :class:`Subscript`.
    """
    fields = ('start', 'stop', 'step')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        def const(obj):
            # missing slice parts stay None, just like Python slices
            if obj is None:
                return None
            return obj.as_const(eval_ctx)
        return slice(const(self.start), const(self.stop), const(self.step))


class Concat(Expr):
    """Concatenates the list of expressions provided after converting them to
    unicode.
    """
    fields = ('nodes',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)


class Compare(Expr):
    """Compares an expression with some other expressions. `ops` must be a
    list of :class:`Operand` nodes.
    """
    fields = ('expr', 'ops')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        result = value = self.expr.as_const(eval_ctx)
        try:
            # chained comparison: each operand compares against the
            # previous value, mirroring Python's a < b < c semantics
            for op in self.ops:
                new_value = op.expr.as_const(eval_ctx)
                result = _cmpop_to_func[op.op](value, new_value)
                value = new_value
        except Exception:
            raise Impossible()
        return result


class Operand(Helper):
    """Holds an operator and an expression."""
    fields = ('op', 'expr')

if __debug__:
    Operand.__doc__ += '\nThe following operators are available: ' + \
        ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
                  set(_uaop_to_func) | set(_cmpop_to_func)))
|
||||
|
||||
|
||||
class Mul(BinExpr):
    """Multiplies the left with the right node."""
    operator = '*'


class Div(BinExpr):
    """Divides the left by the right node."""
    operator = '/'


class FloorDiv(BinExpr):
    """Divides the left by the right node and truncates the result to an
    integer (floor division).
    """
    operator = '//'


class Add(BinExpr):
    """Add the left to the right node."""
    operator = '+'


class Sub(BinExpr):
    """Substract the right from the left node."""
    operator = '-'


class Mod(BinExpr):
    """Left modulo right."""
    operator = '%'


class Pow(BinExpr):
    """Left to the power of right."""
    operator = '**'


class And(BinExpr):
    """Short circuited AND."""
    operator = 'and'

    def as_const(self, eval_ctx=None):
        # cannot use _binop_to_func: short-circuiting must be preserved
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)


class Or(BinExpr):
    """Short circuited OR."""
    operator = 'or'

    def as_const(self, eval_ctx=None):
        # cannot use _binop_to_func: short-circuiting must be preserved
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)


class Not(UnaryExpr):
    """Negate the expression."""
    operator = 'not'


class Neg(UnaryExpr):
    """Make the expression negative."""
    operator = '-'


class Pos(UnaryExpr):
    """Make the expression positive (noop for most expressions)"""
    operator = '+'
|
||||
|
||||
|
||||
# Helpers for extensions
|
||||
|
||||
|
||||
class EnvironmentAttribute(Expr):
    """Loads an attribute from the environment object. This is useful for
    extensions that want to call a callback stored on the environment.
    """
    fields = ('name',)


class ExtensionAttribute(Expr):
    """Returns the attribute of an extension bound to the environment.
    The identifier is the identifier of the :class:`Extension`.

    This node is usually constructed by calling the
    :meth:`~jinja2.ext.Extension.attr` method on an extension.
    """
    fields = ('identifier', 'name')


class ImportedName(Expr):
    """If created with an import name the import name is returned on node
    access. For example ``ImportedName('cgi.escape')`` returns the `escape`
    function from the cgi module on evaluation. Imports are optimized by the
    compiler so there is no need to assign them to local variables.
    """
    fields = ('importname',)


class InternalName(Expr):
    """An internal name in the compiler. You cannot create these nodes
    yourself but the parser provides a
    :meth:`~jinja2.parser.Parser.free_identifier` method that creates
    a new identifier for you. This identifier is not available from the
    template and is not threated specially by the compiler.
    """
    fields = ('name',)

    def __init__(self):
        # direct construction is forbidden so the parser can guarantee
        # uniqueness of internal identifiers
        raise TypeError('Can\'t create internal names. Use the '
                        '`free_identifier` method on a parser.')


class MarkSafe(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`)."""
    fields = ('expr',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return Markup(self.expr.as_const(eval_ctx))


class MarkSafeIfAutoescape(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`) but
    only if autoescaping is active.

    .. versionadded:: 2.5
    """
    fields = ('expr',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # in a volatile context the autoescape state is unknown
        if eval_ctx.volatile:
            raise Impossible()
        expr = self.expr.as_const(eval_ctx)
        if eval_ctx.autoescape:
            return Markup(expr)
        return expr
|
||||
|
||||
|
||||
class ContextReference(Expr):
    """Returns the current template context. It can be used like a
    :class:`Name` node, with a ``'load'`` ctx and will return the
    current :class:`~jinja2.runtime.Context` object.

    Here an example that assigns the current template name to a
    variable named `foo`::

        Assign(Name('foo', ctx='store'),
               Getattr(ContextReference(), 'name'))
    """


class Continue(Stmt):
    """Continue a loop."""


class Break(Stmt):
    """Break a loop."""


class Scope(Stmt):
    """An artificial scope."""
    fields = ('body',)


class EvalContextModifier(Stmt):
    """Modifies the eval context. For each option that should be modified,
    a :class:`Keyword` has to be added to the :attr:`options` list.

    Example to change the `autoescape` setting::

        EvalContextModifier(options=[Keyword('autoescape', Const(True))])
    """
    fields = ('options',)


class ScopedEvalContextModifier(EvalContextModifier):
    """Modifies the eval context and reverts it later. Works exactly like
    :class:`EvalContextModifier` but will only modify the
    :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
    """
    fields = ('body',)
|
||||
|
||||
|
||||
# make sure nobody creates custom nodes
# (replacing NodeType.__new__ after all node classes above were created
# prevents third-party code from subclassing the node machinery)
def _failing_new(*args, **kwargs):
    raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
|
68
3rdparty/jinja2/optimizer.py
vendored
Normal file
68
3rdparty/jinja2/optimizer.py
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.optimizer
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
The jinja optimizer is currently trying to constant fold a few expressions
|
||||
and modify the AST in place so that it should be easier to evaluate it.
|
||||
|
||||
Because the AST does not contain all the scoping information and the
|
||||
compiler has to find that out, we cannot do all the optimizations we
|
||||
want. For example loop unrolling doesn't work because unrolled loops would
|
||||
have a different scoping.
|
||||
|
||||
The solution would be a second syntax tree that has the scoping rules stored.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD.
|
||||
"""
|
||||
from jinja2 import nodes
|
||||
from jinja2.visitor import NodeTransformer
|
||||
|
||||
|
||||
def optimize(node, environment):
    """The context hint can be used to perform an static optimization
    based on the context given.

    Returns a (possibly new) node tree with constant expressions folded
    and dead ``if`` branches removed.
    """
    optimizer = Optimizer(environment)
    return optimizer.visit(node)
|
||||
|
||||
|
||||
class Optimizer(NodeTransformer):
    """Node transformer that constant-folds expressions and removes
    statically dead ``if`` branches from a template AST."""

    def __init__(self, environment):
        self.environment = environment

    def visit_If(self, node):
        """Eliminate dead code."""
        # do not optimize ifs that have a block inside so that it doesn't
        # break super().
        if node.find(nodes.Block) is not None:
            return self.generic_visit(node)
        try:
            val = self.visit(node.test).as_const()
        except nodes.Impossible:
            # the condition is not a compile-time constant; keep both arms
            return self.generic_visit(node)
        if val:
            body = node.body
        else:
            body = node.else_
        result = []
        # fix: use a distinct loop variable -- the original shadowed the
        # `node` parameter, which is error-prone for future maintenance
        for child in body:
            result.extend(self.visit_list(child))
        return result

    def fold(self, node):
        """Do constant folding."""
        node = self.generic_visit(node)
        try:
            return nodes.Const.from_untrusted(node.as_const(),
                                              lineno=node.lineno,
                                              environment=self.environment)
        except nodes.Impossible:
            # not foldable; return the (possibly rewritten) node as-is
            return node

    # all pure expression node types share the same folding visitor
    visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
        visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
        visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
        visit_Filter = visit_Test = visit_CondExpr = fold
    del fold
|
895
3rdparty/jinja2/parser.py
vendored
Normal file
895
3rdparty/jinja2/parser.py
vendored
Normal file
@ -0,0 +1,895 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.parser
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Implements the template parser.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
from jinja2 import nodes
|
||||
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
|
||||
from jinja2.lexer import describe_token, describe_token_expr
|
||||
from jinja2._compat import next, imap
|
||||
|
||||
|
||||
#: statements that callinto
|
||||
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
|
||||
'macro', 'include', 'from', 'import',
|
||||
'set'])
|
||||
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
|
||||
|
||||
|
||||
class Parser(object):
|
||||
"""This is the central parsing class Jinja2 uses. It's passed to
|
||||
extensions and can be used to parse expressions or statements.
|
||||
"""
|
||||
|
||||
def __init__(self, environment, source, name=None, filename=None,
|
||||
state=None):
|
||||
self.environment = environment
|
||||
self.stream = environment._tokenize(source, name, filename, state)
|
||||
self.name = name
|
||||
self.filename = filename
|
||||
self.closed = False
|
||||
self.extensions = {}
|
||||
for extension in environment.iter_extensions():
|
||||
for tag in extension.tags:
|
||||
self.extensions[tag] = extension.parse
|
||||
self._last_identifier = 0
|
||||
self._tag_stack = []
|
||||
self._end_token_stack = []
|
||||
|
||||
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
|
||||
"""Convenience method that raises `exc` with the message, passed
|
||||
line number or last line number as well as the current name and
|
||||
filename.
|
||||
"""
|
||||
if lineno is None:
|
||||
lineno = self.stream.current.lineno
|
||||
raise exc(msg, lineno, self.name, self.filename)
|
||||
|
||||
def _fail_ut_eof(self, name, end_token_stack, lineno):
|
||||
expected = []
|
||||
for exprs in end_token_stack:
|
||||
expected.extend(imap(describe_token_expr, exprs))
|
||||
if end_token_stack:
|
||||
currently_looking = ' or '.join(
|
||||
"'%s'" % describe_token_expr(expr)
|
||||
for expr in end_token_stack[-1])
|
||||
else:
|
||||
currently_looking = None
|
||||
|
||||
if name is None:
|
||||
message = ['Unexpected end of template.']
|
||||
else:
|
||||
message = ['Encountered unknown tag \'%s\'.' % name]
|
||||
|
||||
if currently_looking:
|
||||
if name is not None and name in expected:
|
||||
message.append('You probably made a nesting mistake. Jinja '
|
||||
'is expecting this tag, but currently looking '
|
||||
'for %s.' % currently_looking)
|
||||
else:
|
||||
message.append('Jinja was looking for the following tags: '
|
||||
'%s.' % currently_looking)
|
||||
|
||||
if self._tag_stack:
|
||||
message.append('The innermost block that needs to be '
|
||||
'closed is \'%s\'.' % self._tag_stack[-1])
|
||||
|
||||
self.fail(' '.join(message), lineno)
|
||||
|
||||
def fail_unknown_tag(self, name, lineno=None):
|
||||
"""Called if the parser encounters an unknown tag. Tries to fail
|
||||
with a human readable error message that could help to identify
|
||||
the problem.
|
||||
"""
|
||||
return self._fail_ut_eof(name, self._end_token_stack, lineno)
|
||||
|
||||
def fail_eof(self, end_tokens=None, lineno=None):
|
||||
"""Like fail_unknown_tag but for end of template situations."""
|
||||
stack = list(self._end_token_stack)
|
||||
if end_tokens is not None:
|
||||
stack.append(end_tokens)
|
||||
return self._fail_ut_eof(None, stack, lineno)
|
||||
|
||||
def is_tuple_end(self, extra_end_rules=None):
|
||||
"""Are we at the end of a tuple?"""
|
||||
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
|
||||
return True
|
||||
elif extra_end_rules is not None:
|
||||
return self.stream.current.test_any(extra_end_rules)
|
||||
return False
|
||||
|
||||
def free_identifier(self, lineno=None):
|
||||
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
|
||||
self._last_identifier += 1
|
||||
rv = object.__new__(nodes.InternalName)
|
||||
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
|
||||
return rv
|
||||
|
||||
def parse_statement(self):
|
||||
"""Parse a single statement."""
|
||||
token = self.stream.current
|
||||
if token.type != 'name':
|
||||
self.fail('tag name expected', token.lineno)
|
||||
self._tag_stack.append(token.value)
|
||||
pop_tag = True
|
||||
try:
|
||||
if token.value in _statement_keywords:
|
||||
return getattr(self, 'parse_' + self.stream.current.value)()
|
||||
if token.value == 'call':
|
||||
return self.parse_call_block()
|
||||
if token.value == 'filter':
|
||||
return self.parse_filter_block()
|
||||
ext = self.extensions.get(token.value)
|
||||
if ext is not None:
|
||||
return ext(self)
|
||||
|
||||
# did not work out, remove the token we pushed by accident
|
||||
# from the stack so that the unknown tag fail function can
|
||||
# produce a proper error message.
|
||||
self._tag_stack.pop()
|
||||
pop_tag = False
|
||||
self.fail_unknown_tag(token.value, token.lineno)
|
||||
finally:
|
||||
if pop_tag:
|
||||
self._tag_stack.pop()
|
||||
|
||||
def parse_statements(self, end_tokens, drop_needle=False):
|
||||
"""Parse multiple statements into a list until one of the end tokens
|
||||
is reached. This is used to parse the body of statements as it also
|
||||
parses template data if appropriate. The parser checks first if the
|
||||
current token is a colon and skips it if there is one. Then it checks
|
||||
for the block end and parses until if one of the `end_tokens` is
|
||||
reached. Per default the active token in the stream at the end of
|
||||
the call is the matched end token. If this is not wanted `drop_needle`
|
||||
can be set to `True` and the end token is removed.
|
||||
"""
|
||||
# the first token may be a colon for python compatibility
|
||||
self.stream.skip_if('colon')
|
||||
|
||||
# in the future it would be possible to add whole code sections
|
||||
# by adding some sort of end of statement token and parsing those here.
|
||||
self.stream.expect('block_end')
|
||||
result = self.subparse(end_tokens)
|
||||
|
||||
# we reached the end of the template too early, the subparser
|
||||
# does not check for this, so we do that now
|
||||
if self.stream.current.type == 'eof':
|
||||
self.fail_eof(end_tokens)
|
||||
|
||||
if drop_needle:
|
||||
next(self.stream)
|
||||
return result
|
||||
|
||||
def parse_set(self):
|
||||
"""Parse an assign statement."""
|
||||
lineno = next(self.stream).lineno
|
||||
target = self.parse_assign_target()
|
||||
self.stream.expect('assign')
|
||||
expr = self.parse_tuple()
|
||||
return nodes.Assign(target, expr, lineno=lineno)
|
||||
|
||||
def parse_for(self):
|
||||
"""Parse a for loop."""
|
||||
lineno = self.stream.expect('name:for').lineno
|
||||
target = self.parse_assign_target(extra_end_rules=('name:in',))
|
||||
self.stream.expect('name:in')
|
||||
iter = self.parse_tuple(with_condexpr=False,
|
||||
extra_end_rules=('name:recursive',))
|
||||
test = None
|
||||
if self.stream.skip_if('name:if'):
|
||||
test = self.parse_expression()
|
||||
recursive = self.stream.skip_if('name:recursive')
|
||||
body = self.parse_statements(('name:endfor', 'name:else'))
|
||||
if next(self.stream).value == 'endfor':
|
||||
else_ = []
|
||||
else:
|
||||
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
|
||||
return nodes.For(target, iter, body, else_, test,
|
||||
recursive, lineno=lineno)
|
||||
|
||||
def parse_if(self):
|
||||
"""Parse an if construct."""
|
||||
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
|
||||
while 1:
|
||||
node.test = self.parse_tuple(with_condexpr=False)
|
||||
node.body = self.parse_statements(('name:elif', 'name:else',
|
||||
'name:endif'))
|
||||
token = next(self.stream)
|
||||
if token.test('name:elif'):
|
||||
new_node = nodes.If(lineno=self.stream.current.lineno)
|
||||
node.else_ = [new_node]
|
||||
node = new_node
|
||||
continue
|
||||
elif token.test('name:else'):
|
||||
node.else_ = self.parse_statements(('name:endif',),
|
||||
drop_needle=True)
|
||||
else:
|
||||
node.else_ = []
|
||||
break
|
||||
return result
|
||||
|
||||
def parse_block(self):
|
||||
node = nodes.Block(lineno=next(self.stream).lineno)
|
||||
node.name = self.stream.expect('name').value
|
||||
node.scoped = self.stream.skip_if('name:scoped')
|
||||
|
||||
# common problem people encounter when switching from django
|
||||
# to jinja. we do not support hyphens in block names, so let's
|
||||
# raise a nicer error message in that case.
|
||||
if self.stream.current.type == 'sub':
|
||||
self.fail('Block names in Jinja have to be valid Python '
|
||||
'identifiers and may not contain hyphens, use an '
|
||||
'underscore instead.')
|
||||
|
||||
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
|
||||
self.stream.skip_if('name:' + node.name)
|
||||
return node
|
||||
|
||||
def parse_extends(self):
|
||||
node = nodes.Extends(lineno=next(self.stream).lineno)
|
||||
node.template = self.parse_expression()
|
||||
return node
|
||||
|
||||
def parse_import_context(self, node, default):
|
||||
if self.stream.current.test_any('name:with', 'name:without') and \
|
||||
self.stream.look().test('name:context'):
|
||||
node.with_context = next(self.stream).value == 'with'
|
||||
self.stream.skip()
|
||||
else:
|
||||
node.with_context = default
|
||||
return node
|
||||
|
||||
def parse_include(self):
|
||||
node = nodes.Include(lineno=next(self.stream).lineno)
|
||||
node.template = self.parse_expression()
|
||||
if self.stream.current.test('name:ignore') and \
|
||||
self.stream.look().test('name:missing'):
|
||||
node.ignore_missing = True
|
||||
self.stream.skip(2)
|
||||
else:
|
||||
node.ignore_missing = False
|
||||
return self.parse_import_context(node, True)
|
||||
|
||||
def parse_import(self):
|
||||
node = nodes.Import(lineno=next(self.stream).lineno)
|
||||
node.template = self.parse_expression()
|
||||
self.stream.expect('name:as')
|
||||
node.target = self.parse_assign_target(name_only=True).name
|
||||
return self.parse_import_context(node, False)
|
||||
|
||||
def parse_from(self):
|
||||
node = nodes.FromImport(lineno=next(self.stream).lineno)
|
||||
node.template = self.parse_expression()
|
||||
self.stream.expect('name:import')
|
||||
node.names = []
|
||||
|
||||
def parse_context():
|
||||
if self.stream.current.value in ('with', 'without') and \
|
||||
self.stream.look().test('name:context'):
|
||||
node.with_context = next(self.stream).value == 'with'
|
||||
self.stream.skip()
|
||||
return True
|
||||
return False
|
||||
|
||||
while 1:
|
||||
if node.names:
|
||||
self.stream.expect('comma')
|
||||
if self.stream.current.type == 'name':
|
||||
if parse_context():
|
||||
break
|
||||
target = self.parse_assign_target(name_only=True)
|
||||
if target.name.startswith('_'):
|
||||
self.fail('names starting with an underline can not '
|
||||
'be imported', target.lineno,
|
||||
exc=TemplateAssertionError)
|
||||
if self.stream.skip_if('name:as'):
|
||||
alias = self.parse_assign_target(name_only=True)
|
||||
node.names.append((target.name, alias.name))
|
||||
else:
|
||||
node.names.append(target.name)
|
||||
if parse_context() or self.stream.current.type != 'comma':
|
||||
break
|
||||
else:
|
||||
break
|
||||
if not hasattr(node, 'with_context'):
|
||||
node.with_context = False
|
||||
self.stream.skip_if('comma')
|
||||
return node
|
||||
|
||||
def parse_signature(self, node):
|
||||
node.args = args = []
|
||||
node.defaults = defaults = []
|
||||
self.stream.expect('lparen')
|
||||
while self.stream.current.type != 'rparen':
|
||||
if args:
|
||||
self.stream.expect('comma')
|
||||
arg = self.parse_assign_target(name_only=True)
|
||||
arg.set_ctx('param')
|
||||
if self.stream.skip_if('assign'):
|
||||
defaults.append(self.parse_expression())
|
||||
args.append(arg)
|
||||
self.stream.expect('rparen')
|
||||
|
||||
def parse_call_block(self):
|
||||
node = nodes.CallBlock(lineno=next(self.stream).lineno)
|
||||
if self.stream.current.type == 'lparen':
|
||||
self.parse_signature(node)
|
||||
else:
|
||||
node.args = []
|
||||
node.defaults = []
|
||||
|
||||
node.call = self.parse_expression()
|
||||
if not isinstance(node.call, nodes.Call):
|
||||
self.fail('expected call', node.lineno)
|
||||
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
|
||||
return node
|
||||
|
||||
def parse_filter_block(self):
|
||||
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
|
||||
node.filter = self.parse_filter(None, start_inline=True)
|
||||
node.body = self.parse_statements(('name:endfilter',),
|
||||
drop_needle=True)
|
||||
return node
|
||||
|
||||
def parse_macro(self):
|
||||
node = nodes.Macro(lineno=next(self.stream).lineno)
|
||||
node.name = self.parse_assign_target(name_only=True).name
|
||||
self.parse_signature(node)
|
||||
node.body = self.parse_statements(('name:endmacro',),
|
||||
drop_needle=True)
|
||||
return node
|
||||
|
||||
def parse_print(self):
|
||||
node = nodes.Output(lineno=next(self.stream).lineno)
|
||||
node.nodes = []
|
||||
while self.stream.current.type != 'block_end':
|
||||
if node.nodes:
|
||||
self.stream.expect('comma')
|
||||
node.nodes.append(self.parse_expression())
|
||||
return node
|
||||
|
||||
def parse_assign_target(self, with_tuple=True, name_only=False,
|
||||
extra_end_rules=None):
|
||||
"""Parse an assignment target. As Jinja2 allows assignments to
|
||||
tuples, this function can parse all allowed assignment targets. Per
|
||||
default assignments to tuples are parsed, that can be disable however
|
||||
by setting `with_tuple` to `False`. If only assignments to names are
|
||||
wanted `name_only` can be set to `True`. The `extra_end_rules`
|
||||
parameter is forwarded to the tuple parsing function.
|
||||
"""
|
||||
if name_only:
|
||||
token = self.stream.expect('name')
|
||||
target = nodes.Name(token.value, 'store', lineno=token.lineno)
|
||||
else:
|
||||
if with_tuple:
|
||||
target = self.parse_tuple(simplified=True,
|
||||
extra_end_rules=extra_end_rules)
|
||||
else:
|
||||
target = self.parse_primary()
|
||||
target.set_ctx('store')
|
||||
if not target.can_assign():
|
||||
self.fail('can\'t assign to %r' % target.__class__.
|
||||
__name__.lower(), target.lineno)
|
||||
return target
|
||||
|
||||
def parse_expression(self, with_condexpr=True):
|
||||
"""Parse an expression. Per default all expressions are parsed, if
|
||||
the optional `with_condexpr` parameter is set to `False` conditional
|
||||
expressions are not parsed.
|
||||
"""
|
||||
if with_condexpr:
|
||||
return self.parse_condexpr()
|
||||
return self.parse_or()
|
||||
|
||||
def parse_condexpr(self):
|
||||
lineno = self.stream.current.lineno
|
||||
expr1 = self.parse_or()
|
||||
while self.stream.skip_if('name:if'):
|
||||
expr2 = self.parse_or()
|
||||
if self.stream.skip_if('name:else'):
|
||||
expr3 = self.parse_condexpr()
|
||||
else:
|
||||
expr3 = None
|
||||
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return expr1
|
||||
|
||||
def parse_or(self):
|
||||
lineno = self.stream.current.lineno
|
||||
left = self.parse_and()
|
||||
while self.stream.skip_if('name:or'):
|
||||
right = self.parse_and()
|
||||
left = nodes.Or(left, right, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return left
|
||||
|
||||
def parse_and(self):
|
||||
lineno = self.stream.current.lineno
|
||||
left = self.parse_not()
|
||||
while self.stream.skip_if('name:and'):
|
||||
right = self.parse_not()
|
||||
left = nodes.And(left, right, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return left
|
||||
|
||||
def parse_not(self):
|
||||
if self.stream.current.test('name:not'):
|
||||
lineno = next(self.stream).lineno
|
||||
return nodes.Not(self.parse_not(), lineno=lineno)
|
||||
return self.parse_compare()
|
||||
|
||||
def parse_compare(self):
|
||||
lineno = self.stream.current.lineno
|
||||
expr = self.parse_add()
|
||||
ops = []
|
||||
while 1:
|
||||
token_type = self.stream.current.type
|
||||
if token_type in _compare_operators:
|
||||
next(self.stream)
|
||||
ops.append(nodes.Operand(token_type, self.parse_add()))
|
||||
elif self.stream.skip_if('name:in'):
|
||||
ops.append(nodes.Operand('in', self.parse_add()))
|
||||
elif self.stream.current.test('name:not') and \
|
||||
self.stream.look().test('name:in'):
|
||||
self.stream.skip(2)
|
||||
ops.append(nodes.Operand('notin', self.parse_add()))
|
||||
else:
|
||||
break
|
||||
lineno = self.stream.current.lineno
|
||||
if not ops:
|
||||
return expr
|
||||
return nodes.Compare(expr, ops, lineno=lineno)
|
||||
|
||||
def parse_add(self):
|
||||
lineno = self.stream.current.lineno
|
||||
left = self.parse_sub()
|
||||
while self.stream.current.type == 'add':
|
||||
next(self.stream)
|
||||
right = self.parse_sub()
|
||||
left = nodes.Add(left, right, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return left
|
||||
|
||||
def parse_sub(self):
|
||||
lineno = self.stream.current.lineno
|
||||
left = self.parse_concat()
|
||||
while self.stream.current.type == 'sub':
|
||||
next(self.stream)
|
||||
right = self.parse_concat()
|
||||
left = nodes.Sub(left, right, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return left
|
||||
|
||||
def parse_concat(self):
|
||||
lineno = self.stream.current.lineno
|
||||
args = [self.parse_mul()]
|
||||
while self.stream.current.type == 'tilde':
|
||||
next(self.stream)
|
||||
args.append(self.parse_mul())
|
||||
if len(args) == 1:
|
||||
return args[0]
|
||||
return nodes.Concat(args, lineno=lineno)
|
||||
|
||||
def parse_mul(self):
|
||||
lineno = self.stream.current.lineno
|
||||
left = self.parse_div()
|
||||
while self.stream.current.type == 'mul':
|
||||
next(self.stream)
|
||||
right = self.parse_div()
|
||||
left = nodes.Mul(left, right, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return left
|
||||
|
||||
def parse_div(self):
|
||||
lineno = self.stream.current.lineno
|
||||
left = self.parse_floordiv()
|
||||
while self.stream.current.type == 'div':
|
||||
next(self.stream)
|
||||
right = self.parse_floordiv()
|
||||
left = nodes.Div(left, right, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return left
|
||||
|
||||
def parse_floordiv(self):
|
||||
lineno = self.stream.current.lineno
|
||||
left = self.parse_mod()
|
||||
while self.stream.current.type == 'floordiv':
|
||||
next(self.stream)
|
||||
right = self.parse_mod()
|
||||
left = nodes.FloorDiv(left, right, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return left
|
||||
|
||||
def parse_mod(self):
|
||||
lineno = self.stream.current.lineno
|
||||
left = self.parse_pow()
|
||||
while self.stream.current.type == 'mod':
|
||||
next(self.stream)
|
||||
right = self.parse_pow()
|
||||
left = nodes.Mod(left, right, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return left
|
||||
|
||||
def parse_pow(self):
|
||||
lineno = self.stream.current.lineno
|
||||
left = self.parse_unary()
|
||||
while self.stream.current.type == 'pow':
|
||||
next(self.stream)
|
||||
right = self.parse_unary()
|
||||
left = nodes.Pow(left, right, lineno=lineno)
|
||||
lineno = self.stream.current.lineno
|
||||
return left
|
||||
|
||||
def parse_unary(self, with_filter=True):
|
||||
token_type = self.stream.current.type
|
||||
lineno = self.stream.current.lineno
|
||||
if token_type == 'sub':
|
||||
next(self.stream)
|
||||
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
|
||||
elif token_type == 'add':
|
||||
next(self.stream)
|
||||
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
|
||||
else:
|
||||
node = self.parse_primary()
|
||||
node = self.parse_postfix(node)
|
||||
if with_filter:
|
||||
node = self.parse_filter_expr(node)
|
||||
return node
|
||||
|
||||
def parse_primary(self):
|
||||
token = self.stream.current
|
||||
if token.type == 'name':
|
||||
if token.value in ('true', 'false', 'True', 'False'):
|
||||
node = nodes.Const(token.value in ('true', 'True'),
|
||||
lineno=token.lineno)
|
||||
elif token.value in ('none', 'None'):
|
||||
node = nodes.Const(None, lineno=token.lineno)
|
||||
else:
|
||||
node = nodes.Name(token.value, 'load', lineno=token.lineno)
|
||||
next(self.stream)
|
||||
elif token.type == 'string':
|
||||
next(self.stream)
|
||||
buf = [token.value]
|
||||
lineno = token.lineno
|
||||
while self.stream.current.type == 'string':
|
||||
buf.append(self.stream.current.value)
|
||||
next(self.stream)
|
||||
node = nodes.Const(''.join(buf), lineno=lineno)
|
||||
elif token.type in ('integer', 'float'):
|
||||
next(self.stream)
|
||||
node = nodes.Const(token.value, lineno=token.lineno)
|
||||
elif token.type == 'lparen':
|
||||
next(self.stream)
|
||||
node = self.parse_tuple(explicit_parentheses=True)
|
||||
self.stream.expect('rparen')
|
||||
elif token.type == 'lbracket':
|
||||
node = self.parse_list()
|
||||
elif token.type == 'lbrace':
|
||||
node = self.parse_dict()
|
||||
else:
|
||||
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
|
||||
return node
|
||||
|
||||
def parse_tuple(self, simplified=False, with_condexpr=True,
|
||||
extra_end_rules=None, explicit_parentheses=False):
|
||||
"""Works like `parse_expression` but if multiple expressions are
|
||||
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
|
||||
This method could also return a regular expression instead of a tuple
|
||||
if no commas where found.
|
||||
|
||||
The default parsing mode is a full tuple. If `simplified` is `True`
|
||||
only names and literals are parsed. The `no_condexpr` parameter is
|
||||
forwarded to :meth:`parse_expression`.
|
||||
|
||||
Because tuples do not require delimiters and may end in a bogus comma
|
||||
an extra hint is needed that marks the end of a tuple. For example
|
||||
for loops support tuples between `for` and `in`. In that case the
|
||||
`extra_end_rules` is set to ``['name:in']``.
|
||||
|
||||
`explicit_parentheses` is true if the parsing was triggered by an
|
||||
expression in parentheses. This is used to figure out if an empty
|
||||
tuple is a valid expression or not.
|
||||
"""
|
||||
lineno = self.stream.current.lineno
|
||||
if simplified:
|
||||
parse = self.parse_primary
|
||||
elif with_condexpr:
|
||||
parse = self.parse_expression
|
||||
else:
|
||||
parse = lambda: self.parse_expression(with_condexpr=False)
|
||||
args = []
|
||||
is_tuple = False
|
||||
while 1:
|
||||
if args:
|
||||
self.stream.expect('comma')
|
||||
if self.is_tuple_end(extra_end_rules):
|
||||
break
|
||||
args.append(parse())
|
||||
if self.stream.current.type == 'comma':
|
||||
is_tuple = True
|
||||
else:
|
||||
break
|
||||
lineno = self.stream.current.lineno
|
||||
|
||||
if not is_tuple:
|
||||
if args:
|
||||
return args[0]
|
||||
|
||||
# if we don't have explicit parentheses, an empty tuple is
|
||||
# not a valid expression. This would mean nothing (literally
|
||||
# nothing) in the spot of an expression would be an empty
|
||||
# tuple.
|
||||
if not explicit_parentheses:
|
||||
self.fail('Expected an expression, got \'%s\'' %
|
||||
describe_token(self.stream.current))
|
||||
|
||||
return nodes.Tuple(args, 'load', lineno=lineno)
|
||||
|
||||
def parse_list(self):
|
||||
token = self.stream.expect('lbracket')
|
||||
items = []
|
||||
while self.stream.current.type != 'rbracket':
|
||||
if items:
|
||||
self.stream.expect('comma')
|
||||
if self.stream.current.type == 'rbracket':
|
||||
break
|
||||
items.append(self.parse_expression())
|
||||
self.stream.expect('rbracket')
|
||||
return nodes.List(items, lineno=token.lineno)
|
||||
|
||||
def parse_dict(self):
|
||||
token = self.stream.expect('lbrace')
|
||||
items = []
|
||||
while self.stream.current.type != 'rbrace':
|
||||
if items:
|
||||
self.stream.expect('comma')
|
||||
if self.stream.current.type == 'rbrace':
|
||||
break
|
||||
key = self.parse_expression()
|
||||
self.stream.expect('colon')
|
||||
value = self.parse_expression()
|
||||
items.append(nodes.Pair(key, value, lineno=key.lineno))
|
||||
self.stream.expect('rbrace')
|
||||
return nodes.Dict(items, lineno=token.lineno)
|
||||
|
||||
def parse_postfix(self, node):
|
||||
while 1:
|
||||
token_type = self.stream.current.type
|
||||
if token_type == 'dot' or token_type == 'lbracket':
|
||||
node = self.parse_subscript(node)
|
||||
# calls are valid both after postfix expressions (getattr
|
||||
# and getitem) as well as filters and tests
|
||||
elif token_type == 'lparen':
|
||||
node = self.parse_call(node)
|
||||
else:
|
||||
break
|
||||
return node
|
||||
|
||||
def parse_filter_expr(self, node):
|
||||
while 1:
|
||||
token_type = self.stream.current.type
|
||||
if token_type == 'pipe':
|
||||
node = self.parse_filter(node)
|
||||
elif token_type == 'name' and self.stream.current.value == 'is':
|
||||
node = self.parse_test(node)
|
||||
# calls are valid both after postfix expressions (getattr
|
||||
# and getitem) as well as filters and tests
|
||||
elif token_type == 'lparen':
|
||||
node = self.parse_call(node)
|
||||
else:
|
||||
break
|
||||
return node
|
||||
|
||||
def parse_subscript(self, node):
|
||||
token = next(self.stream)
|
||||
if token.type == 'dot':
|
||||
attr_token = self.stream.current
|
||||
next(self.stream)
|
||||
if attr_token.type == 'name':
|
||||
return nodes.Getattr(node, attr_token.value, 'load',
|
||||
lineno=token.lineno)
|
||||
elif attr_token.type != 'integer':
|
||||
self.fail('expected name or number', attr_token.lineno)
|
||||
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
|
||||
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
|
||||
if token.type == 'lbracket':
|
||||
args = []
|
||||
while self.stream.current.type != 'rbracket':
|
||||
if args:
|
||||
self.stream.expect('comma')
|
||||
args.append(self.parse_subscribed())
|
||||
self.stream.expect('rbracket')
|
||||
if len(args) == 1:
|
||||
arg = args[0]
|
||||
else:
|
||||
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
|
||||
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
|
||||
self.fail('expected subscript expression', self.lineno)
|
||||
|
||||
def parse_subscribed(self):
|
||||
lineno = self.stream.current.lineno
|
||||
|
||||
if self.stream.current.type == 'colon':
|
||||
next(self.stream)
|
||||
args = [None]
|
||||
else:
|
||||
node = self.parse_expression()
|
||||
if self.stream.current.type != 'colon':
|
||||
return node
|
||||
next(self.stream)
|
||||
args = [node]
|
||||
|
||||
if self.stream.current.type == 'colon':
|
||||
args.append(None)
|
||||
elif self.stream.current.type not in ('rbracket', 'comma'):
|
||||
args.append(self.parse_expression())
|
||||
else:
|
||||
args.append(None)
|
||||
|
||||
if self.stream.current.type == 'colon':
|
||||
next(self.stream)
|
||||
if self.stream.current.type not in ('rbracket', 'comma'):
|
||||
args.append(self.parse_expression())
|
||||
else:
|
||||
args.append(None)
|
||||
else:
|
||||
args.append(None)
|
||||
|
||||
return nodes.Slice(lineno=lineno, *args)
|
||||
|
||||
def parse_call(self, node):
|
||||
token = self.stream.expect('lparen')
|
||||
args = []
|
||||
kwargs = []
|
||||
dyn_args = dyn_kwargs = None
|
||||
require_comma = False
|
||||
|
||||
def ensure(expr):
|
||||
if not expr:
|
||||
self.fail('invalid syntax for function call expression',
|
||||
token.lineno)
|
||||
|
||||
while self.stream.current.type != 'rparen':
|
||||
if require_comma:
|
||||
self.stream.expect('comma')
|
||||
# support for trailing comma
|
||||
if self.stream.current.type == 'rparen':
|
||||
break
|
||||
if self.stream.current.type == 'mul':
|
||||
ensure(dyn_args is None and dyn_kwargs is None)
|
||||
next(self.stream)
|
||||
dyn_args = self.parse_expression()
|
||||
elif self.stream.current.type == 'pow':
|
||||
ensure(dyn_kwargs is None)
|
||||
next(self.stream)
|
||||
dyn_kwargs = self.parse_expression()
|
||||
else:
|
||||
ensure(dyn_args is None and dyn_kwargs is None)
|
||||
if self.stream.current.type == 'name' and \
|
||||
self.stream.look().type == 'assign':
|
||||
key = self.stream.current.value
|
||||
self.stream.skip(2)
|
||||
value = self.parse_expression()
|
||||
kwargs.append(nodes.Keyword(key, value,
|
||||
lineno=value.lineno))
|
||||
else:
|
||||
ensure(not kwargs)
|
||||
args.append(self.parse_expression())
|
||||
|
||||
require_comma = True
|
||||
self.stream.expect('rparen')
|
||||
|
||||
if node is None:
|
||||
return args, kwargs, dyn_args, dyn_kwargs
|
||||
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
|
||||
lineno=token.lineno)
|
||||
|
||||
def parse_filter(self, node, start_inline=False):
|
||||
while self.stream.current.type == 'pipe' or start_inline:
|
||||
if not start_inline:
|
||||
next(self.stream)
|
||||
token = self.stream.expect('name')
|
||||
name = token.value
|
||||
while self.stream.current.type == 'dot':
|
||||
next(self.stream)
|
||||
name += '.' + self.stream.expect('name').value
|
||||
if self.stream.current.type == 'lparen':
|
||||
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
|
||||
else:
|
||||
args = []
|
||||
kwargs = []
|
||||
dyn_args = dyn_kwargs = None
|
||||
node = nodes.Filter(node, name, args, kwargs, dyn_args,
|
||||
dyn_kwargs, lineno=token.lineno)
|
||||
start_inline = False
|
||||
return node
|
||||
|
||||
def parse_test(self, node):
|
||||
token = next(self.stream)
|
||||
if self.stream.current.test('name:not'):
|
||||
next(self.stream)
|
||||
negated = True
|
||||
else:
|
||||
negated = False
|
||||
name = self.stream.expect('name').value
|
||||
while self.stream.current.type == 'dot':
|
||||
next(self.stream)
|
||||
name += '.' + self.stream.expect('name').value
|
||||
dyn_args = dyn_kwargs = None
|
||||
kwargs = []
|
||||
if self.stream.current.type == 'lparen':
|
||||
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
|
||||
elif self.stream.current.type in ('name', 'string', 'integer',
|
||||
'float', 'lparen', 'lbracket',
|
||||
'lbrace') and not \
|
||||
self.stream.current.test_any('name:else', 'name:or',
|
||||
'name:and'):
|
||||
if self.stream.current.test('name:is'):
|
||||
self.fail('You cannot chain multiple tests with is')
|
||||
args = [self.parse_expression()]
|
||||
else:
|
||||
args = []
|
||||
node = nodes.Test(node, name, args, kwargs, dyn_args,
|
||||
dyn_kwargs, lineno=token.lineno)
|
||||
if negated:
|
||||
node = nodes.Not(node, lineno=token.lineno)
|
||||
return node
|
||||
|
||||
def subparse(self, end_tokens=None):
    # Parse a stream of template tokens into a list of nodes until one of
    # `end_tokens` (if given) is reached at a block boundary.  Consecutive
    # data/variable tokens are buffered and flushed as one Output node.
    body = []
    data_buffer = []
    add_data = data_buffer.append

    if end_tokens is not None:
        self._end_token_stack.append(end_tokens)

    def flush_data():
        # emit buffered data/expressions as a single Output node
        if data_buffer:
            lineno = data_buffer[0].lineno
            body.append(nodes.Output(data_buffer[:], lineno=lineno))
            del data_buffer[:]

    try:
        while self.stream:
            token = self.stream.current
            if token.type == 'data':
                # plain template text; empty data tokens are dropped
                if token.value:
                    add_data(nodes.TemplateData(token.value,
                                                lineno=token.lineno))
                next(self.stream)
            elif token.type == 'variable_begin':
                # ``{{ ... }}`` expression
                next(self.stream)
                add_data(self.parse_tuple(with_condexpr=True))
                self.stream.expect('variable_end')
            elif token.type == 'block_begin':
                # ``{% ... %}`` statement; flush pending output first
                flush_data()
                next(self.stream)
                if end_tokens is not None and \
                   self.stream.current.test_any(*end_tokens):
                    # caller handles the end tag itself
                    return body
                rv = self.parse_statement()
                if isinstance(rv, list):
                    body.extend(rv)
                else:
                    body.append(rv)
                self.stream.expect('block_end')
            else:
                raise AssertionError('internal parsing error')

        flush_data()
    finally:
        # always unwind the end-token stack, even on parse errors
        if end_tokens is not None:
            self._end_token_stack.pop()

    return body
|
||||
|
||||
def parse(self):
    """Parse the whole template into a `Template` node."""
    root = nodes.Template(self.subparse(), lineno=1)
    root.set_environment(self.environment)
    return root
|
581
3rdparty/jinja2/runtime.py
vendored
Normal file
581
3rdparty/jinja2/runtime.py
vendored
Normal file
@ -0,0 +1,581 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.runtime
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Runtime helpers.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD.
|
||||
"""
|
||||
from itertools import chain
|
||||
from jinja2.nodes import EvalContext, _context_function_types
|
||||
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
|
||||
internalcode, object_type_repr
|
||||
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
|
||||
TemplateNotFound
|
||||
from jinja2._compat import next, imap, text_type, iteritems, \
|
||||
implements_iterator, implements_to_string, string_types, PY2
|
||||
|
||||
|
||||
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
           'TemplateRuntimeError', 'missing', 'concat', 'escape',
           'markup_join', 'unicode_join', 'to_string', 'identity',
           'TemplateNotFound']

#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type

#: the identity function. Useful for certain things in the environment
identity = lambda x: x

# sentinel stored in ``LoopContext._after`` once the wrapped iterator is
# exhausted; compared by identity only.
_last_iteration = object()
|
||||
|
||||
|
||||
def markup_join(seq):
    """Concatenation that escapes if necessary and converts to unicode."""
    pieces = []
    items = imap(soft_unicode, seq)
    for piece in items:
        pieces.append(piece)
        # As soon as one markup-aware value appears, switch to a Markup
        # join so everything seen so far (and everything still to come)
        # is escaped consistently.
        if hasattr(piece, '__html__'):
            return Markup(u'').join(chain(pieces, items))
    return concat(pieces)
|
||||
|
||||
|
||||
def unicode_join(seq):
    """Simple args to unicode conversion and concatenation."""
    # Coerce each item to text lazily, then let ``concat`` join them.
    converted = imap(text_type, seq)
    return concat(converted)
|
||||
|
||||
|
||||
def new_context(environment, template_name, blocks, vars=None,
                shared=None, globals=None, locals=None):
    """Internal helper to for context creation."""
    if vars is None:
        vars = {}
    if shared:
        # a shared context reuses the passed mapping directly instead of
        # merging globals + vars into a fresh dict
        parent = vars
    else:
        parent = dict(globals or (), **vars)
    if locals:
        # if the parent is shared a copy should be created because
        # we don't want to modify the dict passed
        if shared:
            parent = dict(parent)
        for key, value in iteritems(locals):
            # compiler-generated locals carry an 'l_' prefix; `missing`
            # marks variables that were never assigned in that scope
            if key[:2] == 'l_' and value is not missing:
                parent[key[2:]] = value
    return Context(environment, parent, template_name, blocks)
|
||||
|
||||
|
||||
class TemplateReference(object):
    """The `self` in templates."""

    def __init__(self, context):
        self.__context = context

    def __getitem__(self, name):
        # Expose each known block as a zero-depth BlockReference so
        # ``self.blockname()`` renders the top-most version of the block.
        block_stack = self.__context.blocks[name]
        return BlockReference(name, self.__context, block_stack, 0)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.__context.name)
|
||||
|
||||
|
||||
class Context(object):
    """The template context holds the variables of a template. It stores the
    values passed to the template and also the names the template exports.
    Creating instances is neither supported nor useful as it's created
    automatically at various stages of the template evaluation and should not
    be created by hand.

    The context is immutable. Modifications on :attr:`parent` **must not**
    happen and modifications on :attr:`vars` are allowed from generated
    template code only. Template filters and global functions marked as
    :func:`contextfunction`\s get the active context passed as first argument
    and are allowed to access the context read-only.

    The template context supports read only dict operations (`get`,
    `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
    `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
    method that doesn't fail with a `KeyError` but returns an
    :class:`Undefined` object for missing variables.
    """
    __slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars',
                 'name', 'blocks', '__weakref__')

    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        self.exported_vars = set()
        self.name = name

        # create the initial mapping of blocks. Whenever template inheritance
        # takes place the runtime will update this mapping with the new blocks
        # from the template.
        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))

    def super(self, name, current):
        """Render a parent block."""
        try:
            blocks = self.blocks[name]
            index = blocks.index(current) + 1
            # probe the index so a missing parent raises LookupError here
            blocks[index]
        except LookupError:
            return self.environment.undefined('there is no parent block '
                                              'called %r.' % name,
                                              name='super')
        return BlockReference(name, self, blocks, index)

    def get(self, key, default=None):
        """Returns an item from the template context, if it doesn't exist
        `default` is returned.
        """
        try:
            return self[key]
        except KeyError:
            return default

    def resolve(self, key):
        """Looks up a variable like `__getitem__` or `get` but returns an
        :class:`Undefined` object with the name of the name looked up.
        """
        # template-local vars shadow the parent scope
        if key in self.vars:
            return self.vars[key]
        if key in self.parent:
            return self.parent[key]
        return self.environment.undefined(name=key)

    def get_exported(self):
        """Get a new dict with the exported variables."""
        return dict((k, self.vars[k]) for k in self.exported_vars)

    def get_all(self):
        """Return a copy of the complete context as dict including the
        exported variables.
        """
        return dict(self.parent, **self.vars)

    @internalcode
    def call(__self, __obj, *args, **kwargs):
        """Call the callable with the arguments and keyword arguments
        provided but inject the active context or environment as first
        argument if the callable is a :func:`contextfunction` or
        :func:`environmentfunction`.
        """
        if __debug__:
            __traceback_hide__ = True

        # Allow callable classes to take a context
        fn = __obj.__call__
        for fn_type in ('contextfunction',
                        'evalcontextfunction',
                        'environmentfunction'):
            if hasattr(fn, fn_type):
                __obj = fn
                break

        if isinstance(__obj, _context_function_types):
            # inject exactly one of context / eval context / environment
            # as the leading positional argument
            if getattr(__obj, 'contextfunction', 0):
                args = (__self,) + args
            elif getattr(__obj, 'evalcontextfunction', 0):
                args = (__self.eval_ctx,) + args
            elif getattr(__obj, 'environmentfunction', 0):
                args = (__self.environment,) + args
        try:
            return __obj(*args, **kwargs)
        except StopIteration:
            # a leaking StopIteration would silently truncate the render
            return __self.environment.undefined('value was undefined because '
                                                'a callable raised a '
                                                'StopIteration exception')

    def derived(self, locals=None):
        """Internal helper function to create a derived context."""
        context = new_context(self.environment, self.name, {},
                              self.parent, True, None, locals)
        context.vars.update(self.vars)
        context.eval_ctx = self.eval_ctx
        # copy the block stacks so the derived context can be extended
        # without mutating ours
        context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
        return context

    # factory for read-only dict-style accessors that operate on a
    # snapshot produced by get_all()
    def _all(meth):
        proxy = lambda self: getattr(self.get_all(), meth)()
        proxy.__doc__ = getattr(dict, meth).__doc__
        proxy.__name__ = meth
        return proxy

    keys = _all('keys')
    values = _all('values')
    items = _all('items')

    # not available on python 3
    if PY2:
        iterkeys = _all('iterkeys')
        itervalues = _all('itervalues')
        iteritems = _all('iteritems')
    del _all

    def __contains__(self, name):
        return name in self.vars or name in self.parent

    def __getitem__(self, key):
        """Lookup a variable or raise `KeyError` if the variable is
        undefined.
        """
        item = self.resolve(key)
        if isinstance(item, Undefined):
            raise KeyError(key)
        return item

    def __repr__(self):
        return '<%s %s of %r>' % (
            self.__class__.__name__,
            repr(self.get_all()),
            self.name
        )
|
||||
|
||||
|
||||
# register the context as mapping if possible
try:
    try:
        # ``Mapping`` lives in ``collections.abc`` since Python 3.3 and
        # was removed from ``collections`` itself in Python 3.10, so try
        # the new location first and fall back for older interpreters.
        from collections.abc import Mapping
    except ImportError:
        from collections import Mapping
    Mapping.register(Context)
except ImportError:
    pass
|
||||
|
||||
|
||||
class BlockReference(object):
    """One block on a template reference."""

    def __init__(self, name, context, stack, depth):
        # `stack` is the list of block render functions collected through
        # template inheritance; `depth` indexes into it.
        self.name = name
        self._context = context
        self._stack = stack
        self._depth = depth

    @property
    def super(self):
        """Super the block."""
        if self._depth + 1 >= len(self._stack):
            return self._context.environment. \
                undefined('there is no parent block called %r.' %
                          self.name, name='super')
        # reference the next block up the inheritance chain
        return BlockReference(self.name, self._context, self._stack,
                              self._depth + 1)

    @internalcode
    def __call__(self):
        # render this block level; result is a markup string when
        # autoescaping is active
        rv = concat(self._stack[self._depth](self._context))
        if self._context.eval_ctx.autoescape:
            rv = Markup(rv)
        return rv
|
||||
|
||||
|
||||
class LoopContext(object):
    """A loop context for dynamic iteration."""

    def __init__(self, iterable, recurse=None, depth0=0):
        # one item of lookahead (`_after`) makes `last` computable without
        # knowing the iterable's length
        self._iterator = iter(iterable)
        self._recurse = recurse
        self._after = self._safe_next()
        self.index0 = -1
        self.depth0 = depth0

        # try to get the length of the iterable early. This must be done
        # here because there are some broken iterators around where there
        # __len__ is the number of iterations left (i'm looking at your
        # listreverseiterator!).
        try:
            self._length = len(iterable)
        except (TypeError, AttributeError):
            self._length = None

    def cycle(self, *args):
        """Cycles among the arguments with the current loop index."""
        if not args:
            raise TypeError('no items for cycling given')
        return args[self.index0 % len(args)]

    # derived read-only views on the loop state
    first = property(lambda x: x.index0 == 0)
    last = property(lambda x: x._after is _last_iteration)
    index = property(lambda x: x.index0 + 1)
    revindex = property(lambda x: x.length - x.index0)
    revindex0 = property(lambda x: x.length - x.index)
    depth = property(lambda x: x.depth0 + 1)

    def __len__(self):
        return self.length

    def __iter__(self):
        return LoopContextIterator(self)

    def _safe_next(self):
        # advance the iterator, mapping exhaustion to the sentinel instead
        # of letting StopIteration escape
        try:
            return next(self._iterator)
        except StopIteration:
            return _last_iteration

    @internalcode
    def loop(self, iterable):
        if self._recurse is None:
            raise TypeError('Tried to call non recursive loop. Maybe you '
                            "forgot the 'recursive' modifier.")
        return self._recurse(iterable, self._recurse, self.depth0 + 1)

    # a nifty trick to enhance the error message if someone tried to call
    # the the loop without or with too many arguments.
    __call__ = loop
    del loop

    @property
    def length(self):
        if self._length is None:
            # if was not possible to get the length of the iterator when
            # the loop context was created (ie: iterating over a generator)
            # we have to convert the iterable into a sequence and use the
            # length of that.
            iterable = tuple(self._iterator)
            self._iterator = iter(iterable)
            self._length = len(iterable) + self.index0 + 1
        return self._length

    def __repr__(self):
        return '<%s %r/%r>' % (
            self.__class__.__name__,
            self.index,
            self.length
        )
|
||||
|
||||
|
||||
@implements_iterator
class LoopContextIterator(object):
    """The iterator for a loop context."""
    __slots__ = ('context',)

    def __init__(self, context):
        self.context = context

    def __iter__(self):
        return self

    def __next__(self):
        loop = self.context
        loop.index0 += 1
        # the loop context keeps one item of lookahead; the sentinel
        # marks exhaustion of the underlying iterable
        if loop._after is _last_iteration:
            raise StopIteration()
        current = loop._after
        loop._after = loop._safe_next()
        # yield the element together with the loop context itself
        return current, loop
|
||||
|
||||
|
||||
class Macro(object):
    """Wraps a macro function."""

    def __init__(self, environment, func, name, arguments, defaults,
                 catch_kwargs, catch_varargs, caller):
        self._environment = environment
        self._func = func
        self._argument_count = len(arguments)
        self.name = name
        self.arguments = arguments
        self.defaults = defaults
        self.catch_kwargs = catch_kwargs
        self.catch_varargs = catch_varargs
        self.caller = caller

    @internalcode
    def __call__(self, *args, **kwargs):
        # try to consume the positional arguments
        arguments = list(args[:self._argument_count])
        off = len(arguments)

        # if the number of arguments consumed is not the number of
        # arguments expected we start filling in keyword arguments
        # and defaults.
        if off != self._argument_count:
            for idx, name in enumerate(self.arguments[len(arguments):]):
                try:
                    value = kwargs.pop(name)
                except KeyError:
                    try:
                        # defaults align with the *last* parameters, hence
                        # the negative-offset indexing
                        value = self.defaults[idx - self._argument_count + off]
                    except IndexError:
                        value = self._environment.undefined(
                            'parameter %r was not provided' % name, name=name)
                arguments.append(value)

        # it's important that the order of these arguments does not change
        # if not also changed in the compiler's `function_scoping` method.
        # the order is caller, keyword arguments, positional arguments!
        if self.caller:
            caller = kwargs.pop('caller', None)
            if caller is None:
                caller = self._environment.undefined('No caller defined',
                                                     name='caller')
            arguments.append(caller)
        if self.catch_kwargs:
            arguments.append(kwargs)
        elif kwargs:
            raise TypeError('macro %r takes no keyword argument %r' %
                            (self.name, next(iter(kwargs))))
        if self.catch_varargs:
            arguments.append(args[self._argument_count:])
        elif len(args) > self._argument_count:
            raise TypeError('macro %r takes not more than %d argument(s)' %
                            (self.name, len(self.arguments)))
        return self._func(*arguments)

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            self.name is None and 'anonymous' or repr(self.name)
        )
|
||||
|
||||
|
||||
@implements_to_string
class Undefined(object):
    """The default undefined type. This undefined type can be printed and
    iterated over, but every other access will raise an :exc:`UndefinedError`:

    >>> foo = Undefined(name='foo')
    >>> str(foo)
    ''
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    """
    __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
                 '_undefined_exception')

    def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
        self._undefined_hint = hint
        self._undefined_obj = obj
        self._undefined_name = name
        self._undefined_exception = exc

    @internalcode
    def _fail_with_undefined_error(self, *args, **kwargs):
        """Regular callback function for undefined objects that raises an
        `UndefinedError` on call.
        """
        if self._undefined_hint is None:
            # build the most specific message possible from what we know
            if self._undefined_obj is missing:
                hint = '%r is undefined' % self._undefined_name
            elif not isinstance(self._undefined_name, string_types):
                hint = '%s has no element %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
            else:
                hint = '%r has no attribute %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
        else:
            hint = self._undefined_hint
        raise self._undefined_exception(hint)

    @internalcode
    def __getattr__(self, name):
        # dunder lookups must fail normally so protocols keep working
        if name[:2] == '__':
            raise AttributeError(name)
        return self._fail_with_undefined_error()

    # every arithmetic / comparison / call / subscript operation fails
    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
        __float__ = __complex__ = __pow__ = __rpow__ = \
        _fail_with_undefined_error

    def __eq__(self, other):
        # all undefineds of the same type compare equal
        return type(self) is type(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return id(type(self))

    def __str__(self):
        return u''

    def __len__(self):
        return 0

    def __iter__(self):
        # iterating an undefined yields nothing
        if 0:
            yield None

    def __nonzero__(self):
        return False

    def __repr__(self):
        return 'Undefined'
|
||||
|
||||
|
||||
@implements_to_string
class DebugUndefined(Undefined):
    """An undefined that returns the debug info when printed.

    >>> foo = DebugUndefined(name='foo')
    >>> str(foo)
    '{{ foo }}'
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    """
    __slots__ = ()

    def __str__(self):
        # render a placeholder describing what was undefined instead of
        # the empty string, so missing variables are visible in output
        if self._undefined_hint is None:
            if self._undefined_obj is missing:
                return u'{{ %s }}' % self._undefined_name
            return '{{ no such element: %s[%r] }}' % (
                object_type_repr(self._undefined_obj),
                self._undefined_name
            )
        return u'{{ undefined value printed: %s }}' % self._undefined_hint
|
||||
|
||||
|
||||
@implements_to_string
class StrictUndefined(Undefined):
    """An undefined that barks on print and iteration as well as boolean
    tests and all kinds of comparisons. In other words: you can do nothing
    with it except checking if it's defined using the `defined` test.

    >>> foo = StrictUndefined(name='foo')
    >>> str(foo)
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    >>> not foo
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    >>> foo + 42
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    """
    __slots__ = ()
    # unlike the base class, even printing / truth-testing / hashing fails
    __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
        __ne__ = __bool__ = __hash__ = \
        Undefined._fail_with_undefined_error
|
||||
|
||||
|
||||
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
|
368
3rdparty/jinja2/sandbox.py
vendored
Normal file
368
3rdparty/jinja2/sandbox.py
vendored
Normal file
@ -0,0 +1,368 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.sandbox
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Adds a sandbox layer to Jinja as it was the default behavior in the old
|
||||
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
|
||||
default behavior is easier to use.
|
||||
|
||||
The behavior can be changed by subclassing the environment.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD.
|
||||
"""
|
||||
import operator
|
||||
from jinja2.environment import Environment
|
||||
from jinja2.exceptions import SecurityError
|
||||
from jinja2._compat import string_types, function_type, method_type, \
|
||||
traceback_type, code_type, frame_type, generator_type, PY2
|
||||
|
||||
|
||||
#: maximum number of items a range may produce
MAX_RANGE = 100000

#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
                                  'func_defaults', 'func_globals'])

#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])

#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
|
||||
|
||||
# On versions > python 2 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
if not PY2:
    UNSAFE_FUNCTION_ATTRIBUTES = set()

import warnings

# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
                        module='jinja2.sandbox')

from collections import deque

# base tuples of builtin mutable types; extended below with legacy and
# abstract-base-class variants where available
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)


# on python 2.x we can register the user collection types
try:
    from UserDict import UserDict, DictMixin
    from UserList import UserList
    _mutable_mapping_types += (UserDict, DictMixin)
    # UserList is a sequence type, not a set type, so register it with
    # the sequence spec; otherwise its mutating methods (append, sort,
    # extend, ...) would not be recognized by modifies_known_mutable.
    _mutable_sequence_types += (UserList,)
except ImportError:
    pass

# if sets is still available, register the mutable set from there as well
try:
    from sets import Set
    _mutable_set_types += (Set,)
except ImportError:
    pass

#: register the abstract base classes for mutable collections
try:
    try:
        # the ABCs live in ``collections.abc`` since Python 3.3 and were
        # removed from ``collections`` itself in Python 3.10
        from collections.abc import MutableSet, MutableMapping, \
            MutableSequence
    except ImportError:
        from collections import MutableSet, MutableMapping, MutableSequence
    _mutable_set_types += (MutableSet,)
    _mutable_mapping_types += (MutableMapping,)
    _mutable_sequence_types += (MutableSequence,)
except ImportError:
    pass

# maps each family of mutable types to the method names that mutate
# instances of that family; consulted by modifies_known_mutable()
_mutable_spec = (
    (_mutable_set_types, frozenset([
        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
        'symmetric_difference_update', 'update'
    ])),
    (_mutable_mapping_types, frozenset([
        'clear', 'pop', 'popitem', 'setdefault', 'update'
    ])),
    (_mutable_sequence_types, frozenset([
        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
    ])),
    (deque, frozenset([
        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
        'popleft', 'remove', 'rotate'
    ]))
)
|
||||
|
||||
|
||||
def safe_range(*args):
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.
    """
    result = range(*args)
    # refuse oversized ranges so sandboxed templates cannot exhaust memory
    if len(result) > MAX_RANGE:
        raise OverflowError('range too big, maximum size for range is %d' %
                            MAX_RANGE)
    return result
|
||||
|
||||
|
||||
def unsafe(f):
    """Marks a function or method as unsafe.

    ::

        @unsafe
        def delete(self):
            pass
    """
    # Tag the callable so the sandboxed environment refuses to invoke it.
    setattr(f, 'unsafe_callable', True)
    return f
|
||||
|
||||
|
||||
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute. For
    example this function returns `True` for the `func_code` attribute of
    python objects. This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(lambda: None, "func_code")
    True
    >>> is_internal_attribute((lambda x:x).func_code, 'co_code')
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # check object-type-specific unsafe attribute sets first
    if isinstance(obj, function_type):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, method_type):
        # function attributes are unsafe on bound methods too
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
           attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        # type.mro() exposes the class hierarchy
        if attr == 'mro':
            return True
    elif isinstance(obj, (code_type, traceback_type, frame_type)):
        # every attribute of code/traceback/frame objects is internal
        return True
    elif isinstance(obj, generator_type):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    # finally, all dunder attributes are considered internal
    return attr.startswith('__')
|
||||
|
||||
|
||||
def modifies_known_mutable(obj, attr):
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) would modify it if called. It also supports
    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
    with Python 2.6 onwards the abstract base classes `MutableSet`,
    `MutableMapping`, and `MutableSequence`.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object (such as unicode) `False` is
    returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # walk the (type-family, mutating-method-names) table and answer for
    # the first family the object belongs to
    for types, mutating_methods in _mutable_spec:
        if isinstance(obj, types):
            return attr in mutating_methods
    return False
|
||||
|
||||
|
||||
class SandboxedEnvironment(Environment):
|
||||
"""The sandboxed environment. It works like the regular environment but
|
||||
tells the compiler to generate sandboxed code. Additionally subclasses of
|
||||
this environment may override the methods that tell the runtime what
|
||||
attributes or functions are safe to access.
|
||||
|
||||
If the template tries to access insecure code a :exc:`SecurityError` is
|
||||
raised. However also other exceptions may occour during the rendering so
|
||||
the caller has to ensure that all exceptions are catched.
|
||||
"""
|
||||
sandboxed = True
|
||||
|
||||
#: default callback table for the binary operators. A copy of this is
|
||||
#: available on each instance of a sandboxed environment as
|
||||
#: :attr:`binop_table`
|
||||
default_binop_table = {
|
||||
'+': operator.add,
|
||||
'-': operator.sub,
|
||||
'*': operator.mul,
|
||||
'/': operator.truediv,
|
||||
'//': operator.floordiv,
|
||||
'**': operator.pow,
|
||||
'%': operator.mod
|
||||
}
|
||||
|
||||
#: default callback table for the unary operators. A copy of this is
|
||||
#: available on each instance of a sandboxed environment as
|
||||
#: :attr:`unop_table`
|
||||
default_unop_table = {
|
||||
'+': operator.pos,
|
||||
'-': operator.neg
|
||||
}
|
||||
|
||||
#: a set of binary operators that should be intercepted. Each operator
|
||||
#: that is added to this set (empty by default) is delegated to the
|
||||
#: :meth:`call_binop` method that will perform the operator. The default
|
||||
#: operator callback is specified by :attr:`binop_table`.
|
||||
#:
|
||||
#: The following binary operators are interceptable:
|
||||
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
|
||||
#:
|
||||
#: The default operation form the operator table corresponds to the
|
||||
#: builtin function. Intercepted calls are always slower than the native
|
||||
#: operator call, so make sure only to intercept the ones you are
|
||||
#: interested in.
|
||||
#:
|
||||
#: .. versionadded:: 2.6
|
||||
intercepted_binops = frozenset()
|
||||
|
||||
#: a set of unary operators that should be intercepted. Each operator
|
||||
#: that is added to this set (empty by default) is delegated to the
|
||||
#: :meth:`call_unop` method that will perform the operator. The default
|
||||
#: operator callback is specified by :attr:`unop_table`.
|
||||
#:
|
||||
#: The following unary operators are interceptable: ``+``, ``-``
|
||||
#:
|
||||
#: The default operation form the operator table corresponds to the
|
||||
#: builtin function. Intercepted calls are always slower than the native
|
||||
#: operator call, so make sure only to intercept the ones you are
|
||||
#: interested in.
|
||||
#:
|
||||
#: .. versionadded:: 2.6
|
||||
intercepted_unops = frozenset()
|
||||
|
||||
def intercept_unop(self, operator):
|
||||
"""Called during template compilation with the name of a unary
|
||||
operator to check if it should be intercepted at runtime. If this
|
||||
method returns `True`, :meth:`call_unop` is excuted for this unary
|
||||
operator. The default implementation of :meth:`call_unop` will use
|
||||
the :attr:`unop_table` dictionary to perform the operator with the
|
||||
same logic as the builtin one.
|
||||
|
||||
The following unary operators are interceptable: ``+`` and ``-``
|
||||
|
||||
Intercepted calls are always slower than the native operator call,
|
||||
so make sure only to intercept the ones you are interested in.
|
||||
|
||||
.. versionadded:: 2.6
|
||||
"""
|
||||
return False
|
||||
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
Environment.__init__(self, *args, **kwargs)
|
||||
self.globals['range'] = safe_range
|
||||
self.binop_table = self.default_binop_table.copy()
|
||||
self.unop_table = self.default_unop_table.copy()
|
||||
|
||||
def is_safe_attribute(self, obj, attr, value):
|
||||
"""The sandboxed environment will call this method to check if the
|
||||
attribute of an object is safe to access. Per default all attributes
|
||||
starting with an underscore are considered private as well as the
|
||||
special attributes of internal python objects as returned by the
|
||||
:func:`is_internal_attribute` function.
|
||||
"""
|
||||
return not (attr.startswith('_') or is_internal_attribute(obj, attr))
|
||||
|
||||
def is_safe_callable(self, obj):
|
||||
"""Check if an object is safely callable. Per default a function is
|
||||
considered safe unless the `unsafe_callable` attribute exists and is
|
||||
True. Override this method to alter the behavior, but this won't
|
||||
affect the `unsafe` decorator from this module.
|
||||
"""
|
||||
return not (getattr(obj, 'unsafe_callable', False) or
|
||||
getattr(obj, 'alters_data', False))
|
||||
|
||||
def call_binop(self, context, operator, left, right):
|
||||
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
|
||||
this function is executed instead of the builtin operator. This can
|
||||
be used to fine tune the behavior of certain operators.
|
||||
|
||||
.. versionadded:: 2.6
|
||||
"""
|
||||
return self.binop_table[operator](left, right)
|
||||
|
||||
def call_unop(self, context, operator, arg):
|
||||
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
|
||||
this function is executed instead of the builtin operator. This can
|
||||
be used to fine tune the behavior of certain operators.
|
||||
|
||||
.. versionadded:: 2.6
|
||||
"""
|
||||
return self.unop_table[operator](arg)
|
||||
|
||||
def getitem(self, obj, argument):
|
||||
"""Subscribe an object from sandboxed code."""
|
||||
try:
|
||||
return obj[argument]
|
||||
except (TypeError, LookupError):
|
||||
if isinstance(argument, string_types):
|
||||
try:
|
||||
attr = str(argument)
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
value = getattr(obj, attr)
|
||||
except AttributeError:
|
||||
pass
|
||||
else:
|
||||
if self.is_safe_attribute(obj, argument, value):
|
||||
return value
|
||||
return self.unsafe_undefined(obj, argument)
|
||||
return self.undefined(obj=obj, name=argument)
|
||||
|
||||
def getattr(self, obj, attribute):
|
||||
"""Subscribe an object from sandboxed code and prefer the
|
||||
attribute. The attribute passed *must* be a bytestring.
|
||||
"""
|
||||
try:
|
||||
value = getattr(obj, attribute)
|
||||
except AttributeError:
|
||||
try:
|
||||
return obj[attribute]
|
||||
except (TypeError, LookupError):
|
||||
pass
|
||||
else:
|
||||
if self.is_safe_attribute(obj, attribute, value):
|
||||
return value
|
||||
return self.unsafe_undefined(obj, attribute)
|
||||
return self.undefined(obj=obj, name=attribute)
|
||||
|
||||
def unsafe_undefined(self, obj, attribute):
|
||||
"""Return an undefined object for unsafe attributes."""
|
||||
return self.undefined('access to attribute %r of %r '
|
||||
'object is unsafe.' % (
|
||||
attribute,
|
||||
obj.__class__.__name__
|
||||
), name=attribute, obj=obj, exc=SecurityError)
|
||||
|
||||
def call(__self, __context, __obj, *args, **kwargs):
|
||||
"""Call an object from sandboxed code."""
|
||||
# the double prefixes are to avoid double keyword argument
|
||||
# errors when proxying the call.
|
||||
if not __self.is_safe_callable(__obj):
|
||||
raise SecurityError('%r is not safely callable' % (__obj,))
|
||||
return __context.call(__obj, *args, **kwargs)
|
||||
|
||||
|
||||
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
|
||||
"""Works exactly like the regular `SandboxedEnvironment` but does not
|
||||
permit modifications on the builtin mutable objects `list`, `set`, and
|
||||
`dict` by using the :func:`modifies_known_mutable` function.
|
||||
"""
|
||||
|
||||
def is_safe_attribute(self, obj, attr, value):
|
||||
if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
|
||||
return False
|
||||
return not modifies_known_mutable(obj, attr)
|
149
3rdparty/jinja2/tests.py
vendored
Normal file
149
3rdparty/jinja2/tests.py
vendored
Normal file
@ -0,0 +1,149 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.tests
|
||||
~~~~~~~~~~~~
|
||||
|
||||
Jinja test functions. Used with the "is" operator.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import re
|
||||
from jinja2.runtime import Undefined
|
||||
from jinja2._compat import text_type, string_types, mapping_types
|
||||
|
||||
|
||||
number_re = re.compile(r'^-?\d+(\.\d+)?$')
|
||||
regex_type = type(number_re)
|
||||
|
||||
|
||||
test_callable = callable
|
||||
|
||||
|
||||
def test_odd(value):
|
||||
"""Return true if the variable is odd."""
|
||||
return value % 2 == 1
|
||||
|
||||
|
||||
def test_even(value):
|
||||
"""Return true if the variable is even."""
|
||||
return value % 2 == 0
|
||||
|
||||
|
||||
def test_divisibleby(value, num):
|
||||
"""Check if a variable is divisible by a number."""
|
||||
return value % num == 0
|
||||
|
||||
|
||||
def test_defined(value):
|
||||
"""Return true if the variable is defined:
|
||||
|
||||
.. sourcecode:: jinja
|
||||
|
||||
{% if variable is defined %}
|
||||
value of variable: {{ variable }}
|
||||
{% else %}
|
||||
variable is not defined
|
||||
{% endif %}
|
||||
|
||||
See the :func:`default` filter for a simple way to set undefined
|
||||
variables.
|
||||
"""
|
||||
return not isinstance(value, Undefined)
|
||||
|
||||
|
||||
def test_undefined(value):
|
||||
"""Like :func:`defined` but the other way round."""
|
||||
return isinstance(value, Undefined)
|
||||
|
||||
|
||||
def test_none(value):
|
||||
"""Return true if the variable is none."""
|
||||
return value is None
|
||||
|
||||
|
||||
def test_lower(value):
|
||||
"""Return true if the variable is lowercased."""
|
||||
return text_type(value).islower()
|
||||
|
||||
|
||||
def test_upper(value):
|
||||
"""Return true if the variable is uppercased."""
|
||||
return text_type(value).isupper()
|
||||
|
||||
|
||||
def test_string(value):
|
||||
"""Return true if the object is a string."""
|
||||
return isinstance(value, string_types)
|
||||
|
||||
|
||||
def test_mapping(value):
|
||||
"""Return true if the object is a mapping (dict etc.).
|
||||
|
||||
.. versionadded:: 2.6
|
||||
"""
|
||||
return isinstance(value, mapping_types)
|
||||
|
||||
|
||||
def test_number(value):
|
||||
"""Return true if the variable is a number."""
|
||||
return isinstance(value, (int, float, complex))
|
||||
|
||||
|
||||
def test_sequence(value):
|
||||
"""Return true if the variable is a sequence. Sequences are variables
|
||||
that are iterable.
|
||||
"""
|
||||
try:
|
||||
len(value)
|
||||
value.__getitem__
|
||||
except:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def test_sameas(value, other):
|
||||
"""Check if an object points to the same memory address than another
|
||||
object:
|
||||
|
||||
.. sourcecode:: jinja
|
||||
|
||||
{% if foo.attribute is sameas false %}
|
||||
the foo attribute really is the `False` singleton
|
||||
{% endif %}
|
||||
"""
|
||||
return value is other
|
||||
|
||||
|
||||
def test_iterable(value):
|
||||
"""Check if it's possible to iterate over an object."""
|
||||
try:
|
||||
iter(value)
|
||||
except TypeError:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def test_escaped(value):
|
||||
"""Check if the value is escaped."""
|
||||
return hasattr(value, '__html__')
|
||||
|
||||
|
||||
TESTS = {
|
||||
'odd': test_odd,
|
||||
'even': test_even,
|
||||
'divisibleby': test_divisibleby,
|
||||
'defined': test_defined,
|
||||
'undefined': test_undefined,
|
||||
'none': test_none,
|
||||
'lower': test_lower,
|
||||
'upper': test_upper,
|
||||
'string': test_string,
|
||||
'mapping': test_mapping,
|
||||
'number': test_number,
|
||||
'sequence': test_sequence,
|
||||
'iterable': test_iterable,
|
||||
'callable': test_callable,
|
||||
'sameas': test_sameas,
|
||||
'escaped': test_escaped
|
||||
}
|
520
3rdparty/jinja2/utils.py
vendored
Normal file
520
3rdparty/jinja2/utils.py
vendored
Normal file
@ -0,0 +1,520 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.utils
|
||||
~~~~~~~~~~~~
|
||||
|
||||
Utility functions.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import re
|
||||
import errno
|
||||
from collections import deque
|
||||
from jinja2._compat import text_type, string_types, implements_iterator, \
|
||||
allocate_lock, url_quote
|
||||
|
||||
|
||||
_word_split_re = re.compile(r'(\s+)')
|
||||
_punctuation_re = re.compile(
|
||||
'^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
|
||||
'|'.join(map(re.escape, ('(', '<', '<'))),
|
||||
'|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '>')))
|
||||
)
|
||||
)
|
||||
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
|
||||
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
|
||||
_entity_re = re.compile(r'&([^;]+);')
|
||||
_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
|
||||
_digits = '0123456789'
|
||||
|
||||
# special singleton representing missing values for the runtime
|
||||
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
|
||||
|
||||
# internal code
|
||||
internal_code = set()
|
||||
|
||||
concat = u''.join
|
||||
|
||||
|
||||
def contextfunction(f):
|
||||
"""This decorator can be used to mark a function or method context callable.
|
||||
A context callable is passed the active :class:`Context` as first argument when
|
||||
called from the template. This is useful if a function wants to get access
|
||||
to the context or functions provided on the context object. For example
|
||||
a function that returns a sorted list of template variables the current
|
||||
template exports could look like this::
|
||||
|
||||
@contextfunction
|
||||
def get_exported_names(context):
|
||||
return sorted(context.exported_vars)
|
||||
"""
|
||||
f.contextfunction = True
|
||||
return f
|
||||
|
||||
|
||||
def evalcontextfunction(f):
|
||||
"""This decorator can be used to mark a function or method as an eval
|
||||
context callable. This is similar to the :func:`contextfunction`
|
||||
but instead of passing the context, an evaluation context object is
|
||||
passed. For more information about the eval context, see
|
||||
:ref:`eval-context`.
|
||||
|
||||
.. versionadded:: 2.4
|
||||
"""
|
||||
f.evalcontextfunction = True
|
||||
return f
|
||||
|
||||
|
||||
def environmentfunction(f):
|
||||
"""This decorator can be used to mark a function or method as environment
|
||||
callable. This decorator works exactly like the :func:`contextfunction`
|
||||
decorator just that the first argument is the active :class:`Environment`
|
||||
and not context.
|
||||
"""
|
||||
f.environmentfunction = True
|
||||
return f
|
||||
|
||||
|
||||
def internalcode(f):
|
||||
"""Marks the function as internally used"""
|
||||
internal_code.add(f.__code__)
|
||||
return f
|
||||
|
||||
|
||||
def is_undefined(obj):
|
||||
"""Check if the object passed is undefined. This does nothing more than
|
||||
performing an instance check against :class:`Undefined` but looks nicer.
|
||||
This can be used for custom filters or tests that want to react to
|
||||
undefined variables. For example a custom default filter can look like
|
||||
this::
|
||||
|
||||
def default(var, default=''):
|
||||
if is_undefined(var):
|
||||
return default
|
||||
return var
|
||||
"""
|
||||
from jinja2.runtime import Undefined
|
||||
return isinstance(obj, Undefined)
|
||||
|
||||
|
||||
def consume(iterable):
|
||||
"""Consumes an iterable without doing anything with it."""
|
||||
for event in iterable:
|
||||
pass
|
||||
|
||||
|
||||
def clear_caches():
|
||||
"""Jinja2 keeps internal caches for environments and lexers. These are
|
||||
used so that Jinja2 doesn't have to recreate environments and lexers all
|
||||
the time. Normally you don't have to care about that but if you are
|
||||
messuring memory consumption you may want to clean the caches.
|
||||
"""
|
||||
from jinja2.environment import _spontaneous_environments
|
||||
from jinja2.lexer import _lexer_cache
|
||||
_spontaneous_environments.clear()
|
||||
_lexer_cache.clear()
|
||||
|
||||
|
||||
def import_string(import_name, silent=False):
|
||||
"""Imports an object based on a string. This is useful if you want to
|
||||
use import paths as endpoints or something similar. An import path can
|
||||
be specified either in dotted notation (``xml.sax.saxutils.escape``)
|
||||
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
|
||||
|
||||
If the `silent` is True the return value will be `None` if the import
|
||||
fails.
|
||||
|
||||
:return: imported object
|
||||
"""
|
||||
try:
|
||||
if ':' in import_name:
|
||||
module, obj = import_name.split(':', 1)
|
||||
elif '.' in import_name:
|
||||
items = import_name.split('.')
|
||||
module = '.'.join(items[:-1])
|
||||
obj = items[-1]
|
||||
else:
|
||||
return __import__(import_name)
|
||||
return getattr(__import__(module, None, None, [obj]), obj)
|
||||
except (ImportError, AttributeError):
|
||||
if not silent:
|
||||
raise
|
||||
|
||||
|
||||
def open_if_exists(filename, mode='rb'):
|
||||
"""Returns a file descriptor for the filename if that file exists,
|
||||
otherwise `None`.
|
||||
"""
|
||||
try:
|
||||
return open(filename, mode)
|
||||
except IOError as e:
|
||||
if e.errno not in (errno.ENOENT, errno.EISDIR):
|
||||
raise
|
||||
|
||||
|
||||
def object_type_repr(obj):
|
||||
"""Returns the name of the object's type. For some recognized
|
||||
singletons the name of the object is returned instead. (For
|
||||
example for `None` and `Ellipsis`).
|
||||
"""
|
||||
if obj is None:
|
||||
return 'None'
|
||||
elif obj is Ellipsis:
|
||||
return 'Ellipsis'
|
||||
# __builtin__ in 2.x, builtins in 3.x
|
||||
if obj.__class__.__module__ in ('__builtin__', 'builtins'):
|
||||
name = obj.__class__.__name__
|
||||
else:
|
||||
name = obj.__class__.__module__ + '.' + obj.__class__.__name__
|
||||
return '%s object' % name
|
||||
|
||||
|
||||
def pformat(obj, verbose=False):
|
||||
"""Prettyprint an object. Either use the `pretty` library or the
|
||||
builtin `pprint`.
|
||||
"""
|
||||
try:
|
||||
from pretty import pretty
|
||||
return pretty(obj, verbose=verbose)
|
||||
except ImportError:
|
||||
from pprint import pformat
|
||||
return pformat(obj)
|
||||
|
||||
|
||||
def urlize(text, trim_url_limit=None, nofollow=False):
|
||||
"""Converts any URLs in text into clickable links. Works on http://,
|
||||
https:// and www. links. Links can have trailing punctuation (periods,
|
||||
commas, close-parens) and leading punctuation (opening parens) and
|
||||
it'll still do the right thing.
|
||||
|
||||
If trim_url_limit is not None, the URLs in link text will be limited
|
||||
to trim_url_limit characters.
|
||||
|
||||
If nofollow is True, the URLs in link text will get a rel="nofollow"
|
||||
attribute.
|
||||
"""
|
||||
trim_url = lambda x, limit=trim_url_limit: limit is not None \
|
||||
and (x[:limit] + (len(x) >=limit and '...'
|
||||
or '')) or x
|
||||
words = _word_split_re.split(text_type(escape(text)))
|
||||
nofollow_attr = nofollow and ' rel="nofollow"' or ''
|
||||
for i, word in enumerate(words):
|
||||
match = _punctuation_re.match(word)
|
||||
if match:
|
||||
lead, middle, trail = match.groups()
|
||||
if middle.startswith('www.') or (
|
||||
'@' not in middle and
|
||||
not middle.startswith('http://') and
|
||||
not middle.startswith('https://') and
|
||||
len(middle) > 0 and
|
||||
middle[0] in _letters + _digits and (
|
||||
middle.endswith('.org') or
|
||||
middle.endswith('.net') or
|
||||
middle.endswith('.com')
|
||||
)):
|
||||
middle = '<a href="http://%s"%s>%s</a>' % (middle,
|
||||
nofollow_attr, trim_url(middle))
|
||||
if middle.startswith('http://') or \
|
||||
middle.startswith('https://'):
|
||||
middle = '<a href="%s"%s>%s</a>' % (middle,
|
||||
nofollow_attr, trim_url(middle))
|
||||
if '@' in middle and not middle.startswith('www.') and \
|
||||
not ':' in middle and _simple_email_re.match(middle):
|
||||
middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
|
||||
if lead + middle + trail != word:
|
||||
words[i] = lead + middle + trail
|
||||
return u''.join(words)
|
||||
|
||||
|
||||
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
|
||||
"""Generate some lorem impsum for the template."""
|
||||
from jinja2.constants import LOREM_IPSUM_WORDS
|
||||
from random import choice, randrange
|
||||
words = LOREM_IPSUM_WORDS.split()
|
||||
result = []
|
||||
|
||||
for _ in range(n):
|
||||
next_capitalized = True
|
||||
last_comma = last_fullstop = 0
|
||||
word = None
|
||||
last = None
|
||||
p = []
|
||||
|
||||
# each paragraph contains out of 20 to 100 words.
|
||||
for idx, _ in enumerate(range(randrange(min, max))):
|
||||
while True:
|
||||
word = choice(words)
|
||||
if word != last:
|
||||
last = word
|
||||
break
|
||||
if next_capitalized:
|
||||
word = word.capitalize()
|
||||
next_capitalized = False
|
||||
# add commas
|
||||
if idx - randrange(3, 8) > last_comma:
|
||||
last_comma = idx
|
||||
last_fullstop += 2
|
||||
word += ','
|
||||
# add end of sentences
|
||||
if idx - randrange(10, 20) > last_fullstop:
|
||||
last_comma = last_fullstop = idx
|
||||
word += '.'
|
||||
next_capitalized = True
|
||||
p.append(word)
|
||||
|
||||
# ensure that the paragraph ends with a dot.
|
||||
p = u' '.join(p)
|
||||
if p.endswith(','):
|
||||
p = p[:-1] + '.'
|
||||
elif not p.endswith('.'):
|
||||
p += '.'
|
||||
result.append(p)
|
||||
|
||||
if not html:
|
||||
return u'\n\n'.join(result)
|
||||
return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
|
||||
|
||||
|
||||
def unicode_urlencode(obj, charset='utf-8'):
|
||||
"""URL escapes a single bytestring or unicode string with the
|
||||
given charset if applicable to URL safe quoting under all rules
|
||||
that need to be considered under all supported Python versions.
|
||||
|
||||
If non strings are provided they are converted to their unicode
|
||||
representation first.
|
||||
"""
|
||||
if not isinstance(obj, string_types):
|
||||
obj = text_type(obj)
|
||||
if isinstance(obj, text_type):
|
||||
obj = obj.encode(charset)
|
||||
return text_type(url_quote(obj))
|
||||
|
||||
|
||||
class LRUCache(object):
|
||||
"""A simple LRU Cache implementation."""
|
||||
|
||||
# this is fast for small capacities (something below 1000) but doesn't
|
||||
# scale. But as long as it's only used as storage for templates this
|
||||
# won't do any harm.
|
||||
|
||||
def __init__(self, capacity):
|
||||
self.capacity = capacity
|
||||
self._mapping = {}
|
||||
self._queue = deque()
|
||||
self._postinit()
|
||||
|
||||
def _postinit(self):
|
||||
# alias all queue methods for faster lookup
|
||||
self._popleft = self._queue.popleft
|
||||
self._pop = self._queue.pop
|
||||
self._remove = self._queue.remove
|
||||
self._wlock = allocate_lock()
|
||||
self._append = self._queue.append
|
||||
|
||||
def __getstate__(self):
|
||||
return {
|
||||
'capacity': self.capacity,
|
||||
'_mapping': self._mapping,
|
||||
'_queue': self._queue
|
||||
}
|
||||
|
||||
def __setstate__(self, d):
|
||||
self.__dict__.update(d)
|
||||
self._postinit()
|
||||
|
||||
def __getnewargs__(self):
|
||||
return (self.capacity,)
|
||||
|
||||
def copy(self):
|
||||
"""Return a shallow copy of the instance."""
|
||||
rv = self.__class__(self.capacity)
|
||||
rv._mapping.update(self._mapping)
|
||||
rv._queue = deque(self._queue)
|
||||
return rv
|
||||
|
||||
def get(self, key, default=None):
|
||||
"""Return an item from the cache dict or `default`"""
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
return default
|
||||
|
||||
def setdefault(self, key, default=None):
|
||||
"""Set `default` if the key is not in the cache otherwise
|
||||
leave unchanged. Return the value of this key.
|
||||
"""
|
||||
self._wlock.acquire()
|
||||
try:
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
self[key] = default
|
||||
return default
|
||||
finally:
|
||||
self._wlock.release()
|
||||
|
||||
def clear(self):
|
||||
"""Clear the cache."""
|
||||
self._wlock.acquire()
|
||||
try:
|
||||
self._mapping.clear()
|
||||
self._queue.clear()
|
||||
finally:
|
||||
self._wlock.release()
|
||||
|
||||
def __contains__(self, key):
|
||||
"""Check if a key exists in this cache."""
|
||||
return key in self._mapping
|
||||
|
||||
def __len__(self):
|
||||
"""Return the current size of the cache."""
|
||||
return len(self._mapping)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s %r>' % (
|
||||
self.__class__.__name__,
|
||||
self._mapping
|
||||
)
|
||||
|
||||
def __getitem__(self, key):
|
||||
"""Get an item from the cache. Moves the item up so that it has the
|
||||
highest priority then.
|
||||
|
||||
Raise a `KeyError` if it does not exist.
|
||||
"""
|
||||
self._wlock.acquire()
|
||||
try:
|
||||
rv = self._mapping[key]
|
||||
if self._queue[-1] != key:
|
||||
try:
|
||||
self._remove(key)
|
||||
except ValueError:
|
||||
# if something removed the key from the container
|
||||
# when we read, ignore the ValueError that we would
|
||||
# get otherwise.
|
||||
pass
|
||||
self._append(key)
|
||||
return rv
|
||||
finally:
|
||||
self._wlock.release()
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
"""Sets the value for an item. Moves the item up so that it
|
||||
has the highest priority then.
|
||||
"""
|
||||
self._wlock.acquire()
|
||||
try:
|
||||
if key in self._mapping:
|
||||
self._remove(key)
|
||||
elif len(self._mapping) == self.capacity:
|
||||
del self._mapping[self._popleft()]
|
||||
self._append(key)
|
||||
self._mapping[key] = value
|
||||
finally:
|
||||
self._wlock.release()
|
||||
|
||||
def __delitem__(self, key):
|
||||
"""Remove an item from the cache dict.
|
||||
Raise a `KeyError` if it does not exist.
|
||||
"""
|
||||
self._wlock.acquire()
|
||||
try:
|
||||
del self._mapping[key]
|
||||
try:
|
||||
self._remove(key)
|
||||
except ValueError:
|
||||
# __getitem__ is not locked, it might happen
|
||||
pass
|
||||
finally:
|
||||
self._wlock.release()
|
||||
|
||||
def items(self):
|
||||
"""Return a list of items."""
|
||||
result = [(key, self._mapping[key]) for key in list(self._queue)]
|
||||
result.reverse()
|
||||
return result
|
||||
|
||||
def iteritems(self):
|
||||
"""Iterate over all items."""
|
||||
return iter(self.items())
|
||||
|
||||
def values(self):
|
||||
"""Return a list of all values."""
|
||||
return [x[1] for x in self.items()]
|
||||
|
||||
def itervalue(self):
|
||||
"""Iterate over all values."""
|
||||
return iter(self.values())
|
||||
|
||||
def keys(self):
|
||||
"""Return a list of all keys ordered by most recent usage."""
|
||||
return list(self)
|
||||
|
||||
def iterkeys(self):
|
||||
"""Iterate over all keys in the cache dict, ordered by
|
||||
the most recent usage.
|
||||
"""
|
||||
return reversed(tuple(self._queue))
|
||||
|
||||
__iter__ = iterkeys
|
||||
|
||||
def __reversed__(self):
|
||||
"""Iterate over the values in the cache dict, oldest items
|
||||
coming first.
|
||||
"""
|
||||
return iter(tuple(self._queue))
|
||||
|
||||
__copy__ = copy
|
||||
|
||||
|
||||
# register the LRU cache as mutable mapping if possible
|
||||
try:
|
||||
from collections import MutableMapping
|
||||
MutableMapping.register(LRUCache)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
@implements_iterator
|
||||
class Cycler(object):
|
||||
"""A cycle helper for templates."""
|
||||
|
||||
def __init__(self, *items):
|
||||
if not items:
|
||||
raise RuntimeError('at least one item has to be provided')
|
||||
self.items = items
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
"""Resets the cycle."""
|
||||
self.pos = 0
|
||||
|
||||
@property
|
||||
def current(self):
|
||||
"""Returns the current item."""
|
||||
return self.items[self.pos]
|
||||
|
||||
def __next__(self):
|
||||
"""Goes one item ahead and returns it."""
|
||||
rv = self.current
|
||||
self.pos = (self.pos + 1) % len(self.items)
|
||||
return rv
|
||||
|
||||
|
||||
class Joiner(object):
|
||||
"""A joining helper for templates."""
|
||||
|
||||
def __init__(self, sep=u', '):
|
||||
self.sep = sep
|
||||
self.used = False
|
||||
|
||||
def __call__(self):
|
||||
if not self.used:
|
||||
self.used = True
|
||||
return u''
|
||||
return self.sep
|
||||
|
||||
|
||||
# Imported here because that's where it was in the past
|
||||
from markupsafe import Markup, escape, soft_unicode
|
87
3rdparty/jinja2/visitor.py
vendored
Normal file
87
3rdparty/jinja2/visitor.py
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
jinja2.visitor
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
This module implements a visitor for the nodes.
|
||||
|
||||
:copyright: (c) 2010 by the Jinja Team.
|
||||
:license: BSD.
|
||||
"""
|
||||
from jinja2.nodes import Node
|
||||
|
||||
|
||||
class NodeVisitor(object):
|
||||
"""Walks the abstract syntax tree and call visitor functions for every
|
||||
node found. The visitor functions may return values which will be
|
||||
forwarded by the `visit` method.
|
||||
|
||||
Per default the visitor functions for the nodes are ``'visit_'`` +
|
||||
class name of the node. So a `TryFinally` node visit function would
|
||||
be `visit_TryFinally`. This behavior can be changed by overriding
|
||||
the `get_visitor` function. If no visitor function exists for a node
|
||||
(return value `None`) the `generic_visit` visitor is used instead.
|
||||
"""
|
||||
|
||||
def get_visitor(self, node):
|
||||
"""Return the visitor function for this node or `None` if no visitor
|
||||
exists for this node. In that case the generic visit function is
|
||||
used instead.
|
||||
"""
|
||||
method = 'visit_' + node.__class__.__name__
|
||||
return getattr(self, method, None)
|
||||
|
||||
def visit(self, node, *args, **kwargs):
|
||||
"""Visit a node."""
|
||||
f = self.get_visitor(node)
|
||||
if f is not None:
|
||||
return f(node, *args, **kwargs)
|
||||
return self.generic_visit(node, *args, **kwargs)
|
||||
|
||||
def generic_visit(self, node, *args, **kwargs):
|
||||
"""Called if no explicit visitor function exists for a node."""
|
||||
for node in node.iter_child_nodes():
|
||||
self.visit(node, *args, **kwargs)
|
||||
|
||||
|
||||
class NodeTransformer(NodeVisitor):
|
||||
"""Walks the abstract syntax tree and allows modifications of nodes.
|
||||
|
||||
The `NodeTransformer` will walk the AST and use the return value of the
|
||||
visitor functions to replace or remove the old node. If the return
|
||||
value of the visitor function is `None` the node will be removed
|
||||
from the previous location otherwise it's replaced with the return
|
||||
value. The return value may be the original node in which case no
|
||||
replacement takes place.
|
||||
"""
|
||||
|
||||
def generic_visit(self, node, *args, **kwargs):
|
||||
for field, old_value in node.iter_fields():
|
||||
if isinstance(old_value, list):
|
||||
new_values = []
|
||||
for value in old_value:
|
||||
if isinstance(value, Node):
|
||||
value = self.visit(value, *args, **kwargs)
|
||||
if value is None:
|
||||
continue
|
||||
elif not isinstance(value, Node):
|
||||
new_values.extend(value)
|
||||
continue
|
||||
new_values.append(value)
|
||||
old_value[:] = new_values
|
||||
elif isinstance(old_value, Node):
|
||||
new_node = self.visit(old_value, *args, **kwargs)
|
||||
if new_node is None:
|
||||
delattr(node, field)
|
||||
else:
|
||||
setattr(node, field, new_node)
|
||||
return node
|
||||
|
||||
def visit_list(self, node, *args, **kwargs):
|
||||
"""As transformers may return lists in some places this method
|
||||
can be used to enforce a list as return value.
|
||||
"""
|
||||
rv = self.visit(node, *args, **kwargs)
|
||||
if not isinstance(rv, list):
|
||||
rv = [rv]
|
||||
return rv
|
@ -280,7 +280,7 @@ set(OPENCV_EXTRA_MODULES_PATH "" CACHE PATH "Where to look for additional OpenCV
|
||||
# ----------------------------------------------------------------------------
|
||||
# Autodetect if we are in a GIT repository
|
||||
# ----------------------------------------------------------------------------
|
||||
find_package(Git QUIET)
|
||||
find_host_package(Git QUIET)
|
||||
|
||||
if(GIT_FOUND)
|
||||
execute_process(COMMAND "${GIT_EXECUTABLE}" describe --tags --always --dirty --match "2.[0-9].[0-9]*"
|
||||
@ -393,6 +393,9 @@ if(WITH_OPENCL)
|
||||
include(cmake/OpenCVDetectOpenCL.cmake)
|
||||
endif()
|
||||
|
||||
# --- Matlab/Octave ---
|
||||
include(cmake/OpenCVFindMatlab.cmake)
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Solution folders:
|
||||
# ----------------------------------------------------------------------------
|
||||
@ -829,6 +832,14 @@ if(NOT ANDROID)
|
||||
endif()
|
||||
status(" Java tests:" BUILD_TESTS AND (CAN_BUILD_ANDROID_PROJECTS OR HAVE_opencv_java) THEN YES ELSE NO)
|
||||
|
||||
# ========================= matlab =========================
|
||||
status("")
|
||||
status(" Matlab:")
|
||||
status(" mex:" MATLAB_MEX_SCRIPT THEN "${MATLAB_MEX_SCRIPT}" ELSE NO)
|
||||
if (MATLAB_FOUND)
|
||||
status(" Compiler/generator:" MEX_WORKS THEN "Working" ELSE "Not working (bindings will not be generated)")
|
||||
endif()
|
||||
|
||||
# ========================== documentation ==========================
|
||||
if(BUILD_DOCS)
|
||||
status("")
|
||||
@ -840,6 +851,7 @@ if(BUILD_DOCS)
|
||||
endif()
|
||||
status(" Sphinx:" HAVE_SPHINX THEN "${SPHINX_BUILD} (ver ${SPHINX_VERSION})" ELSE NO)
|
||||
status(" PdfLaTeX compiler:" PDFLATEX_COMPILER THEN "${PDFLATEX_COMPILER}" ELSE NO)
|
||||
status(" PlantUML:" PLANTUML THEN "${PLANTUML}" ELSE NO)
|
||||
endif()
|
||||
|
||||
# ========================== samples and tests ==========================
|
||||
|
@ -283,7 +283,7 @@ macro(add_android_project target path)
|
||||
ocv_include_modules_recurse(${android_proj_NATIVE_DEPS})
|
||||
ocv_include_directories("${path}/jni")
|
||||
|
||||
if (NATIVE_APP_GLUE AND 0)
|
||||
if(NATIVE_APP_GLUE)
|
||||
include_directories(${ANDROID_NDK}/sources/android/native_app_glue)
|
||||
list(APPEND android_proj_jni_files ${ANDROID_NDK}/sources/android/native_app_glue/android_native_app_glue.c)
|
||||
ocv_warnings_disable(CMAKE_C_FLAGS -Wstrict-prototypes -Wunused-parameter -Wmissing-prototypes)
|
||||
|
@ -78,6 +78,7 @@ endif()
|
||||
|
||||
if(BUILD_DOCS)
|
||||
find_host_program(SPHINX_BUILD sphinx-build)
|
||||
find_host_program(PLANTUML plantuml)
|
||||
if(SPHINX_BUILD)
|
||||
execute_process(COMMAND "${SPHINX_BUILD}"
|
||||
OUTPUT_QUIET
|
||||
|
199
cmake/OpenCVFindMatlab.cmake
Normal file
199
cmake/OpenCVFindMatlab.cmake
Normal file
@ -0,0 +1,199 @@
|
||||
# ----- Find Matlab/Octave -----
|
||||
#
|
||||
# OpenCVFindMatlab.cmake attempts to locate the install path of Matlab in order
|
||||
# to extract the mex headers, libraries and shell scripts. If found
|
||||
# successfully, the following variables will be defined
|
||||
#
|
||||
# MATLAB_FOUND: true/false
|
||||
# MATLAB_ROOT_DIR: Root of Matlab installation
|
||||
# MATLAB_BIN: The main Matlab "executable" (shell script)
|
||||
# MATLAB_MEX_SCRIPT: The mex script used to compile mex files
|
||||
# MATLAB_INCLUDE_DIRS:Path to "mex.h"
|
||||
# MATLAB_LIBRARY_DIRS:Path to mex and matrix libraries
|
||||
# MATLAB_LIBRARIES: The Matlab libs, usually mx, mex, mat
|
||||
# MATLAB_MEXEXT: The mex library extension. It will be one of:
|
||||
# mexwin32, mexwin64, mexglx, mexa64, mexmac,
|
||||
# mexmaci, mexmaci64, mexsol, mexs64
|
||||
# MATLAB_ARCH: The installation architecture. It is **usually**
|
||||
# the MEXEXT with the preceding "mex" removed,
|
||||
# though it's different for linux distros.
|
||||
#
|
||||
# There doesn't appear to be an elegant way to detect all versions of Matlab
|
||||
# across different platforms. If you know the matlab path and want to avoid
|
||||
# the search, you can define the path to the Matlab root when invoking cmake:
|
||||
#
|
||||
# cmake -DMATLAB_ROOT_DIR='/PATH/TO/ROOT_DIR' ..
|
||||
|
||||
|
||||
|
||||
# ----- set_library_presuffix -----
|
||||
#
|
||||
# Matlab tends to use some non-standard prefixes and suffixes on its libraries.
|
||||
# For example, libmx.dll on Windows (Windows does not add prefixes) and
|
||||
# mkl.dylib on OS X (OS X uses "lib" prefixes).
|
||||
# On some versions of Windows the .dll suffix also appears to not be checked.
|
||||
#
|
||||
# This function modifies the library prefixes and suffixes used by
|
||||
# find_library when finding Matlab libraries. It does not affect scopes
|
||||
# outside of this file.
|
||||
function(set_libarch_prefix_suffix)
|
||||
if (UNIX AND NOT APPLE)
|
||||
set(CMAKE_FIND_LIBRARY_PREFIXES "lib" PARENT_SCOPE)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".so" ".a" PARENT_SCOPE)
|
||||
elseif (APPLE)
|
||||
set(CMAKE_FIND_LIBRARY_PREFIXES "lib" PARENT_SCOPE)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib" ".a" PARENT_SCOPE)
|
||||
elseif (WIN32)
|
||||
set(CMAKE_FIND_LIBRARY_PREFIXES "lib" PARENT_SCOPE)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".lib" ".dll" PARENT_SCOPE)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
|
||||
|
||||
# ----- locate_matlab_root -----
|
||||
#
|
||||
# Attempt to find the path to the Matlab installation. If successful, sets
|
||||
# the absolute path in the variable MATLAB_ROOT_DIR
|
||||
function(locate_matlab_root)
|
||||
|
||||
# --- UNIX/APPLE ---
|
||||
if (UNIX)
|
||||
# possible root locations, in order of likelihood
|
||||
set(SEARCH_DIRS_ /Applications /usr/local /opt/local /usr /opt)
|
||||
foreach (DIR_ ${SEARCH_DIRS_})
|
||||
file(GLOB MATLAB_ROOT_DIR_ ${DIR_}/*matlab*)
|
||||
if (MATLAB_ROOT_DIR_)
|
||||
# sort in order from highest to lowest
|
||||
# normally it's in the format MATLAB_R[20XX][A/B]
|
||||
# TODO: numerical rather than lexicographic sort. However,
|
||||
# CMake does not support floating-point MATH(EXPR ...) at this time.
|
||||
list(SORT MATLAB_ROOT_DIR_)
|
||||
list(REVERSE MATLAB_ROOT_DIR_)
|
||||
list(GET MATLAB_ROOT_DIR_ 0 MATLAB_ROOT_DIR_)
|
||||
set(MATLAB_ROOT_DIR ${MATLAB_ROOT_DIR_} PARENT_SCOPE)
|
||||
return()
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
# --- WINDOWS ---
|
||||
elseif (WIN32)
|
||||
# 1. search the path environment variable
|
||||
find_program(MATLAB_ROOT_DIR_ matlab PATHS ENV PATH)
|
||||
if (MATLAB_ROOT_DIR_)
|
||||
# get the root directory from the full path
|
||||
# /path/to/matlab/rootdir/bin/matlab.exe
|
||||
get_filename_component(MATLAB_ROOT_DIR_ ${MATLAB_ROOT_DIR_} PATH)
|
||||
get_filename_component(MATLAB_ROOT_DIR_ ${MATLAB_ROOT_DIR_} PATH)
|
||||
set(MATLAB_ROOT_DIR ${MATLAB_ROOT_DIR_} PARENT_SCOPE)
|
||||
return()
|
||||
endif()
|
||||
|
||||
# 2. search the registry
|
||||
# determine the available Matlab versions
|
||||
set(REG_EXTENSION_ "SOFTWARE\\Mathworks\\MATLAB")
|
||||
set(REG_ROOTS_ "HKEY_LOCAL_MACHINE" "HKEY_CURRENT_USER")
|
||||
foreach(REG_ROOT_ ${REG_ROOTS_})
|
||||
execute_process(COMMAND reg query "${REG_ROOT_}\\${REG_EXTENSION_}" OUTPUT_VARIABLE QUERY_RESPONSE_)
|
||||
if (QUERY_RESPONSE_)
|
||||
string(REGEX MATCHALL "[0-9]\\.[0-9]" VERSION_STRINGS_ ${QUERY_RESPONSE_})
|
||||
list(APPEND VERSIONS_ ${VERSION_STRINGS_})
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
# select the highest version
|
||||
list(APPEND VERSIONS_ "0.0")
|
||||
list(SORT VERSIONS_)
|
||||
list(REVERSE VERSIONS_)
|
||||
list(GET VERSIONS_ 0 VERSION_)
|
||||
|
||||
# request the MATLABROOT from the registry
|
||||
foreach(REG_ROOT_ ${REG_ROOTS_})
|
||||
get_filename_component(QUERY_RESPONSE_ [${REG_ROOT_}\\${REG_EXTENSION_}\\${VERSION_};MATLABROOT] ABSOLUTE)
|
||||
if (NOT ${QUERY_RESPONSE_} MATCHES "registry$")
|
||||
set(MATLAB_ROOT_DIR ${QUERY_RESPONSE_} PARENT_SCOPE)
|
||||
return()
|
||||
endif()
|
||||
endforeach()
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
|
||||
|
||||
# ----- locate_matlab_components -----
|
||||
#
|
||||
# Given a directory MATLAB_ROOT_DIR, attempt to find the Matlab components
|
||||
# (include directory and libraries) under the root. If everything is found,
|
||||
# sets the variable MATLAB_FOUND to TRUE
|
||||
function(locate_matlab_components MATLAB_ROOT_DIR)
|
||||
# get the mex extension
|
||||
find_file(MATLAB_MEXEXT_SCRIPT_ NAMES mexext mexext.bat PATHS ${MATLAB_ROOT_DIR}/bin NO_DEFAULT_PATH)
|
||||
execute_process(COMMAND ${MATLAB_MEXEXT_SCRIPT_}
|
||||
OUTPUT_VARIABLE MATLAB_MEXEXT_
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
if (NOT MATLAB_MEXEXT_)
|
||||
return()
|
||||
endif()
|
||||
|
||||
# map the mexext to an architecture extension
|
||||
set(ARCHITECTURES_ "maci64" "maci" "glnxa64" "glnx64" "sol64" "sola64" "win32" "win64" )
|
||||
foreach(ARCHITECTURE_ ${ARCHITECTURES_})
|
||||
if(EXISTS ${MATLAB_ROOT_DIR}/bin/${ARCHITECTURE_})
|
||||
set(MATLAB_ARCH_ ${ARCHITECTURE_})
|
||||
break()
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
# get the path to the libraries
|
||||
set(MATLAB_LIBRARY_DIRS_ ${MATLAB_ROOT_DIR}/bin/${MATLAB_ARCH_})
|
||||
|
||||
# get the libraries
|
||||
set_libarch_prefix_suffix()
|
||||
find_library(MATLAB_LIB_MX_ mx PATHS ${MATLAB_LIBRARY_DIRS_} NO_DEFAULT_PATH)
|
||||
find_library(MATLAB_LIB_MEX_ mex PATHS ${MATLAB_LIBRARY_DIRS_} NO_DEFAULT_PATH)
|
||||
find_library(MATLAB_LIB_MAT_ mat PATHS ${MATLAB_LIBRARY_DIRS_} NO_DEFAULT_PATH)
|
||||
set(MATLAB_LIBRARIES_ ${MATLAB_LIB_MX_} ${MATLAB_LIB_MEX_} ${MATLAB_LIB_MAT_})
|
||||
|
||||
# get the include path
|
||||
find_path(MATLAB_INCLUDE_DIRS_ mex.h ${MATLAB_ROOT_DIR}/extern/include)
|
||||
|
||||
# get the mex shell script
|
||||
find_program(MATLAB_MEX_SCRIPT_ NAMES mex mex.bat PATHS ${MATLAB_ROOT_DIR}/bin NO_DEFAULT_PATH)
|
||||
|
||||
# get the Matlab executable
|
||||
find_program(MATLAB_BIN_ NAMES matlab PATHS ${MATLAB_ROOT_DIR}/bin NO_DEFAULT_PATH)
|
||||
|
||||
# export into parent scope
|
||||
if (MATLAB_MEX_SCRIPT_ AND MATLAB_LIBRARIES_ AND MATLAB_INCLUDE_DIRS_)
|
||||
set(MATLAB_BIN ${MATLAB_BIN_} PARENT_SCOPE)
|
||||
set(MATLAB_MEX_SCRIPT ${MATLAB_MEX_SCRIPT_} PARENT_SCOPE)
|
||||
set(MATLAB_INCLUDE_DIRS ${MATLAB_INCLUDE_DIRS_} PARENT_SCOPE)
|
||||
set(MATLAB_LIBRARIES ${MATLAB_LIBRARIES_} PARENT_SCOPE)
|
||||
set(MATLAB_LIBRARY_DIRS ${MATLAB_LIBRARY_DIRS_} PARENT_SCOPE)
|
||||
set(MATLAB_MEXEXT ${MATLAB_MEXEXT_} PARENT_SCOPE)
|
||||
set(MATLAB_ARCH ${MATLAB_ARCH_} PARENT_SCOPE)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# FIND MATLAB COMPONENTS
|
||||
# ----------------------------------------------------------------------------
|
||||
if (NOT MATLAB_FOUND)
|
||||
|
||||
# attempt to find the Matlab root folder
|
||||
if (NOT MATLAB_ROOT_DIR)
|
||||
locate_matlab_root()
|
||||
endif()
|
||||
|
||||
# given the matlab root folder, find the library locations
|
||||
if (MATLAB_ROOT_DIR)
|
||||
locate_matlab_components(${MATLAB_ROOT_DIR})
|
||||
endif()
|
||||
find_package_handle_standard_args(Matlab DEFAULT_MSG
|
||||
MATLAB_MEX_SCRIPT MATLAB_INCLUDE_DIRS
|
||||
MATLAB_ROOT_DIR MATLAB_LIBRARIES
|
||||
MATLAB_LIBRARY_DIRS MATLAB_MEXEXT
|
||||
MATLAB_ARCH MATLAB_BIN)
|
||||
endif()
|
@ -33,6 +33,7 @@
|
||||
# <add extra installation rules>
|
||||
# ocv_add_accuracy_tests(<extra dependencies>)
|
||||
# ocv_add_perf_tests(<extra dependencies>)
|
||||
# ocv_add_samples(<extra dependencies>)
|
||||
#
|
||||
#
|
||||
# If module have no "extra" then you can define it in one line:
|
||||
@ -581,6 +582,7 @@ macro(ocv_define_module module_name)
|
||||
|
||||
ocv_add_accuracy_tests()
|
||||
ocv_add_perf_tests()
|
||||
ocv_add_samples()
|
||||
endmacro()
|
||||
|
||||
# ensures that all passed modules are available
|
||||
@ -725,6 +727,48 @@ function(ocv_add_accuracy_tests)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
function(ocv_add_samples)
|
||||
set(samples_path "${CMAKE_CURRENT_SOURCE_DIR}/samples")
|
||||
string(REGEX REPLACE "^opencv_" "" module_id ${the_module})
|
||||
|
||||
if(BUILD_EXAMPLES AND EXISTS "${samples_path}")
|
||||
set(samples_deps ${the_module} ${OPENCV_MODULE_${the_module}_DEPS} opencv_highgui ${ARGN})
|
||||
ocv_check_dependencies(${samples_deps})
|
||||
|
||||
if(OCV_DEPENDENCIES_FOUND)
|
||||
file(GLOB sample_sources "${samples_path}/*.cpp")
|
||||
ocv_include_modules(${OPENCV_MODULE_${the_module}_DEPS})
|
||||
|
||||
foreach(source ${sample_sources})
|
||||
get_filename_component(name "${source}" NAME_WE)
|
||||
set(the_target "example_${module_id}_${name}")
|
||||
|
||||
add_executable(${the_target} "${source}")
|
||||
target_link_libraries(${the_target} ${samples_deps})
|
||||
|
||||
set_target_properties(${the_target} PROPERTIES PROJECT_LABEL "(sample) ${name}")
|
||||
|
||||
if(ENABLE_SOLUTION_FOLDERS)
|
||||
set_target_properties(${the_target} PROPERTIES
|
||||
OUTPUT_NAME "${module_id}-example-${name}"
|
||||
FOLDER "samples/${module_id}")
|
||||
endif()
|
||||
|
||||
if(WIN32)
|
||||
install(TARGETS ${the_target} RUNTIME DESTINATION "samples/${module_id}" COMPONENT main)
|
||||
endif()
|
||||
endforeach()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(INSTALL_C_EXAMPLES AND NOT WIN32 AND EXISTS "${samples_path}")
|
||||
file(GLOB sample_files "${samples_path}/*")
|
||||
install(FILES ${sample_files}
|
||||
DESTINATION share/OpenCV/samples/${module_id}
|
||||
PERMISSIONS OWNER_READ GROUP_READ WORLD_READ)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
# internal macro; finds all link dependencies of the module
|
||||
# should be used at the end of CMake processing
|
||||
macro(__ocv_track_module_link_dependencies the_module optkind)
|
||||
|
@ -1,16 +1,19 @@
|
||||
SET(OPENCV_VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/modules/core/include/opencv2/core/version.hpp")
|
||||
FILE(STRINGS "${OPENCV_VERSION_FILE}" OPENCV_VERSION_PARTS REGEX "#define CV_VERSION_[A-Z]+[ ]+[0-9]+" )
|
||||
file(STRINGS "${OPENCV_VERSION_FILE}" OPENCV_VERSION_PARTS REGEX "#define CV_VERSION_[A-Z]+[ ]+" )
|
||||
|
||||
string(REGEX REPLACE ".+CV_VERSION_EPOCH[ ]+([0-9]+).*" "\\1" OPENCV_VERSION_MAJOR "${OPENCV_VERSION_PARTS}")
|
||||
string(REGEX REPLACE ".+CV_VERSION_MAJOR[ ]+([0-9]+).*" "\\1" OPENCV_VERSION_MINOR "${OPENCV_VERSION_PARTS}")
|
||||
string(REGEX REPLACE ".+CV_VERSION_MINOR[ ]+([0-9]+).*" "\\1" OPENCV_VERSION_PATCH "${OPENCV_VERSION_PARTS}")
|
||||
string(REGEX REPLACE ".+CV_VERSION_REVISION[ ]+([0-9]+).*" "\\1" OPENCV_VERSION_TWEAK "${OPENCV_VERSION_PARTS}")
|
||||
string(REGEX REPLACE ".+CV_VERSION_STATUS[ ]+\"([^\"]*)\".*" "\\1" OPENCV_VERSION_STATUS "${OPENCV_VERSION_PARTS}")
|
||||
|
||||
set(OPENCV_VERSION "${OPENCV_VERSION_MAJOR}.${OPENCV_VERSION_MINOR}.${OPENCV_VERSION_PATCH}")
|
||||
set(OPENCV_VERSION_PLAIN "${OPENCV_VERSION_MAJOR}.${OPENCV_VERSION_MINOR}.${OPENCV_VERSION_PATCH}")
|
||||
if(OPENCV_VERSION_TWEAK GREATER 0)
|
||||
set(OPENCV_VERSION "${OPENCV_VERSION}.${OPENCV_VERSION_TWEAK}")
|
||||
set(OPENCV_VERSION_PLAIN "${OPENCV_VERSION_PLAIN}.${OPENCV_VERSION_TWEAK}")
|
||||
endif()
|
||||
|
||||
set(OPENCV_VERSION "${OPENCV_VERSION_PLAIN}${OPENCV_VERSION_STATUS}")
|
||||
|
||||
set(OPENCV_SOVERSION "${OPENCV_VERSION_MAJOR}.${OPENCV_VERSION_MINOR}")
|
||||
set(OPENCV_LIBVERSION "${OPENCV_VERSION_MAJOR}.${OPENCV_VERSION_MINOR}.${OPENCV_VERSION_PATCH}")
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
set(OpenCV_VERSION @OPENCV_VERSION@)
|
||||
set(OpenCV_VERSION @OPENCV_VERSION_PLAIN@)
|
||||
set(PACKAGE_VERSION ${OpenCV_VERSION})
|
||||
|
||||
set(PACKAGE_VERSION_EXACT False)
|
||||
|
@ -22,11 +22,12 @@
|
||||
# - OpenCV_INCLUDE_DIRS : The OpenCV include directories.
|
||||
# - OpenCV_COMPUTE_CAPABILITIES : The version of compute capability
|
||||
# - OpenCV_ANDROID_NATIVE_API_LEVEL : Minimum required level of Android API
|
||||
# - OpenCV_VERSION : The version of this OpenCV build: "@OPENCV_VERSION@"
|
||||
# - OpenCV_VERSION : The version of this OpenCV build: "@OPENCV_VERSION_PLAIN@"
|
||||
# - OpenCV_VERSION_MAJOR : Major version part of OpenCV_VERSION: "@OPENCV_VERSION_MAJOR@"
|
||||
# - OpenCV_VERSION_MINOR : Minor version part of OpenCV_VERSION: "@OPENCV_VERSION_MINOR@"
|
||||
# - OpenCV_VERSION_PATCH : Patch version part of OpenCV_VERSION: "@OPENCV_VERSION_PATCH@"
|
||||
# - OpenCV_VERSION_TWEAK : Tweak version part of OpenCV_VERSION: "@OPENCV_VERSION_TWEAK@"
|
||||
# - OpenCV_VERSION_STATUS : Development status of this build: "@OPENCV_VERSION_STATUS@"
|
||||
#
|
||||
# Advanced variables:
|
||||
# - OpenCV_SHARED
|
||||
@ -96,11 +97,12 @@ mark_as_advanced(FORCE OpenCV_LIB_DIR_OPT OpenCV_LIB_DIR_DBG OpenCV_3RDPARTY_LIB
|
||||
# ======================================================
|
||||
# Version variables:
|
||||
# ======================================================
|
||||
SET(OpenCV_VERSION @OPENCV_VERSION@)
|
||||
SET(OpenCV_VERSION @OPENCV_VERSION_PLAIN@)
|
||||
SET(OpenCV_VERSION_MAJOR @OPENCV_VERSION_MAJOR@)
|
||||
SET(OpenCV_VERSION_MINOR @OPENCV_VERSION_MINOR@)
|
||||
SET(OpenCV_VERSION_PATCH @OPENCV_VERSION_PATCH@)
|
||||
SET(OpenCV_VERSION_TWEAK @OPENCV_VERSION_TWEAK@)
|
||||
SET(OpenCV_VERSION_STATUS "@OPENCV_VERSION_STATUS@")
|
||||
|
||||
# ====================================================================
|
||||
# Link libraries: e.g. libopencv_core.so, opencv_imgproc220d.lib, etc...
|
||||
|
@ -8,6 +8,6 @@ includedir_new=@includedir@
|
||||
|
||||
Name: OpenCV
|
||||
Description: Open Source Computer Vision Library
|
||||
Version: @OPENCV_VERSION@
|
||||
Version: @OPENCV_VERSION_PLAIN@
|
||||
Libs: @OpenCV_LIB_COMPONENTS@
|
||||
Cflags: -I${includedir_old} -I${includedir_new}
|
||||
|
@ -67,9 +67,14 @@ if(BUILD_DOCS AND HAVE_SPHINX)
|
||||
set(OPENCV_DOC_DEPS conf.py ${OPENCV_FILES_REF} ${OPENCV_FILES_REF_PICT}
|
||||
${OPENCV_FILES_UG} ${OPENCV_FILES_TUT} ${OPENCV_FILES_TUT_PICT})
|
||||
|
||||
set(BUILD_PLANTUML "")
|
||||
if(PLANTUML)
|
||||
set(BUILD_PLANTUML "-tplantuml")
|
||||
endif()
|
||||
|
||||
if(PDFLATEX_COMPILER)
|
||||
add_custom_target(docs
|
||||
COMMAND ${SPHINX_BUILD} -b latex -c ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/.. .
|
||||
COMMAND ${SPHINX_BUILD} ${BUILD_PLANTUML} -b latex -c ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/.. .
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/pics ${CMAKE_CURRENT_BINARY_DIR}/doc/opencv1/pics
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/mymath.sty ${CMAKE_CURRENT_BINARY_DIR}
|
||||
COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/patch_refman_latex.py" opencv2refman.tex
|
||||
@ -103,7 +108,7 @@ if(BUILD_DOCS AND HAVE_SPHINX)
|
||||
endif()
|
||||
|
||||
add_custom_target(html_docs
|
||||
COMMAND ${SPHINX_BUILD} -b html -c ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/.. ./_html
|
||||
COMMAND ${SPHINX_BUILD} ${BUILD_PLANTUML} -b html -c ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/.. ./_html
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/mymath.sty ${CMAKE_CURRENT_BINARY_DIR}
|
||||
DEPENDS ${OPENCV_DOC_DEPS}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
|
||||
|
15
doc/conf.py
15
doc/conf.py
@ -28,6 +28,16 @@ sys.path.insert(0, os.path.abspath('.'))
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.todo', 'sphinx.ext.extlinks', 'ocv', 'sphinx.ext.doctest']
|
||||
|
||||
have_plantuml_ext = False
|
||||
if tags.has('plantuml'):
|
||||
try:
|
||||
import sphinxcontrib.plantuml
|
||||
extensions.append("sphinxcontrib.plantuml")
|
||||
have_plantuml_ext = True
|
||||
except ImportError:
|
||||
print "No module sphinxcontrib.plantuml found, sphinx will not render UML diagrams"
|
||||
|
||||
doctest_test_doctest_blocks = 'block'
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
@ -55,6 +65,7 @@ version_epoch = re.search("^W*#\W*define\W+CV_VERSION_EPOCH\W+(\d+)\W*$", versio
|
||||
version_major = re.search("^W*#\W*define\W+CV_VERSION_MAJOR\W+(\d+)\W*$", version_file, re.MULTILINE).group(1)
|
||||
version_minor = re.search("^W*#\W*define\W+CV_VERSION_MINOR\W+(\d+)\W*$", version_file, re.MULTILINE).group(1)
|
||||
version_patch = re.search("^W*#\W*define\W+CV_VERSION_REVISION\W+(\d+)\W*$", version_file, re.MULTILINE).group(1)
|
||||
version_status = re.search("^W*#\W*define\W+CV_VERSION_STATUS\W+\"(.*?)\"\W*$", version_file, re.MULTILINE).group(1)
|
||||
|
||||
# The short X.Y version.
|
||||
version = version_epoch + '.' + version_major
|
||||
@ -62,6 +73,7 @@ version = version_epoch + '.' + version_major
|
||||
release = version_epoch + '.' + version_major + '.' + version_minor
|
||||
if version_patch:
|
||||
release = release + '.' + version_patch
|
||||
release += version_status
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
@ -77,6 +89,9 @@ if version_patch:
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['doc/tutorials/definitions']
|
||||
|
||||
if not have_plantuml_ext:
|
||||
exclude_patterns.append('**/uml/*')
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all documents.
|
||||
#default_role = None
|
||||
|
||||
|
@ -40,7 +40,7 @@ Code
|
||||
* Display the detected circle in a window.
|
||||
|
||||
.. |TutorialHoughCirclesSimpleDownload| replace:: here
|
||||
.. _TutorialHoughCirclesSimpleDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/houghlines.cpp
|
||||
.. _TutorialHoughCirclesSimpleDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/houghcircles.cpp
|
||||
.. |TutorialHoughCirclesFancyDownload| replace:: here
|
||||
.. _TutorialHoughCirclesFancyDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp
|
||||
|
||||
|
@ -43,6 +43,9 @@
|
||||
//
|
||||
//M*/
|
||||
|
||||
//data (which is float) is aligend in 32 bytes
|
||||
#define WIDTH_MULTIPLE (32 >> 2)
|
||||
|
||||
/////////////////////////////////////////////////////////
|
||||
//*******************************************************
|
||||
// basicretinafilter
|
||||
@ -116,22 +119,18 @@ kernel void horizontalAnticausalFilter(
|
||||
|
||||
float4 result_v4 = (float4)(0), out_v4;
|
||||
float result = 0;
|
||||
// we assume elements_per_row is multple of 4
|
||||
for(int i = 0; i < 4; ++ i, -- optr)
|
||||
// we assume elements_per_row is multple of WIDTH_MULTIPLE
|
||||
for(int i = 0; i < WIDTH_MULTIPLE; ++ i, -- optr)
|
||||
{
|
||||
if(i < elements_per_row - cols)
|
||||
{
|
||||
*optr = result;
|
||||
}
|
||||
else
|
||||
if(i >= elements_per_row - cols)
|
||||
{
|
||||
result = *optr + _a * result;
|
||||
*optr = result;
|
||||
}
|
||||
*optr = result;
|
||||
}
|
||||
result_v4.x = result;
|
||||
optr -= 3;
|
||||
for(int i = 1; i < elements_per_row / 4; ++i, optr -= 4)
|
||||
for(int i = WIDTH_MULTIPLE / 4; i < elements_per_row / 4; ++i, optr -= 4)
|
||||
{
|
||||
// shift left, `offset` is type `size_t` so it cannot be negative
|
||||
out_v4 = vload4(0, optr);
|
||||
@ -223,23 +222,19 @@ kernel void horizontalAnticausalFilter_Irregular(
|
||||
|
||||
float4 buf_v4, out_v4, res_v4 = (float4)(0);
|
||||
float result = 0;
|
||||
// we assume elements_per_row is multple of 4
|
||||
for(int i = 0; i < 4; ++ i, -- optr, -- bptr)
|
||||
// we assume elements_per_row is multple of WIDTH_MULTIPLE
|
||||
for(int i = 0; i < WIDTH_MULTIPLE; ++ i, -- optr, -- bptr)
|
||||
{
|
||||
if(i < elements_per_row - cols)
|
||||
{
|
||||
*optr = result;
|
||||
}
|
||||
else
|
||||
if(i >= elements_per_row - cols)
|
||||
{
|
||||
result = *optr + *bptr * result;
|
||||
*optr = result;
|
||||
}
|
||||
*optr = result;
|
||||
}
|
||||
res_v4.x = result;
|
||||
optr -= 3;
|
||||
bptr -= 3;
|
||||
for(int i = 0; i < elements_per_row / 4 - 1; ++i, optr -= 4, bptr -= 4)
|
||||
for(int i = WIDTH_MULTIPLE / 4; i < elements_per_row / 4; ++i, optr -= 4, bptr -= 4)
|
||||
{
|
||||
buf_v4 = vload4(0, bptr);
|
||||
out_v4 = vload4(0, optr);
|
||||
|
@ -262,16 +262,16 @@ CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
|
||||
double param1 = 3., double param2 = 0.99 );
|
||||
|
||||
//! finds essential matrix from a set of corresponding 2D points using five-point algorithm
|
||||
CV_EXPORTS Mat findEssentialMat( InputArray points1, InputArray points2,
|
||||
CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
|
||||
double focal = 1.0, Point2d pp = Point2d(0, 0),
|
||||
int method = RANSAC, double prob = 0.999,
|
||||
double threshold = 1.0, OutputArray mask = noArray() );
|
||||
|
||||
//! decompose essential matrix to possible rotation matrix and one translation vector
|
||||
CV_EXPORTS void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
|
||||
CV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
|
||||
|
||||
//! recover relative camera pose from a set of corresponding 2D points
|
||||
CV_EXPORTS int recoverPose( InputArray E, InputArray points1, InputArray points2,
|
||||
CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
|
||||
OutputArray R, OutputArray t,
|
||||
double focal = 1.0, Point2d pp = Point2d(0, 0),
|
||||
InputOutputArray mask = noArray() );
|
||||
|
@ -1403,6 +1403,8 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
|
||||
}
|
||||
if( !(flags & CV_CALIB_RATIONAL_MODEL) )
|
||||
flags |= CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5 + CV_CALIB_FIX_K6;
|
||||
if( !(flags & CV_CALIB_THIN_PRISM_MODEL))
|
||||
flags |= CALIB_FIX_S1_S2_S3_S4;
|
||||
if( flags & CV_CALIB_FIX_K1 )
|
||||
mask[4] = 0;
|
||||
if( flags & CV_CALIB_FIX_K2 )
|
||||
@ -1415,8 +1417,6 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
|
||||
mask[10] = 0;
|
||||
if( flags & CV_CALIB_FIX_K6 )
|
||||
mask[11] = 0;
|
||||
if(!(flags & CV_CALIB_THIN_PRISM_MODEL))
|
||||
flags |= CALIB_FIX_S1_S2_S3_S4;
|
||||
|
||||
if(flags & CALIB_FIX_S1_S2_S3_S4)
|
||||
{
|
||||
@ -1638,12 +1638,12 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
|
||||
CvTermCriteria termCrit,
|
||||
int flags )
|
||||
{
|
||||
const int NINTRINSIC = 12;
|
||||
const int NINTRINSIC = 16;
|
||||
Ptr<CvMat> npoints, err, J_LR, Je, Ji, imagePoints[2], objectPoints, RT0;
|
||||
CvLevMarq solver;
|
||||
double reprojErr = 0;
|
||||
|
||||
double A[2][9], dk[2][8]={{0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0}}, rlr[9];
|
||||
double A[2][9], dk[2][12]={{0,0,0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0,0,0}}, rlr[9];
|
||||
CvMat K[2], Dist[2], om_LR, T_LR;
|
||||
CvMat R_LR = cvMat(3, 3, CV_64F, rlr);
|
||||
int i, k, p, ni = 0, ofs, nimages, pointsTotal, maxPoints = 0;
|
||||
@ -1689,7 +1689,7 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
|
||||
(_imagePoints1->rows == 1 && _imagePoints1->cols == pointsTotal && cn == 2)) );
|
||||
|
||||
K[k] = cvMat(3,3,CV_64F,A[k]);
|
||||
Dist[k] = cvMat(1,8,CV_64F,dk[k]);
|
||||
Dist[k] = cvMat(1,12,CV_64F,dk[k]);
|
||||
|
||||
imagePoints[k].reset(cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type))));
|
||||
cvConvert( points, imagePoints[k] );
|
||||
@ -1748,6 +1748,8 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
|
||||
uchar* imask = solver.mask->data.ptr + nparams - NINTRINSIC*2;
|
||||
if( !(flags & CV_CALIB_RATIONAL_MODEL) )
|
||||
flags |= CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5 | CV_CALIB_FIX_K6;
|
||||
if( !(flags & CV_CALIB_THIN_PRISM_MODEL) )
|
||||
flags |= CV_CALIB_FIX_S1_S2_S3_S4;
|
||||
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
|
||||
imask[0] = imask[NINTRINSIC] = 0;
|
||||
if( flags & CV_CALIB_FIX_FOCAL_LENGTH )
|
||||
@ -1768,6 +1770,13 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
|
||||
imask[10] = imask[NINTRINSIC+10] = 0;
|
||||
if( flags & CV_CALIB_FIX_K6 )
|
||||
imask[11] = imask[NINTRINSIC+11] = 0;
|
||||
if( flags & CV_CALIB_FIX_S1_S2_S3_S4 )
|
||||
{
|
||||
imask[12] = imask[NINTRINSIC+12] = 0;
|
||||
imask[13] = imask[NINTRINSIC+13] = 0;
|
||||
imask[14] = imask[NINTRINSIC+14] = 0;
|
||||
imask[15] = imask[NINTRINSIC+15] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1842,6 +1851,10 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
|
||||
iparam[4] = dk[k][0]; iparam[5] = dk[k][1]; iparam[6] = dk[k][2];
|
||||
iparam[7] = dk[k][3]; iparam[8] = dk[k][4]; iparam[9] = dk[k][5];
|
||||
iparam[10] = dk[k][6]; iparam[11] = dk[k][7];
|
||||
iparam[12] = dk[k][8];
|
||||
iparam[13] = dk[k][9];
|
||||
iparam[14] = dk[k][10];
|
||||
iparam[15] = dk[k][11];
|
||||
}
|
||||
|
||||
om_LR = cvMat(3, 1, CV_64F, solver.param->data.db);
|
||||
@ -1908,6 +1921,10 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
|
||||
dk[k][5] = iparam[k*NINTRINSIC+9];
|
||||
dk[k][6] = iparam[k*NINTRINSIC+10];
|
||||
dk[k][7] = iparam[k*NINTRINSIC+11];
|
||||
dk[k][8] = iparam[k*NINTRINSIC+12];
|
||||
dk[k][9] = iparam[k*NINTRINSIC+13];
|
||||
dk[k][10] = iparam[k*NINTRINSIC+14];
|
||||
dk[k][11] = iparam[k*NINTRINSIC+15];
|
||||
}
|
||||
}
|
||||
|
||||
@ -3009,6 +3026,7 @@ static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
|
||||
if( distCoeffs0.size() == Size(1, 4) ||
|
||||
distCoeffs0.size() == Size(1, 5) ||
|
||||
distCoeffs0.size() == Size(1, 8) ||
|
||||
distCoeffs0.size() == Size(1, 12) ||
|
||||
distCoeffs0.size() == Size(4, 1) ||
|
||||
distCoeffs0.size() == Size(5, 1) ||
|
||||
distCoeffs0.size() == Size(8, 1) ||
|
||||
|
@ -70,6 +70,8 @@ Moreover every :ocv:class:`FaceRecognizer` supports the:
|
||||
|
||||
* **Loading/Saving** the model state from/to a given XML or YAML.
|
||||
|
||||
.. note:: When using the FaceRecognizer interface in combination with Python, please stick to Python 2. Some underlying scripts like create_csv will not work in other versions, like Python 3.
|
||||
|
||||
Setting the Thresholds
|
||||
+++++++++++++++++++++++
|
||||
|
||||
|
@ -48,10 +48,11 @@
|
||||
#ifndef __OPENCV_VERSION_HPP__
|
||||
#define __OPENCV_VERSION_HPP__
|
||||
|
||||
#define CV_VERSION_EPOCH 2
|
||||
#define CV_VERSION_MAJOR 9
|
||||
#define CV_VERSION_EPOCH 3
|
||||
#define CV_VERSION_MAJOR 0
|
||||
#define CV_VERSION_MINOR 0
|
||||
#define CV_VERSION_REVISION 0
|
||||
#define CV_VERSION_STATUS "-dev"
|
||||
|
||||
#define CVAUX_STR_EXP(__A) #__A
|
||||
#define CVAUX_STR(__A) CVAUX_STR_EXP(__A)
|
||||
@ -60,9 +61,9 @@
|
||||
#define CVAUX_STRW(__A) CVAUX_STRW_EXP(__A)
|
||||
|
||||
#if CV_VERSION_REVISION
|
||||
# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." CVAUX_STR(CV_VERSION_REVISION)
|
||||
# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." CVAUX_STR(CV_VERSION_REVISION) CV_VERSION_STATUS
|
||||
#else
|
||||
# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR)
|
||||
# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) CV_VERSION_STATUS
|
||||
#endif
|
||||
|
||||
/* old style version constants*/
|
||||
|
@ -56,6 +56,7 @@ namespace grid_histogram_detail
|
||||
template <int BIN_COUNT, int BLOCK_SIZE, class SrcPtr, typename ResType, class MaskPtr>
|
||||
__global__ void histogram(const SrcPtr src, ResType* hist, const MaskPtr mask, const int rows, const int cols)
|
||||
{
|
||||
#if CV_CUDEV_ARCH >= 120
|
||||
__shared__ ResType smem[BIN_COUNT];
|
||||
|
||||
const int y = blockIdx.x * blockDim.y + threadIdx.y;
|
||||
@ -86,6 +87,7 @@ namespace grid_histogram_detail
|
||||
if (histVal > 0)
|
||||
atomicAdd(hist + i, histVal);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
template <int BIN_COUNT, class Policy, class SrcPtr, typename ResType, class MaskPtr>
|
||||
|
@ -57,6 +57,8 @@ namespace cv { namespace cudev {
|
||||
template <int BIN_COUNT, class Policy, class SrcPtr, typename ResType, class MaskPtr>
|
||||
__host__ void gridHistogram_(const SrcPtr& src, GpuMat_<ResType>& dst, const MaskPtr& mask, Stream& stream = Stream::Null())
|
||||
{
|
||||
CV_Assert( deviceSupports(SHARED_ATOMICS) );
|
||||
|
||||
const int rows = getRows(src);
|
||||
const int cols = getCols(src);
|
||||
|
||||
@ -75,6 +77,8 @@ __host__ void gridHistogram_(const SrcPtr& src, GpuMat_<ResType>& dst, const Mas
|
||||
template <int BIN_COUNT, class Policy, class SrcPtr, typename ResType>
|
||||
__host__ void gridHistogram_(const SrcPtr& src, GpuMat_<ResType>& dst, Stream& stream = Stream::Null())
|
||||
{
|
||||
CV_Assert( deviceSupports(SHARED_ATOMICS) );
|
||||
|
||||
const int rows = getRows(src);
|
||||
const int cols = getCols(src);
|
||||
|
||||
|
@ -52,6 +52,40 @@
|
||||
#include "gpumat.hpp"
|
||||
#include "traits.hpp"
|
||||
|
||||
namespace
|
||||
{
|
||||
template <typename T> struct CvCudevTextureRef
|
||||
{
|
||||
typedef texture<T, cudaTextureType2D, cudaReadModeElementType> TexRef;
|
||||
|
||||
static TexRef ref;
|
||||
|
||||
__host__ static void bind(const cv::cudev::GlobPtrSz<T>& mat,
|
||||
bool normalizedCoords = false,
|
||||
cudaTextureFilterMode filterMode = cudaFilterModePoint,
|
||||
cudaTextureAddressMode addressMode = cudaAddressModeClamp)
|
||||
{
|
||||
ref.normalized = normalizedCoords;
|
||||
ref.filterMode = filterMode;
|
||||
ref.addressMode[0] = addressMode;
|
||||
ref.addressMode[1] = addressMode;
|
||||
ref.addressMode[2] = addressMode;
|
||||
|
||||
cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
|
||||
|
||||
CV_CUDEV_SAFE_CALL( cudaBindTexture2D(0, &ref, mat.data, &desc, mat.cols, mat.rows, mat.step) );
|
||||
}
|
||||
|
||||
__host__ static void unbind()
|
||||
{
|
||||
CV_CUDEV_SAFE_CALL( cudaUnbindTexture(ref) );
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
typename CvCudevTextureRef<T>::TexRef CvCudevTextureRef<T>::ref;
|
||||
}
|
||||
|
||||
namespace cv { namespace cudev {
|
||||
|
||||
template <typename T> struct TexturePtr
|
||||
@ -63,79 +97,73 @@ template <typename T> struct TexturePtr
|
||||
|
||||
__device__ __forceinline__ T operator ()(float y, float x) const
|
||||
{
|
||||
#if CV_CUDEV_ARCH < 300
|
||||
// Use the texture reference
|
||||
return tex2D(CvCudevTextureRef<T>::ref, x, y);
|
||||
#else
|
||||
// Use the texture object
|
||||
return tex2D<T>(texObj, x, y);
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T> struct Texture : TexturePtr<T>
|
||||
{
|
||||
int rows, cols;
|
||||
bool cc30;
|
||||
|
||||
__host__ explicit Texture(const GlobPtrSz<T>& mat,
|
||||
bool normalizedCoords = false,
|
||||
cudaTextureFilterMode filterMode = cudaFilterModePoint,
|
||||
cudaTextureAddressMode addressMode = cudaAddressModeClamp)
|
||||
{
|
||||
CV_Assert( deviceSupports(FEATURE_SET_COMPUTE_30) );
|
||||
cc30 = deviceSupports(FEATURE_SET_COMPUTE_30);
|
||||
|
||||
rows = mat.rows;
|
||||
cols = mat.cols;
|
||||
|
||||
cudaResourceDesc texRes;
|
||||
std::memset(&texRes, 0, sizeof(texRes));
|
||||
texRes.resType = cudaResourceTypePitch2D;
|
||||
texRes.res.pitch2D.devPtr = mat.data;
|
||||
texRes.res.pitch2D.height = mat.rows;
|
||||
texRes.res.pitch2D.width = mat.cols;
|
||||
texRes.res.pitch2D.pitchInBytes = mat.step;
|
||||
texRes.res.pitch2D.desc = cudaCreateChannelDesc<T>();
|
||||
if (cc30)
|
||||
{
|
||||
// Use the texture object
|
||||
cudaResourceDesc texRes;
|
||||
std::memset(&texRes, 0, sizeof(texRes));
|
||||
texRes.resType = cudaResourceTypePitch2D;
|
||||
texRes.res.pitch2D.devPtr = mat.data;
|
||||
texRes.res.pitch2D.height = mat.rows;
|
||||
texRes.res.pitch2D.width = mat.cols;
|
||||
texRes.res.pitch2D.pitchInBytes = mat.step;
|
||||
texRes.res.pitch2D.desc = cudaCreateChannelDesc<T>();
|
||||
|
||||
cudaTextureDesc texDescr;
|
||||
std::memset(&texDescr, 0, sizeof(texDescr));
|
||||
texDescr.addressMode[0] = addressMode;
|
||||
texDescr.addressMode[1] = addressMode;
|
||||
texDescr.addressMode[2] = addressMode;
|
||||
texDescr.filterMode = filterMode;
|
||||
texDescr.readMode = cudaReadModeElementType;
|
||||
texDescr.normalizedCoords = normalizedCoords;
|
||||
cudaTextureDesc texDescr;
|
||||
std::memset(&texDescr, 0, sizeof(texDescr));
|
||||
texDescr.normalizedCoords = normalizedCoords;
|
||||
texDescr.filterMode = filterMode;
|
||||
texDescr.addressMode[0] = addressMode;
|
||||
texDescr.addressMode[1] = addressMode;
|
||||
texDescr.addressMode[2] = addressMode;
|
||||
texDescr.readMode = cudaReadModeElementType;
|
||||
|
||||
CV_CUDEV_SAFE_CALL( cudaCreateTextureObject(&this->texObj, &texRes, &texDescr, 0) );
|
||||
}
|
||||
|
||||
__host__ explicit Texture(const GpuMat_<T>& mat,
|
||||
bool normalizedCoords = false,
|
||||
cudaTextureFilterMode filterMode = cudaFilterModePoint,
|
||||
cudaTextureAddressMode addressMode = cudaAddressModeClamp)
|
||||
{
|
||||
CV_Assert( deviceSupports(FEATURE_SET_COMPUTE_30) );
|
||||
|
||||
rows = mat.rows;
|
||||
cols = mat.cols;
|
||||
|
||||
cudaResourceDesc texRes;
|
||||
std::memset(&texRes, 0, sizeof(texRes));
|
||||
texRes.resType = cudaResourceTypePitch2D;
|
||||
texRes.res.pitch2D.devPtr = mat.data;
|
||||
texRes.res.pitch2D.height = mat.rows;
|
||||
texRes.res.pitch2D.width = mat.cols;
|
||||
texRes.res.pitch2D.pitchInBytes = mat.step;
|
||||
texRes.res.pitch2D.desc = cudaCreateChannelDesc<T>();
|
||||
|
||||
cudaTextureDesc texDescr;
|
||||
std::memset(&texDescr, 0, sizeof(texDescr));
|
||||
texDescr.addressMode[0] = addressMode;
|
||||
texDescr.addressMode[1] = addressMode;
|
||||
texDescr.addressMode[2] = addressMode;
|
||||
texDescr.filterMode = filterMode;
|
||||
texDescr.readMode = cudaReadModeElementType;
|
||||
texDescr.normalizedCoords = normalizedCoords;
|
||||
|
||||
CV_CUDEV_SAFE_CALL( cudaCreateTextureObject(&this->texObj, &texRes, &texDescr, 0) );
|
||||
CV_CUDEV_SAFE_CALL( cudaCreateTextureObject(&this->texObj, &texRes, &texDescr, 0) );
|
||||
}
|
||||
else
|
||||
{
|
||||
// Use the texture reference
|
||||
CvCudevTextureRef<T>::bind(mat, normalizedCoords, filterMode, addressMode);
|
||||
}
|
||||
}
|
||||
|
||||
__host__ ~Texture()
|
||||
{
|
||||
cudaDestroyTextureObject(this->texObj);
|
||||
if (cc30)
|
||||
{
|
||||
// Use the texture object
|
||||
cudaDestroyTextureObject(this->texObj);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Use the texture reference
|
||||
CvCudevTextureRef<T>::unbind();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -64,11 +64,23 @@ __device__ __forceinline__ uint atomicAdd(uint* address, uint val)
|
||||
|
||||
__device__ __forceinline__ float atomicAdd(float* address, float val)
|
||||
{
|
||||
#if CV_CUDEV_ARCH >= 200
|
||||
return ::atomicAdd(address, val);
|
||||
#else
|
||||
int* address_as_i = (int*) address;
|
||||
int old = *address_as_i, assumed;
|
||||
do {
|
||||
assumed = old;
|
||||
old = ::atomicCAS(address_as_i, assumed,
|
||||
__float_as_int(val + __int_as_float(assumed)));
|
||||
} while (assumed != old);
|
||||
return __int_as_float(old);
|
||||
#endif
|
||||
}
|
||||
|
||||
__device__ static double atomicAdd(double* address, double val)
|
||||
{
|
||||
#if CV_CUDEV_ARCH >= 130
|
||||
unsigned long long int* address_as_ull = (unsigned long long int*) address;
|
||||
unsigned long long int old = *address_as_ull, assumed;
|
||||
do {
|
||||
@ -77,6 +89,11 @@ __device__ static double atomicAdd(double* address, double val)
|
||||
__double_as_longlong(val + __longlong_as_double(assumed)));
|
||||
} while (assumed != old);
|
||||
return __longlong_as_double(old);
|
||||
#else
|
||||
(void) address;
|
||||
(void) val;
|
||||
return 0.0;
|
||||
#endif
|
||||
}
|
||||
|
||||
// atomicMin
|
||||
@ -93,6 +110,7 @@ __device__ __forceinline__ uint atomicMin(uint* address, uint val)
|
||||
|
||||
__device__ static float atomicMin(float* address, float val)
|
||||
{
|
||||
#if CV_CUDEV_ARCH >= 120
|
||||
int* address_as_i = (int*) address;
|
||||
int old = *address_as_i, assumed;
|
||||
do {
|
||||
@ -101,10 +119,16 @@ __device__ static float atomicMin(float* address, float val)
|
||||
__float_as_int(::fminf(val, __int_as_float(assumed))));
|
||||
} while (assumed != old);
|
||||
return __int_as_float(old);
|
||||
#else
|
||||
(void) address;
|
||||
(void) val;
|
||||
return 0.0f;
|
||||
#endif
|
||||
}
|
||||
|
||||
__device__ static double atomicMin(double* address, double val)
|
||||
{
|
||||
#if CV_CUDEV_ARCH >= 130
|
||||
unsigned long long int* address_as_ull = (unsigned long long int*) address;
|
||||
unsigned long long int old = *address_as_ull, assumed;
|
||||
do {
|
||||
@ -113,6 +137,11 @@ __device__ static double atomicMin(double* address, double val)
|
||||
__double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
|
||||
} while (assumed != old);
|
||||
return __longlong_as_double(old);
|
||||
#else
|
||||
(void) address;
|
||||
(void) val;
|
||||
return 0.0;
|
||||
#endif
|
||||
}
|
||||
|
||||
// atomicMax
|
||||
@ -129,6 +158,7 @@ __device__ __forceinline__ uint atomicMax(uint* address, uint val)
|
||||
|
||||
__device__ static float atomicMax(float* address, float val)
|
||||
{
|
||||
#if CV_CUDEV_ARCH >= 120
|
||||
int* address_as_i = (int*) address;
|
||||
int old = *address_as_i, assumed;
|
||||
do {
|
||||
@ -137,10 +167,16 @@ __device__ static float atomicMax(float* address, float val)
|
||||
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
|
||||
} while (assumed != old);
|
||||
return __int_as_float(old);
|
||||
#else
|
||||
(void) address;
|
||||
(void) val;
|
||||
return 0.0f;
|
||||
#endif
|
||||
}
|
||||
|
||||
__device__ static double atomicMax(double* address, double val)
|
||||
{
|
||||
#if CV_CUDEV_ARCH >= 130
|
||||
unsigned long long int* address_as_ull = (unsigned long long int*) address;
|
||||
unsigned long long int old = *address_as_ull, assumed;
|
||||
do {
|
||||
@ -149,6 +185,11 @@ __device__ static double atomicMax(double* address, double val)
|
||||
__double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
|
||||
} while (assumed != old);
|
||||
return __longlong_as_double(old);
|
||||
#else
|
||||
(void) address;
|
||||
(void) val;
|
||||
return 0.0;
|
||||
#endif
|
||||
}
|
||||
|
||||
}}
|
||||
|
@ -228,7 +228,11 @@ template <> __device__ __forceinline__ int saturate_cast<int>(float v)
|
||||
}
|
||||
template <> __device__ __forceinline__ int saturate_cast<int>(double v)
|
||||
{
|
||||
#if CV_CUDEV_ARCH >= 130
|
||||
return __double2int_rn(v);
|
||||
#else
|
||||
return saturate_cast<int>((float) v);
|
||||
#endif
|
||||
}
|
||||
|
||||
template <> __device__ __forceinline__ uint saturate_cast<uint>(schar v)
|
||||
@ -256,7 +260,11 @@ template <> __device__ __forceinline__ uint saturate_cast<uint>(float v)
|
||||
}
|
||||
template <> __device__ __forceinline__ uint saturate_cast<uint>(double v)
|
||||
{
|
||||
#if CV_CUDEV_ARCH >= 130
|
||||
return __double2uint_rn(v);
|
||||
#else
|
||||
return saturate_cast<uint>((float) v);
|
||||
#endif
|
||||
}
|
||||
|
||||
}}
|
||||
|
@ -257,8 +257,7 @@ public:
|
||||
const T& cast() const
|
||||
{
|
||||
if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast();
|
||||
void* obj = const_cast<void*>(object);
|
||||
T* r = reinterpret_cast<T*>(policy->get_value(&obj));
|
||||
T* r = reinterpret_cast<T*>(policy->get_value(const_cast<void **>(&object)));
|
||||
return *r;
|
||||
}
|
||||
|
||||
|
@ -194,7 +194,7 @@ namespace cv { namespace gpu { namespace cudev
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void call_resize_nearest_tex(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
|
||||
void call_resize_nearest_tex(const PtrStepSz<T>& /*src*/, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
|
||||
{
|
||||
const dim3 block(32, 8);
|
||||
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
|
||||
@ -301,7 +301,7 @@ namespace cv { namespace gpu { namespace cudev
|
||||
|
||||
template <typename T> struct ResizeNearestDispatcher
|
||||
{
|
||||
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
|
||||
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& /*srcWhole*/, int /*yoff*/, int /*xoff*/, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
|
||||
{
|
||||
call_resize_nearest_glob(src, dst, fy, fx, stream);
|
||||
}
|
||||
|
@ -496,6 +496,110 @@ And this is the output of the above program in case of the probabilistic Hough t
|
||||
|
||||
.. image:: pics/houghp.png
|
||||
|
||||
.. seealso::
|
||||
|
||||
:ocv:class:`LineSegmentDetector`
|
||||
|
||||
|
||||
|
||||
LineSegmentDetector
|
||||
-------------------
|
||||
Line segment detector class, following the algorithm described at [Rafael12]_.
|
||||
|
||||
.. ocv:class:: LineSegmentDetector : public Algorithm
|
||||
|
||||
|
||||
createLineSegmentDetectorPtr
|
||||
----------------------------
|
||||
Creates a smart pointer to a LineSegmentDetector object and initializes it.
|
||||
|
||||
.. ocv:function:: Ptr<LineSegmentDetector> createLineSegmentDetectorPtr(int _refine = LSD_REFINE_STD, double _scale = 0.8, double _sigma_scale = 0.6, double _quant = 2.0, double _ang_th = 22.5, double _log_eps = 0, double _density_th = 0.7, int _n_bins = 1024)
|
||||
|
||||
:param _refine: The way found lines will be refined:
|
||||
|
||||
* **LSD_REFINE_NONE** - No refinement applied.
|
||||
|
||||
* **LSD_REFINE_STD** - Standard refinement is applied. E.g. breaking arches into smaller straighter line approximations.
|
||||
|
||||
* **LSD_REFINE_ADV** - Advanced refinement. Number of false alarms is calculated, lines are refined through increase of precision, decrement in size, etc.
|
||||
|
||||
:param scale: The scale of the image that will be used to find the lines. Range (0..1].
|
||||
|
||||
:param sigma_scale: Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
|
||||
|
||||
:param quant: Bound to the quantization error on the gradient norm.
|
||||
|
||||
:param ang_th: Gradient angle tolerance in degrees.
|
||||
|
||||
:param log_eps: Detection threshold: -log10(NFA) > log_eps. Used only when advancent refinement is chosen.
|
||||
|
||||
:param density_th: Minimal density of aligned region points in the enclosing rectangle.
|
||||
|
||||
:param n_bins: Number of bins in pseudo-ordering of gradient modulus.
|
||||
|
||||
The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want to edit those, as to tailor it for their own application.
|
||||
|
||||
|
||||
LineSegmentDetector::detect
|
||||
---------------------------
|
||||
Finds lines in the input image. See the lsd_lines.cpp sample for possible usage.
|
||||
|
||||
.. ocv:function:: void LineSegmentDetector::detect(const InputArray _image, OutputArray _lines, OutputArray width = noArray(), OutputArray prec = noArray(), OutputArray nfa = noArray())
|
||||
|
||||
:param _image A grayscale (CV_8UC1) input image.
|
||||
If only a roi needs to be selected, use ::
|
||||
lsd_ptr->detect(image(roi), lines, ...);
|
||||
lines += Scalar(roi.x, roi.y, roi.x, roi.y);
|
||||
|
||||
:param lines: A vector of Vec4i elements specifying the beginning and ending point of a line. Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly oriented depending on the gradient.
|
||||
|
||||
:param width: Vector of widths of the regions, where the lines are found. E.g. Width of line.
|
||||
|
||||
:param prec: Vector of precisions with which the lines are found.
|
||||
|
||||
:param nfa: Vector containing number of false alarms in the line region, with precision of 10%. The bigger the value, logarithmically better the detection.
|
||||
|
||||
* -1 corresponds to 10 mean false alarms
|
||||
|
||||
* 0 corresponds to 1 mean false alarm
|
||||
|
||||
* 1 corresponds to 0.1 mean false alarms
|
||||
|
||||
This vector will be calculated only when the objects type is LSD_REFINE_ADV.
|
||||
|
||||
This is the output of the default parameters of the algorithm on the above shown image.
|
||||
|
||||
.. image:: pics/building_lsd.png
|
||||
|
||||
.. note::
|
||||
|
||||
* An example using the LineSegmentDetector can be found at opencv_source_code/samples/cpp/lsd_lines.cpp
|
||||
|
||||
LineSegmentDetector::drawSegments
|
||||
---------------------------------
|
||||
Draws the line segments on a given image.
|
||||
|
||||
.. ocv:function:: void LineSegmentDetector::drawSegments(InputOutputArray _image, InputArray lines)
|
||||
|
||||
:param image: The image, where the liens will be drawn. Should be bigger or equal to the image, where the lines were found.
|
||||
|
||||
:param lines: A vector of the lines that needed to be drawn.
|
||||
|
||||
|
||||
LineSegmentDetector::compareSegments
|
||||
------------------------------------
|
||||
Draws two groups of lines in blue and red, counting the non overlapping (mismatching) pixels.
|
||||
|
||||
.. ocv:function:: int LineSegmentDetector::compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray())
|
||||
|
||||
:param size: The size of the image, where lines1 and lines2 were found.
|
||||
|
||||
:param lines1: The first group of lines that needs to be drawn. It is visualized in blue color.
|
||||
|
||||
:param lines2: The second group of lines. They visualized in red color.
|
||||
|
||||
:param image: Optional image, where the lines will be drawn. The image should be color in order for lines1 and lines2 to be drawn in the above mentioned colors.
|
||||
|
||||
|
||||
|
||||
preCornerDetect
|
||||
@ -542,3 +646,5 @@ The corners can be found as local maximums of the functions, as shown below: ::
|
||||
.. [Shi94] J. Shi and C. Tomasi. *Good Features to Track*. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 593-600, June 1994.
|
||||
|
||||
.. [Yuen90] Yuen, H. K. and Princen, J. and Illingworth, J. and Kittler, J., *Comparative study of Hough transform methods for circle finding*. Image Vision Comput. 8 1, pp 71–77 (1990)
|
||||
|
||||
.. [Rafael12] Rafael Grompone von Gioi, Jérémie Jakubowicz, Jean-Michel Morel, and Gregory Randall, LSD: a Line Segment Detector, Image Processing On Line, vol. 2012. http://dx.doi.org/10.5201/ipol.2012.gjmr-lsd
|
||||
|
@ -412,6 +412,28 @@ http://www.dai.ed.ac.uk/CVonline/LOCAL\_COPIES/MANDUCHI1/Bilateral\_Filtering.ht
|
||||
This filter does not work inplace.
|
||||
|
||||
|
||||
adaptiveBilateralFilter
|
||||
-----------------------
|
||||
Applies the adaptive bilateral filter to an image.
|
||||
|
||||
.. ocv:function:: void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize, double sigmaSpace, Point anchor=Point(-1, -1), int borderType=BORDER_DEFAULT )
|
||||
|
||||
.. ocv:pyfunction:: cv2.adaptiveBilateralFilter(src, ksize, sigmaSpace[, dst[, anchor[, borderType]]]) -> dst
|
||||
|
||||
:param src: Source 8-bit, 1-channel or 3-channel image.
|
||||
|
||||
:param dst: Destination image of the same size and type as ``src`` .
|
||||
|
||||
:param ksize: filter kernel size.
|
||||
|
||||
:param sigmaSpace: Filter sigma in the coordinate space. It has similar meaning with ``sigmaSpace`` in ``bilateralFilter``.
|
||||
|
||||
:param anchor: anchor point; default value ``Point(-1,-1)`` means that the anchor is at the kernel center. Only default value is supported now.
|
||||
|
||||
:param borderType: border mode used to extrapolate pixels outside of the image.
|
||||
|
||||
The function applies adaptive bilateral filtering to the input image. This filter is similar to ``bilateralFilter``, in that dissimilarity from and distance to the center pixel is punished. Instead of using ``sigmaColor``, we employ the variance of pixel values in the neighbourhood.
|
||||
|
||||
|
||||
|
||||
blur
|
||||
|
BIN
modules/imgproc/doc/pics/building_lsd.png
Normal file
BIN
modules/imgproc/doc/pics/building_lsd.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 49 KiB |
@ -908,7 +908,7 @@ class LineSegmentDetector : public Algorithm
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* Detect lines in the input image with the specified ROI.
|
||||
* Detect lines in the input image.
|
||||
*
|
||||
* @param _image A grayscale(CV_8UC1) input image.
|
||||
* If only a roi needs to be selected, use
|
||||
@ -917,8 +917,6 @@ public:
|
||||
* @param _lines Return: A vector of Vec4i elements specifying the beginning and ending point of a line.
|
||||
* Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end.
|
||||
* Returned lines are strictly oriented depending on the gradient.
|
||||
* @param _roi Return: ROI of the image, where lines are to be found. If specified, the returning
|
||||
* lines coordinates are image wise.
|
||||
* @param width Return: Vector of widths of the regions, where the lines are found. E.g. Width of line.
|
||||
* @param prec Return: Vector of precisions with which the lines are found.
|
||||
* @param nfa Return: Vector containing number of false alarms in the line region, with precision of 10%.
|
||||
@ -939,18 +937,19 @@ public:
|
||||
* Should have the size of the image, where the lines were found
|
||||
* @param lines The lines that need to be drawn
|
||||
*/
|
||||
virtual void drawSegments(InputOutputArray image, InputArray lines) = 0;
|
||||
virtual void drawSegments(InputOutputArray _image, InputArray lines) = 0;
|
||||
|
||||
/**
|
||||
* Draw both vectors on the image canvas. Uses blue for lines 1 and red for lines 2.
|
||||
*
|
||||
* @param image The image, where lines will be drawn.
|
||||
* Should have the size of the image, where the lines were found
|
||||
* @param size The size of the image, where lines were found.
|
||||
* @param lines1 The first lines that need to be drawn. Color - Blue.
|
||||
* @param lines2 The second lines that need to be drawn. Color - Red.
|
||||
* @param image Optional image, where lines will be drawn.
|
||||
* Should have the size of the image, where the lines were found
|
||||
* @return The number of mismatching pixels between lines1 and lines2.
|
||||
*/
|
||||
virtual int compareSegments(const Size& size, InputArray lines1, InputArray lines2, Mat* image = 0) = 0;
|
||||
virtual int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray()) = 0;
|
||||
|
||||
virtual ~LineSegmentDetector() {};
|
||||
};
|
||||
@ -1065,6 +1064,11 @@ CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d,
|
||||
double sigmaColor, double sigmaSpace,
|
||||
int borderType = BORDER_DEFAULT );
|
||||
|
||||
//! smooths the image using adaptive bilateral filter
|
||||
CV_EXPORTS_W void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize,
|
||||
double sigmaSpace, Point anchor=Point(-1, -1),
|
||||
int borderType=BORDER_DEFAULT );
|
||||
|
||||
//! smooths the image using the box filter. Each pixel is processed in O(1) time
|
||||
CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,
|
||||
Size ksize, Point anchor = Point(-1,-1),
|
||||
|
@ -254,19 +254,19 @@ bool CvtColorIPPLoopCopy(Mat& src, Mat& dst, const Cvt& cvt)
|
||||
return ok;
|
||||
}
|
||||
|
||||
IppStatus __stdcall ippiSwapChannels_8u_C3C4Rf(const Ipp8u* pSrc, int srcStep, Ipp8u* pDst, int dstStep,
|
||||
static IppStatus CV_STDCALL ippiSwapChannels_8u_C3C4Rf(const Ipp8u* pSrc, int srcStep, Ipp8u* pDst, int dstStep,
|
||||
IppiSize roiSize, const int *dstOrder)
|
||||
{
|
||||
return ippiSwapChannels_8u_C3C4R(pSrc, srcStep, pDst, dstStep, roiSize, dstOrder, MAX_IPP8u);
|
||||
}
|
||||
|
||||
IppStatus __stdcall ippiSwapChannels_16u_C3C4Rf(const Ipp16u* pSrc, int srcStep, Ipp16u* pDst, int dstStep,
|
||||
static IppStatus CV_STDCALL ippiSwapChannels_16u_C3C4Rf(const Ipp16u* pSrc, int srcStep, Ipp16u* pDst, int dstStep,
|
||||
IppiSize roiSize, const int *dstOrder)
|
||||
{
|
||||
return ippiSwapChannels_16u_C3C4R(pSrc, srcStep, pDst, dstStep, roiSize, dstOrder, MAX_IPP16u);
|
||||
}
|
||||
|
||||
IppStatus __stdcall ippiSwapChannels_32f_C3C4Rf(const Ipp32f* pSrc, int srcStep, Ipp32f* pDst, int dstStep,
|
||||
static IppStatus CV_STDCALL ippiSwapChannels_32f_C3C4Rf(const Ipp32f* pSrc, int srcStep, Ipp32f* pDst, int dstStep,
|
||||
IppiSize roiSize, const int *dstOrder)
|
||||
{
|
||||
return ippiSwapChannels_32f_C3C4R(pSrc, srcStep, pDst, dstStep, roiSize, dstOrder, MAX_IPP32f);
|
||||
|
@ -50,9 +50,73 @@
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
static IppStatus sts = ippInit();
|
||||
#endif
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
typedef IppStatus (CV_STDCALL* ippiSetFunc)(const void*, void *, int, IppiSize);
|
||||
typedef IppStatus (CV_STDCALL* ippiWarpPerspectiveBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [3][3], int);
|
||||
typedef IppStatus (CV_STDCALL* ippiWarpAffineBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [2][3], int);
|
||||
typedef IppStatus (CV_STDCALL* ippiResizeSqrPixelFunc)(const void*, IppiSize, int, IppiRect, void*, int, IppiRect, double, double, double, double, int, Ipp8u *);
|
||||
|
||||
template <int channels, typename Type>
|
||||
bool IPPSetSimple(cv::Scalar value, void *dataPointer, int step, IppiSize &size, ippiSetFunc func)
|
||||
{
|
||||
Type values[channels];
|
||||
for( int i = 0; i < channels; i++ )
|
||||
values[i] = (Type)value[i];
|
||||
return func(values, dataPointer, step, size) >= 0;
|
||||
}
|
||||
|
||||
bool IPPSet(const cv::Scalar &value, void *dataPointer, int step, IppiSize &size, int channels, int depth)
|
||||
{
|
||||
if( channels == 1 )
|
||||
{
|
||||
switch( depth )
|
||||
{
|
||||
case CV_8U:
|
||||
return ippiSet_8u_C1R((Ipp8u)value[0], (Ipp8u *)dataPointer, step, size) >= 0;
|
||||
case CV_16U:
|
||||
return ippiSet_16u_C1R((Ipp16u)value[0], (Ipp16u *)dataPointer, step, size) >= 0;
|
||||
case CV_32F:
|
||||
return ippiSet_32f_C1R((Ipp32f)value[0], (Ipp32f *)dataPointer, step, size) >= 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if( channels == 3 )
|
||||
{
|
||||
switch( depth )
|
||||
{
|
||||
case CV_8U:
|
||||
return IPPSetSimple<3, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C3R);
|
||||
case CV_16U:
|
||||
return IPPSetSimple<3, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C3R);
|
||||
case CV_32F:
|
||||
return IPPSetSimple<3, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C3R);
|
||||
}
|
||||
}
|
||||
else if( channels == 4 )
|
||||
{
|
||||
switch( depth )
|
||||
{
|
||||
case CV_8U:
|
||||
return IPPSetSimple<4, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C4R);
|
||||
case CV_16U:
|
||||
return IPPSetSimple<4, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C4R);
|
||||
case CV_32F:
|
||||
return IPPSetSimple<4, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C4R);
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
/************** interpolation formulas and tables ***************/
|
||||
|
||||
const int INTER_RESIZE_COEF_BITS=11;
|
||||
@ -1795,6 +1859,45 @@ static int computeResizeAreaTab( int ssize, int dsize, int cn, double scale, Dec
|
||||
return k;
|
||||
}
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
class IPPresizeInvoker :
|
||||
public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
IPPresizeInvoker(Mat &_src, Mat &_dst, double &_inv_scale_x, double &_inv_scale_y, int _mode, ippiResizeSqrPixelFunc _func, bool *_ok) :
|
||||
ParallelLoopBody(), src(_src), dst(_dst), inv_scale_x(_inv_scale_x), inv_scale_y(_inv_scale_y), mode(_mode), func(_func), ok(_ok)
|
||||
{
|
||||
*ok = true;
|
||||
}
|
||||
|
||||
virtual void operator() (const Range& range) const
|
||||
{
|
||||
int cn = src.channels();
|
||||
IppiRect srcroi = { 0, range.start, src.cols, range.end - range.start };
|
||||
int dsty = CV_IMIN(cvRound(range.start * inv_scale_y), dst.rows);
|
||||
int dstwidth = CV_IMIN(cvRound(src.cols * inv_scale_x), dst.cols);
|
||||
int dstheight = CV_IMIN(cvRound(range.end * inv_scale_y), dst.rows);
|
||||
IppiRect dstroi = { 0, dsty, dstwidth, dstheight - dsty };
|
||||
int bufsize;
|
||||
ippiResizeGetBufSize( srcroi, dstroi, cn, mode, &bufsize );
|
||||
Ipp8u *buf;
|
||||
buf = ippsMalloc_8u( bufsize );
|
||||
IppStatus sts;
|
||||
if( func( src.data, ippiSize(src.cols, src.rows), (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, inv_scale_x, inv_scale_y, 0, 0, mode, buf ) < 0 )
|
||||
*ok = false;
|
||||
ippsFree(buf);
|
||||
}
|
||||
private:
|
||||
Mat &src;
|
||||
Mat &dst;
|
||||
double inv_scale_x;
|
||||
double inv_scale_y;
|
||||
int mode;
|
||||
ippiResizeSqrPixelFunc func;
|
||||
bool *ok;
|
||||
const IPPresizeInvoker& operator= (const IPPresizeInvoker&);
|
||||
};
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
@ -1937,6 +2040,34 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize,
|
||||
double scale_x = 1./inv_scale_x, scale_y = 1./inv_scale_y;
|
||||
int k, sx, sy, dx, dy;
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
int mode = interpolation == INTER_LINEAR ? IPPI_INTER_LINEAR : 0;
|
||||
int type = src.type();
|
||||
ippiResizeSqrPixelFunc ippFunc =
|
||||
type == CV_8UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C1R :
|
||||
type == CV_8UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C3R :
|
||||
type == CV_8UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C4R :
|
||||
type == CV_16UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C1R :
|
||||
type == CV_16UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C3R :
|
||||
type == CV_16UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C4R :
|
||||
type == CV_16SC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C1R :
|
||||
type == CV_16SC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C3R :
|
||||
type == CV_16SC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C4R :
|
||||
type == CV_32FC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C1R :
|
||||
type == CV_32FC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C3R :
|
||||
type == CV_32FC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C4R :
|
||||
0;
|
||||
if( ippFunc && mode != 0 )
|
||||
{
|
||||
bool ok;
|
||||
Range range(0, src.rows);
|
||||
IPPresizeInvoker invoker(src, dst, inv_scale_x, inv_scale_y, mode, ippFunc, &ok);
|
||||
parallel_for_(range, invoker, dst.total()/(double)(1<<16));
|
||||
if( ok )
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
if( interpolation == INTER_NEAREST )
|
||||
{
|
||||
resizeNN( src, dst, inv_scale_x, inv_scale_y );
|
||||
@ -3446,6 +3577,49 @@ private:
|
||||
double *M;
|
||||
};
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
class IPPwarpAffineInvoker :
|
||||
public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
IPPwarpAffineInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[2][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpAffineBackFunc _func, bool *_ok) :
|
||||
ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok)
|
||||
{
|
||||
*ok = true;
|
||||
}
|
||||
|
||||
virtual void operator() (const Range& range) const
|
||||
{
|
||||
IppiSize srcsize = { src.cols, src.rows };
|
||||
IppiRect srcroi = { 0, 0, src.cols, src.rows };
|
||||
IppiRect dstroi = { 0, range.start, dst.cols, range.end - range.start };
|
||||
int cnn = src.channels();
|
||||
if( borderType == BORDER_CONSTANT )
|
||||
{
|
||||
IppiSize setSize = { dst.cols, range.end - range.start };
|
||||
void *dataPointer = dst.data + dst.step[0] * range.start;
|
||||
if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
|
||||
{
|
||||
*ok = false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
if( func( src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode ) < 0) ////Aug 2013: problem in IPP 7.1, 8.0 : sometimes function return ippStsCoeffErr
|
||||
*ok = false;
|
||||
}
|
||||
private:
|
||||
Mat &src;
|
||||
Mat &dst;
|
||||
double (&coeffs)[2][3];
|
||||
int mode;
|
||||
int borderType;
|
||||
Scalar borderValue;
|
||||
ippiWarpAffineBackFunc func;
|
||||
bool *ok;
|
||||
const IPPwarpAffineInvoker& operator= (const IPPwarpAffineInvoker&);
|
||||
};
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
|
||||
@ -3492,6 +3666,50 @@ void cv::warpAffine( InputArray _src, OutputArray _dst,
|
||||
const int AB_BITS = MAX(10, (int)INTER_BITS);
|
||||
const int AB_SCALE = 1 << AB_BITS;
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
int depth = src.depth();
|
||||
int channels = src.channels();
|
||||
if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) &&
|
||||
( channels == 1 || channels == 3 || channels == 4 ) &&
|
||||
( borderType == cv::BORDER_TRANSPARENT || ( borderType == cv::BORDER_CONSTANT ) ) )
|
||||
{
|
||||
int type = src.type();
|
||||
ippiWarpAffineBackFunc ippFunc =
|
||||
type == CV_8UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C1R :
|
||||
type == CV_8UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C3R :
|
||||
type == CV_8UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C4R :
|
||||
type == CV_16UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C1R :
|
||||
type == CV_16UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C3R :
|
||||
type == CV_16UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C4R :
|
||||
type == CV_32FC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C1R :
|
||||
type == CV_32FC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C3R :
|
||||
type == CV_32FC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C4R :
|
||||
0;
|
||||
int mode =
|
||||
flags == INTER_LINEAR ? IPPI_INTER_LINEAR :
|
||||
flags == INTER_NEAREST ? IPPI_INTER_NN :
|
||||
flags == INTER_CUBIC ? IPPI_INTER_CUBIC :
|
||||
0;
|
||||
if( mode && ippFunc )
|
||||
{
|
||||
double coeffs[2][3];
|
||||
for( int i = 0; i < 2; i++ )
|
||||
{
|
||||
for( int j = 0; j < 3; j++ )
|
||||
{
|
||||
coeffs[i][j] = matM.at<double>(i, j);
|
||||
}
|
||||
}
|
||||
bool ok;
|
||||
Range range(0, dst.rows);
|
||||
IPPwarpAffineInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok);
|
||||
parallel_for_(range, invoker, dst.total()/(double)(1<<16));
|
||||
if( ok )
|
||||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
for( x = 0; x < dst.cols; x++ )
|
||||
{
|
||||
adelta[x] = saturate_cast<int>(M[0]*x*AB_SCALE);
|
||||
@ -3599,6 +3817,50 @@ private:
|
||||
Scalar borderValue;
|
||||
};
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
class IPPwarpPerspectiveInvoker :
|
||||
public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
IPPwarpPerspectiveInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[3][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpPerspectiveBackFunc _func, bool *_ok) :
|
||||
ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok)
|
||||
{
|
||||
*ok = true;
|
||||
}
|
||||
|
||||
virtual void operator() (const Range& range) const
|
||||
{
|
||||
IppiSize srcsize = {src.cols, src.rows};
|
||||
IppiRect srcroi = {0, 0, src.cols, src.rows};
|
||||
IppiRect dstroi = {0, range.start, dst.cols, range.end - range.start};
|
||||
int cnn = src.channels();
|
||||
|
||||
if( borderType == BORDER_CONSTANT )
|
||||
{
|
||||
IppiSize setSize = {dst.cols, range.end - range.start};
|
||||
void *dataPointer = dst.data + dst.step[0] * range.start;
|
||||
if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) )
|
||||
{
|
||||
*ok = false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
if( func(src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode) < 0)
|
||||
*ok = false;
|
||||
}
|
||||
private:
|
||||
Mat &src;
|
||||
Mat &dst;
|
||||
double (&coeffs)[3][3];
|
||||
int mode;
|
||||
int borderType;
|
||||
const Scalar borderValue;
|
||||
ippiWarpPerspectiveBackFunc func;
|
||||
bool *ok;
|
||||
const IPPwarpPerspectiveInvoker& operator= (const IPPwarpPerspectiveInvoker&);
|
||||
};
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0,
|
||||
@ -3629,6 +3891,50 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0,
|
||||
if( !(flags & WARP_INVERSE_MAP) )
|
||||
invert(matM, matM);
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
int depth = src.depth();
|
||||
int channels = src.channels();
|
||||
if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) &&
|
||||
( channels == 1 || channels == 3 || channels == 4 ) &&
|
||||
( borderType == cv::BORDER_TRANSPARENT || borderType == cv::BORDER_CONSTANT ) )
|
||||
{
|
||||
int type = src.type();
|
||||
ippiWarpPerspectiveBackFunc ippFunc =
|
||||
type == CV_8UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C1R :
|
||||
type == CV_8UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C3R :
|
||||
type == CV_8UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C4R :
|
||||
type == CV_16UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C1R :
|
||||
type == CV_16UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C3R :
|
||||
type == CV_16UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C4R :
|
||||
type == CV_32FC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C1R :
|
||||
type == CV_32FC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C3R :
|
||||
type == CV_32FC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C4R :
|
||||
0;
|
||||
int mode =
|
||||
flags == INTER_LINEAR ? IPPI_INTER_LINEAR :
|
||||
flags == INTER_NEAREST ? IPPI_INTER_NN :
|
||||
flags == INTER_CUBIC ? IPPI_INTER_CUBIC :
|
||||
0;
|
||||
if( mode && ippFunc )
|
||||
{
|
||||
double coeffs[3][3];
|
||||
for( int i = 0; i < 3; i++ )
|
||||
{
|
||||
for( int j = 0; j < 3; j++ )
|
||||
{
|
||||
coeffs[i][j] = matM.at<double>(i, j);
|
||||
}
|
||||
}
|
||||
bool ok;
|
||||
Range range(0, dst.rows);
|
||||
IPPwarpPerspectiveInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok);
|
||||
parallel_for_(range, invoker, dst.total()/(double)(1<<16));
|
||||
if( ok )
|
||||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
Range range(0, dst.rows);
|
||||
warpPerspectiveInvoker invoker(src, dst, M, interpolation, borderType, borderValue);
|
||||
parallel_for_(range, invoker, dst.total()/(double)(1<<16));
|
||||
|
@ -1,5 +1,6 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
@ -9,8 +10,7 @@
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
@ -185,7 +185,7 @@ public:
|
||||
double _log_eps = 0, double _density_th = 0.7, int _n_bins = 1024);
|
||||
|
||||
/**
|
||||
* Detect lines in the input image with the specified ROI.
|
||||
* Detect lines in the input image.
|
||||
*
|
||||
* @param _image A grayscale(CV_8UC1) input image.
|
||||
* If only a roi needs to be selected, use
|
||||
@ -194,8 +194,6 @@ public:
|
||||
* @param _lines Return: A vector of Vec4i elements specifying the beginning and ending point of a line.
|
||||
* Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end.
|
||||
* Returned lines are strictly oriented depending on the gradient.
|
||||
* @param _roi Return: ROI of the image, where lines are to be found. If specified, the returning
|
||||
* lines coordinates are image wise.
|
||||
* @param width Return: Vector of widths of the regions, where the lines are found. E.g. Width of line.
|
||||
* @param prec Return: Vector of precisions with which the lines are found.
|
||||
* @param nfa Return: Vector containing number of false alarms in the line region, with precision of 10%.
|
||||
@ -216,18 +214,19 @@ public:
|
||||
* Should have the size of the image, where the lines were found
|
||||
* @param lines The lines that need to be drawn
|
||||
*/
|
||||
void drawSegments(InputOutputArray image, InputArray lines);
|
||||
void drawSegments(InputOutputArray _image, InputArray lines);
|
||||
|
||||
/**
|
||||
* Draw both vectors on the image canvas. Uses blue for lines 1 and red for lines 2.
|
||||
*
|
||||
* @param image The image, where lines will be drawn.
|
||||
* Should have the size of the image, where the lines were found
|
||||
* @param size The size of the image, where lines1 and lines2 were found.
|
||||
* @param lines1 The first lines that need to be drawn. Color - Blue.
|
||||
* @param lines2 The second lines that need to be drawn. Color - Red.
|
||||
* @param image An optional image, where lines will be drawn.
|
||||
* Should have the size of the image, where the lines were found
|
||||
* @return The number of mismatching pixels between lines1 and lines2.
|
||||
*/
|
||||
int compareSegments(const Size& size, InputArray lines1, InputArray lines2, Mat* image = 0);
|
||||
int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray());
|
||||
|
||||
private:
|
||||
Mat image;
|
||||
@ -336,7 +335,7 @@ private:
|
||||
* @param rec Return: The generated rectangle.
|
||||
*/
|
||||
void region2rect(const std::vector<RegionPoint>& reg, const int reg_size, const double reg_angle,
|
||||
const double prec, const double p, rect& rec) const;
|
||||
const double prec, const double p, rect& rec) const;
|
||||
|
||||
/**
|
||||
* Compute region's angle as the principal inertia axis of the region.
|
||||
@ -410,7 +409,7 @@ LineSegmentDetectorImpl::LineSegmentDetectorImpl(int _refine, double _scale, dou
|
||||
_n_bins > 0);
|
||||
}
|
||||
|
||||
void LineSegmentDetectorImpl::detect(const InputArray _image, OutputArray _lines,
|
||||
void LineSegmentDetectorImpl::detect(InputArray _image, OutputArray _lines,
|
||||
OutputArray _width, OutputArray _prec, OutputArray _nfa)
|
||||
{
|
||||
Mat_<double> img = _image.getMat();
|
||||
@ -1150,7 +1149,7 @@ inline bool LineSegmentDetectorImpl::isAligned(const int& address, const double&
|
||||
}
|
||||
|
||||
|
||||
void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, const InputArray lines)
|
||||
void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, InputArray lines)
|
||||
{
|
||||
CV_Assert(!_image.empty() && (_image.channels() == 1 || _image.channels() == 3));
|
||||
|
||||
@ -1186,10 +1185,10 @@ void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, const InputA
|
||||
}
|
||||
|
||||
|
||||
int LineSegmentDetectorImpl::compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, Mat* _image)
|
||||
int LineSegmentDetectorImpl::compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image)
|
||||
{
|
||||
Size sz = size;
|
||||
if (_image && _image->size() != size) sz = _image->size();
|
||||
if (_image.needed() && _image.size() != size) sz = _image.size();
|
||||
CV_Assert(sz.area());
|
||||
|
||||
Mat_<uchar> I1 = Mat_<uchar>::zeros(sz);
|
||||
@ -1219,14 +1218,11 @@ int LineSegmentDetectorImpl::compareSegments(const Size& size, const InputArray
|
||||
bitwise_xor(I1, I2, Ixor);
|
||||
int N = countNonZero(Ixor);
|
||||
|
||||
if (_image)
|
||||
if (_image.needed())
|
||||
{
|
||||
Mat Ig;
|
||||
if (_image->channels() == 1)
|
||||
{
|
||||
cvtColor(*_image, *_image, CV_GRAY2BGR);
|
||||
}
|
||||
CV_Assert(_image->isContinuous() && I1.isContinuous() && I2.isContinuous());
|
||||
CV_Assert(_image.channels() == 3);
|
||||
Mat img = _image.getMatRef();
|
||||
CV_Assert(img.isContinuous() && I1.isContinuous() && I2.isContinuous());
|
||||
|
||||
for (unsigned int i = 0; i < I1.total(); ++i)
|
||||
{
|
||||
@ -1234,11 +1230,12 @@ int LineSegmentDetectorImpl::compareSegments(const Size& size, const InputArray
|
||||
uchar i2 = I2.data[i];
|
||||
if (i1 || i2)
|
||||
{
|
||||
_image->data[3*i + 1] = 0;
|
||||
if (i1) _image->data[3*i] = 255;
|
||||
else _image->data[3*i] = 0;
|
||||
if (i2) _image->data[3*i + 2] = 255;
|
||||
else _image->data[3*i + 2] = 0;
|
||||
unsigned int base_idx = i * 3;
|
||||
if (i1) img.data[base_idx] = 255;
|
||||
else img.data[base_idx] = 0;
|
||||
img.data[base_idx + 1] = 0;
|
||||
if (i2) img.data[base_idx + 2] = 255;
|
||||
else img.data[base_idx + 2] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1213,11 +1213,10 @@ static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kerne
|
||||
}
|
||||
|
||||
static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
|
||||
InputArray _kernel,
|
||||
const Point &anchor, int iterations,
|
||||
const Mat& _kernel, Point anchor, int iterations,
|
||||
int borderType, const Scalar &borderValue)
|
||||
{
|
||||
Mat src = _src.getMat(), kernel = _kernel.getMat();
|
||||
Mat src = _src.getMat(), kernel = _kernel;
|
||||
if( !( src.depth() == CV_8U || src.depth() == CV_32F ) || ( iterations > 1 ) ||
|
||||
!( borderType == cv::BORDER_REPLICATE || (borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue()) )
|
||||
|| !( op == MORPH_DILATE || op == MORPH_ERODE) )
|
||||
@ -1248,9 +1247,6 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
|
||||
|
||||
}
|
||||
Size ksize = kernel.data ? kernel.size() : Size(3,3);
|
||||
Point normanchor = normalizeAnchor(anchor, ksize);
|
||||
|
||||
CV_Assert( normanchor.inside(Rect(0, 0, ksize.width, ksize.height)) );
|
||||
|
||||
_dst.create( src.size(), src.type() );
|
||||
Mat dst = _dst.getMat();
|
||||
@ -1265,7 +1261,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
|
||||
if( !kernel.data )
|
||||
{
|
||||
ksize = Size(1+iterations*2,1+iterations*2);
|
||||
normanchor = Point(iterations, iterations);
|
||||
anchor = Point(iterations, iterations);
|
||||
rectKernel = true;
|
||||
iterations = 1;
|
||||
}
|
||||
@ -1273,7 +1269,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
|
||||
{
|
||||
ksize = Size(ksize.width + (iterations-1)*(ksize.width-1),
|
||||
ksize.height + (iterations-1)*(ksize.height-1)),
|
||||
normanchor = Point(normanchor.x*iterations, normanchor.y*iterations);
|
||||
anchor = Point(anchor.x*iterations, anchor.y*iterations);
|
||||
kernel = Mat();
|
||||
rectKernel = true;
|
||||
iterations = 1;
|
||||
@ -1283,7 +1279,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
|
||||
if( iterations > 1 )
|
||||
return false;
|
||||
|
||||
return IPPMorphReplicate( op, src, dst, kernel, ksize, normanchor, rectKernel );
|
||||
return IPPMorphReplicate( op, src, dst, kernel, ksize, anchor, rectKernel );
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -1292,18 +1288,19 @@ static void morphOp( int op, InputArray _src, OutputArray _dst,
|
||||
Point anchor, int iterations,
|
||||
int borderType, const Scalar& borderValue )
|
||||
{
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
if( IPPMorphOp(op, _src, _dst, _kernel, anchor, iterations, borderType, borderValue) )
|
||||
return;
|
||||
#endif
|
||||
|
||||
Mat src = _src.getMat(), kernel = _kernel.getMat();
|
||||
Mat kernel = _kernel.getMat();
|
||||
Size ksize = kernel.data ? kernel.size() : Size(3,3);
|
||||
anchor = normalizeAnchor(anchor, ksize);
|
||||
|
||||
CV_Assert( anchor.inside(Rect(0, 0, ksize.width, ksize.height)) );
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
if( IPPMorphOp(op, _src, _dst, kernel, anchor, iterations, borderType, borderValue) )
|
||||
return;
|
||||
#endif
|
||||
|
||||
Mat src = _src.getMat();
|
||||
|
||||
_dst.create( src.size(), src.type() );
|
||||
Mat dst = _dst.getMat();
|
||||
|
||||
|
@ -1879,6 +1879,41 @@ private:
|
||||
float *space_weight, *color_weight;
|
||||
};
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
class IPPBilateralFilter_8u_Invoker :
|
||||
public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
IPPBilateralFilter_8u_Invoker(Mat &_src, Mat &_dst, double _sigma_color, double _sigma_space, int _radius, bool *_ok) :
|
||||
ParallelLoopBody(), src(_src), dst(_dst), sigma_color(_sigma_color), sigma_space(_sigma_space), radius(_radius), ok(_ok)
|
||||
{
|
||||
*ok = true;
|
||||
}
|
||||
|
||||
virtual void operator() (const Range& range) const
|
||||
{
|
||||
int d = radius * 2 + 1;
|
||||
IppiSize kernel = {d, d};
|
||||
IppiSize roi={dst.cols, range.end - range.start};
|
||||
int bufsize=0;
|
||||
ippiFilterBilateralGetBufSize_8u_C1R( ippiFilterBilateralGauss, roi, kernel, &bufsize);
|
||||
AutoBuffer<uchar> buf(bufsize);
|
||||
IppiFilterBilateralSpec *pSpec = (IppiFilterBilateralSpec *)alignPtr(&buf[0], 32);
|
||||
ippiFilterBilateralInit_8u_C1R( ippiFilterBilateralGauss, kernel, (Ipp32f)sigma_color, (Ipp32f)sigma_space, 1, pSpec );
|
||||
if( ippiFilterBilateral_8u_C1R( src.ptr<uchar>(range.start) + radius * ((int)src.step[0] + 1), (int)src.step[0], dst.ptr<uchar>(range.start), (int)dst.step[0], roi, kernel, pSpec ) < 0)
|
||||
*ok = false;
|
||||
}
|
||||
private:
|
||||
Mat &src;
|
||||
Mat &dst;
|
||||
double sigma_color;
|
||||
double sigma_space;
|
||||
int radius;
|
||||
bool *ok;
|
||||
const IPPBilateralFilter_8u_Invoker& operator= (const IPPBilateralFilter_8u_Invoker&);
|
||||
};
|
||||
#endif
|
||||
|
||||
static void
|
||||
bilateralFilter_8u( const Mat& src, Mat& dst, int d,
|
||||
double sigma_color, double sigma_space,
|
||||
@ -1908,32 +1943,19 @@ bilateralFilter_8u( const Mat& src, Mat& dst, int d,
|
||||
radius = MAX(radius, 1);
|
||||
d = radius*2 + 1;
|
||||
|
||||
#if 0 && defined HAVE_IPP && (IPP_VERSION_MAJOR >= 7)
|
||||
if(cn == 1)
|
||||
{
|
||||
IppiSize kernel = {d, d};
|
||||
IppiSize roi={src.cols, src.rows};
|
||||
int bufsize=0;
|
||||
ippiFilterBilateralGetBufSize_8u_C1R( ippiFilterBilateralGauss, roi, kernel, &bufsize);
|
||||
AutoBuffer<uchar> buf(bufsize+128);
|
||||
IppiFilterBilateralSpec *pSpec = (IppiFilterBilateralSpec *)alignPtr(&buf[0], 32);
|
||||
ippiFilterBilateralInit_8u_C1R( ippiFilterBilateralGauss, kernel, sigma_color*sigma_color, sigma_space*sigma_space, 1, pSpec );
|
||||
Mat tsrc;
|
||||
const Mat* psrc = &src;
|
||||
if( src.data == dst.data )
|
||||
{
|
||||
src.copyTo(tsrc);
|
||||
psrc = &tsrc;
|
||||
}
|
||||
if( ippiFilterBilateral_8u_C1R(psrc->data, (int)psrc->step[0],
|
||||
dst.data, (int)dst.step[0],
|
||||
roi, kernel, pSpec) >= 0 )
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
Mat temp;
|
||||
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
|
||||
|
||||
#if defined HAVE_IPP && (IPP_VERSION_MAJOR >= 7)
|
||||
if( cn == 1 )
|
||||
{
|
||||
bool ok;
|
||||
IPPBilateralFilter_8u_Invoker body(temp, dst, sigma_color * sigma_color, sigma_space * sigma_space, radius, &ok );
|
||||
parallel_for_(Range(0, dst.rows), body, dst.total()/(double)(1<<16));
|
||||
if( ok ) return;
|
||||
}
|
||||
#endif
|
||||
|
||||
std::vector<float> _color_weight(cn*256);
|
||||
std::vector<float> _space_weight(d*d);
|
||||
std::vector<int> _space_ofs(d*d);
|
||||
@ -2258,6 +2280,236 @@ void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d,
|
||||
"Bilateral filtering is only implemented for 8u and 32f images" );
|
||||
}
|
||||
|
||||
|
||||
/****************************************************************************************\
|
||||
Adaptive Bilateral Filtering
|
||||
\****************************************************************************************/
|
||||
|
||||
namespace cv
|
||||
{
|
||||
#define CALCVAR 1
|
||||
#define FIXED_WEIGHT 0
|
||||
|
||||
class adaptiveBilateralFilter_8u_Invoker :
|
||||
public ParallelLoopBody
|
||||
{
|
||||
public:
|
||||
adaptiveBilateralFilter_8u_Invoker(Mat& _dest, const Mat& _temp, Size _ksize, double _sigma_space, Point _anchor) :
|
||||
temp(&_temp), dest(&_dest), ksize(_ksize), sigma_space(_sigma_space), anchor(_anchor)
|
||||
{
|
||||
if( sigma_space <= 0 )
|
||||
sigma_space = 1;
|
||||
CV_Assert((ksize.width & 1) && (ksize.height & 1));
|
||||
space_weight.resize(ksize.width * ksize.height);
|
||||
double sigma2 = sigma_space * sigma_space;
|
||||
int idx = 0;
|
||||
int w = ksize.width / 2;
|
||||
int h = ksize.height / 2;
|
||||
for(int y=-h; y<=h; y++)
|
||||
for(int x=-w; x<=w; x++)
|
||||
{
|
||||
space_weight[idx++] = (float)(sigma2 / (sigma2 + x * x + y * y));
|
||||
}
|
||||
}
|
||||
virtual void operator()(const Range& range) const
|
||||
{
|
||||
int cn = dest->channels();
|
||||
int anX = anchor.x;
|
||||
|
||||
const uchar *tptr;
|
||||
|
||||
for(int i = range.start;i < range.end; i++)
|
||||
{
|
||||
int startY = i;
|
||||
if(cn == 1)
|
||||
{
|
||||
float var;
|
||||
int currVal;
|
||||
int sumVal = 0;
|
||||
int sumValSqr = 0;
|
||||
int currValCenter;
|
||||
int currWRTCenter;
|
||||
float weight;
|
||||
float totalWeight = 0.;
|
||||
float tmpSum = 0.;
|
||||
|
||||
for(int j = 0;j < dest->cols *cn; j+=cn)
|
||||
{
|
||||
sumVal = 0;
|
||||
sumValSqr= 0;
|
||||
totalWeight = 0.;
|
||||
tmpSum = 0.;
|
||||
|
||||
// Top row: don't sum the very last element
|
||||
int startLMJ = 0;
|
||||
int endLMJ = ksize.width - 1;
|
||||
int howManyAll = (anX *2 +1)*(ksize.width );
|
||||
#if CALCVAR
|
||||
for(int x = startLMJ; x< endLMJ; x++)
|
||||
{
|
||||
tptr = temp->ptr(startY + x) +j;
|
||||
for(int y=-anX; y<=anX; y++)
|
||||
{
|
||||
currVal = tptr[cn*(y+anX)];
|
||||
sumVal += currVal;
|
||||
sumValSqr += (currVal *currVal);
|
||||
}
|
||||
}
|
||||
var = ( (sumValSqr * howManyAll)- sumVal * sumVal ) / ( (float)(howManyAll*howManyAll));
|
||||
#else
|
||||
var = 900.0;
|
||||
#endif
|
||||
startLMJ = 0;
|
||||
endLMJ = ksize.width;
|
||||
tptr = temp->ptr(startY + (startLMJ+ endLMJ)/2);
|
||||
currValCenter =tptr[j+cn*anX];
|
||||
for(int x = startLMJ; x< endLMJ; x++)
|
||||
{
|
||||
tptr = temp->ptr(startY + x) +j;
|
||||
for(int y=-anX; y<=anX; y++)
|
||||
{
|
||||
#if FIXED_WEIGHT
|
||||
weight = 1.0;
|
||||
#else
|
||||
currVal = tptr[cn*(y+anX)];
|
||||
currWRTCenter = currVal - currValCenter;
|
||||
|
||||
weight = var / ( var + (currWRTCenter * currWRTCenter) ) * space_weight[x*ksize.width+y+anX];;
|
||||
#endif
|
||||
tmpSum += ((float)tptr[cn*(y+anX)] * weight);
|
||||
totalWeight += weight;
|
||||
}
|
||||
}
|
||||
tmpSum /= totalWeight;
|
||||
|
||||
dest->at<uchar>(startY ,j)= static_cast<uchar>(tmpSum);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(cn == 3);
|
||||
float var_b, var_g, var_r;
|
||||
int currVal_b, currVal_g, currVal_r;
|
||||
int sumVal_b= 0, sumVal_g= 0, sumVal_r= 0;
|
||||
int sumValSqr_b= 0, sumValSqr_g= 0, sumValSqr_r= 0;
|
||||
int currValCenter_b= 0, currValCenter_g= 0, currValCenter_r= 0;
|
||||
int currWRTCenter_b, currWRTCenter_g, currWRTCenter_r;
|
||||
float weight_b, weight_g, weight_r;
|
||||
float totalWeight_b= 0., totalWeight_g= 0., totalWeight_r= 0.;
|
||||
float tmpSum_b = 0., tmpSum_g= 0., tmpSum_r = 0.;
|
||||
|
||||
for(int j = 0;j < dest->cols *cn; j+=cn)
|
||||
{
|
||||
sumVal_b= 0, sumVal_g= 0, sumVal_r= 0;
|
||||
sumValSqr_b= 0, sumValSqr_g= 0, sumValSqr_r= 0;
|
||||
totalWeight_b= 0., totalWeight_g= 0., totalWeight_r= 0.;
|
||||
tmpSum_b = 0., tmpSum_g= 0., tmpSum_r = 0.;
|
||||
|
||||
// Top row: don't sum the very last element
|
||||
int startLMJ = 0;
|
||||
int endLMJ = ksize.width - 1;
|
||||
int howManyAll = (anX *2 +1)*(ksize.width);
|
||||
#if CALCVAR
|
||||
for(int x = startLMJ; x< endLMJ; x++)
|
||||
{
|
||||
tptr = temp->ptr(startY + x) +j;
|
||||
for(int y=-anX; y<=anX; y++)
|
||||
{
|
||||
currVal_b = tptr[cn*(y+anX)], currVal_g = tptr[cn*(y+anX)+1], currVal_r =tptr[cn*(y+anX)+2];
|
||||
sumVal_b += currVal_b;
|
||||
sumVal_g += currVal_g;
|
||||
sumVal_r += currVal_r;
|
||||
sumValSqr_b += (currVal_b *currVal_b);
|
||||
sumValSqr_g += (currVal_g *currVal_g);
|
||||
sumValSqr_r += (currVal_r *currVal_r);
|
||||
}
|
||||
}
|
||||
var_b = ( (sumValSqr_b * howManyAll)- sumVal_b * sumVal_b ) / ( (float)(howManyAll*howManyAll));
|
||||
var_g = ( (sumValSqr_g * howManyAll)- sumVal_g * sumVal_g ) / ( (float)(howManyAll*howManyAll));
|
||||
var_r = ( (sumValSqr_r * howManyAll)- sumVal_r * sumVal_r ) / ( (float)(howManyAll*howManyAll));
|
||||
#else
|
||||
var_b = 900.0; var_g = 900.0;var_r = 900.0;
|
||||
#endif
|
||||
startLMJ = 0;
|
||||
endLMJ = ksize.width;
|
||||
tptr = temp->ptr(startY + (startLMJ+ endLMJ)/2) + j;
|
||||
currValCenter_b =tptr[cn*anX], currValCenter_g =tptr[cn*anX+1], currValCenter_r =tptr[cn*anX+2];
|
||||
for(int x = startLMJ; x< endLMJ; x++)
|
||||
{
|
||||
tptr = temp->ptr(startY + x) +j;
|
||||
for(int y=-anX; y<=anX; y++)
|
||||
{
|
||||
#if FIXED_WEIGHT
|
||||
weight_b = 1.0;
|
||||
weight_g = 1.0;
|
||||
weight_r = 1.0;
|
||||
#else
|
||||
currVal_b = tptr[cn*(y+anX)];currVal_g=tptr[cn*(y+anX)+1];currVal_r=tptr[cn*(y+anX)+2];
|
||||
currWRTCenter_b = currVal_b - currValCenter_b;
|
||||
currWRTCenter_g = currVal_g - currValCenter_g;
|
||||
currWRTCenter_r = currVal_r - currValCenter_r;
|
||||
|
||||
float cur_spw = space_weight[x*ksize.width+y+anX];
|
||||
weight_b = var_b / ( var_b + (currWRTCenter_b * currWRTCenter_b) ) * cur_spw;
|
||||
weight_g = var_g / ( var_g + (currWRTCenter_g * currWRTCenter_g) ) * cur_spw;
|
||||
weight_r = var_r / ( var_r + (currWRTCenter_r * currWRTCenter_r) ) * cur_spw;
|
||||
#endif
|
||||
tmpSum_b += ((float)tptr[cn*(y+anX)] * weight_b);
|
||||
tmpSum_g += ((float)tptr[cn*(y+anX)+1] * weight_g);
|
||||
tmpSum_r += ((float)tptr[cn*(y+anX)+2] * weight_r);
|
||||
totalWeight_b += weight_b, totalWeight_g += weight_g, totalWeight_r += weight_r;
|
||||
}
|
||||
}
|
||||
tmpSum_b /= totalWeight_b;
|
||||
tmpSum_g /= totalWeight_g;
|
||||
tmpSum_r /= totalWeight_r;
|
||||
|
||||
dest->at<uchar>(startY,j )= static_cast<uchar>(tmpSum_b);
|
||||
dest->at<uchar>(startY,j+1)= static_cast<uchar>(tmpSum_g);
|
||||
dest->at<uchar>(startY,j+2)= static_cast<uchar>(tmpSum_r);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
private:
|
||||
const Mat *temp;
|
||||
Mat *dest;
|
||||
Size ksize;
|
||||
double sigma_space;
|
||||
Point anchor;
|
||||
std::vector<float> space_weight;
|
||||
};
|
||||
static void adaptiveBilateralFilter_8u( const Mat& src, Mat& dst, Size ksize, double sigmaSpace, Point anchor, int borderType )
|
||||
{
|
||||
Size size = src.size();
|
||||
|
||||
CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) &&
|
||||
src.type() == dst.type() && src.size() == dst.size() &&
|
||||
src.data != dst.data );
|
||||
Mat temp;
|
||||
copyMakeBorder(src, temp, anchor.x, anchor.y, anchor.x, anchor.y, borderType);
|
||||
|
||||
adaptiveBilateralFilter_8u_Invoker body(dst, temp, ksize, sigmaSpace, anchor);
|
||||
parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16));
|
||||
}
|
||||
}
|
||||
void cv::adaptiveBilateralFilter( InputArray _src, OutputArray _dst, Size ksize,
|
||||
double sigmaSpace, Point anchor, int borderType )
|
||||
{
|
||||
Mat src = _src.getMat();
|
||||
_dst.create(src.size(), src.type());
|
||||
Mat dst = _dst.getMat();
|
||||
|
||||
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC3);
|
||||
|
||||
anchor = normalizeAnchor(anchor,ksize);
|
||||
if( src.depth() == CV_8U )
|
||||
adaptiveBilateralFilter_8u( src, dst, ksize, sigmaSpace, anchor, borderType );
|
||||
else
|
||||
CV_Error( CV_StsUnsupportedFormat,
|
||||
"Adaptive Bilateral filtering is only implemented for 8u images" );
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
CV_IMPL void
|
||||
|
@ -251,7 +251,7 @@ namespace cvtest
|
||||
|
||||
int CV_BilateralFilterTest::validate_test_results(int test_case_index)
|
||||
{
|
||||
static const double eps = 1;
|
||||
static const double eps = 4;
|
||||
|
||||
Mat reference_dst, reference_src;
|
||||
if (_src.depth() == CV_32F)
|
||||
|
@ -1424,7 +1424,7 @@ TEST(Imgproc_fitLine_vector_2d, regression)
|
||||
|
||||
TEST(Imgproc_fitLine_Mat_2dC2, regression)
|
||||
{
|
||||
cv::Mat mat1(3, 1, CV_32SC2);
|
||||
cv::Mat mat1 = Mat::zeros(3, 1, CV_32SC2);
|
||||
std::vector<float> line1;
|
||||
|
||||
cv::fitLine(mat1, line1, CV_DIST_L2, 0 ,0 ,0);
|
||||
@ -1444,7 +1444,7 @@ TEST(Imgproc_fitLine_Mat_2dC1, regression)
|
||||
|
||||
TEST(Imgproc_fitLine_Mat_3dC3, regression)
|
||||
{
|
||||
cv::Mat mat1(2, 1, CV_32SC3);
|
||||
cv::Mat mat1 = Mat::zeros(2, 1, CV_32SC3);
|
||||
std::vector<float> line1;
|
||||
|
||||
cv::fitLine(mat1, line1, CV_DIST_L2, 0 ,0 ,0);
|
||||
@ -1454,7 +1454,7 @@ TEST(Imgproc_fitLine_Mat_3dC3, regression)
|
||||
|
||||
TEST(Imgproc_fitLine_Mat_3dC1, regression)
|
||||
{
|
||||
cv::Mat mat2(2, 3, CV_32SC1);
|
||||
cv::Mat mat2 = Mat::zeros(2, 3, CV_32SC1);
|
||||
std::vector<float> line2;
|
||||
|
||||
cv::fitLine(mat2, line2, CV_DIST_L2, 0 ,0 ,0);
|
||||
|
@ -678,8 +678,8 @@ void CV_Remap_Test::generate_test_data()
|
||||
MatIterator_<Vec2s> begin_x = mapx.begin<Vec2s>(), end_x = mapx.end<Vec2s>();
|
||||
for ( ; begin_x != end_x; ++begin_x)
|
||||
{
|
||||
begin_x[0] = static_cast<short>(rng.uniform(static_cast<int>(_n), std::max(src.cols + n - 1, 0)));
|
||||
begin_x[1] = static_cast<short>(rng.uniform(static_cast<int>(_n), std::max(src.rows + n - 1, 0)));
|
||||
(*begin_x)[0] = static_cast<short>(rng.uniform(static_cast<int>(_n), std::max(src.cols + n - 1, 0)));
|
||||
(*begin_x)[1] = static_cast<short>(rng.uniform(static_cast<int>(_n), std::max(src.rows + n - 1, 0)));
|
||||
}
|
||||
|
||||
if (interpolation != INTER_NEAREST)
|
||||
|
@ -3,7 +3,7 @@
|
||||
package="org.opencv.test"
|
||||
android:versionCode="1"
|
||||
android:versionName="1.0">
|
||||
|
||||
|
||||
<uses-sdk android:minSdkVersion="8" />
|
||||
|
||||
<!-- We add an application tag here just so that we can indicate that
|
||||
@ -20,7 +20,7 @@
|
||||
<instrumentation android:name="org.opencv.test.OpenCVTestRunner"
|
||||
android:targetPackage="org.opencv.test"
|
||||
android:label="Tests for org.opencv"/>
|
||||
|
||||
|
||||
<uses-permission android:name="android.permission.CAMERA"/>
|
||||
<uses-feature android:name="android.hardware.camera" />
|
||||
<uses-feature android:name="android.hardware.camera.autofocus" />
|
||||
|
@ -4,9 +4,9 @@
|
||||
android:layout_width="fill_parent"
|
||||
android:layout_height="fill_parent"
|
||||
>
|
||||
<TextView
|
||||
android:layout_width="fill_parent"
|
||||
android:layout_height="wrap_content"
|
||||
<TextView
|
||||
android:layout_width="fill_parent"
|
||||
android:layout_height="wrap_content"
|
||||
android:text="@string/hello"
|
||||
/>
|
||||
</LinearLayout>
|
||||
|
@ -632,7 +632,8 @@ def getLibVersion(version_hpp_path):
|
||||
major = re.search("^W*#\W*define\W+CV_VERSION_MAJOR\W+(\d+)\W*$", version_file, re.MULTILINE).group(1)
|
||||
minor = re.search("^W*#\W*define\W+CV_VERSION_MINOR\W+(\d+)\W*$", version_file, re.MULTILINE).group(1)
|
||||
revision = re.search("^W*#\W*define\W+CV_VERSION_REVISION\W+(\d+)\W*$", version_file, re.MULTILINE).group(1)
|
||||
return (epoch, major, minor, revision)
|
||||
status = re.search("^W*#\W*define\W+CV_VERSION_STATUS\W+\"(.*?)\"\W*$", version_file, re.MULTILINE).group(1)
|
||||
return (epoch, major, minor, revision, status)
|
||||
|
||||
class ConstInfo(object):
|
||||
def __init__(self, cname, name, val, addedManually=False):
|
||||
@ -799,15 +800,19 @@ public class %(jc)s {
|
||||
""" % { 'm' : self.module, 'jc' : jname } )
|
||||
|
||||
if class_name == 'Core':
|
||||
(epoch, major, minor, revision) = getLibVersion(
|
||||
(epoch, major, minor, revision, status) = getLibVersion(
|
||||
(os.path.dirname(__file__) or '.') + '/../../core/include/opencv2/core/version.hpp')
|
||||
version_str = '.'.join( (epoch, major, minor, revision) )
|
||||
version_str = '.'.join( (epoch, major, minor, revision) ) + status
|
||||
version_suffix = ''.join( (epoch, major, minor) )
|
||||
self.classes[class_name].imports.add("java.lang.String")
|
||||
self.java_code[class_name]["j_code"].write("""
|
||||
public static final String VERSION = "%(v)s", NATIVE_LIBRARY_NAME = "opencv_java%(vs)s";
|
||||
public static final int VERSION_EPOCH = %(ep)s, VERSION_MAJOR = %(ma)s, VERSION_MINOR = %(mi)s, VERSION_REVISION = %(re)s;
|
||||
""" % { 'v' : version_str, 'vs' : version_suffix, 'ep' : epoch, 'ma' : major, 'mi' : minor, 're' : revision } )
|
||||
public static final int VERSION_EPOCH = %(ep)s;
|
||||
public static final int VERSION_MAJOR = %(ma)s;
|
||||
public static final int VERSION_MINOR = %(mi)s;
|
||||
public static final int VERSION_REVISION = %(re)s;
|
||||
public static final String VERSION_STATUS = "%(st)s";
|
||||
""" % { 'v' : version_str, 'vs' : version_suffix, 'ep' : epoch, 'ma' : major, 'mi' : minor, 're' : revision, 'st': status } )
|
||||
|
||||
|
||||
def add_class(self, decl):
|
||||
|
307
modules/matlab/CMakeLists.txt
Normal file
307
modules/matlab/CMakeLists.txt
Normal file
@ -0,0 +1,307 @@
|
||||
# ----------------------------------------------------------------------------
|
||||
# CMake file for Matlab/Octave support
|
||||
#
|
||||
# Matlab code generation and compilation is broken down into two distinct
|
||||
# stages: configure time and build time. The idea is that we want to give
|
||||
# the user reasonable guarantees that once they type 'make', wrapper
|
||||
# generation is unlikely to fail. Therefore we run a series of tests at
|
||||
# configure time to check the working status of the core components.
|
||||
#
|
||||
# Configure Time
|
||||
# During configure time, the script attempts to ascertain whether the
|
||||
# generator and mex compiler are working for a given architecture.
|
||||
# Currently this involves:
|
||||
# 1) Generating a simple CV_EXPORTS_W symbol and checking whether a file
|
||||
# of the symbol name is generated
|
||||
# 2) Compiling a simple mex gateway to check that Bridge.hpp and mex.h
|
||||
# can be found, and that a file with the mexext is produced
|
||||
#
|
||||
# Build Time
|
||||
# If the configure time tests pass, then we assume Matlab wrapper generation
|
||||
# will not fail during build time. We simply glob all of the symbols in
|
||||
# the OpenCV module headers, generate intermediate .cpp files, then compile
|
||||
# them with mex.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
# PREPEND
|
||||
# Given a list of strings IN and a TOKEN, prepend the token to each string
|
||||
# and append to OUT. This is used for passing command line "-I", "-L" and "-l"
|
||||
# arguments to mex. e.g.
|
||||
# prepend("-I" OUT /path/to/include/dir) --> -I/path/to/include/dir
|
||||
macro(PREPEND TOKEN OUT IN)
|
||||
foreach(VAR ${IN})
|
||||
list(APPEND ${OUT} "${TOKEN}${VAR}")
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
|
||||
# WARN_MIXED_PRECISION
|
||||
# Formats a warning message if the compiler and Matlab bitness is different
|
||||
macro(WARN_MIXED_PRECISION COMPILER_BITNESS MATLAB_BITNESS)
|
||||
set(MSG "Your compiler is ${COMPILER_BITNESS}-bit")
|
||||
set(MSG "${MSG} but your version of Matlab is ${MATLAB_BITNESS}-bit.")
|
||||
set(MSG "${MSG} To build Matlab bindings, please switch to a ${MATLAB_BITNESS}-bit compiler.")
|
||||
message(WARNING ${MSG})
|
||||
endmacro()
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Architecture checks
|
||||
# ----------------------------------------------------------------------------
|
||||
# make sure we're on a supported architecture with Matlab and python installed
|
||||
if (IOS OR ANDROID OR NOT MATLAB_FOUND)
|
||||
ocv_module_disable(matlab)
|
||||
return()
|
||||
elseif (NOT PYTHONLIBS_FOUND)
|
||||
message(WARNING "A required dependency of the matlab module (PythonLibs) was not found. Disabling Matlab bindings...")
|
||||
ocv_module_disable(matlab)
|
||||
return()
|
||||
endif()
|
||||
|
||||
|
||||
# If the user built OpenCV as X-bit, but they have a Y-bit version of Matlab,
|
||||
# attempting to link to OpenCV during binding generation will fail, since
|
||||
# mixed precision pointers are not allowed. Disable the bindings.
|
||||
math(EXPR ARCH "${CMAKE_SIZEOF_VOID_P} * 8")
|
||||
if (${ARCH} EQUAL 32 AND ${MATLAB_ARCH} MATCHES "64")
|
||||
warn_mixed_precision("32" "64")
|
||||
ocv_module_disable(matlab)
|
||||
return()
|
||||
elseif (${ARCH} EQUAL 64 AND NOT ${MATLAB_ARCH} MATCHES "64")
|
||||
warn_mixed_precision("64" "32")
|
||||
ocv_module_disable(matlab)
|
||||
return()
|
||||
endif()
|
||||
|
||||
# If it's MSVC, warn the user that bindings will only be built in Release mode.
|
||||
# Debug mode seems to cause issues...
|
||||
if (MSVC)
|
||||
message(STATUS "Warning: Matlab bindings will only be built in Release configurations")
|
||||
endif()
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Configure time components
|
||||
# ----------------------------------------------------------------------------
|
||||
set(the_description "The Matlab/Octave bindings")
|
||||
ocv_add_module(matlab BINDINGS
|
||||
OPTIONAL opencv_core
|
||||
opencv_imgproc opencv_ml opencv_highgui
|
||||
opencv_objdetect opencv_flann opencv_features2d
|
||||
opencv_photo opencv_video opencv_videostab
|
||||
opencv_calib opencv_calib3d
|
||||
opencv_stitching opencv_superres
|
||||
opencv_nonfree
|
||||
)
|
||||
|
||||
# get the commit information
|
||||
execute_process(COMMAND git log -1 --pretty=%H OUTPUT_VARIABLE GIT_COMMIT ERROR_QUIET)
|
||||
string(REGEX REPLACE "(\r?\n)+$" "" GIT_COMMIT "${GIT_COMMIT}")
|
||||
|
||||
# set the path to the C++ header and doc parser, and template engine
|
||||
set(JINJA2_PATH ${CMAKE_SOURCE_DIR}/3rdparty)
|
||||
set(HDR_PARSER_PATH ${CMAKE_SOURCE_DIR}/modules/python/src2)
|
||||
set(RST_PARSER_PATH ${CMAKE_SOURCE_DIR}/modules/java/generator)
|
||||
|
||||
# set mex compiler options
|
||||
prepend("-I" MEX_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include)
|
||||
prepend("-L" MEX_LIB_DIR ${LIBRARY_OUTPUT_PATH}/$(Configuration))
|
||||
set(MEX_OPTS "-largeArrayDims")
|
||||
|
||||
if (BUILD_TESTS)
|
||||
add_subdirectory(test)
|
||||
endif()
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
|
||||
|
||||
|
||||
# intersection of available modules and optional dependencies
|
||||
# 1. populate the command-line include directories (-I/path/to/module/header, ...)
|
||||
# 2. populate the command-line link libraries (-lopencv_core, ...) for Debug and Release
|
||||
set(MATLAB_DEPS ${OPENCV_MODULE_${the_module}_REQ_DEPS} ${OPENCV_MODULE_${the_module}_OPT_DEPS})
|
||||
foreach(opencv_module ${MATLAB_DEPS})
|
||||
if (HAVE_${opencv_module})
|
||||
string(REPLACE "opencv_" "" module ${opencv_module})
|
||||
list(APPEND opencv_modules ${module})
|
||||
list(APPEND ${the_module}_ACTUAL_DEPS ${opencv_module})
|
||||
prepend("-I" MEX_INCLUDE_DIRS "${OPENCV_MODULE_${opencv_module}_LOCATION}/include")
|
||||
prepend("-l" MEX_LIBS ${opencv_module}${OPENCV_DLLVERSION})
|
||||
prepend("-l" MEX_DEBUG_LIBS ${opencv_module}${OPENCV_DLLVERSION}${OPENCV_DEBUG_POSTFIX})
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
# add extra headers by hand
|
||||
list(APPEND opencv_extra_hdrs "core=${OPENCV_MODULE_opencv_core_LOCATION}/include/opencv2/core/base.hpp")
|
||||
list(APPEND opencv_extra_hdrs "video=${OPENCV_MODULE_opencv_video_LOCATION}/include/opencv2/video/tracking.hpp")
|
||||
|
||||
# pass the OPENCV_CXX_EXTRA_FLAGS through to the mex compiler
|
||||
# remove the visibility modifiers, so the mex gateway is visible
|
||||
# TODO: get mex working without warnings
|
||||
string(REGEX REPLACE "[^\ ]*visibility[^\ ]*" "" MEX_CXXFLAGS "${OPENCV_EXTRA_FLAGS} ${OPENCV_EXTRA_CXX_FLAGS}")
|
||||
|
||||
# Configure checks
|
||||
# Check to see whether the generator and the mex compiler are working.
|
||||
# The checks currently test:
|
||||
# - whether the python generator can be found
|
||||
# - whether the python generator correctly outputs a file for a definition
|
||||
# - whether the mex compiler can find the required headers
|
||||
# - whether the mex compiler can compile a trivial definition
|
||||
if (NOT MEX_WORKS)
|
||||
# attempt to generate a gateway for a function
|
||||
message(STATUS "Trying to generate Matlab code")
|
||||
execute_process(
|
||||
COMMAND ${PYTHON_EXECUTABLE}
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/generator/gen_matlab.py
|
||||
--jinja2 ${JINJA2_PATH}
|
||||
--hdrparser ${HDR_PARSER_PATH}
|
||||
--rstparser ${RST_PARSER_PATH}
|
||||
--extra "test=${CMAKE_CURRENT_SOURCE_DIR}/test/test_generator.hpp"
|
||||
--outdir ${CMAKE_BINARY_DIR}/junk
|
||||
ERROR_VARIABLE GEN_ERROR
|
||||
OUTPUT_QUIET
|
||||
)
|
||||
|
||||
if (GEN_ERROR)
|
||||
message(${GEN_ERROR})
|
||||
message(STATUS "Error generating Matlab code. Disabling Matlab bindings...")
|
||||
ocv_module_disable(matlab)
|
||||
return()
|
||||
else()
|
||||
message(STATUS "Trying to generate Matlab code - OK")
|
||||
endif()
|
||||
|
||||
# attempt to compile a gateway using mex
|
||||
message(STATUS "Trying to compile mex file")
|
||||
execute_process(
|
||||
COMMAND ${MATLAB_MEX_SCRIPT} ${MEX_OPTS} "CXXFLAGS=\$CXXFLAGS ${MEX_CXX_FLAGS}"
|
||||
${MEX_INCLUDE_DIRS} ${CMAKE_CURRENT_SOURCE_DIR}/test/test_compiler.cpp
|
||||
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/junk
|
||||
ERROR_VARIABLE MEX_ERROR
|
||||
OUTPUT_QUIET
|
||||
)
|
||||
|
||||
if (MEX_ERROR)
|
||||
message(${MEX_ERROR})
|
||||
message(STATUS "Error compiling mex file. Disabling Matlab bindings...")
|
||||
ocv_module_disable(matlab)
|
||||
return()
|
||||
else()
|
||||
message(STATUS "Trying to compile mex file - OK")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# if we make it here, mex works!
|
||||
set(MEX_WORKS True CACHE BOOL ADVANCED)
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Build time components
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
# proxies
|
||||
# these proxies are used to trigger the add_custom_commands
|
||||
# (which do the real work) only when they're outdated
|
||||
set(GENERATE_PROXY ${CMAKE_CURRENT_BINARY_DIR}/generate.proxy)
|
||||
set(COMPILE_PROXY ${CMAKE_CURRENT_BINARY_DIR}/compile.proxy)
|
||||
# TODO: Remove following line before merging with master
|
||||
file(REMOVE ${GENERATE_PROXY} ${COMPILE_PROXY})
|
||||
|
||||
# generate
|
||||
# call the python executable to generate the Matlab gateways
|
||||
add_custom_command(
|
||||
OUTPUT ${GENERATE_PROXY}
|
||||
COMMAND ${PYTHON_EXECUTABLE}
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/generator/gen_matlab.py
|
||||
--jinja2 ${JINJA2_PATH}
|
||||
--hdrparser ${HDR_PARSER_PATH}
|
||||
--rstparser ${RST_PARSER_PATH}
|
||||
--moduleroot ${CMAKE_SOURCE_DIR}/modules
|
||||
--modules ${opencv_modules}
|
||||
--extra ${opencv_extra_hdrs}
|
||||
--outdir ${CMAKE_CURRENT_BINARY_DIR}
|
||||
COMMAND ${PYTHON_EXECUTABLE}
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/generator/build_info.py
|
||||
--jinja2 ${JINJA2_PATH}
|
||||
--os ${CMAKE_SYSTEM}
|
||||
--arch ${ARCH} ${CMAKE_SYSTEM_PROCESSOR}
|
||||
--compiler ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}
|
||||
--mex_arch ${MATLAB_ARCH}
|
||||
--mex_script ${MATLAB_MEX_SCRIPT}
|
||||
--cxx_flags ${MEX_CXXFLAGS}
|
||||
--opencv_version ${OPENCV_VERSION}
|
||||
--commit ${GIT_COMMIT}
|
||||
--modules ${opencv_modules}
|
||||
--configuration "$(Configuration)" ${CMAKE_BUILD_TYPE}
|
||||
--outdir ${CMAKE_CURRENT_BINARY_DIR}
|
||||
COMMAND ${PYTHON_EXECUTABLE}
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/generator/cvmex.py
|
||||
--jinja2 ${JINJA2_PATH}
|
||||
--opts="${MEX_OPTS}"
|
||||
--include_dirs="${MEX_INCLUDE_DIRS}"
|
||||
--lib_dir=${MEX_LIB_DIR}
|
||||
--libs="${MEX_LIBS}"
|
||||
--flags ${MEX_CXXFLAGS}
|
||||
--outdir ${CMAKE_CURRENT_BINARY_DIR}
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/test/help.m ${CMAKE_CURRENT_BINARY_DIR}/+cv
|
||||
COMMAND ${CMAKE_COMMAND} -E touch ${GENERATE_PROXY}
|
||||
COMMENT "Generating Matlab source files"
|
||||
)
|
||||
|
||||
# compile
|
||||
# call the mex compiler to compile the gateways
|
||||
# because we don't know the source files at configure-time, this
|
||||
# has to be executed in a separate script in cmake's script processing mode
|
||||
add_custom_command(
|
||||
OUTPUT ${COMPILE_PROXY}
|
||||
COMMAND ${CMAKE_COMMAND} -DMATLAB_MEX_SCRIPT=${MATLAB_MEX_SCRIPT}
|
||||
-DMATLAB_MEXEXT=${MATLAB_MEXEXT}
|
||||
-DMEX_OPTS=${MEX_OPTS}
|
||||
-DMEX_CXXFLAGS=${MEX_CXX_FLAGS}
|
||||
-DMEX_INCLUDE_DIRS="${MEX_INCLUDE_DIRS}"
|
||||
-DMEX_LIB_DIR=${MEX_LIB_DIR}
|
||||
-DCONFIGURATION="$(Configuration)"
|
||||
-DMEX_LIBS="${MEX_LIBS}"
|
||||
-DMEX_DEBUG_LIBS="${MEX_DEBUG_LIBS}"
|
||||
-P ${CMAKE_CURRENT_SOURCE_DIR}/compile.cmake
|
||||
COMMAND ${CMAKE_COMMAND} -E touch ${COMPILE_PROXY}
|
||||
COMMENT "Compiling Matlab source files. This could take a while..."
|
||||
)
|
||||
|
||||
# targets
|
||||
# opencv_matlab_sources --> opencv_matlab
|
||||
add_custom_target(${the_module}_sources ALL DEPENDS ${GENERATE_PROXY})
|
||||
add_custom_target(${the_module} ALL DEPENDS ${COMPILE_PROXY})
|
||||
add_dependencies(${the_module} ${the_module}_sources ${${the_module}_ACTUAL_DEPS})
|
||||
|
||||
if (ENABLE_SOLUTION_FOLDERS)
|
||||
set_target_properties(${the_module} PROPERTIES FOLDER "modules")
|
||||
endif()
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Install time components
|
||||
# ----------------------------------------------------------------------------
|
||||
# NOTE: Trailing slashes on the DIRECTORY paths are important!
|
||||
# TODO: What needs to be done with rpath????
|
||||
|
||||
# install the +cv directory verbatim
|
||||
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH})
|
||||
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/+cv/ DESTINATION matlab/+cv)
|
||||
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/cv.m DESTINATION matlab)
|
||||
|
||||
# update the custom mex compiler to point to the install locations
|
||||
string(REPLACE ";" "\\ " MEX_OPTS "${MEX_OPTS}")
|
||||
string(REPLACE ";" "\\ " MEX_LIBS "${MEX_LIBS}")
|
||||
string(REPLACE " " "\\ " MEX_CXXFLAGS ${MEX_CXXFLAGS})
|
||||
string(REPLACE ";" "\\ " MEX_INCLUDE_DIRS "${MEX_INCLUDE_DIRS}")
|
||||
install(CODE
|
||||
"execute_process(
|
||||
COMMAND ${PYTHON_EXECUTABLE}
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/generator/cvmex.py
|
||||
--jinja2 ${JINJA2_PATH}
|
||||
--opts=${MEX_OPTS}
|
||||
--include_dirs=-I${CMAKE_INSTALL_PREFIX}/${OPENCV_INCLUDE_INSTALL_PATH}
|
||||
--lib_dir=-L${CMAKE_INSTALL_PREFIX}/${OPENCV_LIB_INSTALL_PATH}
|
||||
--libs=${MEX_LIBS}
|
||||
--flags=${MEX_CXXFLAGS}
|
||||
--outdir ${CMAKE_INSTALL_PREFIX}/matlab
|
||||
)"
|
||||
)
|
42
modules/matlab/LICENSE
Normal file
42
modules/matlab/LICENSE
Normal file
@ -0,0 +1,42 @@
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this
|
||||
// license. If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote
|
||||
// products derived from this software without specific prior written
|
||||
// permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is"
|
||||
// and any express or implied warranties, including, but not limited to, the
|
||||
// implied warranties of merchantability and fitness for a particular purpose
|
||||
// are disclaimed. In no event shall the Intel Corporation or contributors be
|
||||
// liable for any direct, indirect, incidental, special, exemplary, or
|
||||
// consequential damages (including, but not limited to, procurement of
|
||||
// substitute goods or services; loss of use, data, or profits; or business
|
||||
// interruption) however caused and on any theory of liability, whether in
|
||||
// contract, strict liability, or tort (including negligence or otherwise)
|
||||
// arising in any way out of the use of this software, even if advised of the
|
||||
// possibility of such damage.
|
||||
//
|
||||
////////////////////////////////////////////////////////////////////////////////
|
394
modules/matlab/README.md
Normal file
394
modules/matlab/README.md
Normal file
@ -0,0 +1,394 @@
|
||||
OpenCV Matlab Code Generator
|
||||
============================
|
||||
This module contains a code generator to automatically produce Matlab mex wrappers for other modules within the OpenCV library. Once compiled and added to the Matlab path, this gives users the ability to call OpenCV methods natively from within Matlab.
|
||||
|
||||
|
||||
Build
|
||||
-----
|
||||
The Matlab code generator is fully integrated into the OpenCV build system. If cmake finds a Matlab installation available on the host system while configuring OpenCV, it will attempt to generate Matlab wrappers for all OpenCV modules. If cmake is having trouble finding your Matlab installation, you can explicitly point it to the root by defining the `MATLAB_ROOT_DIR` variable. For example, on a Mac you could type:
|
||||
|
||||
cmake -DMATLAB_ROOT_DIR=/Applications/MATLAB_R2013a.app ..
|
||||
|
||||
|
||||
Install
|
||||
-------
|
||||
In order to use the bindings, you will need to add them to the Matlab path. The path to add is:
|
||||
|
||||
1. `${CMAKE_BUILD_DIR}/modules/matlab` if you are working from the build tree, or
|
||||
2. `${CMAKE_INSTALL_PREFIX}/matlab` if you have installed OpenCV
|
||||
|
||||
In Matlab, simply run:
|
||||
|
||||
addpath('/path/to/opencv/matlab/');
|
||||
|
||||
|
||||
Run
|
||||
---
|
||||
Once you've added the bindings directory to the Matlab path, you can start using them straight away! OpenCV calls need to be prefixed with a 'cv' qualifier, to disambiguate them from Matlab methods of the same name. For example, to compute the dft of a matrix, you might do the following:
|
||||
|
||||
```matlab
|
||||
% load an image (Matlab)
|
||||
I = imread('cameraman.tif');
|
||||
|
||||
% compute the DFT (OpenCV)
|
||||
If = cv.dft(I, cv.DFT_COMPLEX_OUTPUT);
|
||||
```
|
||||
|
||||
As you can see, both OpenCV methods and constants can be used with 'cv' qualification. You can also call:
|
||||
|
||||
help cv.dft
|
||||
|
||||
to get help on the purpose and call signature of a particular method, or
|
||||
|
||||
help cv
|
||||
|
||||
to get general help regarding the OpenCV bindings. If you ever run into issues with the bindings
|
||||
|
||||
cv.buildInformation();
|
||||
|
||||
will produce a printout of diagnostic information pertaining to your particular build of OS, OpenCV and Matlab. It is useful to submit this information alongside a bug report to the OpenCV team.
|
||||
|
||||
Writing your own mex files
|
||||
--------------------------
|
||||
The Matlab bindings come with a set of utilities to help you quickly write your own mex files using OpenCV definitions. By doing so, you have all the speed and freedom of C++, with the power of OpenCV's math expressions and optimizations.
|
||||
|
||||
The first thing you need to learn how to do is write a mex-file with Matlab constructs. Following is a brief example:
|
||||
|
||||
```cpp
|
||||
// include useful constructs
|
||||
// this automatically includes opencv core.hpp and mex.h)
|
||||
#include <opencv2/matlab/bridge.hpp>
|
||||
using namespace cv;
|
||||
using namespace matlab;
|
||||
using namespace bridge;
|
||||
|
||||
// define the mex gateway
|
||||
void mexFunction(int nlhs, mxArray* plhs[],
|
||||
int nrhs, const mxArray* prhs[]) {
|
||||
|
||||
// claim the inputs into scoped management
|
||||
MxArrayVector raw(prhs, prhs+nrhs);
|
||||
|
||||
// add an argument parser to automatically handle basic options
|
||||
ArgumentParser parser("my function");
|
||||
parser.addVariant(1, 1, "opt");
|
||||
MxArrayVector reordered = parser.parse(raw);
|
||||
|
||||
// if we get here, we know the inputs are valid and reordered. Unpack...
|
||||
BridgeVector inputs(reordered.begin(), reordered.end());
|
||||
Mat required = inputs[0].toMat();
|
||||
string optional = inputs[1].empty() ? "Default string" : inputs[1].toString();
|
||||
|
||||
try {
|
||||
// Do stuff...
|
||||
} catch(Exception& e) {
|
||||
error(e.what());
|
||||
} catch(...) {
|
||||
error("Uncaught exception occurred");
|
||||
}
|
||||
|
||||
// allocate an output
|
||||
Bridge out = required;
|
||||
plhs[0] = out.toMxArray().releaseOwnership();
|
||||
}
|
||||
```
|
||||
|
||||
There are a couple of important things going on in this example. Firstly, you need to include `<opencv2/matlab/bridge.hpp>` to enable the bridging capabilities. Once you've done this, you get some nice utilities for free. `MxArray` is a class that wraps Matlab's `mxArray*` class in an OOP-style interface. `ArgumentParser` is a class that handles default, optional and named arguments for you, along with multiple possible calling syntaxes. Finally, `Bridge` is a class that allows bidirectional conversions between OpenCV/std and Matlab types.
|
||||
|
||||
Once you have written your file, it can be compiled with the provided mex utility:
|
||||
|
||||
cv.mex('my_function.cpp');
|
||||
|
||||
This utility automatically links in all of the necessary OpenCV libraries to make your function work.
|
||||
|
||||
NOTE: OpenCV uses exceptions throughout the codebase. It is a **very** good idea to wrap your code in exception handlers to avoid crashing Matlab in the event of an exception being thrown.
|
||||
|
||||
------------------------------------------------------------------
|
||||
|
||||
|
||||
Developer
|
||||
=========
|
||||
The following sections contain information for developers seeking to use, understand or extend the Matlab bindings. The bindings are generated in python using a powerful templating engine called Jinja2. Because Matlab mex gateways have a common structure, they are well suited to templatization. There are separate templates for formatting C++ classes, Matlab classes, C++ functions, constants (enums) and documentation.
|
||||
|
||||
The task of the generator is two-fold:
|
||||
|
||||
1. To parse the OpenCV headers and build a semantic tree that can be fed to the template engine
|
||||
2. To define type conversions between C++/OpenCV and Matlab types
|
||||
|
||||
Once a source file has been generated for each OpenCV definition, and type conversions have been established, the mex compiler is invoked to produce the mex gateway (shared object) and link in the OpenCV libraries.
|
||||
|
||||
|
||||
File layout
|
||||
-----------
|
||||
opencv/modules/matlab (this module)
|
||||
|
||||
* `CMakeLists.txt` (main cmake configuration file)
|
||||
* `README.md` (this file)
|
||||
* `compile.cmake` (the cmake script for compiling generated source code)
|
||||
* `generator` (the folder containing generator code)
|
||||
* `filters.py` (template filters)
|
||||
* `gen_matlab.py` (the binding generator control script)
|
||||
* `parse_tree.py` (python class to refactor the hdr_parser.py output)
|
||||
* `templates` (the raw templates for populating classes, constants, functions and docs)
|
||||
* `include` (C++ headers for the bindings)
|
||||
* `mxarray.hpp` (C++ OOP-style interface for Matlab mxArray* class)
|
||||
* `bridge.hpp` (type conversions)
|
||||
* `map.hpp` (hash map interface for instance storage and method lookup)
|
||||
* `test` (generator, compiler and binding test scripts)
|
||||
|
||||
|
||||
Call Tree
|
||||
---------
|
||||
The cmake call tree can be broken into 3 main components:
|
||||
|
||||
1. configure time
|
||||
2. build time
|
||||
3. install time
|
||||
|
||||
**Find Matlab (configure)**
|
||||
The first thing to do is discover a Matlab installation on the host system. This is handled by the `OpenCVFindMatlab.cmake` in `opencv/cmake`. On Windows machines it searches the registry and path, while on *NIX machines it searches a set of canonical install paths. Once Matlab has been found, a number of variables are defined, such as the path to the mex compiler, the mex libraries, the mex include paths, the architectural extension, etc.
|
||||
|
||||
**Test the generator (configure)**
|
||||
Attempt to produce a source file for a simple definition. This tests whether python and pythonlibs are correctly invoked on the host.
|
||||
|
||||
**Test the mex compiler (configure)**
|
||||
Attempt to compile a simple definition using the mex compiler. A mex file is actually just a shared object with a special exported symbol `_mexFunction` which serves as the entry-point to the function. As such, the mex compiler is just a set of scripts configuring the system compiler. In most cases this is the same as the OpenCV compiler, but *could* be different. The test checks whether the mex and generator includes can be found, the system libraries can be linked and the passed compiler flags are compatible.
|
||||
|
||||
If any of the configure time tests fail, the bindings will be disabled, but the main OpenCV configure will continue without error. The configuration summary will contain the block:
|
||||
|
||||
Matlab
|
||||
mex: /Applications/MATLAB_R2013a.app/bin/mex
|
||||
compiler/generator: Not working (bindings will not be generated)
|
||||
|
||||
**Generate the sources (build)**
|
||||
Given a set of modules (the intersection of the OpenCV modules being built and the matlab module optional dependencies), the `CppHeaderParser()` from `opencv/modules/python/src2/hdr_parser.py` will parse the module headers and produce a set of definitions.
|
||||
|
||||
The `ParseTree()` from `opencv/modules/matlab/generator/parse_tree.py` takes this set of definitions and refactors them into a semantic tree better suited to templatization. For example, a trivial definition from the header parser may look something like:
|
||||
|
||||
```python
|
||||
[fill, void, ['/S'], [cv::Mat&, mat, '', ['/I', '/O']]]
|
||||
```
|
||||
|
||||
The equivalent refactored output will look like:
|
||||
|
||||
```python
|
||||
Function
|
||||
name = 'fill'
|
||||
rtype = 'void'
|
||||
static = True
|
||||
req =
|
||||
Argument
|
||||
name = 'mat'
|
||||
type = 'cv::Mat'
|
||||
ref = '&'
|
||||
I = True
|
||||
O = True
|
||||
default = ''
|
||||
```
|
||||
|
||||
The added semantics (Namespace, Class, Function, Argument, name, etc) make it easier for the templating engine to parse, slice and populate definitions.
|
||||
|
||||
Once the definitions have been parsed, `gen_matlab.py` passes each definition to the template engine with the appropriate template (class, function, enum, doc) and the filled template gets written to the `${CMAKE_CURRENT_BUILD_DIR}/src` directory.
|
||||
|
||||
The generator relies upon a proxy object called `generate.proxy` to determine when the sources are out of date and need to be re-generated.
|
||||
|
||||
**Compile the sources (build)**
|
||||
Once the sources have been generated, they are compiled by the mex compiler. The `compile.cmake` script in `opencv/modules/matlab/` takes responsibility for iterating over each source file in `${CMAKE_CURRENT_BUILD_DIR}/src` and compiling it with the passed includes and OpenCV libraries.
|
||||
|
||||
The flags used to compile the main OpenCV libraries are also forwarded to the mex compiler. So if, for example, you compiled OpenCV with SSE support, the mex bindings will also use SSE. Likewise, if you compile OpenCV in debug mode, the bindings will link to the debug version of the libraries.
|
||||
|
||||
Importantly, the mex compiler includes the `mxarray.hpp`, `bridge.hpp` and `map.hpp` files from the `opencv/modules/matlab/include` directory. `mxarray.hpp` defines a `MxArray` class which wraps Matlab's `mxArray*` type in a more friendly OOP-syle interface. `bridge.hpp` defines a `Bridge` class which is able to perform type conversions between Matlab types and std/OpenCV types. It can be extended with new definitions using the plugin interface described in that file.
|
||||
|
||||
The compiler relies upon a proxy object called `compile.proxy` to determine when the generated sources are out of date and need to be re-compiled.
|
||||
|
||||
**Install the files (install)**
|
||||
At install time, the mex files are put into place at `${CMAKE_INSTALL_PREFIX}/matlab` and their linkages updated.
|
||||
|
||||
|
||||
Jinja2
|
||||
------
|
||||
Jinja2 is a powerful templating engine, similar to python's builtin `string.Template` class but implementing the model-view-controller paradigm. For example, a trivial view could be populated as follows:
|
||||
|
||||
**view.py**
|
||||
|
||||
```html+django
|
||||
<title>{{ title }}</title>
|
||||
<ul>
|
||||
{% for user in users %}
|
||||
<li><a href="{{ user.url }}">{{ user.username | sanitize }}</a></li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
```
|
||||
|
||||
**model.py**
|
||||
|
||||
```python
|
||||
class User(object):
|
||||
__init__(self):
|
||||
self.username = ''
|
||||
self.url = ''
|
||||
|
||||
def sanitize(text):
|
||||
"""Filter for escaping html tags to prevent code injection"""
|
||||
```
|
||||
|
||||
**controller.py**
|
||||
|
||||
```python
|
||||
def populate(users):
|
||||
# initialize jinja
|
||||
jtemplate = jinja2.Environment(loader=FileSystemLoader())
|
||||
|
||||
# add the filters to the engine
|
||||
jtemplate['sanitize'] = sanitize
|
||||
|
||||
# get the view
|
||||
template = jtemplate.get_template('view')
|
||||
|
||||
# populate the template with a list of User objects
|
||||
populated = template.render(title='all users', users=users)
|
||||
|
||||
# write to file
|
||||
with open('users.html', 'wb') as f:
|
||||
f.write(populated)
|
||||
```
|
||||
|
||||
Thus the style and layout of the view is kept separate from the content (model). This modularity improves readability and maintainability of both the view and content and (for my own sanity) has helped significantly in debugging errors.
|
||||
|
||||
File Reference
|
||||
--------------
|
||||
**gen_matlab.py**
|
||||
gen_matlab has the following call signature:
|
||||
|
||||
gen_matlab.py --jinja2 path/to/jinja2/engine
|
||||
--hdrparser path/to/hdr_parser/dir
|
||||
--rstparser path/to/rst_parser/dir
|
||||
--moduleroot path/to/opencv/modules
|
||||
--modules [core imgproc highgui ...]
|
||||
--extra namespace=/additional/header/to/parse
|
||||
--outdir /path/to/place/generated/src
|
||||
|
||||
**build_info.py**
|
||||
build_info has the following call signature:
|
||||
|
||||
build_info.py --jinja2 path/to/jinja2/engine
|
||||
--os operating_system_string
|
||||
--arch [bitness processor]
|
||||
--compiler [id version]
|
||||
--mex_arch arch_string
|
||||
--mex_script /path/to/mex/script
|
||||
--cxx_flags [-list -of -flags -to -passthrough]
|
||||
--opencv_version version_string
|
||||
--commit commit_hash_if_using_git
|
||||
--modules core imgproc highgui etc
|
||||
--configuration Debug/Release
|
||||
--outdir path/to/place/build/info
|
||||
|
||||
**cvmex.py**
|
||||
cvmex.py, the custom compiler generator, has the following call signature:
|
||||
|
||||
cvmex.py --jinja2 path/to/jinja2/engine
|
||||
--opts [-list -of -opts]
|
||||
--include_dirs [-list -of -opencv_include_directories]
|
||||
--lib_dir opencv_lib_directory
|
||||
--libs [-lopencv_core -lopencv_imgproc ...]
|
||||
--flags [-Wall -opencv_build_flags ...]
|
||||
--outdir /path/to/generated/output
|
||||
|
||||
**parse_tree.py**
|
||||
To build a parse tree, first parse a set of headers, then invoke the parse tree to refactor the output:
|
||||
|
||||
```python
|
||||
# parse a set of definitions into a dictionary of namespaces
|
||||
parser = CppHeaderParser()
|
||||
ns['core'] = parser.parse('path/to/opencv/core.hpp')
|
||||
|
||||
# refactor into a semantic tree
|
||||
parse_tree = ParseTree()
|
||||
parse_tree.build(ns)
|
||||
|
||||
# iterate over the tree
|
||||
for namespace in parse_tree.namespaces:
|
||||
for clss in namespace.classes:
|
||||
# do stuff
|
||||
for method in namespace.methods:
|
||||
# do stuff
|
||||
```
|
||||
|
||||
**mxarray.hpp**
|
||||
mxarray.hpp defines a class called `MxArray` which provides an OOP-style interface for Matlab's homogeneous `mxArray*` type. To create an `MxArray`, you can either inherit an existing array
|
||||
|
||||
```cpp
|
||||
MxArray mat(prhs[0]);
|
||||
```
|
||||
|
||||
or create a new array
|
||||
|
||||
```cpp
|
||||
MxArray mat(5, 5, Matlab::Traits<double>::ScalarType);
|
||||
MxArray mat = MxArray::Matrix<double>(5, 5);
|
||||
```
|
||||
|
||||
The default constructor allocates a `0 x 0` array. Once you have encapculated an `mxArray*` you can access its properties through member functions:
|
||||
|
||||
```cpp
|
||||
mat.rows();
|
||||
mat.cols();
|
||||
mat.size();
|
||||
mat.channels();
|
||||
mat.isComplex();
|
||||
mat.isNumeric();
|
||||
mat.isLogical();
|
||||
mat.isClass();
|
||||
mat.className();
|
||||
mat.real();
|
||||
mat.imag();
|
||||
```
|
||||
|
||||
The MxArray object uses scoped memory management. If you wish to pass an MxArray back to Matlab (as a lhs pointer), you need to explicitly release ownership of the array so that it is not destroyed when it leaves scope:
|
||||
|
||||
```cpp
|
||||
plhs[0] = mat.releaseOwnership();
|
||||
```
|
||||
|
||||
mxarray.hpp also includes a number of helper utilities that make working in mex-world a little easier. One such utility is the `ArgumentParser`. `ArgumentParser` automatically handles required and optional arguments to a method, and even enables named arguments as used in many core Matlab functions. For example, if you had a function with the following signature:
|
||||
|
||||
```cpp
|
||||
void f(Mat first, Mat second, Mat mask=Mat(), int dtype=-1);
|
||||
```
|
||||
|
||||
then you can create an `ArgumentParser` as follows:
|
||||
|
||||
```cpp
|
||||
ArgumentParser parser("f");
|
||||
parser.addVariant(2, 2, "mask", "dtype");
|
||||
MxArrayVector inputs = parser.parse(prhs, prhs+nrhs);
|
||||
```
|
||||
|
||||
and that will make available the following calling syntaxes:
|
||||
|
||||
```matlab
|
||||
f(first, second);
|
||||
f(first, second, mask);
|
||||
f(first, second, mask, dtype);
|
||||
f(first, second, 'dtype', dtype, 'mask', mask); % optional ordering does not matter
|
||||
f(first, second, 'dtype', dtype); % only second optional argument provided
|
||||
f(first, second, mask, 'dtype', dtype); % mixture of ordered and named
|
||||
```
|
||||
|
||||
Further, the output of the `parser.parse()` method will always contain the total number of required and optional arguments that the method can take, with unspecified arguments given by empty matrices. Thus, to check if an optional argument has been given, you can do:
|
||||
|
||||
```cpp
|
||||
int dtype = inputs[3].empty() ? -1 : inputs[3].scalar<double>();
|
||||
```
|
||||
|
||||
**bridge.hpp**
|
||||
The bridge interface defines a `Bridge` class which provides type conversion between std/OpenCV and Matlab types. A type conversion must provide the following:
|
||||
|
||||
```cpp
|
||||
Bridge& operator=(const MyObject&);
|
||||
MyObject toMyObject();
|
||||
operator MyObject();
|
||||
```
|
||||
|
||||
The binding generator will then automatically call the conversion operators (either explicitly or implicitly) if your `MyObject` class is encountered as an input or return from a parsed definition.
|
49
modules/matlab/compile.cmake
Normal file
49
modules/matlab/compile.cmake
Normal file
@ -0,0 +1,49 @@
|
||||
# LISTIFY
|
||||
# Given a string of space-delimited tokens, reparse as a string of
|
||||
# semi-colon delimited tokens, which in CMake land is exactly equivalent
|
||||
# to a list
|
||||
macro(listify OUT_LIST IN_STRING)
|
||||
string(REPLACE " " ";" ${OUT_LIST} ${IN_STRING})
|
||||
endmacro()
|
||||
|
||||
# listify multiple-argument inputs
|
||||
listify(MEX_INCLUDE_DIRS_LIST ${MEX_INCLUDE_DIRS})
|
||||
if (${CONFIGURATION} MATCHES "Debug")
|
||||
listify(MEX_LIBS_LIST ${MEX_DEBUG_LIBS})
|
||||
else()
|
||||
listify(MEX_LIBS_LIST ${MEX_LIBS})
|
||||
endif()
|
||||
|
||||
# if it's MSVC building a Debug configuration, don't build bindings
|
||||
if ("${CONFIGURATION}" MATCHES "Debug")
|
||||
message(STATUS "Matlab bindings are only available in Release configurations. Skipping...")
|
||||
return()
|
||||
endif()
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Compile
|
||||
# -----------------------------------------------------------------------------
|
||||
# for each generated source file:
|
||||
# 1. check if the file has already been compiled
|
||||
# 2. attempt compile if required
|
||||
# 3. if the compile fails, throw an error and cancel compilation
|
||||
file(GLOB SOURCE_FILES "${CMAKE_CURRENT_BINARY_DIR}/src/*.cpp")
|
||||
foreach(SOURCE_FILE ${SOURCE_FILES})
|
||||
# strip out the filename
|
||||
get_filename_component(FILENAME ${SOURCE_FILE} NAME_WE)
|
||||
# compile the source file using mex
|
||||
if (NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/+cv/${FILENAME}.${MATLAB_MEXEXT})
|
||||
execute_process(
|
||||
COMMAND ${MATLAB_MEX_SCRIPT} ${MEX_OPTS} "CXXFLAGS=\$CXXFLAGS ${MEX_CXXFLAGS}" ${MEX_INCLUDE_DIRS_LIST}
|
||||
${MEX_LIB_DIR} ${MEX_LIBS_LIST} ${SOURCE_FILE}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/+cv
|
||||
OUTPUT_QUIET
|
||||
ERROR_VARIABLE FAILED
|
||||
)
|
||||
endif()
|
||||
# TODO: If a mex file fails to compile, should we error out?
|
||||
# TODO: Warnings are currently treated as errors...
|
||||
if (FAILED)
|
||||
message(FATAL_ERROR "Failed to compile ${FILENAME}: ${FAILED}")
|
||||
endif()
|
||||
endforeach()
|
75
modules/matlab/generator/build_info.py
Normal file
75
modules/matlab/generator/build_info.py
Normal file
@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
def substitute(build, output_dir):
|
||||
|
||||
# setup the template engine
|
||||
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
|
||||
jtemplate = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True, lstrip_blocks=True)
|
||||
|
||||
# add the filters
|
||||
jtemplate.filters['csv'] = csv
|
||||
jtemplate.filters['stripExtraSpaces'] = stripExtraSpaces
|
||||
|
||||
# load the template
|
||||
template = jtemplate.get_template('template_build_info.m')
|
||||
|
||||
# create the build directory
|
||||
output_dir = output_dir+'/+cv'
|
||||
if not os.path.isdir(output_dir):
|
||||
os.mkdir(output_dir)
|
||||
|
||||
# populate template
|
||||
populated = template.render(build=build, time=time)
|
||||
with open(os.path.join(output_dir, 'buildInformation.m'), 'wb') as f:
|
||||
f.write(populated)
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""
|
||||
Usage: python build_info.py --jinja2 /path/to/jinja2/engine
|
||||
--os os_version_string
|
||||
--arch [bitness processor]
|
||||
--compiler [id version]
|
||||
--mex_arch arch_string
|
||||
--mex_script /path/to/mex/script
|
||||
--cxx_flags [-list -of -flags -to -passthrough]
|
||||
--opencv_version version_string
|
||||
--commit commit_hash_if_using_git
|
||||
--modules [core imgproc highgui etc]
|
||||
--configuration Debug/Release
|
||||
--outdir /path/to/write/build/info
|
||||
|
||||
build_info.py generates a Matlab function that can be invoked with a call to
|
||||
>> cv.buildInformation();
|
||||
|
||||
This function prints a summary of the user's OS, OpenCV and Matlab build
|
||||
given the information passed to this module. build_info.py invokes Jinja2
|
||||
on the template_build_info.m template.
|
||||
"""
|
||||
|
||||
# parse the input options
|
||||
import sys, re, os, time
|
||||
from argparse import ArgumentParser
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('--jinja2')
|
||||
parser.add_argument('--os')
|
||||
parser.add_argument('--arch', nargs=2)
|
||||
parser.add_argument('--compiler', nargs='+')
|
||||
parser.add_argument('--mex_arch')
|
||||
parser.add_argument('--mex_script')
|
||||
parser.add_argument('--mex_opts', default=['-largeArrayDims'], nargs='*')
|
||||
parser.add_argument('--cxx_flags', default=[], nargs='*')
|
||||
parser.add_argument('--opencv_version', default='', nargs='?')
|
||||
parser.add_argument('--commit', default='Not in working git tree', nargs='?')
|
||||
parser.add_argument('--modules', nargs='+')
|
||||
parser.add_argument('--configuration')
|
||||
parser.add_argument('--outdir')
|
||||
build = parser.parse_args()
|
||||
|
||||
# add jinja to the path
|
||||
sys.path.append(build.jinja2)
|
||||
|
||||
from filters import *
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
# populate the build info template
|
||||
substitute(build, build.outdir)
|
63
modules/matlab/generator/cvmex.py
Normal file
63
modules/matlab/generator/cvmex.py
Normal file
@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
def substitute(cv, output_dir):
|
||||
|
||||
# setup the template engine
|
||||
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
|
||||
jtemplate = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True, lstrip_blocks=True)
|
||||
|
||||
# add the filters
|
||||
jtemplate.filters['cellarray'] = cellarray
|
||||
jtemplate.filters['split'] = split
|
||||
jtemplate.filters['csv'] = csv
|
||||
|
||||
# load the template
|
||||
template = jtemplate.get_template('template_cvmex_base.m')
|
||||
|
||||
# create the build directory
|
||||
output_dir = output_dir+'/+cv'
|
||||
if not os.path.isdir(output_dir):
|
||||
os.mkdir(output_dir)
|
||||
|
||||
# populate template
|
||||
populated = template.render(cv=cv, time=time)
|
||||
with open(os.path.join(output_dir, 'mex.m'), 'wb') as f:
|
||||
f.write(populated)
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""
|
||||
Usage: python cvmex.py --jinja2 /path/to/jinja2/engine
|
||||
--opts [-list -of -opts]
|
||||
--include_dirs [-list -of -opencv_include_directories]
|
||||
--lib_dir opencv_lib_directory
|
||||
--libs [-lopencv_core -lopencv_imgproc ...]
|
||||
--flags [-Wall -opencv_build_flags ...]
|
||||
--outdir /path/to/generated/output
|
||||
|
||||
cvmex.py generates a custom mex compiler that automatically links OpenCV
|
||||
libraries to built sources where appropriate. The calling syntax is the
|
||||
same as the builtin mex compiler, with added cv qualification:
|
||||
>> cv.mex(..., ...);
|
||||
"""
|
||||
|
||||
# parse the input options
|
||||
import sys, re, os, time
|
||||
from argparse import ArgumentParser
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('--jinja2')
|
||||
parser.add_argument('--opts')
|
||||
parser.add_argument('--include_dirs')
|
||||
parser.add_argument('--lib_dir')
|
||||
parser.add_argument('--libs')
|
||||
parser.add_argument('--flags')
|
||||
parser.add_argument('--outdir')
|
||||
cv = parser.parse_args()
|
||||
|
||||
# add jinja to the path
|
||||
sys.path.append(cv.jinja2)
|
||||
|
||||
from filters import *
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
# populate the mex base template
|
||||
substitute(cv, cv.outdir)
|
180
modules/matlab/generator/filters.py
Normal file
180
modules/matlab/generator/filters.py
Normal file
@ -0,0 +1,180 @@
|
||||
from textwrap import TextWrapper
|
||||
from string import split, join
|
||||
import re, os
|
||||
# precompile a URL matching regular expression
|
||||
urlexpr = re.compile(r"((https?):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+-=\\\.&]*)", re.MULTILINE|re.UNICODE)
|
||||
|
||||
def inputs(args):
|
||||
'''Keeps only the input arguments in a list of elements.
|
||||
In OpenCV input arguments are all arguments with names
|
||||
not beginning with 'dst'
|
||||
'''
|
||||
try:
|
||||
return [arg for arg in args['only'] if arg.I and not arg.O]
|
||||
except:
|
||||
return [arg for arg in args if arg.I]
|
||||
|
||||
def ninputs(fun):
|
||||
'''Counts the number of input arguments in the input list'''
|
||||
return len(inputs(fun.req)) + len(inputs(fun.opt))
|
||||
|
||||
def outputs(args):
|
||||
'''Determines whether any of the given arguments is an output
|
||||
reference, and returns a list of only those elements.
|
||||
In OpenCV, output references are preceeded by 'dst'
|
||||
'''
|
||||
try:
|
||||
return [arg for arg in args['only'] if arg.O and not arg.I]
|
||||
except:
|
||||
return [arg for arg in args if arg.O]
|
||||
|
||||
def only(args):
|
||||
'''Returns exclusively the arguments which are only inputs
|
||||
or only outputs'''
|
||||
d = {};
|
||||
d['only'] = args
|
||||
return d
|
||||
|
||||
def void(arg):
|
||||
'''Is the input 'void' '''
|
||||
return arg == 'void'
|
||||
|
||||
def flip(arg):
|
||||
'''flip the sign of the input'''
|
||||
return not arg
|
||||
|
||||
def noutputs(fun):
|
||||
'''Counts the number of output arguments in the input list'''
|
||||
return int(not void(fun.rtp)) + len(outputs(fun.req)) + len(outputs(fun.opt))
|
||||
|
||||
def convertibleToInt(string):
|
||||
'''Can the input string be evaluated to an integer?'''
|
||||
salt = '1+'
|
||||
try:
|
||||
exec(salt+string)
|
||||
return True
|
||||
except:
|
||||
return False
|
||||
|
||||
def binaryToDecimal(string):
|
||||
'''Attempt to convert the input string to floating point representation'''
|
||||
try:
|
||||
return str(eval(string))
|
||||
except:
|
||||
return string
|
||||
|
||||
def formatMatlabConstant(string, table):
|
||||
'''
|
||||
Given a string representing a Constant, and a table of all Constants,
|
||||
attempt to resolve the Constant into a valid Matlab expression
|
||||
For example, the input
|
||||
DEPENDENT_VALUE = 1 << FIXED_VALUE
|
||||
needs to be converted to
|
||||
DEPENDENT_VALUE = bitshift(1, cv.FIXED_VALUE);
|
||||
'''
|
||||
# split the string into expressions
|
||||
words = re.split('(\W+)', string)
|
||||
# add a 'cv' prefix if an expression is also a key in the lookup table
|
||||
words = ''.join([('cv.'+word if word in table else word) for word in words])
|
||||
# attempt to convert arithmetic expressions and binary/hex to decimal
|
||||
words = binaryToDecimal(words)
|
||||
# convert any remaining bitshifts to Matlab 'bitshift' methods
|
||||
shift = re.sub('[\(\) ]', '', words).split('<<')
|
||||
words = 'bitshift('+shift[0]+', '+shift[1]+')' if len(shift) == 2 else words
|
||||
return words
|
||||
|
||||
def matlabURL(string):
|
||||
"""This filter is used to construct a Matlab specific URL that calls the
|
||||
system browser instead of the (insanely bad) builtin Matlab browser"""
|
||||
return re.sub(urlexpr, '<a href="matlab: web(\'\\1\', \'-browser\')">\\1</a>', string)
|
||||
|
||||
def capitalizeFirst(text):
    '''Capitalize only the first character of the text string.

    Unlike str.capitalize(), the rest of the string is left untouched.
    Returns the empty string unchanged instead of raising an IndexError
    (toUpperCamelCase can pass empty words for inputs such as 'a__b').
    '''
    # text[:1] is safe on empty input, whereas text[0] would raise
    return text[:1].upper() + text[1:]
|
||||
|
||||
def toUpperCamelCase(text):
|
||||
'''variable_name --> VariableName'''
|
||||
return ''.join([capitalizeFirst(word) for word in text.split('_')])
|
||||
|
||||
def toLowerCamelCase(text):
    '''variable_name --> variableName'''
    # bugfix: previously referenced the undefined name 'test' instead of
    # the 'text' parameter, raising a NameError whenever this filter ran
    upper_camel = toUpperCamelCase(text)
    # [:1] keeps the empty string safe instead of raising an IndexError
    return upper_camel[:1].lower() + upper_camel[1:]
|
||||
|
||||
def toUnderCase(text):
|
||||
'''VariableName --> variable_name'''
|
||||
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', text)
|
||||
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
||||
|
||||
def stripTags(text):
    '''
    strip or convert html tags from a text string
    <code>content</code> --> CONTENT
    <anything>           --> ''
    &lt;                 --> <
    &gt;                 --> >
    &le                  --> <=
    &ge                  --> >=
    '''
    # uppercase the contents of <code> blocks rather than deleting them
    upper = lambda pattern: pattern.group(1).upper()
    text = re.sub('<code>(.*?)</code>', upper, text)
    # strip any remaining tags ([^=\s] avoids eating comparisons like 'a < b')
    text = re.sub('<([^=\s].*?)>', '', text)
    # decode common character entities back into plain text
    # bugfix: the first two patterns were the literal characters '<' and '>'
    # (a no-op substitution); they must match the '&lt;'/'&gt;' entities,
    # consistent with the '&le'/'&ge' handling below
    text = re.sub('&lt;', '<', text)
    text = re.sub('&gt;', '>', text)
    text = re.sub('&le', '<=', text)
    text = re.sub('&ge', '>=', text)
    return text
|
||||
|
||||
def qualify(text, name):
|
||||
'''Adds uppercase 'CV.' qualification to any occurrences of name in text'''
|
||||
return re.sub(name.upper(), 'CV.'+name.upper(), text)
|
||||
|
||||
def slugify(text):
    '''Convert an identifier into a URL-friendly slug.

    e.g. A_Function_name --> a-function-name
    '''
    lowered = text.lower()
    return '-'.join(lowered.split('_'))
|
||||
|
||||
def filename(fullpath):
|
||||
'''Returns only the filename without an extension from a file path
|
||||
eg. /path/to/file.txt --> file
|
||||
'''
|
||||
return os.path.splitext(os.path.basename(fullpath))[0]
|
||||
|
||||
def split(text, delimiter=' '):
|
||||
'''Split a text string into a list using the specified delimiter'''
|
||||
return text.split(delimiter)
|
||||
|
||||
def csv(items, sep=', '):
    '''Join a sequence of strings into one separator-delimited string.

    The separator defaults to a comma followed by a space.
    '''
    return sep.join(items)
|
||||
|
||||
def cellarray(items, escape='\''):
|
||||
'''format a list of items as a matlab cell array'''
|
||||
return '{' + ', '.join(escape+item+escape for item in items) + '}'
|
||||
|
||||
def stripExtraSpaces(text):
|
||||
'''Removes superfluous whitespace from a string, including the removal
|
||||
of all leading and trailing whitespace'''
|
||||
return ' '.join(text.split())
|
||||
|
||||
def comment(text, wrap=80, escape='% ', escape_first='', escape_last=''):
|
||||
'''comment filter
|
||||
Takes a string in text, and wraps it to wrap characters in length with
|
||||
preceding comment escape sequence on each line. escape_first and
|
||||
escape_last can be used for languages which define block comments.
|
||||
Examples:
|
||||
C++ inline comment comment(80, '// ')
|
||||
C block comment: comment(80, ' * ', '/*', ' */')
|
||||
Matlab comment: comment(80, '% ')
|
||||
Matlab block comment: comment(80, '', '%{', '%}')
|
||||
Python docstrings: comment(80, '', '\'\'\'', '\'\'\'')
|
||||
'''
|
||||
|
||||
tw = TextWrapper(width=wrap-len(escape))
|
||||
if escape_first:
|
||||
escape_first = escape_first+'\n'
|
||||
if escape_last:
|
||||
escape_last = '\n'+escape_last
|
||||
escapn = '\n'+escape
|
||||
lines = text.split('\n')
|
||||
wlines = (tw.wrap(line) for line in lines)
|
||||
return escape_first+escape+join((join(line, escapn) for line in wlines), escapn)+escape_last
|
197
modules/matlab/generator/gen_matlab.py
Normal file
197
modules/matlab/generator/gen_matlab.py
Normal file
@ -0,0 +1,197 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
class MatlabWrapperGenerator(object):
|
||||
"""
|
||||
MatlabWrapperGenerator is a class for generating Matlab mex sources from
|
||||
a set of C++ headers. MatlabWrapperGenerator objects can be default
|
||||
constructed. Given an instance, the gen() method performs the translation.
|
||||
"""
|
||||
|
||||
def gen(self, module_root, modules, extras, output_dir):
|
||||
"""
|
||||
Generate a set of Matlab mex source files by parsing exported symbols
|
||||
in a set of C++ headers. The headers can be input in one (or both) of
|
||||
two methods:
|
||||
1. specify module_root and modules
|
||||
Given a path to the OpenCV module root and a list of module names,
|
||||
the headers to parse are implicitly constructed.
|
||||
2. specifiy header locations explicitly in extras
|
||||
Each element in the list of extras must be of the form:
|
||||
'namespace=/full/path/to/extra/header.hpp' where 'namespace' is
|
||||
the namespace in which the definitions should be added.
|
||||
The output_dir specifies the directory to write the generated sources
|
||||
to.
|
||||
"""
|
||||
# parse each of the files and store in a dictionary
|
||||
# as a separate "namespace"
|
||||
parser = CppHeaderParser()
|
||||
rst = rst_parser.RstParser(parser)
|
||||
rst_parser.verbose = False
|
||||
rst_parser.show_warnings = False
|
||||
rst_parser.show_errors = False
|
||||
rst_parser.show_critical_errors = False
|
||||
|
||||
ns = dict((key, []) for key in modules)
|
||||
doc = dict((key, []) for key in modules)
|
||||
path_template = Template('${module}/include/opencv2/${module}.hpp')
|
||||
|
||||
for module in modules:
|
||||
# construct a header path from the module root and a path template
|
||||
header = os.path.join(module_root, path_template.substitute(module=module))
|
||||
# parse the definitions
|
||||
ns[module] = parser.parse(header)
|
||||
# parse the documentation
|
||||
rst.parse(module, os.path.join(module_root, module))
|
||||
doc[module] = rst.definitions
|
||||
rst.definitions = {}
|
||||
|
||||
for extra in extras:
|
||||
module = extra.split("=")[0]
|
||||
header = extra.split("=")[1]
|
||||
ns[module] = ns[module] + parser.parse(header) if module in ns else parser.parse(header)
|
||||
|
||||
# cleanify the parser output
|
||||
parse_tree = ParseTree()
|
||||
parse_tree.build(ns)
|
||||
|
||||
# setup the template engine
|
||||
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
|
||||
jtemplate = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True, lstrip_blocks=True)
|
||||
|
||||
# add the custom filters
|
||||
jtemplate.filters['formatMatlabConstant'] = formatMatlabConstant
|
||||
jtemplate.filters['convertibleToInt'] = convertibleToInt
|
||||
jtemplate.filters['toUpperCamelCase'] = toUpperCamelCase
|
||||
jtemplate.filters['toLowerCamelCase'] = toLowerCamelCase
|
||||
jtemplate.filters['toUnderCase'] = toUnderCase
|
||||
jtemplate.filters['matlabURL'] = matlabURL
|
||||
jtemplate.filters['stripTags'] = stripTags
|
||||
jtemplate.filters['filename'] = filename
|
||||
jtemplate.filters['comment'] = comment
|
||||
jtemplate.filters['inputs'] = inputs
|
||||
jtemplate.filters['ninputs'] = ninputs
|
||||
jtemplate.filters['outputs'] = outputs
|
||||
jtemplate.filters['noutputs'] = noutputs
|
||||
jtemplate.filters['qualify'] = qualify
|
||||
jtemplate.filters['slugify'] = slugify
|
||||
jtemplate.filters['only'] = only
|
||||
jtemplate.filters['void'] = void
|
||||
jtemplate.filters['not'] = flip
|
||||
|
||||
# load the templates
|
||||
tfunction = jtemplate.get_template('template_function_base.cpp')
|
||||
tclassm = jtemplate.get_template('template_class_base.m')
|
||||
tclassc = jtemplate.get_template('template_class_base.cpp')
|
||||
tdoc = jtemplate.get_template('template_doc_base.m')
|
||||
tconst = jtemplate.get_template('template_map_base.m')
|
||||
|
||||
# create the build directory
|
||||
output_source_dir = output_dir+'/src'
|
||||
output_private_dir = output_source_dir+'/private'
|
||||
output_class_dir = output_dir+'/+cv'
|
||||
output_map_dir = output_dir+'/map'
|
||||
if not os.path.isdir(output_source_dir):
|
||||
os.mkdir(output_source_dir)
|
||||
if not os.path.isdir(output_private_dir):
|
||||
os.mkdir(output_private_dir)
|
||||
if not os.path.isdir(output_class_dir):
|
||||
os.mkdir(output_class_dir)
|
||||
if not os.path.isdir(output_map_dir):
|
||||
os.mkdir(output_map_dir)
|
||||
|
||||
# populate templates
|
||||
for namespace in parse_tree.namespaces:
|
||||
# functions
|
||||
for method in namespace.methods:
|
||||
populated = tfunction.render(fun=method, time=time, includes=namespace.name)
|
||||
with open(output_source_dir+'/'+method.name+'.cpp', 'wb') as f:
|
||||
f.write(populated)
|
||||
if namespace.name in doc and method.name in doc[namespace.name]:
|
||||
populated = tdoc.render(fun=method, doc=doc[namespace.name][method.name], time=time)
|
||||
with open(output_class_dir+'/'+method.name+'.m', 'wb') as f:
|
||||
f.write(populated)
|
||||
# classes
|
||||
for clss in namespace.classes:
|
||||
# cpp converter
|
||||
populated = tclassc.render(clss=clss, time=time)
|
||||
with open(output_private_dir+'/'+clss.name+'Bridge.cpp', 'wb') as f:
|
||||
f.write(populated)
|
||||
# matlab classdef
|
||||
populated = tclassm.render(clss=clss, time=time)
|
||||
with open(output_class_dir+'/'+clss.name+'.m', 'wb') as f:
|
||||
f.write(populated)
|
||||
|
||||
# create a global constants lookup table
|
||||
const = dict(constants(todict(parse_tree.namespaces)))
|
||||
populated = tconst.render(constants=const, time=time)
|
||||
with open(output_dir+'/cv.m', 'wb') as f:
|
||||
f.write(populated)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""
|
||||
Usage: python gen_matlab.py --jinja2 /path/to/jinja2/engine
|
||||
--hdrparser /path/to/hdr_parser/dir
|
||||
--rstparser /path/to/rst_parser/dir
|
||||
--moduleroot /path/to/opencv/modules
|
||||
--modules [core imgproc objdetect etc]
|
||||
--extra namespace=/path/to/extra/header.hpp
|
||||
--outdir /path/to/output/generated/srcs
|
||||
|
||||
gen_matlab.py is the main control script for generating matlab source
|
||||
files from given set of headers. Internally, gen_matlab:
|
||||
1. constructs the headers to parse from the module root and list of modules
|
||||
2. parses the headers using CppHeaderParser
|
||||
3. refactors the definitions using ParseTree
|
||||
4. parses .rst docs using RstParser
|
||||
5. populates the templates for classes, function, enums and docs from the
|
||||
definitions
|
||||
|
||||
gen_matlab.py requires the following inputs:
|
||||
--jinja2 the path to the Jinja2 templating engine
|
||||
e.g. ${CMAKE_SOURCE_DIR}/3rdparty
|
||||
--hdrparser the path to the header parser directory
|
||||
(opencv/modules/python/src2)
|
||||
--rstparser the path to the rst parser directory
|
||||
(opencv/modules/java/generator)
|
||||
--moduleroot (optional) path to the opencv directory containing the modules
|
||||
--modules (optional - required if --moduleroot specified) the modules
|
||||
to produce bindings for. The path to the include directories
|
||||
as well as the namespaces are constructed from the modules
|
||||
and the moduleroot
|
||||
--extra extra headers explicitly defined to parse. This must be in
|
||||
the format "namepsace=/path/to/extra/header.hpp". For example,
|
||||
the core module requires the extra header:
|
||||
"core=/opencv/modules/core/include/opencv2/core/core/base.hpp"
|
||||
--outdir the output directory to put the generated matlab sources. In
|
||||
the OpenCV build this is "${CMAKE_CURRENT_BUILD_DIR}/src"
|
||||
"""
|
||||
|
||||
# parse the input options
|
||||
import sys, re, os, time
|
||||
from argparse import ArgumentParser
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('--jinja2')
|
||||
parser.add_argument('--hdrparser')
|
||||
parser.add_argument('--rstparser')
|
||||
parser.add_argument('--moduleroot', default='', required=False)
|
||||
parser.add_argument('--modules', nargs='*', default=[], required=False)
|
||||
parser.add_argument('--extra', nargs='*', default=[], required=False)
|
||||
parser.add_argument('--outdir')
|
||||
args = parser.parse_args()
|
||||
|
||||
# add the hdr_parser and rst_parser modules to the path
|
||||
sys.path.append(args.jinja2)
|
||||
sys.path.append(args.hdrparser)
|
||||
sys.path.append(args.rstparser)
|
||||
|
||||
from string import Template
|
||||
from hdr_parser import CppHeaderParser
|
||||
import rst_parser
|
||||
from parse_tree import ParseTree, todict, constants
|
||||
from filters import *
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
# create the generator
|
||||
mwg = MatlabWrapperGenerator()
|
||||
mwg.gen(args.moduleroot, args.modules, args.extra, args.outdir)
|
356
modules/matlab/generator/parse_tree.py
Normal file
356
modules/matlab/generator/parse_tree.py
Normal file
@ -0,0 +1,356 @@
|
||||
from string import join
|
||||
from textwrap import fill
|
||||
from filters import *
|
||||
|
||||
class ParseTree(object):
|
||||
"""
|
||||
The ParseTree class produces a semantic tree of C++ definitions given
|
||||
the output of the CppHeaderParser (from opencv/modules/python/src2/hdr_parser.py)
|
||||
|
||||
The full hierarchy is as follows:
|
||||
|
||||
Namespaces
|
||||
|
|
||||
|- name
|
||||
|- Classes
|
||||
|
|
||||
|- name
|
||||
|- Methods
|
||||
|- Constants
|
||||
|- Methods
|
||||
|
|
||||
|- name
|
||||
|- static (T/F)
|
||||
|- return type
|
||||
|- required Arguments
|
||||
|
|
||||
|- name
|
||||
|- const (T/F)
|
||||
|- reference ('&'/'*')
|
||||
|- type
|
||||
|- input
|
||||
|- output (pass return by reference)
|
||||
|- default value
|
||||
|- optional Arguments
|
||||
|- Constants
|
||||
|
|
||||
|- name
|
||||
|- const (T/F)
|
||||
|- reference ('&'/'*')
|
||||
|- type
|
||||
|- value
|
||||
|
||||
The semantic tree contains substantial information for easily introspecting
|
||||
information about objects. How many methods does the 'core' namespace have?
|
||||
Does the 'randn' method have any return by reference (output) arguments?
|
||||
How many required and optional arguments does the 'add' method have? Is the
|
||||
variable passed by reference or raw pointer?
|
||||
|
||||
Individual definitions from the parse tree (Classes, Functions, Constants)
|
||||
are passed to the Jinja2 template engine where they are manipulated to
|
||||
produce Matlab mex sources.
|
||||
|
||||
A common call tree for constructing and using a ParseTree object is:
|
||||
|
||||
# parse a set of definitions into a dictionary of namespaces
|
||||
parser = CppHeaderParser()
|
||||
ns['core'] = parser.parse('path/to/opencv/core.hpp')
|
||||
|
||||
# refactor into a semantic tree
|
||||
parse_tree = ParseTree()
|
||||
parse_tree.build(ns)
|
||||
|
||||
# iterate over the tree
|
||||
for namespace in parse_tree.namespaces:
|
||||
for clss in namespace.classes:
|
||||
# do stuff
|
||||
for method in namespace.methods:
|
||||
# do stuff
|
||||
|
||||
Calling 'print' on a ParseTree object will reconstruct the definitions
|
||||
to produce an output resembling the original C++ code.
|
||||
"""
|
||||
def __init__(self, namespaces=None):
|
||||
self.namespaces = namespaces if namespaces else []
|
||||
|
||||
def __str__(self):
|
||||
return join((ns.__str__() for ns in self.namespaces), '\n\n\n')
|
||||
|
||||
def build(self, namespaces):
|
||||
babel = Translator()
|
||||
for name, definitions in namespaces.items():
|
||||
class_tree = {}
|
||||
methods = []
|
||||
constants = []
|
||||
for defn in definitions:
|
||||
obj = babel.translate(defn)
|
||||
if obj is None:
|
||||
continue
|
||||
if type(obj) is Class or obj.clss:
|
||||
self.insertIntoClassTree(obj, class_tree)
|
||||
elif type(obj) is Method:
|
||||
methods.append(obj)
|
||||
elif type(obj) is Constant:
|
||||
constants.append(obj)
|
||||
else:
|
||||
raise TypeError('Unexpected object type: '+str(type(obj)))
|
||||
self.namespaces.append(Namespace(name, constants, class_tree.values(), methods))
|
||||
|
||||
def insertIntoClassTree(self, obj, class_tree):
|
||||
cname = obj.name if type(obj) is Class else obj.clss
|
||||
if not cname:
|
||||
return
|
||||
if not cname in class_tree:
|
||||
# add a new class to the tree
|
||||
class_tree[cname] = Class(cname)
|
||||
# insert the definition into the class
|
||||
val = class_tree[cname]
|
||||
if type(obj) is Method:
|
||||
val.methods.append(obj)
|
||||
elif type(obj) is Constant:
|
||||
val.constants.append(obj)
|
||||
else:
|
||||
raise TypeError('Unexpected object type: '+str(type(obj)))
|
||||
|
||||
|
||||
|
||||
class Translator(object):
|
||||
"""
|
||||
The Translator class does the heavy lifting of translating the nested
|
||||
list representation of the hdr_parser into individual definitions that
|
||||
are inserted into the ParseTree.
|
||||
Translator consists of a top-level method: translate()
|
||||
along with a number of helper methods: translateClass(), translateMethod(),
|
||||
translateArgument(), translateConstant(), translateName(), and
|
||||
translateClassName()
|
||||
"""
|
||||
def translate(self, defn):
|
||||
# --- class ---
|
||||
# classes have 'class' prefixed on their name
|
||||
if 'class' in defn[0].split(' ') or 'struct' in defn[0].split(' '):
|
||||
return self.translateClass(defn)
|
||||
# --- operators! ---
|
||||
#TODO: implement operators: http://www.mathworks.com.au/help/matlab/matlab_oop/implementing-operators-for-your-class.html
|
||||
if 'operator' in defn[0]:
|
||||
return
|
||||
# --- constant ---
|
||||
elif convertibleToInt(defn[1]):
|
||||
return self.translateConstant(defn)
|
||||
# --- function ---
|
||||
# functions either need to have input arguments, or not uppercase names
|
||||
elif defn[3] or not self.translateName(defn[0]).split('_')[0].isupper():
|
||||
return self.translateMethod(defn)
|
||||
# --- constant ---
|
||||
else:
|
||||
return self.translateConstant(defn)
|
||||
|
||||
def translateClass(self, defn):
|
||||
return Class()
|
||||
|
||||
def translateMethod(self, defn, class_tree=None):
|
||||
name = self.translateName(defn[0])
|
||||
clss = self.translateClassName(defn[0])
|
||||
rtp = defn[1]
|
||||
static = True if 'S' in ''.join(defn[2]) else False
|
||||
args = defn[3]
|
||||
req = []
|
||||
opt = []
|
||||
for arg in args:
|
||||
if arg:
|
||||
a = self.translateArgument(arg)
|
||||
opt.append(a) if a.default else req.append(a)
|
||||
return Method(name, clss, static, '', rtp, False, req, opt)
|
||||
|
||||
def translateConstant(self, defn):
|
||||
const = True if 'const' in defn[0] else False
|
||||
name = self.translateName(defn[0])
|
||||
clss = self.translateClassName(defn[0])
|
||||
tp = 'int'
|
||||
val = defn[1]
|
||||
return Constant(name, clss, tp, const, '', val)
|
||||
|
||||
def translateArgument(self, defn):
|
||||
ref = '*' if '*' in defn[0] else ''
|
||||
ref = '&' if '&' in defn[0] else ref
|
||||
const = ' const ' in ' '+defn[0]+' '
|
||||
tp = " ".join([word for word in defn[0].replace(ref, '').split() if not ' const ' in ' '+word+' '])
|
||||
name = defn[1]
|
||||
default = defn[2] if defn[2] else ''
|
||||
modifiers = ''.join(defn[3])
|
||||
I = True if not modifiers or 'I' in modifiers else False
|
||||
O = True if 'O' in modifiers else False
|
||||
return Argument(name, tp, const, I, O, ref, default)
|
||||
|
||||
def translateName(self, name):
|
||||
return name.split(' ')[-1].split('.')[-1]
|
||||
|
||||
def translateClassName(self, name):
|
||||
name = name.split(' ')[-1]
|
||||
parts = name.split('.')
|
||||
return parts[-2] if len(parts) > 1 and not parts[-2] == 'cv' else ''
|
||||
|
||||
|
||||
|
||||
class Namespace(object):
|
||||
"""
|
||||
Namespace
|
||||
|
|
||||
|- name
|
||||
|- Constants
|
||||
|- Methods
|
||||
|- Constants
|
||||
"""
|
||||
def __init__(self, name='', constants=None, classes=None, methods=None):
|
||||
self.name = name
|
||||
self.constants = constants if constants else []
|
||||
self.classes = classes if classes else []
|
||||
self.methods = methods if methods else []
|
||||
|
||||
def __str__(self):
|
||||
return 'namespace '+self.name+' {\n\n'+\
|
||||
(join((c.__str__() for c in self.constants), '\n')+'\n\n' if self.constants else '')+\
|
||||
(join((f.__str__() for f in self.methods), '\n')+'\n\n' if self.methods else '')+\
|
||||
(join((o.__str__() for o in self.classes), '\n\n') if self.classes else '')+'\n};'
|
||||
|
||||
class Class(object):
|
||||
"""
|
||||
Class
|
||||
|
|
||||
|- name
|
||||
|- Methods
|
||||
|- Constants
|
||||
"""
|
||||
def __init__(self, name='', namespace='', constants=None, methods=None):
|
||||
self.name = name
|
||||
self.namespace = namespace
|
||||
self.constants = constants if constants else []
|
||||
self.methods = methods if methods else []
|
||||
|
||||
def __str__(self):
|
||||
return 'class '+self.name+' {\n\t'+\
|
||||
(join((c.__str__() for c in self.constants), '\n\t')+'\n\n\t' if self.constants else '')+\
|
||||
(join((f.__str__() for f in self.methods), '\n\t') if self.methods else '')+'\n};'
|
||||
|
||||
class Method(object):
|
||||
"""
|
||||
Method
|
||||
int VideoWriter::read( cv::Mat& frame, const cv::Mat& mask=cv::Mat() );
|
||||
--- ----- ---- -------- ----------------
|
||||
rtp class name required optional
|
||||
|
||||
name the method name
|
||||
clss the class the method belongs to ('' if free)
|
||||
static static?
|
||||
namespace the namespace the method belongs to ('' if free)
|
||||
rtp the return type
|
||||
const const?
|
||||
req list of required arguments
|
||||
opt list of optional arguments
|
||||
"""
|
||||
def __init__(self, name='', clss='', static=False, namespace='', rtp='', const=False, req=None, opt=None):
|
||||
self.name = name
|
||||
self.clss = clss
|
||||
self.constructor = True if name == clss else False
|
||||
self.static = static
|
||||
self.const = const
|
||||
self.namespace = namespace
|
||||
self.rtp = rtp
|
||||
self.req = req if req else []
|
||||
self.opt = opt if opt else []
|
||||
|
||||
def __str__(self):
|
||||
return (self.rtp+' ' if self.rtp else '')+self.name+'('+\
|
||||
join((arg.__str__() for arg in self.req+self.opt), ', ')+\
|
||||
')'+(' const' if self.const else '')+';'
|
||||
|
||||
class Argument(object):
|
||||
"""
|
||||
Argument
|
||||
const cv::Mat& mask=cv::Mat()
|
||||
----- ---- --- ---- -------
|
||||
const tp ref name default
|
||||
|
||||
name the argument name
|
||||
tp the argument type
|
||||
const const?
|
||||
I is the argument treated as an input?
|
||||
O is the argument treated as an output (return by reference)
|
||||
ref is the argument passed by reference? ('*'/'&')
|
||||
default the default value of the argument ('' if required)
|
||||
"""
|
||||
def __init__(self, name='', tp='', const=False, I=True, O=False, ref='', default=''):
|
||||
self.name = name
|
||||
self.tp = tp
|
||||
self.ref = ref
|
||||
self.I = I
|
||||
self.O = O
|
||||
self.const = const
|
||||
self.default = default
|
||||
|
||||
def __str__(self):
|
||||
return ('const ' if self.const else '')+self.tp+self.ref+\
|
||||
' '+self.name+('='+self.default if self.default else '')
|
||||
|
||||
class Constant(object):
|
||||
"""
|
||||
Constant
|
||||
DFT_COMPLEX_OUTPUT = 12;
|
||||
---- -------
|
||||
name default
|
||||
|
||||
name the name of the constant
|
||||
clss the class that the constant belongs to ('' if free)
|
||||
tp the type of the constant ('' if int)
|
||||
const const?
|
||||
ref is the constant a reference? ('*'/'&')
|
||||
default default value, required for constants
|
||||
"""
|
||||
def __init__(self, name='', clss='', tp='', const=False, ref='', default=''):
|
||||
self.name = name
|
||||
self.clss = clss
|
||||
self.tp = tp
|
||||
self.ref = ref
|
||||
self.const = const
|
||||
self.default = default
|
||||
|
||||
def __str__(self):
|
||||
return ('const ' if self.const else '')+self.tp+self.ref+\
|
||||
' '+self.name+('='+self.default if self.default else '')+';'
|
||||
|
||||
def constants(tree):
|
||||
"""
|
||||
recursive generator to strip all Constant objects from the ParseTree
|
||||
and place them into a flat dictionary of { name, value (default) }
|
||||
"""
|
||||
if isinstance(tree, dict) and 'constants' in tree and isinstance(tree['constants'], list):
|
||||
for node in tree['constants']:
|
||||
yield (node['name'], node['default'])
|
||||
if isinstance(tree, dict):
|
||||
for key, val in tree.items():
|
||||
for gen in constants(val):
|
||||
yield gen
|
||||
if isinstance(tree, list):
|
||||
for val in tree:
|
||||
for gen in constants(val):
|
||||
yield gen
|
||||
|
||||
def todict(obj, classkey=None):
|
||||
"""
|
||||
Convert the ParseTree to a dictionary, stripping all objects of their
|
||||
methods and converting class names to strings
|
||||
"""
|
||||
if isinstance(obj, dict):
|
||||
for k in obj.keys():
|
||||
obj[k] = todict(obj[k], classkey)
|
||||
return obj
|
||||
elif hasattr(obj, "__iter__"):
|
||||
return [todict(v, classkey) for v in obj]
|
||||
elif hasattr(obj, "__dict__"):
|
||||
data = dict([(key, todict(value, classkey))
|
||||
for key, value in obj.__dict__.iteritems()
|
||||
if not callable(value) and not key.startswith('_')])
|
||||
if classkey is not None and hasattr(obj, "__class__"):
|
||||
data[classkey] = obj.__class__.__name__
|
||||
return data
|
||||
else:
|
||||
return obj
|
0
modules/matlab/generator/templates/__init__.py
Normal file
0
modules/matlab/generator/templates/__init__.py
Normal file
149
modules/matlab/generator/templates/functional.cpp
Normal file
149
modules/matlab/generator/templates/functional.cpp
Normal file
@ -0,0 +1,149 @@
|
||||
/*
|
||||
* compose
|
||||
* compose a function call
|
||||
* This macro takes as input a Method object and composes
|
||||
* a function call by inspecting the types and argument names
|
||||
*/
|
||||
{% macro compose(fun) %}
|
||||
{# ----------- Return type ------------- #}
|
||||
{%- if not fun.rtp|void and not fun.constructor -%} retval = {% endif -%}
|
||||
{%- if fun.constructor -%}{{fun.clss}} obj = {% endif -%}
|
||||
{%- if fun.clss and not fun.constructor -%}inst.{%- else -%} cv:: {%- endif -%}
|
||||
{{fun.name}}(
|
||||
{#- ----------- Required ------------- -#}
|
||||
{%- for arg in fun.req -%}
|
||||
{%- if arg.ref == '*' -%}&{%- endif -%}
|
||||
{{arg.name}}
|
||||
{%- if not loop.last %}, {% endif %}
|
||||
{% endfor %}
|
||||
{#- ----------- Optional ------------- -#}
|
||||
{% if fun.req and fun.opt %}, {% endif %}
|
||||
{%- for opt in fun.opt -%}
|
||||
{%- if opt.ref == '*' -%}&{%- endif -%}
|
||||
{{opt.name}}
|
||||
{%- if not loop.last -%}, {% endif %}
|
||||
{%- endfor -%}
|
||||
);
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
/*
|
||||
* composeMatlab
|
||||
* compose a Matlab function call
|
||||
* This macro takes as input a Method object and composes
|
||||
* a Matlab function call by inspecting the types and argument names
|
||||
*/
|
||||
{% macro composeMatlab(fun) %}
|
||||
{# ----------- Return type ------------- #}
|
||||
{%- if fun|noutputs > 1 -%}[{% endif -%}
|
||||
{%- if not fun.rtp|void -%}LVALUE{% endif -%}
|
||||
{%- if not fun.rtp|void and fun|noutputs > 1 -%},{% endif -%}
|
||||
{# ------------- Outputs ------------- -#}
|
||||
{%- for arg in fun.req|outputs + fun.opt|outputs -%}
|
||||
{{arg.name}}
|
||||
{%- if arg.I -%}_out{%- endif -%}
|
||||
{%- if not loop.last %}, {% endif %}
|
||||
{% endfor %}
|
||||
{%- if fun|noutputs > 1 -%}]{% endif -%}
|
||||
{%- if fun|noutputs %} = {% endif -%}
|
||||
cv.{{fun.name}}(
|
||||
{#- ------------ Inputs -------------- -#}
|
||||
{%- for arg in fun.req|inputs + fun.opt|inputs -%}
|
||||
{{arg.name}}
|
||||
{%- if arg.O -%}_in{%- endif -%}
|
||||
{%- if not loop.last %}, {% endif -%}
|
||||
{% endfor -%}
|
||||
);
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
/*
|
||||
* composeVariant
|
||||
* compose a variant call for the ArgumentParser
|
||||
*/
|
||||
{% macro composeVariant(fun) %}
|
||||
addVariant("{{ fun.name }}", {{ fun.req|inputs|length }}, {{ fun.opt|inputs|length }}
|
||||
{%- if fun.opt|inputs|length %}, {% endif -%}
|
||||
{%- for arg in fun.opt|inputs -%}
|
||||
"{{arg.name}}"
|
||||
{%- if not loop.last %}, {% endif -%}
|
||||
{% endfor -%}
|
||||
)
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
/*
|
||||
* composeWithExceptionHandler
|
||||
* compose a function call wrapped in exception traps
|
||||
* This macro takes an input a Method object and composes a function
|
||||
* call through the compose() macro, then wraps the return in traps
|
||||
* for cv::Exceptions, std::exceptions, and all generic exceptions
|
||||
* and returns a useful error message to the Matlab interpreter
|
||||
*/
|
||||
{%- macro composeWithExceptionHandler(fun) -%}
|
||||
// call the opencv function
|
||||
// [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
|
||||
try {
|
||||
{{ compose(fun) }}
|
||||
} catch(cv::Exception& e) {
|
||||
error(std::string("cv::exception caught: ").append(e.what()).c_str());
|
||||
} catch(std::exception& e) {
|
||||
error(std::string("std::exception caught: ").append(e.what()).c_str());
|
||||
} catch(...) {
|
||||
error("Uncaught exception occurred in {{fun.name}}");
|
||||
}
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
/*
|
||||
* handleInputs
|
||||
* unpack input arguments from the Bridge
|
||||
* Given an input Bridge object, this unpacks the object from the Bridge and
|
||||
* casts them into the correct type
|
||||
*/
|
||||
{%- macro handleInputs(fun) %}
|
||||
|
||||
{% if fun|ninputs or (fun|noutputs and not fun.constructor) %}
|
||||
// unpack the arguments
|
||||
{# ----------- Inputs ------------- #}
|
||||
{% for arg in fun.req|inputs %}
|
||||
{{arg.tp}} {{arg.name}} = inputs[{{ loop.index0 }}].to{{arg.tp|toUpperCamelCase}}();
|
||||
{% endfor %}
|
||||
{% for opt in fun.opt|inputs %}
|
||||
{{opt.tp}} {{opt.name}} = inputs[{{loop.index0 + fun.req|inputs|length}}].empty() ? {% if opt.ref == '*' -%} {{opt.tp}}() {%- else -%} {{opt.default}} {%- endif %} : inputs[{{loop.index0 + fun.req|inputs|length}}].to{{opt.tp|toUpperCamelCase}}();
|
||||
{% endfor %}
|
||||
{# ----------- Outputs ------------ #}
|
||||
{% for arg in fun.req|only|outputs %}
|
||||
{{arg.tp}} {{arg.name}};
|
||||
{% endfor %}
|
||||
{% for opt in fun.opt|only|outputs %}
|
||||
{{opt.tp}} {{opt.name}};
|
||||
{% endfor %}
|
||||
{% if not fun.rtp|void and not fun.constructor %}
|
||||
{{fun.rtp}} retval;
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{%- endmacro %}
|
||||
|
||||
/*
|
||||
* handleOutputs
|
||||
* pack outputs into the bridge
|
||||
* Given a set of outputs, this methods assigns them into the bridge for
|
||||
* return to the calling method
|
||||
*/
|
||||
{%- macro handleOutputs(fun) %}
|
||||
|
||||
{% if fun|noutputs %}
|
||||
// assign the outputs into the bridge
|
||||
{% if not fun.rtp|void and not fun.constructor %}
|
||||
outputs[0] = retval;
|
||||
{% endif %}
|
||||
{% for arg in fun.req|outputs %}
|
||||
outputs[{{loop.index0 + fun.rtp|void|not}}] = {{arg.name}};
|
||||
{% endfor %}
|
||||
{% for opt in fun.opt|outputs %}
|
||||
outputs[{{loop.index0 + fun.rtp|void|not + fun.req|outputs|length}}] = {{opt.name}};
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{%- endmacro %}
|
41
modules/matlab/generator/templates/template_build_info.m
Normal file
41
modules/matlab/generator/templates/template_build_info.m
Normal file
@ -0,0 +1,41 @@
|
||||
function buildInformation()
|
||||
%CV.BUILDINFORMATION display OpenCV Toolbox build information
|
||||
%
|
||||
% Call CV.BUILDINFORMATION() to get a printout of diagonstic information
|
||||
% pertaining to your particular build of the OpenCV Toolbox. If you ever
|
||||
% run into issues with the Toolbox, it is useful to submit this
|
||||
% information alongside a bug report to the OpenCV team.
|
||||
%
|
||||
% Copyright {{ time.strftime("%Y", time.localtime()) }} The OpenCV Foundation
|
||||
%
|
||||
info = {
|
||||
' ------------------------------------------------------------------------'
|
||||
' <strong>OpenCV Toolbox</strong>'
|
||||
' Build and diagnostic information'
|
||||
' ------------------------------------------------------------------------'
|
||||
''
|
||||
' <strong>Platform</strong>'
|
||||
' OS: {{ build.os }}'
|
||||
' Architecture: {{ build.arch[0] }}-bit {{ build.arch[1] }}'
|
||||
' Compiler: {{ build.compiler | csv(' ') }}'
|
||||
''
|
||||
' <strong>Matlab</strong>'
|
||||
[' Version: ' version()]
|
||||
[' Mex extension: ' mexext()]
|
||||
' Architecture: {{ build.mex_arch }}'
|
||||
' Mex path: {{ build.mex_script }}'
|
||||
' Mex flags: {{ build.mex_opts | csv(' ') }}'
|
||||
' CXX flags: {{ build.cxx_flags | csv(' ') | stripExtraSpaces | wordwrap(60, True, '\'\n\' ') }}'
|
||||
''
|
||||
' <strong>OpenCV</strong>'
|
||||
' Version: {{ build.opencv_version }}'
|
||||
' Commit: {{ build.commit }}'
|
||||
' Configuration: {{ build.configuration }}'
|
||||
' Modules: {{ build.modules | csv | wordwrap(60, True, '\'\n\' ') }}'
|
||||
''
|
||||
};
|
||||
|
||||
info = cellfun(@(x) [x '\n'], info, 'UniformOutput', false);
|
||||
info = horzcat(info{:});
|
||||
fprintf(info);
|
||||
end
|
98
modules/matlab/generator/templates/template_class_base.cpp
Normal file
98
modules/matlab/generator/templates/template_class_base.cpp
Normal file
@ -0,0 +1,98 @@
|
||||
{% import 'functional.cpp' as functional %}
|
||||
/*
|
||||
* file: {{clss.name}}Bridge.cpp
|
||||
* author: A trusty code generator
|
||||
* date: {{time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())}}
|
||||
*
|
||||
* This file was autogenerated, do not modify.
|
||||
* See LICENSE for full modification and redistribution details.
|
||||
* Copyright {{time.strftime("%Y", time.localtime())}} The OpenCV Foundation
|
||||
*/
|
||||
#include <mex.h>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <opencv2/matlab/map.hpp>
|
||||
#include <opencv2/matlab/bridge.hpp>
|
||||
#include <opencv2/core.hpp>
|
||||
using namespace cv;
|
||||
using namespace matlab;
|
||||
using namespace bridge;
|
||||
|
||||
namespace {
|
||||
|
||||
typedef std::vector<Bridge> (*)({{clss.name}}&, const std::vector<Bridge>&) MethodSignature;
|
||||
|
||||
{% for function in clss.methods %}
|
||||
|
||||
{% if function.constructor %}
|
||||
// wrapper for {{function.name}}() constructor
|
||||
{{ function.clss }} {{function.name}}(const std::vector<Bridge>& inputs) {
|
||||
{{ functional.handleInputs(function) }}
|
||||
{{ functional.compose(function) }}
|
||||
return obj;
|
||||
}
|
||||
{% else %}
|
||||
// wrapper for {{function.name}}() method
|
||||
std::vector<Bridge> {{function.name}}({{clss.name}}& inst, const std::vector<Bridge>& inputs) {
|
||||
std::vector<Bridge> outputs{% if function|noutputs %}({{function|noutputs}}){% endif %};
|
||||
{{ functional.handleInputs(function) }}
|
||||
{{ functional.composeWithExceptionHandler(function) }}
|
||||
{{ functional.handleOutputs(function) }}
|
||||
return outputs;
|
||||
}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
Map<std::string, MethodSignature> createMethodMap() {
|
||||
Map<std::string, MethodSignature> m;
|
||||
{% for function in clss.methods %}
|
||||
m["{{function.name}}"] = &{{function.name}};
|
||||
{% endfor %}
|
||||
|
||||
return m;
|
||||
}
|
||||
static const Map<std::string, MethodSignature> methods = createMethodMap();
|
||||
|
||||
// map of created {{clss.name}} instances. Don't trust the user to keep them safe...
|
||||
static Map<void *, {{clss.name}}> instances;
|
||||
|
||||
/*
|
||||
* {{ clss.name }}
|
||||
* Gateway routine
|
||||
* nlhs - number of return arguments
|
||||
* plhs - pointers to return arguments
|
||||
* nrhs - number of input arguments
|
||||
* prhs - pointers to input arguments
|
||||
*/
|
||||
void mexFunction(int nlhs, mxArray* plhs[],
|
||||
int nrhs, const mxArray* prhs[]) {
|
||||
|
||||
// parse the inputs
|
||||
Bridge method_name(prhs[0]);
|
||||
|
||||
Bridge handle(prhs[1]);
|
||||
std::vector<Bridge> brhs(prhs+2, prhs+nrhs);
|
||||
|
||||
// retrieve the instance of interest
|
||||
try {
|
||||
{{clss.name}}& inst = instances.at(handle.address());
|
||||
} catch (const std::out_of_range& e) {
|
||||
mexErrMsgTxt("Invalid object instance provided");
|
||||
}
|
||||
|
||||
// invoke the correct method on the data
|
||||
try {
|
||||
std::vector<Bridge> blhs = (*methods.at(method_name))(inst, brhs);
|
||||
} catch (const std::out_of_range& e) {
|
||||
mexErrMsgTxt("Unknown method specified");
|
||||
}
|
||||
|
||||
{% block postfun %}
|
||||
{% endblock %}
|
||||
|
||||
{% block cleanup %}
|
||||
{% endblock %}
|
||||
|
||||
}
|
||||
|
||||
} // end namespace
|
31
modules/matlab/generator/templates/template_class_base.m
Normal file
31
modules/matlab/generator/templates/template_class_base.m
Normal file
@ -0,0 +1,31 @@
|
||||
% {{clss.name | upper}}
|
||||
% Matlab handle class for OpenCV object classes
|
||||
%
|
||||
% This file was autogenerated, do not modify.
|
||||
% See LICENSE for full modification and redistribution details.
|
||||
% Copyright {{time.strftime("%Y", time.localtime())}} The OpenCV Foundation
|
||||
classdef {{clss.name}} < handle
|
||||
properties (SetAccess = private, Hidden = true)
|
||||
ptr_ = 0; % handle to the underlying c++ clss instance
|
||||
end
|
||||
|
||||
methods
|
||||
% constructor
|
||||
function this = {{clss.name}}(varargin)
|
||||
this.ptr_ = {{clss.name}}Bridge('new', varargin{:});
|
||||
end
|
||||
|
||||
% destructor
|
||||
function delete(this)
|
||||
{{clss.name}}Bridge(this.ptr_, 'delete');
|
||||
end
|
||||
|
||||
{% for function in clss.functions %}
|
||||
% {{function.__str__()}}
|
||||
function varargout = {{function.name}}(this, varargin)
|
||||
[varargout{1:nargout}] = {{clss.name}}Bridge('{{function.name}}', this.ptr_, varargin{:});
|
||||
end
|
||||
|
||||
{% endfor %}
|
||||
end
|
||||
end
|
46
modules/matlab/generator/templates/template_cvmex_base.m
Normal file
46
modules/matlab/generator/templates/template_cvmex_base.m
Normal file
@ -0,0 +1,46 @@
|
||||
function mex(varargin)
|
||||
%CV.MEX compile MEX-function with OpenCV linkages
|
||||
%
|
||||
% Usage:
|
||||
% CV.MEX [options ...] file [file file ...]
|
||||
%
|
||||
% Description:
|
||||
% CV.MEX compiles one or more C/C++ source files into a shared-library
|
||||
% called a mex-file. This function is equivalent to the builtin MEX
|
||||
% routine, with the notable exception that it automatically resolves
|
||||
% OpenCV includes, and links in the OpenCV libraries where appropriate.
|
||||
% It also forwards the flags used to build OpenCV, so architecture-
|
||||
% specific optimizations can be used.
|
||||
%
|
||||
% CV.MEX is designed to be used in situations where the source(s) you
|
||||
% are compiling contain OpenCV definitions. In such cases, it streamlines
|
||||
% the finding and including of appropriate OpenCV libraries.
|
||||
%
|
||||
% See also: mex
|
||||
%
|
||||
% Copyright {{ time.strftime("%Y", time.localtime()) }} The OpenCV Foundation
|
||||
%
|
||||
|
||||
% forward the OpenCV build flags (C++ only)
|
||||
EXTRA_FLAGS = ['"CXXFLAGS="\$CXXFLAGS '...
|
||||
'{{ cv.flags | trim | wordwrap(60, false, '\'...\n \'') }}""'];
|
||||
|
||||
% add the OpenCV include dirs
|
||||
INCLUDE_DIRS = {{ cv.include_dirs | split | cellarray | wordwrap(60, false, '...\n ') }};
|
||||
|
||||
% add the lib dir (singular in both build tree and install tree)
|
||||
LIB_DIR = '{{ cv.lib_dir }}';
|
||||
|
||||
% add the OpenCV libs. Only the used libs will actually be linked
|
||||
LIBS = {{ cv.libs | split | cellarray | wordwrap(60, false, '...\n ') }};
|
||||
|
||||
% add the mex opts (usually at least -largeArrayDims)
|
||||
OPTS = {{ cv.opts | split | cellarray | wordwrap(60, false, '...\n ') }};
|
||||
|
||||
% merge all of the default options (EXTRA_FLAGS, LIBS, etc) and the options
|
||||
% and files passed by the user (varargin) into a single cell array
|
||||
merged = [ {EXTRA_FLAGS}, INCLUDE_DIRS, {LIB_DIR}, LIBS, OPTS, varargin ];
|
||||
|
||||
% expand the merged argument list into the builtin mex utility
|
||||
mex(merged{:});
|
||||
end
|
62
modules/matlab/generator/templates/template_doc_base.m
Normal file
62
modules/matlab/generator/templates/template_doc_base.m
Normal file
@ -0,0 +1,62 @@
|
||||
{% import 'functional.cpp' as functional %}
|
||||
{{ ('CV.' + fun.name | upper + ' ' + doc.brief | stripTags) | comment(75, '%') | matlabURL }}
|
||||
%
|
||||
% {{ functional.composeMatlab(fun) | upper }}
|
||||
{% if doc.long %}
|
||||
{{ doc.long | stripTags | qualify(fun.name) | comment(75, '% ') | matlabURL }}
|
||||
{% endif %}
|
||||
%
|
||||
{# ----------------------- Returns --------------------- #}
|
||||
{% if fun.rtp|void|not or fun.req|outputs|length or fun.opt|outputs|length %}
|
||||
% Returns:
|
||||
{% if fun.rtp|void|not %}
|
||||
% LVALUE
|
||||
{% endif %}
|
||||
{% for arg in fun.req|outputs + fun.opt|outputs %}
|
||||
{% set uname = arg.name | upper + ('_OUT' if arg.I else '') %}
|
||||
{% if arg.name in doc.params %}
|
||||
{{ (uname + ' ' + doc.params[arg.name]) | stripTags | comment(75, '% ') }}
|
||||
{% else %}
|
||||
{{ uname }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
%
|
||||
{% endif %}
|
||||
{# ----------------- Required Inputs ------------------- #}
|
||||
{% if fun.req|inputs|length %}
|
||||
% Required Inputs:
|
||||
{% for arg in fun.req|inputs %}
|
||||
{% set uname = arg.name | upper + ('_IN' if arg.O else '') %}
|
||||
{% if arg.name in doc.params %}
|
||||
{{ (uname + ' ' + doc.params[arg.name]) | stripTags | comment(75, '% ') }}
|
||||
{% else %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
%
|
||||
{% endif %}
|
||||
{# ------------------ Optional Inputs ------------------- #}
|
||||
{% if fun.opt|inputs|length %}
|
||||
% Optional Inputs:
|
||||
{% for arg in fun.opt|inputs %}
|
||||
{% set uname = arg.name | upper + ('_IN' if arg.O else '') + ' (default: ' + arg.default + ')' %}
|
||||
{% if arg.name in doc.params %}
|
||||
{{ (uname + ' ' + doc.params[arg.name]) | stripTags | comment(75, '% ') }}
|
||||
{% else %}
|
||||
{{ uname }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
%
|
||||
{% endif %}
|
||||
{# ---------------------- See also --------------------- #}
|
||||
{% if 'seealso' in doc %}
|
||||
% See also: {% for item in doc['seealso'] %}
|
||||
cv.{{ item }}{% if not loop.last %}, {% endif %}
|
||||
{% endfor %}
|
||||
|
||||
%
|
||||
{% endif %}
|
||||
{# ----------------------- Online ---------------------- #}
|
||||
{% set url = 'http://docs.opencv.org/modules/' + doc.module + '/doc/' + (doc.file|filename) + '.html#' + (fun.name|slugify) %}
|
||||
% Online docs: {{ url | matlabURL }}
|
||||
% Copyright {{ time.strftime("%Y", time.localtime()) }} The OpenCV Foundation
|
||||
%
|
@ -0,0 +1,60 @@
|
||||
{% import 'functional.cpp' as functional %}
|
||||
/*
|
||||
* file: {{fun.name}}.cpp
|
||||
* author: A trusty code generator
|
||||
* date: {{time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())}}
|
||||
*
|
||||
* This file was autogenerated, do not modify.
|
||||
* See LICENSE for full modification and redistribution details.
|
||||
* Copyright {{time.strftime("%Y", time.localtime())}} The OpenCV Foundation
|
||||
*/
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <cassert>
|
||||
#include <exception>
|
||||
#include <opencv2/matlab/bridge.hpp>
|
||||
#include <opencv2/{{includes}}.hpp>
|
||||
using namespace cv;
|
||||
using namespace matlab;
|
||||
using namespace bridge;
|
||||
|
||||
/*
|
||||
* {{ fun.name }}
|
||||
* {{ fun }}
|
||||
* Gateway routine
|
||||
* nlhs - number of return arguments
|
||||
* plhs - pointers to return arguments
|
||||
* nrhs - number of input arguments
|
||||
* prhs - pointers to input arguments
|
||||
*/
|
||||
void mexFunction(int nlhs, mxArray*{% if fun|noutputs %} plhs[]{% else %}*{% endif %},
|
||||
int nrhs, const mxArray*{% if fun|ninputs %} prhs[]{% else %}*{% endif %}) {
|
||||
|
||||
{% if fun|ninputs %}
|
||||
// parse the inputs
|
||||
ArgumentParser parser("{{fun.name}}");
|
||||
parser.{{ functional.composeVariant(fun) }};
|
||||
MxArrayVector sorted = parser.parse(MxArrayVector(prhs, prhs+nrhs));
|
||||
{% endif %}
|
||||
|
||||
{% if fun|ninputs or fun|noutputs %}
|
||||
// setup
|
||||
{% if fun|ninputs %}
|
||||
BridgeVector inputs(sorted.begin(), sorted.end());
|
||||
{% endif -%}
|
||||
{%- if fun|noutputs %}
|
||||
BridgeVector outputs({{fun|noutputs}});
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{{ functional.handleInputs(fun) }}
|
||||
{{ functional.composeWithExceptionHandler(fun) }}
|
||||
{{ functional.handleOutputs(fun) }}
|
||||
|
||||
{% if fun|noutputs %}
|
||||
// push the outputs back to matlab
|
||||
for (size_t n = 0; n < static_cast<size_t>(std::max(nlhs,1)); ++n) {
|
||||
plhs[n] = outputs[n].toMxArray().releaseOwnership();
|
||||
}
|
||||
{% endif %}
|
||||
}
|
71
modules/matlab/generator/templates/template_map_base.m
Normal file
71
modules/matlab/generator/templates/template_map_base.m
Normal file
@ -0,0 +1,71 @@
|
||||
% ------------------------------------------------------------------------
|
||||
% <strong>OpenCV Toolbox</strong>
|
||||
% Matlab bindings for the OpenCV library
|
||||
% ------------------------------------------------------------------------
|
||||
%
|
||||
% The OpenCV Toolbox allows you to make calls to native OpenCV methods
|
||||
% and classes directly from within Matlab.
|
||||
%
|
||||
% <strong>PATHS</strong>
|
||||
% To call OpenCV methods from anywhere in your workspace, add the
|
||||
% directory containing this file to the path:
|
||||
%
|
||||
% addpath(fileparts(which('cv')));
|
||||
%
|
||||
% The OpenCV Toolbox contains two important locations:
|
||||
% cv.m - This file, containing OpenCV enums
|
||||
% +cv/ - The directory containing the OpenCV methods and classes
|
||||
%
|
||||
% <strong>CALLING SYNTAX</strong>
|
||||
% To call an OpenCV method, class or enum, it must be prefixed with the
|
||||
% 'cv' qualifier. For example:
|
||||
%
|
||||
% % perform a Fourier transform
|
||||
% Xf = cv.dft(X, cv.DFT_COMPLEX_OUTPUT);
|
||||
%
|
||||
% % create a VideoCapture object, and open a file
|
||||
% camera = cv.VideoCapture();
|
||||
% camera.open('/path/to/file');
|
||||
%
|
||||
% You can specify optional arguments by name, similar to how python
|
||||
% and many builtin Matlab functions work. For example, the cv.dft
|
||||
% method used above has an optional 'nonzeroRows' argument. If
|
||||
% you want to specify that, but keep the default 'flags' behaviour,
|
||||
% simply call the method as:
|
||||
%
|
||||
% Xf = cv.dft(X, 'nonzeroRows', 7);
|
||||
%
|
||||
% <strong>HELP</strong>
|
||||
% Each method has its own help file containing information about the
|
||||
% arguments, return values, and what operation the method performs.
|
||||
% You can access this help information by typing:
|
||||
%
|
||||
% help cv.methodName
|
||||
%
|
||||
% The full list of methods can be found by inspecting the +cv/
|
||||
% directory. Note that the methods available to you will depend
|
||||
% on which modules you configured OpenCV to build.
|
||||
%
|
||||
% <strong>DIAGNOSTICS</strong>
|
||||
% If you are having problems with the OpenCV Toolbox and need to send a
|
||||
% bug report to the OpenCV team, you can get a printout of diagnostic
|
||||
% information to submit along with your report by typing:
|
||||
%
|
||||
% <a href="matlab: cv.buildInformation()">cv.buildInformation();</a>
|
||||
%
|
||||
% <strong>OTHER RESOURCES</strong>
|
||||
% OpenCV documentation online: <a href="matlab: web('http://docs.opencv.org', '-browser')">http://docs.opencv.org</a>
|
||||
% OpenCV issue tracker: <a href="matlab: web('http://code.opencv.org', '-browser')">http://code.opencv.org</a>
|
||||
% OpenCV Q&A: <a href="matlab: web('http://answers.opencv.org', '-browser')">http://answers.opencv.org</a>
|
||||
%
|
||||
% See also: cv.help, <a href="matlab: cv.buildInformation()">cv.buildInformation</a>
|
||||
%
|
||||
% Copyright {{ time.strftime("%Y", time.localtime()) }} The OpenCV Foundation
|
||||
%
|
||||
classdef cv
|
||||
properties (Constant = true)
|
||||
{% for key, val in constants.items() %}
|
||||
{{key}} = {{val|formatMatlabConstant(constants)}};
|
||||
{% endfor %}
|
||||
end
|
||||
end
|
536
modules/matlab/include/opencv2/matlab/bridge.hpp
Normal file
536
modules/matlab/include/opencv2/matlab/bridge.hpp
Normal file
@ -0,0 +1,536 @@
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this
|
||||
// license. If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote
|
||||
// products derived from this software without specific prior written
|
||||
// permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is"
|
||||
// and any express or implied warranties, including, but not limited to, the
|
||||
// implied warranties of merchantability and fitness for a particular purpose
|
||||
// are disclaimed. In no event shall the Intel Corporation or contributors be
|
||||
// liable for any direct, indirect, incidental, special, exemplary, or
|
||||
// consequential damages (including, but not limited to, procurement of
|
||||
// substitute goods or services; loss of use, data, or profits; or business
|
||||
// interruption) however caused and on any theory of liability, whether in
|
||||
// contract, strict liability, or tort (including negligence or otherwise)
|
||||
// arising in any way out of the use of this software, even if advised of the
|
||||
// possibility of such damage.
|
||||
//
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
#ifndef OPENCV_BRIDGE_HPP_
|
||||
#define OPENCV_BRIDGE_HPP_
|
||||
|
||||
#include "mxarray.hpp"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <opencv2/core.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
#include <opencv2/calib3d.hpp>
|
||||
|
||||
namespace cv {
|
||||
namespace bridge {
|
||||
|
||||
/*
 * Custom typedefs
 * Parsed names from the hdr_parser
 */
// vector aliases, named the way the binding generator expects them
typedef std::vector<cv::Mat> vector_Mat;
typedef std::vector<cv::Point> vector_Point;
typedef std::vector<int> vector_int;
typedef std::vector<float> vector_float;
typedef std::vector<cv::String> vector_String;
typedef std::vector<unsigned char> vector_uchar;
typedef std::vector<std::vector<char> > vector_vector_char;
typedef std::vector<std::vector<cv::DMatch> > vector_vector_DMatch;
typedef std::vector<cv::Rect> vector_Rect;
typedef std::vector<cv::KeyPoint> vector_KeyPoint;
// smart-pointer aliases for OpenCV algorithm objects
typedef cv::Ptr<cv::StereoBM> Ptr_StereoBM;
typedef cv::Ptr<cv::StereoSGBM> Ptr_StereoSGBM;
typedef cv::Ptr<cv::FeatureDetector> Ptr_FeatureDetector;
typedef cv::Ptr<CLAHE> Ptr_CLAHE;
|
||||
|
||||
|
||||
// ----------------------------------------------------------------------------
// PREDECLARATIONS
// ----------------------------------------------------------------------------
class Bridge;
typedef std::vector<Bridge> BridgeVector;

// deep copy with transpose from OpenCV to Matlab storage
// (definition at the bottom of this file)
template <typename InputScalar, typename OutputScalar>
void deepCopyAndTranspose(const cv::Mat& src, matlab::MxArray& dst);

// deep copy with transpose from Matlab to OpenCV storage
// (definition at the bottom of this file)
template <typename InputScalar, typename OutputScalar>
void deepCopyAndTranspose(const matlab::MxArray& src, cv::Mat& dst);
|
||||
|
||||
|
||||
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// BRIDGE
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
/*!
|
||||
* @class Bridge
|
||||
* @brief Type conversion class for converting OpenCV and native C++ types
|
||||
*
|
||||
* Bridge provides an interface for converting between OpenCV/C++ types
|
||||
* to Matlab's mxArray format.
|
||||
*
|
||||
* Each type conversion requires three operators:
|
||||
* // conversion from ObjectType --> Bridge
|
||||
* Bridge& operator=(const ObjectType&);
|
||||
* // implicit conversion from Bridge --> ObjectType
|
||||
* operator ObjectType();
|
||||
* // explicit conversion from Bridge --> ObjectType
|
||||
* ObjectType toObjectType();
|
||||
*
|
||||
* The bridging class provides common conversions between OpenCV types,
|
||||
* std and stl types to Matlab's mxArray format. By inheriting Bridge,
|
||||
* you can add your own custom type conversions.
|
||||
*
|
||||
* Because Matlab uses a homogeneous storage type, all operations are provided
|
||||
* relative to Matlab's type. That is, Bridge always stores an matlab::MxArray object
|
||||
* and converts to and from other object types on demand.
|
||||
*
|
||||
* NOTE: for the explicit conversion function, the object name must be
|
||||
* in UpperCamelCase, for example:
|
||||
* int --> toInt
|
||||
* my_object --> MyObject
|
||||
* my_Object --> MyObject
|
||||
* myObject --> MyObject
|
||||
* this is because the binding generator standardises the calling syntax.
|
||||
*
|
||||
* Bridge attempts to make as few assumptions as possible, however in
|
||||
* some cases where 1-to-1 mappings don't exist, some assumptions are necessary.
|
||||
* In particular:
|
||||
* - conversion from of a 2-channel Mat to an mxArray will result in a complex
|
||||
* output
|
||||
* - conversion from multi-channel interleaved Mats will result in
|
||||
* multichannel planar mxArrays
|
||||
*
|
||||
*/
|
||||
class Bridge {
|
||||
private:
|
||||
matlab::MxArray ptr_;
|
||||
public:
|
||||
// bridges are default constructible
|
||||
Bridge() {}
|
||||
virtual ~Bridge() {}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Bridge Properties
|
||||
// --------------------------------------------------------------------------
|
||||
bool empty() const { return ptr_.empty(); }
|
||||
|
||||
/*! @brief unpack an object from Matlab into C++
|
||||
*
|
||||
* this function checks whether the given bridge is derived from an
|
||||
* object in Matlab. If so, it converts it to a (platform dependent)
|
||||
* pointer to the underlying C++ object.
|
||||
*
|
||||
* NOTE! This function assumes that the C++ pointer is stored in inst_
|
||||
*/
|
||||
template <typename Object>
|
||||
Object* getObjectByName(const std::string& name) {
|
||||
// check that the object is actually of correct type before unpacking
|
||||
// TODO: Traverse class hierarchy?
|
||||
if (!ptr_.isClass(name)) {
|
||||
matlab::error(std::string("Expected class ").append(std::string(name))
|
||||
.append(" but was given ").append(ptr_.className()));
|
||||
}
|
||||
// get the instance field
|
||||
matlab::MxArray inst = ptr_.field("inst_");
|
||||
Object* obj = NULL;
|
||||
// make sure the pointer is the correct size for the system
|
||||
if (sizeof(void *) == 8 && inst.ID() == mxUINT64_CLASS) {
|
||||
// 64-bit pointers
|
||||
// TODO: Do we REALLY REALLY need to reinterpret_cast?
|
||||
obj = reinterpret_cast<Object *>(inst.scalar<uint64_t>());
|
||||
} else if (sizeof(void *) == 4 && inst.ID() == mxUINT32_CLASS) {
|
||||
// 32-bit pointers
|
||||
obj = reinterpret_cast<Object *>(inst.scalar<uint32_t>());
|
||||
} else {
|
||||
matlab::error("Incorrect pointer type stored for architecture");
|
||||
}
|
||||
|
||||
// finally check if the object is NULL
|
||||
matlab::conditionalError(obj, std::string("Object ").append(std::string(name)).append(std::string(" is NULL")));
|
||||
return obj;
|
||||
}
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// MATLAB TYPES
|
||||
// --------------------------------------------------------------------------
|
||||
Bridge& operator=(const mxArray* obj) { ptr_ = obj; return *this; }
|
||||
Bridge& operator=(const matlab::MxArray& obj) { ptr_ = obj; return *this; }
|
||||
Bridge(const matlab::MxArray& obj) : ptr_(obj) {}
|
||||
Bridge(const mxArray* obj) : ptr_(obj) {}
|
||||
matlab::MxArray toMxArray() { return ptr_; }
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// MATRIX CONVERSIONS
|
||||
// --------------------------------------------------------------------------
|
||||
Bridge& operator=(const cv::Mat& mat);
|
||||
cv::Mat toMat() const;
|
||||
operator cv::Mat() const { return toMat(); }
|
||||
|
||||
template <typename Scalar>
|
||||
static matlab::MxArray FromMat(const cv::Mat& mat) {
|
||||
matlab::MxArray arr(mat.rows, mat.cols, mat.channels(), matlab::Traits<Scalar>::ScalarType);
|
||||
switch (mat.depth()) {
|
||||
case CV_8U: deepCopyAndTranspose<uint8_t, Scalar>(mat, arr); break;
|
||||
case CV_8S: deepCopyAndTranspose<int8_t, Scalar>(mat, arr); break;
|
||||
case CV_16U: deepCopyAndTranspose<uint16_t, Scalar>(mat, arr); break;
|
||||
case CV_16S: deepCopyAndTranspose<int16_t, Scalar>(mat, arr); break;
|
||||
case CV_32S: deepCopyAndTranspose<int32_t, Scalar>(mat, arr); break;
|
||||
case CV_32F: deepCopyAndTranspose<float, Scalar>(mat, arr); break;
|
||||
case CV_64F: deepCopyAndTranspose<double, Scalar>(mat, arr); break;
|
||||
default: matlab::error("Attempted to convert from unknown class");
|
||||
}
|
||||
return arr;
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
cv::Mat toMat() const {
|
||||
cv::Mat mat(ptr_.rows(), ptr_.cols(), CV_MAKETYPE(cv::DataType<Scalar>::type, ptr_.channels()));
|
||||
switch (ptr_.ID()) {
|
||||
case mxINT8_CLASS: deepCopyAndTranspose<int8_t, Scalar>(ptr_, mat); break;
|
||||
case mxUINT8_CLASS: deepCopyAndTranspose<uint8_t, Scalar>(ptr_, mat); break;
|
||||
case mxINT16_CLASS: deepCopyAndTranspose<int16_t, Scalar>(ptr_, mat); break;
|
||||
case mxUINT16_CLASS: deepCopyAndTranspose<uint16_t, Scalar>(ptr_, mat); break;
|
||||
case mxINT32_CLASS: deepCopyAndTranspose<int32_t, Scalar>(ptr_, mat); break;
|
||||
case mxUINT32_CLASS: deepCopyAndTranspose<uint32_t, Scalar>(ptr_, mat); break;
|
||||
case mxINT64_CLASS: deepCopyAndTranspose<int64_t, Scalar>(ptr_, mat); break;
|
||||
case mxUINT64_CLASS: deepCopyAndTranspose<uint64_t, Scalar>(ptr_, mat); break;
|
||||
case mxSINGLE_CLASS: deepCopyAndTranspose<float, Scalar>(ptr_, mat); break;
|
||||
case mxDOUBLE_CLASS: deepCopyAndTranspose<double, Scalar>(ptr_, mat); break;
|
||||
case mxCHAR_CLASS: deepCopyAndTranspose<char, Scalar>(ptr_, mat); break;
|
||||
case mxLOGICAL_CLASS: deepCopyAndTranspose<int8_t, Scalar>(ptr_, mat); break;
|
||||
default: matlab::error("Attempted to convert from unknown class");
|
||||
}
|
||||
return mat;
|
||||
}
|
||||
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// INTEGRAL TYPES
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// --------------------------- string --------------------------------------
|
||||
Bridge& operator=(const std::string& ) { return *this; }
|
||||
std::string toString() {
|
||||
return ptr_.toString();
|
||||
}
|
||||
operator std::string() { return toString(); }
|
||||
|
||||
// --------------------------- bool --------------------------------------
|
||||
Bridge& operator=(const bool& ) { return *this; }
|
||||
bool toBool() { return 0; }
|
||||
operator bool() { return toBool(); }
|
||||
|
||||
// --------------------------- double --------------------------------------
|
||||
Bridge& operator=(const double& ) { return *this; }
|
||||
double toDouble() { return ptr_.scalar<double>(); }
|
||||
operator double() { return toDouble(); }
|
||||
|
||||
// --------------------------- float ---------------------------------------
|
||||
Bridge& operator=(const float& ) { return *this; }
|
||||
float toFloat() { return ptr_.scalar<float>(); }
|
||||
operator float() { return toFloat(); }
|
||||
|
||||
// --------------------------- int --------------------------------------
|
||||
Bridge& operator=(const int& ) { return *this; }
|
||||
int toInt() { return ptr_.scalar<int>(); }
|
||||
operator int() { return toInt(); }
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// CORE OPENCV TYPES
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// -------------------------- Point --------------------------------------
|
||||
Bridge& operator=(const cv::Point& ) { return *this; }
|
||||
cv::Point toPoint() const { return cv::Point(); }
|
||||
operator cv::Point() const { return toPoint(); }
|
||||
|
||||
// -------------------------- Point2f ------------------------------------
|
||||
Bridge& operator=(const cv::Point2f& ) { return *this; }
|
||||
cv::Point2f toPoint2f() const { return cv::Point2f(); }
|
||||
operator cv::Point2f() const { return toPoint2f(); }
|
||||
|
||||
// -------------------------- Point2d ------------------------------------
|
||||
Bridge& operator=(const cv::Point2d& ) { return *this; }
|
||||
cv::Point2d toPoint2d() const { return cv::Point2d(); }
|
||||
operator cv::Point2d() const { return toPoint2d(); }
|
||||
|
||||
// -------------------------- Size ---------------------------------------
|
||||
Bridge& operator=(const cv::Size& ) { return *this; }
|
||||
cv::Size toSize() const { return cv::Size(); }
|
||||
operator cv::Size() const { return toSize(); }
|
||||
|
||||
// -------------------------- Moments --------------------------------------
|
||||
Bridge& operator=(const cv::Moments& ) { return *this; }
|
||||
cv::Moments toMoments() const { return cv::Moments(); }
|
||||
operator cv::Moments() const { return toMoments(); }
|
||||
|
||||
// -------------------------- Scalar --------------------------------------
|
||||
Bridge& operator=(const cv::Scalar& ) { return *this; }
|
||||
cv::Scalar toScalar() { return cv::Scalar(); }
|
||||
operator cv::Scalar() { return toScalar(); }
|
||||
|
||||
// -------------------------- Rect -----------------------------------------
|
||||
Bridge& operator=(const cv::Rect& ) { return *this; }
|
||||
cv::Rect toRect() { return cv::Rect(); }
|
||||
operator cv::Rect() { return toRect(); }
|
||||
|
||||
// ---------------------- RotatedRect ---------------------------------------
|
||||
Bridge& operator=(const cv::RotatedRect& ) { return *this; }
|
||||
cv::RotatedRect toRotatedRect() { return cv::RotatedRect(); }
|
||||
operator cv::RotatedRect() { return toRotatedRect(); }
|
||||
|
||||
// ---------------------- TermCriteria --------------------------------------
|
||||
Bridge& operator=(const cv::TermCriteria& ) { return *this; }
|
||||
cv::TermCriteria toTermCriteria() { return cv::TermCriteria(); }
|
||||
operator cv::TermCriteria() { return toTermCriteria(); }
|
||||
|
||||
// ---------------------- RNG --------------------------------------
|
||||
Bridge& operator=(const cv::RNG& ) { return *this; }
|
||||
/*! @brief explicit conversion to cv::RNG()
|
||||
*
|
||||
* Converts a bridge object to a cv::RNG(). We explicitly assert that
|
||||
* the object is an RNG in matlab space before attempting to deference
|
||||
* its pointer
|
||||
*/
|
||||
cv::RNG toRNG() {
|
||||
return (*getObjectByName<cv::RNG>("RNG"));
|
||||
}
|
||||
operator cv::RNG() { return toRNG(); }
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// OPENCV VECTOR TYPES
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// -------------------- vector_Mat ------------------------------------------
|
||||
Bridge& operator=(const vector_Mat& ) { return *this; }
|
||||
vector_Mat toVectorMat() { return vector_Mat(); }
|
||||
operator vector_Mat() { return toVectorMat(); }
|
||||
|
||||
// --------------------------- vector_int ----------------------------------
|
||||
Bridge& operator=(const vector_int& ) { return *this; }
|
||||
vector_int toVectorInt() { return vector_int(); }
|
||||
operator vector_int() { return toVectorInt(); }
|
||||
|
||||
// --------------------------- vector_float --------------------------------
|
||||
Bridge& operator=(const vector_float& ) { return *this; }
|
||||
vector_float toVectorFloat() { return vector_float(); }
|
||||
operator vector_float() { return toVectorFloat(); }
|
||||
|
||||
// --------------------------- vector_Rect ---------------------------------
|
||||
Bridge& operator=(const vector_Rect& ) { return *this; }
|
||||
vector_Rect toVectorRect() { return vector_Rect(); }
|
||||
operator vector_Rect() { return toVectorRect(); }
|
||||
|
||||
// --------------------------- vector_KeyPoint -----------------------------
|
||||
Bridge& operator=(const vector_KeyPoint& ) { return *this; }
|
||||
vector_KeyPoint toVectorKeyPoint() { return vector_KeyPoint(); }
|
||||
operator vector_KeyPoint() { return toVectorKeyPoint(); }
|
||||
|
||||
// --------------------------- vector_String -------------------------------
|
||||
Bridge& operator=(const vector_String& ) { return *this; }
|
||||
vector_String toVectorString() { return vector_String(); }
|
||||
operator vector_String() { return toVectorString(); }
|
||||
|
||||
// ------------------------ vector_Point ------------------------------------
|
||||
Bridge& operator=(const vector_Point& ) { return *this; }
|
||||
vector_Point toVectorPoint() { return vector_Point(); }
|
||||
operator vector_Point() { return toVectorPoint(); }
|
||||
|
||||
// ------------------------ vector_uchar ------------------------------------
|
||||
Bridge& operator=(const vector_uchar& ) { return *this; }
|
||||
vector_uchar toVectorUchar() { return vector_uchar(); }
|
||||
operator vector_uchar() { return toVectorUchar(); }
|
||||
|
||||
// ------------------------ vector_vector_char ------------------------------
|
||||
Bridge& operator=(const vector_vector_char& ) { return *this; }
|
||||
vector_vector_char toVectorVectorChar() { return vector_vector_char(); }
|
||||
operator vector_vector_char() { return toVectorVectorChar(); }
|
||||
|
||||
// ------------------------ vector_vector_DMatch ---------------------------
|
||||
Bridge& operator=(const vector_vector_DMatch& ) { return *this; }
|
||||
vector_vector_DMatch toVectorVectorDMatch() { return vector_vector_DMatch(); }
|
||||
operator vector_vector_DMatch() { return toVectorVectorDMatch(); }
|
||||
|
||||
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// OPENCV COMPOUND TYPES
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// --------------------------- Ptr_StereoBM -----------------------------
|
||||
Bridge& operator=(const Ptr_StereoBM& ) { return *this; }
|
||||
Ptr_StereoBM toPtrStereoBM() { return Ptr_StereoBM(); }
|
||||
operator Ptr_StereoBM() { return toPtrStereoBM(); }
|
||||
|
||||
// --------------------------- Ptr_StereoSGBM ---------------------------
|
||||
Bridge& operator=(const Ptr_StereoSGBM& ) { return *this; }
|
||||
Ptr_StereoSGBM toPtrStereoSGBM() { return Ptr_StereoSGBM(); }
|
||||
operator Ptr_StereoSGBM() { return toPtrStereoSGBM(); }
|
||||
|
||||
// --------------------------- Ptr_FeatureDetector ----------------------
|
||||
Bridge& operator=(const Ptr_FeatureDetector& ) { return *this; }
|
||||
Ptr_FeatureDetector toPtrFeatureDetector() { return Ptr_FeatureDetector(); }
|
||||
operator Ptr_FeatureDetector() { return toPtrFeatureDetector(); }
|
||||
|
||||
// --------------------------- Ptr_CLAHE --------------------------------
|
||||
Bridge& operator=(const Ptr_CLAHE& ) { return *this; }
|
||||
Ptr_CLAHE toPtrCLAHE() { return Ptr_CLAHE(); }
|
||||
operator Ptr_CLAHE() { return toPtrCLAHE(); }
|
||||
}; // class Bridge
|
||||
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// SPECIALIZATIONS
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
/*!
 * @brief template specialization for inheriting types
 *
 * This template specialization attempts to preserve the best mapping
 * between OpenCV and Matlab types. Matlab uses double types almost universally, so
 * all floating float types are converted to doubles.
 * Unfortunately OpenCV does not have a native logical type, so
 * that gets mapped to an unsigned 8-bit value
 */
template <>
matlab::MxArray Bridge::FromMat<matlab::InheritType>(const cv::Mat& mat) {
  // dispatch on the source depth so the Matlab array is allocated with a
  // matching (or widened) scalar type
  switch (mat.depth()) {
    case CV_8U:  return FromMat<uint8_t>(mat);
    case CV_8S:  return FromMat<int8_t>(mat);
    case CV_16U: return FromMat<uint16_t>(mat);
    case CV_16S: return FromMat<int16_t>(mat);
    case CV_32S: return FromMat<int32_t>(mat);
    case CV_32F: return FromMat<double>(mat); //NOTE: Matlab uses double as native type!
    case CV_64F: return FromMat<double>(mat);
    default: matlab::error("Attempted to convert from unknown class");
  }
  // unreachable: error() raises back to Matlab; the return placates compilers
  return matlab::MxArray();
}
|
||||
|
||||
/*!
 * @brief template specialization for inheriting types
 *
 * This template specialization attempts to preserve the best mapping
 * between Matlab and OpenCV types. OpenCV has poor support for double precision
 * types, so all floating point types are cast to float. Logicals get cast
 * to unsigned 8-bit value.
 */
template <>
cv::Mat Bridge::toMat<matlab::InheritType>() const {
  switch (ptr_.ID()) {
    case mxINT8_CLASS:    return toMat<int8_t>();
    case mxUINT8_CLASS:   return toMat<uint8_t>();
    case mxINT16_CLASS:   return toMat<int16_t>();
    case mxUINT16_CLASS:  return toMat<uint16_t>();
    case mxINT32_CLASS:   return toMat<int32_t>();
    // NOTE(review): unsigned 32/64-bit inputs are routed through the signed
    // equivalents — presumably because OpenCV has no unsigned 32/64-bit
    // depths — so values above the signed range will wrap. Confirm this is
    // the intended behaviour.
    case mxUINT32_CLASS:  return toMat<int32_t>();
    case mxINT64_CLASS:   return toMat<int64_t>();
    case mxUINT64_CLASS:  return toMat<int64_t>();
    case mxSINGLE_CLASS:  return toMat<float>();
    case mxDOUBLE_CLASS:  return toMat<float>(); //NOTE: OpenCV uses float as native type!
    case mxCHAR_CLASS:    return toMat<int8_t>();
    case mxLOGICAL_CLASS: return toMat<int8_t>();
    default: matlab::error("Attempted to convert from unknown class");
  }
  // unreachable: error() raises back to Matlab; the return placates compilers
  return cv::Mat();
}
|
||||
|
||||
// Default (depth-inheriting) matrix conversions backing the non-template
// operator=(const cv::Mat&) and toMat() declared inside the class.
Bridge& Bridge::operator=(const cv::Mat& mat) { ptr_ = FromMat<matlab::InheritType>(mat); return *this; }
cv::Mat Bridge::toMat() const { return toMat<matlab::InheritType>(); }
|
||||
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// MATRIX TRANSPOSE
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
|
||||
/*!
 * @brief deep copy a cv::Mat into a matlab::MxArray, channel by channel
 *
 * Source and destination must already agree on rows, cols and channel count
 * (checked via conditionalError). Each channel is transposed in place and
 * then converted directly into the corresponding plane of the MxArray's
 * storage, so no extra output buffer is allocated.
 */
template <typename InputScalar, typename OutputScalar>
void deepCopyAndTranspose(const cv::Mat& in, matlab::MxArray& out) {
  matlab::conditionalError(static_cast<size_t>(in.rows) == out.rows(), "Matrices must have the same number of rows");
  matlab::conditionalError(static_cast<size_t>(in.cols) == out.cols(), "Matrices must have the same number of cols");
  matlab::conditionalError(static_cast<size_t>(in.channels()) == out.channels(), "Matrices must have the same number of channels");
  // split the interleaved source into per-channel planes
  std::vector<cv::Mat> channels;
  cv::split(in, channels);
  for (size_t c = 0; c < out.channels(); ++c) {
    // transpose first, then convert straight into the c-th plane of `out`
    cv::transpose(channels[c], channels[c]);
    // wrap (not copy) the destination plane's raw storage
    cv::Mat outmat(out.cols(), out.rows(), cv::DataType<OutputScalar>::type,
                   static_cast<void *>(out.real<OutputScalar>() + out.cols()*out.rows()*c));
    channels[c].convertTo(outmat, cv::DataType<OutputScalar>::type);
  }

  // previous BLAS-style implementation, kept for reference:
  //const InputScalar* inp = in.ptr<InputScalar>(0);
  //OutputScalar* outp = out.real<OutputScalar>();
  //gemt('R', out.rows(), out.cols(), inp, in.step1(), outp, out.rows());
}
|
||||
|
||||
/*!
 * @brief deep copy a matlab::MxArray into a cv::Mat, channel by channel
 *
 * Source and destination must already agree on rows, cols and channel count
 * (checked via conditionalError). Each planar channel of the MxArray is
 * wrapped without copying, converted to the output scalar type, transposed,
 * and finally all planes are merged into the interleaved output Mat.
 */
template <typename InputScalar, typename OutputScalar>
void deepCopyAndTranspose(const matlab::MxArray& in, cv::Mat& out) {
  matlab::conditionalError(in.rows() == static_cast<size_t>(out.rows), "Matrices must have the same number of rows");
  matlab::conditionalError(in.cols() == static_cast<size_t>(out.cols), "Matrices must have the same number of cols");
  matlab::conditionalError(in.channels() == static_cast<size_t>(out.channels()), "Matrices must have the same number of channels");
  std::vector<cv::Mat> channels;
  for (size_t c = 0; c < in.channels(); ++c) {
    cv::Mat outmat;
    // wrap the c-th source plane; const_cast is needed because cv::Mat's
    // data-pointer constructor takes non-const storage even for reads
    cv::Mat inmat(in.cols(), in.rows(), cv::DataType<InputScalar>::type,
                  static_cast<void *>(const_cast<InputScalar *>(in.real<InputScalar>() + in.cols()*in.rows()*c)));
    inmat.convertTo(outmat, cv::DataType<OutputScalar>::type);
    cv::transpose(outmat, outmat);
    channels.push_back(outmat);
  }
  // interleave the converted planes into the output
  cv::merge(channels, out);

  // previous BLAS-style implementation, kept for reference:
  //const InputScalar* inp = in.real<InputScalar>();
  //OutputScalar* outp = out.ptr<OutputScalar>(0);
  //gemt('C', in.rows(), in.cols(), inp, in.rows(), outp, out.step1());
}
|
||||
|
||||
|
||||
|
||||
} // namespace bridge
|
||||
} // namespace cv
|
||||
|
||||
#endif
|
91
modules/matlab/include/opencv2/matlab/map.hpp
Normal file
91
modules/matlab/include/opencv2/matlab/map.hpp
Normal file
@ -0,0 +1,91 @@
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this
|
||||
// license. If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote
|
||||
// products derived from this software without specific prior written
|
||||
// permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is"
|
||||
// and any express or implied warranties, including, but not limited to, the
|
||||
// implied warranties of merchantability and fitness for a particular purpose
|
||||
// are disclaimed. In no event shall the Intel Corporation or contributors be
|
||||
// liable for any direct, indirect, incidental, special, exemplary, or
|
||||
// consequential damages (including, but not limited to, procurement of
|
||||
// substitute goods or services; loss of use, data, or profits; or business
|
||||
// interruption) however caused and on any theory of liability, whether in
|
||||
// contract, strict liability, or tort (including negligence or otherwise)
|
||||
// arising in any way out of the use of this software, even if advised of the
|
||||
// possibility of such damage.
|
||||
//
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
#ifndef OPENCV_MAP_HPP_
|
||||
#define OPENCV_MAP_HPP_
|
||||
|
||||
namespace matlab {
|
||||
#if __cplusplus >= 201103L
|
||||
|
||||
// If we have C++11 support, we just want to use unordered_map
|
||||
#include <unordered_map>
|
||||
template <typename KeyType, typename ValueType>
|
||||
using Map = std::unordered_map<KeyType, ValueType>;
|
||||
|
||||
#else
|
||||
|
||||
// If we don't have C++11 support, we wrap another map implementation
|
||||
// in the same public API as unordered_map
|
||||
#include <map>
|
||||
#include <stdexcept>
|
||||
|
||||
/*!
 * @brief minimal substitute for std::unordered_map on pre-C++11 toolchains
 *
 * Wraps std::map behind the subset of the unordered_map public API that this
 * module uses: operator[] and throwing at() (mutable and const). Unlike
 * unordered_map the underlying storage is ordered, but callers here do not
 * rely on iteration order.
 */
template <typename KeyType, typename ValueType>
class Map {
private:
  std::map<KeyType, ValueType> map_;
public:
  // map[key] = val;  (default-constructs the value if the key is absent)
  ValueType& operator[] (const KeyType& k) {
    return map_[k];
  }

  // map.at(key) = val  (throws std::out_of_range if the key is absent)
  ValueType& at(const KeyType& k) {
    typename std::map<KeyType, ValueType>::iterator it;
    it = map_.find(k);
    if (it == map_.end()) throw std::out_of_range("Key not found");
    // BUG FIX: *it is a std::pair<const KeyType, ValueType>, not ValueType —
    // the original `return *it;` could not compile; return the mapped value.
    return it->second;
  }

  // val = map.at(key)  (throws, const overload)
  const ValueType& at(const KeyType& k) const {
    typename std::map<KeyType, ValueType>::const_iterator it;
    it = map_.find(k);
    if (it == map_.end()) throw std::out_of_range("Key not found");
    // BUG FIX: see the non-const overload above.
    return it->second;
  }
};
||||
|
||||
} // namespace matlab
|
||||
|
||||
#endif
|
||||
#endif
|
684
modules/matlab/include/opencv2/matlab/mxarray.hpp
Normal file
684
modules/matlab/include/opencv2/matlab/mxarray.hpp
Normal file
@ -0,0 +1,684 @@
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this
|
||||
// license. If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote
|
||||
// products derived from this software without specific prior written
|
||||
// permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is"
|
||||
// and any express or implied warranties, including, but not limited to, the
|
||||
// implied warranties of merchantability and fitness for a particular purpose
|
||||
// are disclaimed. In no event shall the Intel Corporation or contributors be
|
||||
// liable for any direct, indirect, incidental, special, exemplary, or
|
||||
// consequential damages (including, but not limited to, procurement of
|
||||
// substitute goods or services; loss of use, data, or profits; or business
|
||||
// interruption) however caused and on any theory of liability, whether in
|
||||
// contract, strict liability, or tort (including negligence or otherwise)
|
||||
// arising in any way out of the use of this software, even if advised of the
|
||||
// possibility of such damage.
|
||||
//
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
#ifndef OPENCV_MXARRAY_HPP_
|
||||
#define OPENCV_MXARRAY_HPP_
|
||||
|
||||
#include <mex.h>
|
||||
#include <stdint.h>
|
||||
#include <cstdarg>
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <sstream>
|
||||
#if __cplusplus > 201103
|
||||
#include <unordered_set>
|
||||
typedef std::unordered_set<std::string> StringSet;
|
||||
#else
|
||||
#include <set>
|
||||
typedef std::set<std::string> StringSet;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* All recent versions of Matlab ship with the MKL library which contains
|
||||
* a blas extension called mkl_?omatcopy(). This defines an out-of-place
|
||||
* copy and transpose operation.
|
||||
*
|
||||
* The mkl library is in ${MATLAB_ROOT}/bin/${MATLAB_MEXEXT}/libmkl...
|
||||
* Matlab does not ship headers for the mkl functions, so we define them
|
||||
* here.
|
||||
*
|
||||
*/
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
namespace matlab {
|
||||
// ----------------------------------------------------------------------------
|
||||
// PREDECLARATIONS
|
||||
// ----------------------------------------------------------------------------
|
||||
class MxArray;
|
||||
typedef std::vector<MxArray> MxArrayVector;
|
||||
|
||||
/*!
|
||||
* @brief raise error if condition fails
|
||||
*
|
||||
* This is a conditional wrapper for mexErrMsgTxt. If the conditional
|
||||
* expression fails, an error is raised and the mex function returns
|
||||
* to Matlab, otherwise this function does nothing
|
||||
*/
|
||||
static void conditionalError(bool expr, const std::string& str) {
|
||||
if (!expr) mexErrMsgTxt(std::string("condition failed: ").append(str).c_str());
|
||||
}
|
||||
|
||||
/*!
 * @brief raise an error
 *
 * This function is a wrapper around mexErrMsgTxt: it unconditionally
 * reports `str` as a Matlab error, returning control to Matlab.
 */
static void error(const std::string& str) {
  mexErrMsgTxt(str.c_str());
}
|
||||
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// MATLAB TRAITS
|
||||
// ----------------------------------------------------------------------------
|
||||
// Tag type: selects the unspecialized (unknown/unsupported) Traits mapping.
class DefaultTraits {};
// Tag type: used by conversion code (see Bridge) to request that the output
// keep ("inherit") the scalar depth of the source data.
class InheritType {};
||||
|
||||
template<typename _Tp = DefaultTraits> class Traits {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxUNKNOWN_CLASS;
|
||||
static const mxComplexity Complex = mxCOMPLEX;
|
||||
static const mxComplexity Real = mxREAL;
|
||||
static std::string ToString() { return "Unknown/Unsupported"; }
|
||||
};
|
||||
// bool
|
||||
template<> class Traits<bool> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxLOGICAL_CLASS;
|
||||
static std::string ToString() { return "boolean"; }
|
||||
};
|
||||
// uint8_t
|
||||
template<> class Traits<uint8_t> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxUINT8_CLASS;
|
||||
static std::string ToString() { return "uint8_t"; }
|
||||
};
|
||||
// int8_t
|
||||
template<> class Traits<int8_t> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxINT8_CLASS;
|
||||
static std::string ToString() { return "int8_t"; }
|
||||
};
|
||||
// uint16_t
|
||||
template<> class Traits<uint16_t> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxUINT16_CLASS;
|
||||
static std::string ToString() { return "uint16_t"; }
|
||||
};
|
||||
// int16_t
|
||||
template<> class Traits<int16_t> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxINT16_CLASS;
|
||||
static std::string ToString() { return "int16_t"; }
|
||||
};
|
||||
// uint32_t
|
||||
template<> class Traits<uint32_t> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxUINT32_CLASS;
|
||||
static std::string ToString() { return "uint32_t"; }
|
||||
};
|
||||
// int32_t
|
||||
template<> class Traits<int32_t> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxINT32_CLASS;
|
||||
static std::string ToString() { return "int32_t"; }
|
||||
};
|
||||
// uint64_t
|
||||
template<> class Traits<uint64_t> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxUINT64_CLASS;
|
||||
static std::string ToString() { return "uint64_t"; }
|
||||
};
|
||||
// int64_t
|
||||
template<> class Traits<int64_t> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxINT64_CLASS;
|
||||
static std::string ToString() { return "int64_t"; }
|
||||
};
|
||||
// float
|
||||
template<> class Traits<float> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxSINGLE_CLASS;
|
||||
static std::string ToString() { return "float"; }
|
||||
};
|
||||
// double
|
||||
template<> class Traits<double> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxDOUBLE_CLASS;
|
||||
static std::string ToString() { return "double"; }
|
||||
};
|
||||
// char
|
||||
template<> class Traits<char> {
|
||||
public:
|
||||
static const mxClassID ScalarType = mxCHAR_CLASS;
|
||||
static std::string ToString() { return "char"; }
|
||||
};
|
||||
// inherited type
|
||||
template<> class Traits<matlab::InheritType> {
|
||||
public:
|
||||
static std::string ToString() { return "Inherited type"; }
|
||||
};
|
||||
|
||||
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// MXARRAY
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
|
||||
/*!
|
||||
* @class MxArray
|
||||
* @brief A thin wrapper around Matlab's mxArray types
|
||||
*
|
||||
* MxArray provides a thin object oriented wrapper around Matlab's
|
||||
* native mxArray type which exposes most of the functionality of the
|
||||
* Matlab interface, but in a more C++ manner. MxArray objects are scoped,
|
||||
* so you can freely create and destroy them without worrying about memory
|
||||
* management. If you wish to pass the underlying mxArray* representation
|
||||
* back to Matlab as an lvalue, see the releaseOwnership() method
|
||||
*
|
||||
* MxArrays can be directly converted into OpenCV mat objects and std::string
|
||||
* objects, since there is a natural mapping between these types. More
|
||||
* complex types are mapped through the Bridge which does custom conversions
|
||||
* such as MxArray --> cv::Keypoints, etc
|
||||
*/
|
||||
class MxArray {
|
||||
private:
|
||||
mxArray* ptr_;
|
||||
bool owns_;
|
||||
|
||||
/*!
|
||||
* @brief swap all members of this and other
|
||||
*
|
||||
* the swap method is used by the assignment and move constructors
|
||||
* to swap the members of two MxArrays, leaving both in destructible states
|
||||
*/
|
||||
friend void swap(MxArray& first, MxArray& second) {
|
||||
using std::swap;
|
||||
swap(first.ptr_, second.ptr_);
|
||||
swap(first.owns_, second.owns_);
|
||||
}
|
||||
|
||||
void dealloc() {
|
||||
if (owns_ && ptr_) { mxDestroyArray(ptr_); ptr_ = NULL; owns_ = false; }
|
||||
}
|
||||
public:
|
||||
// --------------------------------------------------------------------------
|
||||
// CONSTRUCTORS
|
||||
// --------------------------------------------------------------------------
|
||||
/*!
|
||||
* @brief default constructor
|
||||
*
|
||||
* Construct a valid 0x0 matrix (so all other methods do not need validity checks)
|
||||
*/
|
||||
MxArray() : ptr_(mxCreateDoubleMatrix(0, 0, matlab::Traits<>::Real)), owns_(true) {}
|
||||
|
||||
/*!
|
||||
* @brief destructor
|
||||
*
|
||||
* The destructor deallocates any data allocated by mxCreate* methods only
|
||||
* if the object is owned
|
||||
*/
|
||||
virtual ~MxArray() {
|
||||
dealloc();
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief inheriting constructor
|
||||
*
|
||||
* Inherit an mxArray from Matlab. Don't claim ownership of the array,
|
||||
* just encapsulate it
|
||||
*/
|
||||
MxArray(const mxArray* ptr) : ptr_(const_cast<mxArray *>(ptr)), owns_(false) {}
|
||||
MxArray& operator=(const mxArray* ptr) {
|
||||
dealloc();
|
||||
ptr_ = const_cast<mxArray *>(ptr);
|
||||
owns_ = false;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief explicit typed constructor
|
||||
*
|
||||
* This constructor explicitly creates an MxArray of the given size and type.
|
||||
*/
|
||||
MxArray(size_t m, size_t n, size_t k, mxClassID id, mxComplexity com = matlab::Traits<>::Real)
|
||||
: ptr_(NULL), owns_(true) {
|
||||
mwSize dims[] = { static_cast<mwSize>(m), static_cast<mwSize>(n), static_cast<mwSize>(k) };
|
||||
ptr_ = mxCreateNumericArray(3, dims, id, com);
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief explicit tensor constructor
|
||||
*
|
||||
* Explicitly construct a tensor of given size and type. Since constructors cannot
|
||||
* be explicitly templated, this is a static factory method
|
||||
*/
|
||||
template <typename Scalar>
|
||||
static MxArray Tensor(size_t m, size_t n, size_t k=1) {
|
||||
return MxArray(m, n, k, matlab::Traits<Scalar>::ScalarType);
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief explicit matrix constructor
|
||||
*
|
||||
* Explicitly construct a matrix of given size and type. Since constructors cannot
|
||||
* be explicitly templated, this is a static factory method
|
||||
*/
|
||||
template <typename Scalar>
|
||||
static MxArray Matrix(size_t m, size_t n) {
|
||||
return MxArray(m, n, 1, matlab::Traits<Scalar>::ScalarType);
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief explicit vector constructor
|
||||
*
|
||||
* Explicitly construct a vector of given size and type. Since constructors cannot
|
||||
* be explicitly templated, this is a static factory method
|
||||
*/
|
||||
template <typename Scalar>
|
||||
static MxArray Vector(size_t m) {
|
||||
return MxArray(m, 1, 1, matlab::Traits<Scalar>::ScalarType);
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief explicit scalar constructor
|
||||
*
|
||||
* Explicitly construct a scalar of given type. Since constructors cannot
|
||||
* be explicitly templated, this is a static factory method
|
||||
*/
|
||||
template <typename ScalarType>
|
||||
static MxArray Scalar(ScalarType value = 0) {
|
||||
MxArray s(1, 1, 1, matlab::Traits<ScalarType>::ScalarType);
|
||||
s.real<ScalarType>()[0] = value;
|
||||
return s;
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief copy constructor
|
||||
*
|
||||
* All copies are deep copies. If you have a C++11 compatible compiler, prefer
|
||||
* move construction to copy construction
|
||||
*/
|
||||
MxArray(const MxArray& other) : ptr_(mxDuplicateArray(other.ptr_)), owns_(true) {}
|
||||
|
||||
/*!
|
||||
* @brief copy-and-swap assignment
|
||||
*
|
||||
* This assignment operator uses the copy and swap idiom to provide a strong
|
||||
* exception guarantee when swapping two objects.
|
||||
*
|
||||
* Note in particular that the other MxArray is passed by value, thus invoking
|
||||
* the copy constructor which performs a deep copy of the input. The members of
|
||||
* this and other are then swapped
|
||||
*/
|
||||
MxArray& operator=(MxArray other) {
|
||||
swap(*this, other);
|
||||
return *this;
|
||||
}
|
||||
#if __cplusplus >= 201103L
|
||||
/*
|
||||
* @brief C++11 move constructor
|
||||
*
|
||||
* When C++11 support is available, move construction is used to move returns
|
||||
* out of functions, etc. This is much fast than copy construction, since the
|
||||
* move constructed object replaced itself with a default constructed MxArray,
|
||||
* which is of size 0 x 0.
|
||||
*/
|
||||
MxArray(MxArray&& other) : MxArray() {
|
||||
swap(*this, other);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* @brief release ownership to allow return into Matlab workspace
|
||||
*
|
||||
* MxArray is not directly convertible back to mxArray types through assignment
|
||||
* because the MxArray may have been allocated on the free store, making it impossible
|
||||
* to know whether the returned pointer will be released by someone else or not.
|
||||
*
|
||||
* Since Matlab requires mxArrays be passed back into the workspace, the only way
|
||||
* to achieve that is through this function, which explicitly releases ownership
|
||||
* of the object, assuming the Matlab interpreter receving the object will delete
|
||||
* it at a later time
|
||||
*
|
||||
* e.g.
|
||||
* {
|
||||
* MxArray A = MxArray::Matrix<double>(5, 5); // allocates memory
|
||||
* MxArray B = MxArray::Matrix<double>(5, 5); // ditto
|
||||
* plhs[0] = A; // not allowed!!
|
||||
* plhs[0] = A.releaseOwnership(); // makes explicit that ownership is being released
|
||||
* } // end of scope. B is released, A isn't
|
||||
*
|
||||
*/
|
||||
mxArray* releaseOwnership() {
|
||||
owns_ = false;
|
||||
return ptr_;
|
||||
}
|
||||
|
||||
MxArray field(const std::string& name) { return MxArray(mxGetField(ptr_, 0, name.c_str())); }
|
||||
|
||||
template <typename Scalar>
|
||||
Scalar* real() { return static_cast<Scalar *>(mxGetData(ptr_)); }
|
||||
|
||||
template <typename Scalar>
|
||||
Scalar* imag() { return static_cast<Scalar *>(mxGetImagData(ptr_)); }
|
||||
|
||||
template <typename Scalar>
|
||||
const Scalar* real() const { return static_cast<const Scalar *>(mxGetData(ptr_)); }
|
||||
|
||||
template <typename Scalar>
|
||||
const Scalar* imag() const { return static_cast<const Scalar *>(mxGetData(ptr_)); }
|
||||
|
||||
template <typename Scalar>
|
||||
Scalar scalar() const { return static_cast<Scalar *>(mxGetData(ptr_))[0]; }
|
||||
|
||||
std::string toString() const {
|
||||
conditionalError(isString(), "Attempted to convert non-string type to string");
|
||||
std::string str(size(), '\0');
|
||||
mxGetString(ptr_, const_cast<char *>(str.data()), str.size()+1);
|
||||
return str;
|
||||
}
|
||||
|
||||
size_t size() const { return mxGetNumberOfElements(ptr_); }
|
||||
bool empty() const { return size() == 0; }
|
||||
size_t rows() const { return mxGetDimensions(ptr_)[0]; }
|
||||
size_t cols() const { return mxGetDimensions(ptr_)[1]; }
|
||||
size_t channels() const { return (mxGetNumberOfDimensions(ptr_) > 2) ? mxGetDimensions(ptr_)[2] : 1; }
|
||||
bool isComplex() const { return mxIsComplex(ptr_); }
|
||||
bool isNumeric() const { return mxIsNumeric(ptr_); }
|
||||
bool isLogical() const { return mxIsLogical(ptr_); }
|
||||
bool isString() const { return mxIsChar(ptr_); }
|
||||
bool isCell() const { return mxIsCell(ptr_); }
|
||||
bool isStructure() const { return mxIsStruct(ptr_); }
|
||||
bool isClass(const std::string& name) const { return mxIsClass(ptr_, name.c_str()); }
|
||||
std::string className() const { return std::string(mxGetClassName(ptr_)); }
|
||||
mxClassID ID() const { return mxGetClassID(ptr_); }
|
||||
|
||||
};
|
||||
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// ARGUMENT PARSER
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
/*! @class ArgumentParser
|
||||
* @brief parses inputs to a method and resolves the argument names.
|
||||
*
|
||||
* The ArgumentParser resolves the inputs to a method. It checks that all
|
||||
* required arguments are specified and also allows named optional arguments.
|
||||
* For example, the C++ function:
|
||||
* void randn(Mat& mat, Mat& mean=Mat(), Mat& std=Mat());
|
||||
* could be called in Matlab using any of the following signatures:
|
||||
* \code
|
||||
* out = randn(in);
|
||||
* out = randn(in, 0, 1);
|
||||
* out = randn(in, 'mean', 0, 'std', 1);
|
||||
* \endcode
|
||||
*
|
||||
* ArgumentParser also enables function overloading by allowing users
|
||||
* to add variants to a method. For example, there may be two C++ sum() methods:
|
||||
* \code
|
||||
* double sum(Mat& mat); % sum elements of a matrix
|
||||
* Mat sum(Mat& A, Mat& B); % add two matrices
|
||||
* \endcode
|
||||
*
|
||||
* by adding two variants to ArgumentParser, the correct underlying sum
|
||||
* method can be called. If the function call is ambiguous, the
|
||||
* ArgumentParser will fail with an error message.
|
||||
*
|
||||
* The previous example could be parsed as:
|
||||
* \code
|
||||
* // set up the Argument parser
|
||||
* ArgumentParser arguments;
|
||||
* arguments.addVariant("elementwise", 1);
|
||||
* arguments.addVariant("matrix", 2);
|
||||
*
|
||||
* // parse the arguments
|
||||
* std::vector<MxArray> inputs;
|
||||
* inputs = arguments.parse(std::vector<MxArray>(prhs, prhs+nrhs));
|
||||
*
|
||||
* // if we get here, one unique variant is valid
|
||||
* if (arguments.variantIs("elementwise")) {
|
||||
* // call elementwise sum()
|
||||
* }
|
||||
* \endcode
|
||||
*/
|
||||
class ArgumentParser {
|
||||
private:
|
||||
struct Variant;
|
||||
typedef std::string String;
|
||||
typedef std::vector<std::string> StringVector;
|
||||
typedef std::vector<size_t> IndexVector;
|
||||
typedef std::vector<Variant> VariantVector;
|
||||
|
||||
/* @class Variant
|
||||
* @brief Describes a variant of arguments to a method
|
||||
*
|
||||
* When addVariant() is called on an instance to ArgumentParser, this class
|
||||
* holds the the information that decribes that variant. The parse() method
|
||||
* of ArgumentParser then attempts to match a Variant, given a set of
|
||||
* inputs for a method invocation.
|
||||
*/
|
||||
class Variant {
|
||||
private:
|
||||
String name_;
|
||||
size_t Nreq_;
|
||||
size_t Nopt_;
|
||||
StringVector keys_;
|
||||
IndexVector order_;
|
||||
bool valid_;
|
||||
size_t nparsed_;
|
||||
size_t nkeys_;
|
||||
size_t working_opt_;
|
||||
bool expecting_val_;
|
||||
bool using_named_;
|
||||
size_t find(const String& key) const {
|
||||
return std::find(keys_.begin(), keys_.end(), key) - keys_.begin();
|
||||
}
|
||||
public:
|
||||
/*! @brief default constructor */
|
||||
Variant() : Nreq_(0), Nopt_(0), valid_(false) {}
|
||||
/*! @brief construct a new variant spec */
|
||||
Variant(const String& name, size_t Nreq, size_t Nopt, const StringVector& keys)
|
||||
: name_(name), Nreq_(Nreq), Nopt_(Nopt), keys_(keys),
|
||||
order_(Nreq+Nopt, Nreq+2*Nopt), valid_(true), nparsed_(0), nkeys_(0),
|
||||
working_opt_(0), expecting_val_(false), using_named_(false) {}
|
||||
/*! @brief the name of the variant */
|
||||
String name() const { return name_; }
|
||||
/*! @brief return the total number of arguments the variant can take */
|
||||
size_t size() const { return Nreq_ + Nopt_; }
|
||||
/*! @brief has the variant been fulfilled? */
|
||||
bool fulfilled() const { return (valid_ && nparsed_ >= Nreq_ && !expecting_val_); }
|
||||
/*! @brief is the variant in a valid state (though not necessarily fulfilled) */
|
||||
bool valid() const { return valid_; }
|
||||
/*! @brief check if the named argument exists in the variant */
|
||||
bool exist(const String& key) const { return find(key) != keys_.size(); }
|
||||
/*! @brief retrieve the order mapping raw inputs to their position in the variant */
|
||||
const IndexVector& order() const { return order_; }
|
||||
size_t order(size_t n) const { return order_[n]; }
|
||||
/*! @brief attempt to parse the next argument as a value */
|
||||
bool parseNextAsValue() {
|
||||
if (!valid_) {}
|
||||
else if ((using_named_ && !expecting_val_) || (nparsed_-nkeys_ == Nreq_+Nopt_)) { valid_ = false; }
|
||||
else if (nparsed_ < Nreq_) { order_[nparsed_] = nparsed_; }
|
||||
else if (!using_named_) { order_[nparsed_] = nparsed_; }
|
||||
else if (using_named_ && expecting_val_) { order_[Nreq_ + working_opt_] = nparsed_; }
|
||||
nparsed_++;
|
||||
expecting_val_ = false;
|
||||
return valid_;
|
||||
}
|
||||
/*! @biref attempt to parse the next argument as a name (key) */
|
||||
bool parseNextAsKey(const String& key) {
|
||||
if (!valid_) {}
|
||||
else if ((nparsed_ < Nreq_) || (nparsed_-nkeys_ == Nreq_+Nopt_)) { valid_ = false; }
|
||||
else if (using_named_ && expecting_val_) { valid_ = false; }
|
||||
else if ((working_opt_ = find(key)) == keys_.size()) { valid_ = false; }
|
||||
else { using_named_ = true; expecting_val_ = true; nkeys_++; nparsed_++; }
|
||||
return valid_;
|
||||
}
|
||||
String toString(const String& method_name="f") const {
|
||||
int req_begin = 0, req_end = 0, opt_begin = 0, opt_end = 0;
|
||||
std::ostringstream s;
|
||||
// f(...)
|
||||
s << method_name << "(";
|
||||
// required arguments
|
||||
req_begin = s.str().size();
|
||||
for (size_t n = 0; n < Nreq_; ++n) { s << "src" << n+1 << (n != Nreq_-1 ? ", " : ""); }
|
||||
req_end = s.str().size();
|
||||
if (Nreq_ && Nopt_) s << ", ";
|
||||
// optional arguments
|
||||
opt_begin = s.str().size();
|
||||
for (size_t n = 0; n < keys_.size(); ++n) { s << "'" << keys_[n] << "', " << keys_[n] << (n != Nopt_-1 ? ", " : ""); }
|
||||
opt_end = s.str().size();
|
||||
s << ");";
|
||||
if (Nreq_ + Nopt_ == 0) return s.str();
|
||||
// underscores
|
||||
String under = String(req_begin, ' ') + String(req_end-req_begin, '-')
|
||||
+ String(std::max(opt_begin-req_end,0), ' ') + String(opt_end-opt_begin, '-');
|
||||
s << "\n" << under;
|
||||
// required and optional sets
|
||||
String req_set(req_end-req_begin, ' ');
|
||||
String opt_set(opt_end-opt_begin, ' ');
|
||||
if (!req_set.empty() && req_set.size() < 8) req_set.replace((req_set.size()-3)/2, 3, "req");
|
||||
if (req_set.size() > 7) req_set.replace((req_set.size()-8)/2, 8, "required");
|
||||
if (!opt_set.empty() && opt_set.size() < 8) opt_set.replace((opt_set.size()-3)/2, 3, "opt");
|
||||
if (opt_set.size() > 7) opt_set.replace((opt_set.size()-8)/2, 8, "optional");
|
||||
String set = String(req_begin, ' ') + req_set + String(std::max(opt_begin-req_end,0), ' ') + opt_set;
|
||||
s << "\n" << set;
|
||||
return s.str();
|
||||
}
|
||||
};
|
||||
/*! @brief given an input and output vector of arguments, and a variant spec, sort */
|
||||
void sortArguments(Variant& v, MxArrayVector& in, MxArrayVector& out) {
|
||||
// allocate the output array with ALL arguments
|
||||
out.resize(v.size());
|
||||
// reorder the inputs based on the variant ordering
|
||||
for (size_t n = 0; n < v.size(); ++n) {
|
||||
if (v.order(n) >= in.size()) continue;
|
||||
swap(in[v.order(n)], out[n]);
|
||||
}
|
||||
}
|
||||
VariantVector variants_;
|
||||
String valid_;
|
||||
String method_name_;
|
||||
public:
|
||||
ArgumentParser(const String& method_name) : method_name_(method_name) {}
|
||||
|
||||
/*! @brief add a function call variant to the parser
|
||||
*
|
||||
* Adds a function-call signature to the parser. The function call *must* be
|
||||
* unique either in its number of arguments, or in the named-syntax.
|
||||
* Currently this function does not check whether that invariant stands true.
|
||||
*
|
||||
* This function is variadic. If should be called as follows:
|
||||
* addVariant(2, 2, 'opt_1_name', 'opt_2_name');
|
||||
*/
|
||||
void addVariant(const String& name, size_t nreq, size_t nopt = 0, ...) {
|
||||
StringVector keys;
|
||||
va_list opt;
|
||||
va_start(opt, nopt);
|
||||
for (size_t n = 0; n < nopt; ++n) keys.push_back(va_arg(opt, const char*));
|
||||
addVariant(name, nreq, nopt, keys);
|
||||
}
|
||||
void addVariant(const String& name, size_t nreq, size_t nopt, StringVector keys) {
|
||||
variants_.push_back(Variant(name, nreq, nopt, keys));
|
||||
}
|
||||
|
||||
/*! @brief check if the valid variant is the key name */
|
||||
bool variantIs(const String& name) {
|
||||
return name.compare(valid_) == 0;
|
||||
}
|
||||
|
||||
/*! @brief parse a vector of input arguments
|
||||
*
|
||||
* This method parses a vector of input arguments, attempting to match them
|
||||
* to a Variant spec. For each input, the method attempts to cull any
|
||||
* Variants which don't match the given inputs so far.
|
||||
*
|
||||
* Once all inputs have been parsed, if there is one unique spec remaining,
|
||||
* the output MxArray vector gets populated with the arguments, with named
|
||||
* arguments removed. Any optional arguments that have not been encountered
|
||||
* are set to an empty array.
|
||||
*
|
||||
* If multiple variants or no variants match the given call, an error
|
||||
* message is emitted
|
||||
*/
|
||||
MxArrayVector parse(const MxArrayVector& inputs) {
|
||||
// allocate the outputs
|
||||
String variant_string;
|
||||
MxArrayVector outputs;
|
||||
VariantVector candidates = variants_;
|
||||
|
||||
// iterate over the inputs, attempting to match a variant
|
||||
for (MxArrayVector::const_iterator input = inputs.begin(); input != inputs.end(); ++input) {
|
||||
String name = input->isString() ? input->toString() : String();
|
||||
for (VariantVector::iterator candidate = candidates.begin(); candidate < candidates.end(); ++candidate) {
|
||||
candidate->exist(name) ? candidate->parseNextAsKey(name) : candidate->parseNextAsValue();
|
||||
}
|
||||
}
|
||||
|
||||
// make sure the candidates have been fulfilled
|
||||
for (VariantVector::iterator candidate = candidates.begin(); candidate < candidates.end(); ++candidate) {
|
||||
if (!candidate->fulfilled()) candidate = candidates.erase(candidate)--;
|
||||
}
|
||||
|
||||
// if there is not a unique candidate, throw an error
|
||||
for (VariantVector::iterator variant = variants_.begin(); variant != variants_.end(); ++variant) {
|
||||
variant_string += "\n" + variant->toString(method_name_);
|
||||
}
|
||||
|
||||
// if there is not a unique candidate, throw an error
|
||||
if (candidates.size() > 1) {
|
||||
error(String("Call to method is ambiguous. Valid variants are:")
|
||||
.append(variant_string).append("\nUse named arguments to disambiguate call"));
|
||||
}
|
||||
if (candidates.size() == 0) {
|
||||
error(String("No matching method signatures for given arguments. Valid variants are:").append(variant_string));
|
||||
}
|
||||
|
||||
// Unique candidate!
|
||||
valid_ = candidates[0].name();
|
||||
sortArguments(candidates[0], const_cast<MxArrayVector&>(inputs), outputs);
|
||||
return outputs;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace matlab
|
||||
|
||||
#endif
|
141
modules/matlab/include/opencv2/matlab/transpose.hpp
Normal file
141
modules/matlab/include/opencv2/matlab/transpose.hpp
Normal file
@ -0,0 +1,141 @@
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this
|
||||
// license. If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote
|
||||
// products derived from this software without specific prior written
|
||||
// permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is"
|
||||
// and any express or implied warranties, including, but not limited to, the
|
||||
// implied warranties of merchantability and fitness for a particular purpose
|
||||
// are disclaimed. In no event shall the Intel Corporation or contributors be
|
||||
// liable for any direct, indirect, incidental, special, exemplary, or
|
||||
// consequential damages (including, but not limited to, procurement of
|
||||
// substitute goods or services; loss of use, data, or profits; or business
|
||||
// interruption) however caused and on any theory of liability, whether in
|
||||
// contract, strict liability, or tort (including negligence or otherwise)
|
||||
// arising in any way out of the use of this software, even if advised of the
|
||||
// possibility of such damage.
|
||||
//
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
#ifndef OPENCV_TRANSPOSE_HPP_
|
||||
#define OPENCV_TRANSPOSE_HPP_
|
||||
|
||||
/*
 * Transpose (and cast) a small M x N sub-block, M, N <= 4.
 * Reads src[m + n*lda], writes dst[n + m*ldb]; the block is staged through
 * a contiguous 16-element buffer with a fixed stride of 4.
 */
template <typename InputScalar, typename OutputScalar>
void transposeBlock(const size_t M, const size_t N, const InputScalar* src, size_t lda, OutputScalar* dst, size_t ldb) {
  InputScalar buffer[16];
  // gather the source block into the contiguous buffer
  for (size_t col = 0; col < N; ++col) {
    for (size_t row = 0; row < M; ++row) buffer[row + 4*col] = src[row + col*lda];
  }
  // scatter the buffer back out transposed
  for (size_t row = 0; row < M; ++row) {
    for (size_t col = 0; col < N; ++col) dst[col + row*ldb] = buffer[row + 4*col];
  }
}
|
||||
|
||||
/*
 * Transpose (and cast) a full 4x4 block: dst[r*ldb + c] = src[c*lda + r].
 * The generic version stages the block through a contiguous cache; an
 * SSE2-accelerated float specialization may be provided elsewhere.
 */
template <typename InputScalar, typename OutputScalar>
void transpose4x4(const InputScalar* src, size_t lda, OutputScalar* dst, size_t ldb) {
  InputScalar cache[16];
  // gather the four source rows contiguously
  for (size_t r = 0; r < 4; ++r)
    for (size_t c = 0; c < 4; ++c)
      cache[4*r + c] = src[r*lda + c];
  // write the rows back out transposed
  for (size_t r = 0; r < 4; ++r)
    for (size_t c = 0; c < 4; ++c)
      dst[r*ldb + c] = cache[4*c + r];
}
|
||||
|
||||
|
||||
/*
|
||||
* Vanilla copy, transpose and cast
|
||||
*/
|
||||
/*
 * Vanilla copy, transpose and cast.
 *
 * Tiles the input into 4x4 blocks (plus partial edge blocks) and transposes
 * each via transposeBlock(). 'major' selects whether (M, N) are interpreted
 * row- or column-major: the F (fast/leading) dimension is N for 'R' and M
 * otherwise.
 */
template <typename InputScalar, typename OutputScalar>
void gemt(const char major, const size_t M, const size_t N, const InputScalar* a, size_t lda, OutputScalar* b, size_t ldb) {

  // 1x1 transpose is just copy
  if (M == 1 && N == 1) { *b = *a; return; }

  // get the interior 4x4 blocks, and the extra skirting
  const size_t Fblock = (major == 'R') ? N/4 : M/4;
  const size_t Frem   = (major == 'R') ? N%4 : M%4;
  const size_t Sblock = (major == 'R') ? M/4 : N/4;
  const size_t Srem   = (major == 'R') ? M%4 : N%4;

  // if less than 4x4, invoke the block transpose immediately
  if (M < 4 && N < 4) { transposeBlock(Frem, Srem, a, lda, b, ldb); return; }

  // transpose 4x4 blocks
  const InputScalar* aptr = a;
  OutputScalar* bptr = b;
  for (size_t second = 0; second < Sblock; ++second) {
    // BUGFIX: each strip spans 4 rows/columns, so the strip base must advance
    // by 4*second (the original advanced by 'second', overlapping the strips;
    // the trailing-block code below already used the correct 4*Sblock stride)
    aptr = a + 4*second*lda;
    bptr = b + 4*second;
    for (size_t first = 0; first < Fblock; ++first) {
      transposeBlock(4, 4, aptr, lda, bptr, ldb);
      aptr += 4;
      bptr += 4*ldb;
    }
    // transpose trailing blocks on primary dimension
    transposeBlock(Frem, 4, aptr, lda, bptr, ldb);
  }
  // transpose trailing blocks on secondary dimension
  aptr = a + 4*Sblock*lda;
  bptr = b + 4*Sblock;
  for (size_t first = 0; first < Fblock; ++first) {
    transposeBlock(4, Srem, aptr, lda, bptr, ldb);
    aptr += 4;
    bptr += 4*ldb;
  }
  // transpose bottom right-hand corner
  transposeBlock(Frem, Srem, aptr, lda, bptr, ldb);
}
|
||||
|
||||
#ifdef __SSE2__
|
||||
/*
|
||||
* SSE2 supported fast copy, transpose and cast
|
||||
*/
|
||||
#include <emmintrin.h>
|
||||
|
||||
template <>
|
||||
void transpose4x4<float, float>(const float* src, size_t lda, float* dst, size_t ldb) {
|
||||
__m128 row0, row1, row2, row3;
|
||||
row0 = _mm_loadu_ps(src);
|
||||
row1 = _mm_loadu_ps(src+lda);
|
||||
row2 = _mm_loadu_ps(src+2*lda);
|
||||
row3 = _mm_loadu_ps(src+3*lda);
|
||||
_MM_TRANSPOSE4_PS(row0, row1, row2, row3);
|
||||
_mm_storeu_ps(dst, row0);
|
||||
_mm_storeu_ps(dst+ldb, row1);
|
||||
_mm_storeu_ps(dst+2*ldb, row2);
|
||||
_mm_storeu_ps(dst+3*ldb, row3);
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
23
modules/matlab/test/CMakeLists.txt
Normal file
23
modules/matlab/test/CMakeLists.txt
Normal file
@ -0,0 +1,23 @@
|
||||
set(TEST_PROXY ${CMAKE_CURRENT_BINARY_DIR}/test.proxy)
file(REMOVE ${TEST_PROXY})

# generate
# stage the Matlab test sources into the build tree, and touch a proxy file
# so the copy step only reruns when the proxy is out of date
add_custom_command(
  OUTPUT ${TEST_PROXY}
  COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/OpenCVTest.m ${CMAKE_CURRENT_BINARY_DIR}
  COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/testsuite.m ${CMAKE_CURRENT_BINARY_DIR}
  COMMAND ${CMAKE_COMMAND} -E touch ${TEST_PROXY}
  COMMENT "Building Matlab tests"
)

# targets
# opencv_test_matlab --> staged test sources (and the module itself)
add_custom_target(opencv_test_matlab ALL DEPENDS ${TEST_PROXY})
add_dependencies(opencv_test_matlab ${the_module})

# run the matlab test suite
# BUGFIX: the keyword form of add_test requires the NAME keyword; the original
# call mixed the legacy positional signature with COMMAND/WORKING_DIRECTORY,
# which would have passed those keywords through as literal command arguments.
add_test(NAME opencv_test_matlab
  COMMAND ${MATLAB_BIN} "-nodisplay" "-r" "testsuite.m"
  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
|
166
modules/matlab/test/OpenCVTest.m
Normal file
166
modules/matlab/test/OpenCVTest.m
Normal file
@ -0,0 +1,166 @@
|
||||
% Matlab binding test cases
|
||||
% Uses Matlab's builtin testing framework
|
||||
classdef OpenCVTest < matlab.unittest.TestCase
|
||||
|
||||
methods(Test)
|
||||
|
||||
% -------------------------------------------------------------------------
|
||||
% EXCEPTIONS
|
||||
% Check that errors and exceptions are thrown correctly
|
||||
% -------------------------------------------------------------------------
|
||||
|
||||
% check that std exception is thrown
|
||||
function stdException(testcase)
|
||||
try
|
||||
std_exception();
|
||||
testcase.verifyFail();
|
||||
catch
|
||||
% TODO: Catch more specific exception
|
||||
testcase.verifyTrue(true);
|
||||
end
|
||||
end
|
||||
|
||||
% check that OpenCV exceptions are correctly caught
|
||||
function cvException(testcase)
|
||||
try
|
||||
cv_exception();
|
||||
testcase.verifyFail();
|
||||
catch
|
||||
% TODO: Catch more specific exception
|
||||
testcase.verifyTrue(true);
|
||||
end
|
||||
end
|
||||
|
||||
% check that all exceptions are caught
|
||||
function allException(testcase)
|
||||
try
|
||||
exception();
|
||||
testcase.verifyFail();
|
||||
catch
|
||||
% TODO: Catch more specific exception
|
||||
testcase.verifyTrue(true);
|
||||
end
|
||||
end
|
||||
|
||||
% -------------------------------------------------------------------------
|
||||
% SIZES AND FILLS
|
||||
% Check that matrices are correctly filled and resized
|
||||
% -------------------------------------------------------------------------
|
||||
|
||||
% check that a matrix is correctly filled with random numbers
|
||||
function randomFill(testcase)
|
||||
sz = [7 11];
|
||||
mat = zeros(sz);
|
||||
mat = cv.randn(mat, 0, 1);
|
||||
testcase.verifyEqual(size(mat), sz, 'Matrix should not change size');
|
||||
testcase.verifyNotEqual(mat, zeros(sz), 'Matrix should be nonzero');
|
||||
end
|
||||
|
||||
function transpose(testcase)
|
||||
m = randn(19, 81);
|
||||
mt1 = transpose(m);
|
||||
mt2 = cv.transpose(m);
|
||||
testcase.verifyEqual(size(mt1), size(mt2), 'Matrix transposed to incorrect dimensionality');
|
||||
testcase.verifyLessThan(norm(mt1 - mt2), 1e-8, 'Too much precision lost in tranposition');
|
||||
end
|
||||
|
||||
% multiple return
|
||||
function multipleReturn(testcase)
|
||||
A = randn(10);
|
||||
A = A'*A;
|
||||
[V1, D1] = eig(A); D1 = diag(D1);
|
||||
[~, D2, V2] = cv.eigen(A);
|
||||
testcase.verifyLessThan(norm(V1 - V2), 1e-6, 'Too much precision lost in eigenvectors');
|
||||
testcase.verifyLessThan(norm(D1 - D2), 1e-6, 'Too much precision lost in eigenvalues');
|
||||
end
|
||||
|
||||
% complex output from SVD
|
||||
function complexOutputSVD(testcase)
|
||||
A = randn(10);
|
||||
[V1, D1] = eig(A);
|
||||
[~, D2, V2] = cv.eigen(A);
|
||||
testcase.verifyTrue(~isreal(V2) && size(V2,3) == 1, 'Output should be complex');
|
||||
testcase.verifyLessThan(norm(V1 - V2), 1e-6, 'Too much precision lost in eigenvectors');
|
||||
end
|
||||
|
||||
% complex output from Fourier Transform
|
||||
function complexOutputFFT(testcase)
|
||||
A = randn(10);
|
||||
F1 = fft2(A);
|
||||
F2 = cv.dft(A, cv.DFT_COMPLEX_OUTPUT);
|
||||
testcase.verifyTrue(~isreal(F2) && size(F2,3) == 1, 'Output should be complex');
|
||||
testcase.verifyLessThan(norm(F1 - F2), 1e-6, 'Too much precision lost in eigenvectors');
|
||||
end
|
||||
|
||||
% -------------------------------------------------------------------------
|
||||
% TYPE CASTS
|
||||
% Check that types are correctly cast
|
||||
% -------------------------------------------------------------------------
|
||||
|
||||
% -------------------------------------------------------------------------
|
||||
% PRECISION
|
||||
% Check that basic operations are performed with sufficient precision
|
||||
% -------------------------------------------------------------------------
|
||||
|
||||
% check that summing elements is within reasonable precision
|
||||
function sumElements(testcase)
|
||||
a = randn(5000);
|
||||
b = sum(a(:));
|
||||
c = cv.sum(a);
|
||||
testcase.verifyLessThan(norm(b - c), 1e-8, 'Matrix reduction with insufficient precision');
|
||||
end
|
||||
|
||||
|
||||
% check that adding two matrices is within reasonable precision
|
||||
function addPrecision(testcase)
|
||||
a = randn(50);
|
||||
b = randn(50);
|
||||
c = a+b;
|
||||
d = cv.add(a, b);
|
||||
testcase.verifyLessThan(norm(c - d), 1e-8, 'Matrices are added with insufficient precision');
|
||||
end
|
||||
|
||||
% check that performing gemm is within reasonable precision
|
||||
function gemmPrecision(testcase)
|
||||
a = randn(10, 50);
|
||||
b = randn(50, 10);
|
||||
c = randn(10, 10);
|
||||
alpha = 2.71828;
|
||||
gamma = 1.61803;
|
||||
d = alpha*a*b + gamma*c;
|
||||
e = cv.gemm(a, b, alpha, c, gamma);
|
||||
testcase.verifyLessThan(norm(d - e), 1e-8, 'Matrices are multiplied with insufficient precision');
|
||||
end
|
||||
|
||||
|
||||
% -------------------------------------------------------------------------
|
||||
% MISCELLANEOUS
|
||||
% Miscellaneous tests
|
||||
% -------------------------------------------------------------------------
|
||||
|
||||
% check that cv::waitKey waits for at least specified time
|
||||
function waitKey(testcase)
|
||||
tic();
|
||||
cv.waitKey(500);
|
||||
elapsed = toc();
|
||||
testcase.verifyGreaterThan(elapsed, 0.5, 'Elapsed time should be at least 0.5 seconds');
|
||||
end
|
||||
|
||||
% check that highgui window can be created and destroyed
|
||||
function createAndDestroyWindow(testcase)
|
||||
try
|
||||
cv.namedWindow('test window');
|
||||
catch
|
||||
testcase.verifyFail('could not create window');
|
||||
end
|
||||
|
||||
try
|
||||
cv.destroyWindow('test window');
|
||||
catch
|
||||
testcase.verifyFail('could not destroy window');
|
||||
end
|
||||
testcase.verifyTrue(true);
|
||||
end
|
||||
|
||||
end
|
||||
end
|
33
modules/matlab/test/cv_exception.cpp
Normal file
33
modules/matlab/test/cv_exception.cpp
Normal file
@ -0,0 +1,33 @@
|
||||
/*
|
||||
* file: exception.cpp
|
||||
* author: Hilton Bristow
|
||||
* date: Wed, 19 Jun 2013 11:15:15
|
||||
*
|
||||
* See LICENCE for full modification and redistribution details.
|
||||
* Copyright 2013 The OpenCV Foundation
|
||||
*/
|
||||
#include <exception>
|
||||
#include <opencv2/core.hpp>
|
||||
#include "mex.h"
|
||||
|
||||
/*
|
||||
* exception
|
||||
* Gateway routine
|
||||
* nlhs - number of return arguments
|
||||
* plhs - pointers to return arguments
|
||||
* nrhs - number of input arguments
|
||||
* prhs - pointers to input arguments
|
||||
*/
|
||||
void mexFunction(int nlhs, mxArray* plhs[],
|
||||
int nrhs, const mxArray* prhs[]) {
|
||||
|
||||
// call the opencv function
|
||||
// [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
|
||||
try {
|
||||
throw cv::Exception(-1, "OpenCV exception thrown", __func__, __FILE__, __LINE__);
|
||||
} catch(cv::Exception& e) {
|
||||
mexErrMsgTxt(e.what());
|
||||
} catch(...) {
|
||||
mexErrMsgTxt("Incorrect exception caught!");
|
||||
}
|
||||
}
|
29
modules/matlab/test/exception.cpp
Normal file
29
modules/matlab/test/exception.cpp
Normal file
@ -0,0 +1,29 @@
|
||||
/*
|
||||
* file: exception.cpp
|
||||
* author: Hilton Bristow
|
||||
* date: Wed, 19 Jun 2013 11:15:15
|
||||
*
|
||||
* See LICENCE for full modification and redistribution details.
|
||||
* Copyright 2013 The OpenCV Foundation
|
||||
*/
|
||||
#include "mex.h"
|
||||
|
||||
/*
|
||||
* exception
|
||||
* Gateway routine
|
||||
* nlhs - number of return arguments
|
||||
* plhs - pointers to return arguments
|
||||
* nrhs - number of input arguments
|
||||
* prhs - pointers to input arguments
|
||||
*/
|
||||
void mexFunction(int nlhs, mxArray* plhs[],
|
||||
int nrhs, const mxArray* prhs[]) {
|
||||
|
||||
// call the opencv function
|
||||
// [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
|
||||
try {
|
||||
throw 1;
|
||||
} catch(...) {
|
||||
mexErrMsgTxt("Uncaught exception occurred!");
|
||||
}
|
||||
}
|
15
modules/matlab/test/help.m
Normal file
15
modules/matlab/test/help.m
Normal file
@ -0,0 +1,15 @@
|
||||
function help()
|
||||
%CV.HELP display help information for the OpenCV Toolbox
|
||||
%
|
||||
% Calling:
|
||||
% >> cv.help();
|
||||
%
|
||||
% is equivalent to calling:
|
||||
% >> help cv;
|
||||
%
|
||||
% It displays high-level usage information about the OpenCV toolbox
|
||||
% along with resources to find out more information.
|
||||
%
|
||||
% See also: cv.buildInformation
|
||||
help('cv');
|
||||
end
|
32
modules/matlab/test/std_exception.cpp
Normal file
32
modules/matlab/test/std_exception.cpp
Normal file
@ -0,0 +1,32 @@
|
||||
/*
|
||||
* file: exception.cpp
|
||||
* author: Hilton Bristow
|
||||
* date: Wed, 19 Jun 2013 11:15:15
|
||||
*
|
||||
* See LICENCE for full modification and redistribution details.
|
||||
* Copyright 2013 The OpenCV Foundation
|
||||
*/
|
||||
#include <exception>
|
||||
#include "mex.h"
|
||||
|
||||
/*
|
||||
* exception
|
||||
* Gateway routine
|
||||
* nlhs - number of return arguments
|
||||
* plhs - pointers to return arguments
|
||||
* nrhs - number of input arguments
|
||||
* prhs - pointers to input arguments
|
||||
*/
|
||||
void mexFunction(int nlhs, mxArray* plhs[],
|
||||
int nrhs, const mxArray* prhs[]) {
|
||||
|
||||
// call the opencv function
|
||||
// [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
|
||||
try {
|
||||
throw std::exception();
|
||||
} catch(std::exception& e) {
|
||||
mexErrMsgTxt(e.what());
|
||||
} catch(...) {
|
||||
mexErrMsgTxt("Incorrect exception caught!");
|
||||
}
|
||||
}
|
31
modules/matlab/test/test_compiler.cpp
Normal file
31
modules/matlab/test/test_compiler.cpp
Normal file
@ -0,0 +1,31 @@
|
||||
/*
|
||||
* file: rand.cpp
|
||||
* author: A trusty code generator
|
||||
* date: Wed, 19 Jun 2013 11:15:15
|
||||
*
|
||||
* This file was autogenerated, do not modify.
|
||||
* See LICENCE for full modification and redistribution details.
|
||||
* Copyright 2013 The OpenCV Foundation
|
||||
*/
|
||||
#include "mex.h"
|
||||
#include <vector>
|
||||
|
||||
/*
|
||||
* rand
|
||||
* Gateway routine
|
||||
* nlhs - number of return arguments
|
||||
* plhs - pointers to return arguments
|
||||
* nrhs - number of input arguments
|
||||
* prhs - pointers to input arguments
|
||||
*/
|
||||
void mexFunction(int nlhs, mxArray* plhs[],
|
||||
int nrhs, const mxArray* prhs[]) {
|
||||
|
||||
// call the opencv function
|
||||
// [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
|
||||
try {
|
||||
rand();
|
||||
} catch(...) {
|
||||
mexErrMsgTxt("Uncaught exception occurred in rand");
|
||||
}
|
||||
}
|
15
modules/matlab/test/test_generator.hpp
Normal file
15
modules/matlab/test/test_generator.hpp
Normal file
@ -0,0 +1,15 @@
|
||||
/*
|
||||
* a rather innocuous-looking function which is actually
|
||||
* part of <cstdlib>, so we can be reasonably sure its
|
||||
* definition will be found
|
||||
*/
|
||||
#ifndef __OPENCV_MATLAB_TEST_GENERATOR_HPP_
|
||||
#define __OPENCV_MATLAB_TEST_GENERATOR_HPP_
|
||||
|
||||
namespace cv {
|
||||
|
||||
CV_EXPORTS_W int rand( );
|
||||
|
||||
};
|
||||
|
||||
#endif
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user