
Refactoring: update directories and imports

omassot, 6 years ago
parent
commit 861566219a
100 changed files with 15038 additions and 15032 deletions
  1. __init__.py  (+0 -37)
  2. build.py  (+16 -9)
  3. mncheck/__init__.py  (+36 -0)
  4. mncheck/core/__init__.py  (+0 -0)
  5. mncheck/core/cerberus_.py  (+0 -0)
  6. mncheck/core/checking.py  (+0 -0)
  7. mncheck/core/constants.py  (+0 -0)
  8. mncheck/core/logging.yaml  (+1 -1)
  9. mncheck/core/logging_.py  (+2 -2)
  10. mncheck/core/mncheck.py  (+8 -8)
  11. mncheck/ext/cerberus/__init__.py  (+29 -29)
  12. mncheck/ext/cerberus/errors.py  (+626 -626)
  13. mncheck/ext/cerberus/platform.py  (+14 -14)
  14. mncheck/ext/cerberus/schema.py  (+482 -482)
  15. mncheck/ext/cerberus/tests/__init__.py  (+144 -144)
  16. mncheck/ext/cerberus/tests/conftest.py  (+134 -134)
  17. mncheck/ext/cerberus/tests/test_assorted.py  (+76 -76)
  18. mncheck/ext/cerberus/tests/test_customization.py  (+77 -77)
  19. mncheck/ext/cerberus/tests/test_errors.py  (+260 -260)
  20. mncheck/ext/cerberus/tests/test_legacy.py  (+3 -3)
  21. mncheck/ext/cerberus/tests/test_normalization.py  (+485 -485)
  22. mncheck/ext/cerberus/tests/test_registries.py  (+82 -82)
  23. mncheck/ext/cerberus/tests/test_schema.py  (+111 -111)
  24. mncheck/ext/cerberus/tests/test_validation.py  (+1579 -1579)
  25. mncheck/ext/cerberus/utils.py  (+119 -119)
  26. mncheck/ext/cerberus/validator.py  (+1407 -1407)
  27. mncheck/ext/importlib_metadata/__init__.py  (+17 -17)
  28. mncheck/ext/importlib_metadata/_hooks.py  (+154 -154)
  29. mncheck/ext/importlib_metadata/api.py  (+375 -375)
  30. mncheck/ext/importlib_metadata/docs/__init__.py  (+0 -0)
  31. mncheck/ext/importlib_metadata/docs/changelog.rst  (+85 -85)
  32. mncheck/ext/importlib_metadata/docs/conf.py  (+196 -196)
  33. mncheck/ext/importlib_metadata/docs/index.rst  (+53 -53)
  34. mncheck/ext/importlib_metadata/docs/using.rst  (+254 -254)
  35. mncheck/ext/importlib_metadata/tests/__init__.py  (+0 -0)
  36. mncheck/ext/importlib_metadata/tests/data/__init__.py  (+0 -0)
  37. mncheck/ext/importlib_metadata/tests/fixtures.py  (+35 -35)
  38. mncheck/ext/importlib_metadata/tests/test_api.py  (+154 -154)
  39. mncheck/ext/importlib_metadata/tests/test_main.py  (+155 -155)
  40. mncheck/ext/importlib_metadata/tests/test_zip.py  (+48 -48)
  41. mncheck/ext/path.py  (+1969 -1969)
  42. mncheck/ext/yaml/__init__.py  (+312 -312)
  43. mncheck/ext/yaml/composer.py  (+139 -139)
  44. mncheck/ext/yaml/constructor.py  (+686 -686)
  45. mncheck/ext/yaml/cyaml.py  (+85 -85)
  46. mncheck/ext/yaml/dumper.py  (+62 -62)
  47. mncheck/ext/yaml/emitter.py  (+1137 -1137)
  48. mncheck/ext/yaml/error.py  (+75 -75)
  49. mncheck/ext/yaml/events.py  (+86 -86)
  50. mncheck/ext/yaml/loader.py  (+40 -40)
  51. mncheck/ext/yaml/nodes.py  (+49 -49)
  52. mncheck/ext/yaml/parser.py  (+589 -589)
  53. mncheck/ext/yaml/reader.py  (+192 -192)
  54. mncheck/ext/yaml/representer.py  (+387 -387)
  55. mncheck/ext/yaml/resolver.py  (+227 -227)
  56. mncheck/ext/yaml/scanner.py  (+1444 -1444)
  57. mncheck/ext/yaml/serializer.py  (+111 -111)
  58. mncheck/ext/yaml/tokens.py  (+104 -104)
  59. mncheck/ext/zipp.py  (+110 -110)
  60. mncheck/icon.png  (+0 -0)
  61. mncheck/main.py  (+3 -3)
  62. mncheck/metadata.txt  (+1 -1)
  63. mncheck/schemas/__init__.py  (+0 -0)
  64. mncheck/schemas/mn1_rec.py  (+4 -4)
  65. mncheck/schemas/mn2_rec.py  (+4 -4)
  66. mncheck/test/__init__.py  (+0 -0)
  67. mncheck/test/_base.py  (+1 -1)
  68. mncheck/test/projects/mn1_rec/0_empty/0_empty.qgz  (+0 -0)
  69. mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.cpg  (+0 -0)
  70. mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.dbf  (+0 -0)
  71. mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.prj  (+0 -0)
  72. mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.qpj  (+1 -1)
  73. mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.shp  (+0 -0)
  74. mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.shx  (+0 -0)
  75. mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.cpg  (+0 -0)
  76. mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.dbf  (+0 -0)
  77. mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.prj  (+0 -0)
  78. mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.qpj  (+1 -1)
  79. mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.shp  (+0 -0)
  80. mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.shx  (+0 -0)
  81. mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.cpg  (+0 -0)
  82. mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.dbf  (+0 -0)
  83. mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.prj  (+0 -0)
  84. mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.qpj  (+1 -1)
  85. mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.shp  (+0 -0)
  86. mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.shx  (+0 -0)
  87. mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.xlsx  (+0 -0)
  88. mncheck/test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.dbf  (+0 -0)
  89. mncheck/test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.prj  (+0 -0)
  90. mncheck/test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.shp  (+0 -0)
  91. mncheck/test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.shx  (+0 -0)
  92. mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.cpg  (+0 -0)
  93. mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.dbf  (+0 -0)
  94. mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.prj  (+0 -0)
  95. mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.qpj  (+1 -1)
  96. mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.shp  (+0 -0)
  97. mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.shx  (+0 -0)
  98. mncheck/test/projects/mn1_rec/0_empty/TRANCHEE_GEO.cpg  (+0 -0)
  99. mncheck/test/projects/mn1_rec/0_empty/TRANCHEE_GEO.dbf  (+0 -0)
  100. mncheck/test/projects/mn1_rec/0_empty/TRANCHEE_GEO.prj  (+0 -0)

+ 0 - 37
__init__.py

@@ -1,37 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-/***************************************************************************
- MnCheck
-                                 A QGIS plugin
- Contrôle des données FTTH format MN
- Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-                             -------------------
-        begin                : 2018-12-07
-        copyright            : (C) 2018 by Manche Numérique 2019
-        email                : olivier.massot@manchenumerique.fr
-        git sha              : $Format:%H$
- ***************************************************************************/
-
-/***************************************************************************
- *                                                                         *
- *   This program is free software; you can redistribute it and/or modify  *
- *   it under the terms of the GNU General Public License as published by  *
- *   the Free Software Foundation; either version 2 of the License, or     *
- *   (at your option) any later version.                                   *
- *                                                                         *
- ***************************************************************************/
- This script initializes the plugin, making it known to QGIS.
-"""
-import os
-import sys
-
-HERE = os.path.abspath(os.path.normpath(os.path.join(__file__, os.pardir)))
-# sys.path.insert(0, HERE)
-sys.path.insert(0, os.path.join(HERE, "ext"))
-
-# noinspection PyPep8Naming
-def classFactory(iface):  # pylint: disable=invalid-name
-    """Load MnCheck class from file MnCheck.
-    """
-    from main import MnCheck   # @UnresolvedImport
-    return MnCheck(iface)

+ 16 - 9
build.py

@@ -3,21 +3,26 @@
 @author: olivier.massot, 2019
 '''
 import configparser
-from core import constants
 from zipfile import ZipFile, ZIP_DEFLATED
 
 from path import Path
 
-build_dir = constants.MAIN / "build"
+from mncheck.core import constants
+
+
+HERE = Path(__file__).parent
+
+build_dir = HERE / "build"
 build_dir.mkdir_p()
+
 version = constants.VERSION
 
-basename = "MnCheck"
-name = "{}_v{}.zip".format(basename, version.replace(".", "-"))
+pluginname = "mncheck"
+name = "{}_v{}.zip".format(pluginname, version.replace(".", "-"))
 
 config = configparser.ConfigParser()
 
-config["general"] = {"name" : basename,
+config["general"] = {"name" : pluginname,
                     "qgisminimumversion" : "3.4",
                     "description" : "Contrôle des données FTTH format MN",
                     "version" : version,
@@ -39,10 +44,11 @@ with open(constants.MAIN / 'metadata.txt', 'w+', encoding='utf-8') as mdf:
 
 with ZipFile(build_dir / name, 'w', ZIP_DEFLATED, 9) as zip_:
     
-    def _zip_write(p):
+    def _zip_write(p, arcname=None):
         if p.ext == ".pyc":
             return
-        arcname = (basename + "/" + p.relpath(constants.MAIN)).replace("\\", "/")
+        if not arcname:
+            arcname = ("{}/{}".format(pluginname, p.relpath(constants.MAIN)).replace("\\", "/"))
         print(arcname)
         zip_.write(p, arcname)
 
@@ -66,11 +72,12 @@ with ZipFile(build_dir / name, 'w', ZIP_DEFLATED, 9) as zip_:
     
     _zip_write(constants.MAIN / "__init__.py")
     _zip_write(constants.MAIN / "main.py")
-    _zip_write(constants.MAIN / "README.md")
     _zip_write(constants.MAIN / "icon.png")
-    _zip_write(constants.MAIN / "LICENSE")
     _zip_write(constants.MAIN / "metadata.txt")
     
+    _zip_write(HERE / "README.md", f"{pluginname}/README.md")
+    _zip_write(HERE / "LICENSE", f"{pluginname}/LICENSE")
+    
 Path(build_dir / "metadata.txt").remove_p()
 
 print(f"-- {name} file built --")
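
Note: a minimal sketch of the archive-name logic that _zip_write applies above, rewritten with the standard-library pathlib instead of the vendored path module. PLUGIN_NAME, MAIN and the sample paths are illustrative assumptions, not values taken from the repository.

    from pathlib import Path

    PLUGIN_NAME = "mncheck"          # mirrors `pluginname` in build.py
    MAIN = Path("/work/mncheck")     # hypothetical stand-in for constants.MAIN

    def arcname_for(path, arcname=None):
        # Skip compiled files; otherwise mirror the path relative to MAIN
        # under the plugin root, using forward slashes as zip entries expect.
        if path.suffix == ".pyc":
            return None
        if arcname is None:
            arcname = "{}/{}".format(PLUGIN_NAME, path.relative_to(MAIN).as_posix())
        return arcname

    print(arcname_for(MAIN / "core" / "constants.py"))
    # -> mncheck/core/constants.py
    print(arcname_for(Path("/work/README.md"), f"{PLUGIN_NAME}/README.md"))
    # -> mncheck/README.md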

+ 36 - 0
mncheck/__init__.py

@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+"""
+/***************************************************************************
+ MnCheck
+                                 A QGIS plugin
+ Contrôle des données FTTH format MN
+ Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
+                             -------------------
+        begin                : 2018-12-07
+        copyright            : (C) 2018 by Manche Numérique 2019
+        email                : olivier.massot@manchenumerique.fr
+        git sha              : $Format:%H$
+ ***************************************************************************/
+
+/***************************************************************************
+ *                                                                         *
+ *   This program is free software; you can redistribute it and/or modify  *
+ *   it under the terms of the GNU General Public License as published by  *
+ *   the Free Software Foundation; either version 2 of the License, or     *
+ *   (at your option) any later version.                                   *
+ *                                                                         *
+ ***************************************************************************/
+ This script initializes the plugin, making it known to QGIS.
+"""
+import os
+import sys
+
+HERE = os.path.abspath(os.path.normpath(os.path.join(__file__, os.pardir)))
+sys.path.insert(0, os.path.join(HERE, "ext"))
+
+# noinspection PyPep8Naming
+def classFactory(iface):  # pylint: disable=invalid-name
+    """Load MnCheck class from file MnCheck.
+    """
+    from mncheck.main import MnCheck   # @UnresolvedImport
+    return MnCheck(iface)
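
For orientation, a rough sketch of how this entry point gets used: QGIS imports the plugin package and calls classFactory() with a QgisInterface object. The FakeIface stub below is hypothetical and only stands in for that object; the real call only works inside a QGIS Python environment, since MnCheck itself imports QGIS modules.

    import importlib

    class FakeIface:
        """Illustrative stand-in for qgis.gui.QgisInterface (not a real QGIS object)."""
        pass

    # Importing the package runs the sys.path insertion above, so the bundled
    # "ext" libraries become importable before the plugin class is created.
    plugin_package = importlib.import_module("mncheck")
    plugin = plugin_package.classFactory(FakeIface())   # expected to be an MnCheck instance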

+ 0 - 0
core/__init__.py → mncheck/core/__init__.py


+ 0 - 0
core/cerberus_.py → mncheck/core/cerberus_.py


+ 0 - 0
core/checking.py → mncheck/core/checking.py


+ 0 - 0
core/constants.py → mncheck/core/constants.py


+ 1 - 1
core/logging.yaml → mncheck/core/logging.yaml

@@ -12,7 +12,7 @@ formatters:
         
 handlers:
     qgis:
-        class: core.logging_.QgsLogHandler
+        class: mncheck.core.logging_.QgsLogHandler
         level: DEBUG
         formatter: message_only
         

+ 2 - 2
core/logging_.py → mncheck/core/logging_.py

@@ -9,9 +9,9 @@ import sys
 import traceback
 import yaml
 
-from core.constants import LOGDIR, LOGCONF
+from mncheck.core.constants import LOGDIR, LOGCONF
 
-from qgis.core import QgsMessageLog
+from qgis.core import QgsMessageLog  #@UnresolvedImport
 
 _to_qgis_level = {logging.DEBUG: 0, 
                   logging.INFO: 0, 

+ 8 - 8
core/mncheck.py → mncheck/core/mncheck.py

@@ -6,25 +6,25 @@ import importlib
 import inspect
 import logging
 import pkgutil
-from qgis.core import QgsProject, QgsWkbTypes, QgsGeometry, QgsPoint
+from qgis.core import QgsProject, QgsWkbTypes, QgsGeometry, QgsPoint  #@UnresolvedImport
 
-from PyQt5.QtCore import QVariant, QDate, Qt
+from PyQt5.QtCore import QVariant, QDate
 import yaml
 
-from core.checking import BaseChecker
-from core.constants import USER_DATA
-from plugins import processing
+from mncheck.core.checking import BaseChecker
+from mncheck.core.constants import USER_DATA
+from plugins import processing  #@UnresolvedImport
 
 
 logger = logging.getLogger("mncheck")
 
 
 def list_schemas():
-    import schemas
-    return [name for _, name, ispkg in pkgutil.iter_modules(schemas.__path__) if not (ispkg or name[0] == '_')]
+    import mncheck.schemas
+    return [name for _, name, ispkg in pkgutil.iter_modules(mncheck.schemas.__path__) if not (ispkg or name[0] == '_')]
 
 def get_schema(schema_name):
-    return importlib.import_module("schemas." + schema_name)
+    return importlib.import_module("mncheck.schemas." + schema_name)
 
 def get_checkers(schema):
     return [cls for _, cls in inspect.getmembers(schema, predicate=inspect.isclass) \
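
For reference, a condensed sketch of the discovery pattern that list_schemas() and get_schema() rely on, assuming the mncheck package is importable; the expected names are an inference from the schema files listed in this commit, not output captured from the plugin.

    import importlib
    import pkgutil

    import mncheck.schemas

    # Enumerate plain sub-modules of the schemas package, skipping packages and
    # anything starting with an underscore, mirroring list_schemas().
    names = [name for _, name, ispkg in pkgutil.iter_modules(mncheck.schemas.__path__)
             if not (ispkg or name.startswith("_"))]
    print(names)   # expected: ['mn1_rec', 'mn2_rec']

    # Import one schema module by its dotted name, mirroring get_schema().
    schema = importlib.import_module("mncheck.schemas." + names[0])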

+ 29 - 29
ext/cerberus/__init__.py → mncheck/ext/cerberus/__init__.py

@@ -1,29 +1,29 @@
-"""
-    Extensible validation for Python dictionaries.
-
-    :copyright: 2012-2016 by Nicola Iarocci.
-    :license: ISC, see LICENSE for more details.
-
-    Full documentation is available at http://python-cerberus.org/
-
-"""
-
-from __future__ import absolute_import
-
-from cerberus.validator import DocumentError, Validator
-from cerberus.schema import (rules_set_registry, schema_registry, Registry,
-                             SchemaError)
-from cerberus.utils import TypeDefinition
-
-
-__version__ = "1.2"
-
-__all__ = [
-    DocumentError.__name__,
-    Registry.__name__,
-    SchemaError.__name__,
-    TypeDefinition.__name__,
-    Validator.__name__,
-    'schema_registry',
-    'rules_set_registry'
-]
+"""
+    Extensible validation for Python dictionaries.
+
+    :copyright: 2012-2016 by Nicola Iarocci.
+    :license: ISC, see LICENSE for more details.
+
+    Full documentation is available at http://python-cerberus.org/
+
+"""
+
+from __future__ import absolute_import
+
+from cerberus.validator import DocumentError, Validator
+from cerberus.schema import (rules_set_registry, schema_registry, Registry,
+                             SchemaError)
+from cerberus.utils import TypeDefinition
+
+
+__version__ = "1.2"
+
+__all__ = [
+    DocumentError.__name__,
+    Registry.__name__,
+    SchemaError.__name__,
+    TypeDefinition.__name__,
+    Validator.__name__,
+    'schema_registry',
+    'rules_set_registry'
+]

+ 626 - 626
ext/cerberus/errors.py → mncheck/ext/cerberus/errors.py

@@ -1,626 +1,626 @@
-# -*-: coding utf-8 -*-
-""" This module contains the error-related constants and classes. """
-
-from __future__ import absolute_import
-
-from collections import defaultdict, namedtuple, MutableMapping
-from copy import copy, deepcopy
-from functools import wraps
-from pprint import pformat
-
-from cerberus.platform import PYTHON_VERSION
-from cerberus.utils import compare_paths_lt, quote_string
-
-
-ErrorDefinition = namedtuple('ErrorDefinition', 'code, rule')
-"""
-This class is used to define possible errors. Each distinguishable error is
-defined by a *unique* error ``code`` as integer and the ``rule`` that can
-cause it as string.
-The instances' names do not contain a common prefix as they are supposed to be
-referenced within the module namespace, e.g. ``errors.CUSTOM``.
-"""
-
-
-# custom
-CUSTOM = ErrorDefinition(0x00, None)
-
-# existence
-DOCUMENT_MISSING = ErrorDefinition(0x01, None)  # issues/141
-DOCUMENT_MISSING = "document is missing"
-REQUIRED_FIELD = ErrorDefinition(0x02, 'required')
-UNKNOWN_FIELD = ErrorDefinition(0x03, None)
-DEPENDENCIES_FIELD = ErrorDefinition(0x04, 'dependencies')
-DEPENDENCIES_FIELD_VALUE = ErrorDefinition(0x05, 'dependencies')
-EXCLUDES_FIELD = ErrorDefinition(0x06, 'excludes')
-
-# shape
-DOCUMENT_FORMAT = ErrorDefinition(0x21, None)  # issues/141
-DOCUMENT_FORMAT = "'{0}' is not a document, must be a dict"
-EMPTY_NOT_ALLOWED = ErrorDefinition(0x22, 'empty')
-NOT_NULLABLE = ErrorDefinition(0x23, 'nullable')
-BAD_TYPE = ErrorDefinition(0x24, 'type')
-BAD_TYPE_FOR_SCHEMA = ErrorDefinition(0x25, 'schema')
-ITEMS_LENGTH = ErrorDefinition(0x26, 'items')
-MIN_LENGTH = ErrorDefinition(0x27, 'minlength')
-MAX_LENGTH = ErrorDefinition(0x28, 'maxlength')
-
-
-# color
-REGEX_MISMATCH = ErrorDefinition(0x41, 'regex')
-MIN_VALUE = ErrorDefinition(0x42, 'min')
-MAX_VALUE = ErrorDefinition(0x43, 'max')
-UNALLOWED_VALUE = ErrorDefinition(0x44, 'allowed')
-UNALLOWED_VALUES = ErrorDefinition(0x45, 'allowed')
-FORBIDDEN_VALUE = ErrorDefinition(0x46, 'forbidden')
-FORBIDDEN_VALUES = ErrorDefinition(0x47, 'forbidden')
-
-# other
-NORMALIZATION = ErrorDefinition(0x60, None)
-COERCION_FAILED = ErrorDefinition(0x61, 'coerce')
-RENAMING_FAILED = ErrorDefinition(0x62, 'rename_handler')
-READONLY_FIELD = ErrorDefinition(0x63, 'readonly')
-SETTING_DEFAULT_FAILED = ErrorDefinition(0x64, 'default_setter')
-
-# groups
-ERROR_GROUP = ErrorDefinition(0x80, None)
-MAPPING_SCHEMA = ErrorDefinition(0x81, 'schema')
-SEQUENCE_SCHEMA = ErrorDefinition(0x82, 'schema')
-KEYSCHEMA = ErrorDefinition(0x83, 'keyschema')
-VALUESCHEMA = ErrorDefinition(0x84, 'valueschema')
-BAD_ITEMS = ErrorDefinition(0x8f, 'items')
-
-LOGICAL = ErrorDefinition(0x90, None)
-NONEOF = ErrorDefinition(0x91, 'noneof')
-ONEOF = ErrorDefinition(0x92, 'oneof')
-ANYOF = ErrorDefinition(0x93, 'anyof')
-ALLOF = ErrorDefinition(0x94, 'allof')
-
-
-""" SchemaError messages """
-
-SCHEMA_ERROR_DEFINITION_TYPE = \
-    "schema definition for field '{0}' must be a dict"
-SCHEMA_ERROR_MISSING = "validation schema missing"
-
-
-""" Error representations """
-
-
-class ValidationError(object):
-    """ A simple class to store and query basic error information. """
-    def __init__(self, document_path, schema_path, code, rule, constraint,
-                 value, info):
-        self.document_path = document_path
-        """ The path to the field within the document that caused the error.
-            Type: :class:`tuple` """
-        self.schema_path = schema_path
-        """ The path to the rule within the schema that caused the error.
-            Type: :class:`tuple` """
-        self.code = code
-        """ The error's identifier code. Type: :class:`int` """
-        self.rule = rule
-        """ The rule that failed. Type: `string` """
-        self.constraint = constraint
-        """ The constraint that failed. """
-        self.value = value
-        """ The value that failed. """
-        self.info = info
-        """ May hold additional information about the error.
-            Type: :class:`tuple` """
-
-    def __eq__(self, other):
-        """ Assumes the errors relate to the same document and schema. """
-        return hash(self) == hash(other)
-
-    def __hash__(self):
-        """ Expects that all other properties are transitively determined. """
-        return hash(self.document_path) ^ hash(self.schema_path) \
-            ^ hash(self.code)
-
-    def __lt__(self, other):
-        if self.document_path != other.document_path:
-            return compare_paths_lt(self.document_path, other.document_path)
-        else:
-            return compare_paths_lt(self.schema_path, other.schema_path)
-
-    def __repr__(self):
-        return "{class_name} @ {memptr} ( " \
-               "document_path={document_path}," \
-               "schema_path={schema_path}," \
-               "code={code}," \
-               "constraint={constraint}," \
-               "value={value}," \
-               "info={info} )"\
-               .format(class_name=self.__class__.__name__, memptr=hex(id(self)),  # noqa: E501
-                       document_path=self.document_path,
-                       schema_path=self.schema_path,
-                       code=hex(self.code),
-                       constraint=quote_string(self.constraint),
-                       value=quote_string(self.value),
-                       info=self.info)
-
-    @property
-    def child_errors(self):
-        """
-        A list that contains the individual errors of a bulk validation error.
-        """
-        return self.info[0] if self.is_group_error else None
-
-    @property
-    def definitions_errors(self):
-        """ Dictionary with errors of an *of-rule mapped to the index of the
-            definition it occurred in. Returns :obj:`None` if not applicable.
-            """
-        if not self.is_logic_error:
-            return None
-
-        result = defaultdict(list)
-        for error in self.child_errors:
-            i = error.schema_path[len(self.schema_path)]
-            result[i].append(error)
-        return result
-
-    @property
-    def field(self):
-        """ Field of the contextual mapping, possibly :obj:`None`. """
-        if self.document_path:
-            return self.document_path[-1]
-        else:
-            return None
-
-    @property
-    def is_group_error(self):
-        """ ``True`` for errors of bulk validations. """
-        return bool(self.code & ERROR_GROUP.code)
-
-    @property
-    def is_logic_error(self):
-        """ ``True`` for validation errors against different schemas with
-            *of-rules. """
-        return bool(self.code & LOGICAL.code - ERROR_GROUP.code)
-
-    @property
-    def is_normalization_error(self):
-        """ ``True`` for normalization errors. """
-        return bool(self.code & NORMALIZATION.code)
-
-
-class ErrorList(list):
-    """ A list for :class:`~cerberus.errors.ValidationError` instances that
-        can be queried with the ``in`` keyword for a particular
-        :class:`~cerberus.errors.ErrorDefinition`. """
-    def __contains__(self, error_definition):
-        for code in (x.code for x in self):
-            if code == error_definition.code:
-                return True
-        return False
-
-
-class ErrorTreeNode(MutableMapping):
-    __slots__ = ('descendants', 'errors', 'parent_node', 'path', 'tree_root')
-
-    def __init__(self, path, parent_node):
-        self.parent_node = parent_node
-        self.tree_root = self.parent_node.tree_root
-        self.path = path[:self.parent_node.depth + 1]
-        self.errors = ErrorList()
-        self.descendants = {}
-
-    def __add__(self, error):
-        self.add(error)
-        return self
-
-    def __contains__(self, item):
-        if isinstance(item, ErrorDefinition):
-            return item in self.errors
-        else:
-            return item in self.descendants
-
-    def __delitem__(self, key):
-        del self.descendants[key]
-
-    def __iter__(self):
-        return iter(self.errors)
-
-    def __getitem__(self, item):
-        if isinstance(item, ErrorDefinition):
-            for error in self.errors:
-                if item.code == error.code:
-                    return error
-        else:
-            return self.descendants.get(item)
-
-    def __len__(self):
-        return len(self.errors)
-
-    def __repr__(self):
-        return self.__str__()
-
-    def __setitem__(self, key, value):
-        self.descendants[key] = value
-
-    def __str__(self):
-        return str(self.errors) + ',' + str(self.descendants)
-
-    @property
-    def depth(self):
-        return len(self.path)
-
-    @property
-    def tree_type(self):
-        return self.tree_root.tree_type
-
-    def add(self, error):
-        error_path = self._path_of_(error)
-
-        key = error_path[self.depth]
-        if key not in self.descendants:
-            self[key] = ErrorTreeNode(error_path, self)
-
-        if len(error_path) == self.depth + 1:
-            self[key].errors.append(error)
-            self[key].errors.sort()
-            if error.is_group_error:
-                for child_error in error.child_errors:
-                    self.tree_root += child_error
-        else:
-            self[key] += error
-
-    def _path_of_(self, error):
-        return getattr(error, self.tree_type + '_path')
-
-
-class ErrorTree(ErrorTreeNode):
-    """ Base class for :class:`~cerberus.errors.DocumentErrorTree` and
-        :class:`~cerberus.errors.SchemaErrorTree`. """
-    def __init__(self, errors=[]):
-        self.parent_node = None
-        self.tree_root = self
-        self.path = ()
-        self.errors = ErrorList()
-        self.descendants = {}
-        for error in errors:
-            self += error
-
-    def add(self, error):
-        """ Add an error to the tree.
-
-        :param error: :class:`~cerberus.errors.ValidationError`
-        """
-        if not self._path_of_(error):
-            self.errors.append(error)
-            self.errors.sort()
-        else:
-            super(ErrorTree, self).add(error)
-
-    def fetch_errors_from(self, path):
-        """ Returns all errors for a particular path.
-
-        :param path: :class:`tuple` of :term:`hashable` s.
-        :rtype: :class:`~cerberus.errors.ErrorList`
-        """
-        node = self.fetch_node_from(path)
-        if node is not None:
-            return node.errors
-        else:
-            return ErrorList()
-
-    def fetch_node_from(self, path):
-        """ Returns a node for a path.
-
-        :param path: Tuple of :term:`hashable` s.
-        :rtype: :class:`~cerberus.errors.ErrorTreeNode` or :obj:`None`
-        """
-        context = self
-        for key in path:
-            context = context[key]
-            if context is None:
-                break
-        return context
-
-
-class DocumentErrorTree(ErrorTree):
-    """ Implements a dict-like class to query errors by indexes following the
-        structure of a validated document. """
-    tree_type = 'document'
-
-
-class SchemaErrorTree(ErrorTree):
-    """ Implements a dict-like class to query errors by indexes following the
-        structure of the used schema. """
-    tree_type = 'schema'
-
-
-class BaseErrorHandler(object):
-    """ Base class for all error handlers.
-        Subclasses are identified as error-handlers with an instance-test. """
-    def __init__(self, *args, **kwargs):
-        """ Optionally initialize a new instance. """
-        pass
-
-    def __call__(self, errors):
-        """ Returns errors in a handler-specific format.
-
-        :param errors: An object containing the errors.
-        :type errors: :term:`iterable` of
-                      :class:`~cerberus.errors.ValidationError` instances or a
-                      :class:`~cerberus.Validator` instance
-        """
-        raise NotImplementedError
-
-    def __iter__(self):
-        """ Be a superhero and implement an iterator over errors. """
-        raise NotImplementedError
-
-    def add(self, error):
-        """ Add an error to the errors' container object of a handler.
-
-        :param error: The error to add.
-        :type error: :class:`~cerberus.errors.ValidationError`
-        """
-        raise NotImplementedError
-
-    def emit(self, error):
-        """ Optionally emits an error in the handler's format to a stream.
-            Or light a LED, or even shut down a power plant.
-
-        :param error: The error to emit.
-        :type error: :class:`~cerberus.errors.ValidationError`
-        """
-        pass
-
-    def end(self, validator):
-        """ Gets called when a validation ends.
-
-        :param validator: The calling validator.
-        :type validator: :class:`~cerberus.Validator` """
-        pass
-
-    def extend(self, errors):
-        """ Adds all errors to the handler's container object.
-
-        :param errors: The errors to add.
-        :type errors: :term:`iterable` of
-                      :class:`~cerberus.errors.ValidationError` instances
-        """
-        for error in errors:
-            self.add(error)
-
-    def start(self, validator):
-        """ Gets called when a validation starts.
-
-        :param validator: The calling validator.
-        :type validator: :class:`~cerberus.Validator`
-        """
-        pass
-
-
-class ToyErrorHandler(BaseErrorHandler):
-    def __call__(self, *args, **kwargs):
-        raise RuntimeError('This is not supposed to happen.')
-
-    def clear(self):
-        pass
-
-
-def encode_unicode(f):
-    """Cerberus error messages expect regular binary strings.
-    If unicode is used in a ValidationError message can't be printed.
-
-    This decorator ensures that if legacy Python is used unicode
-    strings are encoded before passing to a function.
-    """
-    @wraps(f)
-    def wrapped(obj, error):
-
-        def _encode(value):
-            """Helper encoding unicode strings into binary utf-8"""
-            if isinstance(value, unicode):  # noqa: F821
-                return value.encode('utf-8')
-            return value
-
-        error = copy(error)
-        error.document_path = _encode(error.document_path)
-        error.schema_path = _encode(error.schema_path)
-        error.constraint = _encode(error.constraint)
-        error.value = _encode(error.value)
-        error.info = _encode(error.info)
-        return f(obj, error)
-
-    return wrapped if PYTHON_VERSION < 3 else f
-
-
-class BasicErrorHandler(BaseErrorHandler):
-    """ Models cerberus' legacy. Returns a :class:`dict`. When mangled
-        through :class:`str` a pretty-formatted representation of that
-        tree is returned.
-    """
-    messages = {0x00: "{0}",
-
-                0x01: "document is missing",
-                0x02: "required field",
-                0x03: "unknown field",
-                0x04: "field '{0}' is required",
-                0x05: "depends on these values: {constraint}",
-                0x06: "{0} must not be present with '{field}'",
-
-                0x21: "'{0}' is not a document, must be a dict",
-                0x22: "empty values not allowed",
-                0x23: "null value not allowed",
-                0x24: "must be of {constraint} type",
-                0x25: "must be of dict type",
-                0x26: "length of list should be {constraint}, it is {0}",
-                0x27: "min length is {constraint}",
-                0x28: "max length is {constraint}",
-
-                0x41: "value does not match regex '{constraint}'",
-                0x42: "min value is {constraint}",
-                0x43: "max value is {constraint}",
-                0x44: "unallowed value {value}",
-                0x45: "unallowed values {0}",
-                0x46: "unallowed value {value}",
-                0x47: "unallowed values {0}",
-
-                0x61: "field '{field}' cannot be coerced: {0}",
-                0x62: "field '{field}' cannot be renamed: {0}",
-                0x63: "field is read-only",
-                0x64: "default value for '{field}' cannot be set: {0}",
-
-                0x81: "mapping doesn't validate subschema: {0}",
-                0x82: "one or more sequence-items don't validate: {0}",
-                0x83: "one or more keys of a mapping  don't validate: {0}",
-                0x84: "one or more values in a mapping don't validate: {0}",
-                0x85: "one or more sequence-items don't validate: {0}",
-
-                0x91: "one or more definitions validate",
-                0x92: "none or more than one rule validate",
-                0x93: "no definitions validate",
-                0x94: "one or more definitions don't validate"
-                }
-
-    def __init__(self, tree=None):
-        self.tree = {} if tree is None else tree
-
-    def __call__(self, errors=None):
-        if errors is not None:
-            self.clear()
-            self.extend(errors)
-        return self.pretty_tree
-
-    def __str__(self):
-        return pformat(self.pretty_tree)
-
-    @property
-    def pretty_tree(self):
-        pretty = deepcopy(self.tree)
-        for field in pretty:
-            self._purge_empty_dicts(pretty[field])
-        return pretty
-
-    @encode_unicode
-    def add(self, error):
-        # Make sure the original error is not altered with
-        # error paths specific to the handler.
-        error = deepcopy(error)
-
-        self._rewrite_error_path(error)
-
-        if error.is_logic_error:
-            self._insert_logic_error(error)
-        elif error.is_group_error:
-            self._insert_group_error(error)
-        elif error.code in self.messages:
-            self._insert_error(error.document_path,
-                               self._format_message(error.field, error))
-
-    def clear(self):
-        self.tree = {}
-
-    def start(self, validator):
-        self.clear()
-
-    def _format_message(self, field, error):
-        return self.messages[error.code].format(
-            *error.info, constraint=error.constraint,
-            field=field, value=error.value)
-
-    def _insert_error(self, path, node):
-        """ Adds an error or sub-tree to :attr:tree.
-
-        :param path: Path to the error.
-        :type path: Tuple of strings and integers.
-        :param node: An error message or a sub-tree.
-        :type node: String or dictionary.
-        """
-        field = path[0]
-        if len(path) == 1:
-            if field in self.tree:
-                subtree = self.tree[field].pop()
-                self.tree[field] += [node, subtree]
-            else:
-                self.tree[field] = [node, {}]
-        elif len(path) >= 1:
-            if field not in self.tree:
-                self.tree[field] = [{}]
-            subtree = self.tree[field][-1]
-
-            if subtree:
-                new = self.__class__(tree=copy(subtree))
-            else:
-                new = self.__class__()
-            new._insert_error(path[1:], node)
-            subtree.update(new.tree)
-
-    def _insert_group_error(self, error):
-        for child_error in error.child_errors:
-            if child_error.is_logic_error:
-                self._insert_logic_error(child_error)
-            elif child_error.is_group_error:
-                self._insert_group_error(child_error)
-            else:
-                self._insert_error(child_error.document_path,
-                                   self._format_message(child_error.field,
-                                                        child_error))
-
-    def _insert_logic_error(self, error):
-        field = error.field
-        self._insert_error(error.document_path,
-                           self._format_message(field, error))
-
-        for definition_errors in error.definitions_errors.values():
-            for child_error in definition_errors:
-                if child_error.is_logic_error:
-                    self._insert_logic_error(child_error)
-                elif child_error.is_group_error:
-                    self._insert_group_error(child_error)
-                else:
-                    self._insert_error(child_error.document_path,
-                                       self._format_message(field, child_error))
-
-    def _purge_empty_dicts(self, error_list):
-        subtree = error_list[-1]
-        if not error_list[-1]:
-            error_list.pop()
-        else:
-            for key in subtree:
-                self._purge_empty_dicts(subtree[key])
-
-    def _rewrite_error_path(self, error, offset=0):
-        """
-        Recursively rewrites the error path to correctly represent logic errors
-        """
-        if error.is_logic_error:
-            self._rewrite_logic_error_path(error, offset)
-        elif error.is_group_error:
-            self._rewrite_group_error_path(error, offset)
-
-    def _rewrite_group_error_path(self, error, offset=0):
-        child_start = len(error.document_path) - offset
-
-        for child_error in error.child_errors:
-            relative_path = child_error.document_path[child_start:]
-            child_error.document_path = error.document_path + relative_path
-
-            self._rewrite_error_path(child_error, offset)
-
-    def _rewrite_logic_error_path(self, error, offset=0):
-        child_start = len(error.document_path) - offset
-
-        for i, definition_errors in error.definitions_errors.items():
-            if not definition_errors:
-                continue
-
-            nodename = '%s definition %s' % (error.rule, i)
-            path = error.document_path + (nodename,)
-
-            for child_error in definition_errors:
-                rel_path = child_error.document_path[child_start:]
-                child_error.document_path = path + rel_path
-
-                self._rewrite_error_path(child_error, offset + 1)
-
-
-class SchemaErrorHandler(BasicErrorHandler):
-    messages = BasicErrorHandler.messages.copy()
-    messages[0x03] = "unknown rule"
+# -*-: coding utf-8 -*-
+""" This module contains the error-related constants and classes. """
+
+from __future__ import absolute_import
+
+from collections import defaultdict, namedtuple, MutableMapping
+from copy import copy, deepcopy
+from functools import wraps
+from pprint import pformat
+
+from cerberus.platform import PYTHON_VERSION
+from cerberus.utils import compare_paths_lt, quote_string
+
+
+ErrorDefinition = namedtuple('ErrorDefinition', 'code, rule')
+"""
+This class is used to define possible errors. Each distinguishable error is
+defined by a *unique* error ``code`` as integer and the ``rule`` that can
+cause it as string.
+The instances' names do not contain a common prefix as they are supposed to be
+referenced within the module namespace, e.g. ``errors.CUSTOM``.
+"""
+
+
+# custom
+CUSTOM = ErrorDefinition(0x00, None)
+
+# existence
+DOCUMENT_MISSING = ErrorDefinition(0x01, None)  # issues/141
+DOCUMENT_MISSING = "document is missing"
+REQUIRED_FIELD = ErrorDefinition(0x02, 'required')
+UNKNOWN_FIELD = ErrorDefinition(0x03, None)
+DEPENDENCIES_FIELD = ErrorDefinition(0x04, 'dependencies')
+DEPENDENCIES_FIELD_VALUE = ErrorDefinition(0x05, 'dependencies')
+EXCLUDES_FIELD = ErrorDefinition(0x06, 'excludes')
+
+# shape
+DOCUMENT_FORMAT = ErrorDefinition(0x21, None)  # issues/141
+DOCUMENT_FORMAT = "'{0}' is not a document, must be a dict"
+EMPTY_NOT_ALLOWED = ErrorDefinition(0x22, 'empty')
+NOT_NULLABLE = ErrorDefinition(0x23, 'nullable')
+BAD_TYPE = ErrorDefinition(0x24, 'type')
+BAD_TYPE_FOR_SCHEMA = ErrorDefinition(0x25, 'schema')
+ITEMS_LENGTH = ErrorDefinition(0x26, 'items')
+MIN_LENGTH = ErrorDefinition(0x27, 'minlength')
+MAX_LENGTH = ErrorDefinition(0x28, 'maxlength')
+
+
+# color
+REGEX_MISMATCH = ErrorDefinition(0x41, 'regex')
+MIN_VALUE = ErrorDefinition(0x42, 'min')
+MAX_VALUE = ErrorDefinition(0x43, 'max')
+UNALLOWED_VALUE = ErrorDefinition(0x44, 'allowed')
+UNALLOWED_VALUES = ErrorDefinition(0x45, 'allowed')
+FORBIDDEN_VALUE = ErrorDefinition(0x46, 'forbidden')
+FORBIDDEN_VALUES = ErrorDefinition(0x47, 'forbidden')
+
+# other
+NORMALIZATION = ErrorDefinition(0x60, None)
+COERCION_FAILED = ErrorDefinition(0x61, 'coerce')
+RENAMING_FAILED = ErrorDefinition(0x62, 'rename_handler')
+READONLY_FIELD = ErrorDefinition(0x63, 'readonly')
+SETTING_DEFAULT_FAILED = ErrorDefinition(0x64, 'default_setter')
+
+# groups
+ERROR_GROUP = ErrorDefinition(0x80, None)
+MAPPING_SCHEMA = ErrorDefinition(0x81, 'schema')
+SEQUENCE_SCHEMA = ErrorDefinition(0x82, 'schema')
+KEYSCHEMA = ErrorDefinition(0x83, 'keyschema')
+VALUESCHEMA = ErrorDefinition(0x84, 'valueschema')
+BAD_ITEMS = ErrorDefinition(0x8f, 'items')
+
+LOGICAL = ErrorDefinition(0x90, None)
+NONEOF = ErrorDefinition(0x91, 'noneof')
+ONEOF = ErrorDefinition(0x92, 'oneof')
+ANYOF = ErrorDefinition(0x93, 'anyof')
+ALLOF = ErrorDefinition(0x94, 'allof')
+
+
+""" SchemaError messages """
+
+SCHEMA_ERROR_DEFINITION_TYPE = \
+    "schema definition for field '{0}' must be a dict"
+SCHEMA_ERROR_MISSING = "validation schema missing"
+
+
+""" Error representations """
+
+
+class ValidationError(object):
+    """ A simple class to store and query basic error information. """
+    def __init__(self, document_path, schema_path, code, rule, constraint,
+                 value, info):
+        self.document_path = document_path
+        """ The path to the field within the document that caused the error.
+            Type: :class:`tuple` """
+        self.schema_path = schema_path
+        """ The path to the rule within the schema that caused the error.
+            Type: :class:`tuple` """
+        self.code = code
+        """ The error's identifier code. Type: :class:`int` """
+        self.rule = rule
+        """ The rule that failed. Type: `string` """
+        self.constraint = constraint
+        """ The constraint that failed. """
+        self.value = value
+        """ The value that failed. """
+        self.info = info
+        """ May hold additional information about the error.
+            Type: :class:`tuple` """
+
+    def __eq__(self, other):
+        """ Assumes the errors relate to the same document and schema. """
+        return hash(self) == hash(other)
+
+    def __hash__(self):
+        """ Expects that all other properties are transitively determined. """
+        return hash(self.document_path) ^ hash(self.schema_path) \
+            ^ hash(self.code)
+
+    def __lt__(self, other):
+        if self.document_path != other.document_path:
+            return compare_paths_lt(self.document_path, other.document_path)
+        else:
+            return compare_paths_lt(self.schema_path, other.schema_path)
+
+    def __repr__(self):
+        return "{class_name} @ {memptr} ( " \
+               "document_path={document_path}," \
+               "schema_path={schema_path}," \
+               "code={code}," \
+               "constraint={constraint}," \
+               "value={value}," \
+               "info={info} )"\
+               .format(class_name=self.__class__.__name__, memptr=hex(id(self)),  # noqa: E501
+                       document_path=self.document_path,
+                       schema_path=self.schema_path,
+                       code=hex(self.code),
+                       constraint=quote_string(self.constraint),
+                       value=quote_string(self.value),
+                       info=self.info)
+
+    @property
+    def child_errors(self):
+        """
+        A list that contains the individual errors of a bulk validation error.
+        """
+        return self.info[0] if self.is_group_error else None
+
+    @property
+    def definitions_errors(self):
+        """ Dictionary with errors of an *of-rule mapped to the index of the
+            definition it occurred in. Returns :obj:`None` if not applicable.
+            """
+        if not self.is_logic_error:
+            return None
+
+        result = defaultdict(list)
+        for error in self.child_errors:
+            i = error.schema_path[len(self.schema_path)]
+            result[i].append(error)
+        return result
+
+    @property
+    def field(self):
+        """ Field of the contextual mapping, possibly :obj:`None`. """
+        if self.document_path:
+            return self.document_path[-1]
+        else:
+            return None
+
+    @property
+    def is_group_error(self):
+        """ ``True`` for errors of bulk validations. """
+        return bool(self.code & ERROR_GROUP.code)
+
+    @property
+    def is_logic_error(self):
+        """ ``True`` for validation errors against different schemas with
+            *of-rules. """
+        return bool(self.code & LOGICAL.code - ERROR_GROUP.code)
+
+    @property
+    def is_normalization_error(self):
+        """ ``True`` for normalization errors. """
+        return bool(self.code & NORMALIZATION.code)
+
+
+class ErrorList(list):
+    """ A list for :class:`~cerberus.errors.ValidationError` instances that
+        can be queried with the ``in`` keyword for a particular
+        :class:`~cerberus.errors.ErrorDefinition`. """
+    def __contains__(self, error_definition):
+        for code in (x.code for x in self):
+            if code == error_definition.code:
+                return True
+        return False
+
+
+class ErrorTreeNode(MutableMapping):
+    __slots__ = ('descendants', 'errors', 'parent_node', 'path', 'tree_root')
+
+    def __init__(self, path, parent_node):
+        self.parent_node = parent_node
+        self.tree_root = self.parent_node.tree_root
+        self.path = path[:self.parent_node.depth + 1]
+        self.errors = ErrorList()
+        self.descendants = {}
+
+    def __add__(self, error):
+        self.add(error)
+        return self
+
+    def __contains__(self, item):
+        if isinstance(item, ErrorDefinition):
+            return item in self.errors
+        else:
+            return item in self.descendants
+
+    def __delitem__(self, key):
+        del self.descendants[key]
+
+    def __iter__(self):
+        return iter(self.errors)
+
+    def __getitem__(self, item):
+        if isinstance(item, ErrorDefinition):
+            for error in self.errors:
+                if item.code == error.code:
+                    return error
+        else:
+            return self.descendants.get(item)
+
+    def __len__(self):
+        return len(self.errors)
+
+    def __repr__(self):
+        return self.__str__()
+
+    def __setitem__(self, key, value):
+        self.descendants[key] = value
+
+    def __str__(self):
+        return str(self.errors) + ',' + str(self.descendants)
+
+    @property
+    def depth(self):
+        return len(self.path)
+
+    @property
+    def tree_type(self):
+        return self.tree_root.tree_type
+
+    def add(self, error):
+        error_path = self._path_of_(error)
+
+        key = error_path[self.depth]
+        if key not in self.descendants:
+            self[key] = ErrorTreeNode(error_path, self)
+
+        if len(error_path) == self.depth + 1:
+            self[key].errors.append(error)
+            self[key].errors.sort()
+            if error.is_group_error:
+                for child_error in error.child_errors:
+                    self.tree_root += child_error
+        else:
+            self[key] += error
+
+    def _path_of_(self, error):
+        return getattr(error, self.tree_type + '_path')
+
+
+class ErrorTree(ErrorTreeNode):
+    """ Base class for :class:`~cerberus.errors.DocumentErrorTree` and
+        :class:`~cerberus.errors.SchemaErrorTree`. """
+    def __init__(self, errors=[]):
+        self.parent_node = None
+        self.tree_root = self
+        self.path = ()
+        self.errors = ErrorList()
+        self.descendants = {}
+        for error in errors:
+            self += error
+
+    def add(self, error):
+        """ Add an error to the tree.
+
+        :param error: :class:`~cerberus.errors.ValidationError`
+        """
+        if not self._path_of_(error):
+            self.errors.append(error)
+            self.errors.sort()
+        else:
+            super(ErrorTree, self).add(error)
+
+    def fetch_errors_from(self, path):
+        """ Returns all errors for a particular path.
+
+        :param path: :class:`tuple` of :term:`hashable` s.
+        :rtype: :class:`~cerberus.errors.ErrorList`
+        """
+        node = self.fetch_node_from(path)
+        if node is not None:
+            return node.errors
+        else:
+            return ErrorList()
+
+    def fetch_node_from(self, path):
+        """ Returns a node for a path.
+
+        :param path: Tuple of :term:`hashable` s.
+        :rtype: :class:`~cerberus.errors.ErrorTreeNode` or :obj:`None`
+        """
+        context = self
+        for key in path:
+            context = context[key]
+            if context is None:
+                break
+        return context
+
+
+class DocumentErrorTree(ErrorTree):
+    """ Implements a dict-like class to query errors by indexes following the
+        structure of a validated document. """
+    tree_type = 'document'
+
+
+class SchemaErrorTree(ErrorTree):
+    """ Implements a dict-like class to query errors by indexes following the
+        structure of the used schema. """
+    tree_type = 'schema'
+
+
+class BaseErrorHandler(object):
+    """ Base class for all error handlers.
+        Subclasses are identified as error-handlers with an instance-test. """
+    def __init__(self, *args, **kwargs):
+        """ Optionally initialize a new instance. """
+        pass
+
+    def __call__(self, errors):
+        """ Returns errors in a handler-specific format.
+
+        :param errors: An object containing the errors.
+        :type errors: :term:`iterable` of
+                      :class:`~cerberus.errors.ValidationError` instances or a
+                      :class:`~cerberus.Validator` instance
+        """
+        raise NotImplementedError
+
+    def __iter__(self):
+        """ Be a superhero and implement an iterator over errors. """
+        raise NotImplementedError
+
+    def add(self, error):
+        """ Add an error to the errors' container object of a handler.
+
+        :param error: The error to add.
+        :type error: :class:`~cerberus.errors.ValidationError`
+        """
+        raise NotImplementedError
+
+    def emit(self, error):
+        """ Optionally emits an error in the handler's format to a stream.
+            Or light a LED, or even shut down a power plant.
+
+        :param error: The error to emit.
+        :type error: :class:`~cerberus.errors.ValidationError`
+        """
+        pass
+
+    def end(self, validator):
+        """ Gets called when a validation ends.
+
+        :param validator: The calling validator.
+        :type validator: :class:`~cerberus.Validator` """
+        pass
+
+    def extend(self, errors):
+        """ Adds all errors to the handler's container object.
+
+        :param errors: The errors to add.
+        :type errors: :term:`iterable` of
+                      :class:`~cerberus.errors.ValidationError` instances
+        """
+        for error in errors:
+            self.add(error)
+
+    def start(self, validator):
+        """ Gets called when a validation starts.
+
+        :param validator: The calling validator.
+        :type validator: :class:`~cerberus.Validator`
+        """
+        pass
+
+
+class ToyErrorHandler(BaseErrorHandler):
+    def __call__(self, *args, **kwargs):
+        raise RuntimeError('This is not supposed to happen.')
+
+    def clear(self):
+        pass
+
+
+def encode_unicode(f):
+    """Cerberus error messages expect regular binary strings.
+    If unicode is used in a ValidationError message can't be printed.
+
+    This decorator ensures that if legacy Python is used unicode
+    strings are encoded before passing to a function.
+    """
+    @wraps(f)
+    def wrapped(obj, error):
+
+        def _encode(value):
+            """Helper encoding unicode strings into binary utf-8"""
+            if isinstance(value, unicode):  # noqa: F821
+                return value.encode('utf-8')
+            return value
+
+        error = copy(error)
+        error.document_path = _encode(error.document_path)
+        error.schema_path = _encode(error.schema_path)
+        error.constraint = _encode(error.constraint)
+        error.value = _encode(error.value)
+        error.info = _encode(error.info)
+        return f(obj, error)
+
+    return wrapped if PYTHON_VERSION < 3 else f
+
+
+class BasicErrorHandler(BaseErrorHandler):
+    """ Models cerberus' legacy. Returns a :class:`dict`. When mangled
+        through :class:`str` a pretty-formatted representation of that
+        tree is returned.
+    """
+    messages = {0x00: "{0}",
+
+                0x01: "document is missing",
+                0x02: "required field",
+                0x03: "unknown field",
+                0x04: "field '{0}' is required",
+                0x05: "depends on these values: {constraint}",
+                0x06: "{0} must not be present with '{field}'",
+
+                0x21: "'{0}' is not a document, must be a dict",
+                0x22: "empty values not allowed",
+                0x23: "null value not allowed",
+                0x24: "must be of {constraint} type",
+                0x25: "must be of dict type",
+                0x26: "length of list should be {constraint}, it is {0}",
+                0x27: "min length is {constraint}",
+                0x28: "max length is {constraint}",
+
+                0x41: "value does not match regex '{constraint}'",
+                0x42: "min value is {constraint}",
+                0x43: "max value is {constraint}",
+                0x44: "unallowed value {value}",
+                0x45: "unallowed values {0}",
+                0x46: "unallowed value {value}",
+                0x47: "unallowed values {0}",
+
+                0x61: "field '{field}' cannot be coerced: {0}",
+                0x62: "field '{field}' cannot be renamed: {0}",
+                0x63: "field is read-only",
+                0x64: "default value for '{field}' cannot be set: {0}",
+
+                0x81: "mapping doesn't validate subschema: {0}",
+                0x82: "one or more sequence-items don't validate: {0}",
+                0x83: "one or more keys of a mapping  don't validate: {0}",
+                0x84: "one or more values in a mapping don't validate: {0}",
+                0x85: "one or more sequence-items don't validate: {0}",
+
+                0x91: "one or more definitions validate",
+                0x92: "none or more than one rule validate",
+                0x93: "no definitions validate",
+                0x94: "one or more definitions don't validate"
+                }
+
+    def __init__(self, tree=None):
+        self.tree = {} if tree is None else tree
+
+    def __call__(self, errors=None):
+        if errors is not None:
+            self.clear()
+            self.extend(errors)
+        return self.pretty_tree
+
+    def __str__(self):
+        return pformat(self.pretty_tree)
+
+    @property
+    def pretty_tree(self):
+        pretty = deepcopy(self.tree)
+        for field in pretty:
+            self._purge_empty_dicts(pretty[field])
+        return pretty
+
+    @encode_unicode
+    def add(self, error):
+        # Make sure the original error is not altered with
+        # error paths specific to the handler.
+        error = deepcopy(error)
+
+        self._rewrite_error_path(error)
+
+        if error.is_logic_error:
+            self._insert_logic_error(error)
+        elif error.is_group_error:
+            self._insert_group_error(error)
+        elif error.code in self.messages:
+            self._insert_error(error.document_path,
+                               self._format_message(error.field, error))
+
+    def clear(self):
+        self.tree = {}
+
+    def start(self, validator):
+        self.clear()
+
+    def _format_message(self, field, error):
+        return self.messages[error.code].format(
+            *error.info, constraint=error.constraint,
+            field=field, value=error.value)
+
+    def _insert_error(self, path, node):
+        """ Adds an error or sub-tree to :attr:tree.
+
+        :param path: Path to the error.
+        :type path: Tuple of strings and integers.
+        :param node: An error message or a sub-tree.
+        :type node: String or dictionary.
+        """
+        field = path[0]
+        if len(path) == 1:
+            if field in self.tree:
+                subtree = self.tree[field].pop()
+                self.tree[field] += [node, subtree]
+            else:
+                self.tree[field] = [node, {}]
+        elif len(path) >= 1:
+            if field not in self.tree:
+                self.tree[field] = [{}]
+            subtree = self.tree[field][-1]
+
+            if subtree:
+                new = self.__class__(tree=copy(subtree))
+            else:
+                new = self.__class__()
+            new._insert_error(path[1:], node)
+            subtree.update(new.tree)
+
+    def _insert_group_error(self, error):
+        for child_error in error.child_errors:
+            if child_error.is_logic_error:
+                self._insert_logic_error(child_error)
+            elif child_error.is_group_error:
+                self._insert_group_error(child_error)
+            else:
+                self._insert_error(child_error.document_path,
+                                   self._format_message(child_error.field,
+                                                        child_error))
+
+    def _insert_logic_error(self, error):
+        field = error.field
+        self._insert_error(error.document_path,
+                           self._format_message(field, error))
+
+        for definition_errors in error.definitions_errors.values():
+            for child_error in definition_errors:
+                if child_error.is_logic_error:
+                    self._insert_logic_error(child_error)
+                elif child_error.is_group_error:
+                    self._insert_group_error(child_error)
+                else:
+                    self._insert_error(child_error.document_path,
+                                       self._format_message(field, child_error))
+
+    def _purge_empty_dicts(self, error_list):
+        subtree = error_list[-1]
+        if not error_list[-1]:
+            error_list.pop()
+        else:
+            for key in subtree:
+                self._purge_empty_dicts(subtree[key])
+
+    def _rewrite_error_path(self, error, offset=0):
+        """
+        Recursively rewrites the error path to correctly represent logic errors
+        """
+        if error.is_logic_error:
+            self._rewrite_logic_error_path(error, offset)
+        elif error.is_group_error:
+            self._rewrite_group_error_path(error, offset)
+
+    def _rewrite_group_error_path(self, error, offset=0):
+        child_start = len(error.document_path) - offset
+
+        for child_error in error.child_errors:
+            relative_path = child_error.document_path[child_start:]
+            child_error.document_path = error.document_path + relative_path
+
+            self._rewrite_error_path(child_error, offset)
+
+    def _rewrite_logic_error_path(self, error, offset=0):
+        child_start = len(error.document_path) - offset
+
+        for i, definition_errors in error.definitions_errors.items():
+            if not definition_errors:
+                continue
+
+            nodename = '%s definition %s' % (error.rule, i)
+            path = error.document_path + (nodename,)
+
+            for child_error in definition_errors:
+                rel_path = child_error.document_path[child_start:]
+                child_error.document_path = path + rel_path
+
+                self._rewrite_error_path(child_error, offset + 1)
+
+
+class SchemaErrorHandler(BasicErrorHandler):
+    messages = BasicErrorHandler.messages.copy()
+    messages[0x03] = "unknown rule"
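
The BasicErrorHandler above is what turns a validator's internal error list into the
nested message tree exposed as Validator.errors. A minimal sketch of that tree for a
nested document, assuming the upstream cerberus package is importable under its usual
name (after this refactoring the vendored copy lives under mncheck.ext.cerberus):

    from cerberus import Validator

    schema = {'contact': {'type': 'dict',
                          'schema': {'phone': {'type': 'string'}}}}
    v = Validator(schema)
    v({'contact': {'phone': 42}})

    # pretty_tree: message lists keyed by field, with a sub-tree per nested
    # document, e.g. {'contact': [{'phone': ['must be of string type']}]}
    print(v.errors)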

+ 14 - 14
ext/cerberus/platform.py → mncheck/ext/cerberus/platform.py

@@ -1,14 +1,14 @@
-""" Platform-dependent objects """
-
-import sys
-
-
-PYTHON_VERSION = float(sys.version_info[0]) + float(sys.version_info[1]) / 10
-
-
-if PYTHON_VERSION < 3:
-    _str_type = basestring  # noqa: F821
-    _int_types = (int, long)  # noqa: F821
-else:
-    _str_type = str
-    _int_types = (int,)
+""" Platform-dependent objects """
+
+import sys
+
+
+PYTHON_VERSION = float(sys.version_info[0]) + float(sys.version_info[1]) / 10
+
+
+if PYTHON_VERSION < 3:
+    _str_type = basestring  # noqa: F821
+    _int_types = (int, long)  # noqa: F821
+else:
+    _str_type = str
+    _int_types = (int,)
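
The platform module above exists so the rest of the package can perform
version-agnostic type checks. A small usage sketch (the describe() helper is
hypothetical, added only for illustration):

    from cerberus.platform import _str_type, _int_types

    def describe(value):
        # str on Python 3; str or unicode on Python 2
        if isinstance(value, _str_type):
            return 'text'
        if isinstance(value, _int_types):
            return 'integer'
        return type(value).__name__

    print(describe('abc'), describe(7), describe(1.5))  # text integer float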

+ 482 - 482
ext/cerberus/schema.py → mncheck/ext/cerberus/schema.py

@@ -1,482 +1,482 @@
-from __future__ import absolute_import
-
-from collections import (Callable, Hashable, Iterable, Mapping,
-                         MutableMapping, Sequence)
-from copy import copy
-
-from cerberus import errors
-from cerberus.platform import _str_type
-from cerberus.utils import (get_Validator_class, validator_factory,
-                            mapping_hash, TypeDefinition)
-
-
-class _Abort(Exception):
-    pass
-
-
-class SchemaError(Exception):
-    """ Raised when the validation schema is missing, has the wrong format or
-        contains errors. """
-    pass
-
-
-class DefinitionSchema(MutableMapping):
-    """ A dict-subclass for caching of validated schemas. """
-
-    def __new__(cls, *args, **kwargs):
-        if 'SchemaValidator' not in globals():
-            global SchemaValidator
-            SchemaValidator = validator_factory('SchemaValidator',
-                                                SchemaValidatorMixin)
-            types_mapping = SchemaValidator.types_mapping.copy()
-            types_mapping.update({
-                'callable': TypeDefinition('callable', (Callable,), ()),
-                'hashable': TypeDefinition('hashable', (Hashable,), ())
-            })
-            SchemaValidator.types_mapping = types_mapping
-
-        return super(DefinitionSchema, cls).__new__(cls)
-
-    def __init__(self, validator, schema={}):
-        """
-        :param validator: An instance of Validator-(sub-)class that uses this
-                          schema.
-        :param schema: A definition-schema as ``dict``. Defaults to an empty
-                       one.
-        """
-        if not isinstance(validator, get_Validator_class()):
-            raise RuntimeError('validator argument must be a Validator-'
-                               'instance.')
-        self.validator = validator
-
-        if isinstance(schema, _str_type):
-            schema = validator.schema_registry.get(schema, schema)
-
-        if not isinstance(schema, Mapping):
-            try:
-                schema = dict(schema)
-            except Exception:
-                raise SchemaError(
-                    errors.SCHEMA_ERROR_DEFINITION_TYPE.format(schema))
-
-        self.validation_schema = SchemaValidationSchema(validator)
-        self.schema_validator = SchemaValidator(
-            None, allow_unknown=self.validation_schema,
-            error_handler=errors.SchemaErrorHandler,
-            target_schema=schema, target_validator=validator)
-
-        schema = self.expand(schema)
-        self.validate(schema)
-        self.schema = schema
-
-    def __delitem__(self, key):
-        _new_schema = self.schema.copy()
-        try:
-            del _new_schema[key]
-        except ValueError:
-            raise SchemaError("Schema has no field '%s' defined" % key)
-        except Exception as e:
-            raise e
-        else:
-            del self.schema[key]
-
-    def __getitem__(self, item):
-        return self.schema[item]
-
-    def __iter__(self):
-        return iter(self.schema)
-
-    def __len__(self):
-        return len(self.schema)
-
-    def __repr__(self):
-        return str(self)
-
-    def __setitem__(self, key, value):
-        value = self.expand({0: value})[0]
-        self.validate({key: value})
-        self.schema[key] = value
-
-    def __str__(self):
-        return str(self.schema)
-
-    def copy(self):
-        return self.__class__(self.validator, self.schema.copy())
-
-    @classmethod
-    def expand(cls, schema):
-        try:
-            schema = cls._expand_logical_shortcuts(schema)
-            schema = cls._expand_subschemas(schema)
-        except Exception:
-            pass
-        return schema
-
-    @classmethod
-    def _expand_logical_shortcuts(cls, schema):
-        """ Expand agglutinated rules in a definition-schema.
-
-        :param schema: The schema-definition to expand.
-        :return: The expanded schema-definition.
-        """
-        def is_of_rule(x):
-            return isinstance(x, _str_type) and \
-                x.startswith(('allof_', 'anyof_', 'noneof_', 'oneof_'))
-
-        for field in schema:
-            for of_rule in (x for x in schema[field] if is_of_rule(x)):
-                operator, rule = of_rule.split('_')
-                schema[field].update({operator: []})
-                for value in schema[field][of_rule]:
-                    schema[field][operator].append({rule: value})
-                del schema[field][of_rule]
-        return schema
-
-    @classmethod
-    def _expand_subschemas(cls, schema):
-        def has_schema_rule():
-            return isinstance(schema[field], Mapping) and \
-                'schema' in schema[field]
-
-        def has_mapping_schema():
-            """ Tries to determine heuristically if the schema-constraints are
-                aimed to mappings. """
-            try:
-                return all(isinstance(x, Mapping) for x
-                           in schema[field]['schema'].values())
-            except TypeError:
-                return False
-
-        for field in schema:
-            if not has_schema_rule():
-                pass
-            elif has_mapping_schema():
-                schema[field]['schema'] = cls.expand(schema[field]['schema'])
-            else:  # assumes schema-constraints for a sequence
-                schema[field]['schema'] = \
-                    cls.expand({0: schema[field]['schema']})[0]
-
-            for rule in ('keyschema', 'valueschema'):
-                if rule in schema[field]:
-                    schema[field][rule] = \
-                        cls.expand({0: schema[field][rule]})[0]
-
-            for rule in ('allof', 'anyof', 'items', 'noneof', 'oneof'):
-                if rule in schema[field]:
-                    if not isinstance(schema[field][rule], Sequence):
-                        continue
-                    new_rules_definition = []
-                    for item in schema[field][rule]:
-                        new_rules_definition.append(cls.expand({0: item})[0])
-                    schema[field][rule] = new_rules_definition
-        return schema
-
-    def update(self, schema):
-        try:
-            schema = self.expand(schema)
-            _new_schema = self.schema.copy()
-            _new_schema.update(schema)
-            self.validate(_new_schema)
-        except ValueError:
-            raise SchemaError(errors.SCHEMA_ERROR_DEFINITION_TYPE
-                              .format(schema))
-        except Exception as e:
-            raise e
-        else:
-            self.schema = _new_schema
-
-    def regenerate_validation_schema(self):
-        self.validation_schema = SchemaValidationSchema(self.validator)
-
-    def validate(self, schema=None):
-        if schema is None:
-            schema = self.schema
-        _hash = (mapping_hash(schema),
-                 mapping_hash(self.validator.types_mapping))
-        if _hash not in self.validator._valid_schemas:
-            self._validate(schema)
-            self.validator._valid_schemas.add(_hash)
-
-    def _validate(self, schema):
-        """ Validates a schema that defines rules against supported rules.
-
-        :param schema: The schema to be validated as a legal cerberus schema
-                       according to the rules of this Validator object.
-        """
-        if isinstance(schema, _str_type):
-            schema = self.validator.schema_registry.get(schema, schema)
-
-        if schema is None:
-            raise SchemaError(errors.SCHEMA_ERROR_MISSING)
-
-        schema = copy(schema)
-        for field in schema:
-            if isinstance(schema[field], _str_type):
-                schema[field] = rules_set_registry.get(schema[field],
-                                                       schema[field])
-
-        if not self.schema_validator(schema, normalize=False):
-            raise SchemaError(self.schema_validator.errors)
-
-
-class UnvalidatedSchema(DefinitionSchema):
-    def __init__(self, schema={}):
-        if not isinstance(schema, Mapping):
-            schema = dict(schema)
-        self.schema = schema
-
-    def validate(self, schema):
-        pass
-
-    def copy(self):
-        # Override ancestor's copy, because
-        # UnvalidatedSchema does not have .validator:
-        return self.__class__(self.schema.copy())
-
-
-class SchemaValidationSchema(UnvalidatedSchema):
-    def __init__(self, validator):
-        self.schema = {'allow_unknown': False,
-                       'schema': validator.rules,
-                       'type': 'dict'}
-
-
-class SchemaValidatorMixin(object):
-    """ This validator is extended to validate schemas passed to a Cerberus
-        validator. """
-    @property
-    def known_rules_set_refs(self):
-        """ The encountered references to rules set registry items. """
-        return self._config.get('known_rules_set_refs', ())
-
-    @known_rules_set_refs.setter
-    def known_rules_set_refs(self, value):
-        self._config['known_rules_set_refs'] = value
-
-    @property
-    def known_schema_refs(self):
-        """ The encountered references to schema registry items. """
-        return self._config.get('known_schema_refs', ())
-
-    @known_schema_refs.setter
-    def known_schema_refs(self, value):
-        self._config['known_schema_refs'] = value
-
-    @property
-    def target_schema(self):
-        """ The schema that is being validated. """
-        return self._config['target_schema']
-
-    @property
-    def target_validator(self):
-        """ The validator whose schema is being validated. """
-        return self._config['target_validator']
-
-    def _validate_logical(self, rule, field, value):
-        """ {'allowed': ('allof', 'anyof', 'noneof', 'oneof')} """
-        if not isinstance(value, Sequence):
-            self._error(field, errors.BAD_TYPE)
-            return
-
-        validator = self._get_child_validator(
-            document_crumb=rule, allow_unknown=False,
-            schema=self.target_validator.validation_rules)
-
-        for constraints in value:
-            _hash = (mapping_hash({'turing': constraints}),
-                     mapping_hash(self.target_validator.types_mapping))
-            if _hash in self.target_validator._valid_schemas:
-                continue
-
-            validator(constraints, normalize=False)
-            if validator._errors:
-                self._error(validator._errors)
-            else:
-                self.target_validator._valid_schemas.add(_hash)
-
-    def _validator_bulk_schema(self, field, value):
-        # resolve schema registry reference
-        if isinstance(value, _str_type):
-            if value in self.known_rules_set_refs:
-                return
-            else:
-                self.known_rules_set_refs += (value,)
-            definition = self.target_validator.rules_set_registry.get(value)
-            if definition is None:
-                self._error(field, 'Rules set definition %s not found.' % value)
-                return
-            else:
-                value = definition
-
-        _hash = (mapping_hash({'turing': value}),
-                 mapping_hash(self.target_validator.types_mapping))
-        if _hash in self.target_validator._valid_schemas:
-            return
-
-        validator = self._get_child_validator(
-            document_crumb=field, allow_unknown=False,
-            schema=self.target_validator.rules)
-        validator(value, normalize=False)
-        if validator._errors:
-            self._error(validator._errors)
-        else:
-            self.target_validator._valid_schemas.add(_hash)
-
-    def _validator_dependencies(self, field, value):
-        if isinstance(value, _str_type):
-            pass
-        elif isinstance(value, Mapping):
-            validator = self._get_child_validator(
-                document_crumb=field,
-                schema={'valueschema': {'type': 'list'}},
-                allow_unknown=True
-            )
-            if not validator(value, normalize=False):
-                self._error(validator._errors)
-        elif isinstance(value, Sequence):
-            if not all(isinstance(x, Hashable) for x in value):
-                path = self.document_path + (field,)
-                self._error(path, 'All dependencies must be a hashable type.')
-
-    def _validator_handler(self, field, value):
-        if isinstance(value, Callable):
-            return
-        if isinstance(value, _str_type):
-            if value not in self.target_validator.validators + \
-                    self.target_validator.coercers:
-                self._error(field, '%s is no valid coercer' % value)
-        elif isinstance(value, Iterable):
-            for handler in value:
-                self._validator_handler(field, handler)
-
-    def _validator_items(self, field, value):
-        for i, schema in enumerate(value):
-            self._validator_bulk_schema((field, i), schema)
-
-    def _validator_schema(self, field, value):
-        try:
-            value = self._handle_schema_reference_for_validator(field, value)
-        except _Abort:
-            return
-
-        _hash = (mapping_hash(value),
-                 mapping_hash(self.target_validator.types_mapping))
-        if _hash in self.target_validator._valid_schemas:
-            return
-
-        validator = self._get_child_validator(
-            document_crumb=field,
-            schema=None, allow_unknown=self.root_allow_unknown)
-        validator(self._expand_rules_set_refs(value), normalize=False)
-        if validator._errors:
-            self._error(validator._errors)
-        else:
-            self.target_validator._valid_schemas.add(_hash)
-
-    def _handle_schema_reference_for_validator(self, field, value):
-        if not isinstance(value, _str_type):
-            return value
-        if value in self.known_schema_refs:
-            raise _Abort
-
-        self.known_schema_refs += (value,)
-        definition = self.target_validator.schema_registry.get(value)
-        if definition is None:
-            path = self.document_path + (field,)
-            self._error(path, 'Schema definition {} not found.'.format(value))
-            raise _Abort
-        return definition
-
-    def _expand_rules_set_refs(self, schema):
-        result = {}
-        for k, v in schema.items():
-            if isinstance(v, _str_type):
-                result[k] = self.target_validator.rules_set_registry.get(v)
-            else:
-                result[k] = v
-        return result
-
-    def _validator_type(self, field, value):
-        value = (value,) if isinstance(value, _str_type) else value
-        invalid_constraints = ()
-        for constraint in value:
-            if constraint not in self.target_validator.types:
-                invalid_constraints += (constraint,)
-        if invalid_constraints:
-            path = self.document_path + (field,)
-            self._error(path, 'Unsupported types: %s' % invalid_constraints)
-
-####
-
-
-class Registry(object):
-    """ A registry to store and retrieve schemas and parts of it by a name
-    that can be used in validation schemas.
-
-    :param definitions: Optional, initial definitions.
-    :type definitions: any :term:`mapping` """
-
-    def __init__(self, definitions={}):
-        self._storage = {}
-        self.extend(definitions)
-
-    def add(self, name, definition):
-        """ Register a definition to the registry. Existing definitions are
-        replaced silently.
-
-        :param name: The name which can be used as reference in a validation
-                     schema.
-        :type name: :class:`str`
-        :param definition: The definition.
-        :type definition: any :term:`mapping` """
-        self._storage[name] = self._expand_definition(definition)
-
-    def all(self):
-        """ Returns a :class:`dict` with all registered definitions mapped to
-        their name. """
-        return self._storage
-
-    def clear(self):
-        """ Purge all definitions in the registry. """
-        self._storage.clear()
-
-    def extend(self, definitions):
-        """ Add several definitions at once. Existing definitions are
-        replaced silently.
-
-        :param definitions: The names and definitions.
-        :type definitions: a :term:`mapping` or an :term:`iterable` with
-                           two-value :class:`tuple` s """
-        for name, definition in dict(definitions).items():
-            self.add(name, definition)
-
-    def get(self, name, default=None):
-        """ Retrieve a definition from the registry.
-
-        :param name: The reference that points to the definition.
-        :type name: :class:`str`
-        :param default: Return value if the reference isn't registered. """
-        return self._storage.get(name, default)
-
-    def remove(self, *names):
-        """ Unregister definitions from the registry.
-
-        :param names: The names of the definitions that are to be
-                      unregistered. """
-        for name in names:
-            self._storage.pop(name, None)
-
-
-class SchemaRegistry(Registry):
-    @classmethod
-    def _expand_definition(cls, definition):
-        return DefinitionSchema.expand(definition)
-
-
-class RulesSetRegistry(Registry):
-    @classmethod
-    def _expand_definition(cls, definition):
-        return DefinitionSchema.expand({0: definition})[0]
-
-
-schema_registry, rules_set_registry = SchemaRegistry(), RulesSetRegistry()
+from __future__ import absolute_import
+
+from collections import (Callable, Hashable, Iterable, Mapping,
+                         MutableMapping, Sequence)
+from copy import copy
+
+from cerberus import errors
+from cerberus.platform import _str_type
+from cerberus.utils import (get_Validator_class, validator_factory,
+                            mapping_hash, TypeDefinition)
+
+
+class _Abort(Exception):
+    pass
+
+
+class SchemaError(Exception):
+    """ Raised when the validation schema is missing, has the wrong format or
+        contains errors. """
+    pass
+
+
+class DefinitionSchema(MutableMapping):
+    """ A dict-subclass for caching of validated schemas. """
+
+    def __new__(cls, *args, **kwargs):
+        if 'SchemaValidator' not in globals():
+            global SchemaValidator
+            SchemaValidator = validator_factory('SchemaValidator',
+                                                SchemaValidatorMixin)
+            types_mapping = SchemaValidator.types_mapping.copy()
+            types_mapping.update({
+                'callable': TypeDefinition('callable', (Callable,), ()),
+                'hashable': TypeDefinition('hashable', (Hashable,), ())
+            })
+            SchemaValidator.types_mapping = types_mapping
+
+        return super(DefinitionSchema, cls).__new__(cls)
+
+    def __init__(self, validator, schema={}):
+        """
+        :param validator: An instance of Validator-(sub-)class that uses this
+                          schema.
+        :param schema: A definition-schema as ``dict``. Defaults to an empty
+                       one.
+        """
+        if not isinstance(validator, get_Validator_class()):
+            raise RuntimeError('validator argument must be a Validator-'
+                               'instance.')
+        self.validator = validator
+
+        if isinstance(schema, _str_type):
+            schema = validator.schema_registry.get(schema, schema)
+
+        if not isinstance(schema, Mapping):
+            try:
+                schema = dict(schema)
+            except Exception:
+                raise SchemaError(
+                    errors.SCHEMA_ERROR_DEFINITION_TYPE.format(schema))
+
+        self.validation_schema = SchemaValidationSchema(validator)
+        self.schema_validator = SchemaValidator(
+            None, allow_unknown=self.validation_schema,
+            error_handler=errors.SchemaErrorHandler,
+            target_schema=schema, target_validator=validator)
+
+        schema = self.expand(schema)
+        self.validate(schema)
+        self.schema = schema
+
+    def __delitem__(self, key):
+        _new_schema = self.schema.copy()
+        try:
+            del _new_schema[key]
+        except ValueError:
+            raise SchemaError("Schema has no field '%s' defined" % key)
+        except Exception as e:
+            raise e
+        else:
+            del self.schema[key]
+
+    def __getitem__(self, item):
+        return self.schema[item]
+
+    def __iter__(self):
+        return iter(self.schema)
+
+    def __len__(self):
+        return len(self.schema)
+
+    def __repr__(self):
+        return str(self)
+
+    def __setitem__(self, key, value):
+        value = self.expand({0: value})[0]
+        self.validate({key: value})
+        self.schema[key] = value
+
+    def __str__(self):
+        return str(self.schema)
+
+    def copy(self):
+        return self.__class__(self.validator, self.schema.copy())
+
+    @classmethod
+    def expand(cls, schema):
+        try:
+            schema = cls._expand_logical_shortcuts(schema)
+            schema = cls._expand_subschemas(schema)
+        except Exception:
+            pass
+        return schema
+
+    @classmethod
+    def _expand_logical_shortcuts(cls, schema):
+        """ Expand agglutinated rules in a definition-schema.
+
+        :param schema: The schema-definition to expand.
+        :return: The expanded schema-definition.
+        """
+        def is_of_rule(x):
+            return isinstance(x, _str_type) and \
+                x.startswith(('allof_', 'anyof_', 'noneof_', 'oneof_'))
+
+        for field in schema:
+            for of_rule in (x for x in schema[field] if is_of_rule(x)):
+                operator, rule = of_rule.split('_')
+                schema[field].update({operator: []})
+                for value in schema[field][of_rule]:
+                    schema[field][operator].append({rule: value})
+                del schema[field][of_rule]
+        return schema
+
+    @classmethod
+    def _expand_subschemas(cls, schema):
+        def has_schema_rule():
+            return isinstance(schema[field], Mapping) and \
+                'schema' in schema[field]
+
+        def has_mapping_schema():
+            """ Tries to determine heuristically if the schema-constraints are
+                aimed to mappings. """
+            try:
+                return all(isinstance(x, Mapping) for x
+                           in schema[field]['schema'].values())
+            except TypeError:
+                return False
+
+        for field in schema:
+            if not has_schema_rule():
+                pass
+            elif has_mapping_schema():
+                schema[field]['schema'] = cls.expand(schema[field]['schema'])
+            else:  # assumes schema-constraints for a sequence
+                schema[field]['schema'] = \
+                    cls.expand({0: schema[field]['schema']})[0]
+
+            for rule in ('keyschema', 'valueschema'):
+                if rule in schema[field]:
+                    schema[field][rule] = \
+                        cls.expand({0: schema[field][rule]})[0]
+
+            for rule in ('allof', 'anyof', 'items', 'noneof', 'oneof'):
+                if rule in schema[field]:
+                    if not isinstance(schema[field][rule], Sequence):
+                        continue
+                    new_rules_definition = []
+                    for item in schema[field][rule]:
+                        new_rules_definition.append(cls.expand({0: item})[0])
+                    schema[field][rule] = new_rules_definition
+        return schema
+
+    def update(self, schema):
+        try:
+            schema = self.expand(schema)
+            _new_schema = self.schema.copy()
+            _new_schema.update(schema)
+            self.validate(_new_schema)
+        except ValueError:
+            raise SchemaError(errors.SCHEMA_ERROR_DEFINITION_TYPE
+                              .format(schema))
+        except Exception as e:
+            raise e
+        else:
+            self.schema = _new_schema
+
+    def regenerate_validation_schema(self):
+        self.validation_schema = SchemaValidationSchema(self.validator)
+
+    def validate(self, schema=None):
+        if schema is None:
+            schema = self.schema
+        _hash = (mapping_hash(schema),
+                 mapping_hash(self.validator.types_mapping))
+        if _hash not in self.validator._valid_schemas:
+            self._validate(schema)
+            self.validator._valid_schemas.add(_hash)
+
+    def _validate(self, schema):
+        """ Validates a schema that defines rules against supported rules.
+
+        :param schema: The schema to be validated as a legal cerberus schema
+                       according to the rules of this Validator object.
+        """
+        if isinstance(schema, _str_type):
+            schema = self.validator.schema_registry.get(schema, schema)
+
+        if schema is None:
+            raise SchemaError(errors.SCHEMA_ERROR_MISSING)
+
+        schema = copy(schema)
+        for field in schema:
+            if isinstance(schema[field], _str_type):
+                schema[field] = rules_set_registry.get(schema[field],
+                                                       schema[field])
+
+        if not self.schema_validator(schema, normalize=False):
+            raise SchemaError(self.schema_validator.errors)
+
+
+class UnvalidatedSchema(DefinitionSchema):
+    def __init__(self, schema={}):
+        if not isinstance(schema, Mapping):
+            schema = dict(schema)
+        self.schema = schema
+
+    def validate(self, schema):
+        pass
+
+    def copy(self):
+        # Override ancestor's copy, because
+        # UnvalidatedSchema does not have .validator:
+        return self.__class__(self.schema.copy())
+
+
+class SchemaValidationSchema(UnvalidatedSchema):
+    def __init__(self, validator):
+        self.schema = {'allow_unknown': False,
+                       'schema': validator.rules,
+                       'type': 'dict'}
+
+
+class SchemaValidatorMixin(object):
+    """ This validator is extended to validate schemas passed to a Cerberus
+        validator. """
+    @property
+    def known_rules_set_refs(self):
+        """ The encountered references to rules set registry items. """
+        return self._config.get('known_rules_set_refs', ())
+
+    @known_rules_set_refs.setter
+    def known_rules_set_refs(self, value):
+        self._config['known_rules_set_refs'] = value
+
+    @property
+    def known_schema_refs(self):
+        """ The encountered references to schema registry items. """
+        return self._config.get('known_schema_refs', ())
+
+    @known_schema_refs.setter
+    def known_schema_refs(self, value):
+        self._config['known_schema_refs'] = value
+
+    @property
+    def target_schema(self):
+        """ The schema that is being validated. """
+        return self._config['target_schema']
+
+    @property
+    def target_validator(self):
+        """ The validator whose schema is being validated. """
+        return self._config['target_validator']
+
+    def _validate_logical(self, rule, field, value):
+        """ {'allowed': ('allof', 'anyof', 'noneof', 'oneof')} """
+        if not isinstance(value, Sequence):
+            self._error(field, errors.BAD_TYPE)
+            return
+
+        validator = self._get_child_validator(
+            document_crumb=rule, allow_unknown=False,
+            schema=self.target_validator.validation_rules)
+
+        for constraints in value:
+            _hash = (mapping_hash({'turing': constraints}),
+                     mapping_hash(self.target_validator.types_mapping))
+            if _hash in self.target_validator._valid_schemas:
+                continue
+
+            validator(constraints, normalize=False)
+            if validator._errors:
+                self._error(validator._errors)
+            else:
+                self.target_validator._valid_schemas.add(_hash)
+
+    def _validator_bulk_schema(self, field, value):
+        # resolve schema registry reference
+        if isinstance(value, _str_type):
+            if value in self.known_rules_set_refs:
+                return
+            else:
+                self.known_rules_set_refs += (value,)
+            definition = self.target_validator.rules_set_registry.get(value)
+            if definition is None:
+                self._error(field, 'Rules set definition %s not found.' % value)
+                return
+            else:
+                value = definition
+
+        _hash = (mapping_hash({'turing': value}),
+                 mapping_hash(self.target_validator.types_mapping))
+        if _hash in self.target_validator._valid_schemas:
+            return
+
+        validator = self._get_child_validator(
+            document_crumb=field, allow_unknown=False,
+            schema=self.target_validator.rules)
+        validator(value, normalize=False)
+        if validator._errors:
+            self._error(validator._errors)
+        else:
+            self.target_validator._valid_schemas.add(_hash)
+
+    def _validator_dependencies(self, field, value):
+        if isinstance(value, _str_type):
+            pass
+        elif isinstance(value, Mapping):
+            validator = self._get_child_validator(
+                document_crumb=field,
+                schema={'valueschema': {'type': 'list'}},
+                allow_unknown=True
+            )
+            if not validator(value, normalize=False):
+                self._error(validator._errors)
+        elif isinstance(value, Sequence):
+            if not all(isinstance(x, Hashable) for x in value):
+                path = self.document_path + (field,)
+                self._error(path, 'All dependencies must be a hashable type.')
+
+    def _validator_handler(self, field, value):
+        if isinstance(value, Callable):
+            return
+        if isinstance(value, _str_type):
+            if value not in self.target_validator.validators + \
+                    self.target_validator.coercers:
+                self._error(field, '%s is no valid coercer' % value)
+        elif isinstance(value, Iterable):
+            for handler in value:
+                self._validator_handler(field, handler)
+
+    def _validator_items(self, field, value):
+        for i, schema in enumerate(value):
+            self._validator_bulk_schema((field, i), schema)
+
+    def _validator_schema(self, field, value):
+        try:
+            value = self._handle_schema_reference_for_validator(field, value)
+        except _Abort:
+            return
+
+        _hash = (mapping_hash(value),
+                 mapping_hash(self.target_validator.types_mapping))
+        if _hash in self.target_validator._valid_schemas:
+            return
+
+        validator = self._get_child_validator(
+            document_crumb=field,
+            schema=None, allow_unknown=self.root_allow_unknown)
+        validator(self._expand_rules_set_refs(value), normalize=False)
+        if validator._errors:
+            self._error(validator._errors)
+        else:
+            self.target_validator._valid_schemas.add(_hash)
+
+    def _handle_schema_reference_for_validator(self, field, value):
+        if not isinstance(value, _str_type):
+            return value
+        if value in self.known_schema_refs:
+            raise _Abort
+
+        self.known_schema_refs += (value,)
+        definition = self.target_validator.schema_registry.get(value)
+        if definition is None:
+            path = self.document_path + (field,)
+            self._error(path, 'Schema definition {} not found.'.format(value))
+            raise _Abort
+        return definition
+
+    def _expand_rules_set_refs(self, schema):
+        result = {}
+        for k, v in schema.items():
+            if isinstance(v, _str_type):
+                result[k] = self.target_validator.rules_set_registry.get(v)
+            else:
+                result[k] = v
+        return result
+
+    def _validator_type(self, field, value):
+        value = (value,) if isinstance(value, _str_type) else value
+        invalid_constraints = ()
+        for constraint in value:
+            if constraint not in self.target_validator.types:
+                invalid_constraints += (constraint,)
+        if invalid_constraints:
+            path = self.document_path + (field,)
+            self._error(path, 'Unsupported types: %s' % invalid_constraints)
+
+####
+
+
+class Registry(object):
+    """ A registry to store and retrieve schemas and parts of it by a name
+    that can be used in validation schemas.
+
+    :param definitions: Optional, initial definitions.
+    :type definitions: any :term:`mapping` """
+
+    def __init__(self, definitions={}):
+        self._storage = {}
+        self.extend(definitions)
+
+    def add(self, name, definition):
+        """ Register a definition to the registry. Existing definitions are
+        replaced silently.
+
+        :param name: The name which can be used as reference in a validation
+                     schema.
+        :type name: :class:`str`
+        :param definition: The definition.
+        :type definition: any :term:`mapping` """
+        self._storage[name] = self._expand_definition(definition)
+
+    def all(self):
+        """ Returns a :class:`dict` with all registered definitions mapped to
+        their name. """
+        return self._storage
+
+    def clear(self):
+        """ Purge all definitions in the registry. """
+        self._storage.clear()
+
+    def extend(self, definitions):
+        """ Add several definitions at once. Existing definitions are
+        replaced silently.
+
+        :param definitions: The names and definitions.
+        :type definitions: a :term:`mapping` or an :term:`iterable` with
+                           two-value :class:`tuple` s """
+        for name, definition in dict(definitions).items():
+            self.add(name, definition)
+
+    def get(self, name, default=None):
+        """ Retrieve a definition from the registry.
+
+        :param name: The reference that points to the definition.
+        :type name: :class:`str`
+        :param default: Return value if the reference isn't registered. """
+        return self._storage.get(name, default)
+
+    def remove(self, *names):
+        """ Unregister definitions from the registry.
+
+        :param names: The names of the definitions that are to be
+                      unregistered. """
+        for name in names:
+            self._storage.pop(name, None)
+
+
+class SchemaRegistry(Registry):
+    @classmethod
+    def _expand_definition(cls, definition):
+        return DefinitionSchema.expand(definition)
+
+
+class RulesSetRegistry(Registry):
+    @classmethod
+    def _expand_definition(cls, definition):
+        return DefinitionSchema.expand({0: definition})[0]
+
+
+schema_registry, rules_set_registry = SchemaRegistry(), RulesSetRegistry()
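
The module above ends by instantiating the two module-level registries,
schema_registry and rules_set_registry. A hedged sketch of how they are used,
assuming the upstream cerberus package is importable; the registered names
('user', 'non_empty_string') are made up for illustration:

    from cerberus import Validator, schema_registry, rules_set_registry

    schema_registry.add('user', {'name': {'type': 'string', 'required': True}})
    rules_set_registry.add('non_empty_string',
                           {'type': 'string', 'minlength': 1})

    # String references are resolved by DefinitionSchema during validation.
    v = Validator({'author': {'type': 'dict', 'schema': 'user'},
                   'title': 'non_empty_string'})
    print(v({'author': {'name': 'Ada'}, 'title': 'Notes'}))  # True
    print(v({'author': {}, 'title': ''}))                    # False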

+ 144 - 144
ext/cerberus/tests/__init__.py → mncheck/ext/cerberus/tests/__init__.py

@@ -1,144 +1,144 @@
-# -*- coding: utf-8 -*-
-
-import pytest
-
-from cerberus import errors, Validator, SchemaError, DocumentError
-from cerberus.tests.conftest import sample_schema
-
-
-def assert_exception(exception, document={}, schema=None, validator=None,
-                     msg=None):
-    """ Tests whether a specific exception is raised. Optionally also tests
-        whether the exception message is as expected. """
-    if validator is None:
-        validator = Validator()
-    if msg is None:
-        with pytest.raises(exception) as excinfo:
-            validator(document, schema)
-    else:
-        with pytest.raises(exception, message=msg) as excinfo:  # noqa: F841
-            validator(document, schema)
-
-
-def assert_schema_error(*args):
-    """ Tests whether a validation raises an exception due to a malformed
-        schema. """
-    assert_exception(SchemaError, *args)
-
-
-def assert_document_error(*args):
-    """ Tests whether a validation raises an exception due to a malformed
-        document. """
-    assert_exception(DocumentError, *args)
-
-
-def assert_fail(document, schema=None, validator=None, update=False,
-                error=None, errors=None, child_errors=None):
-    """ Tests whether a validation fails. """
-    if validator is None:
-        validator = Validator(sample_schema)
-    result = validator(document, schema, update)
-    assert isinstance(result, bool)
-    assert not result
-
-    actual_errors = validator._errors
-
-    assert not (error is not None and errors is not None)
-    assert not (errors is not None and child_errors is not None), (
-        'child_errors can only be tested in '
-        'conjunction with the error parameter'
-    )
-    assert not (child_errors is not None and error is None)
-    if error is not None:
-        assert len(actual_errors) == 1
-        assert_has_error(actual_errors, *error)
-
-        if child_errors is not None:
-            assert len(actual_errors[0].child_errors) == len(child_errors)
-            assert_has_errors(actual_errors[0].child_errors, child_errors)
-
-    elif errors is not None:
-        assert len(actual_errors) == len(errors)
-        assert_has_errors(actual_errors, errors)
-
-    return actual_errors
-
-
-def assert_success(document, schema=None, validator=None, update=False):
-    """ Tests whether a validation succeeds. """
-    if validator is None:
-        validator = Validator(sample_schema)
-    result = validator(document, schema, update)
-    assert isinstance(result, bool)
-    if not result:
-        raise AssertionError(validator.errors)
-
-
-def assert_has_error(_errors, d_path, s_path, error_def, constraint, info=()):
-    if not isinstance(d_path, tuple):
-        d_path = (d_path,)
-    if not isinstance(info, tuple):
-        info = (info,)
-
-    assert isinstance(_errors, errors.ErrorList)
-
-    for i, error in enumerate(_errors):
-        assert isinstance(error, errors.ValidationError)
-        try:
-            assert error.document_path == d_path
-            assert error.schema_path == s_path
-            assert error.code == error_def.code
-            assert error.rule == error_def.rule
-            assert error.constraint == constraint
-            if not error.is_group_error:
-                assert error.info == info
-        except AssertionError:
-            pass
-        except Exception:
-            raise
-        else:
-            break
-    else:
-        raise AssertionError("""
-        Error with properties:
-          document_path={doc_path}
-          schema_path={schema_path}
-          code={code}
-          constraint={constraint}
-          info={info}
-        not found in errors:
-        {errors}
-        """.format(doc_path=d_path, schema_path=s_path,
-                   code=hex(error.code), info=info,
-                   constraint=constraint, errors=_errors))
-    return i
-
-
-def assert_has_errors(_errors, _exp_errors):
-    assert isinstance(_exp_errors, list)
-    for error in _exp_errors:
-        assert isinstance(error, tuple)
-        assert_has_error(_errors, *error)
-
-
-def assert_not_has_error(_errors, *args, **kwargs):
-    try:
-        assert_has_error(_errors, *args, **kwargs)
-    except AssertionError:
-        pass
-    except Exception as e:
-        raise e
-    else:
-        raise AssertionError('An unexpected error occurred.')
-
-
-def assert_bad_type(field, data_type, value):
-    assert_fail({field: value},
-                error=(field, (field, 'type'), errors.BAD_TYPE, data_type))
-
-
-def assert_normalized(document, expected, schema=None, validator=None):
-    if validator is None:
-        validator = Validator(sample_schema)
-    assert_success(document, schema, validator)
-    assert validator.document == expected
+# -*- coding: utf-8 -*-
+
+import pytest
+
+from cerberus import errors, Validator, SchemaError, DocumentError
+from cerberus.tests.conftest import sample_schema
+
+
+def assert_exception(exception, document={}, schema=None, validator=None,
+                     msg=None):
+    """ Tests whether a specific exception is raised. Optionally also tests
+        whether the exception message is as expected. """
+    if validator is None:
+        validator = Validator()
+    if msg is None:
+        with pytest.raises(exception) as excinfo:
+            validator(document, schema)
+    else:
+        with pytest.raises(exception, message=msg) as excinfo:  # noqa: F841
+            validator(document, schema)
+
+
+def assert_schema_error(*args):
+    """ Tests whether a validation raises an exception due to a malformed
+        schema. """
+    assert_exception(SchemaError, *args)
+
+
+def assert_document_error(*args):
+    """ Tests whether a validation raises an exception due to a malformed
+        document. """
+    assert_exception(DocumentError, *args)
+
+
+def assert_fail(document, schema=None, validator=None, update=False,
+                error=None, errors=None, child_errors=None):
+    """ Tests whether a validation fails. """
+    if validator is None:
+        validator = Validator(sample_schema)
+    result = validator(document, schema, update)
+    assert isinstance(result, bool)
+    assert not result
+
+    actual_errors = validator._errors
+
+    assert not (error is not None and errors is not None)
+    assert not (errors is not None and child_errors is not None), (
+        'child_errors can only be tested in '
+        'conjunction with the error parameter'
+    )
+    assert not (child_errors is not None and error is None)
+    if error is not None:
+        assert len(actual_errors) == 1
+        assert_has_error(actual_errors, *error)
+
+        if child_errors is not None:
+            assert len(actual_errors[0].child_errors) == len(child_errors)
+            assert_has_errors(actual_errors[0].child_errors, child_errors)
+
+    elif errors is not None:
+        assert len(actual_errors) == len(errors)
+        assert_has_errors(actual_errors, errors)
+
+    return actual_errors
+
+
+def assert_success(document, schema=None, validator=None, update=False):
+    """ Tests whether a validation succeeds. """
+    if validator is None:
+        validator = Validator(sample_schema)
+    result = validator(document, schema, update)
+    assert isinstance(result, bool)
+    if not result:
+        raise AssertionError(validator.errors)
+
+
+def assert_has_error(_errors, d_path, s_path, error_def, constraint, info=()):
+    if not isinstance(d_path, tuple):
+        d_path = (d_path,)
+    if not isinstance(info, tuple):
+        info = (info,)
+
+    assert isinstance(_errors, errors.ErrorList)
+
+    for i, error in enumerate(_errors):
+        assert isinstance(error, errors.ValidationError)
+        try:
+            assert error.document_path == d_path
+            assert error.schema_path == s_path
+            assert error.code == error_def.code
+            assert error.rule == error_def.rule
+            assert error.constraint == constraint
+            if not error.is_group_error:
+                assert error.info == info
+        except AssertionError:
+            pass
+        except Exception:
+            raise
+        else:
+            break
+    else:
+        raise AssertionError("""
+        Error with properties:
+          document_path={doc_path}
+          schema_path={schema_path}
+          code={code}
+          constraint={constraint}
+          info={info}
+        not found in errors:
+        {errors}
+        """.format(doc_path=d_path, schema_path=s_path,
+                   code=hex(error.code), info=info,
+                   constraint=constraint, errors=_errors))
+    return i
+
+
+def assert_has_errors(_errors, _exp_errors):
+    assert isinstance(_exp_errors, list)
+    for error in _exp_errors:
+        assert isinstance(error, tuple)
+        assert_has_error(_errors, *error)
+
+
+def assert_not_has_error(_errors, *args, **kwargs):
+    try:
+        assert_has_error(_errors, *args, **kwargs)
+    except AssertionError:
+        pass
+    except Exception as e:
+        raise e
+    else:
+        raise AssertionError('An unexpected error occurred.')
+
+
+def assert_bad_type(field, data_type, value):
+    assert_fail({field: value},
+                error=(field, (field, 'type'), errors.BAD_TYPE, data_type))
+
+
+def assert_normalized(document, expected, schema=None, validator=None):
+    if validator is None:
+        validator = Validator(sample_schema)
+    assert_success(document, schema, validator)
+    assert validator.document == expected
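
The assertion helpers above are shared by the test modules that follow and
validate against sample_schema from conftest.py by default. A minimal sketch of
a test written with them (the test name is hypothetical):

    from cerberus.tests import assert_bad_type, assert_fail, assert_success

    def test_an_integer_bounds():
        assert_success({'an_integer': 50})    # within min=1, max=100
        assert_fail({'an_integer': 0})        # below the 'min' constraint
        assert_bad_type('an_integer', 'integer', 'not a number')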

+ 134 - 134
ext/cerberus/tests/conftest.py → mncheck/ext/cerberus/tests/conftest.py

@@ -1,134 +1,134 @@
-# -*- coding: utf-8 -*-
-
-from copy import deepcopy
-
-import pytest
-
-from cerberus import Validator
-
-
-@pytest.fixture
-def document():
-    return deepcopy(sample_document)
-
-
-@pytest.fixture
-def schema():
-    return deepcopy(sample_schema)
-
-
-@pytest.fixture
-def validator():
-    return Validator(sample_schema)
-
-
-sample_schema = {
-    'a_string': {
-        'type': 'string',
-        'minlength': 2,
-        'maxlength': 10
-    },
-    'a_binary': {
-        'type': 'binary',
-        'minlength': 2,
-        'maxlength': 10
-    },
-    'a_nullable_integer': {
-        'type': 'integer',
-        'nullable': True
-    },
-    'an_integer': {
-        'type': 'integer',
-        'min': 1,
-        'max': 100,
-    },
-    'a_restricted_integer': {
-        'type': 'integer',
-        'allowed': [-1, 0, 1],
-    },
-    'a_boolean': {
-        'type': 'boolean',
-    },
-    'a_datetime': {
-        'type': 'datetime',
-    },
-    'a_float': {
-        'type': 'float',
-        'min': 1,
-        'max': 100,
-    },
-    'a_number': {
-        'type': 'number',
-        'min': 1,
-        'max': 100,
-    },
-    'a_set': {
-        'type': 'set',
-    },
-    'one_or_more_strings': {
-        'type': ['string', 'list'],
-        'schema': {'type': 'string'}
-    },
-    'a_regex_email': {
-        'type': 'string',
-        'regex': '^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
-    },
-    'a_readonly_string': {
-        'type': 'string',
-        'readonly': True,
-    },
-    'a_restricted_string': {
-        'type': 'string',
-        'allowed': ["agent", "client", "vendor"],
-    },
-    'an_array': {
-        'type': 'list',
-        'allowed': ["agent", "client", "vendor"],
-    },
-    'a_list_of_dicts': {
-        'type': 'list',
-        'schema': {
-            'type': 'dict',
-            'schema': {
-                'sku': {'type': 'string'},
-                'price': {'type': 'integer', 'required': True},
-            },
-        },
-    },
-    'a_list_of_values': {
-        'type': 'list',
-        'items': [{'type': 'string'}, {'type': 'integer'}, ]
-    },
-    'a_list_of_integers': {
-        'type': 'list',
-        'schema': {'type': 'integer'},
-    },
-    'a_dict': {
-        'type': 'dict',
-        'schema': {
-            'address': {'type': 'string'},
-            'city': {'type': 'string', 'required': True}
-        },
-    },
-    'a_dict_with_valueschema': {
-        'type': 'dict',
-        'valueschema': {'type': 'integer'}
-    },
-    'a_dict_with_keyschema': {
-        'type': 'dict',
-        'keyschema': {'type': 'string', 'regex': '[a-z]+'}
-    },
-    'a_list_length': {
-        'type': 'list',
-        'schema': {'type': 'integer'},
-        'minlength': 2,
-        'maxlength': 5,
-    },
-    'a_nullable_field_without_type': {
-        'nullable': True
-    },
-    'a_not_nullable_field_without_type': {
-    },
-}
-
-sample_document = {'name': 'john doe'}
+# -*- coding: utf-8 -*-
+
+from copy import deepcopy
+
+import pytest
+
+from cerberus import Validator
+
+
+@pytest.fixture
+def document():
+    return deepcopy(sample_document)
+
+
+@pytest.fixture
+def schema():
+    return deepcopy(sample_schema)
+
+
+@pytest.fixture
+def validator():
+    return Validator(sample_schema)
+
+
+sample_schema = {
+    'a_string': {
+        'type': 'string',
+        'minlength': 2,
+        'maxlength': 10
+    },
+    'a_binary': {
+        'type': 'binary',
+        'minlength': 2,
+        'maxlength': 10
+    },
+    'a_nullable_integer': {
+        'type': 'integer',
+        'nullable': True
+    },
+    'an_integer': {
+        'type': 'integer',
+        'min': 1,
+        'max': 100,
+    },
+    'a_restricted_integer': {
+        'type': 'integer',
+        'allowed': [-1, 0, 1],
+    },
+    'a_boolean': {
+        'type': 'boolean',
+    },
+    'a_datetime': {
+        'type': 'datetime',
+    },
+    'a_float': {
+        'type': 'float',
+        'min': 1,
+        'max': 100,
+    },
+    'a_number': {
+        'type': 'number',
+        'min': 1,
+        'max': 100,
+    },
+    'a_set': {
+        'type': 'set',
+    },
+    'one_or_more_strings': {
+        'type': ['string', 'list'],
+        'schema': {'type': 'string'}
+    },
+    'a_regex_email': {
+        'type': 'string',
+        'regex': '^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
+    },
+    'a_readonly_string': {
+        'type': 'string',
+        'readonly': True,
+    },
+    'a_restricted_string': {
+        'type': 'string',
+        'allowed': ["agent", "client", "vendor"],
+    },
+    'an_array': {
+        'type': 'list',
+        'allowed': ["agent", "client", "vendor"],
+    },
+    'a_list_of_dicts': {
+        'type': 'list',
+        'schema': {
+            'type': 'dict',
+            'schema': {
+                'sku': {'type': 'string'},
+                'price': {'type': 'integer', 'required': True},
+            },
+        },
+    },
+    'a_list_of_values': {
+        'type': 'list',
+        'items': [{'type': 'string'}, {'type': 'integer'}, ]
+    },
+    'a_list_of_integers': {
+        'type': 'list',
+        'schema': {'type': 'integer'},
+    },
+    'a_dict': {
+        'type': 'dict',
+        'schema': {
+            'address': {'type': 'string'},
+            'city': {'type': 'string', 'required': True}
+        },
+    },
+    'a_dict_with_valueschema': {
+        'type': 'dict',
+        'valueschema': {'type': 'integer'}
+    },
+    'a_dict_with_keyschema': {
+        'type': 'dict',
+        'keyschema': {'type': 'string', 'regex': '[a-z]+'}
+    },
+    'a_list_length': {
+        'type': 'list',
+        'schema': {'type': 'integer'},
+        'minlength': 2,
+        'maxlength': 5,
+    },
+    'a_nullable_field_without_type': {
+        'nullable': True
+    },
+    'a_not_nullable_field_without_type': {
+    },
+}
+
+sample_document = {'name': 'john doe'}
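
The fixtures above are injected by pytest into tests by argument name; the
validator fixture is a Validator built from sample_schema. A short sketch of a
test consuming it (the test name is hypothetical):

    def test_regex_email(validator):
        assert validator({'a_regex_email': 'jane.doe@example.com'})
        assert not validator({'a_regex_email': 'not-an-email'})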

+ 76 - 76
ext/cerberus/tests/test_assorted.py → mncheck/ext/cerberus/tests/test_assorted.py

@@ -1,76 +1,76 @@
-# -*- coding: utf-8 -*-
-
-from decimal import Decimal
-
-from pytest import mark
-
-from cerberus import TypeDefinition, Validator
-from cerberus.tests import assert_fail, assert_success
-from cerberus.utils import validator_factory
-from cerberus.validator import BareValidator
-
-
-def test_clear_cache(validator):
-    assert len(validator._valid_schemas) > 0
-    validator.clear_caches()
-    assert len(validator._valid_schemas) == 0
-
-
-def test_docstring(validator):
-    assert validator.__doc__
-
-
-# Test that testing with the sample schema works as expected
-# as there might be rules with side-effects in it
-
-@mark.parametrize('test,document', ((assert_fail, {'an_integer': 60}),
-                                    (assert_success, {'an_integer': 110})))
-def test_that_test_fails(test, document):
-    try:
-        test(document)
-    except AssertionError:
-        pass
-    else:
-        raise AssertionError("test didn't fail")
-
-
-def test_dynamic_types():
-    decimal_type = TypeDefinition('decimal', (Decimal,), ())
-    document = {'measurement': Decimal(0)}
-    schema = {'measurement': {'type': 'decimal'}}
-
-    validator = Validator()
-    validator.types_mapping['decimal'] = decimal_type
-    assert_success(document, schema, validator)
-
-    class MyValidator(Validator):
-        types_mapping = Validator.types_mapping.copy()
-        types_mapping['decimal'] = decimal_type
-    validator = MyValidator()
-    assert_success(document, schema, validator)
-
-
-def test_mro():
-    assert Validator.__mro__ == (Validator, BareValidator, object), \
-        Validator.__mro__
-
-
-def test_mixin_init():
-    class Mixin(object):
-        def __init__(self, *args, **kwargs):
-            kwargs['test'] = True
-            super(Mixin, self).__init__(*args, **kwargs)
-
-    MyValidator = validator_factory('MyValidator', Mixin)
-    validator = MyValidator()
-    assert validator._config['test']
-
-
-def test_sub_init():
-    class MyValidator(Validator):
-        def __init__(self, *args, **kwargs):
-            kwargs['test'] = True
-            super(MyValidator, self).__init__(*args, **kwargs)
-
-    validator = MyValidator()
-    assert validator._config['test']
+# -*- coding: utf-8 -*-
+
+from decimal import Decimal
+
+from pytest import mark
+
+from cerberus import TypeDefinition, Validator
+from cerberus.tests import assert_fail, assert_success
+from cerberus.utils import validator_factory
+from cerberus.validator import BareValidator
+
+
+def test_clear_cache(validator):
+    assert len(validator._valid_schemas) > 0
+    validator.clear_caches()
+    assert len(validator._valid_schemas) == 0
+
+
+def test_docstring(validator):
+    assert validator.__doc__
+
+
+# Test that testing with the sample schema works as expected
+# as there might be rules with side-effects in it
+
+@mark.parametrize('test,document', ((assert_fail, {'an_integer': 60}),
+                                    (assert_success, {'an_integer': 110})))
+def test_that_test_fails(test, document):
+    try:
+        test(document)
+    except AssertionError:
+        pass
+    else:
+        raise AssertionError("test didn't fail")
+
+
+def test_dynamic_types():
+    decimal_type = TypeDefinition('decimal', (Decimal,), ())
+    document = {'measurement': Decimal(0)}
+    schema = {'measurement': {'type': 'decimal'}}
+
+    validator = Validator()
+    validator.types_mapping['decimal'] = decimal_type
+    assert_success(document, schema, validator)
+
+    class MyValidator(Validator):
+        types_mapping = Validator.types_mapping.copy()
+        types_mapping['decimal'] = decimal_type
+    validator = MyValidator()
+    assert_success(document, schema, validator)
+
+
+def test_mro():
+    assert Validator.__mro__ == (Validator, BareValidator, object), \
+        Validator.__mro__
+
+
+def test_mixin_init():
+    class Mixin(object):
+        def __init__(self, *args, **kwargs):
+            kwargs['test'] = True
+            super(Mixin, self).__init__(*args, **kwargs)
+
+    MyValidator = validator_factory('MyValidator', Mixin)
+    validator = MyValidator()
+    assert validator._config['test']
+
+
+def test_sub_init():
+    class MyValidator(Validator):
+        def __init__(self, *args, **kwargs):
+            kwargs['test'] = True
+            super(MyValidator, self).__init__(*args, **kwargs)
+
+    validator = MyValidator()
+    assert validator._config['test']

+ 77 - 77
ext/cerberus/tests/test_customization.py → mncheck/ext/cerberus/tests/test_customization.py

@@ -1,77 +1,77 @@
-# -*- coding: utf-8 -*-
-
-import cerberus
-from cerberus.tests import assert_fail, assert_success
-from cerberus.tests.conftest import sample_schema
-
-
-def test_contextual_data_preservation():
-
-    class InheritedValidator(cerberus.Validator):
-        def __init__(self, *args, **kwargs):
-            if 'working_dir' in kwargs:
-                self.working_dir = kwargs['working_dir']
-            super(InheritedValidator, self).__init__(*args, **kwargs)
-
-        def _validate_type_test(self, value):
-            if self.working_dir:
-                return True
-
-    assert 'test' in InheritedValidator.types
-    v = InheritedValidator({'test': {'type': 'list',
-                                     'schema': {'type': 'test'}}},
-                           working_dir='/tmp')
-    assert_success({'test': ['foo']}, validator=v)
-
-
-def test_docstring_parsing():
-    class CustomValidator(cerberus.Validator):
-        def _validate_foo(self, argument, field, value):
-            """ {'type': 'zap'} """
-            pass
-
-        def _validate_bar(self, value):
-            """ Test the barreness of a value.
-
-            The rule's arguments are validated against this schema:
-                {'type': 'boolean'}
-            """
-            pass
-
-    assert 'foo' in CustomValidator.validation_rules
-    assert 'bar' in CustomValidator.validation_rules
-
-
-def test_issue_265():
-    class MyValidator(cerberus.Validator):
-        def _validator_oddity(self, field, value):
-            if not value & 1:
-                self._error(field, "Must be an odd number")
-
-    v = MyValidator(schema={'amount': {'validator': 'oddity'}})
-    assert_success(document={'amount': 1}, validator=v)
-    assert_fail(document={'amount': 2}, validator=v,
-                error=('amount', (), cerberus.errors.CUSTOM, None,
-                       ('Must be an odd number',)))
-
-
-def test_schema_validation_can_be_disabled_in_schema_setter():
-
-    class NonvalidatingValidator(cerberus.Validator):
-        """
-        Skips schema validation to speed up initialization
-        """
-        @cerberus.Validator.schema.setter
-        def schema(self, schema):
-            if schema is None:
-                self._schema = None
-            elif self.is_child:
-                self._schema = schema
-            elif isinstance(schema, cerberus.schema.DefinitionSchema):
-                self._schema = schema
-            else:
-                self._schema = cerberus.schema.UnvalidatedSchema(schema)
-
-    v = NonvalidatingValidator(schema=sample_schema)
-    assert v.validate(document={'an_integer': 1})
-    assert not v.validate(document={'an_integer': 'a'})
+# -*- coding: utf-8 -*-
+
+import cerberus
+from cerberus.tests import assert_fail, assert_success
+from cerberus.tests.conftest import sample_schema
+
+
+def test_contextual_data_preservation():
+
+    class InheritedValidator(cerberus.Validator):
+        def __init__(self, *args, **kwargs):
+            if 'working_dir' in kwargs:
+                self.working_dir = kwargs['working_dir']
+            super(InheritedValidator, self).__init__(*args, **kwargs)
+
+        def _validate_type_test(self, value):
+            if self.working_dir:
+                return True
+
+    assert 'test' in InheritedValidator.types
+    v = InheritedValidator({'test': {'type': 'list',
+                                     'schema': {'type': 'test'}}},
+                           working_dir='/tmp')
+    assert_success({'test': ['foo']}, validator=v)
+
+
+def test_docstring_parsing():
+    class CustomValidator(cerberus.Validator):
+        def _validate_foo(self, argument, field, value):
+            """ {'type': 'zap'} """
+            pass
+
+        def _validate_bar(self, value):
+            """ Test the barreness of a value.
+
+            The rule's arguments are validated against this schema:
+                {'type': 'boolean'}
+            """
+            pass
+
+    assert 'foo' in CustomValidator.validation_rules
+    assert 'bar' in CustomValidator.validation_rules
+
+
+def test_issue_265():
+    class MyValidator(cerberus.Validator):
+        def _validator_oddity(self, field, value):
+            if not value & 1:
+                self._error(field, "Must be an odd number")
+
+    v = MyValidator(schema={'amount': {'validator': 'oddity'}})
+    assert_success(document={'amount': 1}, validator=v)
+    assert_fail(document={'amount': 2}, validator=v,
+                error=('amount', (), cerberus.errors.CUSTOM, None,
+                       ('Must be an odd number',)))
+
+
+def test_schema_validation_can_be_disabled_in_schema_setter():
+
+    class NonvalidatingValidator(cerberus.Validator):
+        """
+        Skips schema validation to speed up initialization
+        """
+        @cerberus.Validator.schema.setter
+        def schema(self, schema):
+            if schema is None:
+                self._schema = None
+            elif self.is_child:
+                self._schema = schema
+            elif isinstance(schema, cerberus.schema.DefinitionSchema):
+                self._schema = schema
+            else:
+                self._schema = cerberus.schema.UnvalidatedSchema(schema)
+
+    v = NonvalidatingValidator(schema=sample_schema)
+    assert v.validate(document={'an_integer': 1})
+    assert not v.validate(document={'an_integer': 'a'})

+ 260 - 260
ext/cerberus/tests/test_errors.py → mncheck/ext/cerberus/tests/test_errors.py

@@ -1,260 +1,260 @@
-# -*- coding: utf-8 -*-
-
-from cerberus import Validator, errors
-from cerberus.tests import assert_fail
-
-
-ValidationError = errors.ValidationError
-
-
-def test__error_1():
-    v = Validator(schema={'foo': {'type': 'string'}})
-    v.document = {'foo': 42}
-    v._error('foo', errors.BAD_TYPE, 'string')
-    error = v._errors[0]
-    assert error.document_path == ('foo',)
-    assert error.schema_path == ('foo', 'type')
-    assert error.code == 0x24
-    assert error.rule == 'type'
-    assert error.constraint == 'string'
-    assert error.value == 42
-    assert error.info == ('string',)
-    assert not error.is_group_error
-    assert not error.is_logic_error
-
-
-def test__error_2():
-    v = Validator(schema={'foo': {'keyschema': {'type': 'integer'}}})
-    v.document = {'foo': {'0': 'bar'}}
-    v._error('foo', errors.KEYSCHEMA, ())
-    error = v._errors[0]
-    assert error.document_path == ('foo',)
-    assert error.schema_path == ('foo', 'keyschema')
-    assert error.code == 0x83
-    assert error.rule == 'keyschema'
-    assert error.constraint == {'type': 'integer'}
-    assert error.value == {'0': 'bar'}
-    assert error.info == ((),)
-    assert error.is_group_error
-    assert not error.is_logic_error
-
-
-def test__error_3():
-    valids = [{'type': 'string', 'regex': '0x[0-9a-f]{2}'},
-              {'type': 'integer', 'min': 0, 'max': 255}]
-    v = Validator(schema={'foo': {'oneof': valids}})
-    v.document = {'foo': '0x100'}
-    v._error('foo', errors.ONEOF, (), 0, 2)
-    error = v._errors[0]
-    assert error.document_path == ('foo',)
-    assert error.schema_path == ('foo', 'oneof')
-    assert error.code == 0x92
-    assert error.rule == 'oneof'
-    assert error.constraint == valids
-    assert error.value == '0x100'
-    assert error.info == ((), 0, 2)
-    assert error.is_group_error
-    assert error.is_logic_error
-
-
-def test_error_tree_from_subschema(validator):
-    schema = {'foo': {'schema': {'bar': {'type': 'string'}}}}
-    document = {'foo': {'bar': 0}}
-    assert_fail(document, schema, validator=validator)
-    d_error_tree = validator.document_error_tree
-    s_error_tree = validator.schema_error_tree
-
-    assert 'foo' in d_error_tree
-
-    assert len(d_error_tree['foo'].errors) == 1, d_error_tree['foo']
-    assert d_error_tree['foo'].errors[0].code == errors.MAPPING_SCHEMA.code
-    assert 'bar' in d_error_tree['foo']
-    assert d_error_tree['foo']['bar'].errors[0].value == 0
-    assert d_error_tree.fetch_errors_from(('foo', 'bar'))[0].value == 0
-
-    assert 'foo' in s_error_tree
-    assert 'schema' in s_error_tree['foo']
-    assert 'bar' in s_error_tree['foo']['schema']
-    assert 'type' in s_error_tree['foo']['schema']['bar']
-    assert s_error_tree['foo']['schema']['bar']['type'].errors[0].value == 0
-    assert s_error_tree.fetch_errors_from(
-        ('foo', 'schema', 'bar', 'type'))[0].value == 0
-
-
-def test_error_tree_from_anyof(validator):
-    schema = {'foo': {'anyof': [{'type': 'string'}, {'type': 'integer'}]}}
-    document = {'foo': []}
-    assert_fail(document, schema, validator=validator)
-    d_error_tree = validator.document_error_tree
-    s_error_tree = validator.schema_error_tree
-    assert 'foo' in d_error_tree
-    assert d_error_tree['foo'].errors[0].value == []
-    assert 'foo' in s_error_tree
-    assert 'anyof' in s_error_tree['foo']
-    assert 0 in s_error_tree['foo']['anyof']
-    assert 1 in s_error_tree['foo']['anyof']
-    assert 'type' in s_error_tree['foo']['anyof'][0]
-    assert s_error_tree['foo']['anyof'][0]['type'].errors[0].value == []
-
-
-def test_nested_error_paths(validator):
-    schema = {'a_dict': {'keyschema': {'type': 'integer'},
-                         'valueschema': {'regex': '[a-z]*'}},
-              'a_list': {'schema': {'type': 'string',
-                                    'oneof_regex': ['[a-z]*$', '[A-Z]*']}}}
-    document = {'a_dict': {0: 'abc', 'one': 'abc', 2: 'aBc', 'three': 'abC'},
-                'a_list': [0, 'abc', 'abC']}
-    assert_fail(document, schema, validator=validator)
-
-    _det = validator.document_error_tree
-    _set = validator.schema_error_tree
-
-    assert len(_det.errors) == 0
-    assert len(_set.errors) == 0
-
-    assert len(_det['a_dict'].errors) == 2
-    assert len(_set['a_dict'].errors) == 0
-
-    assert _det['a_dict'][0] is None
-    assert len(_det['a_dict']['one'].errors) == 1
-    assert len(_det['a_dict'][2].errors) == 1
-    assert len(_det['a_dict']['three'].errors) == 2
-
-    assert len(_set['a_dict']['keyschema'].errors) == 1
-    assert len(_set['a_dict']['valueschema'].errors) == 1
-
-    assert len(_set['a_dict']['keyschema']['type'].errors) == 2
-    assert len(_set['a_dict']['valueschema']['regex'].errors) == 2
-
-    _ref_err = ValidationError(
-        ('a_dict', 'one'), ('a_dict', 'keyschema', 'type'),
-        errors.BAD_TYPE.code, 'type', 'integer', 'one', ())
-    assert _det['a_dict']['one'].errors[0] == _ref_err
-    assert _set['a_dict']['keyschema']['type'].errors[0] == _ref_err
-
-    _ref_err = ValidationError(
-        ('a_dict', 2), ('a_dict', 'valueschema', 'regex'),
-        errors.REGEX_MISMATCH.code, 'regex', '[a-z]*$', 'aBc', ())
-    assert _det['a_dict'][2].errors[0] == _ref_err
-    assert _set['a_dict']['valueschema']['regex'].errors[0] == _ref_err
-
-    _ref_err = ValidationError(
-        ('a_dict', 'three'), ('a_dict', 'keyschema', 'type'),
-        errors.BAD_TYPE.code, 'type', 'integer', 'three', ())
-    assert _det['a_dict']['three'].errors[0] == _ref_err
-    assert _set['a_dict']['keyschema']['type'].errors[1] == _ref_err
-
-    _ref_err = ValidationError(
-        ('a_dict', 'three'), ('a_dict', 'valueschema', 'regex'),
-        errors.REGEX_MISMATCH.code, 'regex', '[a-z]*$', 'abC', ())
-    assert _det['a_dict']['three'].errors[1] == _ref_err
-    assert _set['a_dict']['valueschema']['regex'].errors[1] == _ref_err
-
-    assert len(_det['a_list'].errors) == 1
-    assert len(_det['a_list'][0].errors) == 1
-    assert _det['a_list'][1] is None
-    assert len(_det['a_list'][2].errors) == 3
-    assert len(_set['a_list'].errors) == 0
-    assert len(_set['a_list']['schema'].errors) == 1
-    assert len(_set['a_list']['schema']['type'].errors) == 1
-    assert len(_set['a_list']['schema']['oneof'][0]['regex'].errors) == 1
-    assert len(_set['a_list']['schema']['oneof'][1]['regex'].errors) == 1
-
-    _ref_err = ValidationError(
-        ('a_list', 0), ('a_list', 'schema', 'type'), errors.BAD_TYPE.code,
-        'type', 'string', 0, ())
-    assert _det['a_list'][0].errors[0] == _ref_err
-    assert _set['a_list']['schema']['type'].errors[0] == _ref_err
-
-    _ref_err = ValidationError(
-        ('a_list', 2), ('a_list', 'schema', 'oneof'), errors.ONEOF.code,
-        'oneof', 'irrelevant_at_this_point', 'abC', ())
-    assert _det['a_list'][2].errors[0] == _ref_err
-    assert _set['a_list']['schema']['oneof'].errors[0] == _ref_err
-
-    _ref_err = ValidationError(
-        ('a_list', 2), ('a_list', 'schema', 'oneof', 0, 'regex'),
-        errors.REGEX_MISMATCH.code, 'regex', '[a-z]*$', 'abC', ())
-    assert _det['a_list'][2].errors[1] == _ref_err
-    assert _set['a_list']['schema']['oneof'][0]['regex'].errors[0] == _ref_err
-
-    _ref_err = ValidationError(
-        ('a_list', 2), ('a_list', 'schema', 'oneof', 1, 'regex'),
-        errors.REGEX_MISMATCH.code, 'regex', '[a-z]*$', 'abC', ())
-    assert _det['a_list'][2].errors[2] == _ref_err
-    assert _set['a_list']['schema']['oneof'][1]['regex'].errors[0] == _ref_err
-
-
-def test_queries():
-    schema = {'foo': {'type': 'dict',
-                      'schema':
-                          {'bar': {'type': 'number'}}}}
-    document = {'foo': {'bar': 'zero'}}
-    validator = Validator(schema)
-    validator(document)
-
-    assert 'foo' in validator.document_error_tree
-    assert 'bar' in validator.document_error_tree['foo']
-    assert 'foo' in validator.schema_error_tree
-    assert 'schema' in validator.schema_error_tree['foo']
-
-    assert errors.MAPPING_SCHEMA in validator.document_error_tree['foo'].errors
-    assert errors.MAPPING_SCHEMA in validator.document_error_tree['foo']
-    assert errors.BAD_TYPE in validator.document_error_tree['foo']['bar']
-    assert errors.MAPPING_SCHEMA in validator.schema_error_tree['foo']['schema']
-    assert errors.BAD_TYPE in \
-        validator.schema_error_tree['foo']['schema']['bar']['type']
-
-    assert (validator.document_error_tree['foo'][errors.MAPPING_SCHEMA]
-            .child_errors[0].code == errors.BAD_TYPE.code)
-
-
-def test_basic_error_handler():
-    handler = errors.BasicErrorHandler()
-    _errors, ref = [], {}
-
-    _errors.append(ValidationError(
-        ['foo'], ['foo'], 0x63, 'readonly', True, None, ()))
-    ref.update({'foo': [handler.messages[0x63]]})
-    assert handler(_errors) == ref
-
-    _errors.append(ValidationError(
-        ['bar'], ['foo'], 0x42, 'min', 1, 2, ()))
-    ref.update({'bar': [handler.messages[0x42].format(constraint=1)]})
-    assert handler(_errors) == ref
-
-    _errors.append(ValidationError(
-        ['zap', 'foo'], ['zap', 'schema', 'foo'], 0x24, 'type', 'string',
-        True, ()))
-    ref.update({'zap': [{'foo': [handler.messages[0x24].format(
-        constraint='string')]}]})
-    assert handler(_errors) == ref
-
-    _errors.append(ValidationError(
-        ['zap', 'foo'], ['zap', 'schema', 'foo'], 0x41, 'regex',
-        '^p[äe]ng$', 'boom', ()))
-    ref['zap'][0]['foo'].append(
-        handler.messages[0x41].format(constraint='^p[äe]ng$'))
-    assert handler(_errors) == ref
-
-
-def test_basic_error_of_errors(validator):
-    schema = {'foo': {'oneof': [
-        {'type': 'integer'},
-        {'type': 'string'}
-    ]}}
-    document = {'foo': 23.42}
-    error = ('foo', ('foo', 'oneof'), errors.ONEOF,
-             schema['foo']['oneof'], ())
-    child_errors = [
-        (error[0], error[1] + (0, 'type'), errors.BAD_TYPE, 'integer'),
-        (error[0], error[1] + (1, 'type'), errors.BAD_TYPE, 'string')
-    ]
-    assert_fail(document, schema, validator=validator,
-                error=error, child_errors=child_errors)
-    assert validator.errors == {
-        'foo': [errors.BasicErrorHandler.messages[0x92],
-                {'oneof definition 0': ['must be of integer type'],
-                 'oneof definition 1': ['must be of string type']}
-                ]
-    }
+# -*- coding: utf-8 -*-
+
+from cerberus import Validator, errors
+from cerberus.tests import assert_fail
+
+
+ValidationError = errors.ValidationError
+
+
+def test__error_1():
+    v = Validator(schema={'foo': {'type': 'string'}})
+    v.document = {'foo': 42}
+    v._error('foo', errors.BAD_TYPE, 'string')
+    error = v._errors[0]
+    assert error.document_path == ('foo',)
+    assert error.schema_path == ('foo', 'type')
+    assert error.code == 0x24
+    assert error.rule == 'type'
+    assert error.constraint == 'string'
+    assert error.value == 42
+    assert error.info == ('string',)
+    assert not error.is_group_error
+    assert not error.is_logic_error
+
+
+def test__error_2():
+    v = Validator(schema={'foo': {'keyschema': {'type': 'integer'}}})
+    v.document = {'foo': {'0': 'bar'}}
+    v._error('foo', errors.KEYSCHEMA, ())
+    error = v._errors[0]
+    assert error.document_path == ('foo',)
+    assert error.schema_path == ('foo', 'keyschema')
+    assert error.code == 0x83
+    assert error.rule == 'keyschema'
+    assert error.constraint == {'type': 'integer'}
+    assert error.value == {'0': 'bar'}
+    assert error.info == ((),)
+    assert error.is_group_error
+    assert not error.is_logic_error
+
+
+def test__error_3():
+    valids = [{'type': 'string', 'regex': '0x[0-9a-f]{2}'},
+              {'type': 'integer', 'min': 0, 'max': 255}]
+    v = Validator(schema={'foo': {'oneof': valids}})
+    v.document = {'foo': '0x100'}
+    v._error('foo', errors.ONEOF, (), 0, 2)
+    error = v._errors[0]
+    assert error.document_path == ('foo',)
+    assert error.schema_path == ('foo', 'oneof')
+    assert error.code == 0x92
+    assert error.rule == 'oneof'
+    assert error.constraint == valids
+    assert error.value == '0x100'
+    assert error.info == ((), 0, 2)
+    assert error.is_group_error
+    assert error.is_logic_error
+
+
+def test_error_tree_from_subschema(validator):
+    schema = {'foo': {'schema': {'bar': {'type': 'string'}}}}
+    document = {'foo': {'bar': 0}}
+    assert_fail(document, schema, validator=validator)
+    d_error_tree = validator.document_error_tree
+    s_error_tree = validator.schema_error_tree
+
+    assert 'foo' in d_error_tree
+
+    assert len(d_error_tree['foo'].errors) == 1, d_error_tree['foo']
+    assert d_error_tree['foo'].errors[0].code == errors.MAPPING_SCHEMA.code
+    assert 'bar' in d_error_tree['foo']
+    assert d_error_tree['foo']['bar'].errors[0].value == 0
+    assert d_error_tree.fetch_errors_from(('foo', 'bar'))[0].value == 0
+
+    assert 'foo' in s_error_tree
+    assert 'schema' in s_error_tree['foo']
+    assert 'bar' in s_error_tree['foo']['schema']
+    assert 'type' in s_error_tree['foo']['schema']['bar']
+    assert s_error_tree['foo']['schema']['bar']['type'].errors[0].value == 0
+    assert s_error_tree.fetch_errors_from(
+        ('foo', 'schema', 'bar', 'type'))[0].value == 0
+
+
+def test_error_tree_from_anyof(validator):
+    schema = {'foo': {'anyof': [{'type': 'string'}, {'type': 'integer'}]}}
+    document = {'foo': []}
+    assert_fail(document, schema, validator=validator)
+    d_error_tree = validator.document_error_tree
+    s_error_tree = validator.schema_error_tree
+    assert 'foo' in d_error_tree
+    assert d_error_tree['foo'].errors[0].value == []
+    assert 'foo' in s_error_tree
+    assert 'anyof' in s_error_tree['foo']
+    assert 0 in s_error_tree['foo']['anyof']
+    assert 1 in s_error_tree['foo']['anyof']
+    assert 'type' in s_error_tree['foo']['anyof'][0]
+    assert s_error_tree['foo']['anyof'][0]['type'].errors[0].value == []
+
+
+def test_nested_error_paths(validator):
+    schema = {'a_dict': {'keyschema': {'type': 'integer'},
+                         'valueschema': {'regex': '[a-z]*'}},
+              'a_list': {'schema': {'type': 'string',
+                                    'oneof_regex': ['[a-z]*$', '[A-Z]*']}}}
+    document = {'a_dict': {0: 'abc', 'one': 'abc', 2: 'aBc', 'three': 'abC'},
+                'a_list': [0, 'abc', 'abC']}
+    assert_fail(document, schema, validator=validator)
+
+    _det = validator.document_error_tree
+    _set = validator.schema_error_tree
+
+    assert len(_det.errors) == 0
+    assert len(_set.errors) == 0
+
+    assert len(_det['a_dict'].errors) == 2
+    assert len(_set['a_dict'].errors) == 0
+
+    assert _det['a_dict'][0] is None
+    assert len(_det['a_dict']['one'].errors) == 1
+    assert len(_det['a_dict'][2].errors) == 1
+    assert len(_det['a_dict']['three'].errors) == 2
+
+    assert len(_set['a_dict']['keyschema'].errors) == 1
+    assert len(_set['a_dict']['valueschema'].errors) == 1
+
+    assert len(_set['a_dict']['keyschema']['type'].errors) == 2
+    assert len(_set['a_dict']['valueschema']['regex'].errors) == 2
+
+    _ref_err = ValidationError(
+        ('a_dict', 'one'), ('a_dict', 'keyschema', 'type'),
+        errors.BAD_TYPE.code, 'type', 'integer', 'one', ())
+    assert _det['a_dict']['one'].errors[0] == _ref_err
+    assert _set['a_dict']['keyschema']['type'].errors[0] == _ref_err
+
+    _ref_err = ValidationError(
+        ('a_dict', 2), ('a_dict', 'valueschema', 'regex'),
+        errors.REGEX_MISMATCH.code, 'regex', '[a-z]*$', 'aBc', ())
+    assert _det['a_dict'][2].errors[0] == _ref_err
+    assert _set['a_dict']['valueschema']['regex'].errors[0] == _ref_err
+
+    _ref_err = ValidationError(
+        ('a_dict', 'three'), ('a_dict', 'keyschema', 'type'),
+        errors.BAD_TYPE.code, 'type', 'integer', 'three', ())
+    assert _det['a_dict']['three'].errors[0] == _ref_err
+    assert _set['a_dict']['keyschema']['type'].errors[1] == _ref_err
+
+    _ref_err = ValidationError(
+        ('a_dict', 'three'), ('a_dict', 'valueschema', 'regex'),
+        errors.REGEX_MISMATCH.code, 'regex', '[a-z]*$', 'abC', ())
+    assert _det['a_dict']['three'].errors[1] == _ref_err
+    assert _set['a_dict']['valueschema']['regex'].errors[1] == _ref_err
+
+    assert len(_det['a_list'].errors) == 1
+    assert len(_det['a_list'][0].errors) == 1
+    assert _det['a_list'][1] is None
+    assert len(_det['a_list'][2].errors) == 3
+    assert len(_set['a_list'].errors) == 0
+    assert len(_set['a_list']['schema'].errors) == 1
+    assert len(_set['a_list']['schema']['type'].errors) == 1
+    assert len(_set['a_list']['schema']['oneof'][0]['regex'].errors) == 1
+    assert len(_set['a_list']['schema']['oneof'][1]['regex'].errors) == 1
+
+    _ref_err = ValidationError(
+        ('a_list', 0), ('a_list', 'schema', 'type'), errors.BAD_TYPE.code,
+        'type', 'string', 0, ())
+    assert _det['a_list'][0].errors[0] == _ref_err
+    assert _set['a_list']['schema']['type'].errors[0] == _ref_err
+
+    _ref_err = ValidationError(
+        ('a_list', 2), ('a_list', 'schema', 'oneof'), errors.ONEOF.code,
+        'oneof', 'irrelevant_at_this_point', 'abC', ())
+    assert _det['a_list'][2].errors[0] == _ref_err
+    assert _set['a_list']['schema']['oneof'].errors[0] == _ref_err
+
+    _ref_err = ValidationError(
+        ('a_list', 2), ('a_list', 'schema', 'oneof', 0, 'regex'),
+        errors.REGEX_MISMATCH.code, 'regex', '[a-z]*$', 'abC', ())
+    assert _det['a_list'][2].errors[1] == _ref_err
+    assert _set['a_list']['schema']['oneof'][0]['regex'].errors[0] == _ref_err
+
+    _ref_err = ValidationError(
+        ('a_list', 2), ('a_list', 'schema', 'oneof', 1, 'regex'),
+        errors.REGEX_MISMATCH.code, 'regex', '[a-z]*$', 'abC', ())
+    assert _det['a_list'][2].errors[2] == _ref_err
+    assert _set['a_list']['schema']['oneof'][1]['regex'].errors[0] == _ref_err
+
+
+def test_queries():
+    schema = {'foo': {'type': 'dict',
+                      'schema':
+                          {'bar': {'type': 'number'}}}}
+    document = {'foo': {'bar': 'zero'}}
+    validator = Validator(schema)
+    validator(document)
+
+    assert 'foo' in validator.document_error_tree
+    assert 'bar' in validator.document_error_tree['foo']
+    assert 'foo' in validator.schema_error_tree
+    assert 'schema' in validator.schema_error_tree['foo']
+
+    assert errors.MAPPING_SCHEMA in validator.document_error_tree['foo'].errors
+    assert errors.MAPPING_SCHEMA in validator.document_error_tree['foo']
+    assert errors.BAD_TYPE in validator.document_error_tree['foo']['bar']
+    assert errors.MAPPING_SCHEMA in validator.schema_error_tree['foo']['schema']
+    assert errors.BAD_TYPE in \
+        validator.schema_error_tree['foo']['schema']['bar']['type']
+
+    assert (validator.document_error_tree['foo'][errors.MAPPING_SCHEMA]
+            .child_errors[0].code == errors.BAD_TYPE.code)
+
+
+def test_basic_error_handler():
+    handler = errors.BasicErrorHandler()
+    _errors, ref = [], {}
+
+    _errors.append(ValidationError(
+        ['foo'], ['foo'], 0x63, 'readonly', True, None, ()))
+    ref.update({'foo': [handler.messages[0x63]]})
+    assert handler(_errors) == ref
+
+    _errors.append(ValidationError(
+        ['bar'], ['foo'], 0x42, 'min', 1, 2, ()))
+    ref.update({'bar': [handler.messages[0x42].format(constraint=1)]})
+    assert handler(_errors) == ref
+
+    _errors.append(ValidationError(
+        ['zap', 'foo'], ['zap', 'schema', 'foo'], 0x24, 'type', 'string',
+        True, ()))
+    ref.update({'zap': [{'foo': [handler.messages[0x24].format(
+        constraint='string')]}]})
+    assert handler(_errors) == ref
+
+    _errors.append(ValidationError(
+        ['zap', 'foo'], ['zap', 'schema', 'foo'], 0x41, 'regex',
+        '^p[äe]ng$', 'boom', ()))
+    ref['zap'][0]['foo'].append(
+        handler.messages[0x41].format(constraint='^p[äe]ng$'))
+    assert handler(_errors) == ref
+
+
+def test_basic_error_of_errors(validator):
+    schema = {'foo': {'oneof': [
+        {'type': 'integer'},
+        {'type': 'string'}
+    ]}}
+    document = {'foo': 23.42}
+    error = ('foo', ('foo', 'oneof'), errors.ONEOF,
+             schema['foo']['oneof'], ())
+    child_errors = [
+        (error[0], error[1] + (0, 'type'), errors.BAD_TYPE, 'integer'),
+        (error[0], error[1] + (1, 'type'), errors.BAD_TYPE, 'string')
+    ]
+    assert_fail(document, schema, validator=validator,
+                error=error, child_errors=child_errors)
+    assert validator.errors == {
+        'foo': [errors.BasicErrorHandler.messages[0x92],
+                {'oneof definition 0': ['must be of integer type'],
+                 'oneof definition 1': ['must be of string type']}
+                ]
+    }

+ 3 - 3
ext/cerberus/tests/test_legacy.py → mncheck/ext/cerberus/tests/test_legacy.py

@@ -1,3 +1,3 @@
-# -*- coding: utf-8 -*-
-
-pass
+# -*- coding: utf-8 -*-
+
+pass

+ 485 - 485
ext/cerberus/tests/test_normalization.py → mncheck/ext/cerberus/tests/test_normalization.py

@@ -1,485 +1,485 @@
-# -*- coding: utf-8 -*-
-
-from tempfile import NamedTemporaryFile
-
-from cerberus import Validator, errors
-from cerberus.tests import (assert_fail, assert_has_error, assert_normalized,
-                            assert_success)
-
-
-def test_coerce():
-    schema = {'amount': {'coerce': int}}
-    document = {'amount': '1'}
-    expected = {'amount': 1}
-    assert_normalized(document, expected, schema)
-
-
-def test_coerce_in_dictschema():
-    schema = {'thing': {'type': 'dict',
-                        'schema': {'amount': {'coerce': int}}}}
-    document = {'thing': {'amount': '2'}}
-    expected = {'thing': {'amount': 2}}
-    assert_normalized(document, expected, schema)
-
-
-def test_coerce_in_listschema():
-    schema = {'things': {'type': 'list',
-                         'schema': {'coerce': int}}}
-    document = {'things': ['1', '2', '3']}
-    expected = {'things': [1, 2, 3]}
-    assert_normalized(document, expected, schema)
-
-
-def test_coerce_in_dictschema_in_listschema():
-    item_schema = {'type': 'dict', 'schema': {'amount': {'coerce': int}}}
-    schema = {'things': {'type': 'list', 'schema': item_schema}}
-    document = {'things': [{'amount': '2'}]}
-    expected = {'things': [{'amount': 2}]}
-    assert_normalized(document, expected, schema)
-
-
-def test_coerce_not_destructive():
-    schema = {
-        'amount': {'coerce': int}
-    }
-    v = Validator(schema)
-    doc = {'amount': '1'}
-    v.validate(doc)
-    assert v.document is not doc
-
-
-def test_coerce_catches_ValueError():
-    schema = {'amount': {'coerce': int}}
-    _errors = assert_fail({'amount': 'not_a_number'}, schema)
-    _errors[0].info = ()  # ignore exception message here
-    assert_has_error(_errors, 'amount', ('amount', 'coerce'),
-                     errors.COERCION_FAILED, int)
-
-
-def test_coerce_catches_TypeError():
-    schema = {'name': {'coerce': str.lower}}
-    _errors = assert_fail({'name': 1234}, schema)
-    _errors[0].info = ()  # ignore exception message here
-    assert_has_error(_errors, 'name', ('name', 'coerce'),
-                     errors.COERCION_FAILED, str.lower)
-
-
-def test_coerce_unknown():
-    schema = {'foo': {'schema': {}, 'allow_unknown': {'coerce': int}}}
-    document = {'foo': {'bar': '0'}}
-    expected = {'foo': {'bar': 0}}
-    assert_normalized(document, expected, schema)
-
-
-def test_custom_coerce_and_rename():
-    class MyNormalizer(Validator):
-        def __init__(self, multiplier, *args, **kwargs):
-            super(MyNormalizer, self).__init__(*args, **kwargs)
-            self.multiplier = multiplier
-
-        def _normalize_coerce_multiply(self, value):
-            return value * self.multiplier
-
-    v = MyNormalizer(2, {'foo': {'coerce': 'multiply'}})
-    assert v.normalized({'foo': 2})['foo'] == 4
-
-    v = MyNormalizer(3, allow_unknown={'rename_handler': 'multiply'})
-    assert v.normalized({3: None}) == {9: None}
-
-
-def test_coerce_chain():
-    drop_prefix = lambda x: x[2:]
-    upper = lambda x: x.upper()
-    schema = {'foo': {'coerce': [hex, drop_prefix, upper]}}
-    assert_normalized({'foo': 15}, {'foo': 'F'}, schema)
-
-
-def test_coerce_chain_aborts(validator):
-    def dont_do_me(value):
-        raise AssertionError('The coercion chain did not abort after an '
-                             'error.')
-    schema = {'foo': {'coerce': [hex, dont_do_me]}}
-    validator({'foo': '0'}, schema)
-    assert errors.COERCION_FAILED in validator._errors
-
-
-def test_coerce_non_digit_in_sequence(validator):
-    # https://github.com/pyeve/cerberus/issues/211
-    schema = {'data': {'type': 'list',
-                       'schema': {'type': 'integer', 'coerce': int}}}
-    document = {'data': ['q']}
-    assert validator.validated(document, schema) is None
-    assert (validator.validated(document, schema, always_return_document=True)
-            == document)  # noqa: W503
-
-
-def test_nullables_dont_fail_coerce():
-    schema = {'foo': {'coerce': int, 'nullable': True, 'type': 'integer'}}
-    document = {'foo': None}
-    assert_normalized(document, document, schema)
-
-
-def test_normalized():
-    schema = {'amount': {'coerce': int}}
-    document = {'amount': '2'}
-    expected = {'amount': 2}
-    assert_normalized(document, expected, schema)
-
-
-def test_rename(validator):
-    schema = {'foo': {'rename': 'bar'}}
-    document = {'foo': 0}
-    expected = {'bar': 0}
-    # We cannot use assertNormalized here since there is a bug where
-    # Cerberus says that the renamed field is an unknown field:
-    # {'bar': 'unknown field'}
-    validator(document, schema, False)
-    assert validator.document == expected
-
-
-def test_rename_handler():
-    validator = Validator(allow_unknown={'rename_handler': int})
-    schema = {}
-    document = {'0': 'foo'}
-    expected = {0: 'foo'}
-    assert_normalized(document, expected, schema, validator)
-
-
-def test_purge_unknown():
-    validator = Validator(purge_unknown=True)
-    schema = {'foo': {'type': 'string'}}
-    document = {'bar': 'foo'}
-    expected = {}
-    assert_normalized(document, expected, schema, validator)
-
-
-def test_purge_unknown_in_subschema():
-    schema = {'foo': {'type': 'dict',
-                      'schema': {'foo': {'type': 'string'}},
-                      'purge_unknown': True}}
-    document = {'foo': {'bar': ''}}
-    expected = {'foo': {}}
-    assert_normalized(document, expected, schema)
-
-
-def test_issue_147_complex():
-    schema = {'revision': {'coerce': int}}
-    document = {'revision': '5', 'file': NamedTemporaryFile(mode='w+')}
-    document['file'].write(r'foobar')
-    document['file'].seek(0)
-    normalized = Validator(schema, allow_unknown=True).normalized(document)
-    assert normalized['revision'] == 5
-    assert normalized['file'].read() == 'foobar'
-    document['file'].close()
-    normalized['file'].close()
-
-
-def test_issue_147_nested_dict():
-    schema = {'thing': {'type': 'dict',
-                        'schema': {'amount': {'coerce': int}}}}
-    ref_obj = '2'
-    document = {'thing': {'amount': ref_obj}}
-    normalized = Validator(schema).normalized(document)
-    assert document is not normalized
-    assert normalized['thing']['amount'] == 2
-    assert ref_obj == '2'
-    assert document['thing']['amount'] is ref_obj
-
-
-def test_coerce_in_valueschema():
-    # https://github.com/pyeve/cerberus/issues/155
-    schema = {'thing': {'type': 'dict',
-                        'valueschema': {'coerce': int,
-                                        'type': 'integer'}}}
-    document = {'thing': {'amount': '2'}}
-    expected = {'thing': {'amount': 2}}
-    assert_normalized(document, expected, schema)
-
-
-def test_coerce_in_keyschema():
-    # https://github.com/pyeve/cerberus/issues/155
-    schema = {'thing': {'type': 'dict',
-                        'keyschema': {'coerce': int, 'type': 'integer'}}}
-    document = {'thing': {'5': 'foo'}}
-    expected = {'thing': {5: 'foo'}}
-    assert_normalized(document, expected, schema)
-
-
-def test_coercion_of_sequence_items(validator):
-    # https://github.com/pyeve/cerberus/issues/161
-    schema = {'a_list': {'type': 'list', 'schema': {'type': 'float',
-                                                    'coerce': float}}}
-    document = {'a_list': [3, 4, 5]}
-    expected = {'a_list': [3.0, 4.0, 5.0]}
-    assert_normalized(document, expected, schema, validator)
-    for x in validator.document['a_list']:
-        assert isinstance(x, float)
-
-
-def test_default_missing():
-    _test_default_missing({'default': 'bar_value'})
-
-
-def test_default_setter_missing():
-    _test_default_missing({'default_setter': lambda doc: 'bar_value'})
-
-
-def _test_default_missing(default):
-    bar_schema = {'type': 'string'}
-    bar_schema.update(default)
-    schema = {'foo': {'type': 'string'},
-              'bar': bar_schema}
-    document = {'foo': 'foo_value'}
-    expected = {'foo': 'foo_value', 'bar': 'bar_value'}
-    assert_normalized(document, expected, schema)
-
-
-def test_default_existent():
-    _test_default_existent({'default': 'bar_value'})
-
-
-def test_default_setter_existent():
-    def raise_error(doc):
-        raise RuntimeError('should not be called')
-    _test_default_existent({'default_setter': raise_error})
-
-
-def _test_default_existent(default):
-    bar_schema = {'type': 'string'}
-    bar_schema.update(default)
-    schema = {'foo': {'type': 'string'},
-              'bar': bar_schema}
-    document = {'foo': 'foo_value', 'bar': 'non_default'}
-    assert_normalized(document, document.copy(), schema)
-
-
-def test_default_none_nullable():
-    _test_default_none_nullable({'default': 'bar_value'})
-
-
-def test_default_setter_none_nullable():
-    def raise_error(doc):
-        raise RuntimeError('should not be called')
-    _test_default_none_nullable({'default_setter': raise_error})
-
-
-def _test_default_none_nullable(default):
-    bar_schema = {'type': 'string',
-                  'nullable': True}
-    bar_schema.update(default)
-    schema = {'foo': {'type': 'string'},
-              'bar': bar_schema}
-    document = {'foo': 'foo_value', 'bar': None}
-    assert_normalized(document, document.copy(), schema)
-
-
-def test_default_none_nonnullable():
-    _test_default_none_nullable({'default': 'bar_value'})
-
-
-def test_default_setter_none_nonnullable():
-    _test_default_none_nullable(
-        {'default_setter': lambda doc: 'bar_value'})
-
-
-def _test_default_none_nonnullable(default):
-    bar_schema = {'type': 'string',
-                  'nullable': False}
-    bar_schema.update(default)
-    schema = {'foo': {'type': 'string'},
-              'bar': bar_schema}
-    document = {'foo': 'foo_value', 'bar': 'bar_value'}
-    assert_normalized(document, document.copy(), schema)
-
-
-def test_default_none_default_value():
-    schema = {'foo': {'type': 'string'},
-              'bar': {'type': 'string',
-                      'nullable': True,
-                      'default': None}}
-    document = {'foo': 'foo_value'}
-    expected = {'foo': 'foo_value', 'bar': None}
-    assert_normalized(document, expected, schema)
-
-
-def test_default_missing_in_subschema():
-    _test_default_missing_in_subschema({'default': 'bar_value'})
-
-
-def test_default_setter_missing_in_subschema():
-    _test_default_missing_in_subschema(
-        {'default_setter': lambda doc: 'bar_value'})
-
-
-def _test_default_missing_in_subschema(default):
-    bar_schema = {'type': 'string'}
-    bar_schema.update(default)
-    schema = {'thing': {'type': 'dict',
-                        'schema': {'foo': {'type': 'string'},
-                                   'bar': bar_schema}}}
-    document = {'thing': {'foo': 'foo_value'}}
-    expected = {'thing': {'foo': 'foo_value',
-                          'bar': 'bar_value'}}
-    assert_normalized(document, expected, schema)
-
-
-def test_depending_default_setters():
-    schema = {
-        'a': {'type': 'integer'},
-        'b': {'type': 'integer', 'default_setter': lambda d: d['a'] + 1},
-        'c': {'type': 'integer', 'default_setter': lambda d: d['b'] * 2},
-        'd': {'type': 'integer',
-              'default_setter': lambda d: d['b'] + d['c']}
-    }
-    document = {'a': 1}
-    expected = {'a': 1, 'b': 2, 'c': 4, 'd': 6}
-    assert_normalized(document, expected, schema)
-
-
-def test_circular_depending_default_setters(validator):
-    schema = {
-        'a': {'type': 'integer', 'default_setter': lambda d: d['b'] + 1},
-        'b': {'type': 'integer', 'default_setter': lambda d: d['a'] + 1}
-    }
-    validator({}, schema)
-    assert errors.SETTING_DEFAULT_FAILED in validator._errors
-
-
-def test_issue_250():
-    # https://github.com/pyeve/cerberus/issues/250
-    schema = {
-        'list': {
-            'type': 'list',
-            'schema': {
-                'type': 'dict',
-                'allow_unknown': True,
-                'schema': {'a': {'type': 'string'}}
-            }
-        }
-    }
-    document = {'list': {'is_a': 'mapping'}}
-    assert_fail(document, schema,
-                error=('list', ('list', 'type'), errors.BAD_TYPE,
-                       schema['list']['type']))
-
-
-def test_issue_250_no_type_pass_on_list():
-    # https://github.com/pyeve/cerberus/issues/250
-    schema = {
-        'list': {
-            'schema': {
-                'allow_unknown': True,
-                'type': 'dict',
-                'schema': {'a': {'type': 'string'}}
-            }
-        }
-    }
-    document = {'list': [{'a': 'known', 'b': 'unknown'}]}
-    assert_normalized(document, document, schema)
-
-
-def test_issue_250_no_type_fail_on_dict():
-    # https://github.com/pyeve/cerberus/issues/250
-    schema = {
-        'list': {
-            'schema': {
-                'allow_unknown': True,
-                'schema': {'a': {'type': 'string'}}
-            }
-        }
-    }
-    document = {'list': {'a': {'a': 'known'}}}
-    assert_fail(document, schema,
-                error=('list', ('list', 'schema'), errors.BAD_TYPE_FOR_SCHEMA,
-                       schema['list']['schema']))
-
-
-def test_issue_250_no_type_fail_pass_on_other():
-    # https://github.com/pyeve/cerberus/issues/250
-    schema = {
-        'list': {
-            'schema': {
-                'allow_unknown': True,
-                'schema': {'a': {'type': 'string'}}
-            }
-        }
-    }
-    document = {'list': 1}
-    assert_normalized(document, document, schema)
-
-
-def test_allow_unknown_with_of_rules():
-    # https://github.com/pyeve/cerberus/issues/251
-    schema = {
-        'test': {
-            'oneof': [
-                {
-                    'type': 'dict',
-                    'allow_unknown': True,
-                    'schema': {'known': {'type': 'string'}}
-                },
-                {
-                    'type': 'dict',
-                    'schema': {'known': {'type': 'string'}}
-                },
-            ]
-        }
-    }
-    # check the regression and that allow_unknown does not cause any
-    # behaviour different from what is expected with oneof.
-    document = {'test': {'known': 's'}}
-    assert_fail(document, schema,
-                error=('test', ('test', 'oneof'),
-                       errors.ONEOF, schema['test']['oneof']))
-    # check that allow_unknown is actually applied
-    document = {'test': {'known': 's', 'unknown': 'asd'}}
-    assert_success(document, schema)
-
-
-def test_271_normalising_tuples():
-    # https://github.com/pyeve/cerberus/issues/271
-    schema = {
-        'my_field': {
-            'type': 'list',
-            'schema': {'type': ('string', 'number', 'dict')}
-        }
-    }
-    document = {'my_field': ('foo', 'bar', 42, 'albert',
-                             'kandinsky', {'items': 23})}
-    assert_success(document, schema)
-
-    normalized = Validator(schema).normalized(document)
-    assert normalized['my_field'] == ('foo', 'bar', 42, 'albert',
-                                      'kandinsky', {'items': 23})
-
-
-def test_allow_unknown_wo_schema():
-    # https://github.com/pyeve/cerberus/issues/302
-    v = Validator({'a': {'type': 'dict', 'allow_unknown': True}})
-    v({'a': {}})
-
-
-def test_allow_unknown_with_purge_unknown():
-    validator = Validator(purge_unknown=True)
-    schema = {'foo': {'type': 'dict', 'allow_unknown': True}}
-    document = {'foo': {'bar': True}, 'bar': 'foo'}
-    expected = {'foo': {'bar': True}}
-    assert_normalized(document, expected, schema, validator)
-
-
-def test_allow_unknown_with_purge_unknown_subdocument():
-    validator = Validator(purge_unknown=True)
-    schema = {
-        'foo': {
-            'type': 'dict',
-            'schema': {
-                'bar': {
-                    'type': 'string'
-                }
-            },
-            'allow_unknown': True
-        }
-    }
-    document = {'foo': {'bar': 'baz', 'corge': False}, 'thud': 'xyzzy'}
-    expected = {'foo': {'bar': 'baz', 'corge': False}}
-    assert_normalized(document, expected, schema, validator)
+# -*- coding: utf-8 -*-
+
+from tempfile import NamedTemporaryFile
+
+from cerberus import Validator, errors
+from cerberus.tests import (assert_fail, assert_has_error, assert_normalized,
+                            assert_success)
+
+
+def test_coerce():
+    schema = {'amount': {'coerce': int}}
+    document = {'amount': '1'}
+    expected = {'amount': 1}
+    assert_normalized(document, expected, schema)
+
+
+def test_coerce_in_dictschema():
+    schema = {'thing': {'type': 'dict',
+                        'schema': {'amount': {'coerce': int}}}}
+    document = {'thing': {'amount': '2'}}
+    expected = {'thing': {'amount': 2}}
+    assert_normalized(document, expected, schema)
+
+
+def test_coerce_in_listschema():
+    schema = {'things': {'type': 'list',
+                         'schema': {'coerce': int}}}
+    document = {'things': ['1', '2', '3']}
+    expected = {'things': [1, 2, 3]}
+    assert_normalized(document, expected, schema)
+
+
+def test_coerce_in_dictschema_in_listschema():
+    item_schema = {'type': 'dict', 'schema': {'amount': {'coerce': int}}}
+    schema = {'things': {'type': 'list', 'schema': item_schema}}
+    document = {'things': [{'amount': '2'}]}
+    expected = {'things': [{'amount': 2}]}
+    assert_normalized(document, expected, schema)
+
+
+def test_coerce_not_destructive():
+    schema = {
+        'amount': {'coerce': int}
+    }
+    v = Validator(schema)
+    doc = {'amount': '1'}
+    v.validate(doc)
+    assert v.document is not doc
+
+
+def test_coerce_catches_ValueError():
+    schema = {'amount': {'coerce': int}}
+    _errors = assert_fail({'amount': 'not_a_number'}, schema)
+    _errors[0].info = ()  # ignore exception message here
+    assert_has_error(_errors, 'amount', ('amount', 'coerce'),
+                     errors.COERCION_FAILED, int)
+
+
+def test_coerce_catches_TypeError():
+    schema = {'name': {'coerce': str.lower}}
+    _errors = assert_fail({'name': 1234}, schema)
+    _errors[0].info = ()  # ignore exception message here
+    assert_has_error(_errors, 'name', ('name', 'coerce'),
+                     errors.COERCION_FAILED, str.lower)
+
+
+def test_coerce_unknown():
+    schema = {'foo': {'schema': {}, 'allow_unknown': {'coerce': int}}}
+    document = {'foo': {'bar': '0'}}
+    expected = {'foo': {'bar': 0}}
+    assert_normalized(document, expected, schema)
+
+
+def test_custom_coerce_and_rename():
+    class MyNormalizer(Validator):
+        def __init__(self, multiplier, *args, **kwargs):
+            super(MyNormalizer, self).__init__(*args, **kwargs)
+            self.multiplier = multiplier
+
+        def _normalize_coerce_multiply(self, value):
+            return value * self.multiplier
+
+    v = MyNormalizer(2, {'foo': {'coerce': 'multiply'}})
+    assert v.normalized({'foo': 2})['foo'] == 4
+
+    v = MyNormalizer(3, allow_unknown={'rename_handler': 'multiply'})
+    assert v.normalized({3: None}) == {9: None}
+
+
+def test_coerce_chain():
+    drop_prefix = lambda x: x[2:]
+    upper = lambda x: x.upper()
+    schema = {'foo': {'coerce': [hex, drop_prefix, upper]}}
+    assert_normalized({'foo': 15}, {'foo': 'F'}, schema)
+
+
+def test_coerce_chain_aborts(validator):
+    def dont_do_me(value):
+        raise AssertionError('The coercion chain did not abort after an '
+                             'error.')
+    schema = {'foo': {'coerce': [hex, dont_do_me]}}
+    validator({'foo': '0'}, schema)
+    assert errors.COERCION_FAILED in validator._errors
+
+
+def test_coerce_non_digit_in_sequence(validator):
+    # https://github.com/pyeve/cerberus/issues/211
+    schema = {'data': {'type': 'list',
+                       'schema': {'type': 'integer', 'coerce': int}}}
+    document = {'data': ['q']}
+    assert validator.validated(document, schema) is None
+    assert (validator.validated(document, schema, always_return_document=True)
+            == document)  # noqa: W503
+
+
+def test_nullables_dont_fail_coerce():
+    schema = {'foo': {'coerce': int, 'nullable': True, 'type': 'integer'}}
+    document = {'foo': None}
+    assert_normalized(document, document, schema)
+
+
+def test_normalized():
+    schema = {'amount': {'coerce': int}}
+    document = {'amount': '2'}
+    expected = {'amount': 2}
+    assert_normalized(document, expected, schema)
+
+
+def test_rename(validator):
+    schema = {'foo': {'rename': 'bar'}}
+    document = {'foo': 0}
+    expected = {'bar': 0}
+    # We cannot use assertNormalized here since there is a bug where
+    # Cerberus says that the renamed field is an unknown field:
+    # {'bar': 'unknown field'}
+    validator(document, schema, False)
+    assert validator.document == expected
+
+
+def test_rename_handler():
+    validator = Validator(allow_unknown={'rename_handler': int})
+    schema = {}
+    document = {'0': 'foo'}
+    expected = {0: 'foo'}
+    assert_normalized(document, expected, schema, validator)
+
+
+def test_purge_unknown():
+    validator = Validator(purge_unknown=True)
+    schema = {'foo': {'type': 'string'}}
+    document = {'bar': 'foo'}
+    expected = {}
+    assert_normalized(document, expected, schema, validator)
+
+
+def test_purge_unknown_in_subschema():
+    schema = {'foo': {'type': 'dict',
+                      'schema': {'foo': {'type': 'string'}},
+                      'purge_unknown': True}}
+    document = {'foo': {'bar': ''}}
+    expected = {'foo': {}}
+    assert_normalized(document, expected, schema)
+
+
+def test_issue_147_complex():
+    schema = {'revision': {'coerce': int}}
+    document = {'revision': '5', 'file': NamedTemporaryFile(mode='w+')}
+    document['file'].write(r'foobar')
+    document['file'].seek(0)
+    normalized = Validator(schema, allow_unknown=True).normalized(document)
+    assert normalized['revision'] == 5
+    assert normalized['file'].read() == 'foobar'
+    document['file'].close()
+    normalized['file'].close()
+
+
+def test_issue_147_nested_dict():
+    schema = {'thing': {'type': 'dict',
+                        'schema': {'amount': {'coerce': int}}}}
+    ref_obj = '2'
+    document = {'thing': {'amount': ref_obj}}
+    normalized = Validator(schema).normalized(document)
+    assert document is not normalized
+    assert normalized['thing']['amount'] == 2
+    assert ref_obj == '2'
+    assert document['thing']['amount'] is ref_obj
+
+
+def test_coerce_in_valueschema():
+    # https://github.com/pyeve/cerberus/issues/155
+    schema = {'thing': {'type': 'dict',
+                        'valueschema': {'coerce': int,
+                                        'type': 'integer'}}}
+    document = {'thing': {'amount': '2'}}
+    expected = {'thing': {'amount': 2}}
+    assert_normalized(document, expected, schema)
+
+
+def test_coerce_in_keyschema():
+    # https://github.com/pyeve/cerberus/issues/155
+    schema = {'thing': {'type': 'dict',
+                        'keyschema': {'coerce': int, 'type': 'integer'}}}
+    document = {'thing': {'5': 'foo'}}
+    expected = {'thing': {5: 'foo'}}
+    assert_normalized(document, expected, schema)
+
+
+def test_coercion_of_sequence_items(validator):
+    # https://github.com/pyeve/cerberus/issues/161
+    schema = {'a_list': {'type': 'list', 'schema': {'type': 'float',
+                                                    'coerce': float}}}
+    document = {'a_list': [3, 4, 5]}
+    expected = {'a_list': [3.0, 4.0, 5.0]}
+    assert_normalized(document, expected, schema, validator)
+    for x in validator.document['a_list']:
+        assert isinstance(x, float)
+
+
+def test_default_missing():
+    _test_default_missing({'default': 'bar_value'})
+
+
+def test_default_setter_missing():
+    _test_default_missing({'default_setter': lambda doc: 'bar_value'})
+
+
+def _test_default_missing(default):
+    bar_schema = {'type': 'string'}
+    bar_schema.update(default)
+    schema = {'foo': {'type': 'string'},
+              'bar': bar_schema}
+    document = {'foo': 'foo_value'}
+    expected = {'foo': 'foo_value', 'bar': 'bar_value'}
+    assert_normalized(document, expected, schema)
+
+
+def test_default_existent():
+    _test_default_existent({'default': 'bar_value'})
+
+
+def test_default_setter_existent():
+    def raise_error(doc):
+        raise RuntimeError('should not be called')
+    _test_default_existent({'default_setter': raise_error})
+
+
+def _test_default_existent(default):
+    bar_schema = {'type': 'string'}
+    bar_schema.update(default)
+    schema = {'foo': {'type': 'string'},
+              'bar': bar_schema}
+    document = {'foo': 'foo_value', 'bar': 'non_default'}
+    assert_normalized(document, document.copy(), schema)
+
+
+def test_default_none_nullable():
+    _test_default_none_nullable({'default': 'bar_value'})
+
+
+def test_default_setter_none_nullable():
+    def raise_error(doc):
+        raise RuntimeError('should not be called')
+    _test_default_none_nullable({'default_setter': raise_error})
+
+
+def _test_default_none_nullable(default):
+    bar_schema = {'type': 'string',
+                  'nullable': True}
+    bar_schema.update(default)
+    schema = {'foo': {'type': 'string'},
+              'bar': bar_schema}
+    document = {'foo': 'foo_value', 'bar': None}
+    assert_normalized(document, document.copy(), schema)
+
+
+def test_default_none_nonnullable():
+    _test_default_none_nullable({'default': 'bar_value'})
+
+
+def test_default_setter_none_nonnullable():
+    _test_default_none_nullable(
+        {'default_setter': lambda doc: 'bar_value'})
+
+
+def _test_default_none_nonnullable(default):
+    bar_schema = {'type': 'string',
+                  'nullable': False}
+    bar_schema.update(default)
+    schema = {'foo': {'type': 'string'},
+              'bar': bar_schema}
+    document = {'foo': 'foo_value', 'bar': 'bar_value'}
+    assert_normalized(document, document.copy(), schema)
+
+
+def test_default_none_default_value():
+    schema = {'foo': {'type': 'string'},
+              'bar': {'type': 'string',
+                      'nullable': True,
+                      'default': None}}
+    document = {'foo': 'foo_value'}
+    expected = {'foo': 'foo_value', 'bar': None}
+    assert_normalized(document, expected, schema)
+
+
+def test_default_missing_in_subschema():
+    _test_default_missing_in_subschema({'default': 'bar_value'})
+
+
+def test_default_setter_missing_in_subschema():
+    _test_default_missing_in_subschema(
+        {'default_setter': lambda doc: 'bar_value'})
+
+
+def _test_default_missing_in_subschema(default):
+    bar_schema = {'type': 'string'}
+    bar_schema.update(default)
+    schema = {'thing': {'type': 'dict',
+                        'schema': {'foo': {'type': 'string'},
+                                   'bar': bar_schema}}}
+    document = {'thing': {'foo': 'foo_value'}}
+    expected = {'thing': {'foo': 'foo_value',
+                          'bar': 'bar_value'}}
+    assert_normalized(document, expected, schema)
+
+
+def test_depending_default_setters():
+    schema = {
+        'a': {'type': 'integer'},
+        'b': {'type': 'integer', 'default_setter': lambda d: d['a'] + 1},
+        'c': {'type': 'integer', 'default_setter': lambda d: d['b'] * 2},
+        'd': {'type': 'integer',
+              'default_setter': lambda d: d['b'] + d['c']}
+    }
+    document = {'a': 1}
+    expected = {'a': 1, 'b': 2, 'c': 4, 'd': 6}
+    assert_normalized(document, expected, schema)
+
+
+def test_circular_depending_default_setters(validator):
+    schema = {
+        'a': {'type': 'integer', 'default_setter': lambda d: d['b'] + 1},
+        'b': {'type': 'integer', 'default_setter': lambda d: d['a'] + 1}
+    }
+    validator({}, schema)
+    assert errors.SETTING_DEFAULT_FAILED in validator._errors
+
+
+def test_issue_250():
+    # https://github.com/pyeve/cerberus/issues/250
+    schema = {
+        'list': {
+            'type': 'list',
+            'schema': {
+                'type': 'dict',
+                'allow_unknown': True,
+                'schema': {'a': {'type': 'string'}}
+            }
+        }
+    }
+    document = {'list': {'is_a': 'mapping'}}
+    assert_fail(document, schema,
+                error=('list', ('list', 'type'), errors.BAD_TYPE,
+                       schema['list']['type']))
+
+
+def test_issue_250_no_type_pass_on_list():
+    # https://github.com/pyeve/cerberus/issues/250
+    schema = {
+        'list': {
+            'schema': {
+                'allow_unknown': True,
+                'type': 'dict',
+                'schema': {'a': {'type': 'string'}}
+            }
+        }
+    }
+    document = {'list': [{'a': 'known', 'b': 'unknown'}]}
+    assert_normalized(document, document, schema)
+
+
+def test_issue_250_no_type_fail_on_dict():
+    # https://github.com/pyeve/cerberus/issues/250
+    schema = {
+        'list': {
+            'schema': {
+                'allow_unknown': True,
+                'schema': {'a': {'type': 'string'}}
+            }
+        }
+    }
+    document = {'list': {'a': {'a': 'known'}}}
+    assert_fail(document, schema,
+                error=('list', ('list', 'schema'), errors.BAD_TYPE_FOR_SCHEMA,
+                       schema['list']['schema']))
+
+
+def test_issue_250_no_type_fail_pass_on_other():
+    # https://github.com/pyeve/cerberus/issues/250
+    schema = {
+        'list': {
+            'schema': {
+                'allow_unknown': True,
+                'schema': {'a': {'type': 'string'}}
+            }
+        }
+    }
+    document = {'list': 1}
+    assert_normalized(document, document, schema)
+
+
+def test_allow_unknown_with_of_rules():
+    # https://github.com/pyeve/cerberus/issues/251
+    schema = {
+        'test': {
+            'oneof': [
+                {
+                    'type': 'dict',
+                    'allow_unknown': True,
+                    'schema': {'known': {'type': 'string'}}
+                },
+                {
+                    'type': 'dict',
+                    'schema': {'known': {'type': 'string'}}
+                },
+            ]
+        }
+    }
+    # check the regression and that allow_unknown does not cause any
+    # behaviour different from the expected one for oneof.
+    document = {'test': {'known': 's'}}
+    assert_fail(document, schema,
+                error=('test', ('test', 'oneof'),
+                       errors.ONEOF, schema['test']['oneof']))
+    # check that allow_unknown is actually applied
+    document = {'test': {'known': 's', 'unknown': 'asd'}}
+    assert_success(document, schema)
+
+
+def test_271_normalising_tuples():
+    # https://github.com/pyeve/cerberus/issues/271
+    schema = {
+        'my_field': {
+            'type': 'list',
+            'schema': {'type': ('string', 'number', 'dict')}
+        }
+    }
+    document = {'my_field': ('foo', 'bar', 42, 'albert',
+                             'kandinsky', {'items': 23})}
+    assert_success(document, schema)
+
+    normalized = Validator(schema).normalized(document)
+    assert normalized['my_field'] == ('foo', 'bar', 42, 'albert',
+                                      'kandinsky', {'items': 23})
+
+
+def test_allow_unknown_wo_schema():
+    # https://github.com/pyeve/cerberus/issues/302
+    v = Validator({'a': {'type': 'dict', 'allow_unknown': True}})
+    v({'a': {}})
+
+
+def test_allow_unknown_with_purge_unknown():
+    validator = Validator(purge_unknown=True)
+    schema = {'foo': {'type': 'dict', 'allow_unknown': True}}
+    document = {'foo': {'bar': True}, 'bar': 'foo'}
+    expected = {'foo': {'bar': True}}
+    assert_normalized(document, expected, schema, validator)
+
+
+def test_allow_unknown_with_purge_unknown_subdocument():
+    validator = Validator(purge_unknown=True)
+    schema = {
+        'foo': {
+            'type': 'dict',
+            'schema': {
+                'bar': {
+                    'type': 'string'
+                }
+            },
+            'allow_unknown': True
+        }
+    }
+    document = {'foo': {'bar': 'baz', 'corge': False}, 'thud': 'xyzzy'}
+    expected = {'foo': {'bar': 'baz', 'corge': False}}
+    assert_normalized(document, expected, schema, validator)
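
The normalization tests above revolve around 'default', 'default_setter', 'purge_unknown' and 'allow_unknown'. A minimal sketch of driving the vendored validator the same way, assuming 'cerberus' resolves to this bundled copy (the tests import it under that name; inside the plugin the package now lives at mncheck/ext/cerberus):

    from cerberus import Validator

    schema = {
        'foo': {'type': 'dict', 'allow_unknown': True},
        'bar': {'type': 'string', 'nullable': True, 'default': 'bar_value'},
    }
    v = Validator(schema, purge_unknown=True)
    # the unknown top-level key 'junk' is purged, the missing 'bar' receives
    # its default, and unknown keys inside 'foo' survive thanks to allow_unknown
    print(v.normalized({'foo': {'extra': 1}, 'junk': 0}))
    # {'foo': {'extra': 1}, 'bar': 'bar_value'}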

+ 82 - 82
ext/cerberus/tests/test_registries.py → mncheck/ext/cerberus/tests/test_registries.py

@@ -1,82 +1,82 @@
-# -*- coding: utf-8 -*-
-
-from cerberus import schema_registry, rules_set_registry, Validator
-from cerberus.tests import (assert_fail, assert_normalized,
-                            assert_schema_error, assert_success)
-
-
-def test_schema_registry_simple():
-    schema_registry.add('foo', {'bar': {'type': 'string'}})
-    schema = {'a': {'schema': 'foo'},
-              'b': {'schema': 'foo'}}
-    document = {'a': {'bar': 'a'}, 'b': {'bar': 'b'}}
-    assert_success(document, schema)
-
-
-def test_top_level_reference():
-    schema_registry.add('peng', {'foo': {'type': 'integer'}})
-    document = {'foo': 42}
-    assert_success(document, 'peng')
-
-
-def test_rules_set_simple():
-    rules_set_registry.add('foo', {'type': 'integer'})
-    assert_success({'bar': 1}, {'bar': 'foo'})
-    assert_fail({'bar': 'one'}, {'bar': 'foo'})
-
-
-def test_allow_unknown_as_reference():
-    rules_set_registry.add('foo', {'type': 'number'})
-    v = Validator(allow_unknown='foo')
-    assert_success({0: 1}, {}, v)
-    assert_fail({0: 'one'}, {}, v)
-
-
-def test_recursion():
-    rules_set_registry.add('self',
-                           {'type': 'dict', 'allow_unknown': 'self'})
-    v = Validator(allow_unknown='self')
-    assert_success({0: {1: {2: {}}}}, {}, v)
-
-
-def test_references_remain_unresolved(validator):
-    rules_set_registry.extend((('boolean', {'type': 'boolean'}),
-                               ('booleans', {'valueschema': 'boolean'})))
-    validator.schema = {'foo': 'booleans'}
-    assert 'booleans' == validator.schema['foo']
-    assert 'boolean' == rules_set_registry._storage['booleans']['valueschema']
-
-
-def test_rules_registry_with_anyof_type():
-    rules_set_registry.add('string_or_integer',
-                           {'anyof_type': ['string', 'integer']})
-    schema = {'soi': 'string_or_integer'}
-    assert_success({'soi': 'hello'}, schema)
-
-
-def test_schema_registry_with_anyof_type():
-    schema_registry.add('soi_id', {'id': {'anyof_type': ['string', 'integer']}})
-    schema = {'soi': {'schema': 'soi_id'}}
-    assert_success({'soi': {'id': 'hello'}}, schema)
-
-
-def test_normalization_with_rules_set():
-    # https://github.com/pyeve/cerberus/issues/283
-    rules_set_registry.add('foo', {'default': 42})
-    assert_normalized({}, {'bar': 42}, {'bar': 'foo'})
-    rules_set_registry.add('foo', {'default_setter': lambda _: 42})
-    assert_normalized({}, {'bar': 42}, {'bar': 'foo'})
-    rules_set_registry.add('foo', {'type': 'integer', 'nullable': True})
-    assert_success({'bar': None}, {'bar': 'foo'})
-
-
-def test_rules_set_with_dict_field():
-    document = {'a_dict': {'foo': 1}}
-    schema = {'a_dict': {'type': 'dict', 'schema': {'foo': 'rule'}}}
-
-    # the schema's not yet added to the valid ones, so test the faulty first
-    rules_set_registry.add('rule', {'tüpe': 'integer'})
-    assert_schema_error(document, schema)
-
-    rules_set_registry.add('rule', {'type': 'integer'})
-    assert_success(document, schema)
+# -*- coding: utf-8 -*-
+
+from cerberus import schema_registry, rules_set_registry, Validator
+from cerberus.tests import (assert_fail, assert_normalized,
+                            assert_schema_error, assert_success)
+
+
+def test_schema_registry_simple():
+    schema_registry.add('foo', {'bar': {'type': 'string'}})
+    schema = {'a': {'schema': 'foo'},
+              'b': {'schema': 'foo'}}
+    document = {'a': {'bar': 'a'}, 'b': {'bar': 'b'}}
+    assert_success(document, schema)
+
+
+def test_top_level_reference():
+    schema_registry.add('peng', {'foo': {'type': 'integer'}})
+    document = {'foo': 42}
+    assert_success(document, 'peng')
+
+
+def test_rules_set_simple():
+    rules_set_registry.add('foo', {'type': 'integer'})
+    assert_success({'bar': 1}, {'bar': 'foo'})
+    assert_fail({'bar': 'one'}, {'bar': 'foo'})
+
+
+def test_allow_unknown_as_reference():
+    rules_set_registry.add('foo', {'type': 'number'})
+    v = Validator(allow_unknown='foo')
+    assert_success({0: 1}, {}, v)
+    assert_fail({0: 'one'}, {}, v)
+
+
+def test_recursion():
+    rules_set_registry.add('self',
+                           {'type': 'dict', 'allow_unknown': 'self'})
+    v = Validator(allow_unknown='self')
+    assert_success({0: {1: {2: {}}}}, {}, v)
+
+
+def test_references_remain_unresolved(validator):
+    rules_set_registry.extend((('boolean', {'type': 'boolean'}),
+                               ('booleans', {'valueschema': 'boolean'})))
+    validator.schema = {'foo': 'booleans'}
+    assert 'booleans' == validator.schema['foo']
+    assert 'boolean' == rules_set_registry._storage['booleans']['valueschema']
+
+
+def test_rules_registry_with_anyof_type():
+    rules_set_registry.add('string_or_integer',
+                           {'anyof_type': ['string', 'integer']})
+    schema = {'soi': 'string_or_integer'}
+    assert_success({'soi': 'hello'}, schema)
+
+
+def test_schema_registry_with_anyof_type():
+    schema_registry.add('soi_id', {'id': {'anyof_type': ['string', 'integer']}})
+    schema = {'soi': {'schema': 'soi_id'}}
+    assert_success({'soi': {'id': 'hello'}}, schema)
+
+
+def test_normalization_with_rules_set():
+    # https://github.com/pyeve/cerberus/issues/283
+    rules_set_registry.add('foo', {'default': 42})
+    assert_normalized({}, {'bar': 42}, {'bar': 'foo'})
+    rules_set_registry.add('foo', {'default_setter': lambda _: 42})
+    assert_normalized({}, {'bar': 42}, {'bar': 'foo'})
+    rules_set_registry.add('foo', {'type': 'integer', 'nullable': True})
+    assert_success({'bar': None}, {'bar': 'foo'})
+
+
+def test_rules_set_with_dict_field():
+    document = {'a_dict': {'foo': 1}}
+    schema = {'a_dict': {'type': 'dict', 'schema': {'foo': 'rule'}}}
+
+    # the schema's not yet added to the valid ones, so test the faulty first
+    rules_set_registry.add('rule', {'tüpe': 'integer'})
+    assert_schema_error(document, schema)
+
+    rules_set_registry.add('rule', {'type': 'integer'})
+    assert_success(document, schema)
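
Both registries exercised above are module-level singletons that map a name to a reusable rules set or sub-schema. A short sketch under the same import assumption (the names 'non_empty_string' and 'point' are illustrative, not taken from this diff):

    from cerberus import Validator, rules_set_registry, schema_registry

    # a reusable rules set, referenced by name in a field definition
    rules_set_registry.add('non_empty_string', {'type': 'string', 'empty': False})
    # a named sub-schema, referenced by name in a 'schema' rule
    schema_registry.add('point', {'x': {'type': 'integer'},
                                  'y': {'type': 'integer'}})

    v = Validator({'name': 'non_empty_string',
                   'position': {'type': 'dict', 'schema': 'point'}})
    assert v.validate({'name': 'origin', 'position': {'x': 0, 'y': 0}})
    assert not v.validate({'name': '', 'position': {'x': 0, 'y': 0}})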

+ 111 - 111
ext/cerberus/tests/test_schema.py → mncheck/ext/cerberus/tests/test_schema.py

@@ -1,111 +1,111 @@
-# -*- coding: utf-8 -*-
-
-import pytest
-
-from cerberus import Validator, errors, SchemaError
-from cerberus.schema import UnvalidatedSchema
-from cerberus.tests import assert_schema_error
-
-
-def test_empty_schema():
-    validator = Validator()
-    with pytest.raises(SchemaError, message=errors.SCHEMA_ERROR_MISSING):
-        validator({}, schema=None)
-
-
-def test_bad_schema_type(validator):
-    schema = "this string should really be dict"
-    exp_msg = errors.SCHEMA_ERROR_DEFINITION_TYPE.format(schema)
-    with pytest.raises(SchemaError, message=exp_msg):
-        validator.schema = schema
-
-
-def test_bad_schema_type_field(validator):
-    field = 'foo'
-    schema = {field: {'schema': {'bar': {'type': 'strong'}}}}
-    with pytest.raises(SchemaError):
-        validator.schema = schema
-
-
-def test_unknown_rule(validator):
-    message = "{'foo': [{'unknown': ['unknown rule']}]}"
-    with pytest.raises(SchemaError, message=message):
-        validator.schema = {'foo': {'unknown': 'rule'}}
-
-
-def test_unknown_type(validator):
-    field = 'name'
-    value = 'catch_me'
-    message = str({field: [{'type': ['unallowed value %s' % value]}]})
-    with pytest.raises(SchemaError, message=message):
-        validator.schema = {'foo': {'unknown': 'rule'}}
-
-
-def test_bad_schema_definition(validator):
-    field = 'name'
-    message = str({field: ['must be of dict type']})
-    with pytest.raises(SchemaError, message=message):
-        validator.schema = {field: 'this should really be a dict'}
-
-
-def test_bad_of_rules():
-    schema = {'foo': {'anyof': {'type': 'string'}}}
-    assert_schema_error({}, schema)
-
-
-def test_normalization_rules_are_invalid_in_of_rules():
-    schema = {0: {'anyof': [{'coerce': lambda x: x}]}}
-    assert_schema_error({}, schema)
-
-
-def test_anyof_allof_schema_validate():
-    # make sure schema with 'anyof' and 'allof' constraints are checked
-    # correctly
-    schema = {'doc': {'type': 'dict',
-                      'anyof': [
-                          {'schema': [{'param': {'type': 'number'}}]}]}}
-    assert_schema_error({'doc': 'this is my document'}, schema)
-
-    schema = {'doc': {'type': 'dict',
-                      'allof': [
-                          {'schema': [{'param': {'type': 'number'}}]}]}}
-    assert_schema_error({'doc': 'this is my document'}, schema)
-
-
-def test_repr():
-    v = Validator({'foo': {'type': 'string'}})
-    assert repr(v.schema) == "{'foo': {'type': 'string'}}"
-
-
-def test_validated_schema_cache():
-    v = Validator({'foozifix': {'coerce': int}})
-    cache_size = len(v._valid_schemas)
-
-    v = Validator({'foozifix': {'type': 'integer'}})
-    cache_size += 1
-    assert len(v._valid_schemas) == cache_size
-
-    v = Validator({'foozifix': {'coerce': int}})
-    assert len(v._valid_schemas) == cache_size
-
-    max_cache_size = 147
-    assert cache_size <= max_cache_size, \
-        "There's an unexpected high amount (%s) of cached valid " \
-        "definition schemas. Unless you added further tests, " \
-        "there are good chances that something is wrong. " \
-        "If you added tests with new schemas, you can try to " \
-        "adjust the variable `max_cache_size` according to " \
-        "the added schemas." % cache_size
-
-
-def test_expansion_in_nested_schema():
-    schema = {'detroit': {'schema': {'anyof_regex': ['^Aladdin', 'Sane$']}}}
-    v = Validator(schema)
-    assert (v.schema['detroit']['schema'] ==
-            {'anyof': [{'regex': '^Aladdin'}, {'regex': 'Sane$'}]})
-
-
-def test_unvalidated_schema_can_be_copied():
-    schema = UnvalidatedSchema()
-    schema_copy = schema.copy()
-    assert schema_copy == schema
+# -*- coding: utf-8 -*-
+
+import pytest
+
+from cerberus import Validator, errors, SchemaError
+from cerberus.schema import UnvalidatedSchema
+from cerberus.tests import assert_schema_error
+
+
+def test_empty_schema():
+    validator = Validator()
+    with pytest.raises(SchemaError, message=errors.SCHEMA_ERROR_MISSING):
+        validator({}, schema=None)
+
+
+def test_bad_schema_type(validator):
+    schema = "this string should really be dict"
+    exp_msg = errors.SCHEMA_ERROR_DEFINITION_TYPE.format(schema)
+    with pytest.raises(SchemaError, message=exp_msg):
+        validator.schema = schema
+
+
+def test_bad_schema_type_field(validator):
+    field = 'foo'
+    schema = {field: {'schema': {'bar': {'type': 'strong'}}}}
+    with pytest.raises(SchemaError):
+        validator.schema = schema
+
+
+def test_unknown_rule(validator):
+    message = "{'foo': [{'unknown': ['unknown rule']}]}"
+    with pytest.raises(SchemaError, message=message):
+        validator.schema = {'foo': {'unknown': 'rule'}}
+
+
+def test_unknown_type(validator):
+    field = 'name'
+    value = 'catch_me'
+    message = str({field: [{'type': ['unallowed value %s' % value]}]})
+    with pytest.raises(SchemaError, message=message):
+        validator.schema = {'foo': {'unknown': 'rule'}}
+
+
+def test_bad_schema_definition(validator):
+    field = 'name'
+    message = str({field: ['must be of dict type']})
+    with pytest.raises(SchemaError, message=message):
+        validator.schema = {field: 'this should really be a dict'}
+
+
+def test_bad_of_rules():
+    schema = {'foo': {'anyof': {'type': 'string'}}}
+    assert_schema_error({}, schema)
+
+
+def test_normalization_rules_are_invalid_in_of_rules():
+    schema = {0: {'anyof': [{'coerce': lambda x: x}]}}
+    assert_schema_error({}, schema)
+
+
+def test_anyof_allof_schema_validate():
+    # make sure schema with 'anyof' and 'allof' constraints are checked
+    # correctly
+    schema = {'doc': {'type': 'dict',
+                      'anyof': [
+                          {'schema': [{'param': {'type': 'number'}}]}]}}
+    assert_schema_error({'doc': 'this is my document'}, schema)
+
+    schema = {'doc': {'type': 'dict',
+                      'allof': [
+                          {'schema': [{'param': {'type': 'number'}}]}]}}
+    assert_schema_error({'doc': 'this is my document'}, schema)
+
+
+def test_repr():
+    v = Validator({'foo': {'type': 'string'}})
+    assert repr(v.schema) == "{'foo': {'type': 'string'}}"
+
+
+def test_validated_schema_cache():
+    v = Validator({'foozifix': {'coerce': int}})
+    cache_size = len(v._valid_schemas)
+
+    v = Validator({'foozifix': {'type': 'integer'}})
+    cache_size += 1
+    assert len(v._valid_schemas) == cache_size
+
+    v = Validator({'foozifix': {'coerce': int}})
+    assert len(v._valid_schemas) == cache_size
+
+    max_cache_size = 147
+    assert cache_size <= max_cache_size, \
+        "There's an unexpected high amount (%s) of cached valid " \
+        "definition schemas. Unless you added further tests, " \
+        "there are good chances that something is wrong. " \
+        "If you added tests with new schemas, you can try to " \
+        "adjust the variable `max_cache_size` according to " \
+        "the added schemas." % cache_size
+
+
+def test_expansion_in_nested_schema():
+    schema = {'detroit': {'schema': {'anyof_regex': ['^Aladdin', 'Sane$']}}}
+    v = Validator(schema)
+    assert (v.schema['detroit']['schema'] ==
+            {'anyof': [{'regex': '^Aladdin'}, {'regex': 'Sane$'}]})
+
+
+def test_unvalidated_schema_can_be_copied():
+    schema = UnvalidatedSchema()
+    schema_copy = schema.copy()
+    assert schema_copy == schema
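
As the tests above show, an invalid definition schema surfaces as a SchemaError as soon as it is assigned to a validator; the same happens when it is passed to the constructor. A brief sketch under the same import assumption:

    from cerberus import Validator, SchemaError

    try:
        Validator({'foo': {'unknown_rule': 42}})
    except SchemaError as exc:
        # e.g. {'foo': [{'unknown_rule': ['unknown rule']}]}
        print('invalid schema:', exc)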

+ 1579 - 1579
ext/cerberus/tests/test_validation.py → mncheck/ext/cerberus/tests/test_validation.py

@@ -1,1579 +1,1579 @@
-# -*- coding: utf-8 -*-
-
-import re
-import sys
-from datetime import datetime, date
-from random import choice
-from string import ascii_lowercase
-
-from pytest import mark
-
-from cerberus import errors, Validator
-from cerberus.tests import (
-    assert_bad_type, assert_document_error, assert_fail, assert_has_error,
-    assert_not_has_error, assert_success
-)
-from cerberus.tests.conftest import sample_schema
-
-
-def test_empty_document():
-    assert_document_error(None, sample_schema, None,
-                          errors.DOCUMENT_MISSING)
-
-
-def test_bad_document_type():
-    document = "not a dict"
-    assert_document_error(
-        document, sample_schema, None,
-        errors.DOCUMENT_FORMAT.format(document)
-    )
-
-
-def test_unknown_field(validator):
-    field = 'surname'
-    assert_fail({field: 'doe'}, validator=validator,
-                error=(field, (), errors.UNKNOWN_FIELD, None))
-    assert validator.errors == {field: ['unknown field']}
-
-
-def test_empty_field_definition(document):
-    field = 'name'
-    schema = {field: {}}
-    assert_success(document, schema)
-
-
-def test_required_field(schema):
-    field = 'a_required_string'
-    required_string_extension = {
-        'a_required_string': {'type': 'string',
-                              'minlength': 2,
-                              'maxlength': 10,
-                              'required': True}}
-    schema.update(required_string_extension)
-    assert_fail({'an_integer': 1}, schema,
-                error=(field, (field, 'required'), errors.REQUIRED_FIELD,
-                       True))
-
-
-def test_nullable_field():
-    assert_success({'a_nullable_integer': None})
-    assert_success({'a_nullable_integer': 3})
-    assert_success({'a_nullable_field_without_type': None})
-    assert_fail({'a_nullable_integer': "foo"})
-    assert_fail({'an_integer': None})
-    assert_fail({'a_not_nullable_field_without_type': None})
-
-
-def test_readonly_field():
-    field = 'a_readonly_string'
-    assert_fail({field: 'update me if you can'},
-                error=(field, (field, 'readonly'), errors.READONLY_FIELD, True))
-
-
-def test_readonly_field_first_rule():
-    # test that readonly rule is checked before any other rule, and blocks.
-    # See #63.
-    schema = {
-        'a_readonly_number': {
-            'type': 'integer',
-            'readonly': True,
-            'max': 1
-        }
-    }
-    v = Validator(schema)
-    v.validate({'a_readonly_number': 2})
-    # it would be a list if there's more than one error; we get a dict
-    # instead.
-    assert 'read-only' in v.errors['a_readonly_number'][0]
-
-
-def test_readonly_field_with_default_value():
-    schema = {
-        'created': {
-            'type': 'string',
-            'readonly': True,
-            'default': 'today'
-        },
-        'modified': {
-            'type': 'string',
-            'readonly': True,
-            'default_setter': lambda d: d['created']
-        }
-    }
-    assert_success({}, schema)
-    expected_errors = [('created', ('created', 'readonly'),
-                        errors.READONLY_FIELD,
-                        schema['created']['readonly']),
-                       ('modified', ('modified', 'readonly'),
-                        errors.READONLY_FIELD,
-                        schema['modified']['readonly'])]
-    assert_fail({'created': 'tomorrow', 'modified': 'today'},
-                schema, errors=expected_errors)
-    assert_fail({'created': 'today', 'modified': 'today'},
-                schema, errors=expected_errors)
-
-
-def test_nested_readonly_field_with_default_value():
-    schema = {
-        'some_field': {
-            'type': 'dict',
-            'schema': {
-                'created': {
-                    'type': 'string',
-                    'readonly': True,
-                    'default': 'today'
-                },
-                'modified': {
-                    'type': 'string',
-                    'readonly': True,
-                    'default_setter': lambda d: d['created']
-                }
-            }
-        }
-    }
-    assert_success({'some_field': {}}, schema)
-    expected_errors = [
-        (('some_field', 'created'),
-         ('some_field', 'schema', 'created', 'readonly'),
-         errors.READONLY_FIELD,
-         schema['some_field']['schema']['created']['readonly']),
-        (('some_field', 'modified'),
-         ('some_field', 'schema', 'modified', 'readonly'),
-         errors.READONLY_FIELD,
-         schema['some_field']['schema']['modified']['readonly'])]
-    assert_fail({'some_field': {'created': 'tomorrow', 'modified': 'now'}},
-                schema, errors=expected_errors)
-    assert_fail({'some_field': {'created': 'today', 'modified': 'today'}},
-                schema, errors=expected_errors)
-
-
-def test_repeated_readonly(validator):
-    # https://github.com/pyeve/cerberus/issues/311
-    validator.schema = {'id': {'readonly': True}}
-    assert_fail({'id': 0}, validator=validator)
-    assert_fail({'id': 0}, validator=validator)
-
-
-def test_not_a_string():
-    assert_bad_type('a_string', 'string', 1)
-
-
-def test_not_a_binary():
-    # 'u' literal prefix produces type `str` in Python 3
-    assert_bad_type('a_binary', 'binary', u"i'm not a binary")
-
-
-def test_not_a_integer():
-    assert_bad_type('an_integer', 'integer', "i'm not an integer")
-
-
-def test_not_a_boolean():
-    assert_bad_type('a_boolean', 'boolean', "i'm not a boolean")
-
-
-def test_not_a_datetime():
-    assert_bad_type('a_datetime', 'datetime', "i'm not a datetime")
-
-
-def test_not_a_float():
-    assert_bad_type('a_float', 'float', "i'm not a float")
-
-
-def test_not_a_number():
-    assert_bad_type('a_number', 'number', "i'm not a number")
-
-
-def test_not_a_list():
-    assert_bad_type('a_list_of_values', 'list', "i'm not a list")
-
-
-def test_not_a_dict():
-    assert_bad_type('a_dict', 'dict', "i'm not a dict")
-
-
-def test_bad_max_length(schema):
-    field = 'a_string'
-    max_length = schema[field]['maxlength']
-    value = "".join(choice(ascii_lowercase) for i in range(max_length + 1))
-    assert_fail({field: value},
-                error=(field, (field, 'maxlength'), errors.MAX_LENGTH,
-                       max_length, (len(value),)))
-
-
-def test_bad_max_length_binary(schema):
-    field = 'a_binary'
-    max_length = schema[field]['maxlength']
-    value = b'\x00' * (max_length + 1)
-    assert_fail({field: value},
-                error=(field, (field, 'maxlength'), errors.MAX_LENGTH,
-                       max_length, (len(value),)))
-
-
-def test_bad_min_length(schema):
-    field = 'a_string'
-    min_length = schema[field]['minlength']
-    value = "".join(choice(ascii_lowercase) for i in range(min_length - 1))
-    assert_fail({field: value},
-                error=(field, (field, 'minlength'), errors.MIN_LENGTH,
-                       min_length, (len(value),)))
-
-
-def test_bad_min_length_binary(schema):
-    field = 'a_binary'
-    min_length = schema[field]['minlength']
-    value = b'\x00' * (min_length - 1)
-    assert_fail({field: value},
-                error=(field, (field, 'minlength'), errors.MIN_LENGTH,
-                       min_length, (len(value),)))
-
-
-def test_bad_max_value(schema):
-    def assert_bad_max_value(field, inc):
-        max_value = schema[field]['max']
-        value = max_value + inc
-        assert_fail({field: value},
-                    error=(field, (field, 'max'), errors.MAX_VALUE, max_value))
-
-    field = 'an_integer'
-    assert_bad_max_value(field, 1)
-    field = 'a_float'
-    assert_bad_max_value(field, 1.0)
-    field = 'a_number'
-    assert_bad_max_value(field, 1)
-
-
-def test_bad_min_value(schema):
-    def assert_bad_min_value(field, inc):
-        min_value = schema[field]['min']
-        value = min_value - inc
-        assert_fail({field: value},
-                    error=(field, (field, 'min'),
-                           errors.MIN_VALUE, min_value))
-
-    field = 'an_integer'
-    assert_bad_min_value(field, 1)
-    field = 'a_float'
-    assert_bad_min_value(field, 1.0)
-    field = 'a_number'
-    assert_bad_min_value(field, 1)
-
-
-def test_bad_schema():
-    field = 'a_dict'
-    subschema_field = 'address'
-    schema = {field: {'type': 'dict',
-                      'schema': {subschema_field: {'type': 'string'},
-                                 'city': {'type': 'string', 'required': True}}
-                      }}
-    document = {field: {subschema_field: 34}}
-    validator = Validator(schema)
-
-    assert_fail(
-        document, validator=validator,
-        error=(field, (field, 'schema'), errors.MAPPING_SCHEMA,
-               validator.schema['a_dict']['schema']),
-        child_errors=[
-            ((field, subschema_field),
-             (field, 'schema', subschema_field, 'type'),
-             errors.BAD_TYPE, 'string'),
-            ((field, 'city'), (field, 'schema', 'city', 'required'),
-             errors.REQUIRED_FIELD, True)]
-    )
-
-    handler = errors.BasicErrorHandler
-    assert field in validator.errors
-    assert subschema_field in validator.errors[field][-1]
-    assert handler.messages[errors.BAD_TYPE.code].format(constraint='string') \
-        in validator.errors[field][-1][subschema_field]
-    assert 'city' in validator.errors[field][-1]
-    assert (handler.messages[errors.REQUIRED_FIELD.code]
-            in validator.errors[field][-1]['city'])
-
-
-def test_bad_valueschema():
-    field = 'a_dict_with_valueschema'
-    schema_field = 'a_string'
-    value = {schema_field: 'not an integer'}
-
-    exp_child_errors = [
-        ((field, schema_field), (field, 'valueschema', 'type'), errors.BAD_TYPE,
-         'integer')]
-    assert_fail({field: value},
-                error=(field, (field, 'valueschema'), errors.VALUESCHEMA,
-                       {'type': 'integer'}), child_errors=exp_child_errors)
-
-
-def test_bad_list_of_values(validator):
-    field = 'a_list_of_values'
-    value = ['a string', 'not an integer']
-    assert_fail({field: value}, validator=validator,
-                error=(field, (field, 'items'), errors.BAD_ITEMS,
-                       [{'type': 'string'}, {'type': 'integer'}]),
-                child_errors=[((field, 1), (field, 'items', 1, 'type'),
-                               errors.BAD_TYPE, 'integer')])
-
-    assert (errors.BasicErrorHandler.messages[errors.BAD_TYPE.code].
-            format(constraint='integer')
-            in validator.errors[field][-1][1])
-
-    value = ['a string', 10, 'an extra item']
-    assert_fail({field: value},
-                error=(field, (field, 'items'), errors.ITEMS_LENGTH,
-                       [{'type': 'string'}, {'type': 'integer'}], (2, 3)))
-
-
-def test_bad_list_of_integers():
-    field = 'a_list_of_integers'
-    value = [34, 'not an integer']
-    assert_fail({field: value})
-
-
-def test_bad_list_of_dicts():
-    field = 'a_list_of_dicts'
-    map_schema = {'sku': {'type': 'string'},
-                  'price': {'type': 'integer', 'required': True}}
-    seq_schema = {'type': 'dict', 'schema': map_schema}
-    schema = {field: {'type': 'list', 'schema': seq_schema}}
-    validator = Validator(schema)
-    value = [{'sku': 'KT123', 'price': '100'}]
-    document = {field: value}
-
-    assert_fail(document, validator=validator,
-                error=(field, (field, 'schema'), errors.SEQUENCE_SCHEMA,
-                       seq_schema),
-                child_errors=[((field, 0), (field, 'schema', 'schema'),
-                               errors.MAPPING_SCHEMA, map_schema)])
-
-    assert field in validator.errors
-    assert 0 in validator.errors[field][-1]
-    assert 'price' in validator.errors[field][-1][0][-1]
-    exp_msg = errors.BasicErrorHandler.messages[errors.BAD_TYPE.code] \
-        .format(constraint='integer')
-    assert exp_msg in validator.errors[field][-1][0][-1]['price']
-
-    value = ["not a dict"]
-    exp_child_errors = [((field, 0), (field, 'schema', 'type'),
-                         errors.BAD_TYPE, 'dict', ())]
-    assert_fail({field: value},
-                error=(field, (field, 'schema'), errors.SEQUENCE_SCHEMA,
-                       seq_schema),
-                child_errors=exp_child_errors)
-
-
-def test_array_unallowed():
-    field = 'an_array'
-    value = ['agent', 'client', 'profit']
-    assert_fail({field: value},
-                error=(field, (field, 'allowed'), errors.UNALLOWED_VALUES,
-                       ['agent', 'client', 'vendor'], ['profit']))
-
-
-def test_string_unallowed():
-    field = 'a_restricted_string'
-    value = 'profit'
-    assert_fail({field: value},
-                error=(field, (field, 'allowed'), errors.UNALLOWED_VALUE,
-                       ['agent', 'client', 'vendor'], value))
-
-
-def test_integer_unallowed():
-    field = 'a_restricted_integer'
-    value = 2
-    assert_fail({field: value},
-                error=(field, (field, 'allowed'), errors.UNALLOWED_VALUE,
-                       [-1, 0, 1], value))
-
-
-def test_integer_allowed():
-    assert_success({'a_restricted_integer': -1})
-
-
-def test_validate_update():
-    assert_success({'an_integer': 100,
-                    'a_dict': {'address': 'adr'},
-                    'a_list_of_dicts': [{'sku': 'let'}]
-                    }, update=True)
-
-
-def test_string():
-    assert_success({'a_string': 'john doe'})
-
-
-def test_string_allowed():
-    assert_success({'a_restricted_string': 'client'})
-
-
-def test_integer():
-    assert_success({'an_integer': 50})
-
-
-def test_boolean():
-    assert_success({'a_boolean': True})
-
-
-def test_datetime():
-    assert_success({'a_datetime': datetime.now()})
-
-
-def test_float():
-    assert_success({'a_float': 3.5})
-    assert_success({'a_float': 1})
-
-
-def test_number():
-    assert_success({'a_number': 3.5})
-    assert_success({'a_number': 3})
-
-
-def test_array():
-    assert_success({'an_array': ['agent', 'client']})
-
-
-def test_set():
-    assert_success({'a_set': set(['hello', 1])})
-
-
-def test_one_of_two_types(validator):
-    field = 'one_or_more_strings'
-    assert_success({field: 'foo'})
-    assert_success({field: ['foo', 'bar']})
-    exp_child_errors = [((field, 1), (field, 'schema', 'type'),
-                         errors.BAD_TYPE, 'string')]
-    assert_fail({field: ['foo', 23]}, validator=validator,
-                error=(field, (field, 'schema'), errors.SEQUENCE_SCHEMA,
-                       {'type': 'string'}),
-                child_errors=exp_child_errors)
-    assert_fail({field: 23},
-                error=((field,), (field, 'type'), errors.BAD_TYPE,
-                       ['string', 'list']))
-    assert validator.errors == {field: [{1: ['must be of string type']}]}
-
-
-def test_regex(validator):
-    field = 'a_regex_email'
-    assert_success({field: 'valid.email@gmail.com'}, validator=validator)
-    assert_fail({field: 'invalid'}, update=True,
-                error=(field, (field, 'regex'), errors.REGEX_MISMATCH,
-                       '^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'))
-
-
-def test_a_list_of_dicts():
-    assert_success(
-        {
-            'a_list_of_dicts': [
-                {'sku': 'AK345', 'price': 100},
-                {'sku': 'YZ069', 'price': 25}
-            ]
-        }
-    )
-
-
-def test_a_list_of_values():
-    assert_success({'a_list_of_values': ['hello', 100]})
-
-
-def test_a_list_of_integers():
-    assert_success({'a_list_of_integers': [99, 100]})
-
-
-def test_a_dict(schema):
-    assert_success({'a_dict': {'address': 'i live here',
-                               'city': 'in my own town'}})
-    assert_fail(
-        {'a_dict': {'address': 8545}},
-        error=('a_dict', ('a_dict', 'schema'), errors.MAPPING_SCHEMA,
-               schema['a_dict']['schema']),
-        child_errors=[(('a_dict', 'address'),
-                       ('a_dict', 'schema', 'address', 'type'),
-                       errors.BAD_TYPE, 'string'),
-                      (('a_dict', 'city'),
-                       ('a_dict', 'schema', 'city', 'required'),
-                       errors.REQUIRED_FIELD, True)]
-    )
-
-
-def test_a_dict_with_valueschema(validator):
-    assert_success({'a_dict_with_valueschema':
-                   {'an integer': 99, 'another integer': 100}})
-
-    error = (
-        'a_dict_with_valueschema', ('a_dict_with_valueschema', 'valueschema'),
-        errors.VALUESCHEMA, {'type': 'integer'})
-    child_errors = [
-        (('a_dict_with_valueschema', 'a string'),
-         ('a_dict_with_valueschema', 'valueschema', 'type'),
-         errors.BAD_TYPE, 'integer')]
-
-    assert_fail({'a_dict_with_valueschema': {'a string': '99'}},
-                validator=validator, error=error, child_errors=child_errors)
-
-    assert 'valueschema' in \
-           validator.schema_error_tree['a_dict_with_valueschema']
-    v = validator.schema_error_tree
-    assert len(v['a_dict_with_valueschema']['valueschema'].descendants) == 1
-
-
-def test_a_dict_with_keyschema():
-    assert_success({'a_dict_with_keyschema': {'key': 'value'}})
-    assert_fail({'a_dict_with_keyschema': {'KEY': 'value'}})
-
-
-def test_a_list_length(schema):
-    field = 'a_list_length'
-    min_length = schema[field]['minlength']
-    max_length = schema[field]['maxlength']
-
-    assert_fail({field: [1] * (min_length - 1)},
-                error=(field, (field, 'minlength'), errors.MIN_LENGTH,
-                       min_length, (min_length - 1,)))
-
-    for i in range(min_length, max_length):
-        value = [1] * i
-        assert_success({field: value})
-
-    assert_fail({field: [1] * (max_length + 1)},
-                error=(field, (field, 'maxlength'), errors.MAX_LENGTH,
-                       max_length, (max_length + 1,)))
-
-
-def test_custom_datatype():
-    class MyValidator(Validator):
-        def _validate_type_objectid(self, value):
-            if re.match('[a-f0-9]{24}', value):
-                return True
-
-    schema = {'test_field': {'type': 'objectid'}}
-    validator = MyValidator(schema)
-    assert_success({'test_field': '50ad188438345b1049c88a28'},
-                   validator=validator)
-    assert_fail({'test_field': 'hello'}, validator=validator,
-                error=('test_field', ('test_field', 'type'), errors.BAD_TYPE,
-                       'objectid'))
-
-
-def test_custom_datatype_rule():
-    class MyValidator(Validator):
-        def _validate_min_number(self, min_number, field, value):
-            """ {'type': 'number'} """
-            if value < min_number:
-                self._error(field, 'Below the min')
-
-        # TODO replace with TypeDefinition in next major release
-        def _validate_type_number(self, value):
-            if isinstance(value, int):
-                return True
-
-    schema = {'test_field': {'min_number': 1, 'type': 'number'}}
-    validator = MyValidator(schema)
-    assert_fail({'test_field': '0'}, validator=validator,
-                error=('test_field', ('test_field', 'type'), errors.BAD_TYPE,
-                       'number'))
-    assert_fail({'test_field': 0}, validator=validator,
-                error=('test_field', (), errors.CUSTOM, None,
-                       ('Below the min',)))
-    assert validator.errors == {'test_field': ['Below the min']}
-
-
-def test_custom_validator():
-    class MyValidator(Validator):
-        def _validate_isodd(self, isodd, field, value):
-            """ {'type': 'boolean'} """
-            if isodd and not bool(value & 1):
-                self._error(field, 'Not an odd number')
-
-    schema = {'test_field': {'isodd': True}}
-    validator = MyValidator(schema)
-    assert_success({'test_field': 7}, validator=validator)
-    assert_fail({'test_field': 6}, validator=validator,
-                error=('test_field', (), errors.CUSTOM, None,
-                       ('Not an odd number',)))
-    assert validator.errors == {'test_field': ['Not an odd number']}
-
-
-@mark.parametrize('value, _type',
-                  (('', 'string'), ((), 'list'), ({}, 'dict'), ([], 'list')))
-def test_empty_values(value, _type):
-    field = 'test'
-    schema = {field: {'type': _type}}
-    document = {field: value}
-
-    assert_success(document, schema)
-
-    schema[field]['empty'] = False
-    assert_fail(document, schema,
-                error=(field, (field, 'empty'),
-                       errors.EMPTY_NOT_ALLOWED, False))
-
-    schema[field]['empty'] = True
-    assert_success(document, schema)
-
-
-def test_empty_skips_regex(validator):
-    schema = {'foo': {'empty': True, 'regex': r'\d?\d\.\d\d',
-                      'type': 'string'}}
-    assert validator({'foo': ''}, schema)
-
-
-def test_ignore_none_values():
-    field = 'test'
-    schema = {field: {'type': 'string', 'empty': False, 'required': False}}
-    document = {field: None}
-
-    # Test normal behaviour
-    validator = Validator(schema, ignore_none_values=False)
-    assert_fail(document, validator=validator)
-    validator.schema[field]['required'] = True
-    validator.schema.validate()
-    _errors = assert_fail(document, validator=validator)
-    assert_not_has_error(_errors, field, (field, 'required'),
-                         errors.REQUIRED_FIELD, True)
-
-    # Test ignore None behaviour
-    validator = Validator(schema, ignore_none_values=True)
-    validator.schema[field]['required'] = False
-    validator.schema.validate()
-    assert_success(document, validator=validator)
-    validator.schema[field]['required'] = True
-    _errors = assert_fail(schema=schema, document=document, validator=validator)
-    assert_has_error(_errors, field, (field, 'required'), errors.REQUIRED_FIELD,
-                     True)
-    assert_not_has_error(_errors, field, (field, 'type'), errors.BAD_TYPE,
-                         'string')
-
-
-def test_unknown_keys():
-    schema = {}
-
-    # test that unknown fields are allowed when allow_unknown is True.
-    v = Validator(allow_unknown=True, schema=schema)
-    assert_success({"unknown1": True, "unknown2": "yes"}, validator=v)
-
-    # test that unknown fields are allowed only if they meet the
-    # allow_unknown schema when provided.
-    v.allow_unknown = {'type': 'string'}
-    assert_success(document={'name': 'mark'}, validator=v)
-    assert_fail({"name": 1}, validator=v)
-
-    # test that unknown fields are not allowed if allow_unknown is False
-    v.allow_unknown = False
-    assert_fail({'name': 'mark'}, validator=v)
-
-
-def test_unknown_key_dict(validator):
-    # https://github.com/pyeve/cerberus/issues/177
-    validator.allow_unknown = True
-    document = {'a_dict': {'foo': 'foo_value', 'bar': 25}}
-    assert_success(document, {}, validator=validator)
-
-
-def test_unknown_key_list(validator):
-    # https://github.com/pyeve/cerberus/issues/177
-    validator.allow_unknown = True
-    document = {'a_dict': ['foo', 'bar']}
-    assert_success(document, {}, validator=validator)
-
-
-def test_unknown_keys_list_of_dicts(validator):
-    # test that allow_unknown is honored even for subdicts in lists.
-    # https://github.com/pyeve/cerberus/issues/67.
-    validator.allow_unknown = True
-    document = {'a_list_of_dicts': [{'sku': 'YZ069', 'price': 25,
-                                     'extra': True}]}
-    assert_success(document, validator=validator)
-
-
-def test_unknown_keys_retain_custom_rules():
-    # test that allow_unknown schema respect custom validation rules.
-    # https://github.com/pyeve/cerberus/issues/#66.
-    class CustomValidator(Validator):
-        def _validate_type_foo(self, value):
-            if value == "foo":
-                return True
-
-    validator = CustomValidator({})
-    validator.allow_unknown = {"type": "foo"}
-    assert_success(document={"fred": "foo", "barney": "foo"},
-                   validator=validator)
-
-
-def test_nested_unknown_keys():
-    schema = {
-        'field1': {
-            'type': 'dict',
-            'allow_unknown': True,
-            'schema': {'nested1': {'type': 'string'}}
-        }
-    }
-    document = {
-        'field1': {
-            'nested1': 'foo',
-            'arb1': 'bar',
-            'arb2': 42
-        }
-    }
-    assert_success(document=document, schema=schema)
-
-    schema['field1']['allow_unknown'] = {'type': 'string'}
-    assert_fail(document=document, schema=schema)
-
-
-def test_novalidate_noerrors(validator):
-    """
-    In v0.1.0 and below `self.errors` raised an exception if no
-    validation had been performed yet.
-    """
-    assert validator.errors == {}
-
-
-def test_callable_validator():
-    """
-    Validator instance is callable, functions as a shorthand
-    passthrough to validate()
-    """
-    schema = {'test_field': {'type': 'string'}}
-    v = Validator(schema)
-    assert v.validate({'test_field': 'foo'})
-    assert v({'test_field': 'foo'})
-    assert not v.validate({'test_field': 1})
-    assert not v({'test_field': 1})
-
-
-def test_dependencies_field():
-    schema = {'test_field': {'dependencies': 'foo'},
-              'foo': {'type': 'string'}}
-    assert_success({'test_field': 'foobar', 'foo': 'bar'}, schema)
-    assert_fail({'test_field': 'foobar'}, schema)
-
-
-def test_dependencies_list():
-    schema = {
-        'test_field': {'dependencies': ['foo', 'bar']},
-        'foo': {'type': 'string'},
-        'bar': {'type': 'string'}
-    }
-    assert_success({'test_field': 'foobar', 'foo': 'bar', 'bar': 'foo'},
-                   schema)
-    assert_fail({'test_field': 'foobar', 'foo': 'bar'}, schema)
-
-
-def test_dependencies_list_with_required_field():
-    schema = {
-        'test_field': {'required': True, 'dependencies': ['foo', 'bar']},
-        'foo': {'type': 'string'},
-        'bar': {'type': 'string'}
-    }
-    # False: all dependencies missing
-    assert_fail({'test_field': 'foobar'}, schema)
-    # False: one of dependencies missing
-    assert_fail({'test_field': 'foobar', 'foo': 'bar'}, schema)
-    # False: one of dependencies missing
-    assert_fail({'test_field': 'foobar', 'bar': 'foo'}, schema)
-    # False: dependencies are validated and field is required
-    assert_fail({'foo': 'bar', 'bar': 'foo'}, schema)
-    # False: All dependencies are optional but field is still required
-    assert_fail({}, schema)
-    # False: dependency missing
-    assert_fail({'foo': 'bar'}, schema)
-    # True: dependencies are validated but field is not required
-    schema['test_field']['required'] = False
-    assert_success({'foo': 'bar', 'bar': 'foo'}, schema)
-
-
-def test_dependencies_list_with_subodcuments_fields():
-    schema = {
-        'test_field': {'dependencies': ['a_dict.foo', 'a_dict.bar']},
-        'a_dict': {
-            'type': 'dict',
-            'schema': {
-                'foo': {'type': 'string'},
-                'bar': {'type': 'string'}
-            }
-        }
-    }
-    assert_success({'test_field': 'foobar',
-                    'a_dict': {'foo': 'foo', 'bar': 'bar'}}, schema)
-    assert_fail({'test_field': 'foobar', 'a_dict': {}}, schema)
-    assert_fail({'test_field': 'foobar',
-                 'a_dict': {'foo': 'foo'}}, schema)
-
-
-def test_dependencies_dict():
-    schema = {
-        'test_field': {'dependencies': {'foo': 'foo', 'bar': 'bar'}},
-        'foo': {'type': 'string'},
-        'bar': {'type': 'string'}
-    }
-    assert_success({'test_field': 'foobar', 'foo': 'foo', 'bar': 'bar'},
-                   schema)
-    assert_fail({'test_field': 'foobar', 'foo': 'foo'}, schema)
-    assert_fail({'test_field': 'foobar', 'foo': 'bar'}, schema)
-    assert_fail({'test_field': 'foobar', 'bar': 'bar'}, schema)
-    assert_fail({'test_field': 'foobar', 'bar': 'foo'}, schema)
-    assert_fail({'test_field': 'foobar'}, schema)
-
-
-def test_dependencies_dict_with_required_field():
-    schema = {
-        'test_field': {
-            'required': True,
-            'dependencies': {'foo': 'foo', 'bar': 'bar'}
-        },
-        'foo': {'type': 'string'},
-        'bar': {'type': 'string'}
-    }
-    # False: all dependencies missing
-    assert_fail({'test_field': 'foobar'}, schema)
-    # False: one of dependencies missing
-    assert_fail({'test_field': 'foobar', 'foo': 'foo'}, schema)
-    assert_fail({'test_field': 'foobar', 'bar': 'bar'}, schema)
-    # False: dependencies are validated and field is required
-    assert_fail({'foo': 'foo', 'bar': 'bar'}, schema)
-    # False: All dependencies are optional, but field is still required
-    assert_fail({}, schema)
-    # False: dependency missing
-    assert_fail({'foo': 'bar'}, schema)
-
-    assert_success({'test_field': 'foobar', 'foo': 'foo', 'bar': 'bar'},
-                   schema)
-
-    # True: dependencies are validated but field is not required
-    schema['test_field']['required'] = False
-    assert_success({'foo': 'bar', 'bar': 'foo'}, schema)
-
-
-def test_dependencies_field_satisfy_nullable_field():
-    # https://github.com/pyeve/cerberus/issues/305
-    schema = {
-        'foo': {'nullable': True},
-        'bar': {'dependencies': 'foo'}
-    }
-
-    assert_success({'foo': None, 'bar': 1}, schema)
-    assert_success({'foo': None}, schema)
-    assert_fail({'bar': 1}, schema)
-
-
-def test_dependencies_field_with_mutually_dependent_nullable_fields():
-    # https://github.com/pyeve/cerberus/pull/306
-    schema = {
-        'foo': {'dependencies': 'bar', 'nullable': True},
-        'bar': {'dependencies': 'foo', 'nullable': True}
-    }
-    assert_success({'foo': None, 'bar': None}, schema)
-    assert_success({'foo': 1, 'bar': 1}, schema)
-    assert_success({'foo': None, 'bar': 1}, schema)
-    assert_fail({'foo': None}, schema)
-    assert_fail({'foo': 1}, schema)
-
-
-def test_dependencies_dict_with_subdocuments_fields():
-    schema = {
-        'test_field': {'dependencies': {'a_dict.foo': ['foo', 'bar'],
-                                        'a_dict.bar': 'bar'}},
-        'a_dict': {
-            'type': 'dict',
-            'schema': {
-                'foo': {'type': 'string'},
-                'bar': {'type': 'string'}
-            }
-        }
-    }
-    assert_success({'test_field': 'foobar',
-                    'a_dict': {'foo': 'foo', 'bar': 'bar'}}, schema)
-    assert_success({'test_field': 'foobar',
-                    'a_dict': {'foo': 'bar', 'bar': 'bar'}}, schema)
-    assert_fail({'test_field': 'foobar', 'a_dict': {}}, schema)
-    assert_fail({'test_field': 'foobar',
-                 'a_dict': {'foo': 'foo', 'bar': 'foo'}}, schema)
-    assert_fail({'test_field': 'foobar', 'a_dict': {'bar': 'foo'}},
-                schema)
-    assert_fail({'test_field': 'foobar', 'a_dict': {'bar': 'bar'}},
-                schema)
-
-
-def test_root_relative_dependencies():
-    # https://github.com/pyeve/cerberus/issues/288
-    subschema = {'version': {'dependencies': '^repo'}}
-    schema = {'package': {'allow_unknown': True, 'schema': subschema},
-              'repo': {}}
-    assert_fail(
-        {'package': {'repo': 'somewhere', 'version': 0}}, schema,
-        error=('package', ('package', 'schema'),
-               errors.MAPPING_SCHEMA, subschema),
-        child_errors=[(
-            ('package', 'version'),
-            ('package', 'schema', 'version', 'dependencies'),
-            errors.DEPENDENCIES_FIELD, '^repo', ('^repo',)
-        )]
-    )
-    assert_success({'repo': 'somewhere', 'package': {'version': 1}}, schema)
-
-
-def test_dependencies_errors():
-    v = Validator({'field1': {'required': False},
-                   'field2': {'required': True,
-                              'dependencies': {'field1': ['one', 'two']}}})
-    assert_fail({'field1': 'three', 'field2': 7}, validator=v,
-                error=('field2', ('field2', 'dependencies'),
-                       errors.DEPENDENCIES_FIELD_VALUE,
-                       {'field1': ['one', 'two']}, ({'field1': 'three'},)))
-
-
-def test_options_passed_to_nested_validators(validator):
-    validator.schema = {'sub_dict': {'type': 'dict',
-                                     'schema': {'foo': {'type': 'string'}}}}
-    validator.allow_unknown = True
-    assert_success({'sub_dict': {'foo': 'bar', 'unknown': True}},
-                   validator=validator)
-
-
-def test_self_root_document():
-    """ Make sure self.root_document is always the root document.
-    See:
-    * https://github.com/pyeve/cerberus/pull/42
-    * https://github.com/pyeve/eve/issues/295
-    """
-
-    class MyValidator(Validator):
-        def _validate_root_doc(self, root_doc, field, value):
-            """ {'type': 'boolean'} """
-            if ('sub' not in self.root_document or
-                    len(self.root_document['sub']) != 2):
-                self._error(field, 'self.context is not the root doc!')
-
-    schema = {
-        'sub': {
-            'type': 'list',
-            'root_doc': True,
-            'schema': {
-                'type': 'dict',
-                'schema': {
-                    'foo': {
-                        'type': 'string',
-                        'root_doc': True
-                    }
-                }
-            }
-        }
-    }
-    assert_success({'sub': [{'foo': 'bar'}, {'foo': 'baz'}]},
-                   validator=MyValidator(schema))
-
-
-def test_validator_rule(validator):
-    def validate_name(field, value, error):
-        if not value.islower():
-            error(field, 'must be lowercase')
-
-    validator.schema = {
-        'name': {'validator': validate_name},
-        'age': {'type': 'integer'}
-    }
-
-    assert_fail({'name': 'ItsMe', 'age': 2}, validator=validator,
-                error=('name', (), errors.CUSTOM, None, ('must be lowercase',)))
-    assert validator.errors == {'name': ['must be lowercase']}
-    assert_success({'name': 'itsme', 'age': 2}, validator=validator)
-
-
-def test_validated(validator):
-    validator.schema = {'property': {'type': 'string'}}
-    document = {'property': 'string'}
-    assert validator.validated(document) == document
-    document = {'property': 0}
-    assert validator.validated(document) is None
-
-
-def test_anyof():
-    # prop1 must be either a number between 0 and 10
-    schema = {'prop1': {'min': 0, 'max': 10}}
-    doc = {'prop1': 5}
-
-    assert_success(doc, schema)
-
-    # prop1 must be either a number between 0 and 10 or 100 and 110
-    schema = {'prop1': {'anyof':
-                        [{'min': 0, 'max': 10}, {'min': 100, 'max': 110}]}}
-    doc = {'prop1': 105}
-
-    assert_success(doc, schema)
-
-    # prop1 must be either a number between 0 and 10 or 100 and 110
-    schema = {'prop1': {'anyof':
-                        [{'min': 0, 'max': 10}, {'min': 100, 'max': 110}]}}
-    doc = {'prop1': 50}
-
-    assert_fail(doc, schema)
-
-    # prop1 must be an integer that is either
-    # greater than or equal to 0, or greater than or equal to 10
-    schema = {'prop1': {'type': 'integer',
-                        'anyof': [{'min': 0}, {'min': 10}]}}
-    assert_success({'prop1': 10}, schema)
-    # test that the intermediate schemas built for anyof do not persist
-    assert 'type' not in schema['prop1']['anyof'][0]
-    assert 'type' not in schema['prop1']['anyof'][1]
-    assert 'allow_unknown' not in schema['prop1']['anyof'][0]
-    assert 'allow_unknown' not in schema['prop1']['anyof'][1]
-    assert_success({'prop1': 5}, schema)
-
-    exp_child_errors = [
-        (('prop1',), ('prop1', 'anyof', 0, 'min'), errors.MIN_VALUE, 0),
-        (('prop1',), ('prop1', 'anyof', 1, 'min'), errors.MIN_VALUE, 10)
-    ]
-    assert_fail({'prop1': -1}, schema,
-                error=(('prop1',), ('prop1', 'anyof'), errors.ANYOF,
-                       [{'min': 0}, {'min': 10}]),
-                child_errors=exp_child_errors)
-    doc = {'prop1': 5.5}
-    assert_fail(doc, schema)
-    doc = {'prop1': '5.5'}
-    assert_fail(doc, schema)
-
-
-def test_allof():
-    # prop1 has to be a float between 0 and 10
-    schema = {'prop1': {'allof': [
-        {'type': 'float'}, {'min': 0}, {'max': 10}]}}
-    doc = {'prop1': -1}
-    assert_fail(doc, schema)
-    doc = {'prop1': 5}
-    assert_success(doc, schema)
-    doc = {'prop1': 11}
-    assert_fail(doc, schema)
-
-    # prop1 has to be a float and an integer
-    schema = {'prop1': {'allof': [{'type': 'float'}, {'type': 'integer'}]}}
-    doc = {'prop1': 11}
-    assert_success(doc, schema)
-    doc = {'prop1': 11.5}
-    assert_fail(doc, schema)
-    doc = {'prop1': '11'}
-    assert_fail(doc, schema)
-
-
-def test_unicode_allowed():
-    # issue 280
-    doc = {'letters': u'♄εℓł☺'}
-
-    schema = {'letters': {'type': 'string', 'allowed': ['a', 'b', 'c']}}
-    assert_fail(doc, schema)
-
-    schema = {'letters': {'type': 'string', 'allowed': [u'♄εℓł☺']}}
-    assert_success(doc, schema)
-
-    schema = {'letters': {'type': 'string', 'allowed': ['♄εℓł☺']}}
-    doc = {'letters': '♄εℓł☺'}
-    assert_success(doc, schema)
-
-
-@mark.skipif(sys.version_info[0] < 3,
-             reason='requires python 3.x')
-def test_unicode_allowed_py3():
-    """ All strings are unicode in Python 3.x. Input doc and schema
-    have equal strings and validation yield success."""
-
-    # issue 280
-    doc = {'letters': u'♄εℓł☺'}
-    schema = {'letters': {'type': 'string', 'allowed': ['♄εℓł☺']}}
-    assert_success(doc, schema)
-
-
-@mark.skipif(sys.version_info[0] > 2,
-             reason='requires python 2.x')
-def test_unicode_allowed_py2():
-    """ Python 2.x encodes value of allowed using default encoding if
-    the string includes characters outside the ASCII range. The produced
-    string does not match the input, which is a unicode string."""
-
-    # issue 280
-    doc = {'letters': u'♄εℓł☺'}
-    schema = {'letters': {'type': 'string', 'allowed': ['♄εℓł☺']}}
-    assert_fail(doc, schema)
-
-
-def test_oneof():
-    # prop1 can only be:
-    # - greater than 10
-    # - greater than 0
-    # - equal to -5, 5, or 15
-
-    schema = {'prop1': {'type': 'integer', 'oneof': [
-        {'min': 0},
-        {'min': 10},
-        {'allowed': [-5, 5, 15]}]}}
-
-    # document is not valid
-    # prop1 not greater than 0, 10 or equal to -5
-    doc = {'prop1': -1}
-    assert_fail(doc, schema)
-
-    # document is valid
-    # prop1 is less than 0, but is -5
-    doc = {'prop1': -5}
-    assert_success(doc, schema)
-
-    # document is valid
-    # prop1 greater than 0
-    doc = {'prop1': 1}
-    assert_success(doc, schema)
-
-    # document is not valid
-    # prop1 is greater than 0
-    # and equal to 5
-    doc = {'prop1': 5}
-    assert_fail(doc, schema)
-
-    # document is not valid
-    # prop1 is greater than 0
-    # and greater than 10
-    doc = {'prop1': 11}
-    assert_fail(doc, schema)
-
-    # document is not valid
-    # prop1 is greater than 0
-    # and greater than 10
-    # and equal to 15
-    doc = {'prop1': 15}
-    assert_fail(doc, schema)
-
-
-def test_noneof():
-    # prop1 can not be:
-    # - greater than 10
-    # - greater than 0
-    # - equal to -5, 5, or 15
-
-    schema = {'prop1': {'type': 'integer', 'noneof': [
-        {'min': 0},
-        {'min': 10},
-        {'allowed': [-5, 5, 15]}]}}
-
-    # document is valid
-    doc = {'prop1': -1}
-    assert_success(doc, schema)
-
-    # document is not valid
-    # prop1 is equal to -5
-    doc = {'prop1': -5}
-    assert_fail(doc, schema)
-
-    # document is not valid
-    # prop1 greater than 0
-    doc = {'prop1': 1}
-    assert_fail(doc, schema)
-
-    # document is not valid
-    doc = {'prop1': 5}
-    assert_fail(doc, schema)
-
-    # document is not valid
-    doc = {'prop1': 11}
-    assert_fail(doc, schema)
-
-    # document is not valid
-    # and equal to 15
-    doc = {'prop1': 15}
-    assert_fail(doc, schema)
-
-
-def test_anyof_allof():
-    # prop1 can be any number outside of [0-10]
-    schema = {'prop1': {'allof': [{'anyof': [{'type': 'float'},
-                                             {'type': 'integer'}]},
-                                  {'anyof': [{'min': 10},
-                                             {'max': 0}]}
-                                  ]}}
-
-    doc = {'prop1': 11}
-    assert_success(doc, schema)
-    doc = {'prop1': -1}
-    assert_success(doc, schema)
-    doc = {'prop1': 5}
-    assert_fail(doc, schema)
-
-    doc = {'prop1': 11.5}
-    assert_success(doc, schema)
-    doc = {'prop1': -1.5}
-    assert_success(doc, schema)
-    doc = {'prop1': 5.5}
-    assert_fail(doc, schema)
-
-    doc = {'prop1': '5.5'}
-    assert_fail(doc, schema)
-
-
-def test_anyof_schema(validator):
-    # test that a list of schemas can be specified.
-
-    valid_parts = [{'schema': {'model number': {'type': 'string'},
-                               'count': {'type': 'integer'}}},
-                   {'schema': {'serial number': {'type': 'string'},
-                               'count': {'type': 'integer'}}}]
-    valid_item = {'type': ['dict', 'string'], 'anyof': valid_parts}
-    schema = {'parts': {'type': 'list', 'schema': valid_item}}
-    document = {'parts': [{'model number': 'MX-009', 'count': 100},
-                          {'serial number': '898-001'},
-                          'misc']}
-
-    # document is valid. each entry in 'parts' matches a type or schema
-    assert_success(document, schema, validator=validator)
-
-    document['parts'].append({'product name': "Monitors", 'count': 18})
-    # document is invalid. 'product name' does not match any valid schemas
-    assert_fail(document, schema, validator=validator)
-
-    document['parts'].pop()
-    # document is valid again
-    assert_success(document, schema, validator=validator)
-
-    document['parts'].append({'product name': "Monitors", 'count': 18})
-    document['parts'].append(10)
-    # and invalid. numbers are not allowed.
-
-    exp_child_errors = [
-        (('parts', 3), ('parts', 'schema', 'anyof'), errors.ANYOF,
-         valid_parts),
-        (('parts', 4), ('parts', 'schema', 'type'), errors.BAD_TYPE,
-         ['dict', 'string'])
-    ]
-
-    _errors = assert_fail(document, schema, validator=validator,
-                          error=('parts', ('parts', 'schema'),
-                                 errors.SEQUENCE_SCHEMA, valid_item),
-                          child_errors=exp_child_errors)
-    assert_not_has_error(_errors, ('parts', 4), ('parts', 'schema', 'anyof'),
-                         errors.ANYOF, valid_parts)
-
-    # tests errors.BasicErrorHandler's tree representation
-    v_errors = validator.errors
-    assert 'parts' in v_errors
-    assert 3 in v_errors['parts'][-1]
-    assert v_errors['parts'][-1][3][0] == "no definitions validate"
-    scope = v_errors['parts'][-1][3][-1]
-    assert 'anyof definition 0' in scope
-    assert 'anyof definition 1' in scope
-    assert scope['anyof definition 0'] == [{"product name": ["unknown field"]}]
-    assert scope['anyof definition 1'] == [{"product name": ["unknown field"]}]
-    assert v_errors['parts'][-1][4] == ["must be of ['dict', 'string'] type"]
-
-
-def test_anyof_2():
-    # these two schemas should be the same
-    schema1 = {'prop': {'anyof': [{'type': 'dict',
-                                   'schema': {
-                                       'val': {'type': 'integer'}}},
-                                  {'type': 'dict',
-                                   'schema': {
-                                       'val': {'type': 'string'}}}]}}
-    schema2 = {'prop': {'type': 'dict', 'anyof': [
-        {'schema': {'val': {'type': 'integer'}}},
-        {'schema': {'val': {'type': 'string'}}}]}}
-
-    doc = {'prop': {'val': 0}}
-    assert_success(doc, schema1)
-    assert_success(doc, schema2)
-
-    doc = {'prop': {'val': '0'}}
-    assert_success(doc, schema1)
-    assert_success(doc, schema2)
-
-    doc = {'prop': {'val': 1.1}}
-    assert_fail(doc, schema1)
-    assert_fail(doc, schema2)
-
-
-def test_anyof_type():
-    schema = {'anyof_type': {'anyof_type': ['string', 'integer']}}
-    assert_success({'anyof_type': 'bar'}, schema)
-    assert_success({'anyof_type': 23}, schema)
-
-
-def test_oneof_schema():
-    schema = {'oneof_schema': {'type': 'dict',
-                               'oneof_schema':
-                                   [{'digits': {'type': 'integer',
-                                                'min': 0, 'max': 99}},
-                                    {'text': {'type': 'string',
-                                              'regex': '^[0-9]{2}$'}}]}}
-    assert_success({'oneof_schema': {'digits': 19}}, schema)
-    assert_success({'oneof_schema': {'text': '84'}}, schema)
-    assert_fail({'oneof_schema': {'digits': 19, 'text': '84'}}, schema)
-
-
-def test_nested_oneof_type():
-    schema = {'nested_oneof_type':
-              {'valueschema': {'oneof_type': ['string', 'integer']}}}
-    assert_success({'nested_oneof_type': {'foo': 'a'}}, schema)
-    assert_success({'nested_oneof_type': {'bar': 3}}, schema)
-
-
-def test_nested_oneofs(validator):
-    validator.schema = {'abc': {
-        'type': 'dict',
-        'oneof_schema': [
-            {'foo': {
-                'type': 'dict',
-                'schema': {'bar': {'oneof_type': ['integer', 'float']}}
-            }},
-            {'baz': {'type': 'string'}}
-        ]}}
-
-    document = {'abc': {'foo': {'bar': 'bad'}}}
-
-    expected_errors = {
-        'abc': [
-            'none or more than one rule validate',
-            {'oneof definition 0': [
-                {'foo': [{'bar': [
-                    'none or more than one rule validate',
-                    {'oneof definition 0': ['must be of integer type'],
-                     'oneof definition 1': ['must be of float type']}
-                ]}]}],
-             'oneof definition 1': [{'foo': ['unknown field']}]}
-        ]
-    }
-
-    assert_fail(document, validator=validator)
-    assert validator.errors == expected_errors
-
-
-def test_no_of_validation_if_type_fails(validator):
-    valid_parts = [{'schema': {'model number': {'type': 'string'},
-                               'count': {'type': 'integer'}}},
-                   {'schema': {'serial number': {'type': 'string'},
-                               'count': {'type': 'integer'}}}]
-    validator.schema = {'part': {'type': ['dict', 'string'],
-                                 'anyof': valid_parts}}
-    document = {'part': 10}
-    _errors = assert_fail(document, validator=validator)
-    assert len(_errors) == 1
-
-
-def test_issue_107(validator):
-    schema = {'info': {'type': 'dict',
-                       'schema': {'name': {'type': 'string',
-                                           'required': True}}}}
-    document = {'info': {'name': 'my name'}}
-    assert_success(document, schema, validator=validator)
-
-    v = Validator(schema)
-    assert_success(document, schema, v)
-    # it once was observed that this behaves other than the previous line
-    assert v.validate(document)
-
-
-def test_dont_type_validate_nulled_values(validator):
-    assert_fail({'an_integer': None}, validator=validator)
-    assert validator.errors == {'an_integer': ['null value not allowed']}
-
-
-def test_dependencies_error(validator):
-    schema = {'field1': {'required': False},
-              'field2': {'required': True,
-                         'dependencies': {'field1': ['one', 'two']}}}
-    validator.validate({'field2': 7}, schema)
-    exp_msg = errors.BasicErrorHandler \
-        .messages[errors.DEPENDENCIES_FIELD_VALUE.code] \
-        .format(field='field2', constraint={'field1': ['one', 'two']})
-    assert validator.errors == {'field2': [exp_msg]}
-
-
-def test_dependencies_on_boolean_field_with_one_value():
-    # https://github.com/pyeve/cerberus/issues/138
-    schema = {'deleted': {'type': 'boolean'},
-              'text': {'dependencies': {'deleted': False}}}
-    try:
-        assert_success({'text': 'foo', 'deleted': False}, schema)
-        assert_fail({'text': 'foo', 'deleted': True}, schema)
-        assert_fail({'text': 'foo'}, schema)
-    except TypeError as e:
-        if str(e) == "argument of type 'bool' is not iterable":
-            raise AssertionError(
-                "Bug #138 still exists, couldn't use boolean in dependency "
-                "without putting it in a list.\n"
-                "'some_field': True vs 'some_field: [True]")
-        else:
-            raise
-
-
-def test_dependencies_on_boolean_field_with_value_in_list():
-    # https://github.com/pyeve/cerberus/issues/138
-    schema = {'deleted': {'type': 'boolean'},
-              'text': {'dependencies': {'deleted': [False]}}}
-
-    assert_success({'text': 'foo', 'deleted': False}, schema)
-    assert_fail({'text': 'foo', 'deleted': True}, schema)
-    assert_fail({'text': 'foo'}, schema)
-
-
-def test_document_path():
-    class DocumentPathTester(Validator):
-        def _validate_trail(self, constraint, field, value):
-            """ {'type': 'boolean'} """
-            test_doc = self.root_document
-            for crumb in self.document_path:
-                test_doc = test_doc[crumb]
-            assert test_doc == self.document
-
-    v = DocumentPathTester()
-    schema = {'foo': {'schema': {'bar': {'trail': True}}}}
-    document = {'foo': {'bar': {}}}
-    assert_success(document, schema, validator=v)
-
-
-def test_excludes():
-    schema = {'this_field': {'type': 'dict',
-                             'excludes': 'that_field'},
-              'that_field': {'type': 'dict'}}
-    assert_success({'this_field': {}}, schema)
-    assert_success({'that_field': {}}, schema)
-    assert_success({}, schema)
-    assert_fail({'that_field': {}, 'this_field': {}}, schema)
-
-
-def test_mutual_excludes():
-    schema = {'this_field': {'type': 'dict',
-                             'excludes': 'that_field'},
-              'that_field': {'type': 'dict',
-                             'excludes': 'this_field'}}
-    assert_success({'this_field': {}}, schema)
-    assert_success({'that_field': {}}, schema)
-    assert_success({}, schema)
-    assert_fail({'that_field': {}, 'this_field': {}}, schema)
-
-
-def test_required_excludes():
-    schema = {'this_field': {'type': 'dict',
-                             'excludes': 'that_field',
-                             'required': True},
-              'that_field': {'type': 'dict',
-                             'excludes': 'this_field',
-                             'required': True}}
-    assert_success({'this_field': {}}, schema, update=False)
-    assert_success({'that_field': {}}, schema, update=False)
-    assert_fail({}, schema)
-    assert_fail({'that_field': {}, 'this_field': {}}, schema)
-
-
-def test_multiples_exclusions():
-    schema = {'this_field': {'type': 'dict',
-                             'excludes': ['that_field', 'bazo_field']},
-              'that_field': {'type': 'dict',
-                             'excludes': 'this_field'},
-              'bazo_field': {'type': 'dict'}}
-    assert_success({'this_field': {}}, schema)
-    assert_success({'that_field': {}}, schema)
-    assert_fail({'this_field': {}, 'that_field': {}}, schema)
-    assert_fail({'this_field': {}, 'bazo_field': {}}, schema)
-    assert_fail({'that_field': {}, 'this_field': {}, 'bazo_field': {}}, schema)
-    assert_success({'that_field': {}, 'bazo_field': {}}, schema)
-
-
-def test_bad_excludes_fields(validator):
-    validator.schema = {'this_field': {'type': 'dict',
-                                       'excludes': ['that_field', 'bazo_field'],
-                                       'required': True},
-                        'that_field': {'type': 'dict',
-                                       'excludes': 'this_field',
-                                       'required': True}}
-    assert_fail({'that_field': {}, 'this_field': {}}, validator=validator)
-    handler = errors.BasicErrorHandler
-    assert (validator.errors ==
-            {'that_field':
-                [handler.messages[errors.EXCLUDES_FIELD.code].format(
-                    "'this_field'", field="that_field")],
-                'this_field':
-                    [handler.messages[errors.EXCLUDES_FIELD.code].format(
-                        "'that_field', 'bazo_field'", field="this_field")]})
-
-
-def test_boolean_is_not_a_number():
-    # https://github.com/pyeve/cerberus/issues/144
-    assert_fail({'value': True}, {'value': {'type': 'number'}})
-
-
-def test_min_max_date():
-    schema = {'date': {'min': date(1900, 1, 1), 'max': date(1999, 12, 31)}}
-    assert_success({'date': date(1945, 5, 8)}, schema)
-    assert_fail({'date': date(1871, 5, 10)}, schema)
-
-
-def test_dict_length():
-    schema = {'dict': {'minlength': 1}}
-    assert_fail({'dict': {}}, schema)
-    assert_success({'dict': {'foo': 'bar'}}, schema)
-
-
-def test_forbidden():
-    schema = {'user': {'forbidden': ['root', 'admin']}}
-    assert_fail({'user': 'admin'}, schema)
-    assert_success({'user': 'alice'}, schema)
-
-
-def test_mapping_with_sequence_schema():
-    schema = {'list': {'schema': {'allowed': ['a', 'b', 'c']}}}
-    document = {'list': {'is_a': 'mapping'}}
-    assert_fail(document, schema,
-                error=('list', ('list', 'schema'), errors.BAD_TYPE_FOR_SCHEMA,
-                       schema['list']['schema']))
-
-
-def test_sequence_with_mapping_schema():
-    schema = {'list': {'schema': {'foo': {'allowed': ['a', 'b', 'c']}},
-                       'type': 'dict'}}
-    document = {'list': ['a', 'b', 'c']}
-    assert_fail(document, schema)
-
-
-def test_type_error_aborts_validation():
-    schema = {'foo': {'type': 'string', 'allowed': ['a']}}
-    document = {'foo': 0}
-    assert_fail(document, schema,
-                error=('foo', ('foo', 'type'), errors.BAD_TYPE, 'string'))
-
-
-def test_dependencies_in_oneof():
-    # https://github.com/pyeve/cerberus/issues/241
-    schema = {'a': {'type': 'integer',
-                    'oneof': [
-                        {'allowed': [1], 'dependencies': 'b'},
-                        {'allowed': [2], 'dependencies': 'c'}
-                    ]},
-              'b': {},
-              'c': {}}
-    assert_success({'a': 1, 'b': 'foo'}, schema)
-    assert_success({'a': 2, 'c': 'bar'}, schema)
-    assert_fail({'a': 1, 'c': 'foo'}, schema)
-    assert_fail({'a': 2, 'b': 'bar'}, schema)
-
-
-def test_allow_unknown_with_oneof_rules(validator):
-    # https://github.com/pyeve/cerberus/issues/251
-    schema = {
-        'test': {
-            'oneof': [
-                {
-                    'type': 'dict',
-                    'allow_unknown': True,
-                    'schema': {'known': {'type': 'string'}}
-                },
-                {
-                    'type': 'dict',
-                    'schema': {'known': {'type': 'string'}}
-                },
-            ]
-        }
-    }
-    # check regression and that allow unknown does not cause any different
-    # than expected behaviour for one-of.
-    document = {'test': {'known': 's'}}
-    validator(document, schema)
-    _errors = validator._errors
-    assert len(_errors) == 1
-    assert_has_error(_errors, 'test', ('test', 'oneof'),
-                     errors.ONEOF, schema['test']['oneof'])
-    assert len(_errors[0].child_errors) == 0
-    # check that allow_unknown is actually applied
-    document = {'test': {'known': 's', 'unknown': 'asd'}}
-    assert_success(document, validator=validator)
+# -*- coding: utf-8 -*-
+
+import re
+import sys
+from datetime import datetime, date
+from random import choice
+from string import ascii_lowercase
+
+from pytest import mark
+
+from cerberus import errors, Validator
+from cerberus.tests import (
+    assert_bad_type, assert_document_error, assert_fail, assert_has_error,
+    assert_not_has_error, assert_success
+)
+from cerberus.tests.conftest import sample_schema
+
+
+def test_empty_document():
+    assert_document_error(None, sample_schema, None,
+                          errors.DOCUMENT_MISSING)
+
+
+def test_bad_document_type():
+    document = "not a dict"
+    assert_document_error(
+        document, sample_schema, None,
+        errors.DOCUMENT_FORMAT.format(document)
+    )
+
+
+def test_unknown_field(validator):
+    field = 'surname'
+    assert_fail({field: 'doe'}, validator=validator,
+                error=(field, (), errors.UNKNOWN_FIELD, None))
+    assert validator.errors == {field: ['unknown field']}
+
+
+def test_empty_field_definition(document):
+    field = 'name'
+    schema = {field: {}}
+    assert_success(document, schema)
+
+
+def test_required_field(schema):
+    field = 'a_required_string'
+    required_string_extension = {
+        'a_required_string': {'type': 'string',
+                              'minlength': 2,
+                              'maxlength': 10,
+                              'required': True}}
+    schema.update(required_string_extension)
+    assert_fail({'an_integer': 1}, schema,
+                error=(field, (field, 'required'), errors.REQUIRED_FIELD,
+                       True))
+
+
+def test_nullable_field():
+    assert_success({'a_nullable_integer': None})
+    assert_success({'a_nullable_integer': 3})
+    assert_success({'a_nullable_field_without_type': None})
+    assert_fail({'a_nullable_integer': "foo"})
+    assert_fail({'an_integer': None})
+    assert_fail({'a_not_nullable_field_without_type': None})
+
+
+def test_readonly_field():
+    field = 'a_readonly_string'
+    assert_fail({field: 'update me if you can'},
+                error=(field, (field, 'readonly'), errors.READONLY_FIELD, True))
+
+
+def test_readonly_field_first_rule():
+    # test that readonly rule is checked before any other rule, and blocks.
+    # See #63.
+    schema = {
+        'a_readonly_number': {
+            'type': 'integer',
+            'readonly': True,
+            'max': 1
+        }
+    }
+    v = Validator(schema)
+    v.validate({'a_readonly_number': 2})
+    # it would be a list if there's more than one error; we get a dict
+    # instead.
+    assert 'read-only' in v.errors['a_readonly_number'][0]
+
+
+def test_readonly_field_with_default_value():
+    schema = {
+        'created': {
+            'type': 'string',
+            'readonly': True,
+            'default': 'today'
+        },
+        'modified': {
+            'type': 'string',
+            'readonly': True,
+            'default_setter': lambda d: d['created']
+        }
+    }
+    assert_success({}, schema)
+    expected_errors = [('created', ('created', 'readonly'),
+                        errors.READONLY_FIELD,
+                        schema['created']['readonly']),
+                       ('modified', ('modified', 'readonly'),
+                        errors.READONLY_FIELD,
+                        schema['modified']['readonly'])]
+    assert_fail({'created': 'tomorrow', 'modified': 'today'},
+                schema, errors=expected_errors)
+    assert_fail({'created': 'today', 'modified': 'today'},
+                schema, errors=expected_errors)
+
+
+def test_nested_readonly_field_with_default_value():
+    schema = {
+        'some_field': {
+            'type': 'dict',
+            'schema': {
+                'created': {
+                    'type': 'string',
+                    'readonly': True,
+                    'default': 'today'
+                },
+                'modified': {
+                    'type': 'string',
+                    'readonly': True,
+                    'default_setter': lambda d: d['created']
+                }
+            }
+        }
+    }
+    assert_success({'some_field': {}}, schema)
+    expected_errors = [
+        (('some_field', 'created'),
+         ('some_field', 'schema', 'created', 'readonly'),
+         errors.READONLY_FIELD,
+         schema['some_field']['schema']['created']['readonly']),
+        (('some_field', 'modified'),
+         ('some_field', 'schema', 'modified', 'readonly'),
+         errors.READONLY_FIELD,
+         schema['some_field']['schema']['modified']['readonly'])]
+    assert_fail({'some_field': {'created': 'tomorrow', 'modified': 'now'}},
+                schema, errors=expected_errors)
+    assert_fail({'some_field': {'created': 'today', 'modified': 'today'}},
+                schema, errors=expected_errors)
+
+
+def test_repeated_readonly(validator):
+    # https://github.com/pyeve/cerberus/issues/311
+    validator.schema = {'id': {'readonly': True}}
+    assert_fail({'id': 0}, validator=validator)
+    assert_fail({'id': 0}, validator=validator)
+
+
+def test_not_a_string():
+    assert_bad_type('a_string', 'string', 1)
+
+
+def test_not_a_binary():
+    # 'u' literal prefix produces type `str` in Python 3
+    assert_bad_type('a_binary', 'binary', u"i'm not a binary")
+
+
+def test_not_a_integer():
+    assert_bad_type('an_integer', 'integer', "i'm not an integer")
+
+
+def test_not_a_boolean():
+    assert_bad_type('a_boolean', 'boolean', "i'm not a boolean")
+
+
+def test_not_a_datetime():
+    assert_bad_type('a_datetime', 'datetime', "i'm not a datetime")
+
+
+def test_not_a_float():
+    assert_bad_type('a_float', 'float', "i'm not a float")
+
+
+def test_not_a_number():
+    assert_bad_type('a_number', 'number', "i'm not a number")
+
+
+def test_not_a_list():
+    assert_bad_type('a_list_of_values', 'list', "i'm not a list")
+
+
+def test_not_a_dict():
+    assert_bad_type('a_dict', 'dict', "i'm not a dict")
+
+
+def test_bad_max_length(schema):
+    field = 'a_string'
+    max_length = schema[field]['maxlength']
+    value = "".join(choice(ascii_lowercase) for i in range(max_length + 1))
+    assert_fail({field: value},
+                error=(field, (field, 'maxlength'), errors.MAX_LENGTH,
+                       max_length, (len(value),)))
+
+
+def test_bad_max_length_binary(schema):
+    field = 'a_binary'
+    max_length = schema[field]['maxlength']
+    value = b'\x00' * (max_length + 1)
+    assert_fail({field: value},
+                error=(field, (field, 'maxlength'), errors.MAX_LENGTH,
+                       max_length, (len(value),)))
+
+
+def test_bad_min_length(schema):
+    field = 'a_string'
+    min_length = schema[field]['minlength']
+    value = "".join(choice(ascii_lowercase) for i in range(min_length - 1))
+    assert_fail({field: value},
+                error=(field, (field, 'minlength'), errors.MIN_LENGTH,
+                       min_length, (len(value),)))
+
+
+def test_bad_min_length_binary(schema):
+    field = 'a_binary'
+    min_length = schema[field]['minlength']
+    value = b'\x00' * (min_length - 1)
+    assert_fail({field: value},
+                error=(field, (field, 'minlength'), errors.MIN_LENGTH,
+                       min_length, (len(value),)))
+
+
+def test_bad_max_value(schema):
+    def assert_bad_max_value(field, inc):
+        max_value = schema[field]['max']
+        value = max_value + inc
+        assert_fail({field: value},
+                    error=(field, (field, 'max'), errors.MAX_VALUE, max_value))
+
+    field = 'an_integer'
+    assert_bad_max_value(field, 1)
+    field = 'a_float'
+    assert_bad_max_value(field, 1.0)
+    field = 'a_number'
+    assert_bad_max_value(field, 1)
+
+
+def test_bad_min_value(schema):
+    def assert_bad_min_value(field, inc):
+        min_value = schema[field]['min']
+        value = min_value - inc
+        assert_fail({field: value},
+                    error=(field, (field, 'min'),
+                           errors.MIN_VALUE, min_value))
+
+    field = 'an_integer'
+    assert_bad_min_value(field, 1)
+    field = 'a_float'
+    assert_bad_min_value(field, 1.0)
+    field = 'a_number'
+    assert_bad_min_value(field, 1)
+
+
+def test_bad_schema():
+    field = 'a_dict'
+    subschema_field = 'address'
+    schema = {field: {'type': 'dict',
+                      'schema': {subschema_field: {'type': 'string'},
+                                 'city': {'type': 'string', 'required': True}}
+                      }}
+    document = {field: {subschema_field: 34}}
+    validator = Validator(schema)
+
+    assert_fail(
+        document, validator=validator,
+        error=(field, (field, 'schema'), errors.MAPPING_SCHEMA,
+               validator.schema['a_dict']['schema']),
+        child_errors=[
+            ((field, subschema_field),
+             (field, 'schema', subschema_field, 'type'),
+             errors.BAD_TYPE, 'string'),
+            ((field, 'city'), (field, 'schema', 'city', 'required'),
+             errors.REQUIRED_FIELD, True)]
+    )
+
+    handler = errors.BasicErrorHandler
+    assert field in validator.errors
+    assert subschema_field in validator.errors[field][-1]
+    assert handler.messages[errors.BAD_TYPE.code].format(constraint='string') \
+        in validator.errors[field][-1][subschema_field]
+    assert 'city' in validator.errors[field][-1]
+    assert (handler.messages[errors.REQUIRED_FIELD.code]
+            in validator.errors[field][-1]['city'])
+
+
+def test_bad_valueschema():
+    field = 'a_dict_with_valueschema'
+    schema_field = 'a_string'
+    value = {schema_field: 'not an integer'}
+
+    exp_child_errors = [
+        ((field, schema_field), (field, 'valueschema', 'type'), errors.BAD_TYPE,
+         'integer')]
+    assert_fail({field: value},
+                error=(field, (field, 'valueschema'), errors.VALUESCHEMA,
+                       {'type': 'integer'}), child_errors=exp_child_errors)
+
+
+def test_bad_list_of_values(validator):
+    field = 'a_list_of_values'
+    value = ['a string', 'not an integer']
+    assert_fail({field: value}, validator=validator,
+                error=(field, (field, 'items'), errors.BAD_ITEMS,
+                       [{'type': 'string'}, {'type': 'integer'}]),
+                child_errors=[((field, 1), (field, 'items', 1, 'type'),
+                               errors.BAD_TYPE, 'integer')])
+
+    assert (errors.BasicErrorHandler.messages[errors.BAD_TYPE.code].
+            format(constraint='integer')
+            in validator.errors[field][-1][1])
+
+    value = ['a string', 10, 'an extra item']
+    assert_fail({field: value},
+                error=(field, (field, 'items'), errors.ITEMS_LENGTH,
+                       [{'type': 'string'}, {'type': 'integer'}], (2, 3)))
+
+
+def test_bad_list_of_integers():
+    field = 'a_list_of_integers'
+    value = [34, 'not an integer']
+    assert_fail({field: value})
+
+
+def test_bad_list_of_dicts():
+    field = 'a_list_of_dicts'
+    map_schema = {'sku': {'type': 'string'},
+                  'price': {'type': 'integer', 'required': True}}
+    seq_schema = {'type': 'dict', 'schema': map_schema}
+    schema = {field: {'type': 'list', 'schema': seq_schema}}
+    validator = Validator(schema)
+    value = [{'sku': 'KT123', 'price': '100'}]
+    document = {field: value}
+
+    assert_fail(document, validator=validator,
+                error=(field, (field, 'schema'), errors.SEQUENCE_SCHEMA,
+                       seq_schema),
+                child_errors=[((field, 0), (field, 'schema', 'schema'),
+                               errors.MAPPING_SCHEMA, map_schema)])
+
+    assert field in validator.errors
+    assert 0 in validator.errors[field][-1]
+    assert 'price' in validator.errors[field][-1][0][-1]
+    exp_msg = errors.BasicErrorHandler.messages[errors.BAD_TYPE.code] \
+        .format(constraint='integer')
+    assert exp_msg in validator.errors[field][-1][0][-1]['price']
+
+    value = ["not a dict"]
+    exp_child_errors = [((field, 0), (field, 'schema', 'type'),
+                         errors.BAD_TYPE, 'dict', ())]
+    assert_fail({field: value},
+                error=(field, (field, 'schema'), errors.SEQUENCE_SCHEMA,
+                       seq_schema),
+                child_errors=exp_child_errors)
+
+
+def test_array_unallowed():
+    field = 'an_array'
+    value = ['agent', 'client', 'profit']
+    assert_fail({field: value},
+                error=(field, (field, 'allowed'), errors.UNALLOWED_VALUES,
+                       ['agent', 'client', 'vendor'], ['profit']))
+
+
+def test_string_unallowed():
+    field = 'a_restricted_string'
+    value = 'profit'
+    assert_fail({field: value},
+                error=(field, (field, 'allowed'), errors.UNALLOWED_VALUE,
+                       ['agent', 'client', 'vendor'], value))
+
+
+def test_integer_unallowed():
+    field = 'a_restricted_integer'
+    value = 2
+    assert_fail({field: value},
+                error=(field, (field, 'allowed'), errors.UNALLOWED_VALUE,
+                       [-1, 0, 1], value))
+
+
+def test_integer_allowed():
+    assert_success({'a_restricted_integer': -1})
+
+
+def test_validate_update():
+    assert_success({'an_integer': 100,
+                    'a_dict': {'address': 'adr'},
+                    'a_list_of_dicts': [{'sku': 'let'}]
+                    }, update=True)
+
+
+def test_string():
+    assert_success({'a_string': 'john doe'})
+
+
+def test_string_allowed():
+    assert_success({'a_restricted_string': 'client'})
+
+
+def test_integer():
+    assert_success({'an_integer': 50})
+
+
+def test_boolean():
+    assert_success({'a_boolean': True})
+
+
+def test_datetime():
+    assert_success({'a_datetime': datetime.now()})
+
+
+def test_float():
+    assert_success({'a_float': 3.5})
+    assert_success({'a_float': 1})
+
+
+def test_number():
+    assert_success({'a_number': 3.5})
+    assert_success({'a_number': 3})
+
+
+def test_array():
+    assert_success({'an_array': ['agent', 'client']})
+
+
+def test_set():
+    assert_success({'a_set': set(['hello', 1])})
+
+
+def test_one_of_two_types(validator):
+    field = 'one_or_more_strings'
+    assert_success({field: 'foo'})
+    assert_success({field: ['foo', 'bar']})
+    exp_child_errors = [((field, 1), (field, 'schema', 'type'),
+                         errors.BAD_TYPE, 'string')]
+    assert_fail({field: ['foo', 23]}, validator=validator,
+                error=(field, (field, 'schema'), errors.SEQUENCE_SCHEMA,
+                       {'type': 'string'}),
+                child_errors=exp_child_errors)
+    assert_fail({field: 23},
+                error=((field,), (field, 'type'), errors.BAD_TYPE,
+                       ['string', 'list']))
+    assert validator.errors == {field: [{1: ['must be of string type']}]}
+
+
+def test_regex(validator):
+    field = 'a_regex_email'
+    assert_success({field: 'valid.email@gmail.com'}, validator=validator)
+    assert_fail({field: 'invalid'}, update=True,
+                error=(field, (field, 'regex'), errors.REGEX_MISMATCH,
+                       '^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'))
+
+
+def test_a_list_of_dicts():
+    assert_success(
+        {
+            'a_list_of_dicts': [
+                {'sku': 'AK345', 'price': 100},
+                {'sku': 'YZ069', 'price': 25}
+            ]
+        }
+    )
+
+
+def test_a_list_of_values():
+    assert_success({'a_list_of_values': ['hello', 100]})
+
+
+def test_a_list_of_integers():
+    assert_success({'a_list_of_integers': [99, 100]})
+
+
+def test_a_dict(schema):
+    assert_success({'a_dict': {'address': 'i live here',
+                               'city': 'in my own town'}})
+    assert_fail(
+        {'a_dict': {'address': 8545}},
+        error=('a_dict', ('a_dict', 'schema'), errors.MAPPING_SCHEMA,
+               schema['a_dict']['schema']),
+        child_errors=[(('a_dict', 'address'),
+                       ('a_dict', 'schema', 'address', 'type'),
+                       errors.BAD_TYPE, 'string'),
+                      (('a_dict', 'city'),
+                       ('a_dict', 'schema', 'city', 'required'),
+                       errors.REQUIRED_FIELD, True)]
+    )
+
+
+def test_a_dict_with_valueschema(validator):
+    assert_success({'a_dict_with_valueschema':
+                   {'an integer': 99, 'another integer': 100}})
+
+    error = (
+        'a_dict_with_valueschema', ('a_dict_with_valueschema', 'valueschema'),
+        errors.VALUESCHEMA, {'type': 'integer'})
+    child_errors = [
+        (('a_dict_with_valueschema', 'a string'),
+         ('a_dict_with_valueschema', 'valueschema', 'type'),
+         errors.BAD_TYPE, 'integer')]
+
+    assert_fail({'a_dict_with_valueschema': {'a string': '99'}},
+                validator=validator, error=error, child_errors=child_errors)
+
+    assert 'valueschema' in \
+           validator.schema_error_tree['a_dict_with_valueschema']
+    v = validator.schema_error_tree
+    assert len(v['a_dict_with_valueschema']['valueschema'].descendants) == 1
+
+
+def test_a_dict_with_keyschema():
+    assert_success({'a_dict_with_keyschema': {'key': 'value'}})
+    assert_fail({'a_dict_with_keyschema': {'KEY': 'value'}})
+
+
+def test_a_list_length(schema):
+    field = 'a_list_length'
+    min_length = schema[field]['minlength']
+    max_length = schema[field]['maxlength']
+
+    assert_fail({field: [1] * (min_length - 1)},
+                error=(field, (field, 'minlength'), errors.MIN_LENGTH,
+                       min_length, (min_length - 1,)))
+
+    for i in range(min_length, max_length):
+        value = [1] * i
+        assert_success({field: value})
+
+    assert_fail({field: [1] * (max_length + 1)},
+                error=(field, (field, 'maxlength'), errors.MAX_LENGTH,
+                       max_length, (max_length + 1,)))
+
+
+def test_custom_datatype():
+    class MyValidator(Validator):
+        def _validate_type_objectid(self, value):
+            if re.match('[a-f0-9]{24}', value):
+                return True
+
+    schema = {'test_field': {'type': 'objectid'}}
+    validator = MyValidator(schema)
+    assert_success({'test_field': '50ad188438345b1049c88a28'},
+                   validator=validator)
+    assert_fail({'test_field': 'hello'}, validator=validator,
+                error=('test_field', ('test_field', 'type'), errors.BAD_TYPE,
+                       'objectid'))
+
+
+def test_custom_datatype_rule():
+    class MyValidator(Validator):
+        def _validate_min_number(self, min_number, field, value):
+            """ {'type': 'number'} """
+            if value < min_number:
+                self._error(field, 'Below the min')
+
+        # TODO replace with TypeDefinition in next major release
+        def _validate_type_number(self, value):
+            if isinstance(value, int):
+                return True
+
+    schema = {'test_field': {'min_number': 1, 'type': 'number'}}
+    validator = MyValidator(schema)
+    assert_fail({'test_field': '0'}, validator=validator,
+                error=('test_field', ('test_field', 'type'), errors.BAD_TYPE,
+                       'number'))
+    assert_fail({'test_field': 0}, validator=validator,
+                error=('test_field', (), errors.CUSTOM, None,
+                       ('Below the min',)))
+    assert validator.errors == {'test_field': ['Below the min']}
+
+
+def test_custom_validator():
+    class MyValidator(Validator):
+        def _validate_isodd(self, isodd, field, value):
+            """ {'type': 'boolean'} """
+            if isodd and not bool(value & 1):
+                self._error(field, 'Not an odd number')
+
+    schema = {'test_field': {'isodd': True}}
+    validator = MyValidator(schema)
+    assert_success({'test_field': 7}, validator=validator)
+    assert_fail({'test_field': 6}, validator=validator,
+                error=('test_field', (), errors.CUSTOM, None,
+                       ('Not an odd number',)))
+    assert validator.errors == {'test_field': ['Not an odd number']}
+
+
+@mark.parametrize('value, _type',
+                  (('', 'string'), ((), 'list'), ({}, 'dict'), ([], 'list')))
+def test_empty_values(value, _type):
+    field = 'test'
+    schema = {field: {'type': _type}}
+    document = {field: value}
+
+    assert_success(document, schema)
+
+    schema[field]['empty'] = False
+    assert_fail(document, schema,
+                error=(field, (field, 'empty'),
+                       errors.EMPTY_NOT_ALLOWED, False))
+
+    schema[field]['empty'] = True
+    assert_success(document, schema)
+
+
+def test_empty_skips_regex(validator):
+    schema = {'foo': {'empty': True, 'regex': r'\d?\d\.\d\d',
+                      'type': 'string'}}
+    assert validator({'foo': ''}, schema)
+
+
+def test_ignore_none_values():
+    field = 'test'
+    schema = {field: {'type': 'string', 'empty': False, 'required': False}}
+    document = {field: None}
+
+    # Test normal behaviour
+    validator = Validator(schema, ignore_none_values=False)
+    assert_fail(document, validator=validator)
+    validator.schema[field]['required'] = True
+    validator.schema.validate()
+    _errors = assert_fail(document, validator=validator)
+    assert_not_has_error(_errors, field, (field, 'required'),
+                         errors.REQUIRED_FIELD, True)
+
+    # Test ignore None behaviour
+    validator = Validator(schema, ignore_none_values=True)
+    validator.schema[field]['required'] = False
+    validator.schema.validate()
+    assert_success(document, validator=validator)
+    validator.schema[field]['required'] = True
+    _errors = assert_fail(schema=schema, document=document, validator=validator)
+    assert_has_error(_errors, field, (field, 'required'), errors.REQUIRED_FIELD,
+                     True)
+    assert_not_has_error(_errors, field, (field, 'type'), errors.BAD_TYPE,
+                         'string')
+
+
+def test_unknown_keys():
+    schema = {}
+
+    # test that unknown fields are allowed when allow_unknown is True.
+    v = Validator(allow_unknown=True, schema=schema)
+    assert_success({"unknown1": True, "unknown2": "yes"}, validator=v)
+
+    # test that unknown fields are allowed only if they meet the
+    # allow_unknown schema when provided.
+    v.allow_unknown = {'type': 'string'}
+    assert_success(document={'name': 'mark'}, validator=v)
+    assert_fail({"name": 1}, validator=v)
+
+    # test that unknown fields are not allowed if allow_unknown is False
+    v.allow_unknown = False
+    assert_fail({'name': 'mark'}, validator=v)
+
+
+def test_unknown_key_dict(validator):
+    # https://github.com/pyeve/cerberus/issues/177
+    validator.allow_unknown = True
+    document = {'a_dict': {'foo': 'foo_value', 'bar': 25}}
+    assert_success(document, {}, validator=validator)
+
+
+def test_unknown_key_list(validator):
+    # https://github.com/pyeve/cerberus/issues/177
+    validator.allow_unknown = True
+    document = {'a_dict': ['foo', 'bar']}
+    assert_success(document, {}, validator=validator)
+
+
+def test_unknown_keys_list_of_dicts(validator):
+    # test that allow_unknown is honored even for subdicts in lists.
+    # https://github.com/pyeve/cerberus/issues/67.
+    validator.allow_unknown = True
+    document = {'a_list_of_dicts': [{'sku': 'YZ069', 'price': 25,
+                                     'extra': True}]}
+    assert_success(document, validator=validator)
+
+
+def test_unknown_keys_retain_custom_rules():
+    # test that allow_unknown schema respect custom validation rules.
+    # https://github.com/pyeve/cerberus/issues/#66.
+    class CustomValidator(Validator):
+        def _validate_type_foo(self, value):
+            if value == "foo":
+                return True
+
+    validator = CustomValidator({})
+    validator.allow_unknown = {"type": "foo"}
+    assert_success(document={"fred": "foo", "barney": "foo"},
+                   validator=validator)
+
+
+def test_nested_unknown_keys():
+    schema = {
+        'field1': {
+            'type': 'dict',
+            'allow_unknown': True,
+            'schema': {'nested1': {'type': 'string'}}
+        }
+    }
+    document = {
+        'field1': {
+            'nested1': 'foo',
+            'arb1': 'bar',
+            'arb2': 42
+        }
+    }
+    assert_success(document=document, schema=schema)
+
+    schema['field1']['allow_unknown'] = {'type': 'string'}
+    assert_fail(document=document, schema=schema)
+
+
+def test_novalidate_noerrors(validator):
+    """
+    In v0.1.0 and below `self.errors` raised an exception if no
+    validation had been performed yet.
+    """
+    assert validator.errors == {}
+
+
+def test_callable_validator():
+    """
+    A Validator instance is callable and functions as a shorthand
+    passthrough to validate().
+    """
+    schema = {'test_field': {'type': 'string'}}
+    v = Validator(schema)
+    assert v.validate({'test_field': 'foo'})
+    assert v({'test_field': 'foo'})
+    assert not v.validate({'test_field': 1})
+    assert not v({'test_field': 1})
+
+
+def test_dependencies_field():
+    schema = {'test_field': {'dependencies': 'foo'},
+              'foo': {'type': 'string'}}
+    assert_success({'test_field': 'foobar', 'foo': 'bar'}, schema)
+    assert_fail({'test_field': 'foobar'}, schema)
+
+
+def test_dependencies_list():
+    schema = {
+        'test_field': {'dependencies': ['foo', 'bar']},
+        'foo': {'type': 'string'},
+        'bar': {'type': 'string'}
+    }
+    assert_success({'test_field': 'foobar', 'foo': 'bar', 'bar': 'foo'},
+                   schema)
+    assert_fail({'test_field': 'foobar', 'foo': 'bar'}, schema)
+
+
+def test_dependencies_list_with_required_field():
+    schema = {
+        'test_field': {'required': True, 'dependencies': ['foo', 'bar']},
+        'foo': {'type': 'string'},
+        'bar': {'type': 'string'}
+    }
+    # False: all dependencies missing
+    assert_fail({'test_field': 'foobar'}, schema)
+    # False: one of dependencies missing
+    assert_fail({'test_field': 'foobar', 'foo': 'bar'}, schema)
+    # False: one of dependencies missing
+    assert_fail({'test_field': 'foobar', 'bar': 'foo'}, schema)
+    # False: dependencies are validated and field is required
+    assert_fail({'foo': 'bar', 'bar': 'foo'}, schema)
+    # False: All dependencies are optional but field is still required
+    assert_fail({}, schema)
+    # False: dependency missing
+    assert_fail({'foo': 'bar'}, schema)
+    # True: dependencies are validated but field is not required
+    schema['test_field']['required'] = False
+    assert_success({'foo': 'bar', 'bar': 'foo'}, schema)
+
+
+def test_dependencies_list_with_subodcuments_fields():
+    schema = {
+        'test_field': {'dependencies': ['a_dict.foo', 'a_dict.bar']},
+        'a_dict': {
+            'type': 'dict',
+            'schema': {
+                'foo': {'type': 'string'},
+                'bar': {'type': 'string'}
+            }
+        }
+    }
+    assert_success({'test_field': 'foobar',
+                    'a_dict': {'foo': 'foo', 'bar': 'bar'}}, schema)
+    assert_fail({'test_field': 'foobar', 'a_dict': {}}, schema)
+    assert_fail({'test_field': 'foobar',
+                 'a_dict': {'foo': 'foo'}}, schema)
+
+
+def test_dependencies_dict():
+    schema = {
+        'test_field': {'dependencies': {'foo': 'foo', 'bar': 'bar'}},
+        'foo': {'type': 'string'},
+        'bar': {'type': 'string'}
+    }
+    assert_success({'test_field': 'foobar', 'foo': 'foo', 'bar': 'bar'},
+                   schema)
+    assert_fail({'test_field': 'foobar', 'foo': 'foo'}, schema)
+    assert_fail({'test_field': 'foobar', 'foo': 'bar'}, schema)
+    assert_fail({'test_field': 'foobar', 'bar': 'bar'}, schema)
+    assert_fail({'test_field': 'foobar', 'bar': 'foo'}, schema)
+    assert_fail({'test_field': 'foobar'}, schema)
+
+
+def test_dependencies_dict_with_required_field():
+    schema = {
+        'test_field': {
+            'required': True,
+            'dependencies': {'foo': 'foo', 'bar': 'bar'}
+        },
+        'foo': {'type': 'string'},
+        'bar': {'type': 'string'}
+    }
+    # False: all dependencies missing
+    assert_fail({'test_field': 'foobar'}, schema)
+    # False: one of dependencies missing
+    assert_fail({'test_field': 'foobar', 'foo': 'foo'}, schema)
+    assert_fail({'test_field': 'foobar', 'bar': 'bar'}, schema)
+    # False: dependencies are validated and field is required
+    assert_fail({'foo': 'foo', 'bar': 'bar'}, schema)
+    # False: All dependencies are optional, but field is still required
+    assert_fail({}, schema)
+    # False: dependency missing
+    assert_fail({'foo': 'bar'}, schema)
+
+    assert_success({'test_field': 'foobar', 'foo': 'foo', 'bar': 'bar'},
+                   schema)
+
+    # True: dependencies are validated but field is not required
+    schema['test_field']['required'] = False
+    assert_success({'foo': 'bar', 'bar': 'foo'}, schema)
+
+
+def test_dependencies_field_satisfy_nullable_field():
+    # https://github.com/pyeve/cerberus/issues/305
+    schema = {
+        'foo': {'nullable': True},
+        'bar': {'dependencies': 'foo'}
+    }
+
+    assert_success({'foo': None, 'bar': 1}, schema)
+    assert_success({'foo': None}, schema)
+    assert_fail({'bar': 1}, schema)
+
+
+def test_dependencies_field_with_mutually_dependent_nullable_fields():
+    # https://github.com/pyeve/cerberus/pull/306
+    schema = {
+        'foo': {'dependencies': 'bar', 'nullable': True},
+        'bar': {'dependencies': 'foo', 'nullable': True}
+    }
+    assert_success({'foo': None, 'bar': None}, schema)
+    assert_success({'foo': 1, 'bar': 1}, schema)
+    assert_success({'foo': None, 'bar': 1}, schema)
+    assert_fail({'foo': None}, schema)
+    assert_fail({'foo': 1}, schema)
+
+
+def test_dependencies_dict_with_subdocuments_fields():
+    schema = {
+        'test_field': {'dependencies': {'a_dict.foo': ['foo', 'bar'],
+                                        'a_dict.bar': 'bar'}},
+        'a_dict': {
+            'type': 'dict',
+            'schema': {
+                'foo': {'type': 'string'},
+                'bar': {'type': 'string'}
+            }
+        }
+    }
+    assert_success({'test_field': 'foobar',
+                    'a_dict': {'foo': 'foo', 'bar': 'bar'}}, schema)
+    assert_success({'test_field': 'foobar',
+                    'a_dict': {'foo': 'bar', 'bar': 'bar'}}, schema)
+    assert_fail({'test_field': 'foobar', 'a_dict': {}}, schema)
+    assert_fail({'test_field': 'foobar',
+                 'a_dict': {'foo': 'foo', 'bar': 'foo'}}, schema)
+    assert_fail({'test_field': 'foobar', 'a_dict': {'bar': 'foo'}},
+                schema)
+    assert_fail({'test_field': 'foobar', 'a_dict': {'bar': 'bar'}},
+                schema)
+
+
+def test_root_relative_dependencies():
+    # https://github.com/pyeve/cerberus/issues/288
+    subschema = {'version': {'dependencies': '^repo'}}
+    schema = {'package': {'allow_unknown': True, 'schema': subschema},
+              'repo': {}}
+    assert_fail(
+        {'package': {'repo': 'somewhere', 'version': 0}}, schema,
+        error=('package', ('package', 'schema'),
+               errors.MAPPING_SCHEMA, subschema),
+        child_errors=[(
+            ('package', 'version'),
+            ('package', 'schema', 'version', 'dependencies'),
+            errors.DEPENDENCIES_FIELD, '^repo', ('^repo',)
+        )]
+    )
+    assert_success({'repo': 'somewhere', 'package': {'version': 1}}, schema)
+
+
+def test_dependencies_errors():
+    v = Validator({'field1': {'required': False},
+                   'field2': {'required': True,
+                              'dependencies': {'field1': ['one', 'two']}}})
+    assert_fail({'field1': 'three', 'field2': 7}, validator=v,
+                error=('field2', ('field2', 'dependencies'),
+                       errors.DEPENDENCIES_FIELD_VALUE,
+                       {'field1': ['one', 'two']}, ({'field1': 'three'},)))
+
+
+def test_options_passed_to_nested_validators(validator):
+    validator.schema = {'sub_dict': {'type': 'dict',
+                                     'schema': {'foo': {'type': 'string'}}}}
+    validator.allow_unknown = True
+    assert_success({'sub_dict': {'foo': 'bar', 'unknown': True}},
+                   validator=validator)
+
+
+def test_self_root_document():
+    """ Make sure self.root_document is always the root document.
+    See:
+    * https://github.com/pyeve/cerberus/pull/42
+    * https://github.com/pyeve/eve/issues/295
+    """
+
+    class MyValidator(Validator):
+        def _validate_root_doc(self, root_doc, field, value):
+            """ {'type': 'boolean'} """
+            if ('sub' not in self.root_document or
+                    len(self.root_document['sub']) != 2):
+                self._error(field, 'self.context is not the root doc!')
+
+    schema = {
+        'sub': {
+            'type': 'list',
+            'root_doc': True,
+            'schema': {
+                'type': 'dict',
+                'schema': {
+                    'foo': {
+                        'type': 'string',
+                        'root_doc': True
+                    }
+                }
+            }
+        }
+    }
+    assert_success({'sub': [{'foo': 'bar'}, {'foo': 'baz'}]},
+                   validator=MyValidator(schema))
+
+
+def test_validator_rule(validator):
+    def validate_name(field, value, error):
+        if not value.islower():
+            error(field, 'must be lowercase')
+
+    validator.schema = {
+        'name': {'validator': validate_name},
+        'age': {'type': 'integer'}
+    }
+
+    assert_fail({'name': 'ItsMe', 'age': 2}, validator=validator,
+                error=('name', (), errors.CUSTOM, None, ('must be lowercase',)))
+    assert validator.errors == {'name': ['must be lowercase']}
+    assert_success({'name': 'itsme', 'age': 2}, validator=validator)
+
+
+def test_validated(validator):
+    validator.schema = {'property': {'type': 'string'}}
+    document = {'property': 'string'}
+    assert validator.validated(document) == document
+    document = {'property': 0}
+    assert validator.validated(document) is None
+
+
+def test_anyof():
+    # prop1 must be a number between 0 and 10
+    schema = {'prop1': {'min': 0, 'max': 10}}
+    doc = {'prop1': 5}
+
+    assert_success(doc, schema)
+
+    # prop1 must be either a number between 0 and 10 or 100 and 110
+    schema = {'prop1': {'anyof':
+                        [{'min': 0, 'max': 10}, {'min': 100, 'max': 110}]}}
+    doc = {'prop1': 105}
+
+    assert_success(doc, schema)
+
+    # prop1 must be either a number between 0 and 10 or 100 and 110
+    schema = {'prop1': {'anyof':
+                        [{'min': 0, 'max': 10}, {'min': 100, 'max': 110}]}}
+    doc = {'prop1': 50}
+
+    assert_fail(doc, schema)
+
+    # prop1 must be an integer that is either
+    # greater than or equal to 0, or greater than or equal to 10
+    schema = {'prop1': {'type': 'integer',
+                        'anyof': [{'min': 0}, {'min': 10}]}}
+    assert_success({'prop1': 10}, schema)
+    # test that intermediate schemas do not persist
+    assert 'type' not in schema['prop1']['anyof'][0]
+    assert 'type' not in schema['prop1']['anyof'][1]
+    assert 'allow_unknown' not in schema['prop1']['anyof'][0]
+    assert 'allow_unknown' not in schema['prop1']['anyof'][1]
+    assert_success({'prop1': 5}, schema)
+
+    exp_child_errors = [
+        (('prop1',), ('prop1', 'anyof', 0, 'min'), errors.MIN_VALUE, 0),
+        (('prop1',), ('prop1', 'anyof', 1, 'min'), errors.MIN_VALUE, 10)
+    ]
+    assert_fail({'prop1': -1}, schema,
+                error=(('prop1',), ('prop1', 'anyof'), errors.ANYOF,
+                       [{'min': 0}, {'min': 10}]),
+                child_errors=exp_child_errors)
+    doc = {'prop1': 5.5}
+    assert_fail(doc, schema)
+    doc = {'prop1': '5.5'}
+    assert_fail(doc, schema)
+
+
+def test_allof():
+    # prop1 has to be a float between 0 and 10
+    schema = {'prop1': {'allof': [
+        {'type': 'float'}, {'min': 0}, {'max': 10}]}}
+    doc = {'prop1': -1}
+    assert_fail(doc, schema)
+    doc = {'prop1': 5}
+    assert_success(doc, schema)
+    doc = {'prop1': 11}
+    assert_fail(doc, schema)
+
+    # prop1 has to be a float and an integer
+    schema = {'prop1': {'allof': [{'type': 'float'}, {'type': 'integer'}]}}
+    doc = {'prop1': 11}
+    assert_success(doc, schema)
+    doc = {'prop1': 11.5}
+    assert_fail(doc, schema)
+    doc = {'prop1': '11'}
+    assert_fail(doc, schema)
+
+
+def test_unicode_allowed():
+    # issue 280
+    doc = {'letters': u'♄εℓł☺'}
+
+    schema = {'letters': {'type': 'string', 'allowed': ['a', 'b', 'c']}}
+    assert_fail(doc, schema)
+
+    schema = {'letters': {'type': 'string', 'allowed': [u'♄εℓł☺']}}
+    assert_success(doc, schema)
+
+    schema = {'letters': {'type': 'string', 'allowed': ['♄εℓł☺']}}
+    doc = {'letters': '♄εℓł☺'}
+    assert_success(doc, schema)
+
+
+@mark.skipif(sys.version_info[0] < 3,
+             reason='requires python 3.x')
+def test_unicode_allowed_py3():
+    """ All strings are unicode in Python 3.x. Input doc and schema
+    have equal strings and validation yield success."""
+
+    # issue 280
+    doc = {'letters': u'♄εℓł☺'}
+    schema = {'letters': {'type': 'string', 'allowed': ['♄εℓł☺']}}
+    assert_success(doc, schema)
+
+
+@mark.skipif(sys.version_info[0] > 2,
+             reason='requires python 2.x')
+def test_unicode_allowed_py2():
+    """ Python 2.x encodes value of allowed using default encoding if
+    the string includes characters outside ASCII range. Produced string
+    does not match input which is an unicode string."""
+
+    # issue 280
+    doc = {'letters': u'♄εℓł☺'}
+    schema = {'letters': {'type': 'string', 'allowed': ['♄εℓł☺']}}
+    assert_fail(doc, schema)
+
+
+def test_oneof():
+    # prop1 must satisfy exactly one of:
+    # - greater than 10
+    # - greater than 0
+    # - equal to -5, 5, or 15
+
+    schema = {'prop1': {'type': 'integer', 'oneof': [
+        {'min': 0},
+        {'min': 10},
+        {'allowed': [-5, 5, 15]}]}}
+
+    # document is not valid
+    # prop1 not greater than 0, 10 or equal to -5
+    doc = {'prop1': -1}
+    assert_fail(doc, schema)
+
+    # document is valid
+    # prop1 is less than 0, but is -5
+    doc = {'prop1': -5}
+    assert_success(doc, schema)
+
+    # document is valid
+    # prop1 greater than 0
+    doc = {'prop1': 1}
+    assert_success(doc, schema)
+
+    # document is not valid
+    # prop1 is greater than 0
+    # and equal to 5
+    doc = {'prop1': 5}
+    assert_fail(doc, schema)
+
+    # document is not valid
+    # prop1 is greater than 0
+    # and greater than 10
+    doc = {'prop1': 11}
+    assert_fail(doc, schema)
+
+    # document is not valid
+    # prop1 is greater than 0
+    # and greater than 10
+    # and equal to 15
+    doc = {'prop1': 15}
+    assert_fail(doc, schema)
+
+
+def test_noneof():
+    # prop1 can not be:
+    # - greater than 10
+    # - greater than 0
+    # - equal to -5, 5, or 15
+
+    schema = {'prop1': {'type': 'integer', 'noneof': [
+        {'min': 0},
+        {'min': 10},
+        {'allowed': [-5, 5, 15]}]}}
+
+    # document is valid
+    doc = {'prop1': -1}
+    assert_success(doc, schema)
+
+    # document is not valid
+    # prop1 is equal to -5
+    doc = {'prop1': -5}
+    assert_fail(doc, schema)
+
+    # document is not valid
+    # prop1 greater than 0
+    doc = {'prop1': 1}
+    assert_fail(doc, schema)
+
+    # document is not valid
+    doc = {'prop1': 5}
+    assert_fail(doc, schema)
+
+    # document is not valid
+    doc = {'prop1': 11}
+    assert_fail(doc, schema)
+
+    # document is not valid
+    # and equal to 15
+    doc = {'prop1': 15}
+    assert_fail(doc, schema)
+
+
+def test_anyof_allof():
+    # prop1 can be any number outside of [0-10]
+    schema = {'prop1': {'allof': [{'anyof': [{'type': 'float'},
+                                             {'type': 'integer'}]},
+                                  {'anyof': [{'min': 10},
+                                             {'max': 0}]}
+                                  ]}}
+
+    doc = {'prop1': 11}
+    assert_success(doc, schema)
+    doc = {'prop1': -1}
+    assert_success(doc, schema)
+    doc = {'prop1': 5}
+    assert_fail(doc, schema)
+
+    doc = {'prop1': 11.5}
+    assert_success(doc, schema)
+    doc = {'prop1': -1.5}
+    assert_success(doc, schema)
+    doc = {'prop1': 5.5}
+    assert_fail(doc, schema)
+
+    doc = {'prop1': '5.5'}
+    assert_fail(doc, schema)
+
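For orientation, a standalone sketch of the same allof/anyof composition driven through the Validator class directly; the top-level import is an assumption (the vendored copy lives under mncheck/ext/cerberus), and the schema mirrors the one in test_anyof_allof above:

    from cerberus import Validator  # assumed import path

    # 'allof' requires every listed rule set to pass, while each inner
    # 'anyof' is satisfied by at least one alternative: the value must be
    # a float or an integer AND lie at or beyond one of the bounds.
    schema = {'prop1': {'allof': [{'anyof': [{'type': 'float'},
                                             {'type': 'integer'}]},
                                  {'anyof': [{'min': 10}, {'max': 0}]}]}}
    v = Validator(schema)
    assert v.validate({'prop1': 11})        # integer, >= 10
    assert not v.validate({'prop1': 5})     # inside (0, 10): both bounds fail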
+
+def test_anyof_schema(validator):
+    # test that a list of schemas can be specified.
+
+    valid_parts = [{'schema': {'model number': {'type': 'string'},
+                               'count': {'type': 'integer'}}},
+                   {'schema': {'serial number': {'type': 'string'},
+                               'count': {'type': 'integer'}}}]
+    valid_item = {'type': ['dict', 'string'], 'anyof': valid_parts}
+    schema = {'parts': {'type': 'list', 'schema': valid_item}}
+    document = {'parts': [{'model number': 'MX-009', 'count': 100},
+                          {'serial number': '898-001'},
+                          'misc']}
+
+    # document is valid. each entry in 'parts' matches a type or schema
+    assert_success(document, schema, validator=validator)
+
+    document['parts'].append({'product name': "Monitors", 'count': 18})
+    # document is invalid. 'product name' does not match any valid schemas
+    assert_fail(document, schema, validator=validator)
+
+    document['parts'].pop()
+    # document is valid again
+    assert_success(document, schema, validator=validator)
+
+    document['parts'].append({'product name': "Monitors", 'count': 18})
+    document['parts'].append(10)
+    # and invalid. numbers are not allowed.
+
+    exp_child_errors = [
+        (('parts', 3), ('parts', 'schema', 'anyof'), errors.ANYOF,
+         valid_parts),
+        (('parts', 4), ('parts', 'schema', 'type'), errors.BAD_TYPE,
+         ['dict', 'string'])
+    ]
+
+    _errors = assert_fail(document, schema, validator=validator,
+                          error=('parts', ('parts', 'schema'),
+                                 errors.SEQUENCE_SCHEMA, valid_item),
+                          child_errors=exp_child_errors)
+    assert_not_has_error(_errors, ('parts', 4), ('parts', 'schema', 'anyof'),
+                         errors.ANYOF, valid_parts)
+
+    # tests errors.BasicErrorHandler's tree representation
+    v_errors = validator.errors
+    assert 'parts' in v_errors
+    assert 3 in v_errors['parts'][-1]
+    assert v_errors['parts'][-1][3][0] == "no definitions validate"
+    scope = v_errors['parts'][-1][3][-1]
+    assert 'anyof definition 0' in scope
+    assert 'anyof definition 1' in scope
+    assert scope['anyof definition 0'] == [{"product name": ["unknown field"]}]
+    assert scope['anyof definition 1'] == [{"product name": ["unknown field"]}]
+    assert v_errors['parts'][-1][4] == ["must be of ['dict', 'string'] type"]
+
+
+def test_anyof_2():
+    # these two schemas should be equivalent
+    schema1 = {'prop': {'anyof': [{'type': 'dict',
+                                   'schema': {
+                                       'val': {'type': 'integer'}}},
+                                  {'type': 'dict',
+                                   'schema': {
+                                       'val': {'type': 'string'}}}]}}
+    schema2 = {'prop': {'type': 'dict', 'anyof': [
+        {'schema': {'val': {'type': 'integer'}}},
+        {'schema': {'val': {'type': 'string'}}}]}}
+
+    doc = {'prop': {'val': 0}}
+    assert_success(doc, schema1)
+    assert_success(doc, schema2)
+
+    doc = {'prop': {'val': '0'}}
+    assert_success(doc, schema1)
+    assert_success(doc, schema2)
+
+    doc = {'prop': {'val': 1.1}}
+    assert_fail(doc, schema1)
+    assert_fail(doc, schema2)
+
+
+def test_anyof_type():
+    schema = {'anyof_type': {'anyof_type': ['string', 'integer']}}
+    assert_success({'anyof_type': 'bar'}, schema)
+    assert_success({'anyof_type': 23}, schema)
+
+
+def test_oneof_schema():
+    schema = {'oneof_schema': {'type': 'dict',
+                               'oneof_schema':
+                                   [{'digits': {'type': 'integer',
+                                                'min': 0, 'max': 99}},
+                                    {'text': {'type': 'string',
+                                              'regex': '^[0-9]{2}$'}}]}}
+    assert_success({'oneof_schema': {'digits': 19}}, schema)
+    assert_success({'oneof_schema': {'text': '84'}}, schema)
+    assert_fail({'oneof_schema': {'digits': 19, 'text': '84'}}, schema)
+
+
+def test_nested_oneof_type():
+    schema = {'nested_oneof_type':
+              {'valueschema': {'oneof_type': ['string', 'integer']}}}
+    assert_success({'nested_oneof_type': {'foo': 'a'}}, schema)
+    assert_success({'nested_oneof_type': {'bar': 3}}, schema)
+
+
+def test_nested_oneofs(validator):
+    validator.schema = {'abc': {
+        'type': 'dict',
+        'oneof_schema': [
+            {'foo': {
+                'type': 'dict',
+                'schema': {'bar': {'oneof_type': ['integer', 'float']}}
+            }},
+            {'baz': {'type': 'string'}}
+        ]}}
+
+    document = {'abc': {'foo': {'bar': 'bad'}}}
+
+    expected_errors = {
+        'abc': [
+            'none or more than one rule validate',
+            {'oneof definition 0': [
+                {'foo': [{'bar': [
+                    'none or more than one rule validate',
+                    {'oneof definition 0': ['must be of integer type'],
+                     'oneof definition 1': ['must be of float type']}
+                ]}]}],
+             'oneof definition 1': [{'foo': ['unknown field']}]}
+        ]
+    }
+
+    assert_fail(document, validator=validator)
+    assert validator.errors == expected_errors
+
+
+def test_no_of_validation_if_type_fails(validator):
+    valid_parts = [{'schema': {'model number': {'type': 'string'},
+                               'count': {'type': 'integer'}}},
+                   {'schema': {'serial number': {'type': 'string'},
+                               'count': {'type': 'integer'}}}]
+    validator.schema = {'part': {'type': ['dict', 'string'],
+                                 'anyof': valid_parts}}
+    document = {'part': 10}
+    _errors = assert_fail(document, validator=validator)
+    assert len(_errors) == 1
+
+
+def test_issue_107(validator):
+    schema = {'info': {'type': 'dict',
+                       'schema': {'name': {'type': 'string',
+                                           'required': True}}}}
+    document = {'info': {'name': 'my name'}}
+    assert_success(document, schema, validator=validator)
+
+    v = Validator(schema)
+    assert_success(document, schema, v)
+    # it was once observed that this behaved differently from the previous line
+    assert v.validate(document)
+
+
+def test_dont_type_validate_nulled_values(validator):
+    assert_fail({'an_integer': None}, validator=validator)
+    assert validator.errors == {'an_integer': ['null value not allowed']}
+
+
+def test_dependencies_error(validator):
+    schema = {'field1': {'required': False},
+              'field2': {'required': True,
+                         'dependencies': {'field1': ['one', 'two']}}}
+    validator.validate({'field2': 7}, schema)
+    exp_msg = errors.BasicErrorHandler \
+        .messages[errors.DEPENDENCIES_FIELD_VALUE.code] \
+        .format(field='field2', constraint={'field1': ['one', 'two']})
+    assert validator.errors == {'field2': [exp_msg]}
+
+
+def test_dependencies_on_boolean_field_with_one_value():
+    # https://github.com/pyeve/cerberus/issues/138
+    schema = {'deleted': {'type': 'boolean'},
+              'text': {'dependencies': {'deleted': False}}}
+    try:
+        assert_success({'text': 'foo', 'deleted': False}, schema)
+        assert_fail({'text': 'foo', 'deleted': True}, schema)
+        assert_fail({'text': 'foo'}, schema)
+    except TypeError as e:
+        if str(e) == "argument of type 'bool' is not iterable":
+            raise AssertionError(
+                "Bug #138 still exists, couldn't use boolean in dependency "
+                "without putting it in a list.\n"
+                "'some_field': True vs 'some_field: [True]")
+        else:
+            raise
+
+
+def test_dependencies_on_boolean_field_with_value_in_list():
+    # https://github.com/pyeve/cerberus/issues/138
+    schema = {'deleted': {'type': 'boolean'},
+              'text': {'dependencies': {'deleted': [False]}}}
+
+    assert_success({'text': 'foo', 'deleted': False}, schema)
+    assert_fail({'text': 'foo', 'deleted': True}, schema)
+    assert_fail({'text': 'foo'}, schema)
+
+
+def test_document_path():
+    class DocumentPathTester(Validator):
+        def _validate_trail(self, constraint, field, value):
+            """ {'type': 'boolean'} """
+            test_doc = self.root_document
+            for crumb in self.document_path:
+                test_doc = test_doc[crumb]
+            assert test_doc == self.document
+
+    v = DocumentPathTester()
+    schema = {'foo': {'schema': {'bar': {'trail': True}}}}
+    document = {'foo': {'bar': {}}}
+    assert_success(document, schema, validator=v)
+
+
+def test_excludes():
+    schema = {'this_field': {'type': 'dict',
+                             'excludes': 'that_field'},
+              'that_field': {'type': 'dict'}}
+    assert_success({'this_field': {}}, schema)
+    assert_success({'that_field': {}}, schema)
+    assert_success({}, schema)
+    assert_fail({'that_field': {}, 'this_field': {}}, schema)
+
+
+def test_mutual_excludes():
+    schema = {'this_field': {'type': 'dict',
+                             'excludes': 'that_field'},
+              'that_field': {'type': 'dict',
+                             'excludes': 'this_field'}}
+    assert_success({'this_field': {}}, schema)
+    assert_success({'that_field': {}}, schema)
+    assert_success({}, schema)
+    assert_fail({'that_field': {}, 'this_field': {}}, schema)
+
+
+def test_required_excludes():
+    schema = {'this_field': {'type': 'dict',
+                             'excludes': 'that_field',
+                             'required': True},
+              'that_field': {'type': 'dict',
+                             'excludes': 'this_field',
+                             'required': True}}
+    assert_success({'this_field': {}}, schema, update=False)
+    assert_success({'that_field': {}}, schema, update=False)
+    assert_fail({}, schema)
+    assert_fail({'that_field': {}, 'this_field': {}}, schema)
+
+
+def test_multiples_exclusions():
+    schema = {'this_field': {'type': 'dict',
+                             'excludes': ['that_field', 'bazo_field']},
+              'that_field': {'type': 'dict',
+                             'excludes': 'this_field'},
+              'bazo_field': {'type': 'dict'}}
+    assert_success({'this_field': {}}, schema)
+    assert_success({'that_field': {}}, schema)
+    assert_fail({'this_field': {}, 'that_field': {}}, schema)
+    assert_fail({'this_field': {}, 'bazo_field': {}}, schema)
+    assert_fail({'that_field': {}, 'this_field': {}, 'bazo_field': {}}, schema)
+    assert_success({'that_field': {}, 'bazo_field': {}}, schema)
+
+
+def test_bad_excludes_fields(validator):
+    validator.schema = {'this_field': {'type': 'dict',
+                                       'excludes': ['that_field', 'bazo_field'],
+                                       'required': True},
+                        'that_field': {'type': 'dict',
+                                       'excludes': 'this_field',
+                                       'required': True}}
+    assert_fail({'that_field': {}, 'this_field': {}}, validator=validator)
+    handler = errors.BasicErrorHandler
+    assert (validator.errors ==
+            {'that_field':
+                [handler.messages[errors.EXCLUDES_FIELD.code].format(
+                    "'this_field'", field="that_field")],
+                'this_field':
+                    [handler.messages[errors.EXCLUDES_FIELD.code].format(
+                        "'that_field', 'bazo_field'", field="this_field")]})
+
+
+def test_boolean_is_not_a_number():
+    # https://github.com/pyeve/cerberus/issues/144
+    assert_fail({'value': True}, {'value': {'type': 'number'}})
+
+
+def test_min_max_date():
+    schema = {'date': {'min': date(1900, 1, 1), 'max': date(1999, 12, 31)}}
+    assert_success({'date': date(1945, 5, 8)}, schema)
+    assert_fail({'date': date(1871, 5, 10)}, schema)
+
+
+def test_dict_length():
+    schema = {'dict': {'minlength': 1}}
+    assert_fail({'dict': {}}, schema)
+    assert_success({'dict': {'foo': 'bar'}}, schema)
+
+
+def test_forbidden():
+    schema = {'user': {'forbidden': ['root', 'admin']}}
+    assert_fail({'user': 'admin'}, schema)
+    assert_success({'user': 'alice'}, schema)
+
+
+def test_mapping_with_sequence_schema():
+    schema = {'list': {'schema': {'allowed': ['a', 'b', 'c']}}}
+    document = {'list': {'is_a': 'mapping'}}
+    assert_fail(document, schema,
+                error=('list', ('list', 'schema'), errors.BAD_TYPE_FOR_SCHEMA,
+                       schema['list']['schema']))
+
+
+def test_sequence_with_mapping_schema():
+    schema = {'list': {'schema': {'foo': {'allowed': ['a', 'b', 'c']}},
+                       'type': 'dict'}}
+    document = {'list': ['a', 'b', 'c']}
+    assert_fail(document, schema)
+
+
+def test_type_error_aborts_validation():
+    schema = {'foo': {'type': 'string', 'allowed': ['a']}}
+    document = {'foo': 0}
+    assert_fail(document, schema,
+                error=('foo', ('foo', 'type'), errors.BAD_TYPE, 'string'))
+
+
+def test_dependencies_in_oneof():
+    # https://github.com/pyeve/cerberus/issues/241
+    schema = {'a': {'type': 'integer',
+                    'oneof': [
+                        {'allowed': [1], 'dependencies': 'b'},
+                        {'allowed': [2], 'dependencies': 'c'}
+                    ]},
+              'b': {},
+              'c': {}}
+    assert_success({'a': 1, 'b': 'foo'}, schema)
+    assert_success({'a': 2, 'c': 'bar'}, schema)
+    assert_fail({'a': 1, 'c': 'foo'}, schema)
+    assert_fail({'a': 2, 'b': 'bar'}, schema)
+
+
+def test_allow_unknown_with_oneof_rules(validator):
+    # https://github.com/pyeve/cerberus/issues/251
+    schema = {
+        'test': {
+            'oneof': [
+                {
+                    'type': 'dict',
+                    'allow_unknown': True,
+                    'schema': {'known': {'type': 'string'}}
+                },
+                {
+                    'type': 'dict',
+                    'schema': {'known': {'type': 'string'}}
+                },
+            ]
+        }
+    }
+    # check for a regression and that allow_unknown does not cause any
+    # behaviour different from what is expected for one-of.
+    document = {'test': {'known': 's'}}
+    validator(document, schema)
+    _errors = validator._errors
+    assert len(_errors) == 1
+    assert_has_error(_errors, 'test', ('test', 'oneof'),
+                     errors.ONEOF, schema['test']['oneof'])
+    assert len(_errors[0].child_errors) == 0
+    # check that allow_unknown is actually applied
+    document = {'test': {'known': 's', 'unknown': 'asd'}}
+    assert_success(document, validator=validator)

+ 119 - 119
ext/cerberus/utils.py → mncheck/ext/cerberus/utils.py

@@ -1,119 +1,119 @@
-from __future__ import absolute_import
-
-from collections import Mapping, namedtuple, Sequence
-
-from cerberus.platform import _int_types, _str_type
-
-
-TypeDefinition = namedtuple('TypeDefinition',
-                            'name,included_types,excluded_types')
-"""
-This class is used to define types that can be used as value in the
-:attr:`~cerberus.Validator.types_mapping` property.
-The ``name`` should be descriptive and match the key it is going to be assigned
-to.
-A value that is validated against such definition must be an instance of any of
-the types contained in ``included_types`` and must not match any of the types
-contained in ``excluded_types``.
-"""
-
-
-def compare_paths_lt(x, y):
-    for i in range(min(len(x), len(y))):
-        if isinstance(x[i], type(y[i])):
-            if x[i] != y[i]:
-                return x[i] < y[i]
-        elif isinstance(x[i], _int_types):
-            return True
-        elif isinstance(y[i], _int_types):
-            return False
-    return len(x) < len(y)
-
-
-def drop_item_from_tuple(t, i):
-    return t[:i] + t[i + 1:]
-
-
-def get_Validator_class():
-    global Validator
-    if 'Validator' not in globals():
-        from cerberus.validator import Validator
-    return Validator
-
-
-def mapping_hash(schema):
-    return hash(mapping_to_frozenset(schema))
-
-
-def mapping_to_frozenset(mapping):
-    """ Be aware that this treats any sequence type with the equal members as
-        equal. As it is used to identify equality of schemas, this can be
-        considered okay as definitions are semantically equal regardless the
-        container type. """
-    mapping = mapping.copy()
-    for key, value in mapping.items():
-        if isinstance(value, Mapping):
-            mapping[key] = mapping_to_frozenset(value)
-        elif isinstance(value, Sequence):
-            value = list(value)
-            for i, item in enumerate(value):
-                if isinstance(item, Mapping):
-                    value[i] = mapping_to_frozenset(item)
-            mapping[key] = tuple(value)
-    return frozenset(mapping.items())
-
-
-def isclass(obj):
-    try:
-        issubclass(obj, object)
-    except TypeError:
-        return False
-    else:
-        return True
-
-
-def quote_string(value):
-    if isinstance(value, _str_type):
-        return '"%s"' % value
-    else:
-        return value
-
-
-class readonly_classproperty(property):
-    def __get__(self, instance, owner):
-        return super(readonly_classproperty, self).__get__(owner)
-
-    def __set__(self, instance, value):
-        raise RuntimeError('This is a readonly class property.')
-
-    def __delete__(self, instance):
-        raise RuntimeError('This is a readonly class property.')
-
-
-def validator_factory(name, bases=None, namespace={}):
-    """ Dynamically create a :class:`~cerberus.Validator` subclass.
-        Docstrings of mixin-classes will be added to the resulting
-        class' one if ``__doc__`` is not in :obj:`namespace`.
-
-    :param name: The name of the new class.
-    :type name: :class:`str`
-    :param bases: Class(es) with additional and overriding attributes.
-    :type bases: :class:`tuple` of or a single :term:`class`
-    :param namespace: Attributes for the new class.
-    :type namespace: :class:`dict`
-    :return: The created class.
-    """
-    Validator = get_Validator_class()
-
-    if bases is None:
-        bases = (Validator,)
-    elif isinstance(bases, tuple):
-        bases += (Validator,)
-    else:
-        bases = (bases, Validator)
-
-    docstrings = [x.__doc__ for x in bases if x.__doc__]
-    if len(docstrings) > 1 and '__doc__' not in namespace:
-        namespace.update({'__doc__': '\n'.join(docstrings)})
-
-    return type(name, bases, namespace)
+from __future__ import absolute_import
+
+from collections import Mapping, namedtuple, Sequence
+
+from cerberus.platform import _int_types, _str_type
+
+
+TypeDefinition = namedtuple('TypeDefinition',
+                            'name,included_types,excluded_types')
+"""
+This class is used to define types that can be used as value in the
+:attr:`~cerberus.Validator.types_mapping` property.
+The ``name`` should be descriptive and match the key it is going to be assigned
+to.
+A value that is validated against such definition must be an instance of any of
+the types contained in ``included_types`` and must not match any of the types
+contained in ``excluded_types``.
+"""
+
+
+def compare_paths_lt(x, y):
+    for i in range(min(len(x), len(y))):
+        if isinstance(x[i], type(y[i])):
+            if x[i] != y[i]:
+                return x[i] < y[i]
+        elif isinstance(x[i], _int_types):
+            return True
+        elif isinstance(y[i], _int_types):
+            return False
+    return len(x) < len(y)
+
+
+def drop_item_from_tuple(t, i):
+    return t[:i] + t[i + 1:]
+
+
+def get_Validator_class():
+    global Validator
+    if 'Validator' not in globals():
+        from cerberus.validator import Validator
+    return Validator
+
+
+def mapping_hash(schema):
+    return hash(mapping_to_frozenset(schema))
+
+
+def mapping_to_frozenset(mapping):
+    """ Be aware that this treats any sequence type with the equal members as
+        equal. As it is used to identify equality of schemas, this can be
+        considered okay as definitions are semantically equal regardless the
+        container type. """
+    mapping = mapping.copy()
+    for key, value in mapping.items():
+        if isinstance(value, Mapping):
+            mapping[key] = mapping_to_frozenset(value)
+        elif isinstance(value, Sequence):
+            value = list(value)
+            for i, item in enumerate(value):
+                if isinstance(item, Mapping):
+                    value[i] = mapping_to_frozenset(item)
+            mapping[key] = tuple(value)
+    return frozenset(mapping.items())
+
+
+def isclass(obj):
+    try:
+        issubclass(obj, object)
+    except TypeError:
+        return False
+    else:
+        return True
+
+
+def quote_string(value):
+    if isinstance(value, _str_type):
+        return '"%s"' % value
+    else:
+        return value
+
+
+class readonly_classproperty(property):
+    def __get__(self, instance, owner):
+        return super(readonly_classproperty, self).__get__(owner)
+
+    def __set__(self, instance, value):
+        raise RuntimeError('This is a readonly class property.')
+
+    def __delete__(self, instance):
+        raise RuntimeError('This is a readonly class property.')
+
+
+def validator_factory(name, bases=None, namespace={}):
+    """ Dynamically create a :class:`~cerberus.Validator` subclass.
+        Docstrings of mixin-classes will be added to the resulting
+        class' one if ``__doc__`` is not in :obj:`namespace`.
+
+    :param name: The name of the new class.
+    :type name: :class:`str`
+    :param bases: Class(es) with additional and overriding attributes.
+    :type bases: :class:`tuple` of or a single :term:`class`
+    :param namespace: Attributes for the new class.
+    :type namespace: :class:`dict`
+    :return: The created class.
+    """
+    Validator = get_Validator_class()
+
+    if bases is None:
+        bases = (Validator,)
+    elif isinstance(bases, tuple):
+        bases += (Validator,)
+    else:
+        bases = (bases, Validator)
+
+    docstrings = [x.__doc__ for x in bases if x.__doc__]
+    if len(docstrings) > 1 and '__doc__' not in namespace:
+        namespace.update({'__doc__': '\n'.join(docstrings)})
+
+    return type(name, bases, namespace)
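The TypeDefinition docstring above describes how entries of types_mapping are assembled; a minimal sketch, under the assumption that the vendored package is importable as cerberus, of registering a custom type against that contract (the Decimal type is chosen purely for illustration):

    from decimal import Decimal

    from cerberus import TypeDefinition, Validator  # assumed import path

    class MyValidator(Validator):
        # extend a copy of the inherited mapping instead of mutating it
        types_mapping = Validator.types_mapping.copy()
        types_mapping['decimal'] = TypeDefinition('decimal', (Decimal,), ())

    v = MyValidator({'price': {'type': 'decimal'}})
    assert v.validate({'price': Decimal('1.50')})
    assert not v.validate({'price': 1.5})   # float is not an included type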

+ 1407 - 1407
ext/cerberus/validator.py → mncheck/ext/cerberus/validator.py

@@ -1,1407 +1,1407 @@
-"""
-    Extensible validation for Python dictionaries.
-    This module implements Cerberus Validator class
-
-    :copyright: 2012-2016 by Nicola Iarocci.
-    :license: ISC, see LICENSE for more details.
-
-    Full documentation is available at http://python-cerberus.org
-"""
-
-from __future__ import absolute_import
-
-from ast import literal_eval
-from collections import Hashable, Iterable, Mapping, Sequence
-from copy import copy
-from datetime import date, datetime
-import re
-from warnings import warn
-
-from cerberus import errors
-from cerberus.platform import _int_types, _str_type
-from cerberus.schema import (schema_registry, rules_set_registry,
-                             DefinitionSchema, SchemaError)
-from cerberus.utils import (drop_item_from_tuple, isclass,
-                            readonly_classproperty, TypeDefinition)
-
-
-toy_error_handler = errors.ToyErrorHandler()
-
-
-def dummy_for_rule_validation(rule_constraints):
-    def dummy(self, constraint, field, value):
-        raise RuntimeError('Dummy method called. Its purpose is to hold just '
-                           'validation constraints for a rule in its '
-                           'docstring.')
-    f = dummy
-    f.__doc__ = rule_constraints
-    return f
-
-
-class DocumentError(Exception):
-    """ Raised when the target document is missing or has the wrong format """
-    pass
-
-
-class _SchemaRuleTypeError(Exception):
-    """ Raised when a schema (list) validation encounters a mapping.
-        Not supposed to be used outside this module. """
-    pass
-
-
-class BareValidator(object):
-    """ Validator class. Normalizes and/or validates any mapping against a
-    validation-schema which is provided as an argument at class instantiation
-    or upon calling the :meth:`~cerberus.Validator.validate`,
-    :meth:`~cerberus.Validator.validated` or
-    :meth:`~cerberus.Validator.normalized` method. An instance itself is
-    callable and executes a validation.
-
-    All instantiation parameters are optional.
-
-    There are the introspective properties :attr:`types`, :attr:`validators`,
-    :attr:`coercers`, :attr:`default_setters`, :attr:`rules`,
-    :attr:`normalization_rules` and :attr:`validation_rules`.
-
-    The attributes reflecting the available rules are assembled considering
-    constraints that are defined in the docstrings of rules' methods and are
-    effectively used as the validation schema for :attr:`schema`.
-
-    :param schema: See :attr:`~cerberus.Validator.schema`.
-                   Defaults to :obj:`None`.
-    :type schema: any :term:`mapping`
-    :param ignore_none_values: See :attr:`~cerberus.Validator.ignore_none_values`.
-                               Defaults to ``False``.
-    :type ignore_none_values: :class:`bool`
-    :param allow_unknown: See :attr:`~cerberus.Validator.allow_unknown`.
-                          Defaults to ``False``.
-    :type allow_unknown: :class:`bool` or any :term:`mapping`
-    :param purge_unknown: See :attr:`~cerberus.Validator.purge_unknown`.
-                          Defaults to ``False``.
-    :type purge_unknown: :class:`bool`
-    :param error_handler: The error handler that formats the result of
-                          :attr:`~cerberus.Validator.errors`.
-                          When given as two-value tuple with an error-handler
-                          class and a dictionary, the latter is passed to the
-                          initialization of the error handler.
-                          Default: :class:`~cerberus.errors.BasicErrorHandler`.
-    :type error_handler: class or instance based on
-                         :class:`~cerberus.errors.BaseErrorHandler` or
-                         :class:`tuple`
-    """  # noqa: E501
-
-    mandatory_validations = ('nullable',)
-    """ Rules that are evaluated on any field, regardless whether defined in
-        the schema or not.
-        Type: :class:`tuple` """
-    priority_validations = ('nullable', 'readonly', 'type', 'empty')
-    """ Rules that will be processed in that order before any other.
-        Type: :class:`tuple` """
-    types_mapping = {
-        'binary':
-            TypeDefinition('binary', (bytes, bytearray), ()),
-        'boolean':
-            TypeDefinition('boolean', (bool,), ()),
-        'date':
-            TypeDefinition('date', (date,), ()),
-        'datetime':
-            TypeDefinition('datetime', (datetime,), ()),
-        'dict':
-            TypeDefinition('dict', (Mapping,), ()),
-        'float':
-            TypeDefinition('float', (float, _int_types), ()),
-        'integer':
-            TypeDefinition('integer', (_int_types,), ()),
-        'list':
-            TypeDefinition('list', (Sequence,), (_str_type,)),
-        'number':
-            TypeDefinition('number', (_int_types, float), (bool,)),
-        'set':
-            TypeDefinition('set', (set,), ()),
-        'string':
-            TypeDefinition('string', (_str_type), ())
-    }
-    """ This mapping holds all available constraints for the type rule and
-        their assigned :class:`~cerberus.TypeDefinition`. """
-    _valid_schemas = set()
-    """ A :class:`set` of hashes derived from validation schemas that are
-        legit for a particular ``Validator`` class. """
-
-    def __init__(self, *args, **kwargs):
-        """ The arguments will be treated as with this signature:
-
-        __init__(self, schema=None, ignore_none_values=False,
-                 allow_unknown=False, purge_unknown=False,
-                 error_handler=errors.BasicErrorHandler)
-        """
-
-        self.document = None
-        """ The document that is or was recently processed.
-            Type: any :term:`mapping` """
-        self._errors = errors.ErrorList()
-        """ The list of errors that were encountered since the last document
-            processing was invoked.
-            Type: :class:`~cerberus.errors.ErrorList` """
-        self.recent_error = None
-        """ The last individual error that was submitted.
-            Type: :class:`~cerberus.errors.ValidationError` """
-        self.document_error_tree = errors.DocumentErrorTree()
-        """ A tree representiation of encountered errors following the
-            structure of the document.
-            Type: :class:`~cerberus.errors.DocumentErrorTree` """
-        self.schema_error_tree = errors.SchemaErrorTree()
-        """ A tree representiation of encountered errors following the
-            structure of the schema.
-            Type: :class:`~cerberus.errors.SchemaErrorTree` """
-        self.document_path = ()
-        """ The path within the document to the current sub-document.
-            Type: :class:`tuple` """
-        self.schema_path = ()
-        """ The path within the schema to the current sub-schema.
-            Type: :class:`tuple` """
-        self.update = False
-        self.error_handler = self.__init_error_handler(kwargs)
-        """ The error handler used to format :attr:`~cerberus.Validator.errors`
-            and process submitted errors with
-            :meth:`~cerberus.Validator._error`.
-            Type: :class:`~cerberus.errors.BaseErrorHandler` """
-        self.__store_config(args, kwargs)
-        self.schema = kwargs.get('schema', None)
-        self.allow_unknown = kwargs.get('allow_unknown', False)
-        self._remaining_rules = []
-        """ Keeps track of the rules that are next in line to be evaluated
-            during the validation of a field.
-            Type: :class:`list` """
-
-        super(BareValidator, self).__init__()
-
-    @staticmethod
-    def __init_error_handler(kwargs):
-        error_handler = kwargs.pop('error_handler', errors.BasicErrorHandler)
-        if isinstance(error_handler, tuple):
-            error_handler, eh_config = error_handler
-        else:
-            eh_config = {}
-        if isclass(error_handler) and \
-                issubclass(error_handler, errors.BaseErrorHandler):
-            return error_handler(**eh_config)
-        elif isinstance(error_handler, errors.BaseErrorHandler):
-            return error_handler
-        else:
-            raise RuntimeError('Invalid error_handler.')
-
-    def __store_config(self, args, kwargs):
-        """ Assign args to kwargs and store configuration. """
-        signature = ('schema', 'ignore_none_values', 'allow_unknown',
-                     'purge_unknown')
-        for i, p in enumerate(signature[:len(args)]):
-            if p in kwargs:
-                raise TypeError("ext got multiple values for argument "
-                                "'%s'" % p)
-            else:
-                kwargs[p] = args[i]
-        self._config = kwargs
-        """ This dictionary holds the configuration arguments that were used to
-            initialize the :class:`Validator` instance except the
-            ``error_handler``. """
-
-    @classmethod
-    def clear_caches(cls):
-        """ Purge the cache of known valid schemas. """
-        cls._valid_schemas.clear()
-
-    def _error(self, *args):
-        """ Creates and adds one or multiple errors.
-
-        :param args: Accepts different argument's signatures.
-
-                     *1. Bulk addition of errors:*
-
-                     - :term:`iterable` of
-                       :class:`~cerberus.errors.ValidationError`-instances
-
-                     The errors will be added to
-                     :attr:`~cerberus.Validator._errors`.
-
-                     *2. Custom error:*
-
-                     - the invalid field's name
-
-                     - the error message
-
-                     A custom error containing the message will be created and
-                     added to :attr:`~cerberus.Validator._errors`.
-                     There will, however, be less information contained in the
-                     error (no reference to the violated rule and its
-                     constraint).
-
-                     *3. Defined error:*
-
-                     - the invalid field's name
-
-                     - the error-reference, see :mod:`cerberus.errors`
-
-                     - arbitrary, supplemental information about the error
-
-                     A :class:`~cerberus.errors.ValidationError` instance will
-                     be created and added to
-                     :attr:`~cerberus.Validator._errors`.
-        """
-        if len(args) == 1:
-            self._errors.extend(args[0])
-            self._errors.sort()
-            for error in args[0]:
-                self.document_error_tree += error
-                self.schema_error_tree += error
-                self.error_handler.emit(error)
-        elif len(args) == 2 and isinstance(args[1], _str_type):
-            self._error(args[0], errors.CUSTOM, args[1])
-        elif len(args) >= 2:
-            field = args[0]
-            code = args[1].code
-            rule = args[1].rule
-            info = args[2:]
-
-            document_path = self.document_path + (field, )
-
-            schema_path = self.schema_path
-            if code != errors.UNKNOWN_FIELD.code and rule is not None:
-                schema_path += (field, rule)
-
-            if not rule:
-                constraint = None
-            else:
-                field_definitions = self._resolve_rules_set(self.schema[field])
-                if rule == 'nullable':
-                    constraint = field_definitions.get(rule, False)
-                else:
-                    constraint = field_definitions[rule]
-
-            value = self.document.get(field)
-
-            self.recent_error = errors.ValidationError(
-                document_path, schema_path, code, rule, constraint, value, info
-            )
-            self._error([self.recent_error])
-
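The _error docstring above lists three calling signatures; a short sketch of the two-argument custom-error form as it is typically used from a hand-written rule (class and rule names are hypothetical, and the import path is an assumption):

    from cerberus import Validator  # assumed import path

    class OddValidator(Validator):
        def _validate_is_odd(self, constraint, field, value):
            """ {'type': 'boolean'} """
            if constraint is True and value % 2 == 0:
                # field name plus a plain message: recorded as a CUSTOM error
                # without a reference to a violated rule or constraint
                self._error(field, "value must be an odd number")

    v = OddValidator({'n': {'is_odd': True}})
    assert not v.validate({'n': 4})
    assert v.errors == {'n': ['value must be an odd number']}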
-    def _get_child_validator(self, document_crumb=None, schema_crumb=None,
-                             **kwargs):
-        """ Creates a new instance of Validator-(sub-)class. All initial
-            parameters of the parent are passed to the initialization, unless
-            a parameter is given as an explicit *keyword*-parameter.
-
-        :param document_crumb: Extends the
-                               :attr:`~cerberus.Validator.document_path`
-                               of the child-validator.
-        :type document_crumb: :class:`tuple` or :term:`hashable`
-        :param schema_crumb: Extends the
-                             :attr:`~cerberus.Validator.schema_path`
-                             of the child-validator.
-        :type schema_crumb: :class:`tuple` or hashable
-        :param kwargs: Overriding keyword-arguments for initialization.
-        :type kwargs: :class:`dict`
-
-        :return: an instance of ``self.__class__``
-        """
-        child_config = self._config.copy()
-        child_config.update(kwargs)
-        if not self.is_child:
-            child_config['is_child'] = True
-            child_config['error_handler'] = toy_error_handler
-            child_config['root_allow_unknown'] = self.allow_unknown
-            child_config['root_document'] = self.document
-            child_config['root_schema'] = self.schema
-
-        child_validator = self.__class__(**child_config)
-
-        if document_crumb is None:
-            child_validator.document_path = self.document_path
-        else:
-            if not isinstance(document_crumb, tuple):
-                document_crumb = (document_crumb, )
-            child_validator.document_path = self.document_path + document_crumb
-
-        if schema_crumb is None:
-            child_validator.schema_path = self.schema_path
-        else:
-            if not isinstance(schema_crumb, tuple):
-                schema_crumb = (schema_crumb, )
-            child_validator.schema_path = self.schema_path + schema_crumb
-
-        return child_validator
-
-    def __get_rule_handler(self, domain, rule):
-        methodname = '_{0}_{1}'.format(domain, rule.replace(' ', '_'))
-        result = getattr(self, methodname, None)
-        if result is None:
-            raise RuntimeError("There's no handler for '{}' in the '{}' "
-                               "domain.".format(rule, domain))
-        return result
-
-    def _drop_nodes_from_errorpaths(self, _errors, dp_items, sp_items):
-        """ Removes nodes by index from an errorpath, relatively to the
-            basepaths of self.
-
-        :param errors: A list of :class:`errors.ValidationError` instances.
-        :param dp_items: A list of integers, pointing at the nodes to drop from
-                         the :attr:`document_path`.
-        :param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
-        """
-        dp_basedepth = len(self.document_path)
-        sp_basedepth = len(self.schema_path)
-        for error in _errors:
-            for i in sorted(dp_items, reverse=True):
-                error.document_path = \
-                    drop_item_from_tuple(error.document_path, dp_basedepth + i)
-            for i in sorted(sp_items, reverse=True):
-                error.schema_path = \
-                    drop_item_from_tuple(error.schema_path, sp_basedepth + i)
-            if error.child_errors:
-                self._drop_nodes_from_errorpaths(error.child_errors,
-                                                 dp_items, sp_items)
-
-    def _lookup_field(self, path):
-        """ Searches for a field as defined by path. This method is used by the
-            ``dependency`` evaluation logic.
-
-        :param path: Path elements are separated by a ``.``. A leading ``^``
-                     indicates that the path relates to the document root,
-                     otherwise it relates to the currently evaluated document,
-                     which is possibly a subdocument.
-                     The sequence ``^^`` at the start will be interpreted as a
-                     literal ``^``.
-        :type path: :class:`str`
-        :returns: Either the found field name and its value or :obj:`None` for
-                  both.
-        :rtype: A two-value :class:`tuple`.
-        """
-        if path.startswith('^'):
-            path = path[1:]
-            context = self.document if path.startswith('^') \
-                else self.root_document
-        else:
-            context = self.document
-
-        parts = path.split('.')
-        for part in parts:
-            if part not in context:
-                return None, None
-            context = context.get(part)
-
-        return parts[-1], context
-
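The _lookup_field docstring above defines the dot-separated path syntax with a leading ^ for document-root lookups; a small sketch of how that surfaces through the dependencies rule (field names and the import path are assumptions):

    from cerberus import Validator  # assumed import path

    # 'b' depends on a sibling field; 'c' depends on a root-level field,
    # addressed with the leading '^'.
    schema = {
        'settings': {'type': 'dict', 'schema': {
            'a': {},
            'b': {'dependencies': 'a'},
            'c': {'dependencies': '^enabled'},
        }},
        'enabled': {'type': 'boolean'},
    }
    v = Validator(schema)
    assert v.validate({'settings': {'a': 1, 'b': 2}})
    assert not v.validate({'settings': {'c': 3}})
    assert v.validate({'settings': {'c': 3}, 'enabled': True})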
-    def _resolve_rules_set(self, rules_set):
-        if isinstance(rules_set, Mapping):
-            return rules_set
-        elif isinstance(rules_set, _str_type):
-            return self.rules_set_registry.get(rules_set)
-        return None
-
-    def _resolve_schema(self, schema):
-        if isinstance(schema, Mapping):
-            return schema
-        elif isinstance(schema, _str_type):
-            return self.schema_registry.get(schema)
-        return None
-
-    # Properties
-
-    @property
-    def allow_unknown(self):
-        """ If ``True`` unknown fields that are not defined in the schema will
-            be ignored. If a mapping with a validation schema is given, any
-            undefined field will be validated against its rules.
-            Also see :ref:`allowing-the-unknown`.
-            Type: :class:`bool` or any :term:`mapping` """
-        return self._config.get('allow_unknown', False)
-
-    @allow_unknown.setter
-    def allow_unknown(self, value):
-        if not (self.is_child or isinstance(value, (bool, DefinitionSchema))):
-            DefinitionSchema(self, {'allow_unknown': value})
-        self._config['allow_unknown'] = value
-
-    @property
-    def errors(self):
-        """ The errors of the last processing formatted by the handler that is
-            bound to :attr:`~cerberus.Validator.error_handler`. """
-        return self.error_handler(self._errors)
-
-    @property
-    def ignore_none_values(self):
-        """ Whether to not process :obj:`None`-values in a document or not.
-            Type: :class:`bool` """
-        return self._config.get('ignore_none_values', False)
-
-    @ignore_none_values.setter
-    def ignore_none_values(self, value):
-        self._config['ignore_none_values'] = value
-
-    @property
-    def is_child(self):
-        """ ``True`` for child-validators obtained with
-        :meth:`~cerberus.Validator._get_child_validator`.
-        Type: :class:`bool` """
-        return self._config.get('is_child', False)
-
-    @property
-    def _is_normalized(self):
-        """ ``True`` if the document is already normalized. """
-        return self._config.get('_is_normalized', False)
-
-    @_is_normalized.setter
-    def _is_normalized(self, value):
-        self._config['_is_normalized'] = value
-
-    @property
-    def purge_unknown(self):
-        """ If ``True`` unknown fields will be deleted from the document
-            unless a validation is called with disabled normalization.
-            Also see :ref:`purging-unknown-fields`. Type: :class:`bool` """
-        return self._config.get('purge_unknown', False)
-
-    @purge_unknown.setter
-    def purge_unknown(self, value):
-        self._config['purge_unknown'] = value
-
-    @property
-    def root_allow_unknown(self):
-        """ The :attr:`~cerberus.Validator.allow_unknown` attribute of the
-            first level ancestor of a child validator. """
-        return self._config.get('root_allow_unknown', self.allow_unknown)
-
-    @property
-    def root_document(self):
-        """ The :attr:`~cerberus.Validator.document` attribute of the
-            first level ancestor of a child validator. """
-        return self._config.get('root_document', self.document)
-
-    @property
-    def rules_set_registry(self):
-        """ The registry that holds referenced rules sets.
-            Type: :class:`~cerberus.Registry` """
-        return self._config.get('rules_set_registry', rules_set_registry)
-
-    @rules_set_registry.setter
-    def rules_set_registry(self, registry):
-        self._config['rules_set_registry'] = registry
-
-    @property
-    def root_schema(self):
-        """ The :attr:`~cerberus.Validator.schema` attribute of the
-            first level ancestor of a child validator. """
-        return self._config.get('root_schema', self.schema)
-
-    @property
-    def schema(self):
-        """ The validation schema of a validator. When a schema is passed to
-            a method, it replaces this attribute.
-            Type: any :term:`mapping` or :obj:`None` """
-        return self._schema
-
-    @schema.setter
-    def schema(self, schema):
-        if schema is None:
-            self._schema = None
-        elif self.is_child or isinstance(schema, DefinitionSchema):
-            self._schema = schema
-        else:
-            self._schema = DefinitionSchema(self, schema)
-
-    @property
-    def schema_registry(self):
-        """ The registry that holds referenced schemas.
-            Type: :class:`~cerberus.Registry` """
-        return self._config.get('schema_registry', schema_registry)
-
-    @schema_registry.setter
-    def schema_registry(self, registry):
-        self._config['schema_registry'] = registry
-
-    # FIXME the returned method has the correct docstring, but doesn't appear
-    #       in the API docs
-    @readonly_classproperty
-    def types(cls):
-        """ The constraints that can be used for the 'type' rule.
-            Type: A tuple of strings. """
-        redundant_types = \
-            set(cls.types_mapping) & set(cls._types_from_methods)
-        if redundant_types:
-            warn("These types are defined both with a method and in the"
-                 "'types_mapping' property of this validator: %s"
-                 % redundant_types)
-
-        return tuple(cls.types_mapping) + cls._types_from_methods
-
-    # Document processing
-
-    def __init_processing(self, document, schema=None):
-        self._errors = errors.ErrorList()
-        self.recent_error = None
-        self.document_error_tree = errors.DocumentErrorTree()
-        self.schema_error_tree = errors.SchemaErrorTree()
-        self.document = copy(document)
-        if not self.is_child:
-            self._is_normalized = False
-
-        if schema is not None:
-            self.schema = DefinitionSchema(self, schema)
-        elif self.schema is None:
-            if isinstance(self.allow_unknown, Mapping):
-                self._schema = {}
-            else:
-                raise SchemaError(errors.SCHEMA_ERROR_MISSING)
-        if document is None:
-            raise DocumentError(errors.DOCUMENT_MISSING)
-        if not isinstance(document, Mapping):
-            raise DocumentError(
-                errors.DOCUMENT_FORMAT.format(document))
-        self.error_handler.start(self)
-
-    def _drop_remaining_rules(self, *rules):
-        """ Drops rules from the queue of the rules that still need to be
-            evaluated for the currently processed field.
-            If no arguments are given, the whole queue is emptied.
-        """
-        if rules:
-            for rule in rules:
-                try:
-                    self._remaining_rules.remove(rule)
-                except ValueError:
-                    pass
-        else:
-            self._remaining_rules = []
-
-    # # Normalizing
-
-    def normalized(self, document, schema=None, always_return_document=False):
-        """ Returns the document normalized according to the specified rules
-        of a schema.
-
-        :param document: The document to normalize.
-        :type document: any :term:`mapping`
-        :param schema: The validation schema. Defaults to :obj:`None`. If not
-                       provided here, the schema must have been provided at
-                       class instantiation.
-        :type schema: any :term:`mapping`
-        :param always_return_document: Return the document, even if an error
-                                       occurred. Defaults to: ``False``.
-        :type always_return_document: :class:`bool`
-        :return: A normalized copy of the provided mapping or :obj:`None` if an
-                 error occurred during normalization.
-        """
-        self.__init_processing(document, schema)
-        self.__normalize_mapping(self.document, self.schema)
-        self.error_handler.end(self)
-        if self._errors and not always_return_document:
-            return None
-        else:
-            return self.document
-
-    def __normalize_mapping(self, mapping, schema):
-        if isinstance(schema, _str_type):
-            schema = self._resolve_schema(schema)
-        schema = schema.copy()
-        for field in schema:
-            schema[field] = self._resolve_rules_set(schema[field])
-
-        self.__normalize_rename_fields(mapping, schema)
-        if self.purge_unknown and not self.allow_unknown:
-            self._normalize_purge_unknown(mapping, schema)
-        # Check `readonly` fields before applying default values because
-        # a field's schema definition might contain both `readonly` and
-        # `default`.
-        self.__validate_readonly_fields(mapping, schema)
-        self.__normalize_default_fields(mapping, schema)
-        self._normalize_coerce(mapping, schema)
-        self.__normalize_containers(mapping, schema)
-        self._is_normalized = True
-        return mapping
-
-    def _normalize_coerce(self, mapping, schema):
-        """ {'oneof': [
-                {'type': 'callable'},
-                {'type': 'list',
-                 'schema': {'oneof': [{'type': 'callable'},
-                                      {'type': 'string'}]}},
-                {'type': 'string'}
-                ]} """
-
-        error = errors.COERCION_FAILED
-        for field in mapping:
-            if field in schema and 'coerce' in schema[field]:
-                mapping[field] = self.__normalize_coerce(
-                    schema[field]['coerce'], field, mapping[field],
-                    schema[field].get('nullable', False), error)
-            elif isinstance(self.allow_unknown, Mapping) and \
-                    'coerce' in self.allow_unknown:
-                mapping[field] = self.__normalize_coerce(
-                    self.allow_unknown['coerce'], field, mapping[field],
-                    self.allow_unknown.get('nullable', False), error)
-
-    def __normalize_coerce(self, processor, field, value, nullable, error):
-        if isinstance(processor, _str_type):
-            processor = self.__get_rule_handler('normalize_coerce', processor)
-
-        elif isinstance(processor, Iterable):
-            result = value
-            for p in processor:
-                result = self.__normalize_coerce(p, field, result,
-                                                 nullable, error)
-                if errors.COERCION_FAILED in \
-                    self.document_error_tree.fetch_errors_from(
-                        self.document_path + (field,)):
-                    break
-            return result
-
-        try:
-            return processor(value)
-        except Exception as e:
-            if not nullable and e is not TypeError:
-                self._error(field, error, str(e))
-            return value
-
-    def __normalize_containers(self, mapping, schema):
-        for field in mapping:
-            if field not in schema:
-                continue
-            # TODO: This check conflates validation and normalization
-            if isinstance(mapping[field], Mapping):
-                if 'keyschema' in schema[field]:
-                    self.__normalize_mapping_per_keyschema(
-                        field, mapping, schema[field]['keyschema'])
-                if 'valueschema' in schema[field]:
-                    self.__normalize_mapping_per_valueschema(
-                        field, mapping, schema[field]['valueschema'])
-                if set(schema[field]) & set(('allow_unknown', 'purge_unknown',
-                                             'schema')):
-                    try:
-                        self.__normalize_mapping_per_schema(
-                            field, mapping, schema)
-                    except _SchemaRuleTypeError:
-                        pass
-            elif isinstance(mapping[field], _str_type):
-                continue
-            elif isinstance(mapping[field], Sequence) and \
-                    'schema' in schema[field]:
-                self.__normalize_sequence(field, mapping, schema)
-
-    def __normalize_mapping_per_keyschema(self, field, mapping, property_rules):
-        schema = dict(((k, property_rules) for k in mapping[field]))
-        document = dict(((k, k) for k in mapping[field]))
-        validator = self._get_child_validator(
-            document_crumb=field, schema_crumb=(field, 'keyschema'),
-            schema=schema)
-        result = validator.normalized(document, always_return_document=True)
-        if validator._errors:
-            self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4])
-            self._error(validator._errors)
-        for k in result:
-            if k == result[k]:
-                continue
-            if result[k] in mapping[field]:
-                warn("Normalizing keys of {path}: {key} already exists, "
-                     "its value is replaced."
-                     .format(path='.'.join(self.document_path + (field,)),
-                             key=k))
-                mapping[field][result[k]] = mapping[field][k]
-            else:
-                mapping[field][result[k]] = mapping[field][k]
-                del mapping[field][k]
-
-    def __normalize_mapping_per_valueschema(self, field, mapping, value_rules):
-        schema = dict(((k, value_rules) for k in mapping[field]))
-        validator = self._get_child_validator(
-            document_crumb=field, schema_crumb=(field, 'valueschema'),
-            schema=schema)
-        mapping[field] = validator.normalized(mapping[field],
-                                              always_return_document=True)
-        if validator._errors:
-            self._drop_nodes_from_errorpaths(validator._errors, [], [2])
-            self._error(validator._errors)
-
-    def __normalize_mapping_per_schema(self, field, mapping, schema):
-        validator = self._get_child_validator(
-            document_crumb=field, schema_crumb=(field, 'schema'),
-            schema=schema[field].get('schema', {}),
-            allow_unknown=schema[field].get('allow_unknown', self.allow_unknown),  # noqa: E501
-            purge_unknown=schema[field].get('purge_unknown', self.purge_unknown))  # noqa: E501
-        value_type = type(mapping[field])
-        result_value = validator.normalized(mapping[field],
-                                            always_return_document=True)
-        mapping[field] = value_type(result_value)
-        if validator._errors:
-            self._error(validator._errors)
-
-    def __normalize_sequence(self, field, mapping, schema):
-        schema = dict(((k, schema[field]['schema'])
-                       for k in range(len(mapping[field]))))
-        document = dict((k, v) for k, v in enumerate(mapping[field]))
-        validator = self._get_child_validator(
-            document_crumb=field, schema_crumb=(field, 'schema'),
-            schema=schema)
-        value_type = type(mapping[field])
-        result = validator.normalized(document, always_return_document=True)
-        mapping[field] = value_type(result.values())
-        if validator._errors:
-            self._drop_nodes_from_errorpaths(validator._errors, [], [2])
-            self._error(validator._errors)
-
-    @staticmethod
-    def _normalize_purge_unknown(mapping, schema):
-        """ {'type': 'boolean'} """
-        for field in tuple(mapping):
-            if field not in schema:
-                del mapping[field]
-        return mapping
-
-    def __normalize_rename_fields(self, mapping, schema):
-        for field in tuple(mapping):
-            if field in schema:
-                self._normalize_rename(mapping, schema, field)
-                self._normalize_rename_handler(mapping, schema, field)
-            elif isinstance(self.allow_unknown, Mapping) and \
-                    'rename_handler' in self.allow_unknown:
-                self._normalize_rename_handler(
-                    mapping, {field: self.allow_unknown}, field)
-        return mapping
-
-    def _normalize_rename(self, mapping, schema, field):
-        """ {'type': 'hashable'} """
-        if 'rename' in schema[field]:
-            mapping[schema[field]['rename']] = mapping[field]
-            del mapping[field]
-
-    def _normalize_rename_handler(self, mapping, schema, field):
-        """ {'oneof': [
-                {'type': 'callable'},
-                {'type': 'list',
-                 'schema': {'oneof': [{'type': 'callable'},
-                                      {'type': 'string'}]}},
-                {'type': 'string'}
-                ]} """
-        if 'rename_handler' not in schema[field]:
-            return
-        new_name = self.__normalize_coerce(
-            schema[field]['rename_handler'], field, field,
-            False, errors.RENAMING_FAILED)
-        if new_name != field:
-            mapping[new_name] = mapping[field]
-            del mapping[field]
-
-    def __validate_readonly_fields(self, mapping, schema):
-        for field in (x for x in schema if x in mapping and
-                      self._resolve_rules_set(schema[x]).get('readonly')):
-            self._validate_readonly(schema[field]['readonly'], field,
-                                    mapping[field])
-
-    def __normalize_default_fields(self, mapping, schema):
-        fields = [x for x in schema if x not in mapping or
-                  mapping[x] is None and not schema[x].get('nullable', False)]
-        try:
-            fields_with_default = [x for x in fields if 'default' in schema[x]]
-        except TypeError:
-            raise _SchemaRuleTypeError
-        for field in fields_with_default:
-            self._normalize_default(mapping, schema, field)
-
-        known_fields_states = set()
-        fields = [x for x in fields if 'default_setter' in schema[x]]
-        while fields:
-            field = fields.pop(0)
-            try:
-                self._normalize_default_setter(mapping, schema, field)
-            except KeyError:
-                fields.append(field)
-            except Exception as e:
-                self._error(field, errors.SETTING_DEFAULT_FAILED, str(e))
-
-            fields_state = tuple(fields)
-            if fields_state in known_fields_states:
-                for field in fields:
-                    self._error(field, errors.SETTING_DEFAULT_FAILED,
-                                'Circular dependencies of default setters.')
-                break
-            else:
-                known_fields_states.add(fields_state)
-
-    def _normalize_default(self, mapping, schema, field):
-        """ {'nullable': True} """
-        mapping[field] = schema[field]['default']
-
-    def _normalize_default_setter(self, mapping, schema, field):
-        """ {'oneof': [
-                {'type': 'callable'},
-                {'type': 'string'}
-                ]} """
-        if 'default_setter' in schema[field]:
-            setter = schema[field]['default_setter']
-            if isinstance(setter, _str_type):
-                setter = self.__get_rule_handler('normalize_default_setter',
-                                                 setter)
-            mapping[field] = setter(mapping)
-
-    # # Validating
-
-    def validate(self, document, schema=None, update=False, normalize=True):
-        """ Normalizes and validates a mapping against a validation-schema of
-        defined rules.
-
-        :param document: The document to validate.
-        :type document: any :term:`mapping`
-        :param schema: The validation schema. Defaults to :obj:`None`. If not
-                       provided here, the schema must have been provided at
-                       class instantiation.
-        :type schema: any :term:`mapping`
-        :param update: If ``True``, required fields won't be checked.
-        :type update: :class:`bool`
-        :param normalize: If ``True``, normalize the document before validation.
-        :type normalize: :class:`bool`
-
-        :return: ``True`` if validation succeeds, otherwise ``False``. Check
-                 the :func:`errors` property for a list of processing errors.
-        :rtype: :class:`bool`
-        """
-        self.update = update
-        self._unrequired_by_excludes = set()
-
-        self.__init_processing(document, schema)
-        if normalize:
-            self.__normalize_mapping(self.document, self.schema)
-
-        for field in self.document:
-            if self.ignore_none_values and self.document[field] is None:
-                continue
-            definitions = self.schema.get(field)
-            if definitions is not None:
-                self.__validate_definitions(definitions, field)
-            else:
-                self.__validate_unknown_fields(field)
-
-        if not self.update:
-            self.__validate_required_fields(self.document)
-
-        self.error_handler.end(self)
-
-        return not bool(self._errors)
-
-    __call__ = validate
-
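-    # A minimal call pattern for orientation (the schema and documents below
-    # are arbitrary examples, not taken from this project):
-    #
-    #     v = Validator({'name': {'type': 'string', 'required': True}})
-    #     v.validate({'name': 'foo'})   # -> True
-    #     v.validate({})                # -> False; v.errors describes the failure
-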
-    def validated(self, *args, **kwargs):
-        """ Wrapper around :meth:`~cerberus.Validator.validate` that returns
-            the normalized and validated document or :obj:`None` if validation
-            failed. """
-        always_return_document = kwargs.pop('always_return_document', False)
-        self.validate(*args, **kwargs)
-        if self._errors and not always_return_document:
-            return None
-        else:
-            return self.document
-
-    def __validate_unknown_fields(self, field):
-        if self.allow_unknown:
-            value = self.document[field]
-            if isinstance(self.allow_unknown, (Mapping, _str_type)):
-                # validate that unknown fields match the schema
-                # for unknown_fields
-                schema_crumb = 'allow_unknown' if self.is_child \
-                    else '__allow_unknown__'
-                validator = self._get_child_validator(
-                    schema_crumb=schema_crumb,
-                    schema={field: self.allow_unknown})
-                if not validator({field: value}, normalize=False):
-                    self._error(validator._errors)
-        else:
-            self._error(field, errors.UNKNOWN_FIELD)
-
-    def __validate_definitions(self, definitions, field):
-        """ Validate a field's value against its defined rules. """
-
-        def validate_rule(rule):
-            validator = self.__get_rule_handler('validate', rule)
-            return validator(definitions.get(rule, None), field, value)
-
-        definitions = self._resolve_rules_set(definitions)
-        value = self.document[field]
-
-        rules_queue = [x for x in self.priority_validations
-                       if x in definitions or x in self.mandatory_validations]
-        rules_queue.extend(x for x in self.mandatory_validations
-                           if x not in rules_queue)
-        rules_queue.extend(x for x in definitions
-                           if x not in rules_queue and
-                           x not in self.normalization_rules and
-                           x not in ('allow_unknown', 'required'))
-        self._remaining_rules = rules_queue
-
-        while self._remaining_rules:
-            rule = self._remaining_rules.pop(0)
-            try:
-                result = validate_rule(rule)
-                # TODO remove on next breaking release
-                if result:
-                    break
-            except _SchemaRuleTypeError:
-                break
-
-        self._drop_remaining_rules()
-
-    # Remember to keep the validation methods below this line
-    # sorted alphabetically
-
-    _validate_allow_unknown = dummy_for_rule_validation(
-        """ {'oneof': [{'type': 'boolean'},
-                       {'type': ['dict', 'string'],
-                        'validator': 'bulk_schema'}]} """)
-
-    def _validate_allowed(self, allowed_values, field, value):
-        """ {'type': 'list'} """
-        if isinstance(value, Iterable) and not isinstance(value, _str_type):
-            unallowed = set(value) - set(allowed_values)
-            if unallowed:
-                self._error(field, errors.UNALLOWED_VALUES, list(unallowed))
-        else:
-            if value not in allowed_values:
-                self._error(field, errors.UNALLOWED_VALUE, value)
-
-    def _validate_dependencies(self, dependencies, field, value):
-        """ {'type': ('dict', 'hashable', 'list'),
-             'validator': 'dependencies'} """
-        if isinstance(dependencies, _str_type):
-            dependencies = (dependencies,)
-
-        if isinstance(dependencies, Sequence):
-            self.__validate_dependencies_sequence(dependencies, field)
-        elif isinstance(dependencies, Mapping):
-            self.__validate_dependencies_mapping(dependencies, field)
-
-        if self.document_error_tree.fetch_node_from(
-                self.schema_path + (field, 'dependencies')) is not None:
-            return True
-
-    def __validate_dependencies_mapping(self, dependencies, field):
-        validated_dependencies_counter = 0
-        error_info = {}
-        for dependency_name, dependency_values in dependencies.items():
-            if (not isinstance(dependency_values, Sequence) or
-                    isinstance(dependency_values, _str_type)):
-                dependency_values = [dependency_values]
-
-            wanted_field, wanted_field_value = \
-                self._lookup_field(dependency_name)
-            if wanted_field_value in dependency_values:
-                validated_dependencies_counter += 1
-            else:
-                error_info.update({dependency_name: wanted_field_value})
-
-        if validated_dependencies_counter != len(dependencies):
-            self._error(field, errors.DEPENDENCIES_FIELD_VALUE, error_info)
-
-    def __validate_dependencies_sequence(self, dependencies, field):
-        for dependency in dependencies:
-            if self._lookup_field(dependency)[0] is None:
-                self._error(field, errors.DEPENDENCIES_FIELD, dependency)
-
-    def _validate_empty(self, empty, field, value):
-        """ {'type': 'boolean'} """
-        if isinstance(value, Iterable) and len(value) == 0:
-            self._drop_remaining_rules(
-                'allowed', 'forbidden', 'items', 'minlength', 'maxlength',
-                'regex', 'validator')
-            if not empty:
-                self._error(field, errors.EMPTY_NOT_ALLOWED)
-
-    def _validate_excludes(self, excludes, field, value):
-        """ {'type': ('hashable', 'list'),
-             'schema': {'type': 'hashable'}} """
-        if isinstance(excludes, Hashable):
-            excludes = [excludes]
-
-        # Save required field to be checked later
-        if 'required' in self.schema[field] and self.schema[field]['required']:
-            self._unrequired_by_excludes.add(field)
-        for exclude in excludes:
-            if (exclude in self.schema and
-                'required' in self.schema[exclude] and
-                    self.schema[exclude]['required']):
-
-                self._unrequired_by_excludes.add(exclude)
-
-        if [True for key in excludes if key in self.document]:
-            # Wrap each field in the `excludes` list in quotes
-            exclusion_str = ', '.join("'{0}'"
-                                      .format(word) for word in excludes)
-            self._error(field, errors.EXCLUDES_FIELD, exclusion_str)
-
-    def _validate_forbidden(self, forbidden_values, field, value):
-        """ {'type': 'list'} """
-        if isinstance(value, _str_type):
-            if value in forbidden_values:
-                self._error(field, errors.FORBIDDEN_VALUE, value)
-        elif isinstance(value, Sequence):
-            forbidden = set(value) & set(forbidden_values)
-            if forbidden:
-                self._error(field, errors.FORBIDDEN_VALUES, list(forbidden))
-        elif isinstance(value, int):
-            if value in forbidden_values:
-                self._error(field, errors.FORBIDDEN_VALUE, value)
-
-    def _validate_items(self, items, field, values):
-        """ {'type': 'list', 'validator': 'items'} """
-        if len(items) != len(values):
-            self._error(field, errors.ITEMS_LENGTH, len(items), len(values))
-        else:
-            schema = dict((i, definition) for i, definition in enumerate(items))  # noqa: E501
-            validator = self._get_child_validator(document_crumb=field,
-                                                  schema_crumb=(field, 'items'),  # noqa: E501
-                                                  schema=schema)
-            if not validator(dict((i, value) for i, value in enumerate(values)),
-                             update=self.update, normalize=False):
-                self._error(field, errors.BAD_ITEMS, validator._errors)
-
-    def __validate_logical(self, operator, definitions, field, value):
-        """ Validates value against all definitions and logs errors according
-            to the operator. """
-        valid_counter = 0
-        _errors = errors.ErrorList()
-
-        for i, definition in enumerate(definitions):
-            schema = {field: definition.copy()}
-            for rule in ('allow_unknown', 'type'):
-                if rule not in schema[field] and rule in self.schema[field]:
-                    schema[field][rule] = self.schema[field][rule]
-            if 'allow_unknown' not in schema[field]:
-                schema[field]['allow_unknown'] = self.allow_unknown
-
-            validator = self._get_child_validator(
-                schema_crumb=(field, operator, i),
-                schema=schema, allow_unknown=True)
-            if validator(self.document, update=self.update, normalize=False):
-                valid_counter += 1
-            else:
-                self._drop_nodes_from_errorpaths(validator._errors, [], [3])
-                _errors.extend(validator._errors)
-
-        return valid_counter, _errors
-
-    def _validate_anyof(self, definitions, field, value):
-        """ {'type': 'list', 'logical': 'anyof'} """
-        valids, _errors = \
-            self.__validate_logical('anyof', definitions, field, value)
-        if valids < 1:
-            self._error(field, errors.ANYOF, _errors,
-                        valids, len(definitions))
-
-    def _validate_allof(self, definitions, field, value):
-        """ {'type': 'list', 'logical': 'allof'} """
-        valids, _errors = \
-            self.__validate_logical('allof', definitions, field, value)
-        if valids < len(definitions):
-            self._error(field, errors.ALLOF, _errors,
-                        valids, len(definitions))
-
-    def _validate_noneof(self, definitions, field, value):
-        """ {'type': 'list', 'logical': 'noneof'} """
-        valids, _errors = \
-            self.__validate_logical('noneof', definitions, field, value)
-        if valids > 0:
-            self._error(field, errors.NONEOF, _errors,
-                        valids, len(definitions))
-
-    def _validate_oneof(self, definitions, field, value):
-        """ {'type': 'list', 'logical': 'oneof'} """
-        valids, _errors = \
-            self.__validate_logical('oneof', definitions, field, value)
-        if valids != 1:
-            self._error(field, errors.ONEOF, _errors,
-                        valids, len(definitions))
-
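-    # Counting semantics of the four logical rules above, sketched with
-    # arbitrary schema fragments (the field name and sub-rules are examples):
-    #
-    #     {'prop': {'anyof':  [{'type': 'integer'}, {'type': 'string'}]}}    # >= 1 must match
-    #     {'prop': {'allof':  [{'min': 0}, {'max': 10}]}}                    # all must match
-    #     {'prop': {'noneof': [{'allowed': [0]}]}}                           # none may match
-    #     {'prop': {'oneof':  [{'allowed': [1, 2]}, {'allowed': [3, 4]}]}}   # exactly 1 must match
-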
-    def _validate_max(self, max_value, field, value):
-        """ {'nullable': False } """
-        try:
-            if value > max_value:
-                self._error(field, errors.MAX_VALUE)
-        except TypeError:
-            pass
-
-    def _validate_min(self, min_value, field, value):
-        """ {'nullable': False } """
-        try:
-            if value < min_value:
-                self._error(field, errors.MIN_VALUE)
-        except TypeError:
-            pass
-
-    def _validate_maxlength(self, max_length, field, value):
-        """ {'type': 'integer'} """
-        if isinstance(value, Iterable) and len(value) > max_length:
-            self._error(field, errors.MAX_LENGTH, len(value))
-
-    def _validate_minlength(self, min_length, field, value):
-        """ {'type': 'integer'} """
-        if isinstance(value, Iterable) and len(value) < min_length:
-            self._error(field, errors.MIN_LENGTH, len(value))
-
-    def _validate_nullable(self, nullable, field, value):
-        """ {'type': 'boolean'} """
-        if value is None:
-            if not nullable:
-                self._error(field, errors.NOT_NULLABLE)
-            self._drop_remaining_rules(
-                'empty', 'forbidden', 'items', 'keyschema', 'min', 'max',
-                'minlength', 'maxlength', 'regex', 'schema', 'type',
-                'valueschema')
-
-    def _validate_keyschema(self, schema, field, value):
-        """ {'type': ['dict', 'string'], 'validator': 'bulk_schema',
-            'forbidden': ['rename', 'rename_handler']} """
-        if isinstance(value, Mapping):
-            validator = self._get_child_validator(
-                document_crumb=field,
-                schema_crumb=(field, 'keyschema'),
-                schema=dict(((k, schema) for k in value.keys())))
-            if not validator(dict(((k, k) for k in value.keys())),
-                             normalize=False):
-                self._drop_nodes_from_errorpaths(validator._errors,
-                                                 [], [2, 4])
-                self._error(field, errors.KEYSCHEMA, validator._errors)
-
-    def _validate_readonly(self, readonly, field, value):
-        """ {'type': 'boolean'} """
-        if readonly:
-            if not self._is_normalized:
-                self._error(field, errors.READONLY_FIELD)
-            # If the document was normalized (and has therefore already been
-            # checked for readonly fields), we still have to drop the
-            # remaining rules if an error was filed.
-            has_error = errors.READONLY_FIELD in \
-                self.document_error_tree.fetch_errors_from(
-                    self.document_path + (field,))
-            if self._is_normalized and has_error:
-                self._drop_remaining_rules()
-
-    def _validate_regex(self, pattern, field, value):
-        """ {'type': 'string'} """
-        if not isinstance(value, _str_type):
-            return
-        if not pattern.endswith('$'):
-            pattern += '$'
-        re_obj = re.compile(pattern)
-        if not re_obj.match(value):
-            self._error(field, errors.REGEX_MISMATCH)
-
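-    # Behaviour sketch for the rule above (pattern and values are arbitrary):
-    # a '$' is appended and re.match() anchors at the start, so the pattern
-    # has to cover the whole string, e.g.
-    #
-    #     {'code': {'type': 'string', 'regex': '[A-Z]{2}[0-9]{3}'}}
-    #     # 'AB123' passes; 'AB123x' and 'xAB123' fail with REGEX_MISMATCH
-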
-    _validate_required = dummy_for_rule_validation(""" {'type': 'boolean'} """)
-
-    def __validate_required_fields(self, document):
-        """ Validates that required fields are not missing.
-
-        :param document: The document being validated.
-        """
-        try:
-            required = set(field for field, definition in self.schema.items()
-                           if self._resolve_rules_set(definition).
-                           get('required') is True)
-        except AttributeError:
-            if self.is_child and self.schema_path[-1] == 'schema':
-                raise _SchemaRuleTypeError
-            else:
-                raise
-        required -= self._unrequired_by_excludes
-        missing = required - set(field for field in document
-                                 if document.get(field) is not None or
-                                 not self.ignore_none_values)
-
-        for field in missing:
-            self._error(field, errors.REQUIRED_FIELD)
-
-        # At least one field from self._unrequired_by_excludes should be
-        # present in the document
-        if self._unrequired_by_excludes:
-            fields = set(field for field in document
-                         if document.get(field) is not None)
-            if self._unrequired_by_excludes.isdisjoint(fields):
-                for field in self._unrequired_by_excludes - fields:
-                    self._error(field, errors.REQUIRED_FIELD)
-
-    def _validate_schema(self, schema, field, value):
-        """ {'type': ['dict', 'string'],
-             'anyof': [{'validator': 'schema'},
-                       {'validator': 'bulk_schema'}]} """
-        if schema is None:
-            return
-
-        if isinstance(value, Sequence) and not isinstance(value, _str_type):
-            self.__validate_schema_sequence(field, schema, value)
-        elif isinstance(value, Mapping):
-            self.__validate_schema_mapping(field, schema, value)
-
-    def __validate_schema_mapping(self, field, schema, value):
-        schema = self._resolve_schema(schema)
-        allow_unknown = self.schema[field].get('allow_unknown',
-                                               self.allow_unknown)
-        validator = self._get_child_validator(document_crumb=field,
-                                              schema_crumb=(field, 'schema'),
-                                              schema=schema,
-                                              allow_unknown=allow_unknown)
-        try:
-            if not validator(value, update=self.update, normalize=False):
-                self._error(field, errors.MAPPING_SCHEMA, validator._errors)
-        except _SchemaRuleTypeError:
-            self._error(field, errors.BAD_TYPE_FOR_SCHEMA)
-            raise
-
-    def __validate_schema_sequence(self, field, schema, value):
-        schema = dict(((i, schema) for i in range(len(value))))
-        validator = self._get_child_validator(
-            document_crumb=field, schema_crumb=(field, 'schema'),
-            schema=schema, allow_unknown=self.allow_unknown)
-        validator(dict(((i, v) for i, v in enumerate(value))),
-                  update=self.update, normalize=False)
-
-        if validator._errors:
-            self._drop_nodes_from_errorpaths(validator._errors, [], [2])
-            self._error(field, errors.SEQUENCE_SCHEMA, validator._errors)
-
-    def _validate_type(self, data_type, field, value):
-        """ {'type': ['string', 'list'],
-             'validator': 'type'} """
-        if not data_type:
-            return
-
-        types = (data_type,) if isinstance(data_type, _str_type) else data_type
-
-        for _type in types:
-            # TODO remove this block on next major release
-            # this implementation still supports custom type validation methods
-            type_definition = self.types_mapping.get(_type)
-            if type_definition is not None:
-                matched = isinstance(value, type_definition.included_types) \
-                    and not isinstance(value, type_definition.excluded_types)
-            else:
-                type_handler = self.__get_rule_handler('validate_type', _type)
-                matched = type_handler(value)
-            if matched:
-                return
-
-            # TODO uncomment this block on next major release
-            #      when _validate_type_* methods were deprecated:
-            # type_definition = self.types_mapping[_type]
-            # if isinstance(value, type_definition.included_types) \
-            #         and not isinstance(value, type_definition.excluded_types):  # noqa 501
-            #     return
-
-        self._error(field, errors.BAD_TYPE)
-        self._drop_remaining_rules()
-
-    def _validate_validator(self, validator, field, value):
-        """ {'oneof': [
-                {'type': 'callable'},
-                {'type': 'list',
-                 'schema': {'oneof': [{'type': 'callable'},
-                                      {'type': 'string'}]}},
-                {'type': 'string'}
-                ]} """
-        if isinstance(validator, _str_type):
-            validator = self.__get_rule_handler('validator', validator)
-            validator(field, value)
-        elif isinstance(validator, Iterable):
-            for v in validator:
-                self._validate_validator(v, field, value)
-        else:
-            validator(field, value, self._error)
-
-    def _validate_valueschema(self, schema, field, value):
-        """ {'type': ['dict', 'string'], 'validator': 'bulk_schema',
-            'forbidden': ['rename', 'rename_handler']} """
-        schema_crumb = (field, 'valueschema')
-        if isinstance(value, Mapping):
-            validator = self._get_child_validator(
-                document_crumb=field, schema_crumb=schema_crumb,
-                schema=dict((k, schema) for k in value))
-            validator(value, update=self.update, normalize=False)
-            if validator._errors:
-                self._drop_nodes_from_errorpaths(validator._errors, [], [2])
-                self._error(field, errors.VALUESCHEMA, validator._errors)
-
-
-RULE_SCHEMA_SEPARATOR = \
-    "The rule's arguments are validated against this schema:"
-
-
-class InspectedValidator(type):
-    """ Metaclass for all validators """
-    def __new__(cls, *args):
-        if '__doc__' not in args[2]:
-            args[2].update({'__doc__': args[1][0].__doc__})
-        return super(InspectedValidator, cls).__new__(cls, *args)
-
-    def __init__(cls, *args):
-        def attributes_with_prefix(prefix):
-            return tuple(x.split('_', 2)[-1] for x in dir(cls)
-                         if x.startswith('_' + prefix))
-
-        super(InspectedValidator, cls).__init__(*args)
-
-        cls._types_from_methods, cls.validation_rules = (), {}
-        for attribute in attributes_with_prefix('validate'):
-            # TODO remove inspection of type test methods in next major release
-            if attribute.startswith('type_'):
-                cls._types_from_methods += (attribute[len('type_'):],)
-            else:
-                cls.validation_rules[attribute] = \
-                    cls.__get_rule_schema('_validate_' + attribute)
-
-        # TODO remove on next major release
-        if cls._types_from_methods:
-            warn("Methods for type testing are deprecated, use TypeDefinition "
-                 "and the 'types_mapping'-property of a Validator-instance "
-                 "instead.", DeprecationWarning)
-
-        cls.validators = tuple(x for x in attributes_with_prefix('validator'))
-        x = cls.validation_rules['validator']['oneof']
-        x[1]['schema']['oneof'][1]['allowed'] = x[2]['allowed'] = cls.validators
-
-        for rule in (x for x in cls.mandatory_validations if x != 'nullable'):
-            cls.validation_rules[rule]['required'] = True
-
-        cls.coercers, cls.default_setters, cls.normalization_rules = (), (), {}
-        for attribute in attributes_with_prefix('normalize'):
-            if attribute.startswith('coerce_'):
-                cls.coercers += (attribute[len('coerce_'):],)
-            elif attribute.startswith('default_setter_'):
-                cls.default_setters += (attribute[len('default_setter_'):],)
-            else:
-                cls.normalization_rules[attribute] = \
-                    cls.__get_rule_schema('_normalize_' + attribute)
-
-        for rule in ('coerce', 'rename_handler'):
-            x = cls.normalization_rules[rule]['oneof']
-            x[1]['schema']['oneof'][1]['allowed'] = \
-                x[2]['allowed'] = cls.coercers
-        cls.normalization_rules['default_setter']['oneof'][1]['allowed'] = \
-            cls.default_setters
-
-        cls.rules = {}
-        cls.rules.update(cls.validation_rules)
-        cls.rules.update(cls.normalization_rules)
-
-    def __get_rule_schema(cls, method_name):
-        docstring = getattr(cls, method_name).__doc__
-        if docstring is None:
-            result = {}
-        else:
-            if RULE_SCHEMA_SEPARATOR in docstring:
-                docstring = docstring.split(RULE_SCHEMA_SEPARATOR)[1]
-            try:
-                result = literal_eval(docstring.strip())
-            except Exception:
-                result = {}
-
-        if not result:
-            warn("No validation schema is defined for the arguments of rule "
-                 "'%s'" % method_name.split('_', 2)[-1])
-
-        return result
-
-
-Validator = InspectedValidator('Validator', (BareValidator,), {})
+"""
+    Extensible validation for Python dictionaries.
+    This module implements the Cerberus Validator class.
+
+    :copyright: 2012-2016 by Nicola Iarocci.
+    :license: ISC, see LICENSE for more details.
+
+    Full documentation is available at http://python-cerberus.org
+"""
+
+from __future__ import absolute_import
+
+from ast import literal_eval
+from collections import Hashable, Iterable, Mapping, Sequence
+from copy import copy
+from datetime import date, datetime
+import re
+from warnings import warn
+
+from cerberus import errors
+from cerberus.platform import _int_types, _str_type
+from cerberus.schema import (schema_registry, rules_set_registry,
+                             DefinitionSchema, SchemaError)
+from cerberus.utils import (drop_item_from_tuple, isclass,
+                            readonly_classproperty, TypeDefinition)
+
+
+toy_error_handler = errors.ToyErrorHandler()
+
+
+def dummy_for_rule_validation(rule_constraints):
+    def dummy(self, constraint, field, value):
+        raise RuntimeError('Dummy method called. Its purpose is to hold just '
+                           'validation constraints for a rule in its '
+                           'docstring.')
+    f = dummy
+    f.__doc__ = rule_constraints
+    return f
+
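+# The helper above lets a rule that needs no runtime logic still publish the
+# constraint schema for its arguments; InspectedValidator later literal_eval()s
+# that docstring into the rule registry. Taken from further down in this
+# module, e.g.:
+#
+#     _validate_required = dummy_for_rule_validation(""" {'type': 'boolean'} """)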
+
+class DocumentError(Exception):
+    """ Raised when the target document is missing or has the wrong format """
+    pass
+
+
+class _SchemaRuleTypeError(Exception):
+    """ Raised when a schema (list) validation encounters a mapping.
+        Not supposed to be used outside this module. """
+    pass
+
+
+class BareValidator(object):
+    """ Validator class. Normalizes and/or validates any mapping against a
+    validation-schema which is provided as an argument at class instantiation
+    or upon calling the :meth:`~cerberus.Validator.validate`,
+    :meth:`~cerberus.Validator.validated` or
+    :meth:`~cerberus.Validator.normalized` method. An instance itself is
+    callable and executes a validation.
+
+    All instantiation parameters are optional.
+
+    There are the introspective properties :attr:`types`, :attr:`validators`,
+    :attr:`coercers`, :attr:`default_setters`, :attr:`rules`,
+    :attr:`normalization_rules` and :attr:`validation_rules`.
+
+    The attributes reflecting the available rules are assembled considering
+    constraints that are defined in the docstrings of rules' methods and are
+    effectively used as the validation schema for :attr:`schema`.
+
+    :param schema: See :attr:`~cerberus.Validator.schema`.
+                   Defaults to :obj:`None`.
+    :type schema: any :term:`mapping`
+    :param ignore_none_values: See :attr:`~cerberus.Validator.ignore_none_values`.
+                               Defaults to ``False``.
+    :type ignore_none_values: :class:`bool`
+    :param allow_unknown: See :attr:`~cerberus.Validator.allow_unknown`.
+                          Defaults to ``False``.
+    :type allow_unknown: :class:`bool` or any :term:`mapping`
+    :param purge_unknown: See :attr:`~cerberus.Validator.purge_unknown`.
+                          Defaults to ``False``.
+    :type purge_unknown: :class:`bool`
+    :param error_handler: The error handler that formats the result of
+                          :attr:`~cerberus.Validator.errors`.
+                          When given as two-value tuple with an error-handler
+                          class and a dictionary, the latter is passed to the
+                          initialization of the error handler.
+                          Default: :class:`~cerberus.errors.BasicErrorHandler`.
+    :type error_handler: class or instance based on
+                         :class:`~cerberus.errors.BaseErrorHandler` or
+                         :class:`tuple`
+    """  # noqa: E501
+
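+    # Orientation sketch (the schema and document contents are arbitrary
+    # examples, not taken from this project):
+    #
+    #     v = Validator({'name': {'type': 'string'}}, allow_unknown=True)
+    #     v({'name': 'foo', 'extra': 1})   # instances are callable -> True
+    #     v.errors                         # -> {}
+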
+    mandatory_validations = ('nullable',)
+    """ Rules that are evaluated on any field, regardless whether defined in
+        the schema or not.
+        Type: :class:`tuple` """
+    priority_validations = ('nullable', 'readonly', 'type', 'empty')
+    """ Rules that will be processed in that order before any other.
+        Type: :class:`tuple` """
+    types_mapping = {
+        'binary':
+            TypeDefinition('binary', (bytes, bytearray), ()),
+        'boolean':
+            TypeDefinition('boolean', (bool,), ()),
+        'date':
+            TypeDefinition('date', (date,), ()),
+        'datetime':
+            TypeDefinition('datetime', (datetime,), ()),
+        'dict':
+            TypeDefinition('dict', (Mapping,), ()),
+        'float':
+            TypeDefinition('float', (float, _int_types), ()),
+        'integer':
+            TypeDefinition('integer', (_int_types,), ()),
+        'list':
+            TypeDefinition('list', (Sequence,), (_str_type,)),
+        'number':
+            TypeDefinition('number', (_int_types, float), (bool,)),
+        'set':
+            TypeDefinition('set', (set,), ()),
+        'string':
+            TypeDefinition('string', (_str_type,), ())
+    }
+    """ This mapping holds all available constraints for the type rule and
+        their assigned :class:`~cerberus.TypeDefinition`. """
+    _valid_schemas = set()
+    """ A :class:`set` of hashes derived from validation schemas that are
+        legit for a particular ``Validator`` class. """
+
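+    # Additional types can be registered by extending this mapping with a
+    # TypeDefinition on a subclass (sketch; 'MyValidator', 'decimal' and the
+    # Decimal import are illustrative, not part of this module):
+    #
+    #     from decimal import Decimal
+    #     class MyValidator(Validator):
+    #         types_mapping = Validator.types_mapping.copy()
+    #         types_mapping['decimal'] = TypeDefinition('decimal', (Decimal,), ())
+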
+    def __init__(self, *args, **kwargs):
+        """ The arguments will be treated as with this signature:
+
+        __init__(self, schema=None, ignore_none_values=False,
+                 allow_unknown=False, purge_unknown=False,
+                 error_handler=errors.BasicErrorHandler)
+        """
+
+        self.document = None
+        """ The document that is or was recently processed.
+            Type: any :term:`mapping` """
+        self._errors = errors.ErrorList()
+        """ The list of errors that were encountered since the last document
+            processing was invoked.
+            Type: :class:`~cerberus.errors.ErrorList` """
+        self.recent_error = None
+        """ The last individual error that was submitted.
+            Type: :class:`~cerberus.errors.ValidationError` """
+        self.document_error_tree = errors.DocumentErrorTree()
+        """ A tree representiation of encountered errors following the
+            structure of the document.
+            Type: :class:`~cerberus.errors.DocumentErrorTree` """
+        self.schema_error_tree = errors.SchemaErrorTree()
+        """ A tree representiation of encountered errors following the
+            structure of the schema.
+            Type: :class:`~cerberus.errors.SchemaErrorTree` """
+        self.document_path = ()
+        """ The path within the document to the current sub-document.
+            Type: :class:`tuple` """
+        self.schema_path = ()
+        """ The path within the schema to the current sub-schema.
+            Type: :class:`tuple` """
+        self.update = False
+        self.error_handler = self.__init_error_handler(kwargs)
+        """ The error handler used to format :attr:`~cerberus.Validator.errors`
+            and process submitted errors with
+            :meth:`~cerberus.Validator._error`.
+            Type: :class:`~cerberus.errors.BaseErrorHandler` """
+        self.__store_config(args, kwargs)
+        self.schema = kwargs.get('schema', None)
+        self.allow_unknown = kwargs.get('allow_unknown', False)
+        self._remaining_rules = []
+        """ Keeps track of the rules that are next in line to be evaluated
+            during the validation of a field.
+            Type: :class:`list` """
+
+        super(BareValidator, self).__init__()
+
+    @staticmethod
+    def __init_error_handler(kwargs):
+        error_handler = kwargs.pop('error_handler', errors.BasicErrorHandler)
+        if isinstance(error_handler, tuple):
+            error_handler, eh_config = error_handler
+        else:
+            eh_config = {}
+        if isclass(error_handler) and \
+                issubclass(error_handler, errors.BaseErrorHandler):
+            return error_handler(**eh_config)
+        elif isinstance(error_handler, errors.BaseErrorHandler):
+            return error_handler
+        else:
+            raise RuntimeError('Invalid error_handler.')
+
+    def __store_config(self, args, kwargs):
+        """ Assign args to kwargs and store configuration. """
+        signature = ('schema', 'ignore_none_values', 'allow_unknown',
+                     'purge_unknown')
+        for i, p in enumerate(signature[:len(args)]):
+            if p in kwargs:
+                raise TypeError("ext got multiple values for argument "
+                                "'%s'" % p)
+            else:
+                kwargs[p] = args[i]
+        self._config = kwargs
+        """ This dictionary holds the configuration arguments that were used to
+            initialize the :class:`Validator` instance except the
+            ``error_handler``. """
+
+    @classmethod
+    def clear_caches(cls):
+        """ Purge the cache of known valid schemas. """
+        cls._valid_schemas.clear()
+
+    def _error(self, *args):
+        """ Creates and adds one or multiple errors.
+
+        :param args: Accepts different argument signatures.
+
+                     *1. Bulk addition of errors:*
+
+                     - :term:`iterable` of
+                       :class:`~cerberus.errors.ValidationError`-instances
+
+                     The errors will be added to
+                     :attr:`~cerberus.Validator._errors`.
+
+                     *2. Custom error:*
+
+                     - the invalid field's name
+
+                     - the error message
+
+                     A custom error containing the message will be created and
+                     added to :attr:`~cerberus.Validator._errors`.
+                     There will however be less information contained in the
+                     error (no reference to the violated rule and its
+                     constraint).
+
+                     *3. Defined error:*
+
+                     - the invalid field's name
+
+                     - the error-reference, see :mod:`cerberus.errors`
+
+                     - arbitrary, supplemental information about the error
+
+                     A :class:`~cerberus.errors.ValidationError` instance will
+                     be created and added to
+                     :attr:`~cerberus.Validator._errors`.
+        """
+        if len(args) == 1:
+            self._errors.extend(args[0])
+            self._errors.sort()
+            for error in args[0]:
+                self.document_error_tree += error
+                self.schema_error_tree += error
+                self.error_handler.emit(error)
+        elif len(args) == 2 and isinstance(args[1], _str_type):
+            self._error(args[0], errors.CUSTOM, args[1])
+        elif len(args) >= 2:
+            field = args[0]
+            code = args[1].code
+            rule = args[1].rule
+            info = args[2:]
+
+            document_path = self.document_path + (field, )
+
+            schema_path = self.schema_path
+            if code != errors.UNKNOWN_FIELD.code and rule is not None:
+                schema_path += (field, rule)
+
+            if not rule:
+                constraint = None
+            else:
+                field_definitions = self._resolve_rules_set(self.schema[field])
+                if rule == 'nullable':
+                    constraint = field_definitions.get(rule, False)
+                else:
+                    constraint = field_definitions[rule]
+
+            value = self.document.get(field)
+
+            self.recent_error = errors.ValidationError(
+                document_path, schema_path, code, rule, constraint, value, info
+            )
+            self._error([self.recent_error])
+
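+    # The three accepted signatures, sketched as they would appear inside a
+    # rule method (the field name, message and constraint value are arbitrary):
+    #
+    #     self._error(child_validator._errors)            # 1. bulk addition
+    #     self._error('age', "must be an even number")    # 2. custom error
+    #     self._error('age', errors.UNALLOWED_VALUE, 42)  # 3. defined error
+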
+    def _get_child_validator(self, document_crumb=None, schema_crumb=None,
+                             **kwargs):
+        """ Creates a new instance of Validator-(sub-)class. All initial
+            parameters of the parent are passed to the initialization, unless
+            a parameter is given as an explicit *keyword*-parameter.
+
+        :param document_crumb: Extends the
+                               :attr:`~cerberus.Validator.document_path`
+                               of the child-validator.
+        :type document_crumb: :class:`tuple` or :term:`hashable`
+        :param schema_crumb: Extends the
+                             :attr:`~cerberus.Validator.schema_path`
+                             of the child-validator.
+        :type schema_crumb: :class:`tuple` or hashable
+        :param kwargs: Overriding keyword-arguments for initialization.
+        :type kwargs: :class:`dict`
+
+        :return: an instance of ``self.__class__``
+        """
+        child_config = self._config.copy()
+        child_config.update(kwargs)
+        if not self.is_child:
+            child_config['is_child'] = True
+            child_config['error_handler'] = toy_error_handler
+            child_config['root_allow_unknown'] = self.allow_unknown
+            child_config['root_document'] = self.document
+            child_config['root_schema'] = self.schema
+
+        child_validator = self.__class__(**child_config)
+
+        if document_crumb is None:
+            child_validator.document_path = self.document_path
+        else:
+            if not isinstance(document_crumb, tuple):
+                document_crumb = (document_crumb, )
+            child_validator.document_path = self.document_path + document_crumb
+
+        if schema_crumb is None:
+            child_validator.schema_path = self.schema_path
+        else:
+            if not isinstance(schema_crumb, tuple):
+                schema_crumb = (schema_crumb, )
+            child_validator.schema_path = self.schema_path + schema_crumb
+
+        return child_validator
+
+    def __get_rule_handler(self, domain, rule):
+        methodname = '_{0}_{1}'.format(domain, rule.replace(' ', '_'))
+        result = getattr(self, methodname, None)
+        if result is None:
+            raise RuntimeError("There's no handler for '{}' in the '{}' "
+                               "domain.".format(rule, domain))
+        return result
+
+    def _drop_nodes_from_errorpaths(self, _errors, dp_items, sp_items):
+        """ Removes nodes by index from an errorpath, relatively to the
+            basepaths of self.
+
+        :param errors: A list of :class:`errors.ValidationError` instances.
+        :param dp_items: A list of integers, pointing at the nodes to drop from
+                         the :attr:`document_path`.
+        :param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
+        """
+        dp_basedepth = len(self.document_path)
+        sp_basedepth = len(self.schema_path)
+        for error in _errors:
+            for i in sorted(dp_items, reverse=True):
+                error.document_path = \
+                    drop_item_from_tuple(error.document_path, dp_basedepth + i)
+            for i in sorted(sp_items, reverse=True):
+                error.schema_path = \
+                    drop_item_from_tuple(error.schema_path, sp_basedepth + i)
+            if error.child_errors:
+                self._drop_nodes_from_errorpaths(error.child_errors,
+                                                 dp_items, sp_items)
+
+    def _lookup_field(self, path):
+        """ Searches for a field as defined by path. This method is used by the
+            ``dependency`` evaluation logic.
+
+        :param path: Path elements are separated by a ``.``. A leading ``^``
+                     indicates that the path relates to the document root,
+                     otherwise it relates to the currently evaluated document,
+                     which is possibly a subdocument.
+                     The sequence ``^^`` at the start will be interpreted as a
+                     literal ``^``.
+        :type path: :class:`str`
+        :returns: Either the found field name and its value or :obj:`None` for
+                  both.
+        :rtype: A two-value :class:`tuple`.
+        """
+        if path.startswith('^'):
+            path = path[1:]
+            context = self.document if path.startswith('^') \
+                else self.root_document
+        else:
+            context = self.document
+
+        parts = path.split('.')
+        for part in parts:
+            if part not in context:
+                return None, None
+            context = context.get(part)
+
+        return parts[-1], context
+
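+    # Resolution sketch for the path syntax described above (field names are
+    # arbitrary); each call returns a (field_name, value) pair or (None, None):
+    #
+    #     self._lookup_field('a.b')    # 'b' inside the currently evaluated document
+    #     self._lookup_field('^a.b')   # 'b' looked up from the document root
+    #     self._lookup_field('^^a')    # the literal key '^a' in the current document
+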
+    def _resolve_rules_set(self, rules_set):
+        if isinstance(rules_set, Mapping):
+            return rules_set
+        elif isinstance(rules_set, _str_type):
+            return self.rules_set_registry.get(rules_set)
+        return None
+
+    def _resolve_schema(self, schema):
+        if isinstance(schema, Mapping):
+            return schema
+        elif isinstance(schema, _str_type):
+            return self.schema_registry.get(schema)
+        return None
+
+    # Properties
+
+    @property
+    def allow_unknown(self):
+        """ If ``True`` unknown fields that are not defined in the schema will
+            be ignored. If a mapping with a validation schema is given, any
+            undefined field will be validated against its rules.
+            Also see :ref:`allowing-the-unknown`.
+            Type: :class:`bool` or any :term:`mapping` """
+        return self._config.get('allow_unknown', False)
+
+    @allow_unknown.setter
+    def allow_unknown(self, value):
+        if not (self.is_child or isinstance(value, (bool, DefinitionSchema))):
+            DefinitionSchema(self, {'allow_unknown': value})
+        self._config['allow_unknown'] = value
+
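+    # e.g. (illustrative): Validator(schema, allow_unknown={'type': 'string'})
+    # keeps undefined fields but validates each of their values as a string.
+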
+    @property
+    def errors(self):
+        """ The errors of the last processing formatted by the handler that is
+            bound to :attr:`~cerberus.Validator.error_handler`. """
+        return self.error_handler(self._errors)
+
+    @property
+    def ignore_none_values(self):
+        """ Whether to not process :obj:`None`-values in a document or not.
+            Type: :class:`bool` """
+        return self._config.get('ignore_none_values', False)
+
+    @ignore_none_values.setter
+    def ignore_none_values(self, value):
+        self._config['ignore_none_values'] = value
+
+    @property
+    def is_child(self):
+        """ ``True`` for child-validators obtained with
+        :meth:`~cerberus.Validator._get_child_validator`.
+        Type: :class:`bool` """
+        return self._config.get('is_child', False)
+
+    @property
+    def _is_normalized(self):
+        """ ``True`` if the document is already normalized. """
+        return self._config.get('_is_normalized', False)
+
+    @_is_normalized.setter
+    def _is_normalized(self, value):
+        self._config['_is_normalized'] = value
+
+    @property
+    def purge_unknown(self):
+        """ If ``True`` unknown fields will be deleted from the document
+            unless a validation is called with disabled normalization.
+            Also see :ref:`purging-unknown-fields`. Type: :class:`bool` """
+        return self._config.get('purge_unknown', False)
+
+    @purge_unknown.setter
+    def purge_unknown(self, value):
+        self._config['purge_unknown'] = value
+
+    @property
+    def root_allow_unknown(self):
+        """ The :attr:`~cerberus.Validator.allow_unknown` attribute of the
+            first level ancestor of a child validator. """
+        return self._config.get('root_allow_unknown', self.allow_unknown)
+
+    @property
+    def root_document(self):
+        """ The :attr:`~cerberus.Validator.document` attribute of the
+            first level ancestor of a child validator. """
+        return self._config.get('root_document', self.document)
+
+    @property
+    def rules_set_registry(self):
+        """ The registry that holds referenced rules sets.
+            Type: :class:`~cerberus.Registry` """
+        return self._config.get('rules_set_registry', rules_set_registry)
+
+    @rules_set_registry.setter
+    def rules_set_registry(self, registry):
+        self._config['rules_set_registry'] = registry
+
+    @property
+    def root_schema(self):
+        """ The :attr:`~cerberus.Validator.schema` attribute of the
+            first level ancestor of a child validator. """
+        return self._config.get('root_schema', self.schema)
+
+    @property
+    def schema(self):
+        """ The validation schema of a validator. When a schema is passed to
+            a method, it replaces this attribute.
+            Type: any :term:`mapping` or :obj:`None` """
+        return self._schema
+
+    @schema.setter
+    def schema(self, schema):
+        if schema is None:
+            self._schema = None
+        elif self.is_child or isinstance(schema, DefinitionSchema):
+            self._schema = schema
+        else:
+            self._schema = DefinitionSchema(self, schema)
+
+    @property
+    def schema_registry(self):
+        """ The registry that holds referenced schemas.
+            Type: :class:`~cerberus.Registry` """
+        return self._config.get('schema_registry', schema_registry)
+
+    @schema_registry.setter
+    def schema_registry(self, registry):
+        self._config['schema_registry'] = registry
+
+    # FIXME the returned method has the correct docstring, but doesn't appear
+    #       in the API docs
+    @readonly_classproperty
+    def types(cls):
+        """ The constraints that can be used for the 'type' rule.
+            Type: A tuple of strings. """
+        redundant_types = \
+            set(cls.types_mapping) & set(cls._types_from_methods)
+        if redundant_types:
+            warn("These types are defined both with a method and in the"
+                 "'types_mapping' property of this validator: %s"
+                 % redundant_types)
+
+        return tuple(cls.types_mapping) + cls._types_from_methods
+
+    # Document processing
+
+    def __init_processing(self, document, schema=None):
+        self._errors = errors.ErrorList()
+        self.recent_error = None
+        self.document_error_tree = errors.DocumentErrorTree()
+        self.schema_error_tree = errors.SchemaErrorTree()
+        self.document = copy(document)
+        if not self.is_child:
+            self._is_normalized = False
+
+        if schema is not None:
+            self.schema = DefinitionSchema(self, schema)
+        elif self.schema is None:
+            if isinstance(self.allow_unknown, Mapping):
+                self._schema = {}
+            else:
+                raise SchemaError(errors.SCHEMA_ERROR_MISSING)
+        if document is None:
+            raise DocumentError(errors.DOCUMENT_MISSING)
+        if not isinstance(document, Mapping):
+            raise DocumentError(
+                errors.DOCUMENT_FORMAT.format(document))
+        self.error_handler.start(self)
+
+    def _drop_remaining_rules(self, *rules):
+        """ Drops rules from the queue of the rules that still need to be
+            evaluated for the currently processed field.
+            If no arguments are given, the whole queue is emptied.
+        """
+        if rules:
+            for rule in rules:
+                try:
+                    self._remaining_rules.remove(rule)
+                except ValueError:
+                    pass
+        else:
+            self._remaining_rules = []
+
+    # # Normalizing
+
+    def normalized(self, document, schema=None, always_return_document=False):
+        """ Returns the document normalized according to the specified rules
+        of a schema.
+
+        :param document: The document to normalize.
+        :type document: any :term:`mapping`
+        :param schema: The validation schema. Defaults to :obj:`None`. If not
+                       provided here, the schema must have been provided at
+                       class instantiation.
+        :type schema: any :term:`mapping`
+        :param always_return_document: Return the document, even if an error
+                                       occurred. Defaults to: ``False``.
+        :type always_return_document: :class:`bool`
+        :return: A normalized copy of the provided mapping or :obj:`None` if an
+                 error occurred during normalization.
+        """
+        self.__init_processing(document, schema)
+        self.__normalize_mapping(self.document, self.schema)
+        self.error_handler.end(self)
+        if self._errors and not always_return_document:
+            return None
+        else:
+            return self.document
+
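+    # Usage sketch (the schema and documents are arbitrary examples):
+    #
+    #     v = Validator({'amount': {'coerce': int, 'default': 0}})
+    #     v.normalized({'amount': '3'})   # -> {'amount': 3}
+    #     v.normalized({})                # -> {'amount': 0}
+    #     v.normalized({'amount': 'x'})   # -> None; v.errors reports the failed coercion
+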
+    def __normalize_mapping(self, mapping, schema):
+        if isinstance(schema, _str_type):
+            schema = self._resolve_schema(schema)
+        schema = schema.copy()
+        for field in schema:
+            schema[field] = self._resolve_rules_set(schema[field])
+
+        self.__normalize_rename_fields(mapping, schema)
+        if self.purge_unknown and not self.allow_unknown:
+            self._normalize_purge_unknown(mapping, schema)
+        # Check `readonly` fields before applying default values because
+        # a field's schema definition might contain both `readonly` and
+        # `default`.
+        self.__validate_readonly_fields(mapping, schema)
+        self.__normalize_default_fields(mapping, schema)
+        self._normalize_coerce(mapping, schema)
+        self.__normalize_containers(mapping, schema)
+        self._is_normalized = True
+        return mapping
+
+    def _normalize_coerce(self, mapping, schema):
+        """ {'oneof': [
+                {'type': 'callable'},
+                {'type': 'list',
+                 'schema': {'oneof': [{'type': 'callable'},
+                                      {'type': 'string'}]}},
+                {'type': 'string'}
+                ]} """
+
+        error = errors.COERCION_FAILED
+        for field in mapping:
+            if field in schema and 'coerce' in schema[field]:
+                mapping[field] = self.__normalize_coerce(
+                    schema[field]['coerce'], field, mapping[field],
+                    schema[field].get('nullable', False), error)
+            elif isinstance(self.allow_unknown, Mapping) and \
+                    'coerce' in self.allow_unknown:
+                mapping[field] = self.__normalize_coerce(
+                    self.allow_unknown['coerce'], field, mapping[field],
+                    self.allow_unknown.get('nullable', False), error)
+
+    def __normalize_coerce(self, processor, field, value, nullable, error):
+        if isinstance(processor, _str_type):
+            processor = self.__get_rule_handler('normalize_coerce', processor)
+
+        elif isinstance(processor, Iterable):
+            result = value
+            for p in processor:
+                result = self.__normalize_coerce(p, field, result,
+                                                 nullable, error)
+                if errors.COERCION_FAILED in \
+                    self.document_error_tree.fetch_errors_from(
+                        self.document_path + (field,)):
+                    break
+            return result
+
+        try:
+            return processor(value)
+        except Exception as e:
+            if not nullable and e is not TypeError:
+                self._error(field, error, str(e))
+            return value
+
+    def __normalize_containers(self, mapping, schema):
+        for field in mapping:
+            if field not in schema:
+                continue
+            # TODO: This check conflates validation and normalization
+            if isinstance(mapping[field], Mapping):
+                if 'keyschema' in schema[field]:
+                    self.__normalize_mapping_per_keyschema(
+                        field, mapping, schema[field]['keyschema'])
+                if 'valueschema' in schema[field]:
+                    self.__normalize_mapping_per_valueschema(
+                        field, mapping, schema[field]['valueschema'])
+                if set(schema[field]) & set(('allow_unknown', 'purge_unknown',
+                                             'schema')):
+                    try:
+                        self.__normalize_mapping_per_schema(
+                            field, mapping, schema)
+                    except _SchemaRuleTypeError:
+                        pass
+            elif isinstance(mapping[field], _str_type):
+                continue
+            elif isinstance(mapping[field], Sequence) and \
+                    'schema' in schema[field]:
+                self.__normalize_sequence(field, mapping, schema)
+
+    def __normalize_mapping_per_keyschema(self, field, mapping, property_rules):
+        schema = dict(((k, property_rules) for k in mapping[field]))
+        document = dict(((k, k) for k in mapping[field]))
+        validator = self._get_child_validator(
+            document_crumb=field, schema_crumb=(field, 'keyschema'),
+            schema=schema)
+        result = validator.normalized(document, always_return_document=True)
+        if validator._errors:
+            self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4])
+            self._error(validator._errors)
+        for k in result:
+            if k == result[k]:
+                continue
+            if result[k] in mapping[field]:
+                warn("Normalizing keys of {path}: {key} already exists, "
+                     "its value is replaced."
+                     .format(path='.'.join(self.document_path + (field,)),
+                             key=k))
+                mapping[field][result[k]] = mapping[field][k]
+            else:
+                mapping[field][result[k]] = mapping[field][k]
+                del mapping[field][k]
+
+    def __normalize_mapping_per_valueschema(self, field, mapping, value_rules):
+        schema = dict(((k, value_rules) for k in mapping[field]))
+        validator = self._get_child_validator(
+            document_crumb=field, schema_crumb=(field, 'valueschema'),
+            schema=schema)
+        mapping[field] = validator.normalized(mapping[field],
+                                              always_return_document=True)
+        if validator._errors:
+            self._drop_nodes_from_errorpaths(validator._errors, [], [2])
+            self._error(validator._errors)
+
+    def __normalize_mapping_per_schema(self, field, mapping, schema):
+        validator = self._get_child_validator(
+            document_crumb=field, schema_crumb=(field, 'schema'),
+            schema=schema[field].get('schema', {}),
+            allow_unknown=schema[field].get('allow_unknown', self.allow_unknown),  # noqa: E501
+            purge_unknown=schema[field].get('purge_unknown', self.purge_unknown))  # noqa: E501
+        value_type = type(mapping[field])
+        result_value = validator.normalized(mapping[field],
+                                            always_return_document=True)
+        mapping[field] = value_type(result_value)
+        if validator._errors:
+            self._error(validator._errors)
+
+    def __normalize_sequence(self, field, mapping, schema):
+        schema = dict(((k, schema[field]['schema'])
+                       for k in range(len(mapping[field]))))
+        document = dict((k, v) for k, v in enumerate(mapping[field]))
+        validator = self._get_child_validator(
+            document_crumb=field, schema_crumb=(field, 'schema'),
+            schema=schema)
+        value_type = type(mapping[field])
+        result = validator.normalized(document, always_return_document=True)
+        mapping[field] = value_type(result.values())
+        if validator._errors:
+            self._drop_nodes_from_errorpaths(validator._errors, [], [2])
+            self._error(validator._errors)
+
+    @staticmethod
+    def _normalize_purge_unknown(mapping, schema):
+        """ {'type': 'boolean'} """
+        for field in tuple(mapping):
+            if field not in schema:
+                del mapping[field]
+        return mapping
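+
+    # Sketch of the 'purge_unknown' behaviour implemented above, with a
+    # hypothetical one-field schema:
+    #
+    #   >>> v = Validator({'name': {'type': 'string'}}, purge_unknown=True)
+    #   >>> v.normalized({'name': 'x', 'stray': 1})
+    #   {'name': 'x'}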
+
+    def __normalize_rename_fields(self, mapping, schema):
+        for field in tuple(mapping):
+            if field in schema:
+                self._normalize_rename(mapping, schema, field)
+                self._normalize_rename_handler(mapping, schema, field)
+            elif isinstance(self.allow_unknown, Mapping) and \
+                    'rename_handler' in self.allow_unknown:
+                self._normalize_rename_handler(
+                    mapping, {field: self.allow_unknown}, field)
+        return mapping
+
+    def _normalize_rename(self, mapping, schema, field):
+        """ {'type': 'hashable'} """
+        if 'rename' in schema[field]:
+            mapping[schema[field]['rename']] = mapping[field]
+            del mapping[field]
+
+    def _normalize_rename_handler(self, mapping, schema, field):
+        """ {'oneof': [
+                {'type': 'callable'},
+                {'type': 'list',
+                 'schema': {'oneof': [{'type': 'callable'},
+                                      {'type': 'string'}]}},
+                {'type': 'string'}
+                ]} """
+        if 'rename_handler' not in schema[field]:
+            return
+        new_name = self.__normalize_coerce(
+            schema[field]['rename_handler'], field, field,
+            False, errors.RENAMING_FAILED)
+        if new_name != field:
+            mapping[new_name] = mapping[field]
+            del mapping[field]
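+
+    # Sketch of the 'rename' rule handled here (and 'rename_handler' above);
+    # field names are illustrative:
+    #
+    #   >>> v = Validator({'old': {'rename': 'new'}})
+    #   >>> v.normalized({'old': 1})
+    #   {'new': 1}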
+
+    def __validate_readonly_fields(self, mapping, schema):
+        for field in (x for x in schema if x in mapping and
+                      self._resolve_rules_set(schema[x]).get('readonly')):
+            self._validate_readonly(schema[field]['readonly'], field,
+                                    mapping[field])
+
+    def __normalize_default_fields(self, mapping, schema):
+        fields = [x for x in schema if x not in mapping or
+                  mapping[x] is None and not schema[x].get('nullable', False)]
+        try:
+            fields_with_default = [x for x in fields if 'default' in schema[x]]
+        except TypeError:
+            raise _SchemaRuleTypeError
+        for field in fields_with_default:
+            self._normalize_default(mapping, schema, field)
+
+        known_fields_states = set()
+        fields = [x for x in fields if 'default_setter' in schema[x]]
+        while fields:
+            field = fields.pop(0)
+            try:
+                self._normalize_default_setter(mapping, schema, field)
+            except KeyError:
+                fields.append(field)
+            except Exception as e:
+                self._error(field, errors.SETTING_DEFAULT_FAILED, str(e))
+
+            fields_state = tuple(fields)
+            if fields_state in known_fields_states:
+                for field in fields:
+                    self._error(field, errors.SETTING_DEFAULT_FAILED,
+                                'Circular dependencies of default setters.')
+                break
+            else:
+                known_fields_states.add(fields_state)
+
+    def _normalize_default(self, mapping, schema, field):
+        """ {'nullable': True} """
+        mapping[field] = schema[field]['default']
+
+    def _normalize_default_setter(self, mapping, schema, field):
+        """ {'oneof': [
+                {'type': 'callable'},
+                {'type': 'string'}
+                ]} """
+        if 'default_setter' in schema[field]:
+            setter = schema[field]['default_setter']
+            if isinstance(setter, _str_type):
+                setter = self.__get_rule_handler('normalize_default_setter',
+                                                 setter)
+            mapping[field] = setter(mapping)
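+
+    # Sketch of 'default' and 'default_setter' as applied above; the schema
+    # is illustrative:
+    #
+    #   >>> schema = {'amount': {'type': 'integer', 'default': 1},
+    #   ...           'kind': {'default_setter': lambda doc: 'plain'}}
+    #   >>> Validator(schema).normalized({})
+    #   {'amount': 1, 'kind': 'plain'}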
+
+    # # Validating
+
+    def validate(self, document, schema=None, update=False, normalize=True):
+        """ Normalizes and validates a mapping against a validation-schema of
+        defined rules.
+
+        :param document: The document to validate.
+        :type document: any :term:`mapping`
+        :param schema: The validation schema. Defaults to :obj:`None`. If not
+                       provided here, the schema must have been provided at
+                       class instantiation.
+        :type schema: any :term:`mapping`
+        :param update: If ``True``, required fields won't be checked.
+        :type update: :class:`bool`
+        :param normalize: If ``True``, normalize the document before validation.
+        :type normalize: :class:`bool`
+
+        :return: ``True`` if validation succeeds, otherwise ``False``. Check
+                 the :attr:`errors` property for a list of processing errors.
+        :rtype: :class:`bool`
+        """
+        self.update = update
+        self._unrequired_by_excludes = set()
+
+        self.__init_processing(document, schema)
+        if normalize:
+            self.__normalize_mapping(self.document, self.schema)
+
+        for field in self.document:
+            if self.ignore_none_values and self.document[field] is None:
+                continue
+            definitions = self.schema.get(field)
+            if definitions is not None:
+                self.__validate_definitions(definitions, field)
+            else:
+                self.__validate_unknown_fields(field)
+
+        if not self.update:
+            self.__validate_required_fields(self.document)
+
+        self.error_handler.end(self)
+
+        return not bool(self._errors)
+
+    __call__ = validate
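+
+    # Typical use of validate() / __call__, with an illustrative schema:
+    #
+    #   >>> v = Validator({'name': {'type': 'string', 'required': True}})
+    #   >>> v.validate({'name': 'john'})
+    #   True
+    #   >>> v({'name': 42})
+    #   False
+    #   >>> v.errors
+    #   {'name': ['must be of string type']}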
+
+    def validated(self, *args, **kwargs):
+        """ Wrapper around :meth:`~cerberus.Validator.validate` that returns
+            the normalized and validated document or :obj:`None` if validation
+            failed. """
+        always_return_document = kwargs.pop('always_return_document', False)
+        self.validate(*args, **kwargs)
+        if self._errors and not always_return_document:
+            return None
+        else:
+            return self.document
+
+    def __validate_unknown_fields(self, field):
+        if self.allow_unknown:
+            value = self.document[field]
+            if isinstance(self.allow_unknown, (Mapping, _str_type)):
+                # validate that the unknown field matches the schema
+                # defined for unknown fields
+                schema_crumb = 'allow_unknown' if self.is_child \
+                    else '__allow_unknown__'
+                validator = self._get_child_validator(
+                    schema_crumb=schema_crumb,
+                    schema={field: self.allow_unknown})
+                if not validator({field: value}, normalize=False):
+                    self._error(validator._errors)
+        else:
+            self._error(field, errors.UNKNOWN_FIELD)
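+
+    # Sketch of how 'allow_unknown' is consulted above; the schemas are
+    # illustrative:
+    #
+    #   >>> v = Validator({}, allow_unknown={'type': 'string'})
+    #   >>> v.validate({'anything': 'works'}), v.validate({'anything': 1})
+    #   (True, False)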
+
+    def __validate_definitions(self, definitions, field):
+        """ Validate a field's value against its defined rules. """
+
+        def validate_rule(rule):
+            validator = self.__get_rule_handler('validate', rule)
+            return validator(definitions.get(rule, None), field, value)
+
+        definitions = self._resolve_rules_set(definitions)
+        value = self.document[field]
+
+        rules_queue = [x for x in self.priority_validations
+                       if x in definitions or x in self.mandatory_validations]
+        rules_queue.extend(x for x in self.mandatory_validations
+                           if x not in rules_queue)
+        rules_queue.extend(x for x in definitions
+                           if x not in rules_queue and
+                           x not in self.normalization_rules and
+                           x not in ('allow_unknown', 'required'))
+        self._remaining_rules = rules_queue
+
+        while self._remaining_rules:
+            rule = self._remaining_rules.pop(0)
+            try:
+                result = validate_rule(rule)
+                # TODO remove on next breaking release
+                if result:
+                    break
+            except _SchemaRuleTypeError:
+                break
+
+        self._drop_remaining_rules()
+
+    # Remember to keep the validation methods below this line
+    # sorted alphabetically
+
+    _validate_allow_unknown = dummy_for_rule_validation(
+        """ {'oneof': [{'type': 'boolean'},
+                       {'type': ['dict', 'string'],
+                        'validator': 'bulk_schema'}]} """)
+
+    def _validate_allowed(self, allowed_values, field, value):
+        """ {'type': 'list'} """
+        if isinstance(value, Iterable) and not isinstance(value, _str_type):
+            unallowed = set(value) - set(allowed_values)
+            if unallowed:
+                self._error(field, errors.UNALLOWED_VALUES, list(unallowed))
+        else:
+            if value not in allowed_values:
+                self._error(field, errors.UNALLOWED_VALUE, value)
+
+    def _validate_dependencies(self, dependencies, field, value):
+        """ {'type': ('dict', 'hashable', 'list'),
+             'validator': 'dependencies'} """
+        if isinstance(dependencies, _str_type):
+            dependencies = (dependencies,)
+
+        if isinstance(dependencies, Sequence):
+            self.__validate_dependencies_sequence(dependencies, field)
+        elif isinstance(dependencies, Mapping):
+            self.__validate_dependencies_mapping(dependencies, field)
+
+        if self.document_error_tree.fetch_node_from(
+                self.schema_path + (field, 'dependencies')) is not None:
+            return True
+
+    def __validate_dependencies_mapping(self, dependencies, field):
+        validated_dependencies_counter = 0
+        error_info = {}
+        for dependency_name, dependency_values in dependencies.items():
+            if (not isinstance(dependency_values, Sequence) or
+                    isinstance(dependency_values, _str_type)):
+                dependency_values = [dependency_values]
+
+            wanted_field, wanted_field_value = \
+                self._lookup_field(dependency_name)
+            if wanted_field_value in dependency_values:
+                validated_dependencies_counter += 1
+            else:
+                error_info.update({dependency_name: wanted_field_value})
+
+        if validated_dependencies_counter != len(dependencies):
+            self._error(field, errors.DEPENDENCIES_FIELD_VALUE, error_info)
+
+    def __validate_dependencies_sequence(self, dependencies, field):
+        for dependency in dependencies:
+            if self._lookup_field(dependency)[0] is None:
+                self._error(field, errors.DEPENDENCIES_FIELD, dependency)
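+
+    # Sketch of the 'dependencies' rule in its mapping form; field names and
+    # values are illustrative:
+    #
+    #   >>> schema = {'field1': {'required': False},
+    #   ...           'field2': {'required': False,
+    #   ...                      'dependencies': {'field1': ['one', 'two']}}}
+    #   >>> Validator(schema).validate({'field1': 'one', 'field2': 7})
+    #   True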
+
+    def _validate_empty(self, empty, field, value):
+        """ {'type': 'boolean'} """
+        if isinstance(value, Iterable) and len(value) == 0:
+            self._drop_remaining_rules(
+                'allowed', 'forbidden', 'items', 'minlength', 'maxlength',
+                'regex', 'validator')
+            if not empty:
+                self._error(field, errors.EMPTY_NOT_ALLOWED)
+
+    def _validate_excludes(self, excludes, field, value):
+        """ {'type': ('hashable', 'list'),
+             'schema': {'type': 'hashable'}} """
+        if isinstance(excludes, Hashable):
+            excludes = [excludes]
+
+        # Save required field to be checked later
+        if 'required' in self.schema[field] and self.schema[field]['required']:
+            self._unrequired_by_excludes.add(field)
+        for exclude in excludes:
+            if (exclude in self.schema and
+                'required' in self.schema[exclude] and
+                    self.schema[exclude]['required']):
+
+                self._unrequired_by_excludes.add(exclude)
+
+        if [True for key in excludes if key in self.document]:
+            # Wrap each field in the `excludes` list in quotes
+            exclusion_str = ', '.join("'{0}'"
+                                      .format(word) for word in excludes)
+            self._error(field, errors.EXCLUDES_FIELD, exclusion_str)
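+
+    # Sketch of 'excludes' declaring mutually exclusive fields; names are
+    # illustrative:
+    #
+    #   >>> schema = {'this': {'excludes': 'that'},
+    #   ...           'that': {'excludes': 'this'}}
+    #   >>> Validator(schema).validate({'this': 1, 'that': 2})
+    #   False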
+
+    def _validate_forbidden(self, forbidden_values, field, value):
+        """ {'type': 'list'} """
+        if isinstance(value, _str_type):
+            if value in forbidden_values:
+                self._error(field, errors.FORBIDDEN_VALUE, value)
+        elif isinstance(value, Sequence):
+            forbidden = set(value) & set(forbidden_values)
+            if forbidden:
+                self._error(field, errors.FORBIDDEN_VALUES, list(forbidden))
+        elif isinstance(value, int):
+            if value in forbidden_values:
+                self._error(field, errors.FORBIDDEN_VALUE, value)
+
+    def _validate_items(self, items, field, values):
+        """ {'type': 'list', 'validator': 'items'} """
+        if len(items) != len(values):
+            self._error(field, errors.ITEMS_LENGTH, len(items), len(values))
+        else:
+            schema = dict((i, definition) for i, definition in enumerate(items))  # noqa: E501
+            validator = self._get_child_validator(document_crumb=field,
+                                                  schema_crumb=(field, 'items'),  # noqa: E501
+                                                  schema=schema)
+            if not validator(dict((i, value) for i, value in enumerate(values)),
+                             update=self.update, normalize=False):
+                self._error(field, errors.BAD_ITEMS, validator._errors)
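+
+    # Sketch of the 'items' rule validated above, with an illustrative
+    # two-element schema:
+    #
+    #   >>> schema = {'pair': {'type': 'list',
+    #   ...                    'items': [{'type': 'string'},
+    #   ...                              {'type': 'integer'}]}}
+    #   >>> Validator(schema).validate({'pair': ['a', 1]})
+    #   True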
+
+    def __validate_logical(self, operator, definitions, field, value):
+        """ Validates value against all definitions and logs errors according
+            to the operator. """
+        valid_counter = 0
+        _errors = errors.ErrorList()
+
+        for i, definition in enumerate(definitions):
+            schema = {field: definition.copy()}
+            for rule in ('allow_unknown', 'type'):
+                if rule not in schema[field] and rule in self.schema[field]:
+                    schema[field][rule] = self.schema[field][rule]
+            if 'allow_unknown' not in schema[field]:
+                schema[field]['allow_unknown'] = self.allow_unknown
+
+            validator = self._get_child_validator(
+                schema_crumb=(field, operator, i),
+                schema=schema, allow_unknown=True)
+            if validator(self.document, update=self.update, normalize=False):
+                valid_counter += 1
+            else:
+                self._drop_nodes_from_errorpaths(validator._errors, [], [3])
+                _errors.extend(validator._errors)
+
+        return valid_counter, _errors
+
+    def _validate_anyof(self, definitions, field, value):
+        """ {'type': 'list', 'logical': 'anyof'} """
+        valids, _errors = \
+            self.__validate_logical('anyof', definitions, field, value)
+        if valids < 1:
+            self._error(field, errors.ANYOF, _errors,
+                        valids, len(definitions))
+
+    def _validate_allof(self, definitions, field, value):
+        """ {'type': 'list', 'logical': 'allof'} """
+        valids, _errors = \
+            self.__validate_logical('allof', definitions, field, value)
+        if valids < len(definitions):
+            self._error(field, errors.ALLOF, _errors,
+                        valids, len(definitions))
+
+    def _validate_noneof(self, definitions, field, value):
+        """ {'type': 'list', 'logical': 'noneof'} """
+        valids, _errors = \
+            self.__validate_logical('noneof', definitions, field, value)
+        if valids > 0:
+            self._error(field, errors.NONEOF, _errors,
+                        valids, len(definitions))
+
+    def _validate_oneof(self, definitions, field, value):
+        """ {'type': 'list', 'logical': 'oneof'} """
+        valids, _errors = \
+            self.__validate_logical('oneof', definitions, field, value)
+        if valids != 1:
+            self._error(field, errors.ONEOF, _errors,
+                        valids, len(definitions))
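+
+    # Sketch of the *of-rules dispatched to __validate_logical above; the
+    # constraints are illustrative:
+    #
+    #   >>> schema = {'prop': {'anyof': [{'min': 0, 'max': 10},
+    #   ...                              {'min': 100, 'max': 110}]}}
+    #   >>> v = Validator(schema)
+    #   >>> v.validate({'prop': 5}), v.validate({'prop': 50})
+    #   (True, False)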
+
+    def _validate_max(self, max_value, field, value):
+        """ {'nullable': False } """
+        try:
+            if value > max_value:
+                self._error(field, errors.MAX_VALUE)
+        except TypeError:
+            pass
+
+    def _validate_min(self, min_value, field, value):
+        """ {'nullable': False } """
+        try:
+            if value < min_value:
+                self._error(field, errors.MIN_VALUE)
+        except TypeError:
+            pass
+
+    def _validate_maxlength(self, max_length, field, value):
+        """ {'type': 'integer'} """
+        if isinstance(value, Iterable) and len(value) > max_length:
+            self._error(field, errors.MAX_LENGTH, len(value))
+
+    def _validate_minlength(self, min_length, field, value):
+        """ {'type': 'integer'} """
+        if isinstance(value, Iterable) and len(value) < min_length:
+            self._error(field, errors.MIN_LENGTH, len(value))
+
+    def _validate_nullable(self, nullable, field, value):
+        """ {'type': 'boolean'} """
+        if value is None:
+            if not nullable:
+                self._error(field, errors.NOT_NULLABLE)
+            self._drop_remaining_rules(
+                'empty', 'forbidden', 'items', 'keyschema', 'min', 'max',
+                'minlength', 'maxlength', 'regex', 'schema', 'type',
+                'valueschema')
+
+    def _validate_keyschema(self, schema, field, value):
+        """ {'type': ['dict', 'string'], 'validator': 'bulk_schema',
+            'forbidden': ['rename', 'rename_handler']} """
+        if isinstance(value, Mapping):
+            validator = self._get_child_validator(
+                document_crumb=field,
+                schema_crumb=(field, 'keyschema'),
+                schema=dict(((k, schema) for k in value.keys())))
+            if not validator(dict(((k, k) for k in value.keys())),
+                             normalize=False):
+                self._drop_nodes_from_errorpaths(validator._errors,
+                                                 [], [2, 4])
+                self._error(field, errors.KEYSCHEMA, validator._errors)
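+
+    # Sketch of 'keyschema' (and the analogous 'valueschema' below), with an
+    # illustrative mapping field:
+    #
+    #   >>> schema = {'counts': {'keyschema': {'type': 'string'},
+    #   ...                      'valueschema': {'type': 'integer'}}}
+    #   >>> Validator(schema).validate({'counts': {'apples': 3}})
+    #   True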
+
+    def _validate_readonly(self, readonly, field, value):
+        """ {'type': 'boolean'} """
+        if readonly:
+            if not self._is_normalized:
+                self._error(field, errors.READONLY_FIELD)
+            # If the document was normalized (and has therefore already been
+            # checked for readonly fields), we still have to drop the
+            # remaining rules if an error was filed.
+            has_error = errors.READONLY_FIELD in \
+                self.document_error_tree.fetch_errors_from(
+                    self.document_path + (field,))
+            if self._is_normalized and has_error:
+                self._drop_remaining_rules()
+
+    def _validate_regex(self, pattern, field, value):
+        """ {'type': 'string'} """
+        if not isinstance(value, _str_type):
+            return
+        if not pattern.endswith('$'):
+            pattern += '$'
+        re_obj = re.compile(pattern)
+        if not re_obj.match(value):
+            self._error(field, errors.REGEX_MISMATCH)
+
+    _validate_required = dummy_for_rule_validation(""" {'type': 'boolean'} """)
+
+    def __validate_required_fields(self, document):
+        """ Validates that required fields are not missing.
+
+        :param document: The document being validated.
+        """
+        try:
+            required = set(field for field, definition in self.schema.items()
+                           if self._resolve_rules_set(definition).
+                           get('required') is True)
+        except AttributeError:
+            if self.is_child and self.schema_path[-1] == 'schema':
+                raise _SchemaRuleTypeError
+            else:
+                raise
+        required -= self._unrequired_by_excludes
+        missing = required - set(field for field in document
+                                 if document.get(field) is not None or
+                                 not self.ignore_none_values)
+
+        for field in missing:
+            self._error(field, errors.REQUIRED_FIELD)
+
+        # At least one field from self._unrequired_by_excludes should be
+        # present in the document
+        if self._unrequired_by_excludes:
+            fields = set(field for field in document
+                         if document.get(field) is not None)
+            if self._unrequired_by_excludes.isdisjoint(fields):
+                for field in self._unrequired_by_excludes - fields:
+                    self._error(field, errors.REQUIRED_FIELD)
+
+    def _validate_schema(self, schema, field, value):
+        """ {'type': ['dict', 'string'],
+             'anyof': [{'validator': 'schema'},
+                       {'validator': 'bulk_schema'}]} """
+        if schema is None:
+            return
+
+        if isinstance(value, Sequence) and not isinstance(value, _str_type):
+            self.__validate_schema_sequence(field, schema, value)
+        elif isinstance(value, Mapping):
+            self.__validate_schema_mapping(field, schema, value)
+
+    def __validate_schema_mapping(self, field, schema, value):
+        schema = self._resolve_schema(schema)
+        allow_unknown = self.schema[field].get('allow_unknown',
+                                               self.allow_unknown)
+        validator = self._get_child_validator(document_crumb=field,
+                                              schema_crumb=(field, 'schema'),
+                                              schema=schema,
+                                              allow_unknown=allow_unknown)
+        try:
+            if not validator(value, update=self.update, normalize=False):
+                self._error(field, errors.MAPPING_SCHEMA, validator._errors)
+        except _SchemaRuleTypeError:
+            self._error(field, errors.BAD_TYPE_FOR_SCHEMA)
+            raise
+
+    def __validate_schema_sequence(self, field, schema, value):
+        schema = dict(((i, schema) for i in range(len(value))))
+        validator = self._get_child_validator(
+            document_crumb=field, schema_crumb=(field, 'schema'),
+            schema=schema, allow_unknown=self.allow_unknown)
+        validator(dict(((i, v) for i, v in enumerate(value))),
+                  update=self.update, normalize=False)
+
+        if validator._errors:
+            self._drop_nodes_from_errorpaths(validator._errors, [], [2])
+            self._error(field, errors.SEQUENCE_SCHEMA, validator._errors)
+
+    def _validate_type(self, data_type, field, value):
+        """ {'type': ['string', 'list'],
+             'validator': 'type'} """
+        if not data_type:
+            return
+
+        types = (data_type,) if isinstance(data_type, _str_type) else data_type
+
+        for _type in types:
+            # TODO remove this block on next major release
+            # this implementation still supports custom type validation methods
+            type_definition = self.types_mapping.get(_type)
+            if type_definition is not None:
+                matched = isinstance(value, type_definition.included_types) \
+                    and not isinstance(value, type_definition.excluded_types)
+            else:
+                type_handler = self.__get_rule_handler('validate_type', _type)
+                matched = type_handler(value)
+            if matched:
+                return
+
+            # TODO uncomment this block on next major release
+            #      once the _validate_type_* methods have been removed:
+            # type_definition = self.types_mapping[_type]
+            # if isinstance(value, type_definition.included_types) \
+            #         and not isinstance(value, type_definition.excluded_types):  # noqa 501
+            #     return
+
+        self._error(field, errors.BAD_TYPE)
+        self._drop_remaining_rules()
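+
+    # Sketch of the 'type' rule, which accepts a single type name or a list
+    # of alternatives; the field is illustrative:
+    #
+    #   >>> v = Validator({'quantity': {'type': ['integer', 'float']}})
+    #   >>> v.validate({'quantity': 3}), v.validate({'quantity': 'a lot'})
+    #   (True, False)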
+
+    def _validate_validator(self, validator, field, value):
+        """ {'oneof': [
+                {'type': 'callable'},
+                {'type': 'list',
+                 'schema': {'oneof': [{'type': 'callable'},
+                                      {'type': 'string'}]}},
+                {'type': 'string'}
+                ]} """
+        if isinstance(validator, _str_type):
+            validator = self.__get_rule_handler('validator', validator)
+            validator(field, value)
+        elif isinstance(validator, Iterable):
+            for v in validator:
+                self._validate_validator(v, field, value)
+        else:
+            validator(field, value, self._error)
+
+    def _validate_valueschema(self, schema, field, value):
+        """ {'type': ['dict', 'string'], 'validator': 'bulk_schema',
+            'forbidden': ['rename', 'rename_handler']} """
+        schema_crumb = (field, 'valueschema')
+        if isinstance(value, Mapping):
+            validator = self._get_child_validator(
+                document_crumb=field, schema_crumb=schema_crumb,
+                schema=dict((k, schema) for k in value))
+            validator(value, update=self.update, normalize=False)
+            if validator._errors:
+                self._drop_nodes_from_errorpaths(validator._errors, [], [2])
+                self._error(field, errors.VALUESCHEMA, validator._errors)
+
+
+RULE_SCHEMA_SEPARATOR = \
+    "The rule's arguments are validated against this schema:"
+
+
+class InspectedValidator(type):
+    """ Metaclass for all validators """
+    def __new__(cls, *args):
+        if '__doc__' not in args[2]:
+            args[2].update({'__doc__': args[1][0].__doc__})
+        return super(InspectedValidator, cls).__new__(cls, *args)
+
+    def __init__(cls, *args):
+        def attributes_with_prefix(prefix):
+            return tuple(x.split('_', 2)[-1] for x in dir(cls)
+                         if x.startswith('_' + prefix))
+
+        super(InspectedValidator, cls).__init__(*args)
+
+        cls._types_from_methods, cls.validation_rules = (), {}
+        for attribute in attributes_with_prefix('validate'):
+            # TODO remove inspection of type test methods in next major release
+            if attribute.startswith('type_'):
+                cls._types_from_methods += (attribute[len('type_'):],)
+            else:
+                cls.validation_rules[attribute] = \
+                    cls.__get_rule_schema('_validate_' + attribute)
+
+        # TODO remove on next major release
+        if cls._types_from_methods:
+            warn("Methods for type testing are deprecated, use TypeDefinition "
+                 "and the 'types_mapping'-property of a Validator-instance "
+                 "instead.", DeprecationWarning)
+
+        cls.validators = tuple(x for x in attributes_with_prefix('validator'))
+        x = cls.validation_rules['validator']['oneof']
+        x[1]['schema']['oneof'][1]['allowed'] = x[2]['allowed'] = cls.validators
+
+        for rule in (x for x in cls.mandatory_validations if x != 'nullable'):
+            cls.validation_rules[rule]['required'] = True
+
+        cls.coercers, cls.default_setters, cls.normalization_rules = (), (), {}
+        for attribute in attributes_with_prefix('normalize'):
+            if attribute.startswith('coerce_'):
+                cls.coercers += (attribute[len('coerce_'):],)
+            elif attribute.startswith('default_setter_'):
+                cls.default_setters += (attribute[len('default_setter_'):],)
+            else:
+                cls.normalization_rules[attribute] = \
+                    cls.__get_rule_schema('_normalize_' + attribute)
+
+        for rule in ('coerce', 'rename_handler'):
+            x = cls.normalization_rules[rule]['oneof']
+            x[1]['schema']['oneof'][1]['allowed'] = \
+                x[2]['allowed'] = cls.coercers
+        cls.normalization_rules['default_setter']['oneof'][1]['allowed'] = \
+            cls.default_setters
+
+        cls.rules = {}
+        cls.rules.update(cls.validation_rules)
+        cls.rules.update(cls.normalization_rules)
+
+    def __get_rule_schema(cls, method_name):
+        docstring = getattr(cls, method_name).__doc__
+        if docstring is None:
+            result = {}
+        else:
+            if RULE_SCHEMA_SEPARATOR in docstring:
+                docstring = docstring.split(RULE_SCHEMA_SEPARATOR)[1]
+            try:
+                result = literal_eval(docstring.strip())
+            except Exception:
+                result = {}
+
+        if not result:
+            warn("No validation schema is defined for the arguments of rule "
+                 "'%s'" % method_name.split('_', 2)[-1])
+
+        return result
+
+
+Validator = InspectedValidator('Validator', (BareValidator,), {})

+ 17 - 17
ext/importlib_metadata/__init__.py → mncheck/ext/importlib_metadata/__init__.py

@@ -1,17 +1,17 @@
-from .api import distribution, Distribution, PackageNotFoundError  # noqa: F401
-from .api import metadata, entry_points, version, files, requires
-
-# Import for installation side-effects.
-from . import _hooks  # noqa: F401
-
-
-__all__ = [
-    'entry_points',
-    'files',
-    'metadata',
-    'requires',
-    'version',
-    ]
-
-
-__version__ = version(__name__)
+from .api import distribution, Distribution, PackageNotFoundError  # noqa: F401
+from .api import metadata, entry_points, version, files, requires
+
+# Import for installation side-effects.
+from . import _hooks  # noqa: F401
+
+
+__all__ = [
+    'entry_points',
+    'files',
+    'metadata',
+    'requires',
+    'version',
+    ]
+
+
+__version__ = version(__name__)

+ 154 - 154
ext/importlib_metadata/_hooks.py → mncheck/ext/importlib_metadata/_hooks.py

@@ -1,154 +1,154 @@
-from __future__ import unicode_literals, absolute_import
-
-import re
-import sys
-import zipp
-import itertools
-
-from .api import Distribution
-
-if sys.version_info >= (3,):  # pragma: nocover
-    from contextlib import suppress
-    from pathlib import Path
-else:  # pragma: nocover
-    from contextlib2 import suppress  # noqa
-    from itertools import imap as map  # type: ignore
-    from pathlib2 import Path
-
-    FileNotFoundError = IOError, OSError
-    __metaclass__ = type
-
-
-def install(cls):
-    """Class decorator for installation on sys.meta_path."""
-    sys.meta_path.append(cls)
-    return cls
-
-
-class NullFinder:
-    @staticmethod
-    def find_spec(*args, **kwargs):
-        return None
-
-    # In Python 2, the import system requires finders
-    # to have a find_module() method, but this usage
-    # is deprecated in Python 3 in favor of find_spec().
-    # For the purposes of this finder (i.e. being present
-    # on sys.meta_path but having no other import
-    # system functionality), the two methods are identical.
-    find_module = find_spec
-
-
-@install
-class MetadataPathFinder(NullFinder):
-    """A degenerate finder for distribution packages on the file system.
-
-    This finder supplies only a find_distributions() method for versions
-    of Python that do not have a PathFinder find_distributions().
-    """
-    search_template = r'{pattern}(-.*)?\.(dist|egg)-info'
-
-    @classmethod
-    def find_distributions(cls, name=None, path=None):
-        """Return an iterable of all Distribution instances capable of
-        loading the metadata for packages matching the name
-        (or all names if not supplied) along the paths in the list
-        of directories ``path`` (defaults to sys.path).
-        """
-        if path is None:
-            path = sys.path
-        pattern = '.*' if name is None else re.escape(name)
-        found = cls._search_paths(pattern, path)
-        return map(PathDistribution, found)
-
-    @classmethod
-    def _search_paths(cls, pattern, paths):
-        """
-        Find metadata directories in paths heuristically.
-        """
-        return itertools.chain.from_iterable(
-            cls._search_path(path, pattern)
-            for path in map(Path, paths)
-            )
-
-    @classmethod
-    def _search_path(cls, root, pattern):
-        if not root.is_dir():
-            return ()
-        normalized = pattern.replace('-', '_')
-        return (
-            item
-            for item in root.iterdir()
-            if item.is_dir()
-            and re.match(
-                cls.search_template.format(pattern=normalized),
-                str(item.name),
-                flags=re.IGNORECASE,
-                )
-            )
-
-
-class PathDistribution(Distribution):
-    def __init__(self, path):
-        """Construct a distribution from a path to the metadata directory."""
-        self._path = path
-
-    def read_text(self, filename):
-        with suppress(FileNotFoundError):
-            with self._path.joinpath(filename).open(encoding='utf-8') as fp:
-                return fp.read()
-        return None
-    read_text.__doc__ = Distribution.read_text.__doc__
-
-    def locate_file(self, path):
-        return self._path.parent / path
-
-
-@install
-class WheelMetadataFinder(NullFinder):
-    """A degenerate finder for distribution packages in wheels.
-
-    This finder supplies only a find_distributions() method for versions
-    of Python that do not have a PathFinder find_distributions().
-    """
-    search_template = r'{pattern}(-.*)?\.whl'
-
-    @classmethod
-    def find_distributions(cls, name=None, path=None):
-        """Return an iterable of all Distribution instances capable of
-        loading the metadata for packages matching the name
-        (or all names if not supplied) along the paths in the list
-        of directories ``path`` (defaults to sys.path).
-        """
-        if path is None:
-            path = sys.path
-        pattern = '.*' if name is None else re.escape(name)
-        found = cls._search_paths(pattern, path)
-        return map(WheelDistribution, found)
-
-    @classmethod
-    def _search_paths(cls, pattern, paths):
-        return (
-            path
-            for path in map(Path, paths)
-            if re.match(
-                cls.search_template.format(pattern=pattern),
-                str(path.name),
-                flags=re.IGNORECASE,
-                )
-            )
-
-
-class WheelDistribution(Distribution):
-    def __init__(self, archive):
-        self._archive = zipp.Path(archive)
-        name, version = archive.name.split('-')[0:2]
-        self._dist_info = '{}-{}.dist-info'.format(name, version)
-
-    def read_text(self, filename):
-        target = self._archive / self._dist_info / filename
-        return target.read_text() if target.exists() else None
-    read_text.__doc__ = Distribution.read_text.__doc__
-
-    def locate_file(self, path):
-        return self._archive / path
+from __future__ import unicode_literals, absolute_import
+
+import re
+import sys
+import zipp
+import itertools
+
+from .api import Distribution
+
+if sys.version_info >= (3,):  # pragma: nocover
+    from contextlib import suppress
+    from pathlib import Path
+else:  # pragma: nocover
+    from contextlib2 import suppress  # noqa
+    from itertools import imap as map  # type: ignore
+    from pathlib2 import Path
+
+    FileNotFoundError = IOError, OSError
+    __metaclass__ = type
+
+
+def install(cls):
+    """Class decorator for installation on sys.meta_path."""
+    sys.meta_path.append(cls)
+    return cls
+
+
+class NullFinder:
+    @staticmethod
+    def find_spec(*args, **kwargs):
+        return None
+
+    # In Python 2, the import system requires finders
+    # to have a find_module() method, but this usage
+    # is deprecated in Python 3 in favor of find_spec().
+    # For the purposes of this finder (i.e. being present
+    # on sys.meta_path but having no other import
+    # system functionality), the two methods are identical.
+    find_module = find_spec
+
+
+@install
+class MetadataPathFinder(NullFinder):
+    """A degenerate finder for distribution packages on the file system.
+
+    This finder supplies only a find_distributions() method for versions
+    of Python that do not have a PathFinder find_distributions().
+    """
+    search_template = r'{pattern}(-.*)?\.(dist|egg)-info'
+
+    @classmethod
+    def find_distributions(cls, name=None, path=None):
+        """Return an iterable of all Distribution instances capable of
+        loading the metadata for packages matching the name
+        (or all names if not supplied) along the paths in the list
+        of directories ``path`` (defaults to sys.path).
+        """
+        if path is None:
+            path = sys.path
+        pattern = '.*' if name is None else re.escape(name)
+        found = cls._search_paths(pattern, path)
+        return map(PathDistribution, found)
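+
+    # A sketch of a direct call, assuming a distribution such as 'wheel' is
+    # installed on sys.path:
+    #
+    #   >>> dists = MetadataPathFinder.find_distributions(name='wheel')
+    #   >>> next(iter(dists)).metadata['Name']
+    #   'wheel'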
+
+    @classmethod
+    def _search_paths(cls, pattern, paths):
+        """
+        Find metadata directories in paths heuristically.
+        """
+        return itertools.chain.from_iterable(
+            cls._search_path(path, pattern)
+            for path in map(Path, paths)
+            )
+
+    @classmethod
+    def _search_path(cls, root, pattern):
+        if not root.is_dir():
+            return ()
+        normalized = pattern.replace('-', '_')
+        return (
+            item
+            for item in root.iterdir()
+            if item.is_dir()
+            and re.match(
+                cls.search_template.format(pattern=normalized),
+                str(item.name),
+                flags=re.IGNORECASE,
+                )
+            )
+
+
+class PathDistribution(Distribution):
+    def __init__(self, path):
+        """Construct a distribution from a path to the metadata directory."""
+        self._path = path
+
+    def read_text(self, filename):
+        with suppress(FileNotFoundError):
+            with self._path.joinpath(filename).open(encoding='utf-8') as fp:
+                return fp.read()
+        return None
+    read_text.__doc__ = Distribution.read_text.__doc__
+
+    def locate_file(self, path):
+        return self._path.parent / path
+
+
+@install
+class WheelMetadataFinder(NullFinder):
+    """A degenerate finder for distribution packages in wheels.
+
+    This finder supplies only a find_distributions() method for versions
+    of Python that do not have a PathFinder find_distributions().
+    """
+    search_template = r'{pattern}(-.*)?\.whl'
+
+    @classmethod
+    def find_distributions(cls, name=None, path=None):
+        """Return an iterable of all Distribution instances capable of
+        loading the metadata for packages matching the name
+        (or all names if not supplied) along the paths in the list
+        of directories ``path`` (defaults to sys.path).
+        """
+        if path is None:
+            path = sys.path
+        pattern = '.*' if name is None else re.escape(name)
+        found = cls._search_paths(pattern, path)
+        return map(WheelDistribution, found)
+
+    @classmethod
+    def _search_paths(cls, pattern, paths):
+        return (
+            path
+            for path in map(Path, paths)
+            if re.match(
+                cls.search_template.format(pattern=pattern),
+                str(path.name),
+                flags=re.IGNORECASE,
+                )
+            )
+
+
+class WheelDistribution(Distribution):
+    def __init__(self, archive):
+        self._archive = zipp.Path(archive)
+        name, version = archive.name.split('-')[0:2]
+        self._dist_info = '{}-{}.dist-info'.format(name, version)
+
+    def read_text(self, filename):
+        target = self._archive / self._dist_info / filename
+        return target.read_text() if target.exists() else None
+    read_text.__doc__ = Distribution.read_text.__doc__
+
+    def locate_file(self, path):
+        return self._archive / path

+ 375 - 375
ext/importlib_metadata/api.py → mncheck/ext/importlib_metadata/api.py

@@ -1,375 +1,375 @@
-import io
-import re
-import abc
-import csv
-import sys
-import email
-import operator
-import functools
-import itertools
-import collections
-
-from importlib import import_module
-from itertools import starmap
-
-if sys.version_info > (3,):  # pragma: nocover
-    import pathlib
-    from configparser import ConfigParser
-else:  # pragma: nocover
-    import pathlib2 as pathlib
-    from backports.configparser import ConfigParser
-    from itertools import imap as map  # type: ignore
-
-try:
-    BaseClass = ModuleNotFoundError
-except NameError:                                 # pragma: nocover
-    BaseClass = ImportError                       # type: ignore
-
-
-__metaclass__ = type
-
-
-class PackageNotFoundError(BaseClass):
-    """The package was not found."""
-
-
-class EntryPoint(collections.namedtuple('EntryPointBase', 'name value group')):
-    """An entry point as defined by Python packaging conventions."""
-
-    pattern = re.compile(
-        r'(?P<module>[\w.]+)\s*'
-        r'(:\s*(?P<attr>[\w.]+))?\s*'
-        r'(?P<extras>\[.*\])?\s*$'
-        )
-    """
-    A regular expression describing the syntax for an entry point,
-    which might look like:
-
-        - module
-        - package.module
-        - package.module:attribute
-        - package.module:object.attribute
-        - package.module:attr [extra1, extra2]
-
-    Other combinations are possible as well.
-
-    The expression is lenient about whitespace around the ':',
-    following the attr, and following any extras.
-    """
-
-    def load(self):
-        """Load the entry point from its definition. If only a module
-        is indicated by the value, return that module. Otherwise,
-        return the named object.
-        """
-        match = self.pattern.match(self.value)
-        module = import_module(match.group('module'))
-        attrs = filter(None, match.group('attr').split('.'))
-        return functools.reduce(getattr, attrs, module)
-
-    @property
-    def extras(self):
-        match = self.pattern.match(self.value)
-        return list(re.finditer(r'\w+', match.group('extras') or ''))
-
-    @classmethod
-    def _from_config(cls, config):
-        return [
-            cls(name, value, group)
-            for group in config.sections()
-            for name, value in config.items(group)
-            ]
-
-    @classmethod
-    def _from_text(cls, text):
-        config = ConfigParser()
-        try:
-            config.read_string(text)
-        except AttributeError:  # pragma: nocover
-            # Python 2 has no read_string
-            config.readfp(io.StringIO(text))
-        return EntryPoint._from_config(config)
-
-    def __iter__(self):
-        """
-        Supply iter so one may construct dicts of EntryPoints easily.
-        """
-        return iter((self.name, self))
-
-
-class PackagePath(pathlib.PosixPath):
-    """A reference to a path in a package"""
-
-    def read_text(self, encoding='utf-8'):
-        with self.locate().open(encoding=encoding) as stream:
-            return stream.read()
-
-    def read_binary(self):
-        with self.locate().open('rb') as stream:
-            return stream.read()
-
-    def locate(self):
-        """Return a path-like object for this path"""
-        return self.dist.locate_file(self)
-
-
-class FileHash:
-    def __init__(self, spec):
-        self.mode, _, self.value = spec.partition('=')
-
-    def __repr__(self):
-        return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
-
-
-class Distribution:
-    """A Python distribution package."""
-
-    @abc.abstractmethod
-    def read_text(self, filename):
-        """Attempt to load metadata file given by the name.
-
-        :param filename: The name of the file in the distribution info.
-        :return: The text if found, otherwise None.
-        """
-
-    @abc.abstractmethod
-    def locate_file(self, path):
-        """
-        Given a path to a file in this distribution, return a path
-        to it.
-        """
-
-    @classmethod
-    def from_name(cls, name):
-        """Return the Distribution for the given package name.
-
-        :param name: The name of the distribution package to search for.
-        :return: The Distribution instance (or subclass thereof) for the named
-            package, if found.
-        :raises PackageNotFoundError: When the named package's distribution
-            metadata cannot be found.
-        """
-        for resolver in cls._discover_resolvers():
-            dists = resolver(name)
-            dist = next(dists, None)
-            if dist is not None:
-                return dist
-        else:
-            raise PackageNotFoundError(name)
-
-    @classmethod
-    def discover(cls):
-        """Return an iterable of Distribution objects for all packages.
-
-        :return: Iterable of Distribution objects for all packages.
-        """
-        return itertools.chain.from_iterable(
-            resolver()
-            for resolver in cls._discover_resolvers()
-            )
-
-    @staticmethod
-    def _discover_resolvers():
-        """Search the meta_path for resolvers."""
-        declared = (
-            getattr(finder, 'find_distributions', None)
-            for finder in sys.meta_path
-            )
-        return filter(None, declared)
-
-    @classmethod
-    def find_local(cls):
-        dists = itertools.chain.from_iterable(
-            resolver(path=['.'])
-            for resolver in cls._discover_resolvers()
-            )
-        dist, = dists
-        return dist
-
-    @property
-    def metadata(self):
-        """Return the parsed metadata for this Distribution.
-
-        The returned object will have keys that name the various bits of
-        metadata.  See PEP 566 for details.
-        """
-        text = self.read_text('METADATA') or self.read_text('PKG-INFO')
-        return _email_message_from_string(text)
-
-    @property
-    def version(self):
-        """Return the 'Version' metadata for the distribution package."""
-        return self.metadata['Version']
-
-    @property
-    def entry_points(self):
-        return EntryPoint._from_text(self.read_text('entry_points.txt'))
-
-    @property
-    def files(self):
-        file_lines = self._read_files_distinfo() or self._read_files_egginfo()
-
-        def make_file(name, hash=None, size_str=None):
-            result = PackagePath(name)
-            result.hash = FileHash(hash) if hash else None
-            result.size = int(size_str) if size_str else None
-            result.dist = self
-            return result
-
-        return file_lines and starmap(make_file, csv.reader(file_lines))
-
-    def _read_files_distinfo(self):
-        """
-        Read the lines of RECORD
-        """
-        text = self.read_text('RECORD')
-        return text and text.splitlines()
-
-    def _read_files_egginfo(self):
-        """
-        SOURCES.txt might contain literal commas, so wrap each line
-        in quotes.
-        """
-        text = self.read_text('SOURCES.txt')
-        return text and map('"{}"'.format, text.splitlines())
-
-    @property
-    def requires(self):
-        return self._read_dist_info_reqs() or self._read_egg_info_reqs()
-
-    def _read_dist_info_reqs(self):
-        spec = self.metadata['Requires-Dist']
-        return spec and filter(None, spec.splitlines())
-
-    def _read_egg_info_reqs(self):
-        source = self.read_text('requires.txt')
-        return self._deps_from_requires_text(source)
-
-    @classmethod
-    def _deps_from_requires_text(cls, source):
-        section_pairs = cls._read_sections(source.splitlines())
-        sections = {
-            section: list(map(operator.itemgetter('line'), results))
-            for section, results in
-            itertools.groupby(section_pairs, operator.itemgetter('section'))
-            }
-        return cls._convert_egg_info_reqs_to_simple_reqs(sections)
-
-    @staticmethod
-    def _read_sections(lines):
-        section = None
-        for line in filter(None, lines):
-            section_match = re.match(r'\[(.*)\]$', line)
-            if section_match:
-                section = section_match.group(1)
-                continue
-            yield locals()
-
-    @staticmethod
-    def _convert_egg_info_reqs_to_simple_reqs(sections):
-        """
-        Historically, setuptools would solicit and store 'extra'
-        requirements, including those with environment markers,
-        in separate sections. More modern tools expect each
-        dependency to be defined separately, with any relevant
-        extras and environment markers attached directly to that
-        requirement. This method converts the former to the
-        latter. See _test_deps_from_requires_text for an example.
-        """
-        def make_condition(name):
-            return name and 'extra == "{name}"'.format(name=name)
-
-        def parse_condition(section):
-            section = section or ''
-            extra, sep, markers = section.partition(':')
-            if extra and markers:
-                markers = '({markers})'.format(markers=markers)
-            conditions = list(filter(None, [markers, make_condition(extra)]))
-            return '; ' + ' and '.join(conditions) if conditions else ''
-
-        for section, deps in sections.items():
-            for dep in deps:
-                yield dep + parse_condition(section)
-
-
-def _email_message_from_string(text):
-    # Work around https://bugs.python.org/issue25545 where
-    # email.message_from_string cannot handle Unicode on Python 2.
-    if sys.version_info < (3,):                     # nocoverpy3
-        io_buffer = io.StringIO(text)
-        return email.message_from_file(io_buffer)
-    return email.message_from_string(text)          # nocoverpy2
-
-
-def distribution(package):
-    """Get the ``Distribution`` instance for the given package.
-
-    :param package: The name of the package as a string.
-    :return: A ``Distribution`` instance (or subclass thereof).
-    """
-    return Distribution.from_name(package)
-
-
-def distributions():
-    """Get all ``Distribution`` instances in the current environment.
-
-    :return: An iterable of ``Distribution`` instances.
-    """
-    return Distribution.discover()
-
-
-def local_distribution():
-    """Get the ``Distribution`` instance for the package in CWD.
-
-    :return: A ``Distribution`` instance (or subclass thereof).
-    """
-    return Distribution.find_local()
-
-
-def metadata(package):
-    """Get the metadata for the package.
-
-    :param package: The name of the distribution package to query.
-    :return: An email.Message containing the parsed metadata.
-    """
-    return Distribution.from_name(package).metadata
-
-
-def version(package):
-    """Get the version string for the named package.
-
-    :param package: The name of the distribution package to query.
-    :return: The version string for the package as defined in the package's
-        "Version" metadata key.
-    """
-    return distribution(package).version
-
-
-def entry_points(name=None):
-    """Return EntryPoint objects for all installed packages.
-
-    :return: EntryPoint objects for all installed packages.
-    """
-    eps = itertools.chain.from_iterable(
-        dist.entry_points for dist in distributions())
-    by_group = operator.attrgetter('group')
-    ordered = sorted(eps, key=by_group)
-    grouped = itertools.groupby(ordered, by_group)
-    return {
-        group: tuple(eps)
-        for group, eps in grouped
-        }
-
-
-def files(package):
-    return distribution(package).files
-
-
-def requires(package):
-    """
-    Return a list of requirements for the indicated distribution.
-
-    :return: An iterator of requirements, suitable for
-    packaging.requirement.Requirement.
-    """
-    return distribution(package).requires
+import io
+import re
+import abc
+import csv
+import sys
+import email
+import operator
+import functools
+import itertools
+import collections
+
+from importlib import import_module
+from itertools import starmap
+
+if sys.version_info > (3,):  # pragma: nocover
+    import pathlib
+    from configparser import ConfigParser
+else:  # pragma: nocover
+    import pathlib2 as pathlib
+    from backports.configparser import ConfigParser
+    from itertools import imap as map  # type: ignore
+
+try:
+    BaseClass = ModuleNotFoundError
+except NameError:                                 # pragma: nocover
+    BaseClass = ImportError                       # type: ignore
+
+
+__metaclass__ = type
+
+
+class PackageNotFoundError(BaseClass):
+    """The package was not found."""
+
+
+class EntryPoint(collections.namedtuple('EntryPointBase', 'name value group')):
+    """An entry point as defined by Python packaging conventions."""
+
+    pattern = re.compile(
+        r'(?P<module>[\w.]+)\s*'
+        r'(:\s*(?P<attr>[\w.]+))?\s*'
+        r'(?P<extras>\[.*\])?\s*$'
+        )
+    """
+    A regular expression describing the syntax for an entry point,
+    which might look like:
+
+        - module
+        - package.module
+        - package.module:attribute
+        - package.module:object.attribute
+        - package.module:attr [extra1, extra2]
+
+    Other combinations are possible as well.
+
+    The expression is lenient about whitespace around the ':',
+    following the attr, and following any extras.
+    """
+
+    def load(self):
+        """Load the entry point from its definition. If only a module
+        is indicated by the value, return that module. Otherwise,
+        return the named object.
+        """
+        match = self.pattern.match(self.value)
+        module = import_module(match.group('module'))
+        attrs = filter(None, match.group('attr').split('.'))
+        return functools.reduce(getattr, attrs, module)
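+
+    # For instance (a sketch using a stdlib module), a 'module:attr' value
+    # resolves to the named object:
+    #
+    #   >>> EntryPoint('x', 'json.decoder:JSONDecoder', 'g').load()
+    #   <class 'json.decoder.JSONDecoder'>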
+
+    @property
+    def extras(self):
+        match = self.pattern.match(self.value)
+        return list(re.finditer(r'\w+', match.group('extras') or ''))
+
+    @classmethod
+    def _from_config(cls, config):
+        return [
+            cls(name, value, group)
+            for group in config.sections()
+            for name, value in config.items(group)
+            ]
+
+    @classmethod
+    def _from_text(cls, text):
+        config = ConfigParser()
+        try:
+            config.read_string(text)
+        except AttributeError:  # pragma: nocover
+            # Python 2 has no read_string
+            config.readfp(io.StringIO(text))
+        return EntryPoint._from_config(config)
+
+    def __iter__(self):
+        """
+        Supply iter so one may construct dicts of EntryPoints easily.
+        """
+        return iter((self.name, self))
+
+
+class PackagePath(pathlib.PosixPath):
+    """A reference to a path in a package"""
+
+    def read_text(self, encoding='utf-8'):
+        with self.locate().open(encoding=encoding) as stream:
+            return stream.read()
+
+    def read_binary(self):
+        with self.locate().open('rb') as stream:
+            return stream.read()
+
+    def locate(self):
+        """Return a path-like object for this path"""
+        return self.dist.locate_file(self)
+
+
+class FileHash:
+    def __init__(self, spec):
+        self.mode, _, self.value = spec.partition('=')
+
+    def __repr__(self):
+        return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
+
+
+class Distribution:
+    """A Python distribution package."""
+
+    @abc.abstractmethod
+    def read_text(self, filename):
+        """Attempt to load metadata file given by the name.
+
+        :param filename: The name of the file in the distribution info.
+        :return: The text if found, otherwise None.
+        """
+
+    @abc.abstractmethod
+    def locate_file(self, path):
+        """
+        Given a path to a file in this distribution, return a path
+        to it.
+        """
+
+    @classmethod
+    def from_name(cls, name):
+        """Return the Distribution for the given package name.
+
+        :param name: The name of the distribution package to search for.
+        :return: The Distribution instance (or subclass thereof) for the named
+            package, if found.
+        :raises PackageNotFoundError: When the named package's distribution
+            metadata cannot be found.
+        """
+        for resolver in cls._discover_resolvers():
+            dists = resolver(name)
+            dist = next(dists, None)
+            if dist is not None:
+                return dist
+        else:
+            raise PackageNotFoundError(name)
+
+    @classmethod
+    def discover(cls):
+        """Return an iterable of Distribution objects for all packages.
+
+        :return: Iterable of Distribution objects for all packages.
+        """
+        return itertools.chain.from_iterable(
+            resolver()
+            for resolver in cls._discover_resolvers()
+            )
+
+    @staticmethod
+    def _discover_resolvers():
+        """Search the meta_path for resolvers."""
+        declared = (
+            getattr(finder, 'find_distributions', None)
+            for finder in sys.meta_path
+            )
+        return filter(None, declared)
+
+    @classmethod
+    def find_local(cls):
+        dists = itertools.chain.from_iterable(
+            resolver(path=['.'])
+            for resolver in cls._discover_resolvers()
+            )
+        dist, = dists
+        return dist
+
+    @property
+    def metadata(self):
+        """Return the parsed metadata for this Distribution.
+
+        The returned object will have keys that name the various bits of
+        metadata.  See PEP 566 for details.
+        """
+        text = self.read_text('METADATA') or self.read_text('PKG-INFO')
+        return _email_message_from_string(text)
+
+    @property
+    def version(self):
+        """Return the 'Version' metadata for the distribution package."""
+        return self.metadata['Version']
+
+    @property
+    def entry_points(self):
+        return EntryPoint._from_text(self.read_text('entry_points.txt'))
+
+    @property
+    def files(self):
+        file_lines = self._read_files_distinfo() or self._read_files_egginfo()
+
+        def make_file(name, hash=None, size_str=None):
+            result = PackagePath(name)
+            result.hash = FileHash(hash) if hash else None
+            result.size = int(size_str) if size_str else None
+            result.dist = self
+            return result
+
+        return file_lines and starmap(make_file, csv.reader(file_lines))
+
+    def _read_files_distinfo(self):
+        """
+        Read the lines of RECORD
+        """
+        text = self.read_text('RECORD')
+        return text and text.splitlines()
+
+    def _read_files_egginfo(self):
+        """
+        SOURCES.txt might contain literal commas, so wrap each line
+        in quotes.
+        """
+        text = self.read_text('SOURCES.txt')
+        return text and map('"{}"'.format, text.splitlines())
+
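To make the parsing above concrete: each RECORD row is a CSV triple of path, hash spec, and size, which ``make_file`` wires into a ``PackagePath``. A minimal sketch with a made-up row (the path and digest are illustrative only; the import path assumes the vendored module is importable):

    import csv
    from mncheck.ext.importlib_metadata.api import FileHash, PackagePath

    row = 'mypkg/util.py,sha256=bYkw5oMccfazVCoYQwKkkemoVyMAFoR34mmKBx8R1NI,859'
    name, hash_spec, size_str = next(csv.reader([row]))
    path = PackagePath(name)
    path.hash = FileHash(hash_spec)   # parsed into mode and value
    path.size = int(size_str)
    assert path.hash.mode == 'sha256'
    assert path.size == 859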
+    @property
+    def requires(self):
+        return self._read_dist_info_reqs() or self._read_egg_info_reqs()
+
+    def _read_dist_info_reqs(self):
+        spec = self.metadata['Requires-Dist']
+        return spec and filter(None, spec.splitlines())
+
+    def _read_egg_info_reqs(self):
+        source = self.read_text('requires.txt')
+        return source and self._deps_from_requires_text(source)
+
+    @classmethod
+    def _deps_from_requires_text(cls, source):
+        section_pairs = cls._read_sections(source.splitlines())
+        sections = {
+            section: list(map(operator.itemgetter('line'), results))
+            for section, results in
+            itertools.groupby(section_pairs, operator.itemgetter('section'))
+            }
+        return cls._convert_egg_info_reqs_to_simple_reqs(sections)
+
+    @staticmethod
+    def _read_sections(lines):
+        section = None
+        for line in filter(None, lines):
+            section_match = re.match(r'\[(.*)\]$', line)
+            if section_match:
+                section = section_match.group(1)
+                continue
+            yield locals()
+
+    @staticmethod
+    def _convert_egg_info_reqs_to_simple_reqs(sections):
+        """
+        Historically, setuptools would solicit and store 'extra'
+        requirements, including those with environment markers,
+        in separate sections. More modern tools expect each
+        dependency to be defined separately, with any relevant
+        extras and environment markers attached directly to that
+        requirement. This method converts the former to the
+        latter. See _test_deps_from_requires_text for an example.
+        """
+        def make_condition(name):
+            return name and 'extra == "{name}"'.format(name=name)
+
+        def parse_condition(section):
+            section = section or ''
+            extra, sep, markers = section.partition(':')
+            if extra and markers:
+                markers = '({markers})'.format(markers=markers)
+            conditions = list(filter(None, [markers, make_condition(extra)]))
+            return '; ' + ' and '.join(conditions) if conditions else ''
+
+        for section, deps in sections.items():
+            for dep in deps:
+                yield dep + parse_condition(section)
+
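The docstring above is easiest to follow next to concrete data; this sketch feeds a shortened version of the sample ``requires.txt`` used by ``test_more_complex_deps_requires_text`` later in this diff through the same classmethod:

    import textwrap
    from mncheck.ext.importlib_metadata.api import Distribution

    requires = textwrap.dedent("""
        dep1

        [:python_version < "3"]
        dep3

        [extra1]
        dep4
        """)
    deps = sorted(Distribution._deps_from_requires_text(requires))
    # Section headers are folded into per-requirement environment markers.
    assert deps == [
        'dep1',
        'dep3; python_version < "3"',
        'dep4; extra == "extra1"',
        ]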
+
+def _email_message_from_string(text):
+    # Work around https://bugs.python.org/issue25545 where
+    # email.message_from_string cannot handle Unicode on Python 2.
+    if sys.version_info < (3,):                     # nocoverpy3
+        io_buffer = io.StringIO(text)
+        return email.message_from_file(io_buffer)
+    return email.message_from_string(text)          # nocoverpy2
+
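Because the metadata text is parsed with the ``email`` package, the resulting object supports mapping-style access. A small sketch with made-up metadata text (the helper is module-private, so this assumes you are importing it only for illustration):

    from mncheck.ext.importlib_metadata.api import _email_message_from_string

    text = 'Metadata-Version: 2.1\nName: demo-pkg\nVersion: 1.0\n'
    msg = _email_message_from_string(text)
    assert msg['Name'] == 'demo-pkg'
    assert msg['Version'] == '1.0'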
+
+def distribution(package):
+    """Get the ``Distribution`` instance for the given package.
+
+    :param package: The name of the package as a string.
+    :return: A ``Distribution`` instance (or subclass thereof).
+    """
+    return Distribution.from_name(package)
+
+
+def distributions():
+    """Get all ``Distribution`` instances in the current environment.
+
+    :return: An iterable of ``Distribution`` instances.
+    """
+    return Distribution.discover()
+
+
+def local_distribution():
+    """Get the ``Distribution`` instance for the package in CWD.
+
+    :return: A ``Distribution`` instance (or subclass thereof).
+    """
+    return Distribution.find_local()
+
+
+def metadata(package):
+    """Get the metadata for the package.
+
+    :param package: The name of the distribution package to query.
+    :return: An email.Message containing the parsed metadata.
+    """
+    return Distribution.from_name(package).metadata
+
+
+def version(package):
+    """Get the version string for the named package.
+
+    :param package: The name of the distribution package to query.
+    :return: The version string for the package as defined in the package's
+        "Version" metadata key.
+    """
+    return distribution(package).version
+
+
+def entry_points(name=None):
+    """Return EntryPoint objects for all installed packages.
+
+    :return: A dict mapping each group name to a tuple of its EntryPoint objects.
+    """
+    eps = itertools.chain.from_iterable(
+        dist.entry_points for dist in distributions())
+    by_group = operator.attrgetter('group')
+    ordered = sorted(eps, key=by_group)
+    grouped = itertools.groupby(ordered, by_group)
+    return {
+        group: tuple(eps)
+        for group, eps in grouped
+        }
+
+
+def files(package):
+    return distribution(package).files
+
+
+def requires(package):
+    """
+    Return the requirements for the indicated distribution.
+
+    :return: An iterator of requirements, suitable for
+    packaging.requirements.Requirement.
+    """
+    return distribution(package).requires
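Put together, the functional API above matches the examples in ``docs/using.rst``. A minimal usage sketch (assuming the vendored module is importable as shown and that the ``wheel`` distribution is installed in the environment):

    from mncheck.ext.importlib_metadata.api import (
        version, metadata, entry_points, files, requires)

    print(version('wheel'))                         # e.g. '0.32.3'
    print(metadata('wheel')['Summary'])             # raw metadata field
    groups = entry_points()                         # dict keyed by group
    print([ep.name for ep in groups.get('console_scripts', ())])
    print(next(iter(files('wheel'))))               # a PackagePath
    print(list(requires('wheel')))                  # requirement strings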

+ 0 - 0
ext/importlib_metadata/docs/__init__.py → mncheck/ext/importlib_metadata/docs/__init__.py


+ 85 - 85
ext/importlib_metadata/docs/changelog.rst → mncheck/ext/importlib_metadata/docs/changelog.rst

@@ -1,85 +1,85 @@
-=========================
- importlib_metadata NEWS
-=========================
-
-0.8
-===
-* This library can now discover/enumerate all installed packages. **This
-  backward-incompatible change alters the protocol finders must
-  implement to support distribution package discovery.** Closes #24.
-* The signature of ``find_distributions()`` on custom installer finders
-  should now accept two parameters, ``name`` and ``path`` and
-  these parameters must supply defaults.
-* The ``entry_points()`` method no longer accepts a package name
-  but instead returns all entry points in a dictionary keyed by the
-  ``EntryPoint.group``. The ``resolve`` method has been removed. Instead,
-  call ``EntryPoint.load()``, which has the same semantics as
-  ``pkg_resources`` and ``entrypoints``.  **This is a backward incompatible
-  change.**
-* Metadata is now always returned as Unicode text regardless of
-  Python version. Closes #29.
-* This library can now discover metadata for a 'local' package (found
-  in the current-working directory). Closes #27.
-* Added ``files()`` function for resolving files from a distribution.
-* Added a new ``requires()`` function, which returns the requirements
-  for a package suitable for parsing by
-  ``packaging.requirements.Requirement``. Closes #18.
-* The top-level ``read_text()`` function has been removed.  Use
-  ``PackagePath.read_text()`` on instances returned by the ``files()``
-  function.  **This is a backward incompatible change.**
-* Release dates are now automatically injected into the changelog
-  based on SCM tags.
-
-0.7
-===
-* Fixed issue where packages with dashes in their names would
-  not be discovered. Closes #21.
-* Distribution lookup is now case-insensitive. Closes #20.
-* Wheel distributions can no longer be discovered by their module
-  name. Like Path distributions, they must be indicated by their
-  distribution package name.
-
-0.6
-===
-* Removed ``importlib_metadata.distribution`` function. Now
-  the public interface is primarily the utility functions exposed
-  in ``importlib_metadata.__all__``. Closes #14.
-* Added two new utility functions ``read_text`` and
-  ``metadata``.
-
-0.5
-===
-* Updated README and removed details about Distribution
-  class, now considered private. Closes #15.
-* Added test suite support for Python 3.4+.
-* Fixed SyntaxErrors on Python 3.4 and 3.5. !12
-* Fixed errors on Windows joining Path elements. !15
-
-0.4
-===
-* Housekeeping.
-
-0.3
-===
-* Added usage documentation.  Closes #8
-* Add support for getting metadata from wheels on ``sys.path``.  Closes #9
-
-0.2
-===
-* Added ``importlib_metadata.entry_points()``.  Closes #1
-* Added ``importlib_metadata.resolve()``.  Closes #12
-* Add support for Python 2.7.  Closes #4
-
-0.1
-===
-* Initial release.
-
-
-..
-   Local Variables:
-   mode: change-log-mode
-   indent-tabs-mode: nil
-   sentence-end-double-space: t
-   fill-column: 78
-   coding: utf-8
-   End:
+=========================
+ importlib_metadata NEWS
+=========================
+
+0.8
+===
+* This library can now discover/enumerate all installed packages. **This
+  backward-incompatible change alters the protocol finders must
+  implement to support distribution package discovery.** Closes #24.
+* The signature of ``find_distributions()`` on custom installer finders
+  should now accept two parameters, ``name`` and ``path``, and
+  these parameters must supply defaults.
+* The ``entry_points()`` method no longer accepts a package name
+  but instead returns all entry points in a dictionary keyed by the
+  ``EntryPoint.group``. The ``resolve`` method has been removed. Instead,
+  call ``EntryPoint.load()``, which has the same semantics as
+  ``pkg_resources`` and ``entrypoints``.  **This is a backward incompatible
+  change.**
+* Metadata is now always returned as Unicode text regardless of
+  Python version. Closes #29.
+* This library can now discover metadata for a 'local' package (found
+  in the current-working directory). Closes #27.
+* Added ``files()`` function for resolving files from a distribution.
+* Added a new ``requires()`` function, which returns the requirements
+  for a package suitable for parsing by
+  ``packaging.requirements.Requirement``. Closes #18.
+* The top-level ``read_text()`` function has been removed.  Use
+  ``PackagePath.read_text()`` on instances returned by the ``files()``
+  function.  **This is a backward incompatible change.**
+* Release dates are now automatically injected into the changelog
+  based on SCM tags.
+
+0.7
+===
+* Fixed issue where packages with dashes in their names would
+  not be discovered. Closes #21.
+* Distribution lookup is now case-insensitive. Closes #20.
+* Wheel distributions can no longer be discovered by their module
+  name. Like Path distributions, they must be indicated by their
+  distribution package name.
+
+0.6
+===
+* Removed ``importlib_metadata.distribution`` function. Now
+  the public interface is primarily the utility functions exposed
+  in ``importlib_metadata.__all__``. Closes #14.
+* Added two new utility functions ``read_text`` and
+  ``metadata``.
+
+0.5
+===
+* Updated README and removed details about Distribution
+  class, now considered private. Closes #15.
+* Added test suite support for Python 3.4+.
+* Fixed SyntaxErrors on Python 3.4 and 3.5. !12
+* Fixed errors on Windows joining Path elements. !15
+
+0.4
+===
+* Housekeeping.
+
+0.3
+===
+* Added usage documentation.  Closes #8
+* Add support for getting metadata from wheels on ``sys.path``.  Closes #9
+
+0.2
+===
+* Added ``importlib_metadata.entry_points()``.  Closes #1
+* Added ``importlib_metadata.resolve()``.  Closes #12
+* Add support for Python 2.7.  Closes #4
+
+0.1
+===
+* Initial release.
+
+
+..
+   Local Variables:
+   mode: change-log-mode
+   indent-tabs-mode: nil
+   sentence-end-double-space: t
+   fill-column: 78
+   coding: utf-8
+   End:

+ 196 - 196
ext/importlib_metadata/docs/conf.py → mncheck/ext/importlib_metadata/docs/conf.py

@@ -1,196 +1,196 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-#
-# flake8: noqa
-#
-# importlib_metadata documentation build configuration file, created by
-# sphinx-quickstart on Thu Nov 30 10:21:00 2017.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
-
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
-    'rst.linker',
-    'sphinx.ext.autodoc',
-    'sphinx.ext.coverage',
-    'sphinx.ext.doctest',
-    'sphinx.ext.intersphinx',
-    'sphinx.ext.viewcode',
-    ]
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix(es) of source filenames.
-# You can specify multiple suffix as a list of string:
-#
-# source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = 'importlib_metadata'
-copyright = '2017-2018, Jason Coombs, Barry Warsaw'
-author = 'Jason Coombs, Barry Warsaw'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '0.1'
-# The full version, including alpha/beta/rc tags.
-release = '0.1'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#
-# This is also used if you do content translation via gettext catalogs.
-# Usually you set "language" from the command line for these cases.
-language = None
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-# This patterns also effect to html_static_path and html_extra_path
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# If true, `todo` and `todoList` produce output, else they produce nothing.
-todo_include_todos = False
-
-
-# -- Options for HTML output ----------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-#
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#
-# html_theme_options = {}
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# Custom sidebar templates, must be a dictionary that maps document names
-# to template names.
-#
-# This is required for the alabaster theme
-# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
-html_sidebars = {
-    '**': [
-        'relations.html',  # needs 'show_related': True theme option to display
-        'searchbox.html',
-    ]
-}
-
-
-# -- Options for HTMLHelp output ------------------------------------------
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'importlib_metadatadoc'
-
-
-# -- Options for LaTeX output ---------------------------------------------
-
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    #
-    # 'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    #
-    # 'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    #
-    # 'preamble': '',
-
-    # Latex figure (float) alignment
-    #
-    # 'figure_align': 'htbp',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-#  author, documentclass [howto, manual, or own class]).
-latex_documents = [
-    (master_doc, 'importlib_metadata.tex', 'importlib\\_metadata Documentation',
-     'Brett Cannon, Barry Warsaw', 'manual'),
-]
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
-     [author], 1)
-]
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-#  dir menu entry, description, category)
-texinfo_documents = [
-    (master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
-     author, 'importlib_metadata', 'One line description of project.',
-     'Miscellaneous'),
-]
-
-
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {
-    'python': ('https://docs.python.org/3', None),
-    }
-
-
-# For rst.linker, inject release dates into changelog.rst
-link_files = {
-    'changelog.rst': dict(
-        replace=[
-            dict(
-                pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
-                with_scm='{text}\n{rev[timestamp]:%Y-%m-%d}\n\n',
-            ),
-        ],
-    ),
-}
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# flake8: noqa
+#
+# importlib_metadata documentation build configuration file, created by
+# sphinx-quickstart on Thu Nov 30 10:21:00 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'rst.linker',
+    'sphinx.ext.autodoc',
+    'sphinx.ext.coverage',
+    'sphinx.ext.doctest',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.viewcode',
+    ]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'importlib_metadata'
+copyright = '2017-2018, Jason Coombs, Barry Warsaw'
+author = 'Jason Coombs, Barry Warsaw'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.1'
+# The full version, including alpha/beta/rc tags.
+release = '0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# These patterns also affect html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Custom sidebar templates, must be a dictionary that maps document names
+# to template names.
+#
+# This is required for the alabaster theme
+# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
+html_sidebars = {
+    '**': [
+        'relations.html',  # needs 'show_related': True theme option to display
+        'searchbox.html',
+    ]
+}
+
+
+# -- Options for HTMLHelp output ------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'importlib_metadatadoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    #
+    # 'papersize': 'letterpaper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    #
+    # 'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    #
+    # 'preamble': '',
+
+    # Latex figure (float) alignment
+    #
+    # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'importlib_metadata.tex', 'importlib\\_metadata Documentation',
+     'Brett Cannon, Barry Warsaw', 'manual'),
+]
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
+     [author], 1)
+]
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
+     author, 'importlib_metadata', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+    'python': ('https://docs.python.org/3', None),
+    }
+
+
+# For rst.linker, inject release dates into changelog.rst
+link_files = {
+    'changelog.rst': dict(
+        replace=[
+            dict(
+                pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
+                with_scm='{text}\n{rev[timestamp]:%Y-%m-%d}\n\n',
+            ),
+        ],
+    ),
+}

+ 53 - 53
ext/importlib_metadata/docs/index.rst → mncheck/ext/importlib_metadata/docs/index.rst

@@ -1,53 +1,53 @@
-===============================
- Welcome to importlib_metadata
-===============================
-
-``importlib_metadata`` is a library which provides an API for accessing an
-installed package's `metadata`_, such as its entry points or its top-level
-name.  This functionality intends to replace most uses of ``pkg_resources``
-`entry point API`_ and `metadata API`_.  Along with ``importlib.resources`` in
-`Python 3.7 and newer`_ (backported as `importlib_resources`_ for older
-versions of Python), this can eliminate the need to use the older and less
-efficient ``pkg_resources`` package.
-
-``importlib_metadata`` is a backport of Python 3.8's standard library
-`importlib.metadata`_ module for Python 2.7, and 3.4 through 3.7.  Users of
-Python 3.8 and beyond are encouraged to use the standard library module, and
-in fact for these versions, ``importlib_metadata`` just shadows that module.
-Developers looking for detailed API descriptions should refer to the Python
-3.8 standard library documentation.
-
-The documentation here includes a general :ref:`usage <using>` guide.
-
-
-.. toctree::
-   :maxdepth: 2
-   :caption: Contents:
-
-   using.rst
-   changelog (links).rst
-
-
-Project details
-===============
-
- * Project home: https://gitlab.com/python-devs/importlib_metadata
- * Report bugs at: https://gitlab.com/python-devs/importlib_metadata/issues
- * Code hosting: https://gitlab.com/python-devs/importlib_metadata.git
- * Documentation: http://importlib_metadata.readthedocs.io/
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
-
-.. _`metadata`: https://www.python.org/dev/peps/pep-0566/
-.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
-.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api
-.. _`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources
-.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html
-.. _`importlib.metadata`: TBD
+===============================
+ Welcome to importlib_metadata
+===============================
+
+``importlib_metadata`` is a library which provides an API for accessing an
+installed package's `metadata`_, such as its entry points or its top-level
+name.  This functionality intends to replace most uses of ``pkg_resources``
+`entry point API`_ and `metadata API`_.  Along with ``importlib.resources`` in
+`Python 3.7 and newer`_ (backported as `importlib_resources`_ for older
+versions of Python), this can eliminate the need to use the older and less
+efficient ``pkg_resources`` package.
+
+``importlib_metadata`` is a backport of Python 3.8's standard library
+`importlib.metadata`_ module for Python 2.7, and 3.4 through 3.7.  Users of
+Python 3.8 and beyond are encouraged to use the standard library module, and
+in fact for these versions, ``importlib_metadata`` just shadows that module.
+Developers looking for detailed API descriptions should refer to the Python
+3.8 standard library documentation.
+
+The documentation here includes a general :ref:`usage <using>` guide.
+
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   using.rst
+   changelog (links).rst
+
+
+Project details
+===============
+
+ * Project home: https://gitlab.com/python-devs/importlib_metadata
+ * Report bugs at: https://gitlab.com/python-devs/importlib_metadata/issues
+ * Code hosting: https://gitlab.com/python-devs/importlib_metadata.git
+ * Documentation: http://importlib_metadata.readthedocs.io/
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+
+.. _`metadata`: https://www.python.org/dev/peps/pep-0566/
+.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
+.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api
+.. _`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources
+.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html
+.. _`importlib.metadata`: TBD

+ 254 - 254
ext/importlib_metadata/docs/using.rst → mncheck/ext/importlib_metadata/docs/using.rst

@@ -1,254 +1,254 @@
-.. _using:
-
-==========================
- Using importlib_metadata
-==========================
-
-``importlib_metadata`` is a library that provides for access to installed
-package metadata.  Built in part on Python's import system, this library
-intends to replace similar functionality in ``pkg_resources`` `entry point
-API`_ and `metadata API`_.  Along with ``importlib.resources`` in `Python 3.7
-and newer`_ (backported as `importlib_resources`_ for older versions of
-Python), this can eliminate the need to use the older and less efficient
-``pkg_resources`` package.
-
-By "installed package" we generally mean a third party package installed into
-Python's ``site-packages`` directory via tools such as ``pip``.  Specifically,
-it means a package with either a discoverable ``dist-info`` or ``egg-info``
-directory, and metadata defined by `PEP 566`_ or its older specifications.
-By default, package metadata can live on the file system or in wheels on
-``sys.path``.  Through an extension mechanism, the metadata can live almost
-anywhere.
-
-
-Overview
-========
-
-Let's say you wanted to get the version string for a package you've installed
-using ``pip``.  We start by creating a virtual environment and installing
-something into it::
-
-    $ python3 -m venv example
-    $ source example/bin/activate
-    (example) $ pip install importlib_metadata
-    (example) $ pip install wheel
-
-You can get the version string for ``wheel`` by running the following::
-
-    (example) $ python
-    >>> from importlib_metadata import version
-    >>> version('wheel')
-    '0.32.3'
-
-You can also get the set of entry points keyed by group, such as
-``console_scripts``, ``distutils.commands`` and others.  Each group contains a
-sequence of :ref:`EntryPoint <entry-points>` objects.
-
-You can get the :ref:`metadata for a distribution <metadata>`::
-
-    >>> list(metadata('wheel'))
-    ['Metadata-Version', 'Name', 'Version', 'Summary', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Project-URL', 'Project-URL', 'Project-URL', 'Keywords', 'Platform', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Requires-Python', 'Provides-Extra', 'Requires-Dist', 'Requires-Dist']
-
-You can also get a :ref:`distribution's version number <version>`, list its
-:ref:`constituent files <files>`_, and get a list of the distribution's
-:ref:`requirements`_.
-
-
-Distributions
-=============
-
-.. CAUTION:: The ``Distribution`` class described here may or may not end up
-             in the final stable public API.  Consider this class `provisional
-             <https://www.python.org/dev/peps/pep-0411/>`_ until the 1.0
-             release.
-
-While the above API is the most common and convenient usage, you can get all
-of that information from the ``Distribution`` class.  A ``Distribution`` is an
-abstract object that represents the metadata for a Python package.  You can
-get the ``Distribution`` instance::
-
-    >>> from importlib_metadata import distribution
-    >>> dist = distribution('wheel')
-
-Thus, an alternative way to get the version number is through the
-``Distribution`` instance::
-
-    >>> dist.version
-    '0.32.3'
-
-There are all kinds of additional metadata available on the ``Distribution``
-instance::
-
-    >>> d.metadata['Requires-Python']
-    '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
-    >>> d.metadata['License']
-    'MIT'
-
-The full set of available metadata is not described here.  See `PEP 566
-<https://www.python.org/dev/peps/pep-0566/>`_ for additional details.
-
-
-Functional API
-==============
-
-This package provides the following functionality via its public API.
-
-
-.. _entry-points::
-
-Entry points
-------------
-
-The ``entry_points()`` function returns a dictionary of all entry points,
-keyed by group.  Entry points are represented by ``EntryPoint`` instances;
-each ``EntryPoint`` has a ``.name``, ``.group``, and ``.value`` attributes and
-a ``.load()`` method to resolve the value.
-
-    >>> eps = entry_points()
-    >>> list(eps)
-    ['console_scripts', 'distutils.commands', 'distutils.setup_keywords', 'egg_info.writers', 'setuptools.installation']
-    >>> scripts = eps['console_scripts']
-    >>> wheel = [ep for ep in scripts if ep.name == 'wheel'][0]
-    >>> wheel
-    EntryPoint(name='wheel', value='wheel.cli:main', group='console_scripts')
-    >>> main = wheel.load()
-    >>> main
-    <function main at 0x103528488>
-
-The ``group`` and ``name`` are arbitrary values defined by the package author
-and usually a client will wish to resolve all entry points for a particular
-group.  Read `the setuptools docs
-<https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
-for more information on entrypoints, their definition, and usage.
-
-
-.. _metadata::
-
-Distribution metadata
----------------------
-
-Every distribution includes some metadata, which you can extract using the
-``metadata()`` function::
-
-    >>> wheel_metadata = metadata('wheel')
-
-The keys of the returned data structure [#f1]_ name the metadata keywords, and
-their values are returned unparsed from the distribution metadata::
-
-    >>> wheel_metadata['Requires-Python']
-    '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
-
-
-.. _version::
-
-Distribution versions
----------------------
-
-The ``version()`` function is the quickest way to get a distribution's version
-number, as a string::
-
-    >>> version('wheel')
-    '0.32.3'
-
-
-.. _files::
-
-Distribution files
-------------------
-
-You can also get the full set of files contained within a distribution.  The
-``files()`` function takes a distribution package name and returns all of the
-files installed by this distribution.  Each file object returned is a
-``PackagePath``, a `pathlib.Path`_ derived object with additional ``dist``,
-``size``, and ``hash`` properties as indicated by the metadata.  For example::
-
-    >>> util = [p for p in files('wheel') if 'util.py' in str(p)][0]
-    >>> util
-    PackagePath('wheel/util.py')
-    >>> util.size
-    859
-    >>> util.dist
-    <importlib_metadata._hooks.PathDistribution object at 0x101e0cef0>
-    >>> util.hash
-    <FileHash mode: sha256 value: bYkw5oMccfazVCoYQwKkkemoVyMAFoR34mmKBx8R1NI>
-
-Once you have the file, you can also read its contents::
-
-    >>> print(util.read_text())
-    import base64
-    import sys
-    ...
-    def as_bytes(s):
-        if isinstance(s, text_type):
-            return s.encode('utf-8')
-        return s
-
-
-.. _requirements::
-
-Distribution requirements
--------------------------
-
-To get the full set of requirements for a distribution, use the ``requires()``
-function.  Note that this returns an iterator::
-
-    >>> list(requires('wheel'))
-    ["pytest (>=3.0.0) ; extra == 'test'"]
-
-
-
-Extending the search algorithm
-==============================
-
-Because package metadata is not available through ``sys.path`` searches, or
-package loaders directly, the metadata for a package is found through import
-system `finders`_.  To find a distribution package's metadata,
-``importlib_metadata`` queries the list of `meta path finders`_ on
-`sys.meta_path`_.
-
-By default ``importlib_metadata`` installs a finder for distribution packages
-found on the file system.  This finder doesn't actually find any *packages*,
-but it can find the packages' metadata.
-
-The abstract class :py:class:`importlib.abc.MetaPathFinder` defines the
-interface expected of finders by Python's import system.
-``importlib_metadata`` extends this protocol by looking for an optional
-``find_distributions`` callable on the finders from
-``sys.meta_path``.  If the finder has this method, it must return
-an iterator over instances of the ``Distribution`` abstract class. This
-method must have the signature::
-
-    def find_distributions(name=None, path=sys.path):
-        """Return an iterable of all Distribution instances capable of
-        loading the metadata for packages matching the name
-        (or all names if not supplied) along the paths in the list
-        of directories ``path`` (defaults to sys.path).
-        """
-
-What this means in practice is that to support finding distribution package
-metadata in locations other than the file system, you should derive from
-``Distribution`` and implement the ``load_metadata()`` method.  This takes a
-single argument which is the name of the package whose metadata is being
-found.  This instance of the ``Distribution`` base abstract class is what your
-finder's ``find_distributions()`` method should return.
-
-
-.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
-.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api
-.. _`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources
-.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html
-.. _`PEP 566`: https://www.python.org/dev/peps/pep-0566/
-.. _`finders`: https://docs.python.org/3/reference/import.html#finders-and-loaders
-.. _`meta path finders`: https://docs.python.org/3/glossary.html#term-meta-path-finder
-.. _`sys.meta_path`: https://docs.python.org/3/library/sys.html#sys.meta_path
-.. _`pathlib.Path`: https://docs.python.org/3/library/pathlib.html#pathlib.Path
-
-
-.. rubric:: Footnotes
-
-.. [#f1] Technically, the returned distribution metadata object is an
-         `email.message.Message
-         <https://docs.python.org/3/library/email.message.html#email.message.EmailMessage>`_
-         instance, but this is an implementation detail, and not part of the
-         stable API.  You should only use dictionary-like methods and syntax
-         to access the metadata contents.
+.. _using:
+
+==========================
+ Using importlib_metadata
+==========================
+
+``importlib_metadata`` is a library that provides access to installed
+package metadata.  Built in part on Python's import system, this library
+intends to replace similar functionality in ``pkg_resources`` `entry point
+API`_ and `metadata API`_.  Along with ``importlib.resources`` in `Python 3.7
+and newer`_ (backported as `importlib_resources`_ for older versions of
+Python), this can eliminate the need to use the older and less efficient
+``pkg_resources`` package.
+
+By "installed package" we generally mean a third party package installed into
+Python's ``site-packages`` directory via tools such as ``pip``.  Specifically,
+it means a package with either a discoverable ``dist-info`` or ``egg-info``
+directory, and metadata defined by `PEP 566`_ or its older specifications.
+By default, package metadata can live on the file system or in wheels on
+``sys.path``.  Through an extension mechanism, the metadata can live almost
+anywhere.
+
+
+Overview
+========
+
+Let's say you wanted to get the version string for a package you've installed
+using ``pip``.  We start by creating a virtual environment and installing
+something into it::
+
+    $ python3 -m venv example
+    $ source example/bin/activate
+    (example) $ pip install importlib_metadata
+    (example) $ pip install wheel
+
+You can get the version string for ``wheel`` by running the following::
+
+    (example) $ python
+    >>> from importlib_metadata import version
+    >>> version('wheel')
+    '0.32.3'
+
+You can also get the set of entry points keyed by group, such as
+``console_scripts``, ``distutils.commands`` and others.  Each group contains a
+sequence of :ref:`EntryPoint <entry-points>` objects.
+
+You can get the :ref:`metadata for a distribution <metadata>`::
+
+    >>> list(metadata('wheel'))
+    ['Metadata-Version', 'Name', 'Version', 'Summary', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Project-URL', 'Project-URL', 'Project-URL', 'Keywords', 'Platform', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Requires-Python', 'Provides-Extra', 'Requires-Dist', 'Requires-Dist']
+
+You can also get a :ref:`distribution's version number <version>`, list its
+:ref:`constituent files <files>`, and get a list of the distribution's
+:ref:`requirements`.
+
+
+Distributions
+=============
+
+.. CAUTION:: The ``Distribution`` class described here may or may not end up
+             in the final stable public API.  Consider this class `provisional
+             <https://www.python.org/dev/peps/pep-0411/>`_ until the 1.0
+             release.
+
+While the above API is the most common and convenient usage, you can get all
+of that information from the ``Distribution`` class.  A ``Distribution`` is an
+abstract object that represents the metadata for a Python package.  You can
+get the ``Distribution`` instance::
+
+    >>> from importlib_metadata import distribution
+    >>> dist = distribution('wheel')
+
+Thus, an alternative way to get the version number is through the
+``Distribution`` instance::
+
+    >>> dist.version
+    '0.32.3'
+
+There are all kinds of additional metadata available on the ``Distribution``
+instance::
+
+    >>> dist.metadata['Requires-Python']
+    '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
+    >>> dist.metadata['License']
+    'MIT'
+
+The full set of available metadata is not described here.  See `PEP 566
+<https://www.python.org/dev/peps/pep-0566/>`_ for additional details.
+
+
+Functional API
+==============
+
+This package provides the following functionality via its public API.
+
+
+.. _entry-points:
+
+Entry points
+------------
+
+The ``entry_points()`` function returns a dictionary of all entry points,
+keyed by group.  Entry points are represented by ``EntryPoint`` instances;
+each ``EntryPoint`` has ``.name``, ``.group``, and ``.value`` attributes and
+a ``.load()`` method to resolve the value.
+
+    >>> eps = entry_points()
+    >>> list(eps)
+    ['console_scripts', 'distutils.commands', 'distutils.setup_keywords', 'egg_info.writers', 'setuptools.installation']
+    >>> scripts = eps['console_scripts']
+    >>> wheel = [ep for ep in scripts if ep.name == 'wheel'][0]
+    >>> wheel
+    EntryPoint(name='wheel', value='wheel.cli:main', group='console_scripts')
+    >>> main = wheel.load()
+    >>> main
+    <function main at 0x103528488>
+
+The ``group`` and ``name`` are arbitrary values defined by the package author
+and usually a client will wish to resolve all entry points for a particular
+group.  Read `the setuptools docs
+<https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
+for more information on entrypoints, their definition, and usage.
+
+
+.. _metadata:
+
+Distribution metadata
+---------------------
+
+Every distribution includes some metadata, which you can extract using the
+``metadata()`` function::
+
+    >>> wheel_metadata = metadata('wheel')
+
+The keys of the returned data structure [#f1]_ name the metadata keywords, and
+their values are returned unparsed from the distribution metadata::
+
+    >>> wheel_metadata['Requires-Python']
+    '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
+
+
+.. _version:
+
+Distribution versions
+---------------------
+
+The ``version()`` function is the quickest way to get a distribution's version
+number, as a string::
+
+    >>> version('wheel')
+    '0.32.3'
+
+
+.. _files:
+
+Distribution files
+------------------
+
+You can also get the full set of files contained within a distribution.  The
+``files()`` function takes a distribution package name and returns all of the
+files installed by this distribution.  Each file object returned is a
+``PackagePath``, a `pathlib.Path`_ derived object with additional ``dist``,
+``size``, and ``hash`` properties as indicated by the metadata.  For example::
+
+    >>> util = [p for p in files('wheel') if 'util.py' in str(p)][0]
+    >>> util
+    PackagePath('wheel/util.py')
+    >>> util.size
+    859
+    >>> util.dist
+    <importlib_metadata._hooks.PathDistribution object at 0x101e0cef0>
+    >>> util.hash
+    <FileHash mode: sha256 value: bYkw5oMccfazVCoYQwKkkemoVyMAFoR34mmKBx8R1NI>
+
+Once you have the file, you can also read its contents::
+
+    >>> print(util.read_text())
+    import base64
+    import sys
+    ...
+    def as_bytes(s):
+        if isinstance(s, text_type):
+            return s.encode('utf-8')
+        return s
+
+
+.. _requirements:
+
+Distribution requirements
+-------------------------
+
+To get the full set of requirements for a distribution, use the ``requires()``
+function.  Note that this returns an iterator::
+
+    >>> list(requires('wheel'))
+    ["pytest (>=3.0.0) ; extra == 'test'"]
+
+
+
+Extending the search algorithm
+==============================
+
+Because package metadata is not available through ``sys.path`` searches, or
+package loaders directly, the metadata for a package is found through import
+system `finders`_.  To find a distribution package's metadata,
+``importlib_metadata`` queries the list of `meta path finders`_ on
+`sys.meta_path`_.
+
+By default ``importlib_metadata`` installs a finder for distribution packages
+found on the file system.  This finder doesn't actually find any *packages*,
+but it can find the packages' metadata.
+
+The abstract class :py:class:`importlib.abc.MetaPathFinder` defines the
+interface expected of finders by Python's import system.
+``importlib_metadata`` extends this protocol by looking for an optional
+``find_distributions`` callable on the finders from
+``sys.meta_path``.  If the finder has this method, it must return
+an iterator over instances of the ``Distribution`` abstract class. This
+method must have the signature::
+
+    def find_distributions(name=None, path=sys.path):
+        """Return an iterable of all Distribution instances capable of
+        loading the metadata for packages matching the name
+        (or all names if not supplied) along the paths in the list
+        of directories ``path`` (defaults to sys.path).
+        """
+
+What this means in practice is that to support finding distribution package
+metadata in locations other than the file system, you should derive from
+``Distribution`` and implement its abstract ``read_text()`` and
+``locate_file()`` methods.  Instances of that ``Distribution`` subclass are
+what your finder's ``find_distributions()`` method should return.
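A minimal sketch of such an extension (all names here are hypothetical, and the distribution's metadata is served from an in-memory dict purely for illustration; a real finder would also implement the regular MetaPathFinder interface):

    import sys
    from mncheck.ext.importlib_metadata.api import Distribution

    FAKE_FILES = {
        'METADATA': 'Metadata-Version: 2.1\nName: demo-pkg\nVersion: 1.0\n',
        }

    class InMemoryDistribution(Distribution):
        def read_text(self, filename):
            # Return the stored text, or None when the file is absent.
            return FAKE_FILES.get(filename)

        def locate_file(self, path):
            # Nothing exists on disk; hand the path back unchanged.
            return path

    class InMemoryFinder:
        @staticmethod
        def find_distributions(name=None, path=sys.path):
            if name in (None, 'demo-pkg'):
                yield InMemoryDistribution()

    sys.meta_path.append(InMemoryFinder())
    # importlib_metadata.version('demo-pkg') now resolves to '1.0'.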
+
+
+.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
+.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api
+.. _`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources
+.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html
+.. _`PEP 566`: https://www.python.org/dev/peps/pep-0566/
+.. _`finders`: https://docs.python.org/3/reference/import.html#finders-and-loaders
+.. _`meta path finders`: https://docs.python.org/3/glossary.html#term-meta-path-finder
+.. _`sys.meta_path`: https://docs.python.org/3/library/sys.html#sys.meta_path
+.. _`pathlib.Path`: https://docs.python.org/3/library/pathlib.html#pathlib.Path
+
+
+.. rubric:: Footnotes
+
+.. [#f1] Technically, the returned distribution metadata object is an
+         `email.message.Message
+         <https://docs.python.org/3/library/email.message.html#email.message.EmailMessage>`_
+         instance, but this is an implementation detail, and not part of the
+         stable API.  You should only use dictionary-like methods and syntax
+         to access the metadata contents.

+ 0 - 0
ext/importlib_metadata/tests/__init__.py → mncheck/ext/importlib_metadata/tests/__init__.py


+ 0 - 0
ext/importlib_metadata/tests/data/__init__.py → mncheck/ext/importlib_metadata/tests/data/__init__.py


+ 35 - 35
ext/importlib_metadata/tests/fixtures.py → mncheck/ext/importlib_metadata/tests/fixtures.py

@@ -1,35 +1,35 @@
-import sys
-import shutil
-import tempfile
-import contextlib
-
-try:
-    from contextlib import ExitStack
-except ImportError:
-    from contextlib2 import ExitStack
-
-try:
-    import pathlib
-except ImportError:
-    import pathlib2 as pathlib
-
-
-__metaclass__ = type
-
-
-class SiteDir:
-    @staticmethod
-    @contextlib.contextmanager
-    def site_dir():
-        tmpdir = tempfile.mkdtemp()
-        sys.path[:0] = [tmpdir]
-        try:
-            yield pathlib.Path(tmpdir)
-        finally:
-            sys.path.remove(tmpdir)
-            shutil.rmtree(tmpdir)
-
-    def setUp(self):
-        self.fixtures = ExitStack()
-        self.addCleanup(self.fixtures.close)
-        self.site_dir = self.fixtures.enter_context(self.site_dir())
+import sys
+import shutil
+import tempfile
+import contextlib
+
+try:
+    from contextlib import ExitStack
+except ImportError:
+    from contextlib2 import ExitStack
+
+try:
+    import pathlib
+except ImportError:
+    import pathlib2 as pathlib
+
+
+__metaclass__ = type
+
+
+class SiteDir:
+    @staticmethod
+    @contextlib.contextmanager
+    def site_dir():
+        tmpdir = tempfile.mkdtemp()
+        sys.path[:0] = [tmpdir]
+        try:
+            yield pathlib.Path(tmpdir)
+        finally:
+            sys.path.remove(tmpdir)
+            shutil.rmtree(tmpdir)
+
+    def setUp(self):
+        self.fixtures = ExitStack()
+        self.addCleanup(self.fixtures.close)
+        self.site_dir = self.fixtures.enter_context(self.site_dir())
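For context, ``SiteDir`` is written as a mixin for ``unittest.TestCase``, which is how the neighbouring test modules use it. A hypothetical stand-alone usage could look like this:

    import sys
    import unittest

    from mncheck.ext.importlib_metadata.tests.fixtures import SiteDir

    class SiteDirExample(SiteDir, unittest.TestCase):
        def test_tmpdir_on_sys_path(self):
            # SiteDir.setUp() created a temp dir and prepended it to sys.path.
            self.assertIn(str(self.site_dir), sys.path)

    if __name__ == '__main__':
        unittest.main()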

+ 154 - 154
ext/importlib_metadata/tests/test_api.py → mncheck/ext/importlib_metadata/tests/test_api.py

@@ -1,154 +1,154 @@
-import re
-import textwrap
-import unittest
-import importlib_metadata
-import packaging.requirements
-
-try:
-    from collections.abc import Iterator
-except ImportError:
-    from collections import Iterator  # noqa: F401
-
-try:
-    from builtins import str as text
-except ImportError:
-    from __builtin__ import unicode as text
-
-
-class APITests(unittest.TestCase):
-    version_pattern = r'\d+\.\d+(\.\d)?'
-
-    def test_retrieves_version_of_self(self):
-        version = importlib_metadata.version('importlib_metadata')
-        assert isinstance(version, text)
-        assert re.match(self.version_pattern, version)
-
-    def test_retrieves_version_of_pip(self):
-        # Assume pip is installed and retrieve the version of pip.
-        version = importlib_metadata.version('pip')
-        assert isinstance(version, text)
-        assert re.match(self.version_pattern, version)
-
-    def test_for_name_does_not_exist(self):
-        with self.assertRaises(importlib_metadata.PackageNotFoundError):
-            importlib_metadata.distribution('does-not-exist')
-
-    def test_for_top_level(self):
-        distribution = importlib_metadata.distribution('importlib_metadata')
-        self.assertEqual(
-            distribution.read_text('top_level.txt').strip(),
-            'importlib_metadata')
-
-    def test_read_text(self):
-        top_level = [
-            path for path in importlib_metadata.files('importlib_metadata')
-            if path.name == 'top_level.txt'
-            ][0]
-        self.assertEqual(top_level.read_text(), 'importlib_metadata\n')
-
-    def test_entry_points(self):
-        scripts = importlib_metadata.entry_points()['console_scripts']
-        scripts = dict(scripts)
-        pip_ep = scripts['pip']
-        # We should probably not be dependent on a third party package's
-        # internal API staying stable.
-        self.assertEqual(pip_ep.value, 'pip._internal:main')
-        self.assertEqual(pip_ep.extras, [])
-
-    def test_metadata_for_this_package(self):
-        md = importlib_metadata.metadata('importlib_metadata')
-        assert md['author'] == 'Barry Warsaw'
-        assert md['LICENSE'] == 'Apache Software License'
-        assert md['Name'] == 'importlib-metadata'
-        classifiers = md.get_all('Classifier')
-        assert 'Topic :: Software Development :: Libraries' in classifiers
-
-    def test_importlib_metadata_version(self):
-        assert re.match(self.version_pattern, importlib_metadata.__version__)
-
-    @staticmethod
-    def _test_files(files_iter):
-        assert isinstance(files_iter, Iterator)
-        files = list(files_iter)
-        root = files[0].root
-        for file in files:
-            assert file.root == root
-            assert not file.hash or file.hash.value
-            assert not file.hash or file.hash.mode == 'sha256'
-            assert not file.size or file.size >= 0
-            assert file.locate().exists()
-            assert isinstance(file.read_binary(), bytes)
-            if file.name.endswith('.py'):
-                file.read_text()
-
-    def test_file_hash_repr(self):
-        try:
-            assertRegex = self.assertRegex
-        except AttributeError:
-            # Python 2
-            assertRegex = self.assertRegexpMatches
-
-        util = [
-            p for p in importlib_metadata.files('wheel')
-            if p.name == 'util.py'
-            ][0]
-        assertRegex(
-            repr(util.hash),
-            '<FileHash mode: sha256 value: .*>')
-
-    def test_files_dist_info(self):
-        self._test_files(importlib_metadata.files('pip'))
-
-    def test_files_egg_info(self):
-        self._test_files(importlib_metadata.files('importlib_metadata'))
-
-    def test_find_local(self):
-        dist = importlib_metadata.api.local_distribution()
-        assert dist.metadata['Name'] == 'importlib-metadata'
-
-    def test_requires(self):
-        deps = importlib_metadata.requires('importlib_metadata')
-        parsed = list(map(packaging.requirements.Requirement, deps))
-        assert all(parsed)
-        assert any(
-            dep.name == 'pathlib2' and dep.marker
-            for dep in parsed
-            )
-
-    def test_requires_dist_info(self):
-        # assume 'packaging' is installed as a wheel with dist-info
-        deps = importlib_metadata.requires('packaging')
-        parsed = list(map(packaging.requirements.Requirement, deps))
-        assert parsed
-
-    def test_more_complex_deps_requires_text(self):
-        requires = textwrap.dedent("""
-            dep1
-            dep2
-
-            [:python_version < "3"]
-            dep3
-
-            [extra1]
-            dep4
-
-            [extra2:python_version < "3"]
-            dep5
-            """)
-        deps = sorted(
-            importlib_metadata.api.Distribution._deps_from_requires_text(
-                requires)
-            )
-        expected = [
-            'dep1',
-            'dep2',
-            'dep3; python_version < "3"',
-            'dep4; extra == "extra1"',
-            'dep5; (python_version < "3") and extra == "extra2"',
-            ]
-        # It's important that the environment marker expression be
-        # wrapped in parentheses to avoid the following 'and' binding more
-        # tightly than some other part of the environment expression.
-
-        assert deps == expected
-        assert all(map(packaging.requirements.Requirement, deps))
+import re
+import textwrap
+import unittest
+import importlib_metadata
+import packaging.requirements
+
+try:
+    from collections.abc import Iterator
+except ImportError:
+    from collections import Iterator  # noqa: F401
+
+try:
+    from builtins import str as text
+except ImportError:
+    from __builtin__ import unicode as text
+
+
+class APITests(unittest.TestCase):
+    version_pattern = r'\d+\.\d+(\.\d)?'
+
+    def test_retrieves_version_of_self(self):
+        version = importlib_metadata.version('importlib_metadata')
+        assert isinstance(version, text)
+        assert re.match(self.version_pattern, version)
+
+    def test_retrieves_version_of_pip(self):
+        # Assume pip is installed and retrieve the version of pip.
+        version = importlib_metadata.version('pip')
+        assert isinstance(version, text)
+        assert re.match(self.version_pattern, version)
+
+    def test_for_name_does_not_exist(self):
+        with self.assertRaises(importlib_metadata.PackageNotFoundError):
+            importlib_metadata.distribution('does-not-exist')
+
+    def test_for_top_level(self):
+        distribution = importlib_metadata.distribution('importlib_metadata')
+        self.assertEqual(
+            distribution.read_text('top_level.txt').strip(),
+            'importlib_metadata')
+
+    def test_read_text(self):
+        top_level = [
+            path for path in importlib_metadata.files('importlib_metadata')
+            if path.name == 'top_level.txt'
+            ][0]
+        self.assertEqual(top_level.read_text(), 'importlib_metadata\n')
+
+    def test_entry_points(self):
+        scripts = importlib_metadata.entry_points()['console_scripts']
+        scripts = dict(scripts)
+        pip_ep = scripts['pip']
+        # We should probably not be dependent on a third party package's
+        # internal API staying stable.
+        self.assertEqual(pip_ep.value, 'pip._internal:main')
+        self.assertEqual(pip_ep.extras, [])
+
+    def test_metadata_for_this_package(self):
+        md = importlib_metadata.metadata('importlib_metadata')
+        assert md['author'] == 'Barry Warsaw'
+        assert md['LICENSE'] == 'Apache Software License'
+        assert md['Name'] == 'importlib-metadata'
+        classifiers = md.get_all('Classifier')
+        assert 'Topic :: Software Development :: Libraries' in classifiers
+
+    def test_importlib_metadata_version(self):
+        assert re.match(self.version_pattern, importlib_metadata.__version__)
+
+    @staticmethod
+    def _test_files(files_iter):
+        assert isinstance(files_iter, Iterator)
+        files = list(files_iter)
+        root = files[0].root
+        for file in files:
+            assert file.root == root
+            assert not file.hash or file.hash.value
+            assert not file.hash or file.hash.mode == 'sha256'
+            assert not file.size or file.size >= 0
+            assert file.locate().exists()
+            assert isinstance(file.read_binary(), bytes)
+            if file.name.endswith('.py'):
+                file.read_text()
+
+    def test_file_hash_repr(self):
+        try:
+            assertRegex = self.assertRegex
+        except AttributeError:
+            # Python 2
+            assertRegex = self.assertRegexpMatches
+
+        util = [
+            p for p in importlib_metadata.files('wheel')
+            if p.name == 'util.py'
+            ][0]
+        assertRegex(
+            repr(util.hash),
+            '<FileHash mode: sha256 value: .*>')
+
+    def test_files_dist_info(self):
+        self._test_files(importlib_metadata.files('pip'))
+
+    def test_files_egg_info(self):
+        self._test_files(importlib_metadata.files('importlib_metadata'))
+
+    def test_find_local(self):
+        dist = importlib_metadata.api.local_distribution()
+        assert dist.metadata['Name'] == 'importlib-metadata'
+
+    def test_requires(self):
+        deps = importlib_metadata.requires('importlib_metadata')
+        parsed = list(map(packaging.requirements.Requirement, deps))
+        assert all(parsed)
+        assert any(
+            dep.name == 'pathlib2' and dep.marker
+            for dep in parsed
+            )
+
+    def test_requires_dist_info(self):
+        # assume 'packaging' is installed as a wheel with dist-info
+        deps = importlib_metadata.requires('packaging')
+        parsed = list(map(packaging.requirements.Requirement, deps))
+        assert parsed
+
+    def test_more_complex_deps_requires_text(self):
+        requires = textwrap.dedent("""
+            dep1
+            dep2
+
+            [:python_version < "3"]
+            dep3
+
+            [extra1]
+            dep4
+
+            [extra2:python_version < "3"]
+            dep5
+            """)
+        deps = sorted(
+            importlib_metadata.api.Distribution._deps_from_requires_text(
+                requires)
+            )
+        expected = [
+            'dep1',
+            'dep2',
+            'dep3; python_version < "3"',
+            'dep4; extra == "extra1"',
+            'dep5; (python_version < "3") and extra == "extra2"',
+            ]
+        # It's important that the environment marker expression be
+        # wrapped in parentheses to avoid the following 'and' binding more
+        # tightly than some other part of the environment expression.
+
+        assert deps == expected
+        assert all(map(packaging.requirements.Requirement, deps))
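
With the vendored packages now living under mncheck/ext, the "update imports" half of the commit message presumably rewrites the plugin's own import statements to the new package path. The hunks above only show the moved files, so the following before/after is a hypothetical illustration of that change, not a quote from the plugin code:

# Before the move (vendored packages at the repository root):
#     from ext.path import Path
#     from ext import yaml
#
# After the move (vendored packages inside the mncheck package):
from mncheck.ext.path import Path
from mncheck.ext import yaml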

+ 155 - 155
ext/importlib_metadata/tests/test_main.py → mncheck/ext/importlib_metadata/tests/test_main.py

@@ -1,155 +1,155 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-import textwrap
-import unittest
-import importlib
-import importlib_metadata
-
-from . import fixtures
-from importlib_metadata import _hooks
-
-try:
-    from builtins import str as text
-except ImportError:
-    from __builtin__ import unicode as text
-
-
-class BasicTests(unittest.TestCase):
-    version_pattern = r'\d+\.\d+(\.\d)?'
-
-    def test_retrieves_version_of_pip(self):
-        # Assume pip is installed and retrieve the version of pip.
-        dist = importlib_metadata.Distribution.from_name('pip')
-        assert isinstance(dist.version, text)
-        assert re.match(self.version_pattern, dist.version)
-
-    def test_for_name_does_not_exist(self):
-        with self.assertRaises(importlib_metadata.PackageNotFoundError):
-            importlib_metadata.Distribution.from_name('does-not-exist')
-
-    def test_new_style_classes(self):
-        self.assertIsInstance(importlib_metadata.Distribution, type)
-        self.assertIsInstance(_hooks.MetadataPathFinder, type)
-        self.assertIsInstance(_hooks.WheelMetadataFinder, type)
-        self.assertIsInstance(_hooks.WheelDistribution, type)
-
-
-class ImportTests(unittest.TestCase):
-    def test_import_nonexistent_module(self):
-        # Ensure that the MetadataPathFinder does not crash an import of a
-        # non-existent module.

-        with self.assertRaises(ImportError):
-            importlib.import_module('does_not_exist')
-
-    def test_resolve(self):
-        scripts = dict(importlib_metadata.entry_points()['console_scripts'])
-        pip_ep = scripts['pip']
-        import pip._internal
-        self.assertEqual(pip_ep.load(), pip._internal.main)
-
-
-class NameNormalizationTests(fixtures.SiteDir, unittest.TestCase):
-    @staticmethod
-    def pkg_with_dashes(site_dir):
-        """
-        Create minimal metadata for a package with dashes
-        in the name (and thus underscores in the filename).
-        """
-        metadata_dir = site_dir / 'my_pkg.dist-info'
-        metadata_dir.mkdir()
-        metadata = metadata_dir / 'METADATA'
-        with metadata.open('w') as strm:
-            strm.write('Version: 1.0\n')
-        return 'my-pkg'
-
-    def test_dashes_in_dist_name_found_as_underscores(self):
-        """
-        For a package with a dash in the name, the dist-info metadata
-        uses underscores in the name. Ensure the metadata loads.
-        """
-        pkg_name = self.pkg_with_dashes(self.site_dir)
-        assert importlib_metadata.version(pkg_name) == '1.0'
-
-    @staticmethod
-    def pkg_with_mixed_case(site_dir):
-        """
-        Create minimal metadata for a package with mixed case
-        in the name.
-        """
-        metadata_dir = site_dir / 'CherryPy.dist-info'
-        metadata_dir.mkdir()
-        metadata = metadata_dir / 'METADATA'
-        with metadata.open('w') as strm:
-            strm.write('Version: 1.0\n')
-        return 'CherryPy'
-
-    def test_dist_name_found_as_any_case(self):
-        """
-        Ensure the metadata loads when queried with any case.
-        """
-        pkg_name = self.pkg_with_mixed_case(self.site_dir)
-        assert importlib_metadata.version(pkg_name) == '1.0'
-        assert importlib_metadata.version(pkg_name.lower()) == '1.0'
-        assert importlib_metadata.version(pkg_name.upper()) == '1.0'
-
-
-class NonASCIITests(fixtures.SiteDir, unittest.TestCase):
-    @staticmethod
-    def pkg_with_non_ascii_description(site_dir):
-        """
-        Create minimal metadata for a package with non-ASCII in
-        the description.
-        """
-        metadata_dir = site_dir / 'portend.dist-info'
-        metadata_dir.mkdir()
-        metadata = metadata_dir / 'METADATA'
-        with metadata.open('w', encoding='utf-8') as fp:
-            fp.write('Description: pôrˈtend\n')
-        return 'portend'
-
-    @staticmethod
-    def pkg_with_non_ascii_description_egg_info(site_dir):
-        """
-        Create minimal metadata for an egg-info package with
-        non-ASCII in the description.
-        """
-        metadata_dir = site_dir / 'portend.dist-info'
-        metadata_dir.mkdir()
-        metadata = metadata_dir / 'METADATA'
-        with metadata.open('w', encoding='utf-8') as fp:
-            fp.write(textwrap.dedent("""
-                Name: portend
-
-                pôrˈtend
-                """).lstrip())
-        return 'portend'
-
-    def test_metadata_loads(self):
-        pkg_name = self.pkg_with_non_ascii_description(self.site_dir)
-        meta = importlib_metadata.metadata(pkg_name)
-        assert meta['Description'] == 'pôrˈtend'
-
-    def test_metadata_loads_egg_info(self):
-        pkg_name = self.pkg_with_non_ascii_description_egg_info(self.site_dir)
-        meta = importlib_metadata.metadata(pkg_name)
-        assert meta.get_payload() == 'pôrˈtend\n'
-
-
-class DiscoveryTests(unittest.TestCase):
-
-    def test_package_discovery(self):
-        dists = list(importlib_metadata.api.distributions())
-        assert all(
-            isinstance(dist, importlib_metadata.Distribution)
-            for dist in dists
-            )
-        assert any(
-            dist.metadata['Name'] == 'importlib-metadata'
-            for dist in dists
-            )
-        assert any(
-            dist.metadata['Name'] == 'pip'
-            for dist in dists
-            )
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import textwrap
+import unittest
+import importlib
+import importlib_metadata
+
+from . import fixtures
+from importlib_metadata import _hooks
+
+try:
+    from builtins import str as text
+except ImportError:
+    from __builtin__ import unicode as text
+
+
+class BasicTests(unittest.TestCase):
+    version_pattern = r'\d+\.\d+(\.\d)?'
+
+    def test_retrieves_version_of_pip(self):
+        # Assume pip is installed and retrieve the version of pip.
+        dist = importlib_metadata.Distribution.from_name('pip')
+        assert isinstance(dist.version, text)
+        assert re.match(self.version_pattern, dist.version)
+
+    def test_for_name_does_not_exist(self):
+        with self.assertRaises(importlib_metadata.PackageNotFoundError):
+            importlib_metadata.Distribution.from_name('does-not-exist')
+
+    def test_new_style_classes(self):
+        self.assertIsInstance(importlib_metadata.Distribution, type)
+        self.assertIsInstance(_hooks.MetadataPathFinder, type)
+        self.assertIsInstance(_hooks.WheelMetadataFinder, type)
+        self.assertIsInstance(_hooks.WheelDistribution, type)
+
+
+class ImportTests(unittest.TestCase):
+    def test_import_nonexistent_module(self):
+        # Ensure that the MetadataPathFinder does not crash an import of a
+        # non-existent module.
+        with self.assertRaises(ImportError):
+            importlib.import_module('does_not_exist')
+
+    def test_resolve(self):
+        scripts = dict(importlib_metadata.entry_points()['console_scripts'])
+        pip_ep = scripts['pip']
+        import pip._internal
+        self.assertEqual(pip_ep.load(), pip._internal.main)
+
+
+class NameNormalizationTests(fixtures.SiteDir, unittest.TestCase):
+    @staticmethod
+    def pkg_with_dashes(site_dir):
+        """
+        Create minimal metadata for a package with dashes
+        in the name (and thus underscores in the filename).
+        """
+        metadata_dir = site_dir / 'my_pkg.dist-info'
+        metadata_dir.mkdir()
+        metadata = metadata_dir / 'METADATA'
+        with metadata.open('w') as strm:
+            strm.write('Version: 1.0\n')
+        return 'my-pkg'
+
+    def test_dashes_in_dist_name_found_as_underscores(self):
+        """
+        For a package with a dash in the name, the dist-info metadata
+        uses underscores in the name. Ensure the metadata loads.
+        """
+        pkg_name = self.pkg_with_dashes(self.site_dir)
+        assert importlib_metadata.version(pkg_name) == '1.0'
+
+    @staticmethod
+    def pkg_with_mixed_case(site_dir):
+        """
+        Create minimal metadata for a package with mixed case
+        in the name.
+        """
+        metadata_dir = site_dir / 'CherryPy.dist-info'
+        metadata_dir.mkdir()
+        metadata = metadata_dir / 'METADATA'
+        with metadata.open('w') as strm:
+            strm.write('Version: 1.0\n')
+        return 'CherryPy'
+
+    def test_dist_name_found_as_any_case(self):
+        """
+        Ensure the metadata loads when queried with any case.
+        """
+        pkg_name = self.pkg_with_mixed_case(self.site_dir)
+        assert importlib_metadata.version(pkg_name) == '1.0'
+        assert importlib_metadata.version(pkg_name.lower()) == '1.0'
+        assert importlib_metadata.version(pkg_name.upper()) == '1.0'
+
+
+class NonASCIITests(fixtures.SiteDir, unittest.TestCase):
+    @staticmethod
+    def pkg_with_non_ascii_description(site_dir):
+        """
+        Create minimal metadata for a package with non-ASCII in
+        the description.
+        """
+        metadata_dir = site_dir / 'portend.dist-info'
+        metadata_dir.mkdir()
+        metadata = metadata_dir / 'METADATA'
+        with metadata.open('w', encoding='utf-8') as fp:
+            fp.write('Description: pôrˈtend\n')
+        return 'portend'
+
+    @staticmethod
+    def pkg_with_non_ascii_description_egg_info(site_dir):
+        """
+        Create minimal metadata for an egg-info package with
+        non-ASCII in the description.
+        """
+        metadata_dir = site_dir / 'portend.dist-info'
+        metadata_dir.mkdir()
+        metadata = metadata_dir / 'METADATA'
+        with metadata.open('w', encoding='utf-8') as fp:
+            fp.write(textwrap.dedent("""
+                Name: portend
+
+                pôrˈtend
+                """).lstrip())
+        return 'portend'
+
+    def test_metadata_loads(self):
+        pkg_name = self.pkg_with_non_ascii_description(self.site_dir)
+        meta = importlib_metadata.metadata(pkg_name)
+        assert meta['Description'] == 'pôrˈtend'
+
+    def test_metadata_loads_egg_info(self):
+        pkg_name = self.pkg_with_non_ascii_description_egg_info(self.site_dir)
+        meta = importlib_metadata.metadata(pkg_name)
+        assert meta.get_payload() == 'pôrˈtend\n'
+
+
+class DiscoveryTests(unittest.TestCase):
+
+    def test_package_discovery(self):
+        dists = list(importlib_metadata.api.distributions())
+        assert all(
+            isinstance(dist, importlib_metadata.Distribution)
+            for dist in dists
+            )
+        assert any(
+            dist.metadata['Name'] == 'importlib-metadata'
+            for dist in dists
+            )
+        assert any(
+            dist.metadata['Name'] == 'pip'
+            for dist in dists
+            )
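
The relocated tests above still import importlib_metadata as a top-level name (`import importlib_metadata`, `from importlib_metadata import _hooks`), so the vendored mncheck/ext directory has to be reachable on sys.path for them to run. A minimal shim that would achieve this, assuming it lives in a module inside the mncheck package (illustrative only, not shown in this commit):

import os
import sys

# Absolute path of the vendored mncheck/ext directory, relative to the
# module containing this shim.
_EXT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ext')

# Prepend it once so `import importlib_metadata`, `import yaml`, etc.
# resolve to the bundled copies rather than any system-wide install.
if _EXT_DIR not in sys.path:
    sys.path.insert(0, _EXT_DIR)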

+ 48 - 48
ext/importlib_metadata/tests/test_zip.py → mncheck/ext/importlib_metadata/tests/test_zip.py

@@ -1,48 +1,48 @@
-import sys
-import unittest
-import importlib_metadata
-
-from importlib_resources import path
-
-try:
-    from contextlib import ExitStack
-except ImportError:
-    from contextlib2 import ExitStack
-
-
-class BespokeLoader:
-    archive = 'bespoke'
-
-
-class TestZip(unittest.TestCase):
-    def setUp(self):
-        # Find the path to the example.*.whl so we can add it to the front of
-        # sys.path, where we'll then try to find the metadata thereof.
-        self.resources = ExitStack()
-        self.addCleanup(self.resources.close)
-        wheel = self.resources.enter_context(
-            path('importlib_metadata.tests.data',
-                 'example-21.12-py3-none-any.whl'))
-        sys.path.insert(0, str(wheel))
-        self.resources.callback(sys.path.pop, 0)
-
-    def test_zip_version(self):
-        self.assertEqual(importlib_metadata.version('example'), '21.12')
-
-    def test_zip_entry_points(self):
-        scripts = dict(importlib_metadata.entry_points()['console_scripts'])
-        entry_point = scripts['example']
-        self.assertEqual(entry_point.value, 'example:main')
-
-    def test_missing_metadata(self):
-        distribution = importlib_metadata.distribution('example')
-        self.assertIsNone(distribution.read_text('does not exist'))
-
-    def test_case_insensitive(self):
-        self.assertEqual(importlib_metadata.version('Example'), '21.12')
-
-    def test_files(self):
-        files = importlib_metadata.files('example')
-        for file in files:
-            path = str(file.dist.locate_file(file))
-            assert '.whl/' in path, path
+import sys
+import unittest
+import importlib_metadata
+
+from importlib_resources import path
+
+try:
+    from contextlib import ExitStack
+except ImportError:
+    from contextlib2 import ExitStack
+
+
+class BespokeLoader:
+    archive = 'bespoke'
+
+
+class TestZip(unittest.TestCase):
+    def setUp(self):
+        # Find the path to the example.*.whl so we can add it to the front of
+        # sys.path, where we'll then try to find the metadata thereof.
+        self.resources = ExitStack()
+        self.addCleanup(self.resources.close)
+        wheel = self.resources.enter_context(
+            path('importlib_metadata.tests.data',
+                 'example-21.12-py3-none-any.whl'))
+        sys.path.insert(0, str(wheel))
+        self.resources.callback(sys.path.pop, 0)
+
+    def test_zip_version(self):
+        self.assertEqual(importlib_metadata.version('example'), '21.12')
+
+    def test_zip_entry_points(self):
+        scripts = dict(importlib_metadata.entry_points()['console_scripts'])
+        entry_point = scripts['example']
+        self.assertEqual(entry_point.value, 'example:main')
+
+    def test_missing_metadata(self):
+        distribution = importlib_metadata.distribution('example')
+        self.assertIsNone(distribution.read_text('does not exist'))
+
+    def test_case_insensitive(self):
+        self.assertEqual(importlib_metadata.version('Example'), '21.12')
+
+    def test_files(self):
+        files = importlib_metadata.files('example')
+        for file in files:
+            path = str(file.dist.locate_file(file))
+            assert '.whl/' in path, path
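
TestZip.setUp above pairs an ExitStack with unittest's addCleanup so that the wheel pushed onto the front of sys.path is popped again after every test, even when the test fails. The same pattern works for any temporary sys.path entry; a small self-contained sketch, with 'some_directory' standing in as a placeholder:

import sys
import unittest
from contextlib import ExitStack


class TempSysPathExample(unittest.TestCase):
    def setUp(self):
        # Closing the stack runs every registered callback, and
        # addCleanup guarantees the close happens after each test.
        self.resources = ExitStack()
        self.addCleanup(self.resources.close)
        # 'some_directory' is a placeholder for whatever needs to be
        # importable only for the duration of the test.
        sys.path.insert(0, 'some_directory')
        self.resources.callback(sys.path.pop, 0)

    def test_placeholder(self):
        self.assertIn('some_directory', sys.path)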

+ 1969 - 1969
ext/path.py → mncheck/ext/path.py

@@ -1,1969 +1,1969 @@
-#
-# Copyright (c) 2010 Mikhail Gusarov
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-#
-
-"""
-path.py - An object representing a path to a file or directory.
-
-https://github.com/jaraco/path.py
-
-Example::
-
-    from path import Path
-    d = Path('/home/guido/bin')
-
-    # Globbing
-    for f in d.files('*.py'):
-        f.chmod(0o755)
-
-    # Changing the working directory:
-    with Path("somewhere"):
-        # cwd is now `somewhere`
-        ...
-
-    # Concatenate paths with /
-    foo_txt = Path("bar") / "foo.txt"
-"""
-
-from __future__ import unicode_literals
-
-import sys
-import warnings
-import os
-import fnmatch
-import glob
-import shutil
-import hashlib
-import errno
-import tempfile
-import functools
-import operator
-import re
-import contextlib
-import io
-import distutils.dir_util
-import importlib
-import itertools
-
-try:
-    import win32security
-except ImportError:
-    pass
-
-try:
-    import pwd
-except ImportError:
-    pass
-
-try:
-    import grp
-except ImportError:
-    pass
-
-##############################################################################
-# Python 2/3 support
-PY3 = sys.version_info >= (3,)
-PY2 = not PY3
-
-string_types = str,
-text_type = str
-getcwdu = os.getcwd
-
-
-if PY2:
-    import __builtin__
-    string_types = __builtin__.basestring,
-    text_type = __builtin__.unicode
-    getcwdu = os.getcwdu
-    map = itertools.imap
-
-
-@contextlib.contextmanager
-def io_error_compat():
-    try:
-        yield
-    except IOError as io_err:
-        # On Python 2, io.open raises IOError; transform to OSError for
-        # future compatibility.
-        os_err = OSError(*io_err.args)
-        os_err.filename = getattr(io_err, 'filename', None)
-        raise os_err
-
-##############################################################################
-
-
-__all__ = ['Path', 'TempDir', 'CaseInsensitivePattern']
-
-
-LINESEPS = ['\r\n', '\r', '\n']
-U_LINESEPS = LINESEPS + ['\u0085', '\u2028', '\u2029']
-NEWLINE = re.compile('|'.join(LINESEPS))
-U_NEWLINE = re.compile('|'.join(U_LINESEPS))
-NL_END = re.compile(r'(?:{0})$'.format(NEWLINE.pattern))
-U_NL_END = re.compile(r'(?:{0})$'.format(U_NEWLINE.pattern))
-
-
-try:
-    import pkg_resources
-    __version__ = pkg_resources.require('path.py')[0].version
-except Exception:
-    __version__ = 'unknown'
-
-
-class TreeWalkWarning(Warning):
-    pass
-
-
-# from jaraco.functools
-def compose(*funcs):
-    compose_two = lambda f1, f2: lambda *args, **kwargs: f1(f2(*args, **kwargs))  # noqa
-    return functools.reduce(compose_two, funcs)
-
-
-def simple_cache(func):
-    """
-    Save results for the :meth:'path.using_module' classmethod.
-    When Python 3.2 is available, use functools.lru_cache instead.
-    """
-    saved_results = {}
-
-    def wrapper(cls, module):
-        if module in saved_results:
-            return saved_results[module]
-        saved_results[module] = func(cls, module)
-        return saved_results[module]
-    return wrapper
-
-
-class ClassProperty(property):
-    def __get__(self, cls, owner):
-        return self.fget.__get__(None, owner)()
-
-
-class multimethod(object):
-    """
-    Acts like a classmethod when invoked from the class and like an
-    instancemethod when invoked from the instance.
-    """
-    def __init__(self, func):
-        self.func = func
-
-    def __get__(self, instance, owner):
-        return (
-            functools.partial(self.func, owner) if instance is None
-            else functools.partial(self.func, owner, instance)
-        )
-
-
-class Path(text_type):
-    """
-    Represents a filesystem path.
-
-    For documentation on individual methods, consult their
-    counterparts in :mod:`os.path`.
-
-    Some methods are additionally included from :mod:`shutil`.
-    The functions are linked directly into the class namespace
-    such that they will be bound to the Path instance. For example,
-    ``Path(src).copy(target)`` is equivalent to
-    ``shutil.copy(src, target)``. Therefore, when referencing
-    the docs for these methods, assume `src` references `self`,
-    the Path instance.
-    """
-
-    module = os.path
-    """ The path module to use for path operations.
-
-    .. seealso:: :mod:`os.path`
-    """
-
-    def __init__(self, other=''):
-        if other is None:
-            raise TypeError("Invalid initial value for path: None")
-
-    @classmethod
-    @simple_cache
-    def using_module(cls, module):
-        subclass_name = cls.__name__ + '_' + module.__name__
-        if PY2:
-            subclass_name = str(subclass_name)
-        bases = (cls,)
-        ns = {'module': module}
-        return type(subclass_name, bases, ns)
-
-    @ClassProperty
-    @classmethod
-    def _next_class(cls):
-        """
-        What class should be used to construct new instances from this class
-        """
-        return cls
-
-    # --- Special Python methods.
-
-    def __repr__(self):
-        return '%s(%s)' % (type(self).__name__, super(Path, self).__repr__())
-
-    # Adding a Path and a string yields a Path.
-    def __add__(self, more):
-        try:
-            return self._next_class(super(Path, self).__add__(more))
-        except TypeError:  # Python bug
-            return NotImplemented
-
-    def __radd__(self, other):
-        if not isinstance(other, string_types):
-            return NotImplemented
-        return self._next_class(other.__add__(self))
-
-    # The / operator joins Paths.
-    def __div__(self, rel):
-        """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
-
-        Join two path components, adding a separator character if
-        needed.
-
-        .. seealso:: :func:`os.path.join`
-        """
-        return self._next_class(self.module.join(self, rel))
-
-    # Make the / operator work even when true division is enabled.
-    __truediv__ = __div__
-
-    # The / operator joins Paths the other way around
-    def __rdiv__(self, rel):
-        """ fp.__rdiv__(rel) == rel / fp
-
-        Join two path components, adding a separator character if
-        needed.
-
-        .. seealso:: :func:`os.path.join`
-        """
-        return self._next_class(self.module.join(rel, self))
-
-    # Make the / operator work even when true division is enabled.
-    __rtruediv__ = __rdiv__
-
-    def __enter__(self):
-        self._old_dir = self.getcwd()
-        os.chdir(self)
-        return self
-
-    def __exit__(self, *_):
-        os.chdir(self._old_dir)
-
-    def __fspath__(self):
-        return self
-
-    @classmethod
-    def getcwd(cls):
-        """ Return the current working directory as a path object.
-
-        .. seealso:: :func:`os.getcwdu`
-        """
-        return cls(getcwdu())
-
-    #
-    # --- Operations on Path strings.
-
-    def abspath(self):
-        """ .. seealso:: :func:`os.path.abspath` """
-        return self._next_class(self.module.abspath(self))
-
-    def normcase(self):
-        """ .. seealso:: :func:`os.path.normcase` """
-        return self._next_class(self.module.normcase(self))
-
-    def normpath(self):
-        """ .. seealso:: :func:`os.path.normpath` """
-        return self._next_class(self.module.normpath(self))
-
-    def realpath(self):
-        """ .. seealso:: :func:`os.path.realpath` """
-        return self._next_class(self.module.realpath(self))
-
-    def expanduser(self):
-        """ .. seealso:: :func:`os.path.expanduser` """
-        return self._next_class(self.module.expanduser(self))
-
-    def expandvars(self):
-        """ .. seealso:: :func:`os.path.expandvars` """
-        return self._next_class(self.module.expandvars(self))
-
-    def dirname(self):
-        """ .. seealso:: :attr:`parent`, :func:`os.path.dirname` """
-        return self._next_class(self.module.dirname(self))
-
-    def basename(self):
-        """ .. seealso:: :attr:`name`, :func:`os.path.basename` """
-        return self._next_class(self.module.basename(self))
-
-    def expand(self):
-        """ Clean up a filename by calling :meth:`expandvars()`,
-        :meth:`expanduser()`, and :meth:`normpath()` on it.
-
-        This is commonly everything needed to clean up a filename
-        read from a configuration file, for example.
-        """
-        return self.expandvars().expanduser().normpath()
-
-    @property
-    def stem(self):
-        """ The same as :meth:`name`, but with one file extension stripped off.
-
-        >>> Path('/home/guido/python.tar.gz').stem
-        'python.tar'
-        """
-        base, ext = self.module.splitext(self.name)
-        return base
-
-    @property
-    def namebase(self):
-        warnings.warn("Use .stem instead of .namebase", DeprecationWarning)
-        return self.stem
-
-    @property
-    def ext(self):
-        """ The file extension, for example ``'.py'``. """
-        f, ext = self.module.splitext(self)
-        return ext
-
-    def with_suffix(self, suffix):
-        """ Return a new path with the file suffix changed (or added, if none)
-
-        >>> Path('/home/guido/python.tar.gz').with_suffix(".foo")
-        Path('/home/guido/python.tar.foo')
-
-        >>> Path('python').with_suffix('.zip')
-        Path('python.zip')
-
-        >>> Path('filename.ext').with_suffix('zip')
-        Traceback (most recent call last):
-        ...
-        ValueError: Invalid suffix 'zip'
-        """
-        if not suffix.startswith('.'):
-            raise ValueError("Invalid suffix {suffix!r}".format(**locals()))
-
-        return self.stripext() + suffix
-
-    @property
-    def drive(self):
-        """ The drive specifier, for example ``'C:'``.
-
-        This is always empty on systems that don't use drive specifiers.
-        """
-        drive, r = self.module.splitdrive(self)
-        return self._next_class(drive)
-
-    parent = property(
-        dirname, None, None,
-        """ This path's parent directory, as a new Path object.
-
-        For example,
-        ``Path('/usr/local/lib/libpython.so').parent ==
-        Path('/usr/local/lib')``
-
-        .. seealso:: :meth:`dirname`, :func:`os.path.dirname`
-        """)
-
-    name = property(
-        basename, None, None,
-        """ The name of this file or directory without the full path.
-
-        For example,
-        ``Path('/usr/local/lib/libpython.so').name == 'libpython.so'``
-
-        .. seealso:: :meth:`basename`, :func:`os.path.basename`
-        """)
-
-    def splitpath(self):
-        """ p.splitpath() -> Return ``(p.parent, p.name)``.
-
-        .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split`
-        """
-        parent, child = self.module.split(self)
-        return self._next_class(parent), child
-
-    def splitdrive(self):
-        """ p.splitdrive() -> Return ``(p.drive, <the rest of p>)``.
-
-        Split the drive specifier from this path.  If there is
-        no drive specifier, :samp:`{p.drive}` is empty, so the return value
-        is simply ``(Path(''), p)``.  This is always the case on Unix.
-
-        .. seealso:: :func:`os.path.splitdrive`
-        """
-        drive, rel = self.module.splitdrive(self)
-        return self._next_class(drive), rel
-
-    def splitext(self):
-        """ p.splitext() -> Return ``(p.stripext(), p.ext)``.
-
-        Split the filename extension from this path and return
-        the two parts.  Either part may be empty.
-
-        The extension is everything from ``'.'`` to the end of the
-        last path segment.  This has the property that if
-        ``(a, b) == p.splitext()``, then ``a + b == p``.
-
-        .. seealso:: :func:`os.path.splitext`
-        """
-        filename, ext = self.module.splitext(self)
-        return self._next_class(filename), ext
-
-    def stripext(self):
-        """ p.stripext() -> Remove one file extension from the path.
-
-        For example, ``Path('/home/guido/python.tar.gz').stripext()``
-        returns ``Path('/home/guido/python.tar')``.
-        """
-        return self.splitext()[0]
-
-    def splitunc(self):
-        """ .. seealso:: :func:`os.path.splitunc` """
-        unc, rest = self.module.splitunc(self)
-        return self._next_class(unc), rest
-
-    @property
-    def uncshare(self):
-        """
-        The UNC mount point for this path.
-        This is empty for paths on local drives.
-        """
-        unc, r = self.module.splitunc(self)
-        return self._next_class(unc)
-
-    @multimethod
-    def joinpath(cls, first, *others):
-        """
-        Join first to zero or more :class:`Path` components,
-        adding a separator character (:samp:`{first}.module.sep`)
-        if needed.  Returns a new instance of
-        :samp:`{first}._next_class`.
-
-        .. seealso:: :func:`os.path.join`
-        """
-        if not isinstance(first, cls):
-            first = cls(first)
-        return first._next_class(first.module.join(first, *others))
-
-    def splitall(self):
-        r""" Return a list of the path components in this path.
-
-        The first item in the list will be a Path.  Its value will be
-        either :data:`os.curdir`, :data:`os.pardir`, empty, or the root
-        directory of this path (for example, ``'/'`` or ``'C:\\'``).  The
-        other items in the list will be strings.
-
-        ``path.Path.joinpath(*result)`` will yield the original path.
-        """
-        parts = []
-        loc = self
-        while loc != os.curdir and loc != os.pardir:
-            prev = loc
-            loc, child = prev.splitpath()
-            if loc == prev:
-                break
-            parts.append(child)
-        parts.append(loc)
-        parts.reverse()
-        return parts
-
-    def relpath(self, start='.'):
-        """ Return this path as a relative path,
-        based from `start`, which defaults to the current working directory.
-        """
-        cwd = self._next_class(start)
-        return cwd.relpathto(self)
-
-    def relpathto(self, dest):
-        """ Return a relative path from `self` to `dest`.
-
-        If there is no relative path from `self` to `dest`, for example if
-        they reside on different drives in Windows, then this returns
-        ``dest.abspath()``.
-        """
-        origin = self.abspath()
-        dest = self._next_class(dest).abspath()
-
-        orig_list = origin.normcase().splitall()
-        # Don't normcase dest!  We want to preserve the case.
-        dest_list = dest.splitall()
-
-        if orig_list[0] != self.module.normcase(dest_list[0]):
-            # Can't get here from there.
-            return dest
-
-        # Find the location where the two paths start to differ.
-        i = 0
-        for start_seg, dest_seg in zip(orig_list, dest_list):
-            if start_seg != self.module.normcase(dest_seg):
-                break
-            i += 1
-
-        # Now i is the point where the two paths diverge.
-        # Need a certain number of "os.pardir"s to work up
-        # from the origin to the point of divergence.
-        segments = [os.pardir] * (len(orig_list) - i)
-        # Need to add the diverging part of dest_list.
-        segments += dest_list[i:]
-        if len(segments) == 0:
-            # If they happen to be identical, use os.curdir.
-            relpath = os.curdir
-        else:
-            relpath = self.module.join(*segments)
-        return self._next_class(relpath)
-
-    # --- Listing, searching, walking, and matching
-
-    def listdir(self, pattern=None):
-        """ D.listdir() -> List of items in this directory.
-
-        Use :meth:`files` or :meth:`dirs` instead if you want a listing
-        of just files or just subdirectories.
-
-        The elements of the list are Path objects.
-
-        With the optional `pattern` argument, this only lists
-        items whose names match the given pattern.
-
-        .. seealso:: :meth:`files`, :meth:`dirs`
-        """
-        if pattern is None:
-            pattern = '*'
-        return [
-            self / child
-            for child in os.listdir(self)
-            if self._next_class(child).fnmatch(pattern)
-        ]
-
-    def dirs(self, pattern=None):
-        """ D.dirs() -> List of this directory's subdirectories.
-
-        The elements of the list are Path objects.
-        This does not walk recursively into subdirectories
-        (but see :meth:`walkdirs`).
-
-        With the optional `pattern` argument, this only lists
-        directories whose names match the given pattern.  For
-        example, ``d.dirs('build-*')``.
-        """
-        return [p for p in self.listdir(pattern) if p.isdir()]
-
-    def files(self, pattern=None):
-        """ D.files() -> List of the files in this directory.
-
-        The elements of the list are Path objects.
-        This does not walk into subdirectories (see :meth:`walkfiles`).
-
-        With the optional `pattern` argument, this only lists files
-        whose names match the given pattern.  For example,
-        ``d.files('*.pyc')``.
-        """
-
-        return [p for p in self.listdir(pattern) if p.isfile()]
-
-    def walk(self, pattern=None, errors='strict'):
-        """ D.walk() -> iterator over files and subdirs, recursively.
-
-        The iterator yields Path objects naming each child item of
-        this directory and its descendants.  This requires that
-        ``D.isdir()``.
-
-        This performs a depth-first traversal of the directory tree.
-        Each directory is returned just before all its children.
-
-        The `errors=` keyword argument controls behavior when an
-        error occurs.  The default is ``'strict'``, which causes an
-        exception.  Other allowed values are ``'warn'`` (which
-        reports the error via :func:`warnings.warn()`), and ``'ignore'``.
-        `errors` may also be an arbitrary callable taking a msg parameter.
-        """
-        class Handlers:
-            def strict(msg):
-                raise
-
-            def warn(msg):
-                warnings.warn(msg, TreeWalkWarning)
-
-            def ignore(msg):
-                pass
-
-        if not callable(errors) and errors not in vars(Handlers):
-            raise ValueError("invalid errors parameter")
-        errors = vars(Handlers).get(errors, errors)
-
-        try:
-            childList = self.listdir()
-        except Exception:
-            exc = sys.exc_info()[1]
-            tmpl = "Unable to list directory '%(self)s': %(exc)s"
-            msg = tmpl % locals()
-            errors(msg)
-            return
-
-        for child in childList:
-            if pattern is None or child.fnmatch(pattern):
-                yield child
-            try:
-                isdir = child.isdir()
-            except Exception:
-                exc = sys.exc_info()[1]
-                tmpl = "Unable to access '%(child)s': %(exc)s"
-                msg = tmpl % locals()
-                errors(msg)
-                isdir = False
-
-            if isdir:
-                for item in child.walk(pattern, errors):
-                    yield item
-
-    def walkdirs(self, pattern=None, errors='strict'):
-        """ D.walkdirs() -> iterator over subdirs, recursively.
-
-        With the optional `pattern` argument, this yields only
-        directories whose names match the given pattern.  For
-        example, ``mydir.walkdirs('*test')`` yields only directories
-        with names ending in ``'test'``.
-
-        The `errors=` keyword argument controls behavior when an
-        error occurs.  The default is ``'strict'``, which causes an
-        exception.  The other allowed values are ``'warn'`` (which
-        reports the error via :func:`warnings.warn()`), and ``'ignore'``.
-        """
-        if errors not in ('strict', 'warn', 'ignore'):
-            raise ValueError("invalid errors parameter")
-
-        try:
-            dirs = self.dirs()
-        except Exception:
-            if errors == 'ignore':
-                return
-            elif errors == 'warn':
-                warnings.warn(
-                    "Unable to list directory '%s': %s"
-                    % (self, sys.exc_info()[1]),
-                    TreeWalkWarning)
-                return
-            else:
-                raise
-
-        for child in dirs:
-            if pattern is None or child.fnmatch(pattern):
-                yield child
-            for subsubdir in child.walkdirs(pattern, errors):
-                yield subsubdir
-
-    def walkfiles(self, pattern=None, errors='strict'):
-        """ D.walkfiles() -> iterator over files in D, recursively.
-
-        The optional argument `pattern` limits the results to files
-        with names that match the pattern.  For example,
-        ``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
-        extension.
-        """
-        if errors not in ('strict', 'warn', 'ignore'):
-            raise ValueError("invalid errors parameter")
-
-        try:
-            childList = self.listdir()
-        except Exception:
-            if errors == 'ignore':
-                return
-            elif errors == 'warn':
-                warnings.warn(
-                    "Unable to list directory '%s': %s"
-                    % (self, sys.exc_info()[1]),
-                    TreeWalkWarning)
-                return
-            else:
-                raise
-
-        for child in childList:
-            try:
-                isfile = child.isfile()
-                isdir = not isfile and child.isdir()
-            except Exception:
-                if errors == 'ignore':
-                    continue
-                elif errors == 'warn':
-                    warnings.warn(
-                        "Unable to access '%s': %s"
-                        % (self, sys.exc_info()[1]),
-                        TreeWalkWarning)
-                    continue
-                else:
-                    raise
-
-            if isfile:
-                if pattern is None or child.fnmatch(pattern):
-                    yield child
-            elif isdir:
-                for f in child.walkfiles(pattern, errors):
-                    yield f
-
-    def fnmatch(self, pattern, normcase=None):
-        """ Return ``True`` if `self.name` matches the given `pattern`.
-
-        `pattern` - A filename pattern with wildcards,
-            for example ``'*.py'``. If the pattern contains a `normcase`
-            attribute, it is applied to the name and path prior to comparison.
-
-        `normcase` - (optional) A function used to normalize the pattern and
-            filename before matching. Defaults to :meth:`self.module`, which
-            defaults to :meth:`os.path.normcase`.
-
-        .. seealso:: :func:`fnmatch.fnmatch`
-        """
-        default_normcase = getattr(pattern, 'normcase', self.module.normcase)
-        normcase = normcase or default_normcase
-        name = normcase(self.name)
-        pattern = normcase(pattern)
-        return fnmatch.fnmatchcase(name, pattern)
-
-    def glob(self, pattern):
-        """ Return a list of Path objects that match the pattern.
-
-        `pattern` - a path relative to this directory, with wildcards.
-
-        For example, ``Path('/users').glob('*/bin/*')`` returns a list
-        of all the files users have in their :file:`bin` directories.
-
-        .. seealso:: :func:`glob.glob`
-
-        .. note:: Glob is **not** recursive, even when using ``**``.
-                  To do recursive globbing see :func:`walk`,
-                  :func:`walkdirs` or :func:`walkfiles`.
-        """
-        cls = self._next_class
-        return [cls(s) for s in glob.glob(self / pattern)]
-
-    def iglob(self, pattern):
-        """ Return an iterator of Path objects that match the pattern.
-
-        `pattern` - a path relative to this directory, with wildcards.
-
-        For example, ``Path('/users').iglob('*/bin/*')`` returns an
-        iterator of all the files users have in their :file:`bin`
-        directories.
-
-        .. seealso:: :func:`glob.iglob`
-
-        .. note:: Glob is **not** recursive, even when using ``**``.
-                  To do recursive globbing see :func:`walk`,
-                  :func:`walkdirs` or :func:`walkfiles`.
-        """
-        cls = self._next_class
-        return (cls(s) for s in glob.iglob(self / pattern))
-
-    #
-    # --- Reading or writing an entire file at once.
-
-    def open(self, *args, **kwargs):
-        """ Open this file and return a corresponding :class:`file` object.
-
-        Keyword arguments work as in :func:`io.open`.  If the file cannot be
-        opened, an :class:`~exceptions.OSError` is raised.
-        """
-        with io_error_compat():
-            return io.open(self, *args, **kwargs)
-
-    def bytes(self):
-        """ Open this file, read all bytes, return them as a string. """
-        with self.open('rb') as f:
-            return f.read()
-
-    def chunks(self, size, *args, **kwargs):
-        """ Returns a generator yielding chunks of the file, so it can
-            be read piece by piece with a simple for loop.
-
-           Any argument you pass after `size` will be passed to :meth:`open`.
-
-           :example:
-
-               >>> hash = hashlib.md5()
-               >>> for chunk in Path("path.py").chunks(8192, mode='rb'):
-               ...     hash.update(chunk)
-
-            This will read the file by chunks of 8192 bytes.
-        """
-        with self.open(*args, **kwargs) as f:
-            for chunk in iter(lambda: f.read(size) or None, None):
-                yield chunk
-
-    def write_bytes(self, bytes, append=False):
-        """ Open this file and write the given bytes to it.
-
-        Default behavior is to overwrite any existing file.
-        Call ``p.write_bytes(bytes, append=True)`` to append instead.
-        """
-        if append:
-            mode = 'ab'
-        else:
-            mode = 'wb'
-        with self.open(mode) as f:
-            f.write(bytes)
-
-    def text(self, encoding=None, errors='strict'):
-        r""" Open this file, read it in, return the content as a string.
-
-        All newline sequences are converted to ``'\n'``.  Keyword arguments
-        will be passed to :meth:`open`.
-
-        .. seealso:: :meth:`lines`
-        """
-        with self.open(mode='r', encoding=encoding, errors=errors) as f:
-            return U_NEWLINE.sub('\n', f.read())
-
-    def write_text(self, text, encoding=None, errors='strict',
-                   linesep=os.linesep, append=False):
-        r""" Write the given text to this file.
-
-        The default behavior is to overwrite any existing file;
-        to append instead, use the `append=True` keyword argument.
-
-        There are two differences between :meth:`write_text` and
-        :meth:`write_bytes`: newline handling and Unicode handling.
-        See below.
-
-        Parameters:
-
-          `text` - str/unicode - The text to be written.
-
-          `encoding` - str - The Unicode encoding that will be used.
-              This is ignored if `text` isn't a Unicode string.
-
-          `errors` - str - How to handle Unicode encoding errors.
-              Default is ``'strict'``.  See ``help(unicode.encode)`` for the
-              options.  This is ignored if `text` isn't a Unicode
-              string.
-
-          `linesep` - keyword argument - str/unicode - The sequence of
-              characters to be used to mark end-of-line.  The default is
-              :data:`os.linesep`.  You can also specify ``None`` to
-              leave all newlines as they are in `text`.
-
-          `append` - keyword argument - bool - Specifies what to do if
-              the file already exists (``True``: append to the end of it;
-              ``False``: overwrite it.)  The default is ``False``.
-
-
-        --- Newline handling.
-
-        ``write_text()`` converts all standard end-of-line sequences
-        (``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
-        end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
-        the end-of-line marker is ``'\r\n'``).
-
-        If you don't like your platform's default, you can override it
-        using the `linesep=` keyword argument.  If you specifically want
-        ``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
-
-        This applies to Unicode text the same as to 8-bit text, except
-        there are three additional standard Unicode end-of-line sequences:
-        ``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
-
-        (This is slightly different from when you open a file for
-        writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
-        in Python.)
-
-
-        --- Unicode
-
-        If `text` isn't Unicode, then apart from newline handling, the
-        bytes are written verbatim to the file.  The `encoding` and
-        `errors` arguments are not used and must be omitted.
-
-        If `text` is Unicode, it is first converted to :func:`bytes` using the
-        specified `encoding` (or the default encoding if `encoding`
-        isn't specified).  The `errors` argument applies only to this
-        conversion.
-
-        """
-        if isinstance(text, text_type):
-            if linesep is not None:
-                text = U_NEWLINE.sub(linesep, text)
-            text = text.encode(encoding or sys.getdefaultencoding(), errors)
-        else:
-            assert encoding is None
-            text = NEWLINE.sub(linesep, text)
-        self.write_bytes(text, append=append)
-
-    def lines(self, encoding=None, errors='strict', retain=True):
-        r""" Open this file, read all lines, return them in a list.
-
-        Optional arguments:
-            `encoding` - The Unicode encoding (or character set) of
-                the file.  The default is ``None``, meaning the content
-                of the file is read as 8-bit characters and returned
-                as a list of (non-Unicode) str objects.
-            `errors` - How to handle Unicode errors; see help(str.decode)
-                for the options.  Default is ``'strict'``.
-            `retain` - If ``True``, retain newline characters; but all newline
-                character combinations (``'\r'``, ``'\n'``, ``'\r\n'``) are
-                translated to ``'\n'``.  If ``False``, newline characters are
-                stripped off.  Default is ``True``.
-
-        This uses ``'U'`` mode.
-
-        .. seealso:: :meth:`text`
-        """
-        if encoding is None and retain:
-            with self.open('U') as f:
-                return f.readlines()
-        else:
-            return self.text(encoding, errors).splitlines(retain)
-
-    def write_lines(self, lines, encoding=None, errors='strict',
-                    linesep=os.linesep, append=False):
-        r""" Write the given lines of text to this file.
-
-        By default this overwrites any existing file at this path.
-
-        This puts a platform-specific newline sequence on every line.
-        See `linesep` below.
-
-            `lines` - A list of strings.
-
-            `encoding` - A Unicode encoding to use.  This applies only if
-                `lines` contains any Unicode strings.
-
-            `errors` - How to handle errors in Unicode encoding.  This
-                also applies only to Unicode strings.
-
-            linesep - The desired line-ending.  This line-ending is
-                applied to every line.  If a line already has any
-                standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``,
-                ``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will
-                be stripped off and this will be used instead.  The
-                default is os.linesep, which is platform-dependent
-                (``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.).
-                Specify ``None`` to write the lines as-is, like
-                :meth:`file.writelines`.
-
-        Use the keyword argument ``append=True`` to append lines to the
-        file.  The default is to overwrite the file.
-
-        .. warning ::
-
-            When you use this with Unicode data, if the encoding of the
-            existing data in the file is different from the encoding
-            you specify with the `encoding=` parameter, the result is
-            mixed-encoding data, which can really confuse someone trying
-            to read the file later.
-        """
-        with self.open('ab' if append else 'wb') as f:
-            for line in lines:
-                isUnicode = isinstance(line, text_type)
-                if linesep is not None:
-                    pattern = U_NL_END if isUnicode else NL_END
-                    line = pattern.sub('', line) + linesep
-                if isUnicode:
-                    line = line.encode(
-                        encoding or sys.getdefaultencoding(), errors)
-                f.write(line)
-
-    def read_md5(self):
-        """ Calculate the md5 hash for this file.
-
-        This reads through the entire file.
-
-        .. seealso:: :meth:`read_hash`
-        """
-        return self.read_hash('md5')
-
-    def _hash(self, hash_name):
-        """ Returns a hash object for the file at the current path.
-
-        `hash_name` should be a hash algo name (such as ``'md5'``
-        or ``'sha1'``) that's available in the :mod:`hashlib` module.
-        """
-        m = hashlib.new(hash_name)
-        for chunk in self.chunks(8192, mode="rb"):
-            m.update(chunk)
-        return m
-
-    def read_hash(self, hash_name):
-        """ Calculate given hash for this file.
-
-        List of supported hashes can be obtained from :mod:`hashlib` package.
-        This reads the entire file.
-
-        .. seealso:: :meth:`hashlib.hash.digest`
-        """
-        return self._hash(hash_name).digest()
-
-    def read_hexhash(self, hash_name):
-        """ Calculate given hash for this file, returning hexdigest.
-
-        List of supported hashes can be obtained from :mod:`hashlib` package.
-        This reads the entire file.
-
-        .. seealso:: :meth:`hashlib.hash.hexdigest`
-        """
-        return self._hash(hash_name).hexdigest()
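-
-    # Usage sketch (illustrative; the filename is hypothetical): any
-    # algorithm name known to :mod:`hashlib` can be used.
-    #
-    #     Path('archive.zip').read_hexhash('sha256')   # -> hex digest string
-    #     Path('archive.zip').read_hash('sha1')        # -> raw digest bytes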
-
-    # --- Methods for querying the filesystem.
-    # N.B. On some platforms, the os.path functions may be implemented in C
-    # (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get
-    # bound. Playing it safe and wrapping them all in method calls.
-
-    def isabs(self):
-        """ .. seealso:: :func:`os.path.isabs` """
-        return self.module.isabs(self)
-
-    def exists(self):
-        """ .. seealso:: :func:`os.path.exists` """
-        return self.module.exists(self)
-
-    def isdir(self):
-        """ .. seealso:: :func:`os.path.isdir` """
-        return self.module.isdir(self)
-
-    def isfile(self):
-        """ .. seealso:: :func:`os.path.isfile` """
-        return self.module.isfile(self)
-
-    def islink(self):
-        """ .. seealso:: :func:`os.path.islink` """
-        return self.module.islink(self)
-
-    def ismount(self):
-        """ .. seealso:: :func:`os.path.ismount` """
-        return self.module.ismount(self)
-
-    def samefile(self, other):
-        """ .. seealso:: :func:`os.path.samefile` """
-        if not hasattr(self.module, 'samefile'):
-            other = Path(other).realpath().normpath().normcase()
-            return self.realpath().normpath().normcase() == other
-        return self.module.samefile(self, other)
-
-    def getatime(self):
-        """ .. seealso:: :attr:`atime`, :func:`os.path.getatime` """
-        return self.module.getatime(self)
-
-    atime = property(
-        getatime, None, None,
-        """ Last access time of the file.
-
-        .. seealso:: :meth:`getatime`, :func:`os.path.getatime`
-        """)
-
-    def getmtime(self):
-        """ .. seealso:: :attr:`mtime`, :func:`os.path.getmtime` """
-        return self.module.getmtime(self)
-
-    mtime = property(
-        getmtime, None, None,
-        """ Last-modified time of the file.
-
-        .. seealso:: :meth:`getmtime`, :func:`os.path.getmtime`
-        """)
-
-    def getctime(self):
-        """ .. seealso:: :attr:`ctime`, :func:`os.path.getctime` """
-        return self.module.getctime(self)
-
-    ctime = property(
-        getctime, None, None,
-        """ Creation time of the file.
-
-        .. seealso:: :meth:`getctime`, :func:`os.path.getctime`
-        """)
-
-    def getsize(self):
-        """ .. seealso:: :attr:`size`, :func:`os.path.getsize` """
-        return self.module.getsize(self)
-
-    size = property(
-        getsize, None, None,
-        """ Size of the file, in bytes.
-
-        .. seealso:: :meth:`getsize`, :func:`os.path.getsize`
-        """)
-
-    if hasattr(os, 'access'):
-        def access(self, mode):
-            """ Return ``True`` if current user has access to this path.
-
-            mode - One of the constants :data:`os.F_OK`, :data:`os.R_OK`,
-            :data:`os.W_OK`, :data:`os.X_OK`
-
-            .. seealso:: :func:`os.access`
-            """
-            return os.access(self, mode)
-
-    def stat(self):
-        """ Perform a ``stat()`` system call on this path.
-
-        .. seealso:: :meth:`lstat`, :func:`os.stat`
-        """
-        return os.stat(self)
-
-    def lstat(self):
-        """ Like :meth:`stat`, but do not follow symbolic links.
-
-        .. seealso:: :meth:`stat`, :func:`os.lstat`
-        """
-        return os.lstat(self)
-
-    def __get_owner_windows(self):
-        """
-        Return the name of the owner of this file or directory. Follow
-        symbolic links.
-
-        Return a name of the form ``r'DOMAIN\\User Name'``; may be a group.
-
-        .. seealso:: :attr:`owner`
-        """
-        desc = win32security.GetFileSecurity(
-            self, win32security.OWNER_SECURITY_INFORMATION)
-        sid = desc.GetSecurityDescriptorOwner()
-        account, domain, typecode = win32security.LookupAccountSid(None, sid)
-        return domain + '\\' + account
-
-    def __get_owner_unix(self):
-        """
-        Return the name of the owner of this file or directory. Follow
-        symbolic links.
-
-        .. seealso:: :attr:`owner`
-        """
-        st = self.stat()
-        return pwd.getpwuid(st.st_uid).pw_name
-
-    def __get_owner_not_implemented(self):
-        raise NotImplementedError("Ownership not available on this platform.")
-
-    if 'win32security' in globals():
-        get_owner = __get_owner_windows
-    elif 'pwd' in globals():
-        get_owner = __get_owner_unix
-    else:
-        get_owner = __get_owner_not_implemented
-
-    owner = property(
-        get_owner, None, None,
-        """ Name of the owner of this file or directory.
-
-        .. seealso:: :meth:`get_owner`""")
-
-    if hasattr(os, 'statvfs'):
-        def statvfs(self):
-            """ Perform a ``statvfs()`` system call on this path.
-
-            .. seealso:: :func:`os.statvfs`
-            """
-            return os.statvfs(self)
-
-    if hasattr(os, 'pathconf'):
-        def pathconf(self, name):
-            """ .. seealso:: :func:`os.pathconf` """
-            return os.pathconf(self, name)
-
-    #
-    # --- Modifying operations on files and directories
-
-    def utime(self, times):
-        """ Set the access and modified times of this file.
-
-        .. seealso:: :func:`os.utime`
-        """
-        os.utime(self, times)
-        return self
-
-    def chmod(self, mode):
-        """
-        Set the mode. May be the new mode (os.chmod behavior) or a `symbolic
-        mode <http://en.wikipedia.org/wiki/Chmod#Symbolic_modes>`_.
-
-        .. seealso:: :func:`os.chmod`
-        """
-        if isinstance(mode, string_types):
-            mask = _multi_permission_mask(mode)
-            mode = mask(self.stat().st_mode)
-        os.chmod(self, mode)
-        return self
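-
-    # Usage sketch (illustrative; 'script.sh' is a hypothetical file):
-    # chmod() accepts either a numeric mode or a symbolic mode string,
-    # including comma-separated clauses.
-    #
-    #     Path('script.sh').chmod(0o755)        # numeric, as with os.chmod
-    #     Path('script.sh').chmod('u+x')        # symbolic
-    #     Path('script.sh').chmod('go-w,a+r')   # multiple clauses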
-
-    def chown(self, uid=-1, gid=-1):
-        """
-        Change the owner and group; `uid` and `gid` may be given either
-        as names or as numeric ids.
-
-        .. seealso:: :func:`os.chown`
-        """
-        if hasattr(os, 'chown'):
-            if 'pwd' in globals() and isinstance(uid, string_types):
-                uid = pwd.getpwnam(uid).pw_uid
-            if 'grp' in globals() and isinstance(gid, string_types):
-                gid = grp.getgrnam(gid).gr_gid
-            os.chown(self, uid, gid)
-        else:
-            msg = "Ownership not available on this platform."
-            raise NotImplementedError(msg)
-        return self
-
-    def rename(self, new):
-        """ .. seealso:: :func:`os.rename` """
-        os.rename(self, new)
-        return self._next_class(new)
-
-    def renames(self, new):
-        """ .. seealso:: :func:`os.renames` """
-        os.renames(self, new)
-        return self._next_class(new)
-
-    #
-    # --- Create/delete operations on directories
-
-    def mkdir(self, mode=0o777):
-        """ .. seealso:: :func:`os.mkdir` """
-        os.mkdir(self, mode)
-        return self
-
-    def mkdir_p(self, mode=0o777):
-        """ Like :meth:`mkdir`, but does not raise an exception if the
-        directory already exists. """
-        try:
-            self.mkdir(mode)
-        except OSError:
-            _, e, _ = sys.exc_info()
-            if e.errno != errno.EEXIST:
-                raise
-        return self
-
-    def makedirs(self, mode=0o777):
-        """ .. seealso:: :func:`os.makedirs` """
-        os.makedirs(self, mode)
-        return self
-
-    def makedirs_p(self, mode=0o777):
-        """ Like :meth:`makedirs`, but does not raise an exception if the
-        directory already exists. """
-        try:
-            self.makedirs(mode)
-        except OSError:
-            _, e, _ = sys.exc_info()
-            if e.errno != errno.EEXIST:
-                raise
-        return self
-
-    def rmdir(self):
-        """ .. seealso:: :func:`os.rmdir` """
-        os.rmdir(self)
-        return self
-
-    def rmdir_p(self):
-        """ Like :meth:`rmdir`, but does not raise an exception if the
-        directory is not empty or does not exist. """
-        try:
-            self.rmdir()
-        except OSError:
-            _, e, _ = sys.exc_info()
-            bypass_codes = errno.ENOTEMPTY, errno.EEXIST, errno.ENOENT
-            if e.errno not in bypass_codes:
-                raise
-        return self
-
-    def removedirs(self):
-        """ .. seealso:: :func:`os.removedirs` """
-        os.removedirs(self)
-        return self
-
-    def removedirs_p(self):
-        """ Like :meth:`removedirs`, but does not raise an exception if the
-        directory is not empty or does not exist. """
-        try:
-            self.removedirs()
-        except OSError:
-            _, e, _ = sys.exc_info()
-            if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
-                raise
-        return self
-
-    # --- Modifying operations on files
-
-    def touch(self):
-        """ Set the access/modified times of this file to the current time.
-        Create the file if it does not exist.
-        """
-        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0o666)
-        os.close(fd)
-        os.utime(self, None)
-        return self
-
-    def remove(self):
-        """ .. seealso:: :func:`os.remove` """
-        os.remove(self)
-        return self
-
-    def remove_p(self):
-        """ Like :meth:`remove`, but does not raise an exception if the
-        file does not exist. """
-        try:
-            self.unlink()
-        except OSError:
-            _, e, _ = sys.exc_info()
-            if e.errno != errno.ENOENT:
-                raise
-        return self
-
-    def unlink(self):
-        """ .. seealso:: :func:`os.unlink` """
-        os.unlink(self)
-        return self
-
-    def unlink_p(self):
-        """ Like :meth:`unlink`, but does not raise an exception if the
-        file does not exist. """
-        self.remove_p()
-        return self
-
-    # --- Links
-
-    if hasattr(os, 'link'):
-        def link(self, newpath):
-            """ Create a hard link at `newpath`, pointing to this file.
-
-            .. seealso:: :func:`os.link`
-            """
-            os.link(self, newpath)
-            return self._next_class(newpath)
-
-    if hasattr(os, 'symlink'):
-        def symlink(self, newlink=None):
-            """ Create a symbolic link at `newlink`, pointing here.
-
-            If newlink is not supplied, the symbolic link will assume
-            the name self.basename(), creating the link in the cwd.
-
-            .. seealso:: :func:`os.symlink`
-            """
-            if newlink is None:
-                newlink = self.basename()
-            os.symlink(self, newlink)
-            return self._next_class(newlink)
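-
-        # Usage sketch (illustrative; names are hypothetical): create a
-        # symlink 'latest' in the cwd pointing at this file.
-        #
-        #     Path('/srv/builds/build-42.tar.gz').symlink('latest')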
-
-    if hasattr(os, 'readlink'):
-        def readlink(self):
-            """ Return the path to which this symbolic link points.
-
-            The result may be an absolute or a relative path.
-
-            .. seealso:: :meth:`readlinkabs`, :func:`os.readlink`
-            """
-            return self._next_class(os.readlink(self))
-
-        def readlinkabs(self):
-            """ Return the path to which this symbolic link points.
-
-            The result is always an absolute path.
-
-            .. seealso:: :meth:`readlink`, :func:`os.readlink`
-            """
-            p = self.readlink()
-            if p.isabs():
-                return p
-            else:
-                return (self.parent / p).abspath()
-
-    # High-level functions from shutil
-    # These functions will be bound to the instance such that
-    # Path(name).copy(target) will invoke shutil.copy(name, target)
-
-    copyfile = shutil.copyfile
-    copymode = shutil.copymode
-    copystat = shutil.copystat
-    copy = shutil.copy
-    copy2 = shutil.copy2
-    copytree = shutil.copytree
-    if hasattr(shutil, 'move'):
-        move = shutil.move
-    rmtree = shutil.rmtree
-
-    def rmtree_p(self):
-        """ Like :meth:`rmtree`, but does not raise an exception if the
-        directory does not exist. """
-        try:
-            self.rmtree()
-        except OSError:
-            _, e, _ = sys.exc_info()
-            if e.errno != errno.ENOENT:
-                raise
-        return self
-
-    def chdir(self):
-        """ .. seealso:: :func:`os.chdir` """
-        os.chdir(self)
-
-    cd = chdir
-
-    def merge_tree(self, dst, symlinks=False, *args, **kwargs):
-        """
-        Copy entire contents of self to dst, overwriting existing
-        contents in dst with those in self.
-
-        If the additional keyword `update` is True, each file in `self`
-        will only be copied if the corresponding file in `dst` does not
-        exist, or is older than the file in `self`.
-
-        Note that the technique employed stages the files in a temporary
-        directory first, so this function is not suitable for merging
-        trees with large files, especially if the temporary directory
-        is not capable of storing a copy of the entire source tree.
-        """
-        update = kwargs.pop('update', False)
-        with TempDir() as _temp_dir:
-            # first copy the tree to a stage directory to support
-            #  the parameters and behavior of copytree.
-            stage = _temp_dir / str(hash(self))
-            self.copytree(stage, symlinks, *args, **kwargs)
-            # now copy everything from the stage directory using
-            #  the semantics of dir_util.copy_tree
-            distutils.dir_util.copy_tree(
-                stage,
-                dst,
-                preserve_symlinks=symlinks,
-                update=update,
-            )
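-
-    # Usage sketch (illustrative; directory names are hypothetical):
-    # overlay one tree onto another, copying only missing or newer files.
-    #
-    #     Path('build/assets').merge_tree('deploy/assets', update=True)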
-
-    #
-    # --- Special stuff from os
-
-    if hasattr(os, 'chroot'):
-        def chroot(self):
-            """ .. seealso:: :func:`os.chroot` """
-            os.chroot(self)
-
-    if hasattr(os, 'startfile'):
-        def startfile(self):
-            """ .. seealso:: :func:`os.startfile` """
-            os.startfile(self)
-            return self
-
-    # in-place re-writing, courtesy of Martijn Pieters
-    # http://www.zopatista.com/python/2013/11/26/inplace-file-rewriting/
-    @contextlib.contextmanager
-    def in_place(
-            self, mode='r', buffering=-1, encoding=None, errors=None,
-            newline=None, backup_extension=None,
-    ):
-        """
-        A context in which a file may be re-written in-place with
-        new content.
-
-        Yields a tuple of :samp:`({readable}, {writable})` file
-        objects, where `writable` replaces `readable`.
-
-        If an exception occurs, the old file is restored, removing the
-        written data.
-
-        Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only
-        read-only modes are allowed. A :exc:`ValueError` is raised
-        on invalid modes.
-
-        For example, to add line numbers to a file::
-
-            p = Path(filename)
-            assert p.isfile()
-            with p.in_place() as (reader, writer):
-                for number, line in enumerate(reader, 1):
-                    writer.write('{0:3}: '.format(number))
-                    writer.write(line)
-
-        Thereafter, the file at `filename` will have line numbers in it.
-        """
-        import io
-
-        if set(mode).intersection('wa+'):
-            raise ValueError('Only read-only file modes can be used')
-
-        # move existing file to backup, create new file with same permissions
-        # borrowed extensively from the fileinput module
-        backup_fn = self + (backup_extension or os.extsep + 'bak')
-        try:
-            os.unlink(backup_fn)
-        except os.error:
-            pass
-        os.rename(self, backup_fn)
-        readable = io.open(
-            backup_fn, mode, buffering=buffering,
-            encoding=encoding, errors=errors, newline=newline,
-        )
-        try:
-            perm = os.fstat(readable.fileno()).st_mode
-        except OSError:
-            writable = open(
-                self, 'w' + mode.replace('r', ''),
-                buffering=buffering, encoding=encoding, errors=errors,
-                newline=newline,
-            )
-        else:
-            os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
-            if hasattr(os, 'O_BINARY'):
-                os_mode |= os.O_BINARY
-            fd = os.open(self, os_mode, perm)
-            writable = io.open(
-                fd, "w" + mode.replace('r', ''),
-                buffering=buffering, encoding=encoding, errors=errors,
-                newline=newline,
-            )
-            try:
-                if hasattr(os, 'chmod'):
-                    os.chmod(self, perm)
-            except OSError:
-                pass
-        try:
-            yield readable, writable
-        except Exception:
-            # move backup back
-            readable.close()
-            writable.close()
-            try:
-                os.unlink(self)
-            except os.error:
-                pass
-            os.rename(backup_fn, self)
-            raise
-        else:
-            readable.close()
-            writable.close()
-        finally:
-            try:
-                os.unlink(backup_fn)
-            except os.error:
-                pass
-
-    @ClassProperty
-    @classmethod
-    def special(cls):
-        """
-        Return a SpecialResolver object for referencing a suitable
-        directory on the current platform for the given
-        type of content.
-
-        For example, to get a user config directory, invoke:
-
-            dir = Path.special().user.config
-
-        Uses the `appdirs
-        <https://pypi.python.org/pypi/appdirs/1.4.0>`_ module to resolve
-        the paths in a platform-friendly way.
-
-        To create a config directory for 'My App', consider:
-
-            dir = Path.special("My App").user.config.makedirs_p()
-
-        If the ``appdirs`` module is not installed, invoking
-        ``special`` will raise an :exc:`ImportError`.
-        """
-        return functools.partial(SpecialResolver, cls)
-
-
-class SpecialResolver(object):
-    class ResolverScope:
-        def __init__(self, paths, scope):
-            self.paths = paths
-            self.scope = scope
-
-        def __getattr__(self, class_):
-            return self.paths.get_dir(self.scope, class_)
-
-    def __init__(self, path_class, *args, **kwargs):
-        appdirs = importlib.import_module('appdirs')
-
-        # let appname default to None until
-        # https://github.com/ActiveState/appdirs/issues/55 is solved.
-        not args and kwargs.setdefault('appname', None)
-
-        vars(self).update(
-            path_class=path_class,
-            wrapper=appdirs.AppDirs(*args, **kwargs),
-        )
-
-    def __getattr__(self, scope):
-        return self.ResolverScope(self, scope)
-
-    def get_dir(self, scope, class_):
-        """
-        Return the callable function from appdirs, but with the
-        result wrapped in self.path_class
-        """
-        prop_name = '{scope}_{class_}_dir'.format(**locals())
-        value = getattr(self.wrapper, prop_name)
-        MultiPath = Multi.for_class(self.path_class)
-        return MultiPath.detect(value)
-
-
-class Multi:
-    """
-    A mix-in for a Path which may contain multiple Paths separated by
-    ``os.pathsep``.
-    """
-    @classmethod
-    def for_class(cls, path_cls):
-        name = 'Multi' + path_cls.__name__
-        if PY2:
-            name = str(name)
-        return type(name, (cls, path_cls), {})
-
-    @classmethod
-    def detect(cls, input):
-        if os.pathsep not in input:
-            cls = cls._next_class
-        return cls(input)
-
-    def __iter__(self):
-        return iter(map(self._next_class, self.split(os.pathsep)))
-
-    @ClassProperty
-    @classmethod
-    def _next_class(cls):
-        """
-        Multi-subclasses should use the parent class
-        """
-        return next(
-            class_
-            for class_ in cls.__mro__
-            if not issubclass(class_, Multi)
-        )
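-
-    # Usage sketch (illustrative values): Multi wraps a pathsep-separated
-    # string such as the PATH environment variable and yields Path objects
-    # when iterated.
-    #
-    #     MultiPath = Multi.for_class(Path)
-    #     entries = MultiPath.detect('/usr/bin' + os.pathsep + '/usr/local/bin')
-    #     list(entries)   # -> [Path('/usr/bin'), Path('/usr/local/bin')]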
-
-
-class TempDir(Path):
-    """
-    A temporary directory created via :func:`tempfile.mkdtemp` (accepting
-    the same parameters) that can be used as a context manager; the
-    directory is removed on successful exit.
-
-    Example::
-
-        with TempDir() as d:
-            # do stuff with the Path object "d"
-
-        # here the directory is deleted automatically
-
-    .. seealso:: :func:`tempfile.mkdtemp`
-    """
-
-    @ClassProperty
-    @classmethod
-    def _next_class(cls):
-        return Path
-
-    def __new__(cls, *args, **kwargs):
-        dirname = tempfile.mkdtemp(*args, **kwargs)
-        return super(TempDir, cls).__new__(cls, dirname)
-
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __enter__(self):
-        # TempDir should return a Path version of itself and not itself
-        # so that a second context manager does not create a second
-        # temporary directory, but rather changes CWD to the location
-        # of the temporary directory.
-        return self._next_class(self)
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        if not exc_value:
-            self.rmtree()
-
-
-# For backwards compatibility.
-tempdir = TempDir
-
-
-def _multi_permission_mask(mode):
-    """
-    Support multiple, comma-separated Unix chmod symbolic modes.
-
-    >>> _multi_permission_mask('a=r,u+w')(0) == 0o644
-    True
-    """
-    def compose(f, g):
-        return lambda *args, **kwargs: g(f(*args, **kwargs))
-    return functools.reduce(compose, map(_permission_mask, mode.split(',')))
-
-
-def _permission_mask(mode):
-    """
-    Convert a Unix chmod symbolic mode like ``'ugo+rwx'`` to a function
-    suitable for applying to a mask to affect that change.
-
-    >>> mask = _permission_mask('ugo+rwx')
-    >>> mask(0o554) == 0o777
-    True
-
-    >>> _permission_mask('go-x')(0o777) == 0o766
-    True
-
-    >>> _permission_mask('o-x')(0o445) == 0o444
-    True
-
-    >>> _permission_mask('a+x')(0) == 0o111
-    True
-
-    >>> _permission_mask('a=rw')(0o057) == 0o666
-    True
-
-    >>> _permission_mask('u=x')(0o666) == 0o166
-    True
-
-    >>> _permission_mask('g=')(0o157) == 0o107
-    True
-    """
-    # parse the symbolic mode
-    parsed = re.match('(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$', mode)
-    if not parsed:
-        raise ValueError("Unrecognized symbolic mode", mode)
-
-    # generate a mask representing the specified permission
-    spec_map = dict(r=4, w=2, x=1)
-    specs = (spec_map[perm] for perm in parsed.group('what'))
-    spec = functools.reduce(operator.or_, specs, 0)
-
-    # now apply spec to each subject in who
-    shift_map = dict(u=6, g=3, o=0)
-    who = parsed.group('who').replace('a', 'ugo')
-    masks = (spec << shift_map[subj] for subj in who)
-    mask = functools.reduce(operator.or_, masks)
-
-    op = parsed.group('op')
-
-    # if op is -, invert the mask
-    if op == '-':
-        mask ^= 0o777
-
-    # if op is =, retain extant values for unreferenced subjects
-    if op == '=':
-        masks = (0o7 << shift_map[subj] for subj in who)
-        retain = functools.reduce(operator.or_, masks) ^ 0o777
-
-    op_map = {
-        '+': operator.or_,
-        '-': operator.and_,
-        '=': lambda mask, target: target & retain ^ mask,
-    }
-    return functools.partial(op_map[op], mask)
-
-
-class CaseInsensitivePattern(text_type):
-    """
-    A string with a ``'normcase'`` property, suitable for passing to
-    :meth:`listdir`, :meth:`dirs`, :meth:`files`, :meth:`walk`,
-    :meth:`walkdirs`, or :meth:`walkfiles` to match case-insensitive.
-
-    For example, to get all files ending in .py, .Py, .pY, or .PY in the
-    current directory::
-
-        from path import Path, CaseInsensitivePattern as ci
-        Path('.').files(ci('*.py'))
-    """
-
-    @property
-    def normcase(self):
-        return __import__('ntpath').normcase
-
-
-class FastPath(Path):
-    """
-    A performance-optimized version of Path for use
-    on embedded platforms and other systems with limited
-    CPU. See #115 and #116 for background.
-    """
-
-    def listdir(self, pattern=None):
-        children = os.listdir(self)
-        if pattern is None:
-            return [self / child for child in children]
-
-        pattern, normcase = self.__prepare(pattern)
-        return [
-            self / child
-            for child in children
-            if self._next_class(child).__fnmatch(pattern, normcase)
-        ]
-
-    def walk(self, pattern=None, errors='strict'):
-        class Handlers:
-            def strict(msg):
-                raise
-
-            def warn(msg):
-                warnings.warn(msg, TreeWalkWarning)
-
-            def ignore(msg):
-                pass
-
-        if not callable(errors) and errors not in vars(Handlers):
-            raise ValueError("invalid errors parameter")
-        errors = vars(Handlers).get(errors, errors)
-
-        if pattern:
-            pattern, normcase = self.__prepare(pattern)
-        else:
-            normcase = None
-
-        return self.__walk(pattern, normcase, errors)
-
-    def __walk(self, pattern, normcase, errors):
-        """ Prepared version of walk """
-        try:
-            childList = self.listdir()
-        except Exception:
-            exc = sys.exc_info()[1]
-            tmpl = "Unable to list directory '%(self)s': %(exc)s"
-            msg = tmpl % locals()
-            errors(msg)
-            return
-
-        for child in childList:
-            if pattern is None or child.__fnmatch(pattern, normcase):
-                yield child
-            try:
-                isdir = child.isdir()
-            except Exception:
-                exc = sys.exc_info()[1]
-                tmpl = "Unable to access '%(child)s': %(exc)s"
-                msg = tmpl % locals()
-                errors(msg)
-                isdir = False
-
-            if isdir:
-                for item in child.__walk(pattern, normcase, errors):
-                    yield item
-
-    def walkdirs(self, pattern=None, errors='strict'):
-        if errors not in ('strict', 'warn', 'ignore'):
-            raise ValueError("invalid errors parameter")
-
-        if pattern:
-            pattern, normcase = self.__prepare(pattern)
-        else:
-            normcase = None
-
-        return self.__walkdirs(pattern, normcase, errors)
-
-    def __walkdirs(self, pattern, normcase, errors):
-        """ Prepared version of walkdirs """
-        try:
-            dirs = self.dirs()
-        except Exception:
-            if errors == 'ignore':
-                return
-            elif errors == 'warn':
-                warnings.warn(
-                    "Unable to list directory '%s': %s"
-                    % (self, sys.exc_info()[1]),
-                    TreeWalkWarning)
-                return
-            else:
-                raise
-
-        for child in dirs:
-            if pattern is None or child.__fnmatch(pattern, normcase):
-                yield child
-            for subsubdir in child.__walkdirs(pattern, normcase, errors):
-                yield subsubdir
-
-    def walkfiles(self, pattern=None, errors='strict'):
-        if errors not in ('strict', 'warn', 'ignore'):
-            raise ValueError("invalid errors parameter")
-
-        if pattern:
-            pattern, normcase = self.__prepare(pattern)
-        else:
-            normcase = None
-
-        return self.__walkfiles(pattern, normcase, errors)
-
-    def __walkfiles(self, pattern, normcase, errors):
-        """ Prepared version of walkfiles """
-        try:
-            childList = self.listdir()
-        except Exception:
-            if errors == 'ignore':
-                return
-            elif errors == 'warn':
-                warnings.warn(
-                    "Unable to list directory '%s': %s"
-                    % (self, sys.exc_info()[1]),
-                    TreeWalkWarning)
-                return
-            else:
-                raise
-
-        for child in childList:
-            try:
-                isfile = child.isfile()
-                isdir = not isfile and child.isdir()
-            except Exception:
-                if errors == 'ignore':
-                    continue
-                elif errors == 'warn':
-                    warnings.warn(
-                        "Unable to access '%s': %s"
-                        % (self, sys.exc_info()[1]),
-                        TreeWalkWarning)
-                    continue
-                else:
-                    raise
-
-            if isfile:
-                if pattern is None or child.__fnmatch(pattern, normcase):
-                    yield child
-            elif isdir:
-                for f in child.__walkfiles(pattern, normcase, errors):
-                    yield f
-
-    def __fnmatch(self, pattern, normcase):
-        """ Return ``True`` if `self.name` matches the given `pattern`,
-        prepared version.
-        `pattern` - A filename pattern with wildcards,
-            for example ``'*.py'``. The pattern is expected to be normcase'd
-            already.
-        `normcase` - A function used to normalize the pattern and
-            filename before matching.
-        .. seealso:: :func:`Path.fnmatch`
-        """
-        return fnmatch.fnmatchcase(normcase(self.name), pattern)
-
-    def __prepare(self, pattern, normcase=None):
-        """ Prepares a fmatch_pattern for use with ``FastPath.__fnmatch`.
-        `pattern` - A filename pattern with wildcards,
-            for example ``'*.py'``. If the pattern contains a `normcase`
-            attribute, it is applied to the name and path prior to comparison.
-        `normcase` - (optional) A function used to normalize the pattern and
-            filename before matching. Defaults to ``self.module.normcase``,
-            which is normally :func:`os.path.normcase`.
-        .. seealso:: :func:`FastPath.__fnmatch`
-        """
-        if not normcase:
-            normcase = getattr(pattern, 'normcase', self.module.normcase)
-        pattern = normcase(pattern)
-        return pattern, normcase
-
-    def fnmatch(self, pattern, normcase=None):
-        if not pattern:
-            raise ValueError("No pattern provided")
-
-        pattern, normcase = self.__prepare(pattern, normcase)
-        return self.__fnmatch(pattern, normcase)
+#
+# Copyright (c) 2010 Mikhail Gusarov
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+
+"""
+path.py - An object representing a path to a file or directory.
+
+https://github.com/jaraco/path.py
+
+Example::
+
+    from path import Path
+    d = Path('/home/guido/bin')
+
+    # Globbing
+    for f in d.files('*.py'):
+        f.chmod(0o755)
+
+    # Changing the working directory:
+    with Path("somewhere"):
+        # cwd is now `somewhere`
+        ...
+
+    # Concatenate paths with /
+    foo_txt = Path("bar") / "foo.txt"
+"""
+
+from __future__ import unicode_literals
+
+import sys
+import warnings
+import os
+import fnmatch
+import glob
+import shutil
+import hashlib
+import errno
+import tempfile
+import functools
+import operator
+import re
+import contextlib
+import io
+import distutils.dir_util
+import importlib
+import itertools
+
+try:
+    import win32security
+except ImportError:
+    pass
+
+try:
+    import pwd
+except ImportError:
+    pass
+
+try:
+    import grp
+except ImportError:
+    pass
+
+##############################################################################
+# Python 2/3 support
+PY3 = sys.version_info >= (3,)
+PY2 = not PY3
+
+string_types = str,
+text_type = str
+getcwdu = os.getcwd
+
+
+if PY2:
+    import __builtin__
+    string_types = __builtin__.basestring,
+    text_type = __builtin__.unicode
+    getcwdu = os.getcwdu
+    map = itertools.imap
+
+
+@contextlib.contextmanager
+def io_error_compat():
+    try:
+        yield
+    except IOError as io_err:
+        # On Python 2, io.open raises IOError; transform to OSError for
+        # future compatibility.
+        os_err = OSError(*io_err.args)
+        os_err.filename = getattr(io_err, 'filename', None)
+        raise os_err
+
+##############################################################################
+
+
+__all__ = ['Path', 'TempDir', 'CaseInsensitivePattern']
+
+
+LINESEPS = ['\r\n', '\r', '\n']
+U_LINESEPS = LINESEPS + ['\u0085', '\u2028', '\u2029']
+NEWLINE = re.compile('|'.join(LINESEPS))
+U_NEWLINE = re.compile('|'.join(U_LINESEPS))
+NL_END = re.compile(r'(?:{0})$'.format(NEWLINE.pattern))
+U_NL_END = re.compile(r'(?:{0})$'.format(U_NEWLINE.pattern))
+
+
+try:
+    import pkg_resources
+    __version__ = pkg_resources.require('path.py')[0].version
+except Exception:
+    __version__ = 'unknown'
+
+
+class TreeWalkWarning(Warning):
+    pass
+
+
+# from jaraco.functools
+def compose(*funcs):
+    compose_two = lambda f1, f2: lambda *args, **kwargs: f1(f2(*args, **kwargs))  # noqa
+    return functools.reduce(compose_two, funcs)
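+
+# Minimal sketch of what compose() does (illustrative values):
+#
+#     compose(str.strip, str.upper)('  hi  ')
+#     # == str.strip(str.upper('  hi  ')) == 'HI'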
+
+
+def simple_cache(func):
+    """
+    Save results for the :meth:`Path.using_module` classmethod.
+    When Python 3.2 is available, use functools.lru_cache instead.
+    """
+    saved_results = {}
+
+    def wrapper(cls, module):
+        if module in saved_results:
+            return saved_results[module]
+        saved_results[module] = func(cls, module)
+        return saved_results[module]
+    return wrapper
+
+
+class ClassProperty(property):
+    def __get__(self, cls, owner):
+        return self.fget.__get__(None, owner)()
+
+
+class multimethod(object):
+    """
+    Acts like a classmethod when invoked from the class and like an
+    instancemethod when invoked from the instance.
+    """
+    def __init__(self, func):
+        self.func = func
+
+    def __get__(self, instance, owner):
+        return (
+            functools.partial(self.func, owner) if instance is None
+            else functools.partial(self.func, owner, instance)
+        )
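+
+    # Minimal sketch (illustrative, POSIX separators assumed): a method
+    # decorated with @multimethod, such as Path.joinpath below, can be
+    # called from the class or from an instance.
+    #
+    #     Path.joinpath('a', 'b')      # -> Path('a/b')
+    #     Path('a').joinpath('b')      # -> Path('a/b')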
+
+
+class Path(text_type):
+    """
+    Represents a filesystem path.
+
+    For documentation on individual methods, consult their
+    counterparts in :mod:`os.path`.
+
+    Some methods are additionally included from :mod:`shutil`.
+    The functions are linked directly into the class namespace
+    such that they will be bound to the Path instance. For example,
+    ``Path(src).copy(target)`` is equivalent to
+    ``shutil.copy(src, target)``. Therefore, when referencing
+    the docs for these methods, assume `src` references `self`,
+    the Path instance.
+    """
+
+    module = os.path
+    """ The path module to use for path operations.
+
+    .. seealso:: :mod:`os.path`
+    """
+
+    def __init__(self, other=''):
+        if other is None:
+            raise TypeError("Invalid initial value for path: None")
+
+    @classmethod
+    @simple_cache
+    def using_module(cls, module):
+        subclass_name = cls.__name__ + '_' + module.__name__
+        if PY2:
+            subclass_name = str(subclass_name)
+        bases = (cls,)
+        ns = {'module': module}
+        return type(subclass_name, bases, ns)
+
+    @ClassProperty
+    @classmethod
+    def _next_class(cls):
+        """
+        What class should be used to construct new instances from this class
+        """
+        return cls
+
+    # --- Special Python methods.
+
+    def __repr__(self):
+        return '%s(%s)' % (type(self).__name__, super(Path, self).__repr__())
+
+    # Adding a Path and a string yields a Path.
+    def __add__(self, more):
+        try:
+            return self._next_class(super(Path, self).__add__(more))
+        except TypeError:  # Python bug
+            return NotImplemented
+
+    def __radd__(self, other):
+        if not isinstance(other, string_types):
+            return NotImplemented
+        return self._next_class(other.__add__(self))
+
+    # The / operator joins Paths.
+    def __div__(self, rel):
+        """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
+
+        Join two path components, adding a separator character if
+        needed.
+
+        .. seealso:: :func:`os.path.join`
+        """
+        return self._next_class(self.module.join(self, rel))
+
+    # Make the / operator work even when true division is enabled.
+    __truediv__ = __div__
+
+    # The / operator joins Paths the other way around
+    def __rdiv__(self, rel):
+        """ fp.__rdiv__(rel) == rel / fp
+
+        Join two path components, adding a separator character if
+        needed.
+
+        .. seealso:: :func:`os.path.join`
+        """
+        return self._next_class(self.module.join(rel, self))
+
+    # Make the / operator work even when true division is enabled.
+    __rtruediv__ = __rdiv__
+
+    def __enter__(self):
+        self._old_dir = self.getcwd()
+        os.chdir(self)
+        return self
+
+    def __exit__(self, *_):
+        os.chdir(self._old_dir)
+
+    def __fspath__(self):
+        return self
+
+    @classmethod
+    def getcwd(cls):
+        """ Return the current working directory as a path object.
+
+        .. seealso:: :func:`os.getcwdu`
+        """
+        return cls(getcwdu())
+
+    #
+    # --- Operations on Path strings.
+
+    def abspath(self):
+        """ .. seealso:: :func:`os.path.abspath` """
+        return self._next_class(self.module.abspath(self))
+
+    def normcase(self):
+        """ .. seealso:: :func:`os.path.normcase` """
+        return self._next_class(self.module.normcase(self))
+
+    def normpath(self):
+        """ .. seealso:: :func:`os.path.normpath` """
+        return self._next_class(self.module.normpath(self))
+
+    def realpath(self):
+        """ .. seealso:: :func:`os.path.realpath` """
+        return self._next_class(self.module.realpath(self))
+
+    def expanduser(self):
+        """ .. seealso:: :func:`os.path.expanduser` """
+        return self._next_class(self.module.expanduser(self))
+
+    def expandvars(self):
+        """ .. seealso:: :func:`os.path.expandvars` """
+        return self._next_class(self.module.expandvars(self))
+
+    def dirname(self):
+        """ .. seealso:: :attr:`parent`, :func:`os.path.dirname` """
+        return self._next_class(self.module.dirname(self))
+
+    def basename(self):
+        """ .. seealso:: :attr:`name`, :func:`os.path.basename` """
+        return self._next_class(self.module.basename(self))
+
+    def expand(self):
+        """ Clean up a filename by calling :meth:`expandvars()`,
+        :meth:`expanduser()`, and :meth:`normpath()` on it.
+
+        This is commonly everything needed to clean up a filename
+        read from a configuration file, for example.
+        """
+        return self.expandvars().expanduser().normpath()
+
+    @property
+    def stem(self):
+        """ The same as :meth:`name`, but with one file extension stripped off.
+
+        >>> Path('/home/guido/python.tar.gz').stem
+        'python.tar'
+        """
+        base, ext = self.module.splitext(self.name)
+        return base
+
+    @property
+    def namebase(self):
+        warnings.warn("Use .stem instead of .namebase", DeprecationWarning)
+        return self.stem
+
+    @property
+    def ext(self):
+        """ The file extension, for example ``'.py'``. """
+        f, ext = self.module.splitext(self)
+        return ext
+
+    def with_suffix(self, suffix):
+        """ Return a new path with the file suffix changed (or added, if none)
+
+        >>> Path('/home/guido/python.tar.gz').with_suffix(".foo")
+        Path('/home/guido/python.tar.foo')
+
+        >>> Path('python').with_suffix('.zip')
+        Path('python.zip')
+
+        >>> Path('filename.ext').with_suffix('zip')
+        Traceback (most recent call last):
+        ...
+        ValueError: Invalid suffix 'zip'
+        """
+        if not suffix.startswith('.'):
+            raise ValueError("Invalid suffix {suffix!r}".format(**locals()))
+
+        return self.stripext() + suffix
+
+    @property
+    def drive(self):
+        """ The drive specifier, for example ``'C:'``.
+
+        This is always empty on systems that don't use drive specifiers.
+        """
+        drive, r = self.module.splitdrive(self)
+        return self._next_class(drive)
+
+    parent = property(
+        dirname, None, None,
+        """ This path's parent directory, as a new Path object.
+
+        For example,
+        ``Path('/usr/local/lib/libpython.so').parent ==
+        Path('/usr/local/lib')``
+
+        .. seealso:: :meth:`dirname`, :func:`os.path.dirname`
+        """)
+
+    name = property(
+        basename, None, None,
+        """ The name of this file or directory without the full path.
+
+        For example,
+        ``Path('/usr/local/lib/libpython.so').name == 'libpython.so'``
+
+        .. seealso:: :meth:`basename`, :func:`os.path.basename`
+        """)
+
+    def splitpath(self):
+        """ p.splitpath() -> Return ``(p.parent, p.name)``.
+
+        .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split`
+        """
+        parent, child = self.module.split(self)
+        return self._next_class(parent), child
+
+    def splitdrive(self):
+        """ p.splitdrive() -> Return ``(p.drive, <the rest of p>)``.
+
+        Split the drive specifier from this path.  If there is
+        no drive specifier, :samp:`{p.drive}` is empty, so the return value
+        is simply ``(Path(''), p)``.  This is always the case on Unix.
+
+        .. seealso:: :func:`os.path.splitdrive`
+        """
+        drive, rel = self.module.splitdrive(self)
+        return self._next_class(drive), rel
+
+    def splitext(self):
+        """ p.splitext() -> Return ``(p.stripext(), p.ext)``.
+
+        Split the filename extension from this path and return
+        the two parts.  Either part may be empty.
+
+        The extension is everything from ``'.'`` to the end of the
+        last path segment.  This has the property that if
+        ``(a, b) == p.splitext()``, then ``a + b == p``.
+
+        .. seealso:: :func:`os.path.splitext`
+        """
+        filename, ext = self.module.splitext(self)
+        return self._next_class(filename), ext
+
+    def stripext(self):
+        """ p.stripext() -> Remove one file extension from the path.
+
+        For example, ``Path('/home/guido/python.tar.gz').stripext()``
+        returns ``Path('/home/guido/python.tar')``.
+        """
+        return self.splitext()[0]
+
+    def splitunc(self):
+        """ .. seealso:: :func:`os.path.splitunc` """
+        unc, rest = self.module.splitunc(self)
+        return self._next_class(unc), rest
+
+    @property
+    def uncshare(self):
+        """
+        The UNC mount point for this path.
+        This is empty for paths on local drives.
+        """
+        unc, r = self.module.splitunc(self)
+        return self._next_class(unc)
+
+    @multimethod
+    def joinpath(cls, first, *others):
+        """
+        Join first to zero or more :class:`Path` components,
+        adding a separator character (:samp:`{first}.module.sep`)
+        if needed.  Returns a new instance of
+        :samp:`{first}._next_class`.
+
+        .. seealso:: :func:`os.path.join`
+        """
+        if not isinstance(first, cls):
+            first = cls(first)
+        return first._next_class(first.module.join(first, *others))
+
+    def splitall(self):
+        r""" Return a list of the path components in this path.
+
+        The first item in the list will be a Path.  Its value will be
+        either :data:`os.curdir`, :data:`os.pardir`, empty, or the root
+        directory of this path (for example, ``'/'`` or ``'C:\\'``).  The
+        other items in the list will be strings.
+
+        ``path.Path.joinpath(*result)`` will yield the original path.
+        """
+        parts = []
+        loc = self
+        while loc != os.curdir and loc != os.pardir:
+            prev = loc
+            loc, child = prev.splitpath()
+            if loc == prev:
+                break
+            parts.append(child)
+        parts.append(loc)
+        parts.reverse()
+        return parts
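+
+    # Illustrative sketch (POSIX paths assumed):
+    #
+    #     Path('/usr/local/lib').splitall()
+    #     # -> [Path('/'), 'usr', 'local', 'lib']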
+
+    def relpath(self, start='.'):
+        """ Return this path as a relative path,
+        based from `start`, which defaults to the current working directory.
+        """
+        cwd = self._next_class(start)
+        return cwd.relpathto(self)
+
+    def relpathto(self, dest):
+        """ Return a relative path from `self` to `dest`.
+
+        If there is no relative path from `self` to `dest`, for example if
+        they reside on different drives in Windows, then this returns
+        ``dest.abspath()``.
+        """
+        origin = self.abspath()
+        dest = self._next_class(dest).abspath()
+
+        orig_list = origin.normcase().splitall()
+        # Don't normcase dest!  We want to preserve the case.
+        dest_list = dest.splitall()
+
+        if orig_list[0] != self.module.normcase(dest_list[0]):
+            # Can't get here from there.
+            return dest
+
+        # Find the location where the two paths start to differ.
+        i = 0
+        for start_seg, dest_seg in zip(orig_list, dest_list):
+            if start_seg != self.module.normcase(dest_seg):
+                break
+            i += 1
+
+        # Now i is the point where the two paths diverge.
+        # Need a certain number of "os.pardir"s to work up
+        # from the origin to the point of divergence.
+        segments = [os.pardir] * (len(orig_list) - i)
+        # Need to add the diverging part of dest_list.
+        segments += dest_list[i:]
+        if len(segments) == 0:
+            # If they happen to be identical, use os.curdir.
+            relpath = os.curdir
+        else:
+            relpath = self.module.join(*segments)
+        return self._next_class(relpath)
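+
+    # Illustrative sketch (POSIX paths, hypothetical locations):
+    #
+    #     Path('/usr/local/lib').relpathto('/usr/share/doc')
+    #     # -> Path('../../share/doc')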
+
+    # --- Listing, searching, walking, and matching
+
+    def listdir(self, pattern=None):
+        """ D.listdir() -> List of items in this directory.
+
+        Use :meth:`files` or :meth:`dirs` instead if you want a listing
+        of just files or just subdirectories.
+
+        The elements of the list are Path objects.
+
+        With the optional `pattern` argument, this only lists
+        items whose names match the given pattern.
+
+        .. seealso:: :meth:`files`, :meth:`dirs`
+        """
+        if pattern is None:
+            pattern = '*'
+        return [
+            self / child
+            for child in os.listdir(self)
+            if self._next_class(child).fnmatch(pattern)
+        ]
+
+    def dirs(self, pattern=None):
+        """ D.dirs() -> List of this directory's subdirectories.
+
+        The elements of the list are Path objects.
+        This does not walk recursively into subdirectories
+        (but see :meth:`walkdirs`).
+
+        With the optional `pattern` argument, this only lists
+        directories whose names match the given pattern.  For
+        example, ``d.dirs('build-*')``.
+        """
+        return [p for p in self.listdir(pattern) if p.isdir()]
+
+    def files(self, pattern=None):
+        """ D.files() -> List of the files in this directory.
+
+        The elements of the list are Path objects.
+        This does not walk into subdirectories (see :meth:`walkfiles`).
+
+        With the optional `pattern` argument, this only lists files
+        whose names match the given pattern.  For example,
+        ``d.files('*.pyc')``.
+        """
+
+        return [p for p in self.listdir(pattern) if p.isfile()]
+
+    def walk(self, pattern=None, errors='strict'):
+        """ D.walk() -> iterator over files and subdirs, recursively.
+
+        The iterator yields Path objects naming each child item of
+        this directory and its descendants.  This requires that
+        ``D.isdir()``.
+
+        This performs a depth-first traversal of the directory tree.
+        Each directory is returned just before all its children.
+
+        The `errors=` keyword argument controls behavior when an
+        error occurs.  The default is ``'strict'``, which causes an
+        exception.  Other allowed values are ``'warn'`` (which
+        reports the error via :func:`warnings.warn()`), and ``'ignore'``.
+        `errors` may also be an arbitrary callable taking a msg parameter.
+        """
+        class Handlers:
+            def strict(msg):
+                raise
+
+            def warn(msg):
+                warnings.warn(msg, TreeWalkWarning)
+
+            def ignore(msg):
+                pass
+
+        if not callable(errors) and errors not in vars(Handlers):
+            raise ValueError("invalid errors parameter")
+        errors = vars(Handlers).get(errors, errors)
+
+        try:
+            childList = self.listdir()
+        except Exception:
+            exc = sys.exc_info()[1]
+            tmpl = "Unable to list directory '%(self)s': %(exc)s"
+            msg = tmpl % locals()
+            errors(msg)
+            return
+
+        for child in childList:
+            if pattern is None or child.fnmatch(pattern):
+                yield child
+            try:
+                isdir = child.isdir()
+            except Exception:
+                exc = sys.exc_info()[1]
+                tmpl = "Unable to access '%(child)s': %(exc)s"
+                msg = tmpl % locals()
+                errors(msg)
+                isdir = False
+
+            if isdir:
+                for item in child.walk(pattern, errors):
+                    yield item
+
+    def walkdirs(self, pattern=None, errors='strict'):
+        """ D.walkdirs() -> iterator over subdirs, recursively.
+
+        With the optional `pattern` argument, this yields only
+        directories whose names match the given pattern.  For
+        example, ``mydir.walkdirs('*test')`` yields only directories
+        with names ending in ``'test'``.
+
+        The `errors=` keyword argument controls behavior when an
+        error occurs.  The default is ``'strict'``, which causes an
+        exception.  The other allowed values are ``'warn'`` (which
+        reports the error via :func:`warnings.warn()`), and ``'ignore'``.
+        """
+        if errors not in ('strict', 'warn', 'ignore'):
+            raise ValueError("invalid errors parameter")
+
+        try:
+            dirs = self.dirs()
+        except Exception:
+            if errors == 'ignore':
+                return
+            elif errors == 'warn':
+                warnings.warn(
+                    "Unable to list directory '%s': %s"
+                    % (self, sys.exc_info()[1]),
+                    TreeWalkWarning)
+                return
+            else:
+                raise
+
+        for child in dirs:
+            if pattern is None or child.fnmatch(pattern):
+                yield child
+            for subsubdir in child.walkdirs(pattern, errors):
+                yield subsubdir
+
+    def walkfiles(self, pattern=None, errors='strict'):
+        """ D.walkfiles() -> iterator over files in D, recursively.
+
+        The optional argument `pattern` limits the results to files
+        with names that match the pattern.  For example,
+        ``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
+        extension.
+        """
+        if errors not in ('strict', 'warn', 'ignore'):
+            raise ValueError("invalid errors parameter")
+
+        try:
+            childList = self.listdir()
+        except Exception:
+            if errors == 'ignore':
+                return
+            elif errors == 'warn':
+                warnings.warn(
+                    "Unable to list directory '%s': %s"
+                    % (self, sys.exc_info()[1]),
+                    TreeWalkWarning)
+                return
+            else:
+                raise
+
+        for child in childList:
+            try:
+                isfile = child.isfile()
+                isdir = not isfile and child.isdir()
+            except Exception:
+                if errors == 'ignore':
+                    continue
+                elif errors == 'warn':
+                    warnings.warn(
+                        "Unable to access '%s': %s"
+                        % (self, sys.exc_info()[1]),
+                        TreeWalkWarning)
+                    continue
+                else:
+                    raise
+
+            if isfile:
+                if pattern is None or child.fnmatch(pattern):
+                    yield child
+            elif isdir:
+                for f in child.walkfiles(pattern, errors):
+                    yield f
+
+    def fnmatch(self, pattern, normcase=None):
+        """ Return ``True`` if `self.name` matches the given `pattern`.
+
+        `pattern` - A filename pattern with wildcards,
+            for example ``'*.py'``. If the pattern contains a `normcase`
+            attribute, it is applied to the name and path prior to comparison.
+
+        `normcase` - (optional) A function used to normalize the pattern and
+            filename before matching. Defaults to ``self.module.normcase``,
+            which is normally :func:`os.path.normcase`.
+
+        .. seealso:: :func:`fnmatch.fnmatch`
+        """
+        default_normcase = getattr(pattern, 'normcase', self.module.normcase)
+        normcase = normcase or default_normcase
+        name = normcase(self.name)
+        pattern = normcase(pattern)
+        return fnmatch.fnmatchcase(name, pattern)
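+
+    # Illustrative sketch (assumes `import ntpath, posixpath`): matching is
+    # case-sensitive or not depending on the path module's normcase().
+    #
+    #     Path.using_module(ntpath)('setup.PY').fnmatch('*.py')      # True
+    #     Path.using_module(posixpath)('setup.PY').fnmatch('*.py')   # False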
+
+    def glob(self, pattern):
+        """ Return a list of Path objects that match the pattern.
+
+        `pattern` - a path relative to this directory, with wildcards.
+
+        For example, ``Path('/users').glob('*/bin/*')`` returns a list
+        of all the files users have in their :file:`bin` directories.
+
+        .. seealso:: :func:`glob.glob`
+
+        .. note:: Glob is **not** recursive, even when using ``**``.
+                  To do recursive globbing see :func:`walk`,
+                  :func:`walkdirs` or :func:`walkfiles`.
+        """
+        cls = self._next_class
+        return [cls(s) for s in glob.glob(self / pattern)]
+
+    def iglob(self, pattern):
+        """ Return an iterator of Path objects that match the pattern.
+
+        `pattern` - a path relative to this directory, with wildcards.
+
+        For example, ``Path('/users').iglob('*/bin/*')`` returns an
+        iterator of all the files users have in their :file:`bin`
+        directories.
+
+        .. seealso:: :func:`glob.iglob`
+
+        .. note:: Glob is **not** recursive, even when using ``**``.
+                  To do recursive globbing see :func:`walk`,
+                  :func:`walkdirs` or :func:`walkfiles`.
+        """
+        cls = self._next_class
+        return (cls(s) for s in glob.iglob(self / pattern))
+
+    #
+    # --- Reading or writing an entire file at once.
+
+    def open(self, *args, **kwargs):
+        """ Open this file and return a corresponding :class:`file` object.
+
+        Keyword arguments work as in :func:`io.open`.  If the file cannot be
+        opened, an :class:`~exceptions.OSError` is raised.
+        """
+        with io_error_compat():
+            return io.open(self, *args, **kwargs)
+
+    def bytes(self):
+        """ Open this file, read all bytes, return them as a string. """
+        with self.open('rb') as f:
+            return f.read()
+
+    def chunks(self, size, *args, **kwargs):
+        """ Returns a generator yielding chunks of the file, so it can
+            be read piece by piece with a simple for loop.
+
+           Any argument you pass after `size` will be passed to :meth:`open`.
+
+           :example:
+
+               >>> hash = hashlib.md5()
+               >>> for chunk in Path("path.py").chunks(8192, mode='rb'):
+               ...     hash.update(chunk)
+
+            This will read the file by chunks of 8192 bytes.
+        """
+        with self.open(*args, **kwargs) as f:
+            for chunk in iter(lambda: f.read(size) or None, None):
+                yield chunk
+
+    def write_bytes(self, bytes, append=False):
+        """ Open this file and write the given bytes to it.
+
+        Default behavior is to overwrite any existing file.
+        Call ``p.write_bytes(bytes, append=True)`` to append instead.
+        """
+        if append:
+            mode = 'ab'
+        else:
+            mode = 'wb'
+        with self.open(mode) as f:
+            f.write(bytes)
+
+    def text(self, encoding=None, errors='strict'):
+        r""" Open this file, read it in, return the content as a string.
+
+        All newline sequences are converted to ``'\n'``.  Keyword arguments
+        will be passed to :meth:`open`.
+
+        .. seealso:: :meth:`lines`
+        """
+        with self.open(mode='r', encoding=encoding, errors=errors) as f:
+            return U_NEWLINE.sub('\n', f.read())
+
+    def write_text(self, text, encoding=None, errors='strict',
+                   linesep=os.linesep, append=False):
+        r""" Write the given text to this file.
+
+        The default behavior is to overwrite any existing file;
+        to append instead, use the `append=True` keyword argument.
+
+        There are two differences between :meth:`write_text` and
+        :meth:`write_bytes`: newline handling and Unicode handling.
+        See below.
+
+        Parameters:
+
+          `text` - str/unicode - The text to be written.
+
+          `encoding` - str - The Unicode encoding that will be used.
+              This is ignored if `text` isn't a Unicode string.
+
+          `errors` - str - How to handle Unicode encoding errors.
+              Default is ``'strict'``.  See ``help(unicode.encode)`` for the
+              options.  This is ignored if `text` isn't a Unicode
+              string.
+
+          `linesep` - keyword argument - str/unicode - The sequence of
+              characters to be used to mark end-of-line.  The default is
+              :data:`os.linesep`.  You can also specify ``None`` to
+              leave all newlines as they are in `text`.
+
+          `append` - keyword argument - bool - Specifies what to do if
+              the file already exists (``True``: append to the end of it;
+              ``False``: overwrite it.)  The default is ``False``.
+
+
+        --- Newline handling.
+
+        ``write_text()`` converts all standard end-of-line sequences
+        (``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
+        end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
+        the end-of-line marker is ``'\r\n'``).
+
+        If you don't like your platform's default, you can override it
+        using the `linesep=` keyword argument.  If you specifically want
+        ``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
+
+        This applies to Unicode text the same as to 8-bit text, except
+        there are three additional standard Unicode end-of-line sequences:
+        ``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
+
+        (This is slightly different from when you open a file for
+        writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
+        in Python.)
+
+
+        --- Unicode
+
+        If `text` isn't Unicode, then apart from newline handling, the
+        bytes are written verbatim to the file.  The `encoding` and
+        `errors` arguments are not used and must be omitted.
+
+        If `text` is Unicode, it is first converted to :func:`bytes` using the
+        specified `encoding` (or the default encoding if `encoding`
+        isn't specified).  The `errors` argument applies only to this
+        conversion.
+
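+        A minimal illustrative example (the filename is hypothetical)::
+
+            Path('notes.txt').write_text(u'line one\nline two\n',
+                                         encoding='utf-8', linesep='\n')
+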
+        """
+        if isinstance(text, text_type):
+            if linesep is not None:
+                text = U_NEWLINE.sub(linesep, text)
+            text = text.encode(encoding or sys.getdefaultencoding(), errors)
+        else:
+            assert encoding is None
+            text = NEWLINE.sub(linesep, text)
+        self.write_bytes(text, append=append)
+
+    def lines(self, encoding=None, errors='strict', retain=True):
+        r""" Open this file, read all lines, return them in a list.
+
+        Optional arguments:
+            `encoding` - The Unicode encoding (or character set) of
+                the file.  The default is ``None``, meaning the content
+                of the file is read as 8-bit characters and returned
+                as a list of (non-Unicode) str objects.
+            `errors` - How to handle Unicode errors; see help(str.decode)
+                for the options.  Default is ``'strict'``.
+            `retain` - If ``True``, retain newline characters; but all newline
+                character combinations (``'\r'``, ``'\n'``, ``'\r\n'``) are
+                translated to ``'\n'``.  If ``False``, newline characters are
+                stripped off.  Default is ``True``.
+
+        This uses ``'U'`` mode.
+
+        .. seealso:: :meth:`text`
+        """
+        if encoding is None and retain:
+            with self.open('U') as f:
+                return f.readlines()
+        else:
+            return self.text(encoding, errors).splitlines(retain)
+
+    def write_lines(self, lines, encoding=None, errors='strict',
+                    linesep=os.linesep, append=False):
+        r""" Write the given lines of text to this file.
+
+        By default this overwrites any existing file at this path.
+
+        This puts a platform-specific newline sequence on every line.
+        See `linesep` below.
+
+            `lines` - A list of strings.
+
+            `encoding` - A Unicode encoding to use.  This applies only if
+                `lines` contains any Unicode strings.
+
+            `errors` - How to handle errors in Unicode encoding.  This
+                also applies only to Unicode strings.
+
+            linesep - The desired line-ending.  This line-ending is
+                applied to every line.  If a line already has any
+                standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``,
+                ``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will
+                be stripped off and this will be used instead.  The
+                default is os.linesep, which is platform-dependent
+                (``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.).
+                Specify ``None`` to write the lines as-is, like
+                :meth:`file.writelines`.
+
+        Use the keyword argument ``append=True`` to append lines to the
+        file.  The default is to overwrite the file.
+
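+        For example (illustrative; the filename is hypothetical)::
+
+            Path('report.txt').write_lines(
+                [u'first line', u'second line'], encoding='utf-8')
+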
+        .. warning ::
+
+            When you use this with Unicode data, if the encoding of the
+            existing data in the file is different from the encoding
+            you specify with the `encoding=` parameter, the result is
+            mixed-encoding data, which can really confuse someone trying
+            to read the file later.
+        """
+        with self.open('ab' if append else 'wb') as f:
+            for line in lines:
+                isUnicode = isinstance(line, text_type)
+                if linesep is not None:
+                    pattern = U_NL_END if isUnicode else NL_END
+                    line = pattern.sub('', line) + linesep
+                if isUnicode:
+                    line = line.encode(
+                        encoding or sys.getdefaultencoding(), errors)
+                f.write(line)
+
+    def read_md5(self):
+        """ Calculate the md5 hash for this file.
+
+        This reads through the entire file.
+
+        .. seealso:: :meth:`read_hash`
+        """
+        return self.read_hash('md5')
+
+    def _hash(self, hash_name):
+        """ Returns a hash object for the file at the current path.
+
+        `hash_name` should be a hash algorithm name (such as ``'md5'``
+        or ``'sha1'``) that's available in the :mod:`hashlib` module.
+        """
+        m = hashlib.new(hash_name)
+        for chunk in self.chunks(8192, mode="rb"):
+            m.update(chunk)
+        return m
+
+    def read_hash(self, hash_name):
+        """ Calculate given hash for this file.
+
+        List of supported hashes can be obtained from :mod:`hashlib` package.
+        This reads the entire file.
+
+        .. seealso:: :meth:`hashlib.hash.digest`
+        """
+        return self._hash(hash_name).digest()
+
+    def read_hexhash(self, hash_name):
+        """ Calculate given hash for this file, returning hexdigest.
+
+        List of supported hashes can be obtained from :mod:`hashlib` package.
+        This reads the entire file.
+
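+        For example (illustrative; the filename is hypothetical)::
+
+            digest = Path('archive.zip').read_hexhash('sha256')
+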
+        .. seealso:: :meth:`hashlib.hash.hexdigest`
+        """
+        return self._hash(hash_name).hexdigest()
+
+    # --- Methods for querying the filesystem.
+    # N.B. On some platforms, the os.path functions may be implemented in C
+    # (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get
+    # bound. Playing it safe and wrapping them all in method calls.
+
+    def isabs(self):
+        """ .. seealso:: :func:`os.path.isabs` """
+        return self.module.isabs(self)
+
+    def exists(self):
+        """ .. seealso:: :func:`os.path.exists` """
+        return self.module.exists(self)
+
+    def isdir(self):
+        """ .. seealso:: :func:`os.path.isdir` """
+        return self.module.isdir(self)
+
+    def isfile(self):
+        """ .. seealso:: :func:`os.path.isfile` """
+        return self.module.isfile(self)
+
+    def islink(self):
+        """ .. seealso:: :func:`os.path.islink` """
+        return self.module.islink(self)
+
+    def ismount(self):
+        """ .. seealso:: :func:`os.path.ismount` """
+        return self.module.ismount(self)
+
+    def samefile(self, other):
+        """ .. seealso:: :func:`os.path.samefile` """
+        if not hasattr(self.module, 'samefile'):
+            other = Path(other).realpath().normpath().normcase()
+            return self.realpath().normpath().normcase() == other
+        return self.module.samefile(self, other)
+
+    def getatime(self):
+        """ .. seealso:: :attr:`atime`, :func:`os.path.getatime` """
+        return self.module.getatime(self)
+
+    atime = property(
+        getatime, None, None,
+        """ Last access time of the file.
+
+        .. seealso:: :meth:`getatime`, :func:`os.path.getatime`
+        """)
+
+    def getmtime(self):
+        """ .. seealso:: :attr:`mtime`, :func:`os.path.getmtime` """
+        return self.module.getmtime(self)
+
+    mtime = property(
+        getmtime, None, None,
+        """ Last-modified time of the file.
+
+        .. seealso:: :meth:`getmtime`, :func:`os.path.getmtime`
+        """)
+
+    def getctime(self):
+        """ .. seealso:: :attr:`ctime`, :func:`os.path.getctime` """
+        return self.module.getctime(self)
+
+    ctime = property(
+        getctime, None, None,
+        """ Creation time of the file.
+
+        .. seealso:: :meth:`getctime`, :func:`os.path.getctime`
+        """)
+
+    def getsize(self):
+        """ .. seealso:: :attr:`size`, :func:`os.path.getsize` """
+        return self.module.getsize(self)
+
+    size = property(
+        getsize, None, None,
+        """ Size of the file, in bytes.
+
+        .. seealso:: :meth:`getsize`, :func:`os.path.getsize`
+        """)
+
+    if hasattr(os, 'access'):
+        def access(self, mode):
+            """ Return ``True`` if current user has access to this path.
+
+            mode - One of the constants :data:`os.F_OK`, :data:`os.R_OK`,
+            :data:`os.W_OK`, :data:`os.X_OK`
+
+            .. seealso:: :func:`os.access`
+            """
+            return os.access(self, mode)
+
+    def stat(self):
+        """ Perform a ``stat()`` system call on this path.
+
+        .. seealso:: :meth:`lstat`, :func:`os.stat`
+        """
+        return os.stat(self)
+
+    def lstat(self):
+        """ Like :meth:`stat`, but do not follow symbolic links.
+
+        .. seealso:: :meth:`stat`, :func:`os.lstat`
+        """
+        return os.lstat(self)
+
+    def __get_owner_windows(self):
+        """
+        Return the name of the owner of this file or directory. Follow
+        symbolic links.
+
+        Return a name of the form ``r'DOMAIN\\User Name'``; may be a group.
+
+        .. seealso:: :attr:`owner`
+        """
+        desc = win32security.GetFileSecurity(
+            self, win32security.OWNER_SECURITY_INFORMATION)
+        sid = desc.GetSecurityDescriptorOwner()
+        account, domain, typecode = win32security.LookupAccountSid(None, sid)
+        return domain + '\\' + account
+
+    def __get_owner_unix(self):
+        """
+        Return the name of the owner of this file or directory. Follow
+        symbolic links.
+
+        .. seealso:: :attr:`owner`
+        """
+        st = self.stat()
+        return pwd.getpwuid(st.st_uid).pw_name
+
+    def __get_owner_not_implemented(self):
+        raise NotImplementedError("Ownership not available on this platform.")
+
+    if 'win32security' in globals():
+        get_owner = __get_owner_windows
+    elif 'pwd' in globals():
+        get_owner = __get_owner_unix
+    else:
+        get_owner = __get_owner_not_implemented
+
+    owner = property(
+        get_owner, None, None,
+        """ Name of the owner of this file or directory.
+
+        .. seealso:: :meth:`get_owner`""")
+
+    if hasattr(os, 'statvfs'):
+        def statvfs(self):
+            """ Perform a ``statvfs()`` system call on this path.
+
+            .. seealso:: :func:`os.statvfs`
+            """
+            return os.statvfs(self)
+
+    if hasattr(os, 'pathconf'):
+        def pathconf(self, name):
+            """ .. seealso:: :func:`os.pathconf` """
+            return os.pathconf(self, name)
+
+    #
+    # --- Modifying operations on files and directories
+
+    def utime(self, times):
+        """ Set the access and modified times of this file.
+
+        .. seealso:: :func:`os.utime`
+        """
+        os.utime(self, times)
+        return self
+
+    def chmod(self, mode):
+        """
+        Set the mode. May be the new mode (os.chmod behavior) or a `symbolic
+        mode <http://en.wikipedia.org/wiki/Chmod#Symbolic_modes>`_.
+
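+        For example (illustrative; the filename is hypothetical)::
+
+            Path('script.sh').chmod('u+x')     # symbolic mode
+            Path('script.sh').chmod(0o755)     # numeric mode, as with os.chmod
+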
+        .. seealso:: :func:`os.chmod`
+        """
+        if isinstance(mode, string_types):
+            mask = _multi_permission_mask(mode)
+            mode = mask(self.stat().st_mode)
+        os.chmod(self, mode)
+        return self
+
+    def chown(self, uid=-1, gid=-1):
+        """
+        Change the owner and group by names rather than the uid or gid numbers.
+
+        .. seealso:: :func:`os.chown`
+        """
+        if hasattr(os, 'chown'):
+            if 'pwd' in globals() and isinstance(uid, string_types):
+                uid = pwd.getpwnam(uid).pw_uid
+            if 'grp' in globals() and isinstance(gid, string_types):
+                gid = grp.getgrnam(gid).gr_gid
+            os.chown(self, uid, gid)
+        else:
+            msg = "Ownership not available on this platform."
+            raise NotImplementedError(msg)
+        return self
+
+    def rename(self, new):
+        """ .. seealso:: :func:`os.rename` """
+        os.rename(self, new)
+        return self._next_class(new)
+
+    def renames(self, new):
+        """ .. seealso:: :func:`os.renames` """
+        os.renames(self, new)
+        return self._next_class(new)
+
+    #
+    # --- Create/delete operations on directories
+
+    def mkdir(self, mode=0o777):
+        """ .. seealso:: :func:`os.mkdir` """
+        os.mkdir(self, mode)
+        return self
+
+    def mkdir_p(self, mode=0o777):
+        """ Like :meth:`mkdir`, but does not raise an exception if the
+        directory already exists. """
+        try:
+            self.mkdir(mode)
+        except OSError:
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.EEXIST:
+                raise
+        return self
+
+    def makedirs(self, mode=0o777):
+        """ .. seealso:: :func:`os.makedirs` """
+        os.makedirs(self, mode)
+        return self
+
+    def makedirs_p(self, mode=0o777):
+        """ Like :meth:`makedirs`, but does not raise an exception if the
+        directory already exists. """
+        try:
+            self.makedirs(mode)
+        except OSError:
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.EEXIST:
+                raise
+        return self
+
+    def rmdir(self):
+        """ .. seealso:: :func:`os.rmdir` """
+        os.rmdir(self)
+        return self
+
+    def rmdir_p(self):
+        """ Like :meth:`rmdir`, but does not raise an exception if the
+        directory is not empty or does not exist. """
+        try:
+            self.rmdir()
+        except OSError:
+            _, e, _ = sys.exc_info()
+            bypass_codes = errno.ENOTEMPTY, errno.EEXIST, errno.ENOENT
+            if e.errno not in bypass_codes:
+                raise
+        return self
+
+    def removedirs(self):
+        """ .. seealso:: :func:`os.removedirs` """
+        os.removedirs(self)
+        return self
+
+    def removedirs_p(self):
+        """ Like :meth:`removedirs`, but does not raise an exception if the
+        directory is not empty or does not exist. """
+        try:
+            self.removedirs()
+        except OSError:
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
+                raise
+        return self
+
+    # --- Modifying operations on files
+
+    def touch(self):
+        """ Set the access/modified times of this file to the current time.
+        Create the file if it does not exist.
+        """
+        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0o666)
+        os.close(fd)
+        os.utime(self, None)
+        return self
+
+    def remove(self):
+        """ .. seealso:: :func:`os.remove` """
+        os.remove(self)
+        return self
+
+    def remove_p(self):
+        """ Like :meth:`remove`, but does not raise an exception if the
+        file does not exist. """
+        try:
+            self.unlink()
+        except OSError:
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.ENOENT:
+                raise
+        return self
+
+    def unlink(self):
+        """ .. seealso:: :func:`os.unlink` """
+        os.unlink(self)
+        return self
+
+    def unlink_p(self):
+        """ Like :meth:`unlink`, but does not raise an exception if the
+        file does not exist. """
+        self.remove_p()
+        return self
+
+    # --- Links
+
+    if hasattr(os, 'link'):
+        def link(self, newpath):
+            """ Create a hard link at `newpath`, pointing to this file.
+
+            .. seealso:: :func:`os.link`
+            """
+            os.link(self, newpath)
+            return self._next_class(newpath)
+
+    if hasattr(os, 'symlink'):
+        def symlink(self, newlink=None):
+            """ Create a symbolic link at `newlink`, pointing here.
+
+            If newlink is not supplied, the symbolic link will assume
+            the name self.basename(), creating the link in the cwd.
+
+            .. seealso:: :func:`os.symlink`
+            """
+            if newlink is None:
+                newlink = self.basename()
+            os.symlink(self, newlink)
+            return self._next_class(newlink)
+
+    if hasattr(os, 'readlink'):
+        def readlink(self):
+            """ Return the path to which this symbolic link points.
+
+            The result may be an absolute or a relative path.
+
+            .. seealso:: :meth:`readlinkabs`, :func:`os.readlink`
+            """
+            return self._next_class(os.readlink(self))
+
+        def readlinkabs(self):
+            """ Return the path to which this symbolic link points.
+
+            The result is always an absolute path.
+
+            .. seealso:: :meth:`readlink`, :func:`os.readlink`
+            """
+            p = self.readlink()
+            if p.isabs():
+                return p
+            else:
+                return (self.parent / p).abspath()
+
+    # High-level functions from shutil
+    # These functions will be bound to the instance such that
+    # Path(name).copy(target) will invoke shutil.copy(name, target)
+
+    copyfile = shutil.copyfile
+    copymode = shutil.copymode
+    copystat = shutil.copystat
+    copy = shutil.copy
+    copy2 = shutil.copy2
+    copytree = shutil.copytree
+    if hasattr(shutil, 'move'):
+        move = shutil.move
+    rmtree = shutil.rmtree
+
+    def rmtree_p(self):
+        """ Like :meth:`rmtree`, but does not raise an exception if the
+        directory does not exist. """
+        try:
+            self.rmtree()
+        except OSError:
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.ENOENT:
+                raise
+        return self
+
+    def chdir(self):
+        """ .. seealso:: :func:`os.chdir` """
+        os.chdir(self)
+
+    cd = chdir
+
+    def merge_tree(self, dst, symlinks=False, *args, **kwargs):
+        """
+        Copy entire contents of self to dst, overwriting existing
+        contents in dst with those in self.
+
+        If the additional keyword `update` is True, each
+        `src` will only be copied if `dst` does not exist,
+        or `src` is newer than `dst`.
+
+        Note that the technique employed stages the files in a temporary
+        directory first, so this function is not suitable for merging
+        trees with large files, especially if the temporary directory
+        is not capable of storing a copy of the entire source tree.
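+
+        A hedged usage sketch (source and destination paths are hypothetical)::
+
+            Path('build/assets').merge_tree('deploy/assets', update=True)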
+        """
+        update = kwargs.pop('update', False)
+        with TempDir() as _temp_dir:
+            # first copy the tree to a stage directory to support
+            #  the parameters and behavior of copytree.
+            stage = _temp_dir / str(hash(self))
+            self.copytree(stage, symlinks, *args, **kwargs)
+            # now copy everything from the stage directory using
+            #  the semantics of dir_util.copy_tree
+            distutils.dir_util.copy_tree(
+                stage,
+                dst,
+                preserve_symlinks=symlinks,
+                update=update,
+            )
+
+    #
+    # --- Special stuff from os
+
+    if hasattr(os, 'chroot'):
+        def chroot(self):
+            """ .. seealso:: :func:`os.chroot` """
+            os.chroot(self)
+
+    if hasattr(os, 'startfile'):
+        def startfile(self):
+            """ .. seealso:: :func:`os.startfile` """
+            os.startfile(self)
+            return self
+
+    # in-place re-writing, courtesy of Martijn Pieters
+    # http://www.zopatista.com/python/2013/11/26/inplace-file-rewriting/
+    @contextlib.contextmanager
+    def in_place(
+            self, mode='r', buffering=-1, encoding=None, errors=None,
+            newline=None, backup_extension=None,
+    ):
+        """
+        A context in which a file may be re-written in-place with
+        new content.
+
+        Yields a tuple of :samp:`({readable}, {writable})` file
+        objects, where `writable` replaces `readable`.
+
+        If an exception occurs, the old file is restored, removing the
+        written data.
+
+        Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only
+        read-only modes are allowed. A :exc:`ValueError` is raised
+        on invalid modes.
+
+        For example, to add line numbers to a file::
+
+            p = Path(filename)
+            assert p.isfile()
+            with p.in_place() as (reader, writer):
+                for number, line in enumerate(reader, 1):
+                    writer.write('{0:3}: '.format(number))
+                    writer.write(line)
+
+        Thereafter, the file at `filename` will have line numbers in it.
+        """
+        import io
+
+        if set(mode).intersection('wa+'):
+            raise ValueError('Only read-only file modes can be used')
+
+        # move existing file to backup, create new file with same permissions
+        # borrowed extensively from the fileinput module
+        backup_fn = self + (backup_extension or os.extsep + 'bak')
+        try:
+            os.unlink(backup_fn)
+        except os.error:
+            pass
+        os.rename(self, backup_fn)
+        readable = io.open(
+            backup_fn, mode, buffering=buffering,
+            encoding=encoding, errors=errors, newline=newline,
+        )
+        try:
+            perm = os.fstat(readable.fileno()).st_mode
+        except OSError:
+            writable = open(
+                self, 'w' + mode.replace('r', ''),
+                buffering=buffering, encoding=encoding, errors=errors,
+                newline=newline,
+            )
+        else:
+            os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
+            if hasattr(os, 'O_BINARY'):
+                os_mode |= os.O_BINARY
+            fd = os.open(self, os_mode, perm)
+            writable = io.open(
+                fd, "w" + mode.replace('r', ''),
+                buffering=buffering, encoding=encoding, errors=errors,
+                newline=newline,
+            )
+            try:
+                if hasattr(os, 'chmod'):
+                    os.chmod(self, perm)
+            except OSError:
+                pass
+        try:
+            yield readable, writable
+        except Exception:
+            # move backup back
+            readable.close()
+            writable.close()
+            try:
+                os.unlink(self)
+            except os.error:
+                pass
+            os.rename(backup_fn, self)
+            raise
+        else:
+            readable.close()
+            writable.close()
+        finally:
+            try:
+                os.unlink(backup_fn)
+            except os.error:
+                pass
+
+    @ClassProperty
+    @classmethod
+    def special(cls):
+        """
+        Return a SpecialResolver object for referencing a suitable
+        directory on the relevant platform for the given
+        type of content.
+
+        For example, to get a user config directory, invoke:
+
+            dir = Path.special().user.config
+
+        Uses the `appdirs
+        <https://pypi.python.org/pypi/appdirs/1.4.0>`_ module to resolve
+        the paths in a platform-friendly way.
+
+        To create a config directory for 'My App', consider:
+
+            dir = Path.special("My App").user.config.makedirs_p()
+
+        If the ``appdirs`` module is not installed, invocation
+        of special will raise an ImportError.
+        """
+        return functools.partial(SpecialResolver, cls)
+
+
+class SpecialResolver(object):
+    class ResolverScope:
+        def __init__(self, paths, scope):
+            self.paths = paths
+            self.scope = scope
+
+        def __getattr__(self, class_):
+            return self.paths.get_dir(self.scope, class_)
+
+    def __init__(self, path_class, *args, **kwargs):
+        appdirs = importlib.import_module('appdirs')
+
+        # let appname default to None until
+        # https://github.com/ActiveState/appdirs/issues/55 is solved.
+        not args and kwargs.setdefault('appname', None)
+
+        vars(self).update(
+            path_class=path_class,
+            wrapper=appdirs.AppDirs(*args, **kwargs),
+        )
+
+    def __getattr__(self, scope):
+        return self.ResolverScope(self, scope)
+
+    def get_dir(self, scope, class_):
+        """
+        Return the callable function from appdirs, but with the
+        result wrapped in self.path_class
+        """
+        prop_name = '{scope}_{class_}_dir'.format(**locals())
+        value = getattr(self.wrapper, prop_name)
+        MultiPath = Multi.for_class(self.path_class)
+        return MultiPath.detect(value)
+
+
+class Multi:
+    """
+    A mix-in for a Path which may contain multiple Path instances
+    separated by ``os.pathsep``.
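+
+    For example (illustrative), a ``PATH``-like value can be split into
+    individual Path objects::
+
+        MultiPath = Multi.for_class(Path)
+        pathvar = '/usr/bin' + os.pathsep + '/usr/local/bin'
+        entries = list(MultiPath.detect(pathvar))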
+    """
+    @classmethod
+    def for_class(cls, path_cls):
+        name = 'Multi' + path_cls.__name__
+        if PY2:
+            name = str(name)
+        return type(name, (cls, path_cls), {})
+
+    @classmethod
+    def detect(cls, input):
+        if os.pathsep not in input:
+            cls = cls._next_class
+        return cls(input)
+
+    def __iter__(self):
+        return iter(map(self._next_class, self.split(os.pathsep)))
+
+    @ClassProperty
+    @classmethod
+    def _next_class(cls):
+        """
+        Multi-subclasses should use the parent class
+        """
+        return next(
+            class_
+            for class_ in cls.__mro__
+            if not issubclass(class_, Multi)
+        )
+
+
+class TempDir(Path):
+    """
+    A temporary directory created via :func:`tempfile.mkdtemp`. It is
+    constructed with the same parameters as that function and can be
+    used as a context manager.
+
+    Example::
+
+        with TempDir() as d:
+            # do stuff with the Path object "d"
+
+        # here the directory is deleted automatically
+
+    .. seealso:: :func:`tempfile.mkdtemp`
+    """
+
+    @ClassProperty
+    @classmethod
+    def _next_class(cls):
+        return Path
+
+    def __new__(cls, *args, **kwargs):
+        dirname = tempfile.mkdtemp(*args, **kwargs)
+        return super(TempDir, cls).__new__(cls, dirname)
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __enter__(self):
+        # TempDir should return a Path version of itself and not itself
+        # so that a second context manager does not create a second
+        # temporary directory, but rather changes CWD to the location
+        # of the temporary directory.
+        return self._next_class(self)
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if not exc_value:
+            self.rmtree()
+
+
+# For backwards compatibility.
+tempdir = TempDir
+
+
+def _multi_permission_mask(mode):
+    """
+    Support multiple, comma-separated Unix chmod symbolic modes.
+
+    >>> _multi_permission_mask('a=r,u+w')(0) == 0o644
+    True
+    """
+    def compose(f, g):
+        return lambda *args, **kwargs: g(f(*args, **kwargs))
+    return functools.reduce(compose, map(_permission_mask, mode.split(',')))
+
+
+def _permission_mask(mode):
+    """
+    Convert a Unix chmod symbolic mode like ``'ugo+rwx'`` to a function
+    suitable for applying to a mask to affect that change.
+
+    >>> mask = _permission_mask('ugo+rwx')
+    >>> mask(0o554) == 0o777
+    True
+
+    >>> _permission_mask('go-x')(0o777) == 0o766
+    True
+
+    >>> _permission_mask('o-x')(0o445) == 0o444
+    True
+
+    >>> _permission_mask('a+x')(0) == 0o111
+    True
+
+    >>> _permission_mask('a=rw')(0o057) == 0o666
+    True
+
+    >>> _permission_mask('u=x')(0o666) == 0o166
+    True
+
+    >>> _permission_mask('g=')(0o157) == 0o107
+    True
+    """
+    # parse the symbolic mode
+    parsed = re.match('(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$', mode)
+    if not parsed:
+        raise ValueError("Unrecognized symbolic mode", mode)
+
+    # generate a mask representing the specified permission
+    spec_map = dict(r=4, w=2, x=1)
+    specs = (spec_map[perm] for perm in parsed.group('what'))
+    spec = functools.reduce(operator.or_, specs, 0)
+
+    # now apply spec to each subject in who
+    shift_map = dict(u=6, g=3, o=0)
+    who = parsed.group('who').replace('a', 'ugo')
+    masks = (spec << shift_map[subj] for subj in who)
+    mask = functools.reduce(operator.or_, masks)
+
+    op = parsed.group('op')
+
+    # if op is -, invert the mask
+    if op == '-':
+        mask ^= 0o777
+
+    # if op is =, retain extant values for unreferenced subjects
+    if op == '=':
+        masks = (0o7 << shift_map[subj] for subj in who)
+        retain = functools.reduce(operator.or_, masks) ^ 0o777
+
+    op_map = {
+        '+': operator.or_,
+        '-': operator.and_,
+        '=': lambda mask, target: target & retain ^ mask,
+    }
+    return functools.partial(op_map[op], mask)
+
+
+class CaseInsensitivePattern(text_type):
+    """
+    A string with a ``'normcase'`` property, suitable for passing to
+    :meth:`listdir`, :meth:`dirs`, :meth:`files`, :meth:`walk`,
+    :meth:`walkdirs`, or :meth:`walkfiles` to match case-insensitive.
+
+    For example, to get all files ending in .py, .Py, .pY, or .PY in the
+    current directory::
+
+        from path import Path, CaseInsensitivePattern as ci
+        Path('.').files(ci('*.py'))
+    """
+
+    @property
+    def normcase(self):
+        return __import__('ntpath').normcase
+
+
+class FastPath(Path):
+    """
+    Performance-optimized version of Path for use
+    on embedded platforms and other systems with limited
+    CPU. See #115 and #116 for background.
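+
+    Usage is the same as for :class:`Path`; for example (illustrative)::
+
+        for pyfile in FastPath('.').walkfiles('*.py'):
+            print(pyfile)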
+    """
+
+    def listdir(self, pattern=None):
+        children = os.listdir(self)
+        if pattern is None:
+            return [self / child for child in children]
+
+        pattern, normcase = self.__prepare(pattern)
+        return [
+            self / child
+            for child in children
+            if self._next_class(child).__fnmatch(pattern, normcase)
+        ]
+
+    def walk(self, pattern=None, errors='strict'):
+        class Handlers:
+            def strict(msg):
+                raise
+
+            def warn(msg):
+                warnings.warn(msg, TreeWalkWarning)
+
+            def ignore(msg):
+                pass
+
+        if not callable(errors) and errors not in vars(Handlers):
+            raise ValueError("invalid errors parameter")
+        errors = vars(Handlers).get(errors, errors)
+
+        if pattern:
+            pattern, normcase = self.__prepare(pattern)
+        else:
+            normcase = None
+
+        return self.__walk(pattern, normcase, errors)
+
+    def __walk(self, pattern, normcase, errors):
+        """ Prepared version of walk """
+        try:
+            childList = self.listdir()
+        except Exception:
+            exc = sys.exc_info()[1]
+            tmpl = "Unable to list directory '%(self)s': %(exc)s"
+            msg = tmpl % locals()
+            errors(msg)
+            return
+
+        for child in childList:
+            if pattern is None or child.__fnmatch(pattern, normcase):
+                yield child
+            try:
+                isdir = child.isdir()
+            except Exception:
+                exc = sys.exc_info()[1]
+                tmpl = "Unable to access '%(child)s': %(exc)s"
+                msg = tmpl % locals()
+                errors(msg)
+                isdir = False
+
+            if isdir:
+                for item in child.__walk(pattern, normcase, errors):
+                    yield item
+
+    def walkdirs(self, pattern=None, errors='strict'):
+        if errors not in ('strict', 'warn', 'ignore'):
+            raise ValueError("invalid errors parameter")
+
+        if pattern:
+            pattern, normcase = self.__prepare(pattern)
+        else:
+            normcase = None
+
+        return self.__walkdirs(pattern, normcase, errors)
+
+    def __walkdirs(self, pattern, normcase, errors):
+        """ Prepared version of walkdirs """
+        try:
+            dirs = self.dirs()
+        except Exception:
+            if errors == 'ignore':
+                return
+            elif errors == 'warn':
+                warnings.warn(
+                    "Unable to list directory '%s': %s"
+                    % (self, sys.exc_info()[1]),
+                    TreeWalkWarning)
+                return
+            else:
+                raise
+
+        for child in dirs:
+            if pattern is None or child.__fnmatch(pattern, normcase):
+                yield child
+            for subsubdir in child.__walkdirs(pattern, normcase, errors):
+                yield subsubdir
+
+    def walkfiles(self, pattern=None, errors='strict'):
+        if errors not in ('strict', 'warn', 'ignore'):
+            raise ValueError("invalid errors parameter")
+
+        if pattern:
+            pattern, normcase = self.__prepare(pattern)
+        else:
+            normcase = None
+
+        return self.__walkfiles(pattern, normcase, errors)
+
+    def __walkfiles(self, pattern, normcase, errors):
+        """ Prepared version of walkfiles """
+        try:
+            childList = self.listdir()
+        except Exception:
+            if errors == 'ignore':
+                return
+            elif errors == 'warn':
+                warnings.warn(
+                    "Unable to list directory '%s': %s"
+                    % (self, sys.exc_info()[1]),
+                    TreeWalkWarning)
+                return
+            else:
+                raise
+
+        for child in childList:
+            try:
+                isfile = child.isfile()
+                isdir = not isfile and child.isdir()
+            except Exception:
+                if errors == 'ignore':
+                    continue
+                elif errors == 'warn':
+                    warnings.warn(
+                        "Unable to access '%s': %s"
+                        % (self, sys.exc_info()[1]),
+                        TreeWalkWarning)
+                    continue
+                else:
+                    raise
+
+            if isfile:
+                if pattern is None or child.__fnmatch(pattern, normcase):
+                    yield child
+            elif isdir:
+                for f in child.__walkfiles(pattern, normcase, errors):
+                    yield f
+
+    def __fnmatch(self, pattern, normcase):
+        """ Return ``True`` if `self.name` matches the given `pattern`,
+        prepared version.
+        `pattern` - A filename pattern with wildcards,
+            for example ``'*.py'``. The pattern is expected to be normcase'd
+            already.
+        `normcase` - A function used to normalize the pattern and
+            filename before matching.
+        .. seealso:: :func:`Path.fnmatch`
+        """
+        return fnmatch.fnmatchcase(normcase(self.name), pattern)
+
+    def __prepare(self, pattern, normcase=None):
+        """ Prepares a fmatch_pattern for use with ``FastPath.__fnmatch`.
+        `pattern` - A filename pattern with wildcards,
+            for example ``'*.py'``. If the pattern contains a `normcase`
+            attribute, it is applied to the name and path prior to comparison.
+        `normcase` - (optional) A function used to normalize the pattern and
+            filename before matching. Defaults to :meth:`self.module`,
+            which defaults to :meth:`os.path.normcase`.
+        .. seealso:: :func:`FastPath.__fnmatch`
+        """
+        if not normcase:
+            normcase = getattr(pattern, 'normcase', self.module.normcase)
+        pattern = normcase(pattern)
+        return pattern, normcase
+
+    def fnmatch(self, pattern, normcase=None):
+        if not pattern:
+            raise ValueError("No pattern provided")
+
+        pattern, normcase = self.__prepare(pattern, normcase)
+        return self.__fnmatch(pattern, normcase)

+ 312 - 312
ext/yaml/__init__.py → mncheck/ext/yaml/__init__.py

@@ -1,312 +1,312 @@
-
-from .error import *
-
-from .tokens import *
-from .events import *
-from .nodes import *
-
-from .loader import *
-from .dumper import *
-
-__version__ = '3.13'
-try:
-    from .cyaml import *
-    __with_libyaml__ = True
-except ImportError:
-    __with_libyaml__ = False
-
-import io
-
-def scan(stream, Loader=Loader):
-    """
-    Scan a YAML stream and produce scanning tokens.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_token():
-            yield loader.get_token()
-    finally:
-        loader.dispose()
-
-def parse(stream, Loader=Loader):
-    """
-    Parse a YAML stream and produce parsing events.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_event():
-            yield loader.get_event()
-    finally:
-        loader.dispose()
-
-def compose(stream, Loader=Loader):
-    """
-    Parse the first YAML document in a stream
-    and produce the corresponding representation tree.
-    """
-    loader = Loader(stream)
-    try:
-        return loader.get_single_node()
-    finally:
-        loader.dispose()
-
-def compose_all(stream, Loader=Loader):
-    """
-    Parse all YAML documents in a stream
-    and produce corresponding representation trees.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_node():
-            yield loader.get_node()
-    finally:
-        loader.dispose()
-
-def load(stream, Loader=Loader):
-    """
-    Parse the first YAML document in a stream
-    and produce the corresponding Python object.
-    """
-    loader = Loader(stream)
-    try:
-        return loader.get_single_data()
-    finally:
-        loader.dispose()
-
-def load_all(stream, Loader=Loader):
-    """
-    Parse all YAML documents in a stream
-    and produce corresponding Python objects.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_data():
-            yield loader.get_data()
-    finally:
-        loader.dispose()
-
-def safe_load(stream):
-    """
-    Parse the first YAML document in a stream
-    and produce the corresponding Python object.
-    Resolve only basic YAML tags.
-    """
-    return load(stream, SafeLoader)
-
-def safe_load_all(stream):
-    """
-    Parse all YAML documents in a stream
-    and produce corresponding Python objects.
-    Resolve only basic YAML tags.
-    """
-    return load_all(stream, SafeLoader)
-
-def emit(events, stream=None, Dumper=Dumper,
-        canonical=None, indent=None, width=None,
-        allow_unicode=None, line_break=None):
-    """
-    Emit YAML parsing events into a stream.
-    If stream is None, return the produced string instead.
-    """
-    getvalue = None
-    if stream is None:
-        stream = io.StringIO()
-        getvalue = stream.getvalue
-    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
-            allow_unicode=allow_unicode, line_break=line_break)
-    try:
-        for event in events:
-            dumper.emit(event)
-    finally:
-        dumper.dispose()
-    if getvalue:
-        return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
-        canonical=None, indent=None, width=None,
-        allow_unicode=None, line_break=None,
-        encoding=None, explicit_start=None, explicit_end=None,
-        version=None, tags=None):
-    """
-    Serialize a sequence of representation trees into a YAML stream.
-    If stream is None, return the produced string instead.
-    """
-    getvalue = None
-    if stream is None:
-        if encoding is None:
-            stream = io.StringIO()
-        else:
-            stream = io.BytesIO()
-        getvalue = stream.getvalue
-    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
-            allow_unicode=allow_unicode, line_break=line_break,
-            encoding=encoding, version=version, tags=tags,
-            explicit_start=explicit_start, explicit_end=explicit_end)
-    try:
-        dumper.open()
-        for node in nodes:
-            dumper.serialize(node)
-        dumper.close()
-    finally:
-        dumper.dispose()
-    if getvalue:
-        return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
-    """
-    Serialize a representation tree into a YAML stream.
-    If stream is None, return the produced string instead.
-    """
-    return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
-        default_style=None, default_flow_style=None,
-        canonical=None, indent=None, width=None,
-        allow_unicode=None, line_break=None,
-        encoding=None, explicit_start=None, explicit_end=None,
-        version=None, tags=None):
-    """
-    Serialize a sequence of Python objects into a YAML stream.
-    If stream is None, return the produced string instead.
-    """
-    getvalue = None
-    if stream is None:
-        if encoding is None:
-            stream = io.StringIO()
-        else:
-            stream = io.BytesIO()
-        getvalue = stream.getvalue
-    dumper = Dumper(stream, default_style=default_style,
-            default_flow_style=default_flow_style,
-            canonical=canonical, indent=indent, width=width,
-            allow_unicode=allow_unicode, line_break=line_break,
-            encoding=encoding, version=version, tags=tags,
-            explicit_start=explicit_start, explicit_end=explicit_end)
-    try:
-        dumper.open()
-        for data in documents:
-            dumper.represent(data)
-        dumper.close()
-    finally:
-        dumper.dispose()
-    if getvalue:
-        return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
-    """
-    Serialize a Python object into a YAML stream.
-    If stream is None, return the produced string instead.
-    """
-    return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
-    """
-    Serialize a sequence of Python objects into a YAML stream.
-    Produce only basic YAML tags.
-    If stream is None, return the produced string instead.
-    """
-    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
-    """
-    Serialize a Python object into a YAML stream.
-    Produce only basic YAML tags.
-    If stream is None, return the produced string instead.
-    """
-    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
-        Loader=Loader, Dumper=Dumper):
-    """
-    Add an implicit scalar detector.
-    If an implicit scalar value matches the given regexp,
-    the corresponding tag is assigned to the scalar.
-    first is a sequence of possible initial characters or None.
-    """
-    Loader.add_implicit_resolver(tag, regexp, first)
-    Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
-    """
-    Add a path based resolver for the given tag.
-    A path is a list of keys that forms a path
-    to a node in the representation tree.
-    Keys can be string values, integers, or None.
-    """
-    Loader.add_path_resolver(tag, path, kind)
-    Dumper.add_path_resolver(tag, path, kind)
-
-def add_constructor(tag, constructor, Loader=Loader):
-    """
-    Add a constructor for the given tag.
-    Constructor is a function that accepts a Loader instance
-    and a node object and produces the corresponding Python object.
-    """
-    Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
-    """
-    Add a multi-constructor for the given tag prefix.
-    Multi-constructor is called for a node if its tag starts with tag_prefix.
-    Multi-constructor accepts a Loader instance, a tag suffix,
-    and a node object and produces the corresponding Python object.
-    """
-    Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
-    """
-    Add a representer for the given type.
-    Representer is a function accepting a Dumper instance
-    and an instance of the given data type
-    and producing the corresponding representation node.
-    """
-    Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
-    """
-    Add a representer for the given type.
-    Multi-representer is a function accepting a Dumper instance
-    and an instance of the given data type or subtype
-    and producing the corresponding representation node.
-    """
-    Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
-    """
-    The metaclass for YAMLObject.
-    """
-    def __init__(cls, name, bases, kwds):
-        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
-        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
-            cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
-            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(metaclass=YAMLObjectMetaclass):
-    """
-    An object that can dump itself to a YAML stream
-    and load itself from a YAML stream.
-    """
-
-    __slots__ = ()  # no direct instantiation, so allow immutable subclasses
-
-    yaml_loader = Loader
-    yaml_dumper = Dumper
-
-    yaml_tag = None
-    yaml_flow_style = None
-
-    @classmethod
-    def from_yaml(cls, loader, node):
-        """
-        Convert a representation node to a Python object.
-        """
-        return loader.construct_yaml_object(node, cls)
-
-    @classmethod
-    def to_yaml(cls, dumper, data):
-        """
-        Convert a Python object to a representation node.
-        """
-        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
-                flow_style=cls.yaml_flow_style)
-
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '3.13'
+try:
+    from .cyaml import *
+    __with_libyaml__ = True
+except ImportError:
+    __with_libyaml__ = False
+
+import io
+
+def scan(stream, Loader=Loader):
+    """
+    Scan a YAML stream and produce scanning tokens.
+    """
+    loader = Loader(stream)
+    try:
+        while loader.check_token():
+            yield loader.get_token()
+    finally:
+        loader.dispose()
+
+def parse(stream, Loader=Loader):
+    """
+    Parse a YAML stream and produce parsing events.
+    """
+    loader = Loader(stream)
+    try:
+        while loader.check_event():
+            yield loader.get_event()
+    finally:
+        loader.dispose()
+
+def compose(stream, Loader=Loader):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding representation tree.
+    """
+    loader = Loader(stream)
+    try:
+        return loader.get_single_node()
+    finally:
+        loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding representation trees.
+    """
+    loader = Loader(stream)
+    try:
+        while loader.check_node():
+            yield loader.get_node()
+    finally:
+        loader.dispose()
+
+def load(stream, Loader=Loader):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+    """
+    loader = Loader(stream)
+    try:
+        return loader.get_single_data()
+    finally:
+        loader.dispose()
+
+def load_all(stream, Loader=Loader):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+    """
+    loader = Loader(stream)
+    try:
+        while loader.check_data():
+            yield loader.get_data()
+    finally:
+        loader.dispose()
+
+def safe_load(stream):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+    Resolve only basic YAML tags.
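+
+    For example (illustrative)::
+
+        data = safe_load("a: 1")
+        # data == {'a': 1}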
+    """
+    return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+    Resolve only basic YAML tags.
+    """
+    return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+        canonical=None, indent=None, width=None,
+        allow_unicode=None, line_break=None):
+    """
+    Emit YAML parsing events into a stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        stream = io.StringIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+            allow_unicode=allow_unicode, line_break=line_break)
+    try:
+        for event in events:
+            dumper.emit(event)
+    finally:
+        dumper.dispose()
+    if getvalue:
+        return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+        canonical=None, indent=None, width=None,
+        allow_unicode=None, line_break=None,
+        encoding=None, explicit_start=None, explicit_end=None,
+        version=None, tags=None):
+    """
+    Serialize a sequence of representation trees into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        if encoding is None:
+            stream = io.StringIO()
+        else:
+            stream = io.BytesIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+            allow_unicode=allow_unicode, line_break=line_break,
+            encoding=encoding, version=version, tags=tags,
+            explicit_start=explicit_start, explicit_end=explicit_end)
+    try:
+        dumper.open()
+        for node in nodes:
+            dumper.serialize(node)
+        dumper.close()
+    finally:
+        dumper.dispose()
+    if getvalue:
+        return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+    """
+    Serialize a representation tree into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+        default_style=None, default_flow_style=None,
+        canonical=None, indent=None, width=None,
+        allow_unicode=None, line_break=None,
+        encoding=None, explicit_start=None, explicit_end=None,
+        version=None, tags=None):
+    """
+    Serialize a sequence of Python objects into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        if encoding is None:
+            stream = io.StringIO()
+        else:
+            stream = io.BytesIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(stream, default_style=default_style,
+            default_flow_style=default_flow_style,
+            canonical=canonical, indent=indent, width=width,
+            allow_unicode=allow_unicode, line_break=line_break,
+            encoding=encoding, version=version, tags=tags,
+            explicit_start=explicit_start, explicit_end=explicit_end)
+    try:
+        dumper.open()
+        for data in documents:
+            dumper.represent(data)
+        dumper.close()
+    finally:
+        dumper.dispose()
+    if getvalue:
+        return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+    """
+    Serialize a Python object into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+    """
+    Serialize a sequence of Python objects into a YAML stream.
+    Produce only basic YAML tags.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+    """
+    Serialize a Python object into a YAML stream.
+    Produce only basic YAML tags.
+    If stream is None, return the produced string instead.
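+
+    For example (illustrative)::
+
+        text = safe_dump({'a': 1})
+        # text is the string "a: 1" followed by a newline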
+    """
+    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+        Loader=Loader, Dumper=Dumper):
+    """
+    Add an implicit scalar detector.
+    If an implicit scalar value matches the given regexp,
+    the corresponding tag is assigned to the scalar.
+    first is a sequence of possible initial characters or None.
+    """
+    Loader.add_implicit_resolver(tag, regexp, first)
+    Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+    """
+    Add a path based resolver for the given tag.
+    A path is a list of keys that forms a path
+    to a node in the representation tree.
+    Keys can be string values, integers, or None.
+    """
+    Loader.add_path_resolver(tag, path, kind)
+    Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+    """
+    Add a constructor for the given tag.
+    Constructor is a function that accepts a Loader instance
+    and a node object and produces the corresponding Python object.
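+
+    For example (illustrative; the tag and helper are hypothetical)::
+
+        def construct_point(loader, node):
+            x, y = loader.construct_sequence(node)
+            return (x, y)
+
+        add_constructor(u'!point', construct_point)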
+    """
+    Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+    """
+    Add a multi-constructor for the given tag prefix.
+    Multi-constructor is called for a node if its tag starts with tag_prefix.
+    Multi-constructor accepts a Loader instance, a tag suffix,
+    and a node object and produces the corresponding Python object.
+    """
+    Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+    """
+    Add a representer for the given type.
+    Representer is a function accepting a Dumper instance
+    and an instance of the given data type
+    and producing the corresponding representation node.
+    """
+    Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+    """
+    Add a representer for the given type.
+    Multi-representer is a function accepting a Dumper instance
+    and an instance of the given data type or subtype
+    and producing the corresponding representation node.
+    """
+    Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+    """
+    The metaclass for YAMLObject.
+    """
+    def __init__(cls, name, bases, kwds):
+        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+            cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+    """
+    An object that can dump itself to a YAML stream
+    and load itself from a YAML stream.
+    """
+
+    __slots__ = ()  # no direct instantiation, so allow immutable subclasses
+
+    yaml_loader = Loader
+    yaml_dumper = Dumper
+
+    yaml_tag = None
+    yaml_flow_style = None
+
+    @classmethod
+    def from_yaml(cls, loader, node):
+        """
+        Convert a representation node to a Python object.
+        """
+        return loader.construct_yaml_object(node, cls)
+
+    @classmethod
+    def to_yaml(cls, dumper, data):
+        """
+        Convert a Python object to a representation node.
+        """
+        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+                flow_style=cls.yaml_flow_style)
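
The __init__.py hunk above carries the public surface of the vendored PyYAML copy: the dump family, the add_* registration helpers, and YAMLObject. A minimal usage sketch follows; it assumes the relocated package is importable as mncheck.ext.yaml after this refactoring, and the Point class with its !point tag is purely illustrative.

# Sketch only: assumes the vendored package imports as mncheck.ext.yaml.
from mncheck.ext import yaml

# safe_dump() emits only basic YAML tags; with stream=None it returns a str.
print(yaml.safe_dump({'name': 'mncheck', 'versions': [1, 2]}))

# YAMLObject ties a tag to a loader/dumper pair through its metaclass:
# declaring yaml_tag registers from_yaml()/to_yaml() automatically.
class Point(yaml.YAMLObject):          # illustrative class, not part of the plugin
    yaml_tag = '!point'
    yaml_loader = yaml.SafeLoader      # register on the safe pair so safe_load() works
    yaml_dumper = yaml.SafeDumper

    def __init__(self, x, y):
        self.x = x
        self.y = y

text = yaml.dump(Point(1, 2), Dumper=yaml.SafeDumper)
print(text)                            # the !point tag shows up in the output
restored = yaml.safe_load(text)
print(restored.x, restored.y)          # 1 2

Without the registration done by the metaclass, safe_load() would fall through to SafeConstructor.construct_undefined() and raise a ConstructorError for the unknown !point tag.
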
+

+ 139 - 139
ext/yaml/composer.py → mncheck/ext/yaml/composer.py

@@ -1,139 +1,139 @@
-
-__all__ = ['Composer', 'ComposerError']
-
-from .error import MarkedYAMLError
-from .events import *
-from .nodes import *
-
-class ComposerError(MarkedYAMLError):
-    pass
-
-class Composer:
-
-    def __init__(self):
-        self.anchors = {}
-
-    def check_node(self):
-        # Drop the STREAM-START event.
-        if self.check_event(StreamStartEvent):
-            self.get_event()
-
-        # Are there more documents available?
-        return not self.check_event(StreamEndEvent)
-
-    def get_node(self):
-        # Get the root node of the next document.
-        if not self.check_event(StreamEndEvent):
-            return self.compose_document()
-
-    def get_single_node(self):
-        # Drop the STREAM-START event.
-        self.get_event()
-
-        # Compose a document if the stream is not empty.
-        document = None
-        if not self.check_event(StreamEndEvent):
-            document = self.compose_document()
-
-        # Ensure that the stream contains no more documents.
-        if not self.check_event(StreamEndEvent):
-            event = self.get_event()
-            raise ComposerError("expected a single document in the stream",
-                    document.start_mark, "but found another document",
-                    event.start_mark)
-
-        # Drop the STREAM-END event.
-        self.get_event()
-
-        return document
-
-    def compose_document(self):
-        # Drop the DOCUMENT-START event.
-        self.get_event()
-
-        # Compose the root node.
-        node = self.compose_node(None, None)
-
-        # Drop the DOCUMENT-END event.
-        self.get_event()
-
-        self.anchors = {}
-        return node
-
-    def compose_node(self, parent, index):
-        if self.check_event(AliasEvent):
-            event = self.get_event()
-            anchor = event.anchor
-            if anchor not in self.anchors:
-                raise ComposerError(None, None, "found undefined alias %r"
-                        % anchor, event.start_mark)
-            return self.anchors[anchor]
-        event = self.peek_event()
-        anchor = event.anchor
-        if anchor is not None:
-            if anchor in self.anchors:
-                raise ComposerError("found duplicate anchor %r; first occurrence"
-                        % anchor, self.anchors[anchor].start_mark,
-                        "second occurrence", event.start_mark)
-        self.descend_resolver(parent, index)
-        if self.check_event(ScalarEvent):
-            node = self.compose_scalar_node(anchor)
-        elif self.check_event(SequenceStartEvent):
-            node = self.compose_sequence_node(anchor)
-        elif self.check_event(MappingStartEvent):
-            node = self.compose_mapping_node(anchor)
-        self.ascend_resolver()
-        return node
-
-    def compose_scalar_node(self, anchor):
-        event = self.get_event()
-        tag = event.tag
-        if tag is None or tag == '!':
-            tag = self.resolve(ScalarNode, event.value, event.implicit)
-        node = ScalarNode(tag, event.value,
-                event.start_mark, event.end_mark, style=event.style)
-        if anchor is not None:
-            self.anchors[anchor] = node
-        return node
-
-    def compose_sequence_node(self, anchor):
-        start_event = self.get_event()
-        tag = start_event.tag
-        if tag is None or tag == '!':
-            tag = self.resolve(SequenceNode, None, start_event.implicit)
-        node = SequenceNode(tag, [],
-                start_event.start_mark, None,
-                flow_style=start_event.flow_style)
-        if anchor is not None:
-            self.anchors[anchor] = node
-        index = 0
-        while not self.check_event(SequenceEndEvent):
-            node.value.append(self.compose_node(node, index))
-            index += 1
-        end_event = self.get_event()
-        node.end_mark = end_event.end_mark
-        return node
-
-    def compose_mapping_node(self, anchor):
-        start_event = self.get_event()
-        tag = start_event.tag
-        if tag is None or tag == '!':
-            tag = self.resolve(MappingNode, None, start_event.implicit)
-        node = MappingNode(tag, [],
-                start_event.start_mark, None,
-                flow_style=start_event.flow_style)
-        if anchor is not None:
-            self.anchors[anchor] = node
-        while not self.check_event(MappingEndEvent):
-            #key_event = self.peek_event()
-            item_key = self.compose_node(node, None)
-            #if item_key in node.value:
-            #    raise ComposerError("while composing a mapping", start_event.start_mark,
-            #            "found duplicate key", key_event.start_mark)
-            item_value = self.compose_node(node, item_key)
-            #node.value[item_key] = item_value
-            node.value.append((item_key, item_value))
-        end_event = self.get_event()
-        node.end_mark = end_event.end_mark
-        return node
-
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+    pass
+
+class Composer:
+
+    def __init__(self):
+        self.anchors = {}
+
+    def check_node(self):
+        # Drop the STREAM-START event.
+        if self.check_event(StreamStartEvent):
+            self.get_event()
+
+        # Are there more documents available?
+        return not self.check_event(StreamEndEvent)
+
+    def get_node(self):
+        # Get the root node of the next document.
+        if not self.check_event(StreamEndEvent):
+            return self.compose_document()
+
+    def get_single_node(self):
+        # Drop the STREAM-START event.
+        self.get_event()
+
+        # Compose a document if the stream is not empty.
+        document = None
+        if not self.check_event(StreamEndEvent):
+            document = self.compose_document()
+
+        # Ensure that the stream contains no more documents.
+        if not self.check_event(StreamEndEvent):
+            event = self.get_event()
+            raise ComposerError("expected a single document in the stream",
+                    document.start_mark, "but found another document",
+                    event.start_mark)
+
+        # Drop the STREAM-END event.
+        self.get_event()
+
+        return document
+
+    def compose_document(self):
+        # Drop the DOCUMENT-START event.
+        self.get_event()
+
+        # Compose the root node.
+        node = self.compose_node(None, None)
+
+        # Drop the DOCUMENT-END event.
+        self.get_event()
+
+        self.anchors = {}
+        return node
+
+    def compose_node(self, parent, index):
+        if self.check_event(AliasEvent):
+            event = self.get_event()
+            anchor = event.anchor
+            if anchor not in self.anchors:
+                raise ComposerError(None, None, "found undefined alias %r"
+                        % anchor, event.start_mark)
+            return self.anchors[anchor]
+        event = self.peek_event()
+        anchor = event.anchor
+        if anchor is not None:
+            if anchor in self.anchors:
+                raise ComposerError("found duplicate anchor %r; first occurrence"
+                        % anchor, self.anchors[anchor].start_mark,
+                        "second occurrence", event.start_mark)
+        self.descend_resolver(parent, index)
+        if self.check_event(ScalarEvent):
+            node = self.compose_scalar_node(anchor)
+        elif self.check_event(SequenceStartEvent):
+            node = self.compose_sequence_node(anchor)
+        elif self.check_event(MappingStartEvent):
+            node = self.compose_mapping_node(anchor)
+        self.ascend_resolver()
+        return node
+
+    def compose_scalar_node(self, anchor):
+        event = self.get_event()
+        tag = event.tag
+        if tag is None or tag == '!':
+            tag = self.resolve(ScalarNode, event.value, event.implicit)
+        node = ScalarNode(tag, event.value,
+                event.start_mark, event.end_mark, style=event.style)
+        if anchor is not None:
+            self.anchors[anchor] = node
+        return node
+
+    def compose_sequence_node(self, anchor):
+        start_event = self.get_event()
+        tag = start_event.tag
+        if tag is None or tag == '!':
+            tag = self.resolve(SequenceNode, None, start_event.implicit)
+        node = SequenceNode(tag, [],
+                start_event.start_mark, None,
+                flow_style=start_event.flow_style)
+        if anchor is not None:
+            self.anchors[anchor] = node
+        index = 0
+        while not self.check_event(SequenceEndEvent):
+            node.value.append(self.compose_node(node, index))
+            index += 1
+        end_event = self.get_event()
+        node.end_mark = end_event.end_mark
+        return node
+
+    def compose_mapping_node(self, anchor):
+        start_event = self.get_event()
+        tag = start_event.tag
+        if tag is None or tag == '!':
+            tag = self.resolve(MappingNode, None, start_event.implicit)
+        node = MappingNode(tag, [],
+                start_event.start_mark, None,
+                flow_style=start_event.flow_style)
+        if anchor is not None:
+            self.anchors[anchor] = node
+        while not self.check_event(MappingEndEvent):
+            #key_event = self.peek_event()
+            item_key = self.compose_node(node, None)
+            #if item_key in node.value:
+            #    raise ComposerError("while composing a mapping", start_event.start_mark,
+            #            "found duplicate key", key_event.start_mark)
+            item_value = self.compose_node(node, item_key)
+            #node.value[item_key] = item_value
+            node.value.append((item_key, item_value))
+        end_event = self.get_event()
+        node.end_mark = end_event.end_mark
+        return node
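
The Composer in this hunk assembles the parser's event stream into a tree of ScalarNode/SequenceNode/MappingNode objects, resolving anchors and aliases along the way. A short sketch of the node tree returned by the module-level compose() helper (defined earlier in __init__.py), under the same mncheck.ext.yaml import assumption:

# Sketch only: assumes the vendored package imports as mncheck.ext.yaml.
from mncheck.ext import yaml

node = yaml.compose("base: &a {x: 1}\nother: *a\n")
print(type(node).__name__)          # MappingNode
(k1, v1), (k2, v2) = node.value     # MappingNode.value is a list of (key, value) node pairs
print(v1 is v2)                     # True: the *a alias reuses the node anchored by &a
print(v1.tag)                       # tag:yaml.org,2002:map

Note that compose_document() resets self.anchors after every document, so anchors never leak across documents of a multi-document stream.
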
+

+ 686 - 686
ext/yaml/constructor.py → mncheck/ext/yaml/constructor.py

@@ -1,686 +1,686 @@
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
-    'ConstructorError']
-
-from .error import *
-from .nodes import *
-
-import collections, datetime, base64, binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
-    pass
-
-class BaseConstructor:
-
-    yaml_constructors = {}
-    yaml_multi_constructors = {}
-
-    def __init__(self):
-        self.constructed_objects = {}
-        self.recursive_objects = {}
-        self.state_generators = []
-        self.deep_construct = False
-
-    def check_data(self):
-        # Are there more documents available?
-        return self.check_node()
-
-    def get_data(self):
-        # Construct and return the next document.
-        if self.check_node():
-            return self.construct_document(self.get_node())
-
-    def get_single_data(self):
-        # Ensure that the stream contains a single document and construct it.
-        node = self.get_single_node()
-        if node is not None:
-            return self.construct_document(node)
-        return None
-
-    def construct_document(self, node):
-        data = self.construct_object(node)
-        while self.state_generators:
-            state_generators = self.state_generators
-            self.state_generators = []
-            for generator in state_generators:
-                for dummy in generator:
-                    pass
-        self.constructed_objects = {}
-        self.recursive_objects = {}
-        self.deep_construct = False
-        return data
-
-    def construct_object(self, node, deep=False):
-        if node in self.constructed_objects:
-            return self.constructed_objects[node]
-        if deep:
-            old_deep = self.deep_construct
-            self.deep_construct = True
-        if node in self.recursive_objects:
-            raise ConstructorError(None, None,
-                    "found unconstructable recursive node", node.start_mark)
-        self.recursive_objects[node] = None
-        constructor = None
-        tag_suffix = None
-        if node.tag in self.yaml_constructors:
-            constructor = self.yaml_constructors[node.tag]
-        else:
-            for tag_prefix in self.yaml_multi_constructors:
-                if node.tag.startswith(tag_prefix):
-                    tag_suffix = node.tag[len(tag_prefix):]
-                    constructor = self.yaml_multi_constructors[tag_prefix]
-                    break
-            else:
-                if None in self.yaml_multi_constructors:
-                    tag_suffix = node.tag
-                    constructor = self.yaml_multi_constructors[None]
-                elif None in self.yaml_constructors:
-                    constructor = self.yaml_constructors[None]
-                elif isinstance(node, ScalarNode):
-                    constructor = self.__class__.construct_scalar
-                elif isinstance(node, SequenceNode):
-                    constructor = self.__class__.construct_sequence
-                elif isinstance(node, MappingNode):
-                    constructor = self.__class__.construct_mapping
-        if tag_suffix is None:
-            data = constructor(self, node)
-        else:
-            data = constructor(self, tag_suffix, node)
-        if isinstance(data, types.GeneratorType):
-            generator = data
-            data = next(generator)
-            if self.deep_construct:
-                for dummy in generator:
-                    pass
-            else:
-                self.state_generators.append(generator)
-        self.constructed_objects[node] = data
-        del self.recursive_objects[node]
-        if deep:
-            self.deep_construct = old_deep
-        return data
-
-    def construct_scalar(self, node):
-        if not isinstance(node, ScalarNode):
-            raise ConstructorError(None, None,
-                    "expected a scalar node, but found %s" % node.id,
-                    node.start_mark)
-        return node.value
-
-    def construct_sequence(self, node, deep=False):
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError(None, None,
-                    "expected a sequence node, but found %s" % node.id,
-                    node.start_mark)
-        return [self.construct_object(child, deep=deep)
-                for child in node.value]
-
-    def construct_mapping(self, node, deep=False):
-        if not isinstance(node, MappingNode):
-            raise ConstructorError(None, None,
-                    "expected a mapping node, but found %s" % node.id,
-                    node.start_mark)
-        mapping = {}
-        for key_node, value_node in node.value:
-            key = self.construct_object(key_node, deep=deep)
-            if not isinstance(key, collections.Hashable):
-                raise ConstructorError("while constructing a mapping", node.start_mark,
-                        "found unhashable key", key_node.start_mark)
-            value = self.construct_object(value_node, deep=deep)
-            mapping[key] = value
-        return mapping
-
-    def construct_pairs(self, node, deep=False):
-        if not isinstance(node, MappingNode):
-            raise ConstructorError(None, None,
-                    "expected a mapping node, but found %s" % node.id,
-                    node.start_mark)
-        pairs = []
-        for key_node, value_node in node.value:
-            key = self.construct_object(key_node, deep=deep)
-            value = self.construct_object(value_node, deep=deep)
-            pairs.append((key, value))
-        return pairs
-
-    @classmethod
-    def add_constructor(cls, tag, constructor):
-        if not 'yaml_constructors' in cls.__dict__:
-            cls.yaml_constructors = cls.yaml_constructors.copy()
-        cls.yaml_constructors[tag] = constructor
-
-    @classmethod
-    def add_multi_constructor(cls, tag_prefix, multi_constructor):
-        if not 'yaml_multi_constructors' in cls.__dict__:
-            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
-        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
-
-class SafeConstructor(BaseConstructor):
-
-    def construct_scalar(self, node):
-        if isinstance(node, MappingNode):
-            for key_node, value_node in node.value:
-                if key_node.tag == 'tag:yaml.org,2002:value':
-                    return self.construct_scalar(value_node)
-        return super().construct_scalar(node)
-
-    def flatten_mapping(self, node):
-        merge = []
-        index = 0
-        while index < len(node.value):
-            key_node, value_node = node.value[index]
-            if key_node.tag == 'tag:yaml.org,2002:merge':
-                del node.value[index]
-                if isinstance(value_node, MappingNode):
-                    self.flatten_mapping(value_node)
-                    merge.extend(value_node.value)
-                elif isinstance(value_node, SequenceNode):
-                    submerge = []
-                    for subnode in value_node.value:
-                        if not isinstance(subnode, MappingNode):
-                            raise ConstructorError("while constructing a mapping",
-                                    node.start_mark,
-                                    "expected a mapping for merging, but found %s"
-                                    % subnode.id, subnode.start_mark)
-                        self.flatten_mapping(subnode)
-                        submerge.append(subnode.value)
-                    submerge.reverse()
-                    for value in submerge:
-                        merge.extend(value)
-                else:
-                    raise ConstructorError("while constructing a mapping", node.start_mark,
-                            "expected a mapping or list of mappings for merging, but found %s"
-                            % value_node.id, value_node.start_mark)
-            elif key_node.tag == 'tag:yaml.org,2002:value':
-                key_node.tag = 'tag:yaml.org,2002:str'
-                index += 1
-            else:
-                index += 1
-        if merge:
-            node.value = merge + node.value
-
-    def construct_mapping(self, node, deep=False):
-        if isinstance(node, MappingNode):
-            self.flatten_mapping(node)
-        return super().construct_mapping(node, deep=deep)
-
-    def construct_yaml_null(self, node):
-        self.construct_scalar(node)
-        return None
-
-    bool_values = {
-        'yes':      True,
-        'no':       False,
-        'true':     True,
-        'false':    False,
-        'on':       True,
-        'off':      False,
-    }
-
-    def construct_yaml_bool(self, node):
-        value = self.construct_scalar(node)
-        return self.bool_values[value.lower()]
-
-    def construct_yaml_int(self, node):
-        value = self.construct_scalar(node)
-        value = value.replace('_', '')
-        sign = +1
-        if value[0] == '-':
-            sign = -1
-        if value[0] in '+-':
-            value = value[1:]
-        if value == '0':
-            return 0
-        elif value.startswith('0b'):
-            return sign*int(value[2:], 2)
-        elif value.startswith('0x'):
-            return sign*int(value[2:], 16)
-        elif value[0] == '0':
-            return sign*int(value, 8)
-        elif ':' in value:
-            digits = [int(part) for part in value.split(':')]
-            digits.reverse()
-            base = 1
-            value = 0
-            for digit in digits:
-                value += digit*base
-                base *= 60
-            return sign*value
-        else:
-            return sign*int(value)
-
-    inf_value = 1e300
-    while inf_value != inf_value*inf_value:
-        inf_value *= inf_value
-    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
-
-    def construct_yaml_float(self, node):
-        value = self.construct_scalar(node)
-        value = value.replace('_', '').lower()
-        sign = +1
-        if value[0] == '-':
-            sign = -1
-        if value[0] in '+-':
-            value = value[1:]
-        if value == '.inf':
-            return sign*self.inf_value
-        elif value == '.nan':
-            return self.nan_value
-        elif ':' in value:
-            digits = [float(part) for part in value.split(':')]
-            digits.reverse()
-            base = 1
-            value = 0.0
-            for digit in digits:
-                value += digit*base
-                base *= 60
-            return sign*value
-        else:
-            return sign*float(value)
-
-    def construct_yaml_binary(self, node):
-        try:
-            value = self.construct_scalar(node).encode('ascii')
-        except UnicodeEncodeError as exc:
-            raise ConstructorError(None, None,
-                    "failed to convert base64 data into ascii: %s" % exc,
-                    node.start_mark)
-        try:
-            if hasattr(base64, 'decodebytes'):
-                return base64.decodebytes(value)
-            else:
-                return base64.decodestring(value)
-        except binascii.Error as exc:
-            raise ConstructorError(None, None,
-                    "failed to decode base64 data: %s" % exc, node.start_mark)
-
-    timestamp_regexp = re.compile(
-            r'''^(?P<year>[0-9][0-9][0-9][0-9])
-                -(?P<month>[0-9][0-9]?)
-                -(?P<day>[0-9][0-9]?)
-                (?:(?:[Tt]|[ \t]+)
-                (?P<hour>[0-9][0-9]?)
-                :(?P<minute>[0-9][0-9])
-                :(?P<second>[0-9][0-9])
-                (?:\.(?P<fraction>[0-9]*))?
-                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
-                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
-    def construct_yaml_timestamp(self, node):
-        value = self.construct_scalar(node)
-        match = self.timestamp_regexp.match(node.value)
-        values = match.groupdict()
-        year = int(values['year'])
-        month = int(values['month'])
-        day = int(values['day'])
-        if not values['hour']:
-            return datetime.date(year, month, day)
-        hour = int(values['hour'])
-        minute = int(values['minute'])
-        second = int(values['second'])
-        fraction = 0
-        if values['fraction']:
-            fraction = values['fraction'][:6]
-            while len(fraction) < 6:
-                fraction += '0'
-            fraction = int(fraction)
-        delta = None
-        if values['tz_sign']:
-            tz_hour = int(values['tz_hour'])
-            tz_minute = int(values['tz_minute'] or 0)
-            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
-            if values['tz_sign'] == '-':
-                delta = -delta
-        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
-        if delta:
-            data -= delta
-        return data
-
-    def construct_yaml_omap(self, node):
-        # Note: we do not check for duplicate keys, because it's too
-        # CPU-expensive.
-        omap = []
-        yield omap
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError("while constructing an ordered map", node.start_mark,
-                    "expected a sequence, but found %s" % node.id, node.start_mark)
-        for subnode in node.value:
-            if not isinstance(subnode, MappingNode):
-                raise ConstructorError("while constructing an ordered map", node.start_mark,
-                        "expected a mapping of length 1, but found %s" % subnode.id,
-                        subnode.start_mark)
-            if len(subnode.value) != 1:
-                raise ConstructorError("while constructing an ordered map", node.start_mark,
-                        "expected a single mapping item, but found %d items" % len(subnode.value),
-                        subnode.start_mark)
-            key_node, value_node = subnode.value[0]
-            key = self.construct_object(key_node)
-            value = self.construct_object(value_node)
-            omap.append((key, value))
-
-    def construct_yaml_pairs(self, node):
-        # Note: the same code as `construct_yaml_omap`.
-        pairs = []
-        yield pairs
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError("while constructing pairs", node.start_mark,
-                    "expected a sequence, but found %s" % node.id, node.start_mark)
-        for subnode in node.value:
-            if not isinstance(subnode, MappingNode):
-                raise ConstructorError("while constructing pairs", node.start_mark,
-                        "expected a mapping of length 1, but found %s" % subnode.id,
-                        subnode.start_mark)
-            if len(subnode.value) != 1:
-                raise ConstructorError("while constructing pairs", node.start_mark,
-                        "expected a single mapping item, but found %d items" % len(subnode.value),
-                        subnode.start_mark)
-            key_node, value_node = subnode.value[0]
-            key = self.construct_object(key_node)
-            value = self.construct_object(value_node)
-            pairs.append((key, value))
-
-    def construct_yaml_set(self, node):
-        data = set()
-        yield data
-        value = self.construct_mapping(node)
-        data.update(value)
-
-    def construct_yaml_str(self, node):
-        return self.construct_scalar(node)
-
-    def construct_yaml_seq(self, node):
-        data = []
-        yield data
-        data.extend(self.construct_sequence(node))
-
-    def construct_yaml_map(self, node):
-        data = {}
-        yield data
-        value = self.construct_mapping(node)
-        data.update(value)
-
-    def construct_yaml_object(self, node, cls):
-        data = cls.__new__(cls)
-        yield data
-        if hasattr(data, '__setstate__'):
-            state = self.construct_mapping(node, deep=True)
-            data.__setstate__(state)
-        else:
-            state = self.construct_mapping(node)
-            data.__dict__.update(state)
-
-    def construct_undefined(self, node):
-        raise ConstructorError(None, None,
-                "could not determine a constructor for the tag %r" % node.tag,
-                node.start_mark)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:null',
-        SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:bool',
-        SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:int',
-        SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:float',
-        SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:binary',
-        SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:timestamp',
-        SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:omap',
-        SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:pairs',
-        SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:set',
-        SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:str',
-        SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:seq',
-        SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:map',
-        SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
-        SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
-    def construct_python_str(self, node):
-        return self.construct_scalar(node)
-
-    def construct_python_unicode(self, node):
-        return self.construct_scalar(node)
-
-    def construct_python_bytes(self, node):
-        try:
-            value = self.construct_scalar(node).encode('ascii')
-        except UnicodeEncodeError as exc:
-            raise ConstructorError(None, None,
-                    "failed to convert base64 data into ascii: %s" % exc,
-                    node.start_mark)
-        try:
-            if hasattr(base64, 'decodebytes'):
-                return base64.decodebytes(value)
-            else:
-                return base64.decodestring(value)
-        except binascii.Error as exc:
-            raise ConstructorError(None, None,
-                    "failed to decode base64 data: %s" % exc, node.start_mark)
-
-    def construct_python_long(self, node):
-        return self.construct_yaml_int(node)
-
-    def construct_python_complex(self, node):
-        return complex(self.construct_scalar(node))
-
-    def construct_python_tuple(self, node):
-        return tuple(self.construct_sequence(node))
-
-    def find_python_module(self, name, mark):
-        if not name:
-            raise ConstructorError("while constructing a Python module", mark,
-                    "expected non-empty name appended to the tag", mark)
-        try:
-            __import__(name)
-        except ImportError as exc:
-            raise ConstructorError("while constructing a Python module", mark,
-                    "cannot find module %r (%s)" % (name, exc), mark)
-        return sys.modules[name]
-
-    def find_python_name(self, name, mark):
-        if not name:
-            raise ConstructorError("while constructing a Python object", mark,
-                    "expected non-empty name appended to the tag", mark)
-        if '.' in name:
-            module_name, object_name = name.rsplit('.', 1)
-        else:
-            module_name = 'builtins'
-            object_name = name
-        try:
-            __import__(module_name)
-        except ImportError as exc:
-            raise ConstructorError("while constructing a Python object", mark,
-                    "cannot find module %r (%s)" % (module_name, exc), mark)
-        module = sys.modules[module_name]
-        if not hasattr(module, object_name):
-            raise ConstructorError("while constructing a Python object", mark,
-                    "cannot find %r in the module %r"
-                    % (object_name, module.__name__), mark)
-        return getattr(module, object_name)
-
-    def construct_python_name(self, suffix, node):
-        value = self.construct_scalar(node)
-        if value:
-            raise ConstructorError("while constructing a Python name", node.start_mark,
-                    "expected the empty value, but found %r" % value, node.start_mark)
-        return self.find_python_name(suffix, node.start_mark)
-
-    def construct_python_module(self, suffix, node):
-        value = self.construct_scalar(node)
-        if value:
-            raise ConstructorError("while constructing a Python module", node.start_mark,
-                    "expected the empty value, but found %r" % value, node.start_mark)
-        return self.find_python_module(suffix, node.start_mark)
-
-    def make_python_instance(self, suffix, node,
-            args=None, kwds=None, newobj=False):
-        if not args:
-            args = []
-        if not kwds:
-            kwds = {}
-        cls = self.find_python_name(suffix, node.start_mark)
-        if newobj and isinstance(cls, type):
-            return cls.__new__(cls, *args, **kwds)
-        else:
-            return cls(*args, **kwds)
-
-    def set_python_instance_state(self, instance, state):
-        if hasattr(instance, '__setstate__'):
-            instance.__setstate__(state)
-        else:
-            slotstate = {}
-            if isinstance(state, tuple) and len(state) == 2:
-                state, slotstate = state
-            if hasattr(instance, '__dict__'):
-                instance.__dict__.update(state)
-            elif state:
-                slotstate.update(state)
-            for key, value in slotstate.items():
-                setattr(instance, key, value)
-
-    def construct_python_object(self, suffix, node):
-        # Format:
-        #   !!python/object:module.name { ... state ... }
-        instance = self.make_python_instance(suffix, node, newobj=True)
-        yield instance
-        deep = hasattr(instance, '__setstate__')
-        state = self.construct_mapping(node, deep=deep)
-        self.set_python_instance_state(instance, state)
-
-    def construct_python_object_apply(self, suffix, node, newobj=False):
-        # Format:
-        #   !!python/object/apply       # (or !!python/object/new)
-        #   args: [ ... arguments ... ]
-        #   kwds: { ... keywords ... }
-        #   state: ... state ...
-        #   listitems: [ ... listitems ... ]
-        #   dictitems: { ... dictitems ... }
-        # or short format:
-        #   !!python/object/apply [ ... arguments ... ]
-        # The difference between !!python/object/apply and !!python/object/new
-        # is how an object is created, check make_python_instance for details.
-        if isinstance(node, SequenceNode):
-            args = self.construct_sequence(node, deep=True)
-            kwds = {}
-            state = {}
-            listitems = []
-            dictitems = {}
-        else:
-            value = self.construct_mapping(node, deep=True)
-            args = value.get('args', [])
-            kwds = value.get('kwds', {})
-            state = value.get('state', {})
-            listitems = value.get('listitems', [])
-            dictitems = value.get('dictitems', {})
-        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
-        if state:
-            self.set_python_instance_state(instance, state)
-        if listitems:
-            instance.extend(listitems)
-        if dictitems:
-            for key in dictitems:
-                instance[key] = dictitems[key]
-        return instance
-
-    def construct_python_object_new(self, suffix, node):
-        return self.construct_python_object_apply(suffix, node, newobj=True)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/none',
-    Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/bool',
-    Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/str',
-    Constructor.construct_python_str)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/unicode',
-    Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/bytes',
-    Constructor.construct_python_bytes)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/int',
-    Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/long',
-    Constructor.construct_python_long)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/float',
-    Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/complex',
-    Constructor.construct_python_complex)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/list',
-    Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/tuple',
-    Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/dict',
-    Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/name:',
-    Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/module:',
-    Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/object:',
-    Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/object/apply:',
-    Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/object/new:',
-    Constructor.construct_python_object_new)
-
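
The SafeConstructor in this hunk turns the node tree into plain Python objects: it resolves the core tags (bool, int, float, timestamp, ...) and applies the << merge key through flatten_mapping() before each mapping is built. A small sketch using the safe_load() helper from the same __init__.py, again assuming the mncheck.ext.yaml import path:

# Sketch only: assumes the vendored package imports as mncheck.ext.yaml.
from mncheck.ext import yaml

doc = """
defaults: &d {retries: 3, timeout: 2.5}
job:
  <<: *d
  name: check
  when: 2019-01-02 03:04:05
"""
data = yaml.safe_load(doc)
print(data['job']['retries'])          # 3, merged in from the &d anchor
print(type(data['job']['timeout']))    # <class 'float'>
print(type(data['job']['when']))       # <class 'datetime.datetime'>

Unknown tags still fall through to construct_undefined(), which is why the safe loader rejects arbitrary !!python/object payloads, while the full Constructor defined below registers multi-constructors that accept them.
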
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+    'ConstructorError']
+
+from .error import *
+from .nodes import *
+
+import collections, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+    pass
+
+class BaseConstructor:
+
+    yaml_constructors = {}
+    yaml_multi_constructors = {}
+
+    def __init__(self):
+        self.constructed_objects = {}
+        self.recursive_objects = {}
+        self.state_generators = []
+        self.deep_construct = False
+
+    def check_data(self):
+        # Are there more documents available?
+        return self.check_node()
+
+    def get_data(self):
+        # Construct and return the next document.
+        if self.check_node():
+            return self.construct_document(self.get_node())
+
+    def get_single_data(self):
+        # Ensure that the stream contains a single document and construct it.
+        node = self.get_single_node()
+        if node is not None:
+            return self.construct_document(node)
+        return None
+
+    def construct_document(self, node):
+        data = self.construct_object(node)
+        while self.state_generators:
+            state_generators = self.state_generators
+            self.state_generators = []
+            for generator in state_generators:
+                for dummy in generator:
+                    pass
+        self.constructed_objects = {}
+        self.recursive_objects = {}
+        self.deep_construct = False
+        return data
+
+    def construct_object(self, node, deep=False):
+        if node in self.constructed_objects:
+            return self.constructed_objects[node]
+        if deep:
+            old_deep = self.deep_construct
+            self.deep_construct = True
+        if node in self.recursive_objects:
+            raise ConstructorError(None, None,
+                    "found unconstructable recursive node", node.start_mark)
+        self.recursive_objects[node] = None
+        constructor = None
+        tag_suffix = None
+        if node.tag in self.yaml_constructors:
+            constructor = self.yaml_constructors[node.tag]
+        else:
+            for tag_prefix in self.yaml_multi_constructors:
+                if node.tag.startswith(tag_prefix):
+                    tag_suffix = node.tag[len(tag_prefix):]
+                    constructor = self.yaml_multi_constructors[tag_prefix]
+                    break
+            else:
+                if None in self.yaml_multi_constructors:
+                    tag_suffix = node.tag
+                    constructor = self.yaml_multi_constructors[None]
+                elif None in self.yaml_constructors:
+                    constructor = self.yaml_constructors[None]
+                elif isinstance(node, ScalarNode):
+                    constructor = self.__class__.construct_scalar
+                elif isinstance(node, SequenceNode):
+                    constructor = self.__class__.construct_sequence
+                elif isinstance(node, MappingNode):
+                    constructor = self.__class__.construct_mapping
+        if tag_suffix is None:
+            data = constructor(self, node)
+        else:
+            data = constructor(self, tag_suffix, node)
+        if isinstance(data, types.GeneratorType):
+            generator = data
+            data = next(generator)
+            if self.deep_construct:
+                for dummy in generator:
+                    pass
+            else:
+                self.state_generators.append(generator)
+        self.constructed_objects[node] = data
+        del self.recursive_objects[node]
+        if deep:
+            self.deep_construct = old_deep
+        return data
+
+    def construct_scalar(self, node):
+        if not isinstance(node, ScalarNode):
+            raise ConstructorError(None, None,
+                    "expected a scalar node, but found %s" % node.id,
+                    node.start_mark)
+        return node.value
+
+    def construct_sequence(self, node, deep=False):
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(None, None,
+                    "expected a sequence node, but found %s" % node.id,
+                    node.start_mark)
+        return [self.construct_object(child, deep=deep)
+                for child in node.value]
+
+    def construct_mapping(self, node, deep=False):
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(None, None,
+                    "expected a mapping node, but found %s" % node.id,
+                    node.start_mark)
+        mapping = {}
+        for key_node, value_node in node.value:
+            key = self.construct_object(key_node, deep=deep)
+            if not isinstance(key, collections.Hashable):
+                raise ConstructorError("while constructing a mapping", node.start_mark,
+                        "found unhashable key", key_node.start_mark)
+            value = self.construct_object(value_node, deep=deep)
+            mapping[key] = value
+        return mapping
+
+    def construct_pairs(self, node, deep=False):
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(None, None,
+                    "expected a mapping node, but found %s" % node.id,
+                    node.start_mark)
+        pairs = []
+        for key_node, value_node in node.value:
+            key = self.construct_object(key_node, deep=deep)
+            value = self.construct_object(value_node, deep=deep)
+            pairs.append((key, value))
+        return pairs
+
+    @classmethod
+    def add_constructor(cls, tag, constructor):
+        if not 'yaml_constructors' in cls.__dict__:
+            cls.yaml_constructors = cls.yaml_constructors.copy()
+        cls.yaml_constructors[tag] = constructor
+
+    @classmethod
+    def add_multi_constructor(cls, tag_prefix, multi_constructor):
+        if not 'yaml_multi_constructors' in cls.__dict__:
+            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+    def construct_scalar(self, node):
+        if isinstance(node, MappingNode):
+            for key_node, value_node in node.value:
+                if key_node.tag == 'tag:yaml.org,2002:value':
+                    return self.construct_scalar(value_node)
+        return super().construct_scalar(node)
+
+    def flatten_mapping(self, node):
+        merge = []
+        index = 0
+        while index < len(node.value):
+            key_node, value_node = node.value[index]
+            if key_node.tag == 'tag:yaml.org,2002:merge':
+                del node.value[index]
+                if isinstance(value_node, MappingNode):
+                    self.flatten_mapping(value_node)
+                    merge.extend(value_node.value)
+                elif isinstance(value_node, SequenceNode):
+                    submerge = []
+                    for subnode in value_node.value:
+                        if not isinstance(subnode, MappingNode):
+                            raise ConstructorError("while constructing a mapping",
+                                    node.start_mark,
+                                    "expected a mapping for merging, but found %s"
+                                    % subnode.id, subnode.start_mark)
+                        self.flatten_mapping(subnode)
+                        submerge.append(subnode.value)
+                    submerge.reverse()
+                    for value in submerge:
+                        merge.extend(value)
+                else:
+                    raise ConstructorError("while constructing a mapping", node.start_mark,
+                            "expected a mapping or list of mappings for merging, but found %s"
+                            % value_node.id, value_node.start_mark)
+            elif key_node.tag == 'tag:yaml.org,2002:value':
+                key_node.tag = 'tag:yaml.org,2002:str'
+                index += 1
+            else:
+                index += 1
+        if merge:
+            node.value = merge + node.value
+
+    def construct_mapping(self, node, deep=False):
+        if isinstance(node, MappingNode):
+            self.flatten_mapping(node)
+        return super().construct_mapping(node, deep=deep)
+
+    def construct_yaml_null(self, node):
+        self.construct_scalar(node)
+        return None
+
+    bool_values = {
+        'yes':      True,
+        'no':       False,
+        'true':     True,
+        'false':    False,
+        'on':       True,
+        'off':      False,
+    }
+
+    def construct_yaml_bool(self, node):
+        value = self.construct_scalar(node)
+        return self.bool_values[value.lower()]
+
+    def construct_yaml_int(self, node):
+        value = self.construct_scalar(node)
+        value = value.replace('_', '')
+        sign = +1
+        if value[0] == '-':
+            sign = -1
+        if value[0] in '+-':
+            value = value[1:]
+        if value == '0':
+            return 0
+        elif value.startswith('0b'):
+            return sign*int(value[2:], 2)
+        elif value.startswith('0x'):
+            return sign*int(value[2:], 16)
+        elif value[0] == '0':
+            return sign*int(value, 8)
+        elif ':' in value:
+            digits = [int(part) for part in value.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0
+            for digit in digits:
+                value += digit*base
+                base *= 60
+            return sign*value
+        else:
+            return sign*int(value)
+
+    inf_value = 1e300
+    while inf_value != inf_value*inf_value:
+        inf_value *= inf_value
+    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
+
+    def construct_yaml_float(self, node):
+        value = self.construct_scalar(node)
+        value = value.replace('_', '').lower()
+        sign = +1
+        if value[0] == '-':
+            sign = -1
+        if value[0] in '+-':
+            value = value[1:]
+        if value == '.inf':
+            return sign*self.inf_value
+        elif value == '.nan':
+            return self.nan_value
+        elif ':' in value:
+            digits = [float(part) for part in value.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0.0
+            for digit in digits:
+                value += digit*base
+                base *= 60
+            return sign*value
+        else:
+            return sign*float(value)
+
+    def construct_yaml_binary(self, node):
+        try:
+            value = self.construct_scalar(node).encode('ascii')
+        except UnicodeEncodeError as exc:
+            raise ConstructorError(None, None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark)
+        try:
+            if hasattr(base64, 'decodebytes'):
+                return base64.decodebytes(value)
+            else:
+                return base64.decodestring(value)
+        except binascii.Error as exc:
+            raise ConstructorError(None, None,
+                    "failed to decode base64 data: %s" % exc, node.start_mark)
+
+    timestamp_regexp = re.compile(
+            r'''^(?P<year>[0-9][0-9][0-9][0-9])
+                -(?P<month>[0-9][0-9]?)
+                -(?P<day>[0-9][0-9]?)
+                (?:(?:[Tt]|[ \t]+)
+                (?P<hour>[0-9][0-9]?)
+                :(?P<minute>[0-9][0-9])
+                :(?P<second>[0-9][0-9])
+                (?:\.(?P<fraction>[0-9]*))?
+                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+    def construct_yaml_timestamp(self, node):
+        value = self.construct_scalar(node)
+        match = self.timestamp_regexp.match(node.value)
+        values = match.groupdict()
+        year = int(values['year'])
+        month = int(values['month'])
+        day = int(values['day'])
+        if not values['hour']:
+            return datetime.date(year, month, day)
+        hour = int(values['hour'])
+        minute = int(values['minute'])
+        second = int(values['second'])
+        fraction = 0
+        if values['fraction']:
+            fraction = values['fraction'][:6]
+            while len(fraction) < 6:
+                fraction += '0'
+            fraction = int(fraction)
+        delta = None
+        if values['tz_sign']:
+            tz_hour = int(values['tz_hour'])
+            tz_minute = int(values['tz_minute'] or 0)
+            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+            if values['tz_sign'] == '-':
+                delta = -delta
+        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+        if delta:
+            data -= delta
+        return data
+
+    def construct_yaml_omap(self, node):
+        # Note: we do not check for duplicate keys, because it's too
+        # CPU-expensive.
+        omap = []
+        yield omap
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError("while constructing an ordered map", node.start_mark,
+                    "expected a sequence, but found %s" % node.id, node.start_mark)
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError("while constructing an ordered map", node.start_mark,
+                        "expected a mapping of length 1, but found %s" % subnode.id,
+                        subnode.start_mark)
+            if len(subnode.value) != 1:
+                raise ConstructorError("while constructing an ordered map", node.start_mark,
+                        "expected a single mapping item, but found %d items" % len(subnode.value),
+                        subnode.start_mark)
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            value = self.construct_object(value_node)
+            omap.append((key, value))
+
+    def construct_yaml_pairs(self, node):
+        # Note: the same code as `construct_yaml_omap`.
+        pairs = []
+        yield pairs
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError("while constructing pairs", node.start_mark,
+                    "expected a sequence, but found %s" % node.id, node.start_mark)
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError("while constructing pairs", node.start_mark,
+                        "expected a mapping of length 1, but found %s" % subnode.id,
+                        subnode.start_mark)
+            if len(subnode.value) != 1:
+                raise ConstructorError("while constructing pairs", node.start_mark,
+                        "expected a single mapping item, but found %d items" % len(subnode.value),
+                        subnode.start_mark)
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            value = self.construct_object(value_node)
+            pairs.append((key, value))
+
+    def construct_yaml_set(self, node):
+        data = set()
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
+
+    def construct_yaml_str(self, node):
+        return self.construct_scalar(node)
+
+    def construct_yaml_seq(self, node):
+        data = []
+        yield data
+        data.extend(self.construct_sequence(node))
+
+    def construct_yaml_map(self, node):
+        data = {}
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
+
+    def construct_yaml_object(self, node, cls):
+        data = cls.__new__(cls)
+        yield data
+        if hasattr(data, '__setstate__'):
+            state = self.construct_mapping(node, deep=True)
+            data.__setstate__(state)
+        else:
+            state = self.construct_mapping(node)
+            data.__dict__.update(state)
+
+    def construct_undefined(self, node):
+        raise ConstructorError(None, None,
+                "could not determine a constructor for the tag %r" % node.tag,
+                node.start_mark)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:null',
+        SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:bool',
+        SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:int',
+        SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:float',
+        SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:binary',
+        SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:timestamp',
+        SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:omap',
+        SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:pairs',
+        SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:set',
+        SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:str',
+        SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:seq',
+        SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:map',
+        SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+        SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+    def construct_python_str(self, node):
+        return self.construct_scalar(node)
+
+    def construct_python_unicode(self, node):
+        return self.construct_scalar(node)
+
+    def construct_python_bytes(self, node):
+        try:
+            value = self.construct_scalar(node).encode('ascii')
+        except UnicodeEncodeError as exc:
+            raise ConstructorError(None, None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark)
+        try:
+            if hasattr(base64, 'decodebytes'):
+                return base64.decodebytes(value)
+            else:
+                return base64.decodestring(value)
+        except binascii.Error as exc:
+            raise ConstructorError(None, None,
+                    "failed to decode base64 data: %s" % exc, node.start_mark)
+
+    def construct_python_long(self, node):
+        return self.construct_yaml_int(node)
+
+    def construct_python_complex(self, node):
+        return complex(self.construct_scalar(node))
+
+    def construct_python_tuple(self, node):
+        return tuple(self.construct_sequence(node))
+
+    def find_python_module(self, name, mark):
+        if not name:
+            raise ConstructorError("while constructing a Python module", mark,
+                    "expected non-empty name appended to the tag", mark)
+        try:
+            __import__(name)
+        except ImportError as exc:
+            raise ConstructorError("while constructing a Python module", mark,
+                    "cannot find module %r (%s)" % (name, exc), mark)
+        return sys.modules[name]
+
+    def find_python_name(self, name, mark):
+        if not name:
+            raise ConstructorError("while constructing a Python object", mark,
+                    "expected non-empty name appended to the tag", mark)
+        if '.' in name:
+            module_name, object_name = name.rsplit('.', 1)
+        else:
+            module_name = 'builtins'
+            object_name = name
+        try:
+            __import__(module_name)
+        except ImportError as exc:
+            raise ConstructorError("while constructing a Python object", mark,
+                    "cannot find module %r (%s)" % (module_name, exc), mark)
+        module = sys.modules[module_name]
+        if not hasattr(module, object_name):
+            raise ConstructorError("while constructing a Python object", mark,
+                    "cannot find %r in the module %r"
+                    % (object_name, module.__name__), mark)
+        return getattr(module, object_name)
+
+    def construct_python_name(self, suffix, node):
+        value = self.construct_scalar(node)
+        if value:
+            raise ConstructorError("while constructing a Python name", node.start_mark,
+                    "expected the empty value, but found %r" % value, node.start_mark)
+        return self.find_python_name(suffix, node.start_mark)
+
+    def construct_python_module(self, suffix, node):
+        value = self.construct_scalar(node)
+        if value:
+            raise ConstructorError("while constructing a Python module", node.start_mark,
+                    "expected the empty value, but found %r" % value, node.start_mark)
+        return self.find_python_module(suffix, node.start_mark)
+
+    def make_python_instance(self, suffix, node,
+            args=None, kwds=None, newobj=False):
+        if not args:
+            args = []
+        if not kwds:
+            kwds = {}
+        cls = self.find_python_name(suffix, node.start_mark)
+        if newobj and isinstance(cls, type):
+            return cls.__new__(cls, *args, **kwds)
+        else:
+            return cls(*args, **kwds)
+
+    def set_python_instance_state(self, instance, state):
+        if hasattr(instance, '__setstate__'):
+            instance.__setstate__(state)
+        else:
+            slotstate = {}
+            if isinstance(state, tuple) and len(state) == 2:
+                state, slotstate = state
+            if hasattr(instance, '__dict__'):
+                instance.__dict__.update(state)
+            elif state:
+                slotstate.update(state)
+            for key, value in slotstate.items():
+                setattr(instance, key, value)
+
+    def construct_python_object(self, suffix, node):
+        # Format:
+        #   !!python/object:module.name { ... state ... }
+        instance = self.make_python_instance(suffix, node, newobj=True)
+        yield instance
+        deep = hasattr(instance, '__setstate__')
+        state = self.construct_mapping(node, deep=deep)
+        self.set_python_instance_state(instance, state)
+
+    def construct_python_object_apply(self, suffix, node, newobj=False):
+        # Format:
+        #   !!python/object/apply       # (or !!python/object/new)
+        #   args: [ ... arguments ... ]
+        #   kwds: { ... keywords ... }
+        #   state: ... state ...
+        #   listitems: [ ... listitems ... ]
+        #   dictitems: { ... dictitems ... }
+        # or short format:
+        #   !!python/object/apply [ ... arguments ... ]
+        # The difference between !!python/object/apply and !!python/object/new
+        # is how an object is created, check make_python_instance for details.
+        if isinstance(node, SequenceNode):
+            args = self.construct_sequence(node, deep=True)
+            kwds = {}
+            state = {}
+            listitems = []
+            dictitems = {}
+        else:
+            value = self.construct_mapping(node, deep=True)
+            args = value.get('args', [])
+            kwds = value.get('kwds', {})
+            state = value.get('state', {})
+            listitems = value.get('listitems', [])
+            dictitems = value.get('dictitems', {})
+        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+        if state:
+            self.set_python_instance_state(instance, state)
+        if listitems:
+            instance.extend(listitems)
+        if dictitems:
+            for key in dictitems:
+                instance[key] = dictitems[key]
+        return instance
+
+    def construct_python_object_new(self, suffix, node):
+        return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/none',
+    Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/bool',
+    Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/str',
+    Constructor.construct_python_str)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/unicode',
+    Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/bytes',
+    Constructor.construct_python_bytes)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/int',
+    Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/long',
+    Constructor.construct_python_long)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/float',
+    Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/complex',
+    Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/list',
+    Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/tuple',
+    Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+    'tag:yaml.org,2002:python/dict',
+    Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/name:',
+    Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/module:',
+    Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/object:',
+    Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/object/apply:',
+    Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/object/new:',
+    Constructor.construct_python_object_new)
+
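
A minimal usage sketch for the Constructor registrations above (not part of the moved file). It assumes the vendored package is importable as mncheck.ext.yaml after this refactoring; the document tag and values are illustrative only.

# Minimal sketch, assuming the vendored copy imports as mncheck.ext.yaml
# (assumption based on the new directory layout, not stated in the diff).
from mncheck.ext import yaml

doc = "value: !!python/object/apply:complex [1.0, 2.0]"

# Constructor (reachable via yaml.Loader) resolves the python/object/apply
# tag by importing the named callable and applying the sequence as arguments.
data = yaml.load(doc, Loader=yaml.Loader)
assert data["value"] == complex(1.0, 2.0)

# SafeConstructor (via yaml.SafeLoader) registers no python/* constructors,
# so the same document raises ConstructorError; untrusted input should go
# through the safe loader for exactly this reason.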

+ 85 - 85
ext/yaml/cyaml.py → mncheck/ext/yaml/cyaml.py

@@ -1,85 +1,85 @@
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
-        'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from .constructor import *
-
-from .serializer import *
-from .representer import *
-
-from .resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
-    def __init__(self, stream):
-        CParser.__init__(self, stream)
-        BaseConstructor.__init__(self)
-        BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
-    def __init__(self, stream):
-        CParser.__init__(self, stream)
-        SafeConstructor.__init__(self)
-        Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
-    def __init__(self, stream):
-        CParser.__init__(self, stream)
-        Constructor.__init__(self)
-        Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        CEmitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width, encoding=encoding,
-                allow_unicode=allow_unicode, line_break=line_break,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        CEmitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width, encoding=encoding,
-                allow_unicode=allow_unicode, line_break=line_break,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        SafeRepresenter.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        CEmitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width, encoding=encoding,
-                allow_unicode=allow_unicode, line_break=line_break,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+        'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+    def __init__(self, stream):
+        CParser.__init__(self, stream)
+        BaseConstructor.__init__(self)
+        BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+    def __init__(self, stream):
+        CParser.__init__(self, stream)
+        SafeConstructor.__init__(self)
+        Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+    def __init__(self, stream):
+        CParser.__init__(self, stream)
+        Constructor.__init__(self)
+        Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=None,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None):
+        CEmitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width, encoding=encoding,
+                allow_unicode=allow_unicode, line_break=line_break,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        Representer.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style)
+        Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=None,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None):
+        CEmitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width, encoding=encoding,
+                allow_unicode=allow_unicode, line_break=line_break,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        SafeRepresenter.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style)
+        Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=None,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None):
+        CEmitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width, encoding=encoding,
+                allow_unicode=allow_unicode, line_break=line_break,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        Representer.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style)
+        Resolver.__init__(self)
+
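
The C-accelerated classes above only import when the _yaml extension (libyaml bindings) is present. A guarded-import sketch, assuming the vendored mncheck.ext.yaml path and that _yaml may be absent in the target environment:

# Hedged sketch: prefer the libyaml-backed loader when _yaml is importable,
# otherwise fall back to the pure-Python SafeLoader from the same package.
try:
    from mncheck.ext.yaml.cyaml import CSafeLoader as DefaultLoader
except ImportError:
    from mncheck.ext.yaml import SafeLoader as DefaultLoader

Either class can be passed wherever a Loader is expected, since both compose a parser, a constructor and a resolver behind the same interface.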

+ 62 - 62
ext/yaml/dumper.py → mncheck/ext/yaml/dumper.py

@@ -1,62 +1,62 @@
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from .emitter import *
-from .serializer import *
-from .representer import *
-from .resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        Emitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width,
-                allow_unicode=allow_unicode, line_break=line_break)
-        Serializer.__init__(self, encoding=encoding,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        Emitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width,
-                allow_unicode=allow_unicode, line_break=line_break)
-        Serializer.__init__(self, encoding=encoding,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        SafeRepresenter.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        Emitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width,
-                allow_unicode=allow_unicode, line_break=line_break)
-        Serializer.__init__(self, encoding=encoding,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=None,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None):
+        Emitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width,
+                allow_unicode=allow_unicode, line_break=line_break)
+        Serializer.__init__(self, encoding=encoding,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        Representer.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style)
+        Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=None,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None):
+        Emitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width,
+                allow_unicode=allow_unicode, line_break=line_break)
+        Serializer.__init__(self, encoding=encoding,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        SafeRepresenter.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style)
+        Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=None,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None):
+        Emitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width,
+                allow_unicode=allow_unicode, line_break=line_break)
+        Serializer.__init__(self, encoding=encoding,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        Representer.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style)
+        Resolver.__init__(self)
+
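
The dumpers above are assembled by multiple inheritance: an Emitter (or CEmitter) writes characters, a Serializer walks the node graph, a Representer turns Python objects into nodes, and a Resolver picks implicit tags. A short usage sketch, again assuming the mncheck.ext.yaml import path introduced by this refactoring:

# Sketch of dumping through the composed SafeDumper; the package path is an
# assumption based on the new layout, and the data is illustrative.
from mncheck.ext import yaml

text = yaml.dump({"a": [1, 2], "b": None},
                 Dumper=yaml.SafeDumper, default_flow_style=False)
# Produces block-style output:
#   a:
#   - 1
#   - 2
#   b: null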

+ 1137 - 1137
ext/yaml/emitter.py → mncheck/ext/yaml/emitter.py

@@ -1,1137 +1,1137 @@
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
-from .error import YAMLError
-from .events import *
-
-class EmitterError(YAMLError):
-    pass
-
-class ScalarAnalysis:
-    def __init__(self, scalar, empty, multiline,
-            allow_flow_plain, allow_block_plain,
-            allow_single_quoted, allow_double_quoted,
-            allow_block):
-        self.scalar = scalar
-        self.empty = empty
-        self.multiline = multiline
-        self.allow_flow_plain = allow_flow_plain
-        self.allow_block_plain = allow_block_plain
-        self.allow_single_quoted = allow_single_quoted
-        self.allow_double_quoted = allow_double_quoted
-        self.allow_block = allow_block
-
-class Emitter:
-
-    DEFAULT_TAG_PREFIXES = {
-        '!' : '!',
-        'tag:yaml.org,2002:' : '!!',
-    }
-
-    def __init__(self, stream, canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None):
-
-        # The stream should have the methods `write` and possibly `flush`.
-        self.stream = stream
-
-        # Encoding can be overriden by STREAM-START.
-        self.encoding = None
-
-        # Emitter is a state machine with a stack of states to handle nested
-        # structures.
-        self.states = []
-        self.state = self.expect_stream_start
-
-        # Current event and the event queue.
-        self.events = []
-        self.event = None
-
-        # The current indentation level and the stack of previous indents.
-        self.indents = []
-        self.indent = None
-
-        # Flow level.
-        self.flow_level = 0
-
-        # Contexts.
-        self.root_context = False
-        self.sequence_context = False
-        self.mapping_context = False
-        self.simple_key_context = False
-
-        # Characteristics of the last emitted character:
-        #  - current position.
-        #  - is it a whitespace?
-        #  - is it an indention character
-        #    (indentation space, '-', '?', or ':')?
-        self.line = 0
-        self.column = 0
-        self.whitespace = True
-        self.indention = True
-
-        # Whether the document requires an explicit document indicator
-        self.open_ended = False
-
-        # Formatting details.
-        self.canonical = canonical
-        self.allow_unicode = allow_unicode
-        self.best_indent = 2
-        if indent and 1 < indent < 10:
-            self.best_indent = indent
-        self.best_width = 80
-        if width and width > self.best_indent*2:
-            self.best_width = width
-        self.best_line_break = '\n'
-        if line_break in ['\r', '\n', '\r\n']:
-            self.best_line_break = line_break
-
-        # Tag prefixes.
-        self.tag_prefixes = None
-
-        # Prepared anchor and tag.
-        self.prepared_anchor = None
-        self.prepared_tag = None
-
-        # Scalar analysis and style.
-        self.analysis = None
-        self.style = None
-
-    def dispose(self):
-        # Reset the state attributes (to clear self-references)
-        self.states = []
-        self.state = None
-
-    def emit(self, event):
-        self.events.append(event)
-        while not self.need_more_events():
-            self.event = self.events.pop(0)
-            self.state()
-            self.event = None
-
-    # In some cases, we wait for a few next events before emitting.
-
-    def need_more_events(self):
-        if not self.events:
-            return True
-        event = self.events[0]
-        if isinstance(event, DocumentStartEvent):
-            return self.need_events(1)
-        elif isinstance(event, SequenceStartEvent):
-            return self.need_events(2)
-        elif isinstance(event, MappingStartEvent):
-            return self.need_events(3)
-        else:
-            return False
-
-    def need_events(self, count):
-        level = 0
-        for event in self.events[1:]:
-            if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
-                level += 1
-            elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
-                level -= 1
-            elif isinstance(event, StreamEndEvent):
-                level = -1
-            if level < 0:
-                return False
-        return (len(self.events) < count+1)
-
-    def increase_indent(self, flow=False, indentless=False):
-        self.indents.append(self.indent)
-        if self.indent is None:
-            if flow:
-                self.indent = self.best_indent
-            else:
-                self.indent = 0
-        elif not indentless:
-            self.indent += self.best_indent
-
-    # States.
-
-    # Stream handlers.
-
-    def expect_stream_start(self):
-        if isinstance(self.event, StreamStartEvent):
-            if self.event.encoding and not hasattr(self.stream, 'encoding'):
-                self.encoding = self.event.encoding
-            self.write_stream_start()
-            self.state = self.expect_first_document_start
-        else:
-            raise EmitterError("expected StreamStartEvent, but got %s"
-                    % self.event)
-
-    def expect_nothing(self):
-        raise EmitterError("expected nothing, but got %s" % self.event)
-
-    # Document handlers.
-
-    def expect_first_document_start(self):
-        return self.expect_document_start(first=True)
-
-    def expect_document_start(self, first=False):
-        if isinstance(self.event, DocumentStartEvent):
-            if (self.event.version or self.event.tags) and self.open_ended:
-                self.write_indicator('...', True)
-                self.write_indent()
-            if self.event.version:
-                version_text = self.prepare_version(self.event.version)
-                self.write_version_directive(version_text)
-            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
-            if self.event.tags:
-                handles = sorted(self.event.tags.keys())
-                for handle in handles:
-                    prefix = self.event.tags[handle]
-                    self.tag_prefixes[prefix] = handle
-                    handle_text = self.prepare_tag_handle(handle)
-                    prefix_text = self.prepare_tag_prefix(prefix)
-                    self.write_tag_directive(handle_text, prefix_text)
-            implicit = (first and not self.event.explicit and not self.canonical
-                    and not self.event.version and not self.event.tags
-                    and not self.check_empty_document())
-            if not implicit:
-                self.write_indent()
-                self.write_indicator('---', True)
-                if self.canonical:
-                    self.write_indent()
-            self.state = self.expect_document_root
-        elif isinstance(self.event, StreamEndEvent):
-            if self.open_ended:
-                self.write_indicator('...', True)
-                self.write_indent()
-            self.write_stream_end()
-            self.state = self.expect_nothing
-        else:
-            raise EmitterError("expected DocumentStartEvent, but got %s"
-                    % self.event)
-
-    def expect_document_end(self):
-        if isinstance(self.event, DocumentEndEvent):
-            self.write_indent()
-            if self.event.explicit:
-                self.write_indicator('...', True)
-                self.write_indent()
-            self.flush_stream()
-            self.state = self.expect_document_start
-        else:
-            raise EmitterError("expected DocumentEndEvent, but got %s"
-                    % self.event)
-
-    def expect_document_root(self):
-        self.states.append(self.expect_document_end)
-        self.expect_node(root=True)
-
-    # Node handlers.
-
-    def expect_node(self, root=False, sequence=False, mapping=False,
-            simple_key=False):
-        self.root_context = root
-        self.sequence_context = sequence
-        self.mapping_context = mapping
-        self.simple_key_context = simple_key
-        if isinstance(self.event, AliasEvent):
-            self.expect_alias()
-        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
-            self.process_anchor('&')
-            self.process_tag()
-            if isinstance(self.event, ScalarEvent):
-                self.expect_scalar()
-            elif isinstance(self.event, SequenceStartEvent):
-                if self.flow_level or self.canonical or self.event.flow_style   \
-                        or self.check_empty_sequence():
-                    self.expect_flow_sequence()
-                else:
-                    self.expect_block_sequence()
-            elif isinstance(self.event, MappingStartEvent):
-                if self.flow_level or self.canonical or self.event.flow_style   \
-                        or self.check_empty_mapping():
-                    self.expect_flow_mapping()
-                else:
-                    self.expect_block_mapping()
-        else:
-            raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
-    def expect_alias(self):
-        if self.event.anchor is None:
-            raise EmitterError("anchor is not specified for alias")
-        self.process_anchor('*')
-        self.state = self.states.pop()
-
-    def expect_scalar(self):
-        self.increase_indent(flow=True)
-        self.process_scalar()
-        self.indent = self.indents.pop()
-        self.state = self.states.pop()
-
-    # Flow sequence handlers.
-
-    def expect_flow_sequence(self):
-        self.write_indicator('[', True, whitespace=True)
-        self.flow_level += 1
-        self.increase_indent(flow=True)
-        self.state = self.expect_first_flow_sequence_item
-
-    def expect_first_flow_sequence_item(self):
-        if isinstance(self.event, SequenceEndEvent):
-            self.indent = self.indents.pop()
-            self.flow_level -= 1
-            self.write_indicator(']', False)
-            self.state = self.states.pop()
-        else:
-            if self.canonical or self.column > self.best_width:
-                self.write_indent()
-            self.states.append(self.expect_flow_sequence_item)
-            self.expect_node(sequence=True)
-
-    def expect_flow_sequence_item(self):
-        if isinstance(self.event, SequenceEndEvent):
-            self.indent = self.indents.pop()
-            self.flow_level -= 1
-            if self.canonical:
-                self.write_indicator(',', False)
-                self.write_indent()
-            self.write_indicator(']', False)
-            self.state = self.states.pop()
-        else:
-            self.write_indicator(',', False)
-            if self.canonical or self.column > self.best_width:
-                self.write_indent()
-            self.states.append(self.expect_flow_sequence_item)
-            self.expect_node(sequence=True)
-
-    # Flow mapping handlers.
-
-    def expect_flow_mapping(self):
-        self.write_indicator('{', True, whitespace=True)
-        self.flow_level += 1
-        self.increase_indent(flow=True)
-        self.state = self.expect_first_flow_mapping_key
-
-    def expect_first_flow_mapping_key(self):
-        if isinstance(self.event, MappingEndEvent):
-            self.indent = self.indents.pop()
-            self.flow_level -= 1
-            self.write_indicator('}', False)
-            self.state = self.states.pop()
-        else:
-            if self.canonical or self.column > self.best_width:
-                self.write_indent()
-            if not self.canonical and self.check_simple_key():
-                self.states.append(self.expect_flow_mapping_simple_value)
-                self.expect_node(mapping=True, simple_key=True)
-            else:
-                self.write_indicator('?', True)
-                self.states.append(self.expect_flow_mapping_value)
-                self.expect_node(mapping=True)
-
-    def expect_flow_mapping_key(self):
-        if isinstance(self.event, MappingEndEvent):
-            self.indent = self.indents.pop()
-            self.flow_level -= 1
-            if self.canonical:
-                self.write_indicator(',', False)
-                self.write_indent()
-            self.write_indicator('}', False)
-            self.state = self.states.pop()
-        else:
-            self.write_indicator(',', False)
-            if self.canonical or self.column > self.best_width:
-                self.write_indent()
-            if not self.canonical and self.check_simple_key():
-                self.states.append(self.expect_flow_mapping_simple_value)
-                self.expect_node(mapping=True, simple_key=True)
-            else:
-                self.write_indicator('?', True)
-                self.states.append(self.expect_flow_mapping_value)
-                self.expect_node(mapping=True)
-
-    def expect_flow_mapping_simple_value(self):
-        self.write_indicator(':', False)
-        self.states.append(self.expect_flow_mapping_key)
-        self.expect_node(mapping=True)
-
-    def expect_flow_mapping_value(self):
-        if self.canonical or self.column > self.best_width:
-            self.write_indent()
-        self.write_indicator(':', True)
-        self.states.append(self.expect_flow_mapping_key)
-        self.expect_node(mapping=True)
-
-    # Block sequence handlers.
-
-    def expect_block_sequence(self):
-        indentless = (self.mapping_context and not self.indention)
-        self.increase_indent(flow=False, indentless=indentless)
-        self.state = self.expect_first_block_sequence_item
-
-    def expect_first_block_sequence_item(self):
-        return self.expect_block_sequence_item(first=True)
-
-    def expect_block_sequence_item(self, first=False):
-        if not first and isinstance(self.event, SequenceEndEvent):
-            self.indent = self.indents.pop()
-            self.state = self.states.pop()
-        else:
-            self.write_indent()
-            self.write_indicator('-', True, indention=True)
-            self.states.append(self.expect_block_sequence_item)
-            self.expect_node(sequence=True)
-
-    # Block mapping handlers.
-
-    def expect_block_mapping(self):
-        self.increase_indent(flow=False)
-        self.state = self.expect_first_block_mapping_key
-
-    def expect_first_block_mapping_key(self):
-        return self.expect_block_mapping_key(first=True)
-
-    def expect_block_mapping_key(self, first=False):
-        if not first and isinstance(self.event, MappingEndEvent):
-            self.indent = self.indents.pop()
-            self.state = self.states.pop()
-        else:
-            self.write_indent()
-            if self.check_simple_key():
-                self.states.append(self.expect_block_mapping_simple_value)
-                self.expect_node(mapping=True, simple_key=True)
-            else:
-                self.write_indicator('?', True, indention=True)
-                self.states.append(self.expect_block_mapping_value)
-                self.expect_node(mapping=True)
-
-    def expect_block_mapping_simple_value(self):
-        self.write_indicator(':', False)
-        self.states.append(self.expect_block_mapping_key)
-        self.expect_node(mapping=True)
-
-    def expect_block_mapping_value(self):
-        self.write_indent()
-        self.write_indicator(':', True, indention=True)
-        self.states.append(self.expect_block_mapping_key)
-        self.expect_node(mapping=True)
-
-    # Checkers.
-
-    def check_empty_sequence(self):
-        return (isinstance(self.event, SequenceStartEvent) and self.events
-                and isinstance(self.events[0], SequenceEndEvent))
-
-    def check_empty_mapping(self):
-        return (isinstance(self.event, MappingStartEvent) and self.events
-                and isinstance(self.events[0], MappingEndEvent))
-
-    def check_empty_document(self):
-        if not isinstance(self.event, DocumentStartEvent) or not self.events:
-            return False
-        event = self.events[0]
-        return (isinstance(event, ScalarEvent) and event.anchor is None
-                and event.tag is None and event.implicit and event.value == '')
-
-    def check_simple_key(self):
-        length = 0
-        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
-            if self.prepared_anchor is None:
-                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
-            length += len(self.prepared_anchor)
-        if isinstance(self.event, (ScalarEvent, CollectionStartEvent))  \
-                and self.event.tag is not None:
-            if self.prepared_tag is None:
-                self.prepared_tag = self.prepare_tag(self.event.tag)
-            length += len(self.prepared_tag)
-        if isinstance(self.event, ScalarEvent):
-            if self.analysis is None:
-                self.analysis = self.analyze_scalar(self.event.value)
-            length += len(self.analysis.scalar)
-        return (length < 128 and (isinstance(self.event, AliasEvent)
-            or (isinstance(self.event, ScalarEvent)
-                    and not self.analysis.empty and not self.analysis.multiline)
-            or self.check_empty_sequence() or self.check_empty_mapping()))
-
-    # Anchor, Tag, and Scalar processors.
-
-    def process_anchor(self, indicator):
-        if self.event.anchor is None:
-            self.prepared_anchor = None
-            return
-        if self.prepared_anchor is None:
-            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
-        if self.prepared_anchor:
-            self.write_indicator(indicator+self.prepared_anchor, True)
-        self.prepared_anchor = None
-
-    def process_tag(self):
-        tag = self.event.tag
-        if isinstance(self.event, ScalarEvent):
-            if self.style is None:
-                self.style = self.choose_scalar_style()
-            if ((not self.canonical or tag is None) and
-                ((self.style == '' and self.event.implicit[0])
-                        or (self.style != '' and self.event.implicit[1]))):
-                self.prepared_tag = None
-                return
-            if self.event.implicit[0] and tag is None:
-                tag = '!'
-                self.prepared_tag = None
-        else:
-            if (not self.canonical or tag is None) and self.event.implicit:
-                self.prepared_tag = None
-                return
-        if tag is None:
-            raise EmitterError("tag is not specified")
-        if self.prepared_tag is None:
-            self.prepared_tag = self.prepare_tag(tag)
-        if self.prepared_tag:
-            self.write_indicator(self.prepared_tag, True)
-        self.prepared_tag = None
-
-    def choose_scalar_style(self):
-        if self.analysis is None:
-            self.analysis = self.analyze_scalar(self.event.value)
-        if self.event.style == '"' or self.canonical:
-            return '"'
-        if not self.event.style and self.event.implicit[0]:
-            if (not (self.simple_key_context and
-                    (self.analysis.empty or self.analysis.multiline))
-                and (self.flow_level and self.analysis.allow_flow_plain
-                    or (not self.flow_level and self.analysis.allow_block_plain))):
-                return ''
-        if self.event.style and self.event.style in '|>':
-            if (not self.flow_level and not self.simple_key_context
-                    and self.analysis.allow_block):
-                return self.event.style
-        if not self.event.style or self.event.style == '\'':
-            if (self.analysis.allow_single_quoted and
-                    not (self.simple_key_context and self.analysis.multiline)):
-                return '\''
-        return '"'
-
-    def process_scalar(self):
-        if self.analysis is None:
-            self.analysis = self.analyze_scalar(self.event.value)
-        if self.style is None:
-            self.style = self.choose_scalar_style()
-        split = (not self.simple_key_context)
-        #if self.analysis.multiline and split    \
-        #        and (not self.style or self.style in '\'\"'):
-        #    self.write_indent()
-        if self.style == '"':
-            self.write_double_quoted(self.analysis.scalar, split)
-        elif self.style == '\'':
-            self.write_single_quoted(self.analysis.scalar, split)
-        elif self.style == '>':
-            self.write_folded(self.analysis.scalar)
-        elif self.style == '|':
-            self.write_literal(self.analysis.scalar)
-        else:
-            self.write_plain(self.analysis.scalar, split)
-        self.analysis = None
-        self.style = None
-
-    # Analyzers.
-
-    def prepare_version(self, version):
-        major, minor = version
-        if major != 1:
-            raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
-        return '%d.%d' % (major, minor)
-
-    def prepare_tag_handle(self, handle):
-        if not handle:
-            raise EmitterError("tag handle must not be empty")
-        if handle[0] != '!' or handle[-1] != '!':
-            raise EmitterError("tag handle must start and end with '!': %r" % handle)
-        for ch in handle[1:-1]:
-            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
-                    or ch in '-_'):
-                raise EmitterError("invalid character %r in the tag handle: %r"
-                        % (ch, handle))
-        return handle
-
-    def prepare_tag_prefix(self, prefix):
-        if not prefix:
-            raise EmitterError("tag prefix must not be empty")
-        chunks = []
-        start = end = 0
-        if prefix[0] == '!':
-            end = 1
-        while end < len(prefix):
-            ch = prefix[end]
-            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
-                    or ch in '-;/?!:@&=+$,_.~*\'()[]':
-                end += 1
-            else:
-                if start < end:
-                    chunks.append(prefix[start:end])
-                start = end = end+1
-                data = ch.encode('utf-8')
-                for ch in data:
-                    chunks.append('%%%02X' % ord(ch))
-        if start < end:
-            chunks.append(prefix[start:end])
-        return ''.join(chunks)
-
-    def prepare_tag(self, tag):
-        if not tag:
-            raise EmitterError("tag must not be empty")
-        if tag == '!':
-            return tag
-        handle = None
-        suffix = tag
-        prefixes = sorted(self.tag_prefixes.keys())
-        for prefix in prefixes:
-            if tag.startswith(prefix)   \
-                    and (prefix == '!' or len(prefix) < len(tag)):
-                handle = self.tag_prefixes[prefix]
-                suffix = tag[len(prefix):]
-        chunks = []
-        start = end = 0
-        while end < len(suffix):
-            ch = suffix[end]
-            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
-                    or ch in '-;/?:@&=+$,_.~*\'()[]'   \
-                    or (ch == '!' and handle != '!'):
-                end += 1
-            else:
-                if start < end:
-                    chunks.append(suffix[start:end])
-                start = end = end+1
-                data = ch.encode('utf-8')
-                for ch in data:
-                    chunks.append('%%%02X' % ord(ch))
-        if start < end:
-            chunks.append(suffix[start:end])
-        suffix_text = ''.join(chunks)
-        if handle:
-            return '%s%s' % (handle, suffix_text)
-        else:
-            return '!<%s>' % suffix_text
-
-    def prepare_anchor(self, anchor):
-        if not anchor:
-            raise EmitterError("anchor must not be empty")
-        for ch in anchor:
-            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
-                    or ch in '-_'):
-                raise EmitterError("invalid character %r in the anchor: %r"
-                        % (ch, anchor))
-        return anchor
-
-    def analyze_scalar(self, scalar):
-
-        # Empty scalar is a special case.
-        if not scalar:
-            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
-                    allow_flow_plain=False, allow_block_plain=True,
-                    allow_single_quoted=True, allow_double_quoted=True,
-                    allow_block=False)
-
-        # Indicators and special characters.
-        block_indicators = False
-        flow_indicators = False
-        line_breaks = False
-        special_characters = False
-
-        # Important whitespace combinations.
-        leading_space = False
-        leading_break = False
-        trailing_space = False
-        trailing_break = False
-        break_space = False
-        space_break = False
-
-        # Check document indicators.
-        if scalar.startswith('---') or scalar.startswith('...'):
-            block_indicators = True
-            flow_indicators = True
-
-        # First character or preceded by a whitespace.
-        preceeded_by_whitespace = True
-
-        # Last character or followed by a whitespace.
-        followed_by_whitespace = (len(scalar) == 1 or
-                scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
-
-        # The previous character is a space.
-        previous_space = False
-
-        # The previous character is a break.
-        previous_break = False
-
-        index = 0
-        while index < len(scalar):
-            ch = scalar[index]
-
-            # Check for indicators.
-            if index == 0:
-                # Leading indicators are special characters.
-                if ch in '#,[]{}&*!|>\'\"%@`': 
-                    flow_indicators = True
-                    block_indicators = True
-                if ch in '?:':
-                    flow_indicators = True
-                    if followed_by_whitespace:
-                        block_indicators = True
-                if ch == '-' and followed_by_whitespace:
-                    flow_indicators = True
-                    block_indicators = True
-            else:
-                # Some indicators cannot appear within a scalar as well.
-                if ch in ',?[]{}':
-                    flow_indicators = True
-                if ch == ':':
-                    flow_indicators = True
-                    if followed_by_whitespace:
-                        block_indicators = True
-                if ch == '#' and preceeded_by_whitespace:
-                    flow_indicators = True
-                    block_indicators = True
-
-            # Check for line breaks, special, and unicode characters.
-            if ch in '\n\x85\u2028\u2029':
-                line_breaks = True
-            if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
-                if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
-                        or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
-                    unicode_characters = True
-                    if not self.allow_unicode:
-                        special_characters = True
-                else:
-                    special_characters = True
-
-            # Detect important whitespace combinations.
-            if ch == ' ':
-                if index == 0:
-                    leading_space = True
-                if index == len(scalar)-1:
-                    trailing_space = True
-                if previous_break:
-                    break_space = True
-                previous_space = True
-                previous_break = False
-            elif ch in '\n\x85\u2028\u2029':
-                if index == 0:
-                    leading_break = True
-                if index == len(scalar)-1:
-                    trailing_break = True
-                if previous_space:
-                    space_break = True
-                previous_space = False
-                previous_break = True
-            else:
-                previous_space = False
-                previous_break = False
-
-            # Prepare for the next character.
-            index += 1
-            preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
-            followed_by_whitespace = (index+1 >= len(scalar) or
-                    scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
-
-        # Let's decide what styles are allowed.
-        allow_flow_plain = True
-        allow_block_plain = True
-        allow_single_quoted = True
-        allow_double_quoted = True
-        allow_block = True
-
-        # Leading and trailing whitespaces are bad for plain scalars.
-        if (leading_space or leading_break
-                or trailing_space or trailing_break):
-            allow_flow_plain = allow_block_plain = False
-
-        # We do not permit trailing spaces for block scalars.
-        if trailing_space:
-            allow_block = False
-
-        # Spaces at the beginning of a new line are only acceptable for block
-        # scalars.
-        if break_space:
-            allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
-        # Spaces followed by breaks, as well as special character are only
-        # allowed for double quoted scalars.
-        if space_break or special_characters:
-            allow_flow_plain = allow_block_plain =  \
-            allow_single_quoted = allow_block = False
-
-        # Although the plain scalar writer supports breaks, we never emit
-        # multiline plain scalars.
-        if line_breaks:
-            allow_flow_plain = allow_block_plain = False
-
-        # Flow indicators are forbidden for flow plain scalars.
-        if flow_indicators:
-            allow_flow_plain = False
-
-        # Block indicators are forbidden for block plain scalars.
-        if block_indicators:
-            allow_block_plain = False
-
-        return ScalarAnalysis(scalar=scalar,
-                empty=False, multiline=line_breaks,
-                allow_flow_plain=allow_flow_plain,
-                allow_block_plain=allow_block_plain,
-                allow_single_quoted=allow_single_quoted,
-                allow_double_quoted=allow_double_quoted,
-                allow_block=allow_block)
-
-    # Writers.
-
-    def flush_stream(self):
-        if hasattr(self.stream, 'flush'):
-            self.stream.flush()
-
-    def write_stream_start(self):
-        # Write BOM if needed.
-        if self.encoding and self.encoding.startswith('utf-16'):
-            self.stream.write('\uFEFF'.encode(self.encoding))
-
-    def write_stream_end(self):
-        self.flush_stream()
-
-    def write_indicator(self, indicator, need_whitespace,
-            whitespace=False, indention=False):
-        if self.whitespace or not need_whitespace:
-            data = indicator
-        else:
-            data = ' '+indicator
-        self.whitespace = whitespace
-        self.indention = self.indention and indention
-        self.column += len(data)
-        self.open_ended = False
-        if self.encoding:
-            data = data.encode(self.encoding)
-        self.stream.write(data)
-
-    def write_indent(self):
-        indent = self.indent or 0
-        if not self.indention or self.column > indent   \
-                or (self.column == indent and not self.whitespace):
-            self.write_line_break()
-        if self.column < indent:
-            self.whitespace = True
-            data = ' '*(indent-self.column)
-            self.column = indent
-            if self.encoding:
-                data = data.encode(self.encoding)
-            self.stream.write(data)
-
-    def write_line_break(self, data=None):
-        if data is None:
-            data = self.best_line_break
-        self.whitespace = True
-        self.indention = True
-        self.line += 1
-        self.column = 0
-        if self.encoding:
-            data = data.encode(self.encoding)
-        self.stream.write(data)
-
-    def write_version_directive(self, version_text):
-        data = '%%YAML %s' % version_text
-        if self.encoding:
-            data = data.encode(self.encoding)
-        self.stream.write(data)
-        self.write_line_break()
-
-    def write_tag_directive(self, handle_text, prefix_text):
-        data = '%%TAG %s %s' % (handle_text, prefix_text)
-        if self.encoding:
-            data = data.encode(self.encoding)
-        self.stream.write(data)
-        self.write_line_break()
-
-    # Scalar streams.
-
-    def write_single_quoted(self, text, split=True):
-        self.write_indicator('\'', True)
-        spaces = False
-        breaks = False
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if spaces:
-                if ch is None or ch != ' ':
-                    if start+1 == end and self.column > self.best_width and split   \
-                            and start != 0 and end != len(text):
-                        self.write_indent()
-                    else:
-                        data = text[start:end]
-                        self.column += len(data)
-                        if self.encoding:
-                            data = data.encode(self.encoding)
-                        self.stream.write(data)
-                    start = end
-            elif breaks:
-                if ch is None or ch not in '\n\x85\u2028\u2029':
-                    if text[start] == '\n':
-                        self.write_line_break()
-                    for br in text[start:end]:
-                        if br == '\n':
-                            self.write_line_break()
-                        else:
-                            self.write_line_break(br)
-                    self.write_indent()
-                    start = end
-            else:
-                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
-                    if start < end:
-                        data = text[start:end]
-                        self.column += len(data)
-                        if self.encoding:
-                            data = data.encode(self.encoding)
-                        self.stream.write(data)
-                        start = end
-            if ch == '\'':
-                data = '\'\''
-                self.column += 2
-                if self.encoding:
-                    data = data.encode(self.encoding)
-                self.stream.write(data)
-                start = end + 1
-            if ch is not None:
-                spaces = (ch == ' ')
-                breaks = (ch in '\n\x85\u2028\u2029')
-            end += 1
-        self.write_indicator('\'', False)
-
-    ESCAPE_REPLACEMENTS = {
-        '\0':       '0',
-        '\x07':     'a',
-        '\x08':     'b',
-        '\x09':     't',
-        '\x0A':     'n',
-        '\x0B':     'v',
-        '\x0C':     'f',
-        '\x0D':     'r',
-        '\x1B':     'e',
-        '\"':       '\"',
-        '\\':       '\\',
-        '\x85':     'N',
-        '\xA0':     '_',
-        '\u2028':   'L',
-        '\u2029':   'P',
-    }
-
-    def write_double_quoted(self, text, split=True):
-        self.write_indicator('"', True)
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
-                    or not ('\x20' <= ch <= '\x7E'
-                        or (self.allow_unicode
-                            and ('\xA0' <= ch <= '\uD7FF'
-                                or '\uE000' <= ch <= '\uFFFD'))):
-                if start < end:
-                    data = text[start:end]
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    start = end
-                if ch is not None:
-                    if ch in self.ESCAPE_REPLACEMENTS:
-                        data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
-                    elif ch <= '\xFF':
-                        data = '\\x%02X' % ord(ch)
-                    elif ch <= '\uFFFF':
-                        data = '\\u%04X' % ord(ch)
-                    else:
-                        data = '\\U%08X' % ord(ch)
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    start = end+1
-            if 0 < end < len(text)-1 and (ch == ' ' or start >= end)    \
-                    and self.column+(end-start) > self.best_width and split:
-                data = text[start:end]+'\\'
-                if start < end:
-                    start = end
-                self.column += len(data)
-                if self.encoding:
-                    data = data.encode(self.encoding)
-                self.stream.write(data)
-                self.write_indent()
-                self.whitespace = False
-                self.indention = False
-                if text[start] == ' ':
-                    data = '\\'
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-            end += 1
-        self.write_indicator('"', False)
-
-    def determine_block_hints(self, text):
-        hints = ''
-        if text:
-            if text[0] in ' \n\x85\u2028\u2029':
-                hints += str(self.best_indent)
-            if text[-1] not in '\n\x85\u2028\u2029':
-                hints += '-'
-            elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
-                hints += '+'
-        return hints
-
-    def write_folded(self, text):
-        hints = self.determine_block_hints(text)
-        self.write_indicator('>'+hints, True)
-        if hints[-1:] == '+':
-            self.open_ended = True
-        self.write_line_break()
-        leading_space = True
-        spaces = False
-        breaks = True
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if breaks:
-                if ch is None or ch not in '\n\x85\u2028\u2029':
-                    if not leading_space and ch is not None and ch != ' '   \
-                            and text[start] == '\n':
-                        self.write_line_break()
-                    leading_space = (ch == ' ')
-                    for br in text[start:end]:
-                        if br == '\n':
-                            self.write_line_break()
-                        else:
-                            self.write_line_break(br)
-                    if ch is not None:
-                        self.write_indent()
-                    start = end
-            elif spaces:
-                if ch != ' ':
-                    if start+1 == end and self.column > self.best_width:
-                        self.write_indent()
-                    else:
-                        data = text[start:end]
-                        self.column += len(data)
-                        if self.encoding:
-                            data = data.encode(self.encoding)
-                        self.stream.write(data)
-                    start = end
-            else:
-                if ch is None or ch in ' \n\x85\u2028\u2029':
-                    data = text[start:end]
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    if ch is None:
-                        self.write_line_break()
-                    start = end
-            if ch is not None:
-                breaks = (ch in '\n\x85\u2028\u2029')
-                spaces = (ch == ' ')
-            end += 1
-
-    def write_literal(self, text):
-        hints = self.determine_block_hints(text)
-        self.write_indicator('|'+hints, True)
-        if hints[-1:] == '+':
-            self.open_ended = True
-        self.write_line_break()
-        breaks = True
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if breaks:
-                if ch is None or ch not in '\n\x85\u2028\u2029':
-                    for br in text[start:end]:
-                        if br == '\n':
-                            self.write_line_break()
-                        else:
-                            self.write_line_break(br)
-                    if ch is not None:
-                        self.write_indent()
-                    start = end
-            else:
-                if ch is None or ch in '\n\x85\u2028\u2029':
-                    data = text[start:end]
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    if ch is None:
-                        self.write_line_break()
-                    start = end
-            if ch is not None:
-                breaks = (ch in '\n\x85\u2028\u2029')
-            end += 1
-
-    def write_plain(self, text, split=True):
-        if self.root_context:
-            self.open_ended = True
-        if not text:
-            return
-        if not self.whitespace:
-            data = ' '
-            self.column += len(data)
-            if self.encoding:
-                data = data.encode(self.encoding)
-            self.stream.write(data)
-        self.whitespace = False
-        self.indention = False
-        spaces = False
-        breaks = False
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if spaces:
-                if ch != ' ':
-                    if start+1 == end and self.column > self.best_width and split:
-                        self.write_indent()
-                        self.whitespace = False
-                        self.indention = False
-                    else:
-                        data = text[start:end]
-                        self.column += len(data)
-                        if self.encoding:
-                            data = data.encode(self.encoding)
-                        self.stream.write(data)
-                    start = end
-            elif breaks:
-                if ch not in '\n\x85\u2028\u2029':
-                    if text[start] == '\n':
-                        self.write_line_break()
-                    for br in text[start:end]:
-                        if br == '\n':
-                            self.write_line_break()
-                        else:
-                            self.write_line_break(br)
-                    self.write_indent()
-                    self.whitespace = False
-                    self.indention = False
-                    start = end
-            else:
-                if ch is None or ch in ' \n\x85\u2028\u2029':
-                    data = text[start:end]
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    start = end
-            if ch is not None:
-                spaces = (ch == ' ')
-                breaks = (ch in '\n\x85\u2028\u2029')
-            end += 1
-
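[Editorial aside, not part of the commit] The block scalar writers above (write_literal for '|', write_folded for '>') prepend the header hints computed by determine_block_hints: an explicit indent digit when the text starts with a space or a break, '-' to strip a missing final break, '+' to keep extra trailing breaks. A minimal sketch of the hint values, assuming the vendored package is importable under the post-refactoring path mncheck.ext.yaml:

import io
from mncheck.ext.yaml.emitter import Emitter

em = Emitter(io.StringIO())                                # best_indent defaults to 2
print(em.determine_block_hints('no final break'))          # '-'  (strip chomping)
print(em.determine_block_hints('one final break\n'))       # ''   (clip chomping)
print(em.determine_block_hints('extra blank line\n\n'))    # '+'  (keep chomping)
print(em.determine_block_hints(' leading space\n'))        # '2'  (explicit indent hint)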
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+    pass
+
+class ScalarAnalysis:
+    def __init__(self, scalar, empty, multiline,
+            allow_flow_plain, allow_block_plain,
+            allow_single_quoted, allow_double_quoted,
+            allow_block):
+        self.scalar = scalar
+        self.empty = empty
+        self.multiline = multiline
+        self.allow_flow_plain = allow_flow_plain
+        self.allow_block_plain = allow_block_plain
+        self.allow_single_quoted = allow_single_quoted
+        self.allow_double_quoted = allow_double_quoted
+        self.allow_block = allow_block
+
+class Emitter:
+
+    DEFAULT_TAG_PREFIXES = {
+        '!' : '!',
+        'tag:yaml.org,2002:' : '!!',
+    }
+
+    def __init__(self, stream, canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None):
+
+        # The stream should have the methods `write` and possibly `flush`.
+        self.stream = stream
+
+        # Encoding can be overridden by STREAM-START.
+        self.encoding = None
+
+        # Emitter is a state machine with a stack of states to handle nested
+        # structures.
+        self.states = []
+        self.state = self.expect_stream_start
+
+        # Current event and the event queue.
+        self.events = []
+        self.event = None
+
+        # The current indentation level and the stack of previous indents.
+        self.indents = []
+        self.indent = None
+
+        # Flow level.
+        self.flow_level = 0
+
+        # Contexts.
+        self.root_context = False
+        self.sequence_context = False
+        self.mapping_context = False
+        self.simple_key_context = False
+
+        # Characteristics of the last emitted character:
+        #  - current position.
+        #  - is it a whitespace?
+        #  - is it an indention character
+        #    (indentation space, '-', '?', or ':')?
+        self.line = 0
+        self.column = 0
+        self.whitespace = True
+        self.indention = True
+
+        # Whether the document requires an explicit document indicator
+        self.open_ended = False
+
+        # Formatting details.
+        self.canonical = canonical
+        self.allow_unicode = allow_unicode
+        self.best_indent = 2
+        if indent and 1 < indent < 10:
+            self.best_indent = indent
+        self.best_width = 80
+        if width and width > self.best_indent*2:
+            self.best_width = width
+        self.best_line_break = '\n'
+        if line_break in ['\r', '\n', '\r\n']:
+            self.best_line_break = line_break
+
+        # Tag prefixes.
+        self.tag_prefixes = None
+
+        # Prepared anchor and tag.
+        self.prepared_anchor = None
+        self.prepared_tag = None
+
+        # Scalar analysis and style.
+        self.analysis = None
+        self.style = None
+
+    def dispose(self):
+        # Reset the state attributes (to clear self-references)
+        self.states = []
+        self.state = None
+
+    def emit(self, event):
+        self.events.append(event)
+        while not self.need_more_events():
+            self.event = self.events.pop(0)
+            self.state()
+            self.event = None
+
+    # In some cases, we wait for the next few events before emitting.
+
+    def need_more_events(self):
+        if not self.events:
+            return True
+        event = self.events[0]
+        if isinstance(event, DocumentStartEvent):
+            return self.need_events(1)
+        elif isinstance(event, SequenceStartEvent):
+            return self.need_events(2)
+        elif isinstance(event, MappingStartEvent):
+            return self.need_events(3)
+        else:
+            return False
+
+    def need_events(self, count):
+        level = 0
+        for event in self.events[1:]:
+            if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+                level += 1
+            elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+                level -= 1
+            elif isinstance(event, StreamEndEvent):
+                level = -1
+            if level < 0:
+                return False
+        return (len(self.events) < count+1)
+
+    def increase_indent(self, flow=False, indentless=False):
+        self.indents.append(self.indent)
+        if self.indent is None:
+            if flow:
+                self.indent = self.best_indent
+            else:
+                self.indent = 0
+        elif not indentless:
+            self.indent += self.best_indent
+
+    # States.
+
+    # Stream handlers.
+
+    def expect_stream_start(self):
+        if isinstance(self.event, StreamStartEvent):
+            if self.event.encoding and not hasattr(self.stream, 'encoding'):
+                self.encoding = self.event.encoding
+            self.write_stream_start()
+            self.state = self.expect_first_document_start
+        else:
+            raise EmitterError("expected StreamStartEvent, but got %s"
+                    % self.event)
+
+    def expect_nothing(self):
+        raise EmitterError("expected nothing, but got %s" % self.event)
+
+    # Document handlers.
+
+    def expect_first_document_start(self):
+        return self.expect_document_start(first=True)
+
+    def expect_document_start(self, first=False):
+        if isinstance(self.event, DocumentStartEvent):
+            if (self.event.version or self.event.tags) and self.open_ended:
+                self.write_indicator('...', True)
+                self.write_indent()
+            if self.event.version:
+                version_text = self.prepare_version(self.event.version)
+                self.write_version_directive(version_text)
+            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+            if self.event.tags:
+                handles = sorted(self.event.tags.keys())
+                for handle in handles:
+                    prefix = self.event.tags[handle]
+                    self.tag_prefixes[prefix] = handle
+                    handle_text = self.prepare_tag_handle(handle)
+                    prefix_text = self.prepare_tag_prefix(prefix)
+                    self.write_tag_directive(handle_text, prefix_text)
+            implicit = (first and not self.event.explicit and not self.canonical
+                    and not self.event.version and not self.event.tags
+                    and not self.check_empty_document())
+            if not implicit:
+                self.write_indent()
+                self.write_indicator('---', True)
+                if self.canonical:
+                    self.write_indent()
+            self.state = self.expect_document_root
+        elif isinstance(self.event, StreamEndEvent):
+            if self.open_ended:
+                self.write_indicator('...', True)
+                self.write_indent()
+            self.write_stream_end()
+            self.state = self.expect_nothing
+        else:
+            raise EmitterError("expected DocumentStartEvent, but got %s"
+                    % self.event)
+
+    def expect_document_end(self):
+        if isinstance(self.event, DocumentEndEvent):
+            self.write_indent()
+            if self.event.explicit:
+                self.write_indicator('...', True)
+                self.write_indent()
+            self.flush_stream()
+            self.state = self.expect_document_start
+        else:
+            raise EmitterError("expected DocumentEndEvent, but got %s"
+                    % self.event)
+
+    def expect_document_root(self):
+        self.states.append(self.expect_document_end)
+        self.expect_node(root=True)
+
+    # Node handlers.
+
+    def expect_node(self, root=False, sequence=False, mapping=False,
+            simple_key=False):
+        self.root_context = root
+        self.sequence_context = sequence
+        self.mapping_context = mapping
+        self.simple_key_context = simple_key
+        if isinstance(self.event, AliasEvent):
+            self.expect_alias()
+        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+            self.process_anchor('&')
+            self.process_tag()
+            if isinstance(self.event, ScalarEvent):
+                self.expect_scalar()
+            elif isinstance(self.event, SequenceStartEvent):
+                if self.flow_level or self.canonical or self.event.flow_style   \
+                        or self.check_empty_sequence():
+                    self.expect_flow_sequence()
+                else:
+                    self.expect_block_sequence()
+            elif isinstance(self.event, MappingStartEvent):
+                if self.flow_level or self.canonical or self.event.flow_style   \
+                        or self.check_empty_mapping():
+                    self.expect_flow_mapping()
+                else:
+                    self.expect_block_mapping()
+        else:
+            raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+    def expect_alias(self):
+        if self.event.anchor is None:
+            raise EmitterError("anchor is not specified for alias")
+        self.process_anchor('*')
+        self.state = self.states.pop()
+
+    def expect_scalar(self):
+        self.increase_indent(flow=True)
+        self.process_scalar()
+        self.indent = self.indents.pop()
+        self.state = self.states.pop()
+
+    # Flow sequence handlers.
+
+    def expect_flow_sequence(self):
+        self.write_indicator('[', True, whitespace=True)
+        self.flow_level += 1
+        self.increase_indent(flow=True)
+        self.state = self.expect_first_flow_sequence_item
+
+    def expect_first_flow_sequence_item(self):
+        if isinstance(self.event, SequenceEndEvent):
+            self.indent = self.indents.pop()
+            self.flow_level -= 1
+            self.write_indicator(']', False)
+            self.state = self.states.pop()
+        else:
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            self.states.append(self.expect_flow_sequence_item)
+            self.expect_node(sequence=True)
+
+    def expect_flow_sequence_item(self):
+        if isinstance(self.event, SequenceEndEvent):
+            self.indent = self.indents.pop()
+            self.flow_level -= 1
+            if self.canonical:
+                self.write_indicator(',', False)
+                self.write_indent()
+            self.write_indicator(']', False)
+            self.state = self.states.pop()
+        else:
+            self.write_indicator(',', False)
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            self.states.append(self.expect_flow_sequence_item)
+            self.expect_node(sequence=True)
+
+    # Flow mapping handlers.
+
+    def expect_flow_mapping(self):
+        self.write_indicator('{', True, whitespace=True)
+        self.flow_level += 1
+        self.increase_indent(flow=True)
+        self.state = self.expect_first_flow_mapping_key
+
+    def expect_first_flow_mapping_key(self):
+        if isinstance(self.event, MappingEndEvent):
+            self.indent = self.indents.pop()
+            self.flow_level -= 1
+            self.write_indicator('}', False)
+            self.state = self.states.pop()
+        else:
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            if not self.canonical and self.check_simple_key():
+                self.states.append(self.expect_flow_mapping_simple_value)
+                self.expect_node(mapping=True, simple_key=True)
+            else:
+                self.write_indicator('?', True)
+                self.states.append(self.expect_flow_mapping_value)
+                self.expect_node(mapping=True)
+
+    def expect_flow_mapping_key(self):
+        if isinstance(self.event, MappingEndEvent):
+            self.indent = self.indents.pop()
+            self.flow_level -= 1
+            if self.canonical:
+                self.write_indicator(',', False)
+                self.write_indent()
+            self.write_indicator('}', False)
+            self.state = self.states.pop()
+        else:
+            self.write_indicator(',', False)
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            if not self.canonical and self.check_simple_key():
+                self.states.append(self.expect_flow_mapping_simple_value)
+                self.expect_node(mapping=True, simple_key=True)
+            else:
+                self.write_indicator('?', True)
+                self.states.append(self.expect_flow_mapping_value)
+                self.expect_node(mapping=True)
+
+    def expect_flow_mapping_simple_value(self):
+        self.write_indicator(':', False)
+        self.states.append(self.expect_flow_mapping_key)
+        self.expect_node(mapping=True)
+
+    def expect_flow_mapping_value(self):
+        if self.canonical or self.column > self.best_width:
+            self.write_indent()
+        self.write_indicator(':', True)
+        self.states.append(self.expect_flow_mapping_key)
+        self.expect_node(mapping=True)
+
+    # Block sequence handlers.
+
+    def expect_block_sequence(self):
+        indentless = (self.mapping_context and not self.indention)
+        self.increase_indent(flow=False, indentless=indentless)
+        self.state = self.expect_first_block_sequence_item
+
+    def expect_first_block_sequence_item(self):
+        return self.expect_block_sequence_item(first=True)
+
+    def expect_block_sequence_item(self, first=False):
+        if not first and isinstance(self.event, SequenceEndEvent):
+            self.indent = self.indents.pop()
+            self.state = self.states.pop()
+        else:
+            self.write_indent()
+            self.write_indicator('-', True, indention=True)
+            self.states.append(self.expect_block_sequence_item)
+            self.expect_node(sequence=True)
+
+    # Block mapping handlers.
+
+    def expect_block_mapping(self):
+        self.increase_indent(flow=False)
+        self.state = self.expect_first_block_mapping_key
+
+    def expect_first_block_mapping_key(self):
+        return self.expect_block_mapping_key(first=True)
+
+    def expect_block_mapping_key(self, first=False):
+        if not first and isinstance(self.event, MappingEndEvent):
+            self.indent = self.indents.pop()
+            self.state = self.states.pop()
+        else:
+            self.write_indent()
+            if self.check_simple_key():
+                self.states.append(self.expect_block_mapping_simple_value)
+                self.expect_node(mapping=True, simple_key=True)
+            else:
+                self.write_indicator('?', True, indention=True)
+                self.states.append(self.expect_block_mapping_value)
+                self.expect_node(mapping=True)
+
+    def expect_block_mapping_simple_value(self):
+        self.write_indicator(':', False)
+        self.states.append(self.expect_block_mapping_key)
+        self.expect_node(mapping=True)
+
+    def expect_block_mapping_value(self):
+        self.write_indent()
+        self.write_indicator(':', True, indention=True)
+        self.states.append(self.expect_block_mapping_key)
+        self.expect_node(mapping=True)
+
+    # Checkers.
+
+    def check_empty_sequence(self):
+        return (isinstance(self.event, SequenceStartEvent) and self.events
+                and isinstance(self.events[0], SequenceEndEvent))
+
+    def check_empty_mapping(self):
+        return (isinstance(self.event, MappingStartEvent) and self.events
+                and isinstance(self.events[0], MappingEndEvent))
+
+    def check_empty_document(self):
+        if not isinstance(self.event, DocumentStartEvent) or not self.events:
+            return False
+        event = self.events[0]
+        return (isinstance(event, ScalarEvent) and event.anchor is None
+                and event.tag is None and event.implicit and event.value == '')
+
+    def check_simple_key(self):
+        length = 0
+        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+            if self.prepared_anchor is None:
+                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+            length += len(self.prepared_anchor)
+        if isinstance(self.event, (ScalarEvent, CollectionStartEvent))  \
+                and self.event.tag is not None:
+            if self.prepared_tag is None:
+                self.prepared_tag = self.prepare_tag(self.event.tag)
+            length += len(self.prepared_tag)
+        if isinstance(self.event, ScalarEvent):
+            if self.analysis is None:
+                self.analysis = self.analyze_scalar(self.event.value)
+            length += len(self.analysis.scalar)
+        return (length < 128 and (isinstance(self.event, AliasEvent)
+            or (isinstance(self.event, ScalarEvent)
+                    and not self.analysis.empty and not self.analysis.multiline)
+            or self.check_empty_sequence() or self.check_empty_mapping()))
+
+    # Anchor, Tag, and Scalar processors.
+
+    def process_anchor(self, indicator):
+        if self.event.anchor is None:
+            self.prepared_anchor = None
+            return
+        if self.prepared_anchor is None:
+            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+        if self.prepared_anchor:
+            self.write_indicator(indicator+self.prepared_anchor, True)
+        self.prepared_anchor = None
+
+    def process_tag(self):
+        tag = self.event.tag
+        if isinstance(self.event, ScalarEvent):
+            if self.style is None:
+                self.style = self.choose_scalar_style()
+            if ((not self.canonical or tag is None) and
+                ((self.style == '' and self.event.implicit[0])
+                        or (self.style != '' and self.event.implicit[1]))):
+                self.prepared_tag = None
+                return
+            if self.event.implicit[0] and tag is None:
+                tag = '!'
+                self.prepared_tag = None
+        else:
+            if (not self.canonical or tag is None) and self.event.implicit:
+                self.prepared_tag = None
+                return
+        if tag is None:
+            raise EmitterError("tag is not specified")
+        if self.prepared_tag is None:
+            self.prepared_tag = self.prepare_tag(tag)
+        if self.prepared_tag:
+            self.write_indicator(self.prepared_tag, True)
+        self.prepared_tag = None
+
+    def choose_scalar_style(self):
+        if self.analysis is None:
+            self.analysis = self.analyze_scalar(self.event.value)
+        if self.event.style == '"' or self.canonical:
+            return '"'
+        if not self.event.style and self.event.implicit[0]:
+            if (not (self.simple_key_context and
+                    (self.analysis.empty or self.analysis.multiline))
+                and (self.flow_level and self.analysis.allow_flow_plain
+                    or (not self.flow_level and self.analysis.allow_block_plain))):
+                return ''
+        if self.event.style and self.event.style in '|>':
+            if (not self.flow_level and not self.simple_key_context
+                    and self.analysis.allow_block):
+                return self.event.style
+        if not self.event.style or self.event.style == '\'':
+            if (self.analysis.allow_single_quoted and
+                    not (self.simple_key_context and self.analysis.multiline)):
+                return '\''
+        return '"'
+
+    def process_scalar(self):
+        if self.analysis is None:
+            self.analysis = self.analyze_scalar(self.event.value)
+        if self.style is None:
+            self.style = self.choose_scalar_style()
+        split = (not self.simple_key_context)
+        #if self.analysis.multiline and split    \
+        #        and (not self.style or self.style in '\'\"'):
+        #    self.write_indent()
+        if self.style == '"':
+            self.write_double_quoted(self.analysis.scalar, split)
+        elif self.style == '\'':
+            self.write_single_quoted(self.analysis.scalar, split)
+        elif self.style == '>':
+            self.write_folded(self.analysis.scalar)
+        elif self.style == '|':
+            self.write_literal(self.analysis.scalar)
+        else:
+            self.write_plain(self.analysis.scalar, split)
+        self.analysis = None
+        self.style = None
+
+    # Analyzers.
+
+    def prepare_version(self, version):
+        major, minor = version
+        if major != 1:
+            raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+        return '%d.%d' % (major, minor)
+
+    def prepare_tag_handle(self, handle):
+        if not handle:
+            raise EmitterError("tag handle must not be empty")
+        if handle[0] != '!' or handle[-1] != '!':
+            raise EmitterError("tag handle must start and end with '!': %r" % handle)
+        for ch in handle[1:-1]:
+            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
+                    or ch in '-_'):
+                raise EmitterError("invalid character %r in the tag handle: %r"
+                        % (ch, handle))
+        return handle
+
+    def prepare_tag_prefix(self, prefix):
+        if not prefix:
+            raise EmitterError("tag prefix must not be empty")
+        chunks = []
+        start = end = 0
+        if prefix[0] == '!':
+            end = 1
+        while end < len(prefix):
+            ch = prefix[end]
+            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+                    or ch in '-;/?!:@&=+$,_.~*\'()[]':
+                end += 1
+            else:
+                if start < end:
+                    chunks.append(prefix[start:end])
+                start = end = end+1
+                data = ch.encode('utf-8')
+                # Iterating over bytes yields ints in Python 3, so no ord() here.
+                for ch in data:
+                    chunks.append('%%%02X' % ch)
+        if start < end:
+            chunks.append(prefix[start:end])
+        return ''.join(chunks)
+
+    def prepare_tag(self, tag):
+        if not tag:
+            raise EmitterError("tag must not be empty")
+        if tag == '!':
+            return tag
+        handle = None
+        suffix = tag
+        prefixes = sorted(self.tag_prefixes.keys())
+        for prefix in prefixes:
+            if tag.startswith(prefix)   \
+                    and (prefix == '!' or len(prefix) < len(tag)):
+                handle = self.tag_prefixes[prefix]
+                suffix = tag[len(prefix):]
+        chunks = []
+        start = end = 0
+        while end < len(suffix):
+            ch = suffix[end]
+            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+                    or ch in '-;/?:@&=+$,_.~*\'()[]'   \
+                    or (ch == '!' and handle != '!'):
+                end += 1
+            else:
+                if start < end:
+                    chunks.append(suffix[start:end])
+                start = end = end+1
+                data = ch.encode('utf-8')
+                # Iterating over bytes yields ints in Python 3, so no ord() here.
+                for ch in data:
+                    chunks.append('%%%02X' % ch)
+        if start < end:
+            chunks.append(suffix[start:end])
+        suffix_text = ''.join(chunks)
+        if handle:
+            return '%s%s' % (handle, suffix_text)
+        else:
+            return '!<%s>' % suffix_text
+
+    def prepare_anchor(self, anchor):
+        if not anchor:
+            raise EmitterError("anchor must not be empty")
+        for ch in anchor:
+            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
+                    or ch in '-_'):
+                raise EmitterError("invalid character %r in the anchor: %r"
+                        % (ch, anchor))
+        return anchor
+
+    def analyze_scalar(self, scalar):
+
+        # Empty scalar is a special case.
+        if not scalar:
+            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+                    allow_flow_plain=False, allow_block_plain=True,
+                    allow_single_quoted=True, allow_double_quoted=True,
+                    allow_block=False)
+
+        # Indicators and special characters.
+        block_indicators = False
+        flow_indicators = False
+        line_breaks = False
+        special_characters = False
+
+        # Important whitespace combinations.
+        leading_space = False
+        leading_break = False
+        trailing_space = False
+        trailing_break = False
+        break_space = False
+        space_break = False
+
+        # Check document indicators.
+        if scalar.startswith('---') or scalar.startswith('...'):
+            block_indicators = True
+            flow_indicators = True
+
+        # First character or preceded by a whitespace.
+        preceeded_by_whitespace = True
+
+        # Last character or followed by a whitespace.
+        followed_by_whitespace = (len(scalar) == 1 or
+                scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+        # The previous character is a space.
+        previous_space = False
+
+        # The previous character is a break.
+        previous_break = False
+
+        index = 0
+        while index < len(scalar):
+            ch = scalar[index]
+
+            # Check for indicators.
+            if index == 0:
+                # Leading indicators are special characters.
+                if ch in '#,[]{}&*!|>\'\"%@`': 
+                    flow_indicators = True
+                    block_indicators = True
+                if ch in '?:':
+                    flow_indicators = True
+                    if followed_by_whitespace:
+                        block_indicators = True
+                if ch == '-' and followed_by_whitespace:
+                    flow_indicators = True
+                    block_indicators = True
+            else:
+                # Some indicators cannot appear within a scalar either.
+                if ch in ',?[]{}':
+                    flow_indicators = True
+                if ch == ':':
+                    flow_indicators = True
+                    if followed_by_whitespace:
+                        block_indicators = True
+                if ch == '#' and preceeded_by_whitespace:
+                    flow_indicators = True
+                    block_indicators = True
+
+            # Check for line breaks, special, and unicode characters.
+            if ch in '\n\x85\u2028\u2029':
+                line_breaks = True
+            if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+                if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+                        or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
+                    unicode_characters = True
+                    if not self.allow_unicode:
+                        special_characters = True
+                else:
+                    special_characters = True
+
+            # Detect important whitespace combinations.
+            if ch == ' ':
+                if index == 0:
+                    leading_space = True
+                if index == len(scalar)-1:
+                    trailing_space = True
+                if previous_break:
+                    break_space = True
+                previous_space = True
+                previous_break = False
+            elif ch in '\n\x85\u2028\u2029':
+                if index == 0:
+                    leading_break = True
+                if index == len(scalar)-1:
+                    trailing_break = True
+                if previous_space:
+                    space_break = True
+                previous_space = False
+                previous_break = True
+            else:
+                previous_space = False
+                previous_break = False
+
+            # Prepare for the next character.
+            index += 1
+            preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+            followed_by_whitespace = (index+1 >= len(scalar) or
+                    scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+        # Let's decide what styles are allowed.
+        allow_flow_plain = True
+        allow_block_plain = True
+        allow_single_quoted = True
+        allow_double_quoted = True
+        allow_block = True
+
+        # Leading and trailing whitespaces are bad for plain scalars.
+        if (leading_space or leading_break
+                or trailing_space or trailing_break):
+            allow_flow_plain = allow_block_plain = False
+
+        # We do not permit trailing spaces for block scalars.
+        if trailing_space:
+            allow_block = False
+
+        # Spaces at the beginning of a new line are only acceptable for block
+        # scalars.
+        if break_space:
+            allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+        # allowed for double-quoted scalars.
+        if space_break or special_characters:
+            allow_flow_plain = allow_block_plain =  \
+            allow_single_quoted = allow_block = False
+
+        # Although the plain scalar writer supports breaks, we never emit
+        # multiline plain scalars.
+        if line_breaks:
+            allow_flow_plain = allow_block_plain = False
+
+        # Flow indicators are forbidden for flow plain scalars.
+        if flow_indicators:
+            allow_flow_plain = False
+
+        # Block indicators are forbidden for block plain scalars.
+        if block_indicators:
+            allow_block_plain = False
+
+        return ScalarAnalysis(scalar=scalar,
+                empty=False, multiline=line_breaks,
+                allow_flow_plain=allow_flow_plain,
+                allow_block_plain=allow_block_plain,
+                allow_single_quoted=allow_single_quoted,
+                allow_double_quoted=allow_double_quoted,
+                allow_block=allow_block)
+
+    # Writers.
+
+    def flush_stream(self):
+        if hasattr(self.stream, 'flush'):
+            self.stream.flush()
+
+    def write_stream_start(self):
+        # Write BOM if needed.
+        if self.encoding and self.encoding.startswith('utf-16'):
+            self.stream.write('\uFEFF'.encode(self.encoding))
+
+    def write_stream_end(self):
+        self.flush_stream()
+
+    def write_indicator(self, indicator, need_whitespace,
+            whitespace=False, indention=False):
+        if self.whitespace or not need_whitespace:
+            data = indicator
+        else:
+            data = ' '+indicator
+        self.whitespace = whitespace
+        self.indention = self.indention and indention
+        self.column += len(data)
+        self.open_ended = False
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+
+    def write_indent(self):
+        indent = self.indent or 0
+        if not self.indention or self.column > indent   \
+                or (self.column == indent and not self.whitespace):
+            self.write_line_break()
+        if self.column < indent:
+            self.whitespace = True
+            data = ' '*(indent-self.column)
+            self.column = indent
+            if self.encoding:
+                data = data.encode(self.encoding)
+            self.stream.write(data)
+
+    def write_line_break(self, data=None):
+        if data is None:
+            data = self.best_line_break
+        self.whitespace = True
+        self.indention = True
+        self.line += 1
+        self.column = 0
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+
+    def write_version_directive(self, version_text):
+        data = '%%YAML %s' % version_text
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+        self.write_line_break()
+
+    def write_tag_directive(self, handle_text, prefix_text):
+        data = '%%TAG %s %s' % (handle_text, prefix_text)
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+        self.write_line_break()
+
+    # Scalar streams.
+
+    def write_single_quoted(self, text, split=True):
+        self.write_indicator('\'', True)
+        spaces = False
+        breaks = False
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if spaces:
+                if ch is None or ch != ' ':
+                    if start+1 == end and self.column > self.best_width and split   \
+                            and start != 0 and end != len(text):
+                        self.write_indent()
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if self.encoding:
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            elif breaks:
+                if ch is None or ch not in '\n\x85\u2028\u2029':
+                    if text[start] == '\n':
+                        self.write_line_break()
+                    for br in text[start:end]:
+                        if br == '\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    self.write_indent()
+                    start = end
+            else:
+                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+                    if start < end:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if self.encoding:
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                        start = end
+            if ch == '\'':
+                data = '\'\''
+                self.column += 2
+                if self.encoding:
+                    data = data.encode(self.encoding)
+                self.stream.write(data)
+                start = end + 1
+            if ch is not None:
+                spaces = (ch == ' ')
+                breaks = (ch in '\n\x85\u2028\u2029')
+            end += 1
+        self.write_indicator('\'', False)
+
+    ESCAPE_REPLACEMENTS = {
+        '\0':       '0',
+        '\x07':     'a',
+        '\x08':     'b',
+        '\x09':     't',
+        '\x0A':     'n',
+        '\x0B':     'v',
+        '\x0C':     'f',
+        '\x0D':     'r',
+        '\x1B':     'e',
+        '\"':       '\"',
+        '\\':       '\\',
+        '\x85':     'N',
+        '\xA0':     '_',
+        '\u2028':   'L',
+        '\u2029':   'P',
+    }
+
+    def write_double_quoted(self, text, split=True):
+        self.write_indicator('"', True)
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+                    or not ('\x20' <= ch <= '\x7E'
+                        or (self.allow_unicode
+                            and ('\xA0' <= ch <= '\uD7FF'
+                                or '\uE000' <= ch <= '\uFFFD'))):
+                if start < end:
+                    data = text[start:end]
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    start = end
+                if ch is not None:
+                    if ch in self.ESCAPE_REPLACEMENTS:
+                        data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+                    elif ch <= '\xFF':
+                        data = '\\x%02X' % ord(ch)
+                    elif ch <= '\uFFFF':
+                        data = '\\u%04X' % ord(ch)
+                    else:
+                        data = '\\U%08X' % ord(ch)
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    start = end+1
+            if 0 < end < len(text)-1 and (ch == ' ' or start >= end)    \
+                    and self.column+(end-start) > self.best_width and split:
+                data = text[start:end]+'\\'
+                if start < end:
+                    start = end
+                self.column += len(data)
+                if self.encoding:
+                    data = data.encode(self.encoding)
+                self.stream.write(data)
+                self.write_indent()
+                self.whitespace = False
+                self.indention = False
+                if text[start] == ' ':
+                    data = '\\'
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+            end += 1
+        self.write_indicator('"', False)
+
+    def determine_block_hints(self, text):
+        hints = ''
+        if text:
+            if text[0] in ' \n\x85\u2028\u2029':
+                hints += str(self.best_indent)
+            if text[-1] not in '\n\x85\u2028\u2029':
+                hints += '-'
+            elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+                hints += '+'
+        return hints
+
+    def write_folded(self, text):
+        hints = self.determine_block_hints(text)
+        self.write_indicator('>'+hints, True)
+        if hints[-1:] == '+':
+            self.open_ended = True
+        self.write_line_break()
+        leading_space = True
+        spaces = False
+        breaks = True
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if breaks:
+                if ch is None or ch not in '\n\x85\u2028\u2029':
+                    if not leading_space and ch is not None and ch != ' '   \
+                            and text[start] == '\n':
+                        self.write_line_break()
+                    leading_space = (ch == ' ')
+                    for br in text[start:end]:
+                        if br == '\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    if ch is not None:
+                        self.write_indent()
+                    start = end
+            elif spaces:
+                if ch != ' ':
+                    if start+1 == end and self.column > self.best_width:
+                        self.write_indent()
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if self.encoding:
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            else:
+                if ch is None or ch in ' \n\x85\u2028\u2029':
+                    data = text[start:end]
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    if ch is None:
+                        self.write_line_break()
+                    start = end
+            if ch is not None:
+                breaks = (ch in '\n\x85\u2028\u2029')
+                spaces = (ch == ' ')
+            end += 1
+
+    def write_literal(self, text):
+        hints = self.determine_block_hints(text)
+        self.write_indicator('|'+hints, True)
+        if hints[-1:] == '+':
+            self.open_ended = True
+        self.write_line_break()
+        breaks = True
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if breaks:
+                if ch is None or ch not in '\n\x85\u2028\u2029':
+                    for br in text[start:end]:
+                        if br == '\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    if ch is not None:
+                        self.write_indent()
+                    start = end
+            else:
+                if ch is None or ch in '\n\x85\u2028\u2029':
+                    data = text[start:end]
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    if ch is None:
+                        self.write_line_break()
+                    start = end
+            if ch is not None:
+                breaks = (ch in '\n\x85\u2028\u2029')
+            end += 1
+
+    def write_plain(self, text, split=True):
+        if self.root_context:
+            self.open_ended = True
+        if not text:
+            return
+        if not self.whitespace:
+            data = ' '
+            self.column += len(data)
+            if self.encoding:
+                data = data.encode(self.encoding)
+            self.stream.write(data)
+        self.whitespace = False
+        self.indention = False
+        spaces = False
+        breaks = False
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if spaces:
+                if ch != ' ':
+                    if start+1 == end and self.column > self.best_width and split:
+                        self.write_indent()
+                        self.whitespace = False
+                        self.indention = False
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if self.encoding:
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            elif breaks:
+                if ch not in '\n\x85\u2028\u2029':
+                    if text[start] == '\n':
+                        self.write_line_break()
+                    for br in text[start:end]:
+                        if br == '\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    self.write_indent()
+                    self.whitespace = False
+                    self.indention = False
+                    start = end
+            else:
+                if ch is None or ch in ' \n\x85\u2028\u2029':
+                    data = text[start:end]
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    start = end
+            if ch is not None:
+                spaces = (ch == ' ')
+                breaks = (ch in '\n\x85\u2028\u2029')
+            end += 1
+
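[Editorial aside, not part of the commit] The grammar comment at the top of this module describes the event stream the Emitter consumes; emit() queues events and runs the state machine once enough lookahead is buffered (need_more_events). A hand-rolled sketch of driving it directly, assuming the post-refactoring import path mncheck.ext.yaml and purely illustrative values:

import io
from mncheck.ext.yaml.emitter import Emitter
from mncheck.ext.yaml.events import (
    StreamStartEvent, DocumentStartEvent, ScalarEvent,
    DocumentEndEvent, StreamEndEvent,
)

out = io.StringIO()
emitter = Emitter(out)
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END (a single scalar node here)
for event in (
    StreamStartEvent(),
    DocumentStartEvent(explicit=False),
    # implicit=(True, False) lets choose_scalar_style() fall back to the plain style
    ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='hello'),
    DocumentEndEvent(explicit=False),
    StreamEndEvent(),
):
    emitter.emit(event)

print(out.getvalue())   # 'hello' on its own line, then a '...' line closing the open-ended document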

+ 75 - 75
ext/yaml/error.py → mncheck/ext/yaml/error.py

@@ -1,75 +1,75 @@
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark:
-
-    def __init__(self, name, index, line, column, buffer, pointer):
-        self.name = name
-        self.index = index
-        self.line = line
-        self.column = column
-        self.buffer = buffer
-        self.pointer = pointer
-
-    def get_snippet(self, indent=4, max_length=75):
-        if self.buffer is None:
-            return None
-        head = ''
-        start = self.pointer
-        while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
-            start -= 1
-            if self.pointer-start > max_length/2-1:
-                head = ' ... '
-                start += 5
-                break
-        tail = ''
-        end = self.pointer
-        while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
-            end += 1
-            if end-self.pointer > max_length/2-1:
-                tail = ' ... '
-                end -= 5
-                break
-        snippet = self.buffer[start:end]
-        return ' '*indent + head + snippet + tail + '\n'  \
-                + ' '*(indent+self.pointer-start+len(head)) + '^'
-
-    def __str__(self):
-        snippet = self.get_snippet()
-        where = "  in \"%s\", line %d, column %d"   \
-                % (self.name, self.line+1, self.column+1)
-        if snippet is not None:
-            where += ":\n"+snippet
-        return where
-
-class YAMLError(Exception):
-    pass
-
-class MarkedYAMLError(YAMLError):
-
-    def __init__(self, context=None, context_mark=None,
-            problem=None, problem_mark=None, note=None):
-        self.context = context
-        self.context_mark = context_mark
-        self.problem = problem
-        self.problem_mark = problem_mark
-        self.note = note
-
-    def __str__(self):
-        lines = []
-        if self.context is not None:
-            lines.append(self.context)
-        if self.context_mark is not None  \
-            and (self.problem is None or self.problem_mark is None
-                    or self.context_mark.name != self.problem_mark.name
-                    or self.context_mark.line != self.problem_mark.line
-                    or self.context_mark.column != self.problem_mark.column):
-            lines.append(str(self.context_mark))
-        if self.problem is not None:
-            lines.append(self.problem)
-        if self.problem_mark is not None:
-            lines.append(str(self.problem_mark))
-        if self.note is not None:
-            lines.append(self.note)
-        return '\n'.join(lines)
-
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+    def __init__(self, name, index, line, column, buffer, pointer):
+        self.name = name
+        self.index = index
+        self.line = line
+        self.column = column
+        self.buffer = buffer
+        self.pointer = pointer
+
+    def get_snippet(self, indent=4, max_length=75):
+        if self.buffer is None:
+            return None
+        head = ''
+        start = self.pointer
+        while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+            start -= 1
+            if self.pointer-start > max_length/2-1:
+                head = ' ... '
+                start += 5
+                break
+        tail = ''
+        end = self.pointer
+        while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+            end += 1
+            if end-self.pointer > max_length/2-1:
+                tail = ' ... '
+                end -= 5
+                break
+        snippet = self.buffer[start:end]
+        return ' '*indent + head + snippet + tail + '\n'  \
+                + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+    def __str__(self):
+        snippet = self.get_snippet()
+        where = "  in \"%s\", line %d, column %d"   \
+                % (self.name, self.line+1, self.column+1)
+        if snippet is not None:
+            where += ":\n"+snippet
+        return where
+
+class YAMLError(Exception):
+    pass
+
+class MarkedYAMLError(YAMLError):
+
+    def __init__(self, context=None, context_mark=None,
+            problem=None, problem_mark=None, note=None):
+        self.context = context
+        self.context_mark = context_mark
+        self.problem = problem
+        self.problem_mark = problem_mark
+        self.note = note
+
+    def __str__(self):
+        lines = []
+        if self.context is not None:
+            lines.append(self.context)
+        if self.context_mark is not None  \
+            and (self.problem is None or self.problem_mark is None
+                    or self.context_mark.name != self.problem_mark.name
+                    or self.context_mark.line != self.problem_mark.line
+                    or self.context_mark.column != self.problem_mark.column):
+            lines.append(str(self.context_mark))
+        if self.problem is not None:
+            lines.append(self.problem)
+        if self.problem_mark is not None:
+            lines.append(str(self.problem_mark))
+        if self.note is not None:
+            lines.append(self.note)
+        return '\n'.join(lines)
+
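
A Mark carries the source name, a line/column pair and a pointer into the original buffer, which is what lets MarkedYAMLError.__str__ render a caret-annotated snippet. A minimal sketch of that behaviour, assuming the vendored package is importable as mncheck.ext.yaml under the directory layout introduced by this commit; the sample buffer and marks are made up for illustration:

from mncheck.ext.yaml.error import Mark, MarkedYAMLError

source = "key: [1, 2\nother: 3\n"
# Mark(name, index, line, column, buffer, pointer); the pointer selects the
# character that get_snippet() underlines with a caret.
context_mark = Mark("<example>", 5, 0, 5, source, 5)
problem_mark = Mark("<example>", 10, 0, 10, source, 10)

error = MarkedYAMLError(
    context="while parsing a flow sequence",
    context_mark=context_mark,
    problem="expected ',' or ']', but got '<stream end>'",
    problem_mark=problem_mark,
)
# Prints the context, each mark as 'in "<example>", line 1, column ...' followed
# by the snippet and a caret, and then the problem text.
print(error)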

+ 86 - 86
ext/yaml/events.py → mncheck/ext/yaml/events.py

@@ -1,86 +1,86 @@
-
-# Abstract classes.
-
-class Event(object):
-    def __init__(self, start_mark=None, end_mark=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
-                if hasattr(self, key)]
-        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
-                for key in attributes])
-        return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
-    def __init__(self, anchor, start_mark=None, end_mark=None):
-        self.anchor = anchor
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
-    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
-            flow_style=None):
-        self.anchor = anchor
-        self.tag = tag
-        self.implicit = implicit
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
-    pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
-    def __init__(self, start_mark=None, end_mark=None, encoding=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.encoding = encoding
-
-class StreamEndEvent(Event):
-    pass
-
-class DocumentStartEvent(Event):
-    def __init__(self, start_mark=None, end_mark=None,
-            explicit=None, version=None, tags=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.explicit = explicit
-        self.version = version
-        self.tags = tags
-
-class DocumentEndEvent(Event):
-    def __init__(self, start_mark=None, end_mark=None,
-            explicit=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.explicit = explicit
-
-class AliasEvent(NodeEvent):
-    pass
-
-class ScalarEvent(NodeEvent):
-    def __init__(self, anchor, tag, implicit, value,
-            start_mark=None, end_mark=None, style=None):
-        self.anchor = anchor
-        self.tag = tag
-        self.implicit = implicit
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
-    pass
-
-class SequenceEndEvent(CollectionEndEvent):
-    pass
-
-class MappingStartEvent(CollectionStartEvent):
-    pass
-
-class MappingEndEvent(CollectionEndEvent):
-    pass
-
+
+# Abstract classes.
+
+class Event(object):
+    def __init__(self, start_mark=None, end_mark=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+    def __repr__(self):
+        attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+                if hasattr(self, key)]
+        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+                for key in attributes])
+        return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+    def __init__(self, anchor, start_mark=None, end_mark=None):
+        self.anchor = anchor
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+            flow_style=None):
+        self.anchor = anchor
+        self.tag = tag
+        self.implicit = implicit
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+    pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+    def __init__(self, start_mark=None, end_mark=None, encoding=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.encoding = encoding
+
+class StreamEndEvent(Event):
+    pass
+
+class DocumentStartEvent(Event):
+    def __init__(self, start_mark=None, end_mark=None,
+            explicit=None, version=None, tags=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.explicit = explicit
+        self.version = version
+        self.tags = tags
+
+class DocumentEndEvent(Event):
+    def __init__(self, start_mark=None, end_mark=None,
+            explicit=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+    pass
+
+class ScalarEvent(NodeEvent):
+    def __init__(self, anchor, tag, implicit, value,
+            start_mark=None, end_mark=None, style=None):
+        self.anchor = anchor
+        self.tag = tag
+        self.implicit = implicit
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+    pass
+
+class SequenceEndEvent(CollectionEndEvent):
+    pass
+
+class MappingStartEvent(CollectionStartEvent):
+    pass
+
+class MappingEndEvent(CollectionEndEvent):
+    pass
+
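
These Event classes are the parser's output vocabulary: one event per syntactic construct, bracketed by stream and document start/end events. A short sketch of inspecting that stream through the package's top-level parse() helper, again assuming the vendored copy is importable as mncheck.ext.yaml:

from mncheck.ext import yaml

document = "- a\n- {b: 1}\n"
# parse() drives Reader -> Scanner -> Parser and yields Event objects;
# repr() shows the anchor/tag/implicit/value attributes when present.
for event in yaml.parse(document):
    print(event)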

+ 40 - 40
ext/yaml/loader.py → mncheck/ext/yaml/loader.py

@@ -1,40 +1,40 @@
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from .reader import *
-from .scanner import *
-from .parser import *
-from .composer import *
-from .constructor import *
-from .resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
-    def __init__(self, stream):
-        Reader.__init__(self, stream)
-        Scanner.__init__(self)
-        Parser.__init__(self)
-        Composer.__init__(self)
-        BaseConstructor.__init__(self)
-        BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
-    def __init__(self, stream):
-        Reader.__init__(self, stream)
-        Scanner.__init__(self)
-        Parser.__init__(self)
-        Composer.__init__(self)
-        SafeConstructor.__init__(self)
-        Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
-    def __init__(self, stream):
-        Reader.__init__(self, stream)
-        Scanner.__init__(self)
-        Parser.__init__(self)
-        Composer.__init__(self)
-        Constructor.__init__(self)
-        Resolver.__init__(self)
-
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        BaseConstructor.__init__(self)
+        BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        SafeConstructor.__init__(self)
+        Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        Constructor.__init__(self)
+        Resolver.__init__(self)
+
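
Each loader here is the same pipeline with a different construction stage mixed in: Reader, Scanner, Parser and Composer are shared, and only the constructor/resolver pair (BaseConstructor, SafeConstructor or Constructor) changes. A minimal usage sketch, with the same mncheck.ext.yaml import-path assumption; the sample document is invented:

from mncheck.ext import yaml
from mncheck.ext.yaml.loader import SafeLoader

text = "name: mncheck\nthresholds: [1, 2, 3]\n"

# The convenience function instantiates the loader over the stream and asks
# the constructor stage for a single document.
print(yaml.load(text, Loader=SafeLoader))   # {'name': 'mncheck', 'thresholds': [1, 2, 3]}

# The same thing driven by hand through the mixin stack:
loader = SafeLoader(text)
try:
    print(loader.get_single_data())
finally:
    loader.dispose()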

+ 49 - 49
ext/yaml/nodes.py → mncheck/ext/yaml/nodes.py

@@ -1,49 +1,49 @@
-
-class Node(object):
-    def __init__(self, tag, value, start_mark, end_mark):
-        self.tag = tag
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        value = self.value
-        #if isinstance(value, list):
-        #    if len(value) == 0:
-        #        value = '<empty>'
-        #    elif len(value) == 1:
-        #        value = '<1 item>'
-        #    else:
-        #        value = '<%d items>' % len(value)
-        #else:
-        #    if len(value) > 75:
-        #        value = repr(value[:70]+u' ... ')
-        #    else:
-        #        value = repr(value)
-        value = repr(value)
-        return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
-    id = 'scalar'
-    def __init__(self, tag, value,
-            start_mark=None, end_mark=None, style=None):
-        self.tag = tag
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.style = style
-
-class CollectionNode(Node):
-    def __init__(self, tag, value,
-            start_mark=None, end_mark=None, flow_style=None):
-        self.tag = tag
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
-    id = 'sequence'
-
-class MappingNode(CollectionNode):
-    id = 'mapping'
-
+
+class Node(object):
+    def __init__(self, tag, value, start_mark, end_mark):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+    def __repr__(self):
+        value = self.value
+        #if isinstance(value, list):
+        #    if len(value) == 0:
+        #        value = '<empty>'
+        #    elif len(value) == 1:
+        #        value = '<1 item>'
+        #    else:
+        #        value = '<%d items>' % len(value)
+        #else:
+        #    if len(value) > 75:
+        #        value = repr(value[:70]+u' ... ')
+        #    else:
+        #        value = repr(value)
+        value = repr(value)
+        return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+    id = 'scalar'
+    def __init__(self, tag, value,
+            start_mark=None, end_mark=None, style=None):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+
+class CollectionNode(Node):
+    def __init__(self, tag, value,
+            start_mark=None, end_mark=None, flow_style=None):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+    id = 'sequence'
+
+class MappingNode(CollectionNode):
+    id = 'mapping'
+
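
The composer builds a graph of these Node objects before any Python values are constructed, so tags, raw scalar strings and source marks are still available at that level. A short sketch via the top-level compose() helper, under the same import-path assumption as above:

from mncheck.ext import yaml

root = yaml.compose("plugin: mncheck\nversion: 2\n")
print(root.tag)   # tag:yaml.org,2002:map
for key_node, value_node in root.value:
    # A MappingNode's value is a list of (key node, value node) pairs; scalar
    # nodes keep their value as the original string even when the resolved tag
    # is an int or float tag.
    print(key_node.value, value_node.tag, value_node.value)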

+ 589 - 589
ext/yaml/parser.py → mncheck/ext/yaml/parser.py

@@ -1,589 +1,589 @@
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream            ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-#                       ALIAS
-#                       | properties (block_content | indentless_block_sequence)?
-#                       | block_content
-#                       | indentless_block_sequence
-# block_node        ::= ALIAS
-#                       | properties block_content?
-#                       | block_content
-# flow_node         ::= ALIAS
-#                       | properties flow_content?
-#                       | flow_content
-# properties        ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content     ::= block_collection | flow_collection | SCALAR
-# flow_content      ::= flow_collection | SCALAR
-# block_collection  ::= block_sequence | block_mapping
-# flow_collection   ::= flow_sequence | flow_mapping
-# block_sequence    ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence   ::= (BLOCK-ENTRY block_node?)+
-# block_mapping     ::= BLOCK-MAPPING_START
-#                       ((KEY block_node_or_indentless_sequence?)?
-#                       (VALUE block_node_or_indentless_sequence?)?)*
-#                       BLOCK-END
-# flow_sequence     ::= FLOW-SEQUENCE-START
-#                       (flow_sequence_entry FLOW-ENTRY)*
-#                       flow_sequence_entry?
-#                       FLOW-SEQUENCE-END
-# flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping      ::= FLOW-MAPPING-START
-#                       (flow_mapping_entry FLOW-ENTRY)*
-#                       flow_mapping_entry?
-#                       FLOW-MAPPING-END
-# flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-from .events import *
-from .scanner import *
-
-class ParserError(MarkedYAMLError):
-    pass
-
-class Parser:
-    # Since writing a recursive-descendant parser is a straightforward task, we
-    # do not give many comments here.
-
-    DEFAULT_TAGS = {
-        '!':   '!',
-        '!!':  'tag:yaml.org,2002:',
-    }
-
-    def __init__(self):
-        self.current_event = None
-        self.yaml_version = None
-        self.tag_handles = {}
-        self.states = []
-        self.marks = []
-        self.state = self.parse_stream_start
-
-    def dispose(self):
-        # Reset the state attributes (to clear self-references)
-        self.states = []
-        self.state = None
-
-    def check_event(self, *choices):
-        # Check the type of the next event.
-        if self.current_event is None:
-            if self.state:
-                self.current_event = self.state()
-        if self.current_event is not None:
-            if not choices:
-                return True
-            for choice in choices:
-                if isinstance(self.current_event, choice):
-                    return True
-        return False
-
-    def peek_event(self):
-        # Get the next event.
-        if self.current_event is None:
-            if self.state:
-                self.current_event = self.state()
-        return self.current_event
-
-    def get_event(self):
-        # Get the next event and proceed further.
-        if self.current_event is None:
-            if self.state:
-                self.current_event = self.state()
-        value = self.current_event
-        self.current_event = None
-        return value
-
-    # stream    ::= STREAM-START implicit_document? explicit_document* STREAM-END
-    # implicit_document ::= block_node DOCUMENT-END*
-    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
-    def parse_stream_start(self):
-
-        # Parse the stream start.
-        token = self.get_token()
-        event = StreamStartEvent(token.start_mark, token.end_mark,
-                encoding=token.encoding)
-
-        # Prepare the next state.
-        self.state = self.parse_implicit_document_start
-
-        return event
-
-    def parse_implicit_document_start(self):
-
-        # Parse an implicit document.
-        if not self.check_token(DirectiveToken, DocumentStartToken,
-                StreamEndToken):
-            self.tag_handles = self.DEFAULT_TAGS
-            token = self.peek_token()
-            start_mark = end_mark = token.start_mark
-            event = DocumentStartEvent(start_mark, end_mark,
-                    explicit=False)
-
-            # Prepare the next state.
-            self.states.append(self.parse_document_end)
-            self.state = self.parse_block_node
-
-            return event
-
-        else:
-            return self.parse_document_start()
-
-    def parse_document_start(self):
-
-        # Parse any extra document end indicators.
-        while self.check_token(DocumentEndToken):
-            self.get_token()
-
-        # Parse an explicit document.
-        if not self.check_token(StreamEndToken):
-            token = self.peek_token()
-            start_mark = token.start_mark
-            version, tags = self.process_directives()
-            if not self.check_token(DocumentStartToken):
-                raise ParserError(None, None,
-                        "expected '<document start>', but found %r"
-                        % self.peek_token().id,
-                        self.peek_token().start_mark)
-            token = self.get_token()
-            end_mark = token.end_mark
-            event = DocumentStartEvent(start_mark, end_mark,
-                    explicit=True, version=version, tags=tags)
-            self.states.append(self.parse_document_end)
-            self.state = self.parse_document_content
-        else:
-            # Parse the end of the stream.
-            token = self.get_token()
-            event = StreamEndEvent(token.start_mark, token.end_mark)
-            assert not self.states
-            assert not self.marks
-            self.state = None
-        return event
-
-    def parse_document_end(self):
-
-        # Parse the document end.
-        token = self.peek_token()
-        start_mark = end_mark = token.start_mark
-        explicit = False
-        if self.check_token(DocumentEndToken):
-            token = self.get_token()
-            end_mark = token.end_mark
-            explicit = True
-        event = DocumentEndEvent(start_mark, end_mark,
-                explicit=explicit)
-
-        # Prepare the next state.
-        self.state = self.parse_document_start
-
-        return event
-
-    def parse_document_content(self):
-        if self.check_token(DirectiveToken,
-                DocumentStartToken, DocumentEndToken, StreamEndToken):
-            event = self.process_empty_scalar(self.peek_token().start_mark)
-            self.state = self.states.pop()
-            return event
-        else:
-            return self.parse_block_node()
-
-    def process_directives(self):
-        self.yaml_version = None
-        self.tag_handles = {}
-        while self.check_token(DirectiveToken):
-            token = self.get_token()
-            if token.name == 'YAML':
-                if self.yaml_version is not None:
-                    raise ParserError(None, None,
-                            "found duplicate YAML directive", token.start_mark)
-                major, minor = token.value
-                if major != 1:
-                    raise ParserError(None, None,
-                            "found incompatible YAML document (version 1.* is required)",
-                            token.start_mark)
-                self.yaml_version = token.value
-            elif token.name == 'TAG':
-                handle, prefix = token.value
-                if handle in self.tag_handles:
-                    raise ParserError(None, None,
-                            "duplicate tag handle %r" % handle,
-                            token.start_mark)
-                self.tag_handles[handle] = prefix
-        if self.tag_handles:
-            value = self.yaml_version, self.tag_handles.copy()
-        else:
-            value = self.yaml_version, None
-        for key in self.DEFAULT_TAGS:
-            if key not in self.tag_handles:
-                self.tag_handles[key] = self.DEFAULT_TAGS[key]
-        return value
-
-    # block_node_or_indentless_sequence ::= ALIAS
-    #               | properties (block_content | indentless_block_sequence)?
-    #               | block_content
-    #               | indentless_block_sequence
-    # block_node    ::= ALIAS
-    #                   | properties block_content?
-    #                   | block_content
-    # flow_node     ::= ALIAS
-    #                   | properties flow_content?
-    #                   | flow_content
-    # properties    ::= TAG ANCHOR? | ANCHOR TAG?
-    # block_content     ::= block_collection | flow_collection | SCALAR
-    # flow_content      ::= flow_collection | SCALAR
-    # block_collection  ::= block_sequence | block_mapping
-    # flow_collection   ::= flow_sequence | flow_mapping
-
-    def parse_block_node(self):
-        return self.parse_node(block=True)
-
-    def parse_flow_node(self):
-        return self.parse_node()
-
-    def parse_block_node_or_indentless_sequence(self):
-        return self.parse_node(block=True, indentless_sequence=True)
-
-    def parse_node(self, block=False, indentless_sequence=False):
-        if self.check_token(AliasToken):
-            token = self.get_token()
-            event = AliasEvent(token.value, token.start_mark, token.end_mark)
-            self.state = self.states.pop()
-        else:
-            anchor = None
-            tag = None
-            start_mark = end_mark = tag_mark = None
-            if self.check_token(AnchorToken):
-                token = self.get_token()
-                start_mark = token.start_mark
-                end_mark = token.end_mark
-                anchor = token.value
-                if self.check_token(TagToken):
-                    token = self.get_token()
-                    tag_mark = token.start_mark
-                    end_mark = token.end_mark
-                    tag = token.value
-            elif self.check_token(TagToken):
-                token = self.get_token()
-                start_mark = tag_mark = token.start_mark
-                end_mark = token.end_mark
-                tag = token.value
-                if self.check_token(AnchorToken):
-                    token = self.get_token()
-                    end_mark = token.end_mark
-                    anchor = token.value
-            if tag is not None:
-                handle, suffix = tag
-                if handle is not None:
-                    if handle not in self.tag_handles:
-                        raise ParserError("while parsing a node", start_mark,
-                                "found undefined tag handle %r" % handle,
-                                tag_mark)
-                    tag = self.tag_handles[handle]+suffix
-                else:
-                    tag = suffix
-            #if tag == '!':
-            #    raise ParserError("while parsing a node", start_mark,
-            #            "found non-specific tag '!'", tag_mark,
-            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
-            if start_mark is None:
-                start_mark = end_mark = self.peek_token().start_mark
-            event = None
-            implicit = (tag is None or tag == '!')
-            if indentless_sequence and self.check_token(BlockEntryToken):
-                end_mark = self.peek_token().end_mark
-                event = SequenceStartEvent(anchor, tag, implicit,
-                        start_mark, end_mark)
-                self.state = self.parse_indentless_sequence_entry
-            else:
-                if self.check_token(ScalarToken):
-                    token = self.get_token()
-                    end_mark = token.end_mark
-                    if (token.plain and tag is None) or tag == '!':
-                        implicit = (True, False)
-                    elif tag is None:
-                        implicit = (False, True)
-                    else:
-                        implicit = (False, False)
-                    event = ScalarEvent(anchor, tag, implicit, token.value,
-                            start_mark, end_mark, style=token.style)
-                    self.state = self.states.pop()
-                elif self.check_token(FlowSequenceStartToken):
-                    end_mark = self.peek_token().end_mark
-                    event = SequenceStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=True)
-                    self.state = self.parse_flow_sequence_first_entry
-                elif self.check_token(FlowMappingStartToken):
-                    end_mark = self.peek_token().end_mark
-                    event = MappingStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=True)
-                    self.state = self.parse_flow_mapping_first_key
-                elif block and self.check_token(BlockSequenceStartToken):
-                    end_mark = self.peek_token().start_mark
-                    event = SequenceStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=False)
-                    self.state = self.parse_block_sequence_first_entry
-                elif block and self.check_token(BlockMappingStartToken):
-                    end_mark = self.peek_token().start_mark
-                    event = MappingStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=False)
-                    self.state = self.parse_block_mapping_first_key
-                elif anchor is not None or tag is not None:
-                    # Empty scalars are allowed even if a tag or an anchor is
-                    # specified.
-                    event = ScalarEvent(anchor, tag, (implicit, False), '',
-                            start_mark, end_mark)
-                    self.state = self.states.pop()
-                else:
-                    if block:
-                        node = 'block'
-                    else:
-                        node = 'flow'
-                    token = self.peek_token()
-                    raise ParserError("while parsing a %s node" % node, start_mark,
-                            "expected the node content, but found %r" % token.id,
-                            token.start_mark)
-        return event
-
-    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
-    def parse_block_sequence_first_entry(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_block_sequence_entry()
-
-    def parse_block_sequence_entry(self):
-        if self.check_token(BlockEntryToken):
-            token = self.get_token()
-            if not self.check_token(BlockEntryToken, BlockEndToken):
-                self.states.append(self.parse_block_sequence_entry)
-                return self.parse_block_node()
-            else:
-                self.state = self.parse_block_sequence_entry
-                return self.process_empty_scalar(token.end_mark)
-        if not self.check_token(BlockEndToken):
-            token = self.peek_token()
-            raise ParserError("while parsing a block collection", self.marks[-1],
-                    "expected <block end>, but found %r" % token.id, token.start_mark)
-        token = self.get_token()
-        event = SequenceEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
-    def parse_indentless_sequence_entry(self):
-        if self.check_token(BlockEntryToken):
-            token = self.get_token()
-            if not self.check_token(BlockEntryToken,
-                    KeyToken, ValueToken, BlockEndToken):
-                self.states.append(self.parse_indentless_sequence_entry)
-                return self.parse_block_node()
-            else:
-                self.state = self.parse_indentless_sequence_entry
-                return self.process_empty_scalar(token.end_mark)
-        token = self.peek_token()
-        event = SequenceEndEvent(token.start_mark, token.start_mark)
-        self.state = self.states.pop()
-        return event
-
-    # block_mapping     ::= BLOCK-MAPPING_START
-    #                       ((KEY block_node_or_indentless_sequence?)?
-    #                       (VALUE block_node_or_indentless_sequence?)?)*
-    #                       BLOCK-END
-
-    def parse_block_mapping_first_key(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_block_mapping_key()
-
-    def parse_block_mapping_key(self):
-        if self.check_token(KeyToken):
-            token = self.get_token()
-            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
-                self.states.append(self.parse_block_mapping_value)
-                return self.parse_block_node_or_indentless_sequence()
-            else:
-                self.state = self.parse_block_mapping_value
-                return self.process_empty_scalar(token.end_mark)
-        if not self.check_token(BlockEndToken):
-            token = self.peek_token()
-            raise ParserError("while parsing a block mapping", self.marks[-1],
-                    "expected <block end>, but found %r" % token.id, token.start_mark)
-        token = self.get_token()
-        event = MappingEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    def parse_block_mapping_value(self):
-        if self.check_token(ValueToken):
-            token = self.get_token()
-            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
-                self.states.append(self.parse_block_mapping_key)
-                return self.parse_block_node_or_indentless_sequence()
-            else:
-                self.state = self.parse_block_mapping_key
-                return self.process_empty_scalar(token.end_mark)
-        else:
-            self.state = self.parse_block_mapping_key
-            token = self.peek_token()
-            return self.process_empty_scalar(token.start_mark)
-
-    # flow_sequence     ::= FLOW-SEQUENCE-START
-    #                       (flow_sequence_entry FLOW-ENTRY)*
-    #                       flow_sequence_entry?
-    #                       FLOW-SEQUENCE-END
-    # flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-    #
-    # Note that while production rules for both flow_sequence_entry and
-    # flow_mapping_entry are equal, their interpretations are different.
-    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
-    # generate an inline mapping (set syntax).
-
-    def parse_flow_sequence_first_entry(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_flow_sequence_entry(first=True)
-
-    def parse_flow_sequence_entry(self, first=False):
-        if not self.check_token(FlowSequenceEndToken):
-            if not first:
-                if self.check_token(FlowEntryToken):
-                    self.get_token()
-                else:
-                    token = self.peek_token()
-                    raise ParserError("while parsing a flow sequence", self.marks[-1],
-                            "expected ',' or ']', but got %r" % token.id, token.start_mark)
-            
-            if self.check_token(KeyToken):
-                token = self.peek_token()
-                event = MappingStartEvent(None, None, True,
-                        token.start_mark, token.end_mark,
-                        flow_style=True)
-                self.state = self.parse_flow_sequence_entry_mapping_key
-                return event
-            elif not self.check_token(FlowSequenceEndToken):
-                self.states.append(self.parse_flow_sequence_entry)
-                return self.parse_flow_node()
-        token = self.get_token()
-        event = SequenceEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    def parse_flow_sequence_entry_mapping_key(self):
-        token = self.get_token()
-        if not self.check_token(ValueToken,
-                FlowEntryToken, FlowSequenceEndToken):
-            self.states.append(self.parse_flow_sequence_entry_mapping_value)
-            return self.parse_flow_node()
-        else:
-            self.state = self.parse_flow_sequence_entry_mapping_value
-            return self.process_empty_scalar(token.end_mark)
-
-    def parse_flow_sequence_entry_mapping_value(self):
-        if self.check_token(ValueToken):
-            token = self.get_token()
-            if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
-                self.states.append(self.parse_flow_sequence_entry_mapping_end)
-                return self.parse_flow_node()
-            else:
-                self.state = self.parse_flow_sequence_entry_mapping_end
-                return self.process_empty_scalar(token.end_mark)
-        else:
-            self.state = self.parse_flow_sequence_entry_mapping_end
-            token = self.peek_token()
-            return self.process_empty_scalar(token.start_mark)
-
-    def parse_flow_sequence_entry_mapping_end(self):
-        self.state = self.parse_flow_sequence_entry
-        token = self.peek_token()
-        return MappingEndEvent(token.start_mark, token.start_mark)
-
-    # flow_mapping  ::= FLOW-MAPPING-START
-    #                   (flow_mapping_entry FLOW-ENTRY)*
-    #                   flow_mapping_entry?
-    #                   FLOW-MAPPING-END
-    # flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
-    def parse_flow_mapping_first_key(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_flow_mapping_key(first=True)
-
-    def parse_flow_mapping_key(self, first=False):
-        if not self.check_token(FlowMappingEndToken):
-            if not first:
-                if self.check_token(FlowEntryToken):
-                    self.get_token()
-                else:
-                    token = self.peek_token()
-                    raise ParserError("while parsing a flow mapping", self.marks[-1],
-                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
-            if self.check_token(KeyToken):
-                token = self.get_token()
-                if not self.check_token(ValueToken,
-                        FlowEntryToken, FlowMappingEndToken):
-                    self.states.append(self.parse_flow_mapping_value)
-                    return self.parse_flow_node()
-                else:
-                    self.state = self.parse_flow_mapping_value
-                    return self.process_empty_scalar(token.end_mark)
-            elif not self.check_token(FlowMappingEndToken):
-                self.states.append(self.parse_flow_mapping_empty_value)
-                return self.parse_flow_node()
-        token = self.get_token()
-        event = MappingEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    def parse_flow_mapping_value(self):
-        if self.check_token(ValueToken):
-            token = self.get_token()
-            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
-                self.states.append(self.parse_flow_mapping_key)
-                return self.parse_flow_node()
-            else:
-                self.state = self.parse_flow_mapping_key
-                return self.process_empty_scalar(token.end_mark)
-        else:
-            self.state = self.parse_flow_mapping_key
-            token = self.peek_token()
-            return self.process_empty_scalar(token.start_mark)
-
-    def parse_flow_mapping_empty_value(self):
-        self.state = self.parse_flow_mapping_key
-        return self.process_empty_scalar(self.peek_token().start_mark)
-
-    def process_empty_scalar(self, mark):
-        return ScalarEvent(None, None, (True, False), '', mark, mark)
-
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream            ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+#                       ALIAS
+#                       | properties (block_content | indentless_block_sequence)?
+#                       | block_content
+#                       | indentless_block_sequence
+# block_node        ::= ALIAS
+#                       | properties block_content?
+#                       | block_content
+# flow_node         ::= ALIAS
+#                       | properties flow_content?
+#                       | flow_content
+# properties        ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content     ::= block_collection | flow_collection | SCALAR
+# flow_content      ::= flow_collection | SCALAR
+# block_collection  ::= block_sequence | block_mapping
+# flow_collection   ::= flow_sequence | flow_mapping
+# block_sequence    ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence   ::= (BLOCK-ENTRY block_node?)+
+# block_mapping     ::= BLOCK-MAPPING_START
+#                       ((KEY block_node_or_indentless_sequence?)?
+#                       (VALUE block_node_or_indentless_sequence?)?)*
+#                       BLOCK-END
+# flow_sequence     ::= FLOW-SEQUENCE-START
+#                       (flow_sequence_entry FLOW-ENTRY)*
+#                       flow_sequence_entry?
+#                       FLOW-SEQUENCE-END
+# flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping      ::= FLOW-MAPPING-START
+#                       (flow_mapping_entry FLOW-ENTRY)*
+#                       flow_mapping_entry?
+#                       FLOW-MAPPING-END
+# flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+    pass
+
+class Parser:
+    # Since writing a recursive descent parser is a straightforward task, we
+    # do not give many comments here.
+
+    DEFAULT_TAGS = {
+        '!':   '!',
+        '!!':  'tag:yaml.org,2002:',
+    }
+
+    def __init__(self):
+        self.current_event = None
+        self.yaml_version = None
+        self.tag_handles = {}
+        self.states = []
+        self.marks = []
+        self.state = self.parse_stream_start
+
+    def dispose(self):
+        # Reset the state attributes (to clear self-references)
+        self.states = []
+        self.state = None
+
+    def check_event(self, *choices):
+        # Check the type of the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        if self.current_event is not None:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.current_event, choice):
+                    return True
+        return False
+
+    def peek_event(self):
+        # Get the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        return self.current_event
+
+    def get_event(self):
+        # Get the next event and proceed further.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        value = self.current_event
+        self.current_event = None
+        return value
+
+    # stream    ::= STREAM-START implicit_document? explicit_document* STREAM-END
+    # implicit_document ::= block_node DOCUMENT-END*
+    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+    def parse_stream_start(self):
+
+        # Parse the stream start.
+        token = self.get_token()
+        event = StreamStartEvent(token.start_mark, token.end_mark,
+                encoding=token.encoding)
+
+        # Prepare the next state.
+        self.state = self.parse_implicit_document_start
+
+        return event
+
+    def parse_implicit_document_start(self):
+
+        # Parse an implicit document.
+        if not self.check_token(DirectiveToken, DocumentStartToken,
+                StreamEndToken):
+            self.tag_handles = self.DEFAULT_TAGS
+            token = self.peek_token()
+            start_mark = end_mark = token.start_mark
+            event = DocumentStartEvent(start_mark, end_mark,
+                    explicit=False)
+
+            # Prepare the next state.
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_block_node
+
+            return event
+
+        else:
+            return self.parse_document_start()
+
+    def parse_document_start(self):
+
+        # Parse any extra document end indicators.
+        while self.check_token(DocumentEndToken):
+            self.get_token()
+
+        # Parse an explicit document.
+        if not self.check_token(StreamEndToken):
+            token = self.peek_token()
+            start_mark = token.start_mark
+            version, tags = self.process_directives()
+            if not self.check_token(DocumentStartToken):
+                raise ParserError(None, None,
+                        "expected '<document start>', but found %r"
+                        % self.peek_token().id,
+                        self.peek_token().start_mark)
+            token = self.get_token()
+            end_mark = token.end_mark
+            event = DocumentStartEvent(start_mark, end_mark,
+                    explicit=True, version=version, tags=tags)
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_document_content
+        else:
+            # Parse the end of the stream.
+            token = self.get_token()
+            event = StreamEndEvent(token.start_mark, token.end_mark)
+            assert not self.states
+            assert not self.marks
+            self.state = None
+        return event
+
+    def parse_document_end(self):
+
+        # Parse the document end.
+        token = self.peek_token()
+        start_mark = end_mark = token.start_mark
+        explicit = False
+        if self.check_token(DocumentEndToken):
+            token = self.get_token()
+            end_mark = token.end_mark
+            explicit = True
+        event = DocumentEndEvent(start_mark, end_mark,
+                explicit=explicit)
+
+        # Prepare the next state.
+        self.state = self.parse_document_start
+
+        return event
+
+    def parse_document_content(self):
+        if self.check_token(DirectiveToken,
+                DocumentStartToken, DocumentEndToken, StreamEndToken):
+            event = self.process_empty_scalar(self.peek_token().start_mark)
+            self.state = self.states.pop()
+            return event
+        else:
+            return self.parse_block_node()
+
+    def process_directives(self):
+        self.yaml_version = None
+        self.tag_handles = {}
+        while self.check_token(DirectiveToken):
+            token = self.get_token()
+            if token.name == 'YAML':
+                if self.yaml_version is not None:
+                    raise ParserError(None, None,
+                            "found duplicate YAML directive", token.start_mark)
+                major, minor = token.value
+                if major != 1:
+                    raise ParserError(None, None,
+                            "found incompatible YAML document (version 1.* is required)",
+                            token.start_mark)
+                self.yaml_version = token.value
+            elif token.name == 'TAG':
+                handle, prefix = token.value
+                if handle in self.tag_handles:
+                    raise ParserError(None, None,
+                            "duplicate tag handle %r" % handle,
+                            token.start_mark)
+                self.tag_handles[handle] = prefix
+        if self.tag_handles:
+            value = self.yaml_version, self.tag_handles.copy()
+        else:
+            value = self.yaml_version, None
+        for key in self.DEFAULT_TAGS:
+            if key not in self.tag_handles:
+                self.tag_handles[key] = self.DEFAULT_TAGS[key]
+        return value
+
+    # block_node_or_indentless_sequence ::= ALIAS
+    #               | properties (block_content | indentless_block_sequence)?
+    #               | block_content
+    #               | indentless_block_sequence
+    # block_node    ::= ALIAS
+    #                   | properties block_content?
+    #                   | block_content
+    # flow_node     ::= ALIAS
+    #                   | properties flow_content?
+    #                   | flow_content
+    # properties    ::= TAG ANCHOR? | ANCHOR TAG?
+    # block_content     ::= block_collection | flow_collection | SCALAR
+    # flow_content      ::= flow_collection | SCALAR
+    # block_collection  ::= block_sequence | block_mapping
+    # flow_collection   ::= flow_sequence | flow_mapping
+
+    def parse_block_node(self):
+        return self.parse_node(block=True)
+
+    def parse_flow_node(self):
+        return self.parse_node()
+
+    def parse_block_node_or_indentless_sequence(self):
+        return self.parse_node(block=True, indentless_sequence=True)
+
+    def parse_node(self, block=False, indentless_sequence=False):
+        if self.check_token(AliasToken):
+            token = self.get_token()
+            event = AliasEvent(token.value, token.start_mark, token.end_mark)
+            self.state = self.states.pop()
+        else:
+            anchor = None
+            tag = None
+            start_mark = end_mark = tag_mark = None
+            if self.check_token(AnchorToken):
+                token = self.get_token()
+                start_mark = token.start_mark
+                end_mark = token.end_mark
+                anchor = token.value
+                if self.check_token(TagToken):
+                    token = self.get_token()
+                    tag_mark = token.start_mark
+                    end_mark = token.end_mark
+                    tag = token.value
+            elif self.check_token(TagToken):
+                token = self.get_token()
+                start_mark = tag_mark = token.start_mark
+                end_mark = token.end_mark
+                tag = token.value
+                if self.check_token(AnchorToken):
+                    token = self.get_token()
+                    end_mark = token.end_mark
+                    anchor = token.value
+            if tag is not None:
+                handle, suffix = tag
+                if handle is not None:
+                    if handle not in self.tag_handles:
+                        raise ParserError("while parsing a node", start_mark,
+                                "found undefined tag handle %r" % handle,
+                                tag_mark)
+                    tag = self.tag_handles[handle]+suffix
+                else:
+                    tag = suffix
+            #if tag == '!':
+            #    raise ParserError("while parsing a node", start_mark,
+            #            "found non-specific tag '!'", tag_mark,
+            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+            if start_mark is None:
+                start_mark = end_mark = self.peek_token().start_mark
+            event = None
+            implicit = (tag is None or tag == '!')
+            if indentless_sequence and self.check_token(BlockEntryToken):
+                end_mark = self.peek_token().end_mark
+                event = SequenceStartEvent(anchor, tag, implicit,
+                        start_mark, end_mark)
+                self.state = self.parse_indentless_sequence_entry
+            else:
+                if self.check_token(ScalarToken):
+                    token = self.get_token()
+                    end_mark = token.end_mark
+                    if (token.plain and tag is None) or tag == '!':
+                        implicit = (True, False)
+                    elif tag is None:
+                        implicit = (False, True)
+                    else:
+                        implicit = (False, False)
+                    event = ScalarEvent(anchor, tag, implicit, token.value,
+                            start_mark, end_mark, style=token.style)
+                    self.state = self.states.pop()
+                elif self.check_token(FlowSequenceStartToken):
+                    end_mark = self.peek_token().end_mark
+                    event = SequenceStartEvent(anchor, tag, implicit,
+                            start_mark, end_mark, flow_style=True)
+                    self.state = self.parse_flow_sequence_first_entry
+                elif self.check_token(FlowMappingStartToken):
+                    end_mark = self.peek_token().end_mark
+                    event = MappingStartEvent(anchor, tag, implicit,
+                            start_mark, end_mark, flow_style=True)
+                    self.state = self.parse_flow_mapping_first_key
+                elif block and self.check_token(BlockSequenceStartToken):
+                    end_mark = self.peek_token().start_mark
+                    event = SequenceStartEvent(anchor, tag, implicit,
+                            start_mark, end_mark, flow_style=False)
+                    self.state = self.parse_block_sequence_first_entry
+                elif block and self.check_token(BlockMappingStartToken):
+                    end_mark = self.peek_token().start_mark
+                    event = MappingStartEvent(anchor, tag, implicit,
+                            start_mark, end_mark, flow_style=False)
+                    self.state = self.parse_block_mapping_first_key
+                elif anchor is not None or tag is not None:
+                    # Empty scalars are allowed even if a tag or an anchor is
+                    # specified.
+                    event = ScalarEvent(anchor, tag, (implicit, False), '',
+                            start_mark, end_mark)
+                    self.state = self.states.pop()
+                else:
+                    if block:
+                        node = 'block'
+                    else:
+                        node = 'flow'
+                    token = self.peek_token()
+                    raise ParserError("while parsing a %s node" % node, start_mark,
+                            "expected the node content, but found %r" % token.id,
+                            token.start_mark)
+        return event
+
+    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+    def parse_block_sequence_first_entry(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_sequence_entry()
+
+    def parse_block_sequence_entry(self):
+        if self.check_token(BlockEntryToken):
+            token = self.get_token()
+            if not self.check_token(BlockEntryToken, BlockEndToken):
+                self.states.append(self.parse_block_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_block_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        if not self.check_token(BlockEndToken):
+            token = self.peek_token()
+            raise ParserError("while parsing a block collection", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+        token = self.get_token()
+        event = SequenceEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+    def parse_indentless_sequence_entry(self):
+        if self.check_token(BlockEntryToken):
+            token = self.get_token()
+            if not self.check_token(BlockEntryToken,
+                    KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_indentless_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_indentless_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        token = self.peek_token()
+        event = SequenceEndEvent(token.start_mark, token.start_mark)
+        self.state = self.states.pop()
+        return event
+
+    # block_mapping     ::= BLOCK-MAPPING_START
+    #                       ((KEY block_node_or_indentless_sequence?)?
+    #                       (VALUE block_node_or_indentless_sequence?)?)*
+    #                       BLOCK-END
+
+    def parse_block_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_mapping_key()
+
+    def parse_block_mapping_key(self):
+        if self.check_token(KeyToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_value)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_value
+                return self.process_empty_scalar(token.end_mark)
+        if not self.check_token(BlockEndToken):
+            token = self.peek_token()
+            raise ParserError("while parsing a block mapping", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_block_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_key)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_block_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    # flow_sequence     ::= FLOW-SEQUENCE-START
+    #                       (flow_sequence_entry FLOW-ENTRY)*
+    #                       flow_sequence_entry?
+    #                       FLOW-SEQUENCE-END
+    # flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+    #
+    # Note that while the production rules for flow_sequence_entry and
+    # flow_mapping_entry are identical, their interpretations differ.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
+
+    def parse_flow_sequence_first_entry(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_sequence_entry(first=True)
+
+    def parse_flow_sequence_entry(self, first=False):
+        if not self.check_token(FlowSequenceEndToken):
+            if not first:
+                if self.check_token(FlowEntryToken):
+                    self.get_token()
+                else:
+                    token = self.peek_token()
+                    raise ParserError("while parsing a flow sequence", self.marks[-1],
+                            "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+            if self.check_token(KeyToken):
+                token = self.peek_token()
+                event = MappingStartEvent(None, None, True,
+                        token.start_mark, token.end_mark,
+                        flow_style=True)
+                self.state = self.parse_flow_sequence_entry_mapping_key
+                return event
+            elif not self.check_token(FlowSequenceEndToken):
+                self.states.append(self.parse_flow_sequence_entry)
+                return self.parse_flow_node()
+        token = self.get_token()
+        event = SequenceEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_sequence_entry_mapping_key(self):
+        token = self.get_token()
+        if not self.check_token(ValueToken,
+                FlowEntryToken, FlowSequenceEndToken):
+            self.states.append(self.parse_flow_sequence_entry_mapping_value)
+            return self.parse_flow_node()
+        else:
+            self.state = self.parse_flow_sequence_entry_mapping_value
+            return self.process_empty_scalar(token.end_mark)
+
+    def parse_flow_sequence_entry_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+                self.states.append(self.parse_flow_sequence_entry_mapping_end)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_sequence_entry_mapping_end
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_sequence_entry_mapping_end
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_sequence_entry_mapping_end(self):
+        self.state = self.parse_flow_sequence_entry
+        token = self.peek_token()
+        return MappingEndEvent(token.start_mark, token.start_mark)
+
+    # flow_mapping  ::= FLOW-MAPPING-START
+    #                   (flow_mapping_entry FLOW-ENTRY)*
+    #                   flow_mapping_entry?
+    #                   FLOW-MAPPING-END
+    # flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+    def parse_flow_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_mapping_key(first=True)
+
+    def parse_flow_mapping_key(self, first=False):
+        if not self.check_token(FlowMappingEndToken):
+            if not first:
+                if self.check_token(FlowEntryToken):
+                    self.get_token()
+                else:
+                    token = self.peek_token()
+                    raise ParserError("while parsing a flow mapping", self.marks[-1],
+                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
+            if self.check_token(KeyToken):
+                token = self.get_token()
+                if not self.check_token(ValueToken,
+                        FlowEntryToken, FlowMappingEndToken):
+                    self.states.append(self.parse_flow_mapping_value)
+                    return self.parse_flow_node()
+                else:
+                    self.state = self.parse_flow_mapping_value
+                    return self.process_empty_scalar(token.end_mark)
+            elif not self.check_token(FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_empty_value)
+                return self.parse_flow_node()
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_key)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_mapping_empty_value(self):
+        self.state = self.parse_flow_mapping_key
+        return self.process_empty_scalar(self.peek_token().start_mark)
+
+    def process_empty_scalar(self, mark):
+        return ScalarEvent(None, None, (True, False), '', mark, mark)
+
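
The grammar productions quoted in the parser comments above (block mapping, flow sequence, flow mapping) describe the event stream this state machine emits. A rough, hedged illustration of that stream, assuming the vendored package imports as mncheck.ext.yaml (stock PyYAML's yaml.parse yields the same event classes):

# Illustrative sketch, not part of the patched sources: print the parser events
# for a small document.
from mncheck.ext import yaml

doc = """
outer:
  - a
  - {k: v}
"""
for event in yaml.parse(doc):
    print(type(event).__name__)
# StreamStartEvent, DocumentStartEvent, MappingStartEvent, ScalarEvent ('outer'),
# SequenceStartEvent, ScalarEvent ('a'), MappingStartEvent, ScalarEvent ('k'),
# ScalarEvent ('v'), MappingEndEvent, SequenceEndEvent, MappingEndEvent,
# DocumentEndEvent, StreamEndEvent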

+ 192 - 192
ext/yaml/reader.py → mncheck/ext/yaml/reader.py

@@ -1,192 +1,192 @@
-# This module contains abstractions for the input stream. You don't have to
-# look further; there is no pretty code here.
-#
-# We define two classes here.
-#
-#   Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-#   Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-#   reader.peek(index=0) - return the character `index` positions past the current one.
-#   reader.forward(length=1) - move the current position forward by `length` characters.
-#   reader.index - the index of the current character.
-#   reader.line, reader.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from .error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
-    def __init__(self, name, position, character, encoding, reason):
-        self.name = name
-        self.character = character
-        self.position = position
-        self.encoding = encoding
-        self.reason = reason
-
-    def __str__(self):
-        if isinstance(self.character, bytes):
-            return "'%s' codec can't decode byte #x%02x: %s\n"  \
-                    "  in \"%s\", position %d"    \
-                    % (self.encoding, ord(self.character), self.reason,
-                            self.name, self.position)
-        else:
-            return "unacceptable character #x%04x: %s\n"    \
-                    "  in \"%s\", position %d"    \
-                    % (self.character, self.reason,
-                            self.name, self.position)
-
-class Reader(object):
-    # Reader:
-    # - determines the data encoding and converts it to a unicode string,
-    # - checks if characters are in allowed range,
-    # - adds '\0' to the end.
-
-    # Reader accepts
-    #  - a `bytes` object,
-    #  - a `str` object,
-    #  - a file-like object with its `read` method returning `str`,
-    #  - a file-like object with its `read` method returning `unicode`.
-
-    # Yeah, it's ugly and slow.
-
-    def __init__(self, stream):
-        self.name = None
-        self.stream = None
-        self.stream_pointer = 0
-        self.eof = True
-        self.buffer = ''
-        self.pointer = 0
-        self.raw_buffer = None
-        self.raw_decode = None
-        self.encoding = None
-        self.index = 0
-        self.line = 0
-        self.column = 0
-        if isinstance(stream, str):
-            self.name = "<unicode string>"
-            self.check_printable(stream)
-            self.buffer = stream+'\0'
-        elif isinstance(stream, bytes):
-            self.name = "<byte string>"
-            self.raw_buffer = stream
-            self.determine_encoding()
-        else:
-            self.stream = stream
-            self.name = getattr(stream, 'name', "<file>")
-            self.eof = False
-            self.raw_buffer = None
-            self.determine_encoding()
-
-    def peek(self, index=0):
-        try:
-            return self.buffer[self.pointer+index]
-        except IndexError:
-            self.update(index+1)
-            return self.buffer[self.pointer+index]
-
-    def prefix(self, length=1):
-        if self.pointer+length >= len(self.buffer):
-            self.update(length)
-        return self.buffer[self.pointer:self.pointer+length]
-
-    def forward(self, length=1):
-        if self.pointer+length+1 >= len(self.buffer):
-            self.update(length+1)
-        while length:
-            ch = self.buffer[self.pointer]
-            self.pointer += 1
-            self.index += 1
-            if ch in '\n\x85\u2028\u2029'  \
-                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
-                self.line += 1
-                self.column = 0
-            elif ch != '\uFEFF':
-                self.column += 1
-            length -= 1
-
-    def get_mark(self):
-        if self.stream is None:
-            return Mark(self.name, self.index, self.line, self.column,
-                    self.buffer, self.pointer)
-        else:
-            return Mark(self.name, self.index, self.line, self.column,
-                    None, None)
-
-    def determine_encoding(self):
-        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
-            self.update_raw()
-        if isinstance(self.raw_buffer, bytes):
-            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
-                self.raw_decode = codecs.utf_16_le_decode
-                self.encoding = 'utf-16-le'
-            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
-                self.raw_decode = codecs.utf_16_be_decode
-                self.encoding = 'utf-16-be'
-            else:
-                self.raw_decode = codecs.utf_8_decode
-                self.encoding = 'utf-8'
-        self.update(1)
-
-    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
-    def check_printable(self, data):
-        match = self.NON_PRINTABLE.search(data)
-        if match:
-            character = match.group()
-            position = self.index+(len(self.buffer)-self.pointer)+match.start()
-            raise ReaderError(self.name, position, ord(character),
-                    'unicode', "special characters are not allowed")
-
-    def update(self, length):
-        if self.raw_buffer is None:
-            return
-        self.buffer = self.buffer[self.pointer:]
-        self.pointer = 0
-        while len(self.buffer) < length:
-            if not self.eof:
-                self.update_raw()
-            if self.raw_decode is not None:
-                try:
-                    data, converted = self.raw_decode(self.raw_buffer,
-                            'strict', self.eof)
-                except UnicodeDecodeError as exc:
-                    character = self.raw_buffer[exc.start]
-                    if self.stream is not None:
-                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
-                    else:
-                        position = exc.start
-                    raise ReaderError(self.name, position, character,
-                            exc.encoding, exc.reason)
-            else:
-                data = self.raw_buffer
-                converted = len(data)
-            self.check_printable(data)
-            self.buffer += data
-            self.raw_buffer = self.raw_buffer[converted:]
-            if self.eof:
-                self.buffer += '\0'
-                self.raw_buffer = None
-                break
-
-    def update_raw(self, size=4096):
-        data = self.stream.read(size)
-        if self.raw_buffer is None:
-            self.raw_buffer = data
-        else:
-            self.raw_buffer += data
-        self.stream_pointer += len(data)
-        if not data:
-            self.eof = True
-
-#try:
-#    import psyco
-#    psyco.bind(Reader)
-#except ImportError:
-#    pass
-
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+#   Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+#   Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+#   reader.peek(index=0) - return the character `index` positions past the current one.
+#   reader.forward(length=1) - move the current position forward by `length` characters.
+#   reader.index - the index of the current character.
+#   reader.line, reader.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+    def __init__(self, name, position, character, encoding, reason):
+        self.name = name
+        self.character = character
+        self.position = position
+        self.encoding = encoding
+        self.reason = reason
+
+    def __str__(self):
+        if isinstance(self.character, bytes):
+            return "'%s' codec can't decode byte #x%02x: %s\n"  \
+                    "  in \"%s\", position %d"    \
+                    % (self.encoding, ord(self.character), self.reason,
+                            self.name, self.position)
+        else:
+            return "unacceptable character #x%04x: %s\n"    \
+                    "  in \"%s\", position %d"    \
+                    % (self.character, self.reason,
+                            self.name, self.position)
+
+class Reader(object):
+    # Reader:
+    # - determines the data encoding and converts it to a unicode string,
+    # - checks if characters are in allowed range,
+    # - adds '\0' to the end.
+
+    # Reader accepts
+    #  - a `bytes` object,
+    #  - a `str` object,
+    #  - a file-like object with its `read` method returning `str`,
+    #  - a file-like object with its `read` method returning `unicode`.
+
+    # Yeah, it's ugly and slow.
+
+    def __init__(self, stream):
+        self.name = None
+        self.stream = None
+        self.stream_pointer = 0
+        self.eof = True
+        self.buffer = ''
+        self.pointer = 0
+        self.raw_buffer = None
+        self.raw_decode = None
+        self.encoding = None
+        self.index = 0
+        self.line = 0
+        self.column = 0
+        if isinstance(stream, str):
+            self.name = "<unicode string>"
+            self.check_printable(stream)
+            self.buffer = stream+'\0'
+        elif isinstance(stream, bytes):
+            self.name = "<byte string>"
+            self.raw_buffer = stream
+            self.determine_encoding()
+        else:
+            self.stream = stream
+            self.name = getattr(stream, 'name', "<file>")
+            self.eof = False
+            self.raw_buffer = None
+            self.determine_encoding()
+
+    def peek(self, index=0):
+        try:
+            return self.buffer[self.pointer+index]
+        except IndexError:
+            self.update(index+1)
+            return self.buffer[self.pointer+index]
+
+    def prefix(self, length=1):
+        if self.pointer+length >= len(self.buffer):
+            self.update(length)
+        return self.buffer[self.pointer:self.pointer+length]
+
+    def forward(self, length=1):
+        if self.pointer+length+1 >= len(self.buffer):
+            self.update(length+1)
+        while length:
+            ch = self.buffer[self.pointer]
+            self.pointer += 1
+            self.index += 1
+            if ch in '\n\x85\u2028\u2029'  \
+                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+                self.line += 1
+                self.column = 0
+            elif ch != '\uFEFF':
+                self.column += 1
+            length -= 1
+
+    def get_mark(self):
+        if self.stream is None:
+            return Mark(self.name, self.index, self.line, self.column,
+                    self.buffer, self.pointer)
+        else:
+            return Mark(self.name, self.index, self.line, self.column,
+                    None, None)
+
+    def determine_encoding(self):
+        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+            self.update_raw()
+        if isinstance(self.raw_buffer, bytes):
+            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+                self.raw_decode = codecs.utf_16_le_decode
+                self.encoding = 'utf-16-le'
+            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+                self.raw_decode = codecs.utf_16_be_decode
+                self.encoding = 'utf-16-be'
+            else:
+                self.raw_decode = codecs.utf_8_decode
+                self.encoding = 'utf-8'
+        self.update(1)
+
+    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+    def check_printable(self, data):
+        match = self.NON_PRINTABLE.search(data)
+        if match:
+            character = match.group()
+            position = self.index+(len(self.buffer)-self.pointer)+match.start()
+            raise ReaderError(self.name, position, ord(character),
+                    'unicode', "special characters are not allowed")
+
+    def update(self, length):
+        if self.raw_buffer is None:
+            return
+        self.buffer = self.buffer[self.pointer:]
+        self.pointer = 0
+        while len(self.buffer) < length:
+            if not self.eof:
+                self.update_raw()
+            if self.raw_decode is not None:
+                try:
+                    data, converted = self.raw_decode(self.raw_buffer,
+                            'strict', self.eof)
+                except UnicodeDecodeError as exc:
+                    character = self.raw_buffer[exc.start]
+                    if self.stream is not None:
+                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
+                    else:
+                        position = exc.start
+                    raise ReaderError(self.name, position, character,
+                            exc.encoding, exc.reason)
+            else:
+                data = self.raw_buffer
+                converted = len(data)
+            self.check_printable(data)
+            self.buffer += data
+            self.raw_buffer = self.raw_buffer[converted:]
+            if self.eof:
+                self.buffer += '\0'
+                self.raw_buffer = None
+                break
+
+    def update_raw(self, size=4096):
+        data = self.stream.read(size)
+        if self.raw_buffer is None:
+            self.raw_buffer = data
+        else:
+            self.raw_buffer += data
+        self.stream_pointer += len(data)
+        if not data:
+            self.eof = True
+
+#try:
+#    import psyco
+#    psyco.bind(Reader)
+#except ImportError:
+#    pass
+
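
The Reader above is the only layer that touches raw input: it detects the encoding (BOM sniffing for UTF-16, UTF-8 otherwise), rejects non-printable characters, and exposes peek/forward/get_mark to the scanner. A minimal usage sketch, assuming the vendored import path:

# Illustrative sketch, not part of the patched sources.
from mncheck.ext.yaml.reader import Reader

r = Reader("key: value\n")              # str input: printability checked, '\0' sentinel appended
print(r.peek())                          # 'k' -- current character, nothing consumed
r.forward(5)                             # advance past "key: ", updating index/line/column
print(r.peek(), r.index, r.column)       # 'v' 5 5
print(r.get_mark())                      # Mark carrying name/index/line/column for error messages

rb = Reader(b"\xff\xfek\x00e\x00y\x00")  # bytes input: the UTF-16-LE BOM selects the decoder
print(rb.encoding)                       # 'utf-16-le'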

+ 387 - 387
ext/yaml/representer.py → mncheck/ext/yaml/representer.py

@@ -1,387 +1,387 @@
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
-    'RepresenterError']
-
-from .error import *
-from .nodes import *
-
-import datetime, sys, copyreg, types, base64, collections
-
-class RepresenterError(YAMLError):
-    pass
-
-class BaseRepresenter:
-
-    yaml_representers = {}
-    yaml_multi_representers = {}
-
-    def __init__(self, default_style=None, default_flow_style=None):
-        self.default_style = default_style
-        self.default_flow_style = default_flow_style
-        self.represented_objects = {}
-        self.object_keeper = []
-        self.alias_key = None
-
-    def represent(self, data):
-        node = self.represent_data(data)
-        self.serialize(node)
-        self.represented_objects = {}
-        self.object_keeper = []
-        self.alias_key = None
-
-    def represent_data(self, data):
-        if self.ignore_aliases(data):
-            self.alias_key = None
-        else:
-            self.alias_key = id(data)
-        if self.alias_key is not None:
-            if self.alias_key in self.represented_objects:
-                node = self.represented_objects[self.alias_key]
-                #if node is None:
-                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
-                return node
-            #self.represented_objects[alias_key] = None
-            self.object_keeper.append(data)
-        data_types = type(data).__mro__
-        if data_types[0] in self.yaml_representers:
-            node = self.yaml_representers[data_types[0]](self, data)
-        else:
-            for data_type in data_types:
-                if data_type in self.yaml_multi_representers:
-                    node = self.yaml_multi_representers[data_type](self, data)
-                    break
-            else:
-                if None in self.yaml_multi_representers:
-                    node = self.yaml_multi_representers[None](self, data)
-                elif None in self.yaml_representers:
-                    node = self.yaml_representers[None](self, data)
-                else:
-                    node = ScalarNode(None, str(data))
-        #if alias_key is not None:
-        #    self.represented_objects[alias_key] = node
-        return node
-
-    @classmethod
-    def add_representer(cls, data_type, representer):
-        if not 'yaml_representers' in cls.__dict__:
-            cls.yaml_representers = cls.yaml_representers.copy()
-        cls.yaml_representers[data_type] = representer
-
-    @classmethod
-    def add_multi_representer(cls, data_type, representer):
-        if not 'yaml_multi_representers' in cls.__dict__:
-            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
-        cls.yaml_multi_representers[data_type] = representer
-
-    def represent_scalar(self, tag, value, style=None):
-        if style is None:
-            style = self.default_style
-        node = ScalarNode(tag, value, style=style)
-        if self.alias_key is not None:
-            self.represented_objects[self.alias_key] = node
-        return node
-
-    def represent_sequence(self, tag, sequence, flow_style=None):
-        value = []
-        node = SequenceNode(tag, value, flow_style=flow_style)
-        if self.alias_key is not None:
-            self.represented_objects[self.alias_key] = node
-        best_style = True
-        for item in sequence:
-            node_item = self.represent_data(item)
-            if not (isinstance(node_item, ScalarNode) and not node_item.style):
-                best_style = False
-            value.append(node_item)
-        if flow_style is None:
-            if self.default_flow_style is not None:
-                node.flow_style = self.default_flow_style
-            else:
-                node.flow_style = best_style
-        return node
-
-    def represent_mapping(self, tag, mapping, flow_style=None):
-        value = []
-        node = MappingNode(tag, value, flow_style=flow_style)
-        if self.alias_key is not None:
-            self.represented_objects[self.alias_key] = node
-        best_style = True
-        if hasattr(mapping, 'items'):
-            mapping = list(mapping.items())
-            try:
-                mapping = sorted(mapping)
-            except TypeError:
-                pass
-        for item_key, item_value in mapping:
-            node_key = self.represent_data(item_key)
-            node_value = self.represent_data(item_value)
-            if not (isinstance(node_key, ScalarNode) and not node_key.style):
-                best_style = False
-            if not (isinstance(node_value, ScalarNode) and not node_value.style):
-                best_style = False
-            value.append((node_key, node_value))
-        if flow_style is None:
-            if self.default_flow_style is not None:
-                node.flow_style = self.default_flow_style
-            else:
-                node.flow_style = best_style
-        return node
-
-    def ignore_aliases(self, data):
-        return False
-
-class SafeRepresenter(BaseRepresenter):
-
-    def ignore_aliases(self, data):
-        if data is None:
-            return True
-        if isinstance(data, tuple) and data == ():
-            return True
-        if isinstance(data, (str, bytes, bool, int, float)):
-            return True
-
-    def represent_none(self, data):
-        return self.represent_scalar('tag:yaml.org,2002:null', 'null')
-
-    def represent_str(self, data):
-        return self.represent_scalar('tag:yaml.org,2002:str', data)
-
-    def represent_binary(self, data):
-        if hasattr(base64, 'encodebytes'):
-            data = base64.encodebytes(data).decode('ascii')
-        else:
-            data = base64.encodestring(data).decode('ascii')
-        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
-
-    def represent_bool(self, data):
-        if data:
-            value = 'true'
-        else:
-            value = 'false'
-        return self.represent_scalar('tag:yaml.org,2002:bool', value)
-
-    def represent_int(self, data):
-        return self.represent_scalar('tag:yaml.org,2002:int', str(data))
-
-    inf_value = 1e300
-    while repr(inf_value) != repr(inf_value*inf_value):
-        inf_value *= inf_value
-
-    def represent_float(self, data):
-        if data != data or (data == 0.0 and data == 1.0):
-            value = '.nan'
-        elif data == self.inf_value:
-            value = '.inf'
-        elif data == -self.inf_value:
-            value = '-.inf'
-        else:
-            value = repr(data).lower()
-            # Note that in some cases `repr(data)` represents a float number
-            # without the decimal part.  For instance:
-            #   >>> repr(1e17)
-            #   '1e17'
-            # Unfortunately, this is not a valid float representation according
-            # to the definition of the `!!float` tag.  We fix this by adding
-            # '.0' before the 'e' symbol.
-            if '.' not in value and 'e' in value:
-                value = value.replace('e', '.0e', 1)
-        return self.represent_scalar('tag:yaml.org,2002:float', value)
-
-    def represent_list(self, data):
-        #pairs = (len(data) > 0 and isinstance(data, list))
-        #if pairs:
-        #    for item in data:
-        #        if not isinstance(item, tuple) or len(item) != 2:
-        #            pairs = False
-        #            break
-        #if not pairs:
-            return self.represent_sequence('tag:yaml.org,2002:seq', data)
-        #value = []
-        #for item_key, item_value in data:
-        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
-        #        [(item_key, item_value)]))
-        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
-    def represent_dict(self, data):
-        return self.represent_mapping('tag:yaml.org,2002:map', data)
-
-    def represent_set(self, data):
-        value = {}
-        for key in data:
-            value[key] = None
-        return self.represent_mapping('tag:yaml.org,2002:set', value)
-
-    def represent_date(self, data):
-        value = data.isoformat()
-        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
-    def represent_datetime(self, data):
-        value = data.isoformat(' ')
-        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
-    def represent_yaml_object(self, tag, data, cls, flow_style=None):
-        if hasattr(data, '__getstate__'):
-            state = data.__getstate__()
-        else:
-            state = data.__dict__.copy()
-        return self.represent_mapping(tag, state, flow_style=flow_style)
-
-    def represent_undefined(self, data):
-        raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
-        SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
-        SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(bytes,
-        SafeRepresenter.represent_binary)
-
-SafeRepresenter.add_representer(bool,
-        SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
-        SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(float,
-        SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
-        SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
-        SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
-        SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
-        SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
-        SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
-        SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
-        SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
-    def represent_complex(self, data):
-        if data.imag == 0.0:
-            data = '%r' % data.real
-        elif data.real == 0.0:
-            data = '%rj' % data.imag
-        elif data.imag > 0:
-            data = '%r+%rj' % (data.real, data.imag)
-        else:
-            data = '%r%rj' % (data.real, data.imag)
-        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
-
-    def represent_tuple(self, data):
-        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
-
-    def represent_name(self, data):
-        name = '%s.%s' % (data.__module__, data.__name__)
-        return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
-
-    def represent_module(self, data):
-        return self.represent_scalar(
-                'tag:yaml.org,2002:python/module:'+data.__name__, '')
-
-    def represent_object(self, data):
-        # We use __reduce__ API to save the data. data.__reduce__ returns
-        # a tuple of length 2-5:
-        #   (function, args, state, listitems, dictitems)
-
-        # For reconstructing, we call function(*args), then set its state,
-        # listitems, and dictitems if they are not None.
-
-        # A special case is when function.__name__ == '__newobj__'. In this
-        # case we create the object with args[0].__new__(*args).
-
-        # Another special case is when __reduce__ returns a string - we don't
-        # support it.
-
-        # We produce a !!python/object, !!python/object/new or
-        # !!python/object/apply node.
-
-        cls = type(data)
-        if cls in copyreg.dispatch_table:
-            reduce = copyreg.dispatch_table[cls](data)
-        elif hasattr(data, '__reduce_ex__'):
-            reduce = data.__reduce_ex__(2)
-        elif hasattr(data, '__reduce__'):
-            reduce = data.__reduce__()
-        else:
-            raise RepresenterError("cannot represent object: %r" % data)
-        reduce = (list(reduce)+[None]*5)[:5]
-        function, args, state, listitems, dictitems = reduce
-        args = list(args)
-        if state is None:
-            state = {}
-        if listitems is not None:
-            listitems = list(listitems)
-        if dictitems is not None:
-            dictitems = dict(dictitems)
-        if function.__name__ == '__newobj__':
-            function = args[0]
-            args = args[1:]
-            tag = 'tag:yaml.org,2002:python/object/new:'
-            newobj = True
-        else:
-            tag = 'tag:yaml.org,2002:python/object/apply:'
-            newobj = False
-        function_name = '%s.%s' % (function.__module__, function.__name__)
-        if not args and not listitems and not dictitems \
-                and isinstance(state, dict) and newobj:
-            return self.represent_mapping(
-                    'tag:yaml.org,2002:python/object:'+function_name, state)
-        if not listitems and not dictitems  \
-                and isinstance(state, dict) and not state:
-            return self.represent_sequence(tag+function_name, args)
-        value = {}
-        if args:
-            value['args'] = args
-        if state or not isinstance(state, dict):
-            value['state'] = state
-        if listitems:
-            value['listitems'] = listitems
-        if dictitems:
-            value['dictitems'] = dictitems
-        return self.represent_mapping(tag+function_name, value)
-
-    def represent_ordered_dict(self, data):
-        # Provide uniform representation across different Python versions.
-        data_type = type(data)
-        tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
-                % (data_type.__module__, data_type.__name__)
-        items = [[key, value] for key, value in data.items()]
-        return self.represent_sequence(tag, [items])
-
-Representer.add_representer(complex,
-        Representer.represent_complex)
-
-Representer.add_representer(tuple,
-        Representer.represent_tuple)
-
-Representer.add_representer(type,
-        Representer.represent_name)
-
-Representer.add_representer(collections.OrderedDict,
-        Representer.represent_ordered_dict)
-
-Representer.add_representer(types.FunctionType,
-        Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
-        Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
-        Representer.represent_module)
-
-Representer.add_multi_representer(object,
-        Representer.represent_object)
-
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+    'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, sys, copyreg, types, base64, collections
+
+class RepresenterError(YAMLError):
+    pass
+
+class BaseRepresenter:
+
+    yaml_representers = {}
+    yaml_multi_representers = {}
+
+    def __init__(self, default_style=None, default_flow_style=None):
+        self.default_style = default_style
+        self.default_flow_style = default_flow_style
+        self.represented_objects = {}
+        self.object_keeper = []
+        self.alias_key = None
+
+    def represent(self, data):
+        node = self.represent_data(data)
+        self.serialize(node)
+        self.represented_objects = {}
+        self.object_keeper = []
+        self.alias_key = None
+
+    def represent_data(self, data):
+        if self.ignore_aliases(data):
+            self.alias_key = None
+        else:
+            self.alias_key = id(data)
+        if self.alias_key is not None:
+            if self.alias_key in self.represented_objects:
+                node = self.represented_objects[self.alias_key]
+                #if node is None:
+                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
+                return node
+            #self.represented_objects[alias_key] = None
+            self.object_keeper.append(data)
+        data_types = type(data).__mro__
+        if data_types[0] in self.yaml_representers:
+            node = self.yaml_representers[data_types[0]](self, data)
+        else:
+            for data_type in data_types:
+                if data_type in self.yaml_multi_representers:
+                    node = self.yaml_multi_representers[data_type](self, data)
+                    break
+            else:
+                if None in self.yaml_multi_representers:
+                    node = self.yaml_multi_representers[None](self, data)
+                elif None in self.yaml_representers:
+                    node = self.yaml_representers[None](self, data)
+                else:
+                    node = ScalarNode(None, str(data))
+        #if alias_key is not None:
+        #    self.represented_objects[alias_key] = node
+        return node
+
+    @classmethod
+    def add_representer(cls, data_type, representer):
+        if not 'yaml_representers' in cls.__dict__:
+            cls.yaml_representers = cls.yaml_representers.copy()
+        cls.yaml_representers[data_type] = representer
+
+    @classmethod
+    def add_multi_representer(cls, data_type, representer):
+        if not 'yaml_multi_representers' in cls.__dict__:
+            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+        cls.yaml_multi_representers[data_type] = representer
+
+    def represent_scalar(self, tag, value, style=None):
+        if style is None:
+            style = self.default_style
+        node = ScalarNode(tag, value, style=style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        return node
+
+    def represent_sequence(self, tag, sequence, flow_style=None):
+        value = []
+        node = SequenceNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        for item in sequence:
+            node_item = self.represent_data(item)
+            if not (isinstance(node_item, ScalarNode) and not node_item.style):
+                best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_mapping(self, tag, mapping, flow_style=None):
+        value = []
+        node = MappingNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        if hasattr(mapping, 'items'):
+            mapping = list(mapping.items())
+            try:
+                mapping = sorted(mapping)
+            except TypeError:
+                pass
+        for item_key, item_value in mapping:
+            node_key = self.represent_data(item_key)
+            node_value = self.represent_data(item_value)
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def ignore_aliases(self, data):
+        return False
+
+class SafeRepresenter(BaseRepresenter):
+
+    def ignore_aliases(self, data):
+        if data is None:
+            return True
+        if isinstance(data, tuple) and data == ():
+            return True
+        if isinstance(data, (str, bytes, bool, int, float)):
+            return True
+
+    def represent_none(self, data):
+        return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+    def represent_str(self, data):
+        return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+    def represent_binary(self, data):
+        if hasattr(base64, 'encodebytes'):
+            data = base64.encodebytes(data).decode('ascii')
+        else:
+            data = base64.encodestring(data).decode('ascii')
+        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+    def represent_bool(self, data):
+        if data:
+            value = 'true'
+        else:
+            value = 'false'
+        return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+    def represent_int(self, data):
+        return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+    inf_value = 1e300
+    while repr(inf_value) != repr(inf_value*inf_value):
+        inf_value *= inf_value
+
+    def represent_float(self, data):
+        if data != data or (data == 0.0 and data == 1.0):
+            value = '.nan'
+        elif data == self.inf_value:
+            value = '.inf'
+        elif data == -self.inf_value:
+            value = '-.inf'
+        else:
+            value = repr(data).lower()
+            # Note that in some cases `repr(data)` represents a float number
+            # without the decimal part.  For instance:
+            #   >>> repr(1e17)
+            #   '1e17'
+            # Unfortunately, this is not a valid float representation according
+            # to the definition of the `!!float` tag.  We fix this by adding
+            # '.0' before the 'e' symbol.
+            if '.' not in value and 'e' in value:
+                value = value.replace('e', '.0e', 1)
+        return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+    def represent_list(self, data):
+        #pairs = (len(data) > 0 and isinstance(data, list))
+        #if pairs:
+        #    for item in data:
+        #        if not isinstance(item, tuple) or len(item) != 2:
+        #            pairs = False
+        #            break
+        #if not pairs:
+            return self.represent_sequence('tag:yaml.org,2002:seq', data)
+        #value = []
+        #for item_key, item_value in data:
+        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+        #        [(item_key, item_value)]))
+        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+    def represent_dict(self, data):
+        return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+    def represent_set(self, data):
+        value = {}
+        for key in data:
+            value[key] = None
+        return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+    def represent_date(self, data):
+        value = data.isoformat()
+        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+    def represent_datetime(self, data):
+        value = data.isoformat(' ')
+        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+    def represent_yaml_object(self, tag, data, cls, flow_style=None):
+        if hasattr(data, '__getstate__'):
+            state = data.__getstate__()
+        else:
+            state = data.__dict__.copy()
+        return self.represent_mapping(tag, state, flow_style=flow_style)
+
+    def represent_undefined(self, data):
+        raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+        SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+        SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+        SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+        SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+        SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+        SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+        SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+        SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+        SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+        SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+        SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+        SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+        SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+    def represent_complex(self, data):
+        if data.imag == 0.0:
+            data = '%r' % data.real
+        elif data.real == 0.0:
+            data = '%rj' % data.imag
+        elif data.imag > 0:
+            data = '%r+%rj' % (data.real, data.imag)
+        else:
+            data = '%r%rj' % (data.real, data.imag)
+        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+    def represent_tuple(self, data):
+        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+    def represent_name(self, data):
+        name = '%s.%s' % (data.__module__, data.__name__)
+        return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+    def represent_module(self, data):
+        return self.represent_scalar(
+                'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+    def represent_object(self, data):
+        # We use __reduce__ API to save the data. data.__reduce__ returns
+        # a tuple of length 2-5:
+        #   (function, args, state, listitems, dictitems)
+
+        # For reconstructing, we call function(*args), then set its state,
+        # listitems, and dictitems if they are not None.
+
+        # A special case is when function.__name__ == '__newobj__'. In this
+        # case we create the object with args[0].__new__(*args).
+
+        # Another special case is when __reduce__ returns a string - we don't
+        # support it.
+
+        # We produce a !!python/object, !!python/object/new or
+        # !!python/object/apply node.
+
+        cls = type(data)
+        if cls in copyreg.dispatch_table:
+            reduce = copyreg.dispatch_table[cls](data)
+        elif hasattr(data, '__reduce_ex__'):
+            reduce = data.__reduce_ex__(2)
+        elif hasattr(data, '__reduce__'):
+            reduce = data.__reduce__()
+        else:
+            raise RepresenterError("cannot represent object: %r" % data)
+        reduce = (list(reduce)+[None]*5)[:5]
+        function, args, state, listitems, dictitems = reduce
+        args = list(args)
+        if state is None:
+            state = {}
+        if listitems is not None:
+            listitems = list(listitems)
+        if dictitems is not None:
+            dictitems = dict(dictitems)
+        if function.__name__ == '__newobj__':
+            function = args[0]
+            args = args[1:]
+            tag = 'tag:yaml.org,2002:python/object/new:'
+            newobj = True
+        else:
+            tag = 'tag:yaml.org,2002:python/object/apply:'
+            newobj = False
+        function_name = '%s.%s' % (function.__module__, function.__name__)
+        if not args and not listitems and not dictitems \
+                and isinstance(state, dict) and newobj:
+            return self.represent_mapping(
+                    'tag:yaml.org,2002:python/object:'+function_name, state)
+        if not listitems and not dictitems  \
+                and isinstance(state, dict) and not state:
+            return self.represent_sequence(tag+function_name, args)
+        value = {}
+        if args:
+            value['args'] = args
+        if state or not isinstance(state, dict):
+            value['state'] = state
+        if listitems:
+            value['listitems'] = listitems
+        if dictitems:
+            value['dictitems'] = dictitems
+        return self.represent_mapping(tag+function_name, value)
+
+    def represent_ordered_dict(self, data):
+        # Provide uniform representation across different Python versions.
+        data_type = type(data)
+        tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
+                % (data_type.__module__, data_type.__name__)
+        items = [[key, value] for key, value in data.items()]
+        return self.represent_sequence(tag, [items])
+
+Representer.add_representer(complex,
+        Representer.represent_complex)
+
+Representer.add_representer(tuple,
+        Representer.represent_tuple)
+
+Representer.add_representer(type,
+        Representer.represent_name)
+
+Representer.add_representer(collections.OrderedDict,
+        Representer.represent_ordered_dict)
+
+Representer.add_representer(types.FunctionType,
+        Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+        Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+        Representer.represent_module)
+
+Representer.add_multi_representer(object,
+        Representer.represent_object)
+
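
add_representer/add_multi_representer above are the hooks through which custom types become nodes. A hedged sketch, where the Point class and represent_point function are invented for illustration (the module-level add_representer and dump helpers come from the package's __init__, the same API as stock PyYAML):

# Illustrative sketch, not part of the patched sources; Point and represent_point
# are made up for this example.
from mncheck.ext import yaml

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def represent_point(dumper, data):
    # dumper is a (Safe)Representer instance; emit the object as a tagged mapping
    return dumper.represent_mapping('!point', {'x': data.x, 'y': data.y})

yaml.add_representer(Point, represent_point)
print(yaml.dump(Point(1, 2)))
# e.g. "!point {x: 1, y: 2}" -- block vs. flow layout depends on default_flow_style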

+ 227 - 227
ext/yaml/resolver.py → mncheck/ext/yaml/resolver.py

@@ -1,227 +1,227 @@
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from .error import *
-from .nodes import *
-
-import re
-
-class ResolverError(YAMLError):
-    pass
-
-class BaseResolver:
-
-    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
-    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
-    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
-
-    yaml_implicit_resolvers = {}
-    yaml_path_resolvers = {}
-
-    def __init__(self):
-        self.resolver_exact_paths = []
-        self.resolver_prefix_paths = []
-
-    @classmethod
-    def add_implicit_resolver(cls, tag, regexp, first):
-        if not 'yaml_implicit_resolvers' in cls.__dict__:
-            implicit_resolvers = {}
-            for key in cls.yaml_implicit_resolvers:
-                implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
-            cls.yaml_implicit_resolvers = implicit_resolvers
-        if first is None:
-            first = [None]
-        for ch in first:
-            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
-
-    @classmethod
-    def add_path_resolver(cls, tag, path, kind=None):
-        # Note: `add_path_resolver` is experimental.  The API could be changed.
-        # `path` is a pattern that is matched against the path from the
-        # root to the node that is being considered.  `path` elements are
-        # tuples `(node_check, index_check)`.  `node_check` is a node class:
-        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
-        # matches any kind of a node.  `index_check` could be `None`, a boolean
-        # value, a string value, or a number.  `None` and `False` match against
-        # any _value_ of sequence and mapping nodes.  `True` matches against
-        # any _key_ of a mapping node.  A string `index_check` matches against
-        # a mapping value that corresponds to a scalar key whose content is
-        # equal to the `index_check` value.  An integer `index_check` matches
-        # against a sequence value with the index equal to `index_check`.
-        if not 'yaml_path_resolvers' in cls.__dict__:
-            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
-        new_path = []
-        for element in path:
-            if isinstance(element, (list, tuple)):
-                if len(element) == 2:
-                    node_check, index_check = element
-                elif len(element) == 1:
-                    node_check = element[0]
-                    index_check = True
-                else:
-                    raise ResolverError("Invalid path element: %s" % element)
-            else:
-                node_check = None
-                index_check = element
-            if node_check is str:
-                node_check = ScalarNode
-            elif node_check is list:
-                node_check = SequenceNode
-            elif node_check is dict:
-                node_check = MappingNode
-            elif node_check not in [ScalarNode, SequenceNode, MappingNode]  \
-                    and not isinstance(node_check, str) \
-                    and node_check is not None:
-                raise ResolverError("Invalid node checker: %s" % node_check)
-            if not isinstance(index_check, (str, int))  \
-                    and index_check is not None:
-                raise ResolverError("Invalid index checker: %s" % index_check)
-            new_path.append((node_check, index_check))
-        if kind is str:
-            kind = ScalarNode
-        elif kind is list:
-            kind = SequenceNode
-        elif kind is dict:
-            kind = MappingNode
-        elif kind not in [ScalarNode, SequenceNode, MappingNode]    \
-                and kind is not None:
-            raise ResolverError("Invalid node kind: %s" % kind)
-        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
-
-    def descend_resolver(self, current_node, current_index):
-        if not self.yaml_path_resolvers:
-            return
-        exact_paths = {}
-        prefix_paths = []
-        if current_node:
-            depth = len(self.resolver_prefix_paths)
-            for path, kind in self.resolver_prefix_paths[-1]:
-                if self.check_resolver_prefix(depth, path, kind,
-                        current_node, current_index):
-                    if len(path) > depth:
-                        prefix_paths.append((path, kind))
-                    else:
-                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
-        else:
-            for path, kind in self.yaml_path_resolvers:
-                if not path:
-                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
-                else:
-                    prefix_paths.append((path, kind))
-        self.resolver_exact_paths.append(exact_paths)
-        self.resolver_prefix_paths.append(prefix_paths)
-
-    def ascend_resolver(self):
-        if not self.yaml_path_resolvers:
-            return
-        self.resolver_exact_paths.pop()
-        self.resolver_prefix_paths.pop()
-
-    def check_resolver_prefix(self, depth, path, kind,
-            current_node, current_index):
-        node_check, index_check = path[depth-1]
-        if isinstance(node_check, str):
-            if current_node.tag != node_check:
-                return
-        elif node_check is not None:
-            if not isinstance(current_node, node_check):
-                return
-        if index_check is True and current_index is not None:
-            return
-        if (index_check is False or index_check is None)    \
-                and current_index is None:
-            return
-        if isinstance(index_check, str):
-            if not (isinstance(current_index, ScalarNode)
-                    and index_check == current_index.value):
-                return
-        elif isinstance(index_check, int) and not isinstance(index_check, bool):
-            if index_check != current_index:
-                return
-        return True
-
-    def resolve(self, kind, value, implicit):
-        if kind is ScalarNode and implicit[0]:
-            if value == '':
-                resolvers = self.yaml_implicit_resolvers.get('', [])
-            else:
-                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
-            resolvers += self.yaml_implicit_resolvers.get(None, [])
-            for tag, regexp in resolvers:
-                if regexp.match(value):
-                    return tag
-            implicit = implicit[1]
-        if self.yaml_path_resolvers:
-            exact_paths = self.resolver_exact_paths[-1]
-            if kind in exact_paths:
-                return exact_paths[kind]
-            if None in exact_paths:
-                return exact_paths[None]
-        if kind is ScalarNode:
-            return self.DEFAULT_SCALAR_TAG
-        elif kind is SequenceNode:
-            return self.DEFAULT_SEQUENCE_TAG
-        elif kind is MappingNode:
-            return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
-    pass
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:bool',
-        re.compile(r'''^(?:yes|Yes|YES|no|No|NO
-                    |true|True|TRUE|false|False|FALSE
-                    |on|On|ON|off|Off|OFF)$''', re.X),
-        list('yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:float',
-        re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
-                    |\.[0-9_]+(?:[eE][-+][0-9]+)?
-                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
-                    |[-+]?\.(?:inf|Inf|INF)
-                    |\.(?:nan|NaN|NAN))$''', re.X),
-        list('-+0123456789.'))
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:int',
-        re.compile(r'''^(?:[-+]?0b[0-1_]+
-                    |[-+]?0[0-7_]+
-                    |[-+]?(?:0|[1-9][0-9_]*)
-                    |[-+]?0x[0-9a-fA-F_]+
-                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
-        list('-+0123456789'))
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:merge',
-        re.compile(r'^(?:<<)$'),
-        ['<'])
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:null',
-        re.compile(r'''^(?: ~
-                    |null|Null|NULL
-                    | )$''', re.X),
-        ['~', 'n', 'N', ''])
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:timestamp',
-        re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
-                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
-                     (?:[Tt]|[ \t]+)[0-9][0-9]?
-                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
-                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
-        list('0123456789'))
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:value',
-        re.compile(r'^(?:=)$'),
-        ['='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:yaml',
-        re.compile(r'^(?:!|&|\*)$'),
-        list('!&*'))
-
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+    pass
+
+class BaseResolver:
+
+    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+    yaml_implicit_resolvers = {}
+    yaml_path_resolvers = {}
+
+    def __init__(self):
+        self.resolver_exact_paths = []
+        self.resolver_prefix_paths = []
+
+    @classmethod
+    def add_implicit_resolver(cls, tag, regexp, first):
+        if not 'yaml_implicit_resolvers' in cls.__dict__:
+            implicit_resolvers = {}
+            for key in cls.yaml_implicit_resolvers:
+                implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
+            cls.yaml_implicit_resolvers = implicit_resolvers
+        if first is None:
+            first = [None]
+        for ch in first:
+            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+    @classmethod
+    def add_path_resolver(cls, tag, path, kind=None):
+        # Note: `add_path_resolver` is experimental.  The API could change.
+        # `path` is a pattern that is matched against the path from the
+        # root to the node that is being considered.  `path` elements are
+        # tuples `(node_check, index_check)`.  `node_check` is a node class:
+        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
+        # matches any kind of node.  `index_check` may be `None`, a boolean
+        # value, a string value, or a number.  `None` and `False` match
+        # against any _value_ of sequence and mapping nodes.  `True` matches
+        # against any _key_ of a mapping node.  A string `index_check`
+        # matches a mapping value whose scalar key has content equal to the
+        # `index_check` value.  An integer `index_check` matches a sequence
+        # value at the index equal to `index_check`.
+        if not 'yaml_path_resolvers' in cls.__dict__:
+            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+        new_path = []
+        for element in path:
+            if isinstance(element, (list, tuple)):
+                if len(element) == 2:
+                    node_check, index_check = element
+                elif len(element) == 1:
+                    node_check = element[0]
+                    index_check = True
+                else:
+                    raise ResolverError("Invalid path element: %s" % element)
+            else:
+                node_check = None
+                index_check = element
+            if node_check is str:
+                node_check = ScalarNode
+            elif node_check is list:
+                node_check = SequenceNode
+            elif node_check is dict:
+                node_check = MappingNode
+            elif node_check not in [ScalarNode, SequenceNode, MappingNode]  \
+                    and not isinstance(node_check, str) \
+                    and node_check is not None:
+                raise ResolverError("Invalid node checker: %s" % node_check)
+            if not isinstance(index_check, (str, int))  \
+                    and index_check is not None:
+                raise ResolverError("Invalid index checker: %s" % index_check)
+            new_path.append((node_check, index_check))
+        if kind is str:
+            kind = ScalarNode
+        elif kind is list:
+            kind = SequenceNode
+        elif kind is dict:
+            kind = MappingNode
+        elif kind not in [ScalarNode, SequenceNode, MappingNode]    \
+                and kind is not None:
+            raise ResolverError("Invalid node kind: %s" % kind)
+        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+    def descend_resolver(self, current_node, current_index):
+        if not self.yaml_path_resolvers:
+            return
+        exact_paths = {}
+        prefix_paths = []
+        if current_node:
+            depth = len(self.resolver_prefix_paths)
+            for path, kind in self.resolver_prefix_paths[-1]:
+                if self.check_resolver_prefix(depth, path, kind,
+                        current_node, current_index):
+                    if len(path) > depth:
+                        prefix_paths.append((path, kind))
+                    else:
+                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+        else:
+            for path, kind in self.yaml_path_resolvers:
+                if not path:
+                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+                else:
+                    prefix_paths.append((path, kind))
+        self.resolver_exact_paths.append(exact_paths)
+        self.resolver_prefix_paths.append(prefix_paths)
+
+    def ascend_resolver(self):
+        if not self.yaml_path_resolvers:
+            return
+        self.resolver_exact_paths.pop()
+        self.resolver_prefix_paths.pop()
+
+    def check_resolver_prefix(self, depth, path, kind,
+            current_node, current_index):
+        node_check, index_check = path[depth-1]
+        if isinstance(node_check, str):
+            if current_node.tag != node_check:
+                return
+        elif node_check is not None:
+            if not isinstance(current_node, node_check):
+                return
+        if index_check is True and current_index is not None:
+            return
+        if (index_check is False or index_check is None)    \
+                and current_index is None:
+            return
+        if isinstance(index_check, str):
+            if not (isinstance(current_index, ScalarNode)
+                    and index_check == current_index.value):
+                return
+        elif isinstance(index_check, int) and not isinstance(index_check, bool):
+            if index_check != current_index:
+                return
+        return True
+
+    def resolve(self, kind, value, implicit):
+        if kind is ScalarNode and implicit[0]:
+            if value == '':
+                resolvers = self.yaml_implicit_resolvers.get('', [])
+            else:
+                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+            resolvers += self.yaml_implicit_resolvers.get(None, [])
+            for tag, regexp in resolvers:
+                if regexp.match(value):
+                    return tag
+            implicit = implicit[1]
+        if self.yaml_path_resolvers:
+            exact_paths = self.resolver_exact_paths[-1]
+            if kind in exact_paths:
+                return exact_paths[kind]
+            if None in exact_paths:
+                return exact_paths[None]
+        if kind is ScalarNode:
+            return self.DEFAULT_SCALAR_TAG
+        elif kind is SequenceNode:
+            return self.DEFAULT_SEQUENCE_TAG
+        elif kind is MappingNode:
+            return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+    pass
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:bool',
+        re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+                    |true|True|TRUE|false|False|FALSE
+                    |on|On|ON|off|Off|OFF)$''', re.X),
+        list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:float',
+        re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+                    |\.[0-9_]+(?:[eE][-+][0-9]+)?
+                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+                    |[-+]?\.(?:inf|Inf|INF)
+                    |\.(?:nan|NaN|NAN))$''', re.X),
+        list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:int',
+        re.compile(r'''^(?:[-+]?0b[0-1_]+
+                    |[-+]?0[0-7_]+
+                    |[-+]?(?:0|[1-9][0-9_]*)
+                    |[-+]?0x[0-9a-fA-F_]+
+                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+        list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:merge',
+        re.compile(r'^(?:<<)$'),
+        ['<'])
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:null',
+        re.compile(r'''^(?: ~
+                    |null|Null|NULL
+                    | )$''', re.X),
+        ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:timestamp',
+        re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+                     (?:[Tt]|[ \t]+)[0-9][0-9]?
+                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+        list('0123456789'))
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:value',
+        re.compile(r'^(?:=)$'),
+        ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:yaml',
+        re.compile(r'^(?:!|&|\*)$'),
+        list('!&*'))
+
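For reviewers skimming the move: the implicit resolvers registered above are what map untagged plain scalars to tags, keyed by the scalar's first character (with a catch-all bucket under `None`). A minimal sketch of that behaviour, not part of the commit, assuming the vendored package is importable as `mncheck.ext.yaml` after the rename:

from mncheck.ext.yaml.nodes import ScalarNode
from mncheck.ext.yaml.resolver import Resolver

r = Resolver()
# implicit=(True, False) marks a plain, untagged scalar; the implicit
# resolvers run first and unmatched values fall back to the str tag.
print(r.resolve(ScalarNode, '42', (True, False)))     # tag:yaml.org,2002:int
print(r.resolve(ScalarNode, 'off', (True, False)))    # tag:yaml.org,2002:bool
print(r.resolve(ScalarNode, 'hello', (True, False)))  # tag:yaml.org,2002:str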

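`add_path_resolver` (flagged as experimental in the comment above) instead assigns a tag based on where a node sits in the document tree, with `descend_resolver`/`ascend_resolver` maintaining the current path during composition. A rough sketch of the mechanics under the same import-path assumption, using a made-up tag name:

from mncheck.ext.yaml.nodes import MappingNode, ScalarNode
from mncheck.ext.yaml.resolver import Resolver

class PathResolver(Resolver):
    pass

# Hypothetical rule: the value under the top-level key 'version' gets a
# custom scalar tag (the tag name is invented for this example).
PathResolver.add_path_resolver('tag:example.com,2019:version',
                               [(dict, 'version')], str)

r = PathResolver()
root = MappingNode('tag:yaml.org,2002:map', [])
key = ScalarNode('tag:yaml.org,2002:str', 'version')
r.descend_resolver(None, None)   # enter the document root
r.descend_resolver(root, key)    # enter the value of the 'version' key
# A quoted (non-plain) scalar skips the implicit resolvers, so the path
# rule decides the tag here.
print(r.resolve(ScalarNode, '1.2', (False, False)))   # tag:example.com,2019:version
r.ascend_resolver()
r.ascend_resolver()

Registering on a subclass keeps the rule from leaking into the shared Resolver class, since add_path_resolver mutates class-level state.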
+ 1444 - 1444
ext/yaml/scanner.py → mncheck/ext/yaml/scanner.py

@@ -1,1444 +1,1444 @@
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-
-class ScannerError(MarkedYAMLError):
-    pass
-
-class SimpleKey:
-    # See below simple keys treatment.
-
-    def __init__(self, token_number, required, index, line, column, mark):
-        self.token_number = token_number
-        self.required = required
-        self.index = index
-        self.line = line
-        self.column = column
-        self.mark = mark
-
-class Scanner:
-
-    def __init__(self):
-        """Initialize the scanner."""
-        # It is assumed that Scanner and Reader will have a common descendant.
-        # Reader does the dirty work of checking for BOM and converting the
-        # input data to Unicode. It also adds NUL to the end.
-        #
-        # Reader supports the following methods
-        #   self.peek(i=0)       # peek the next i-th character
-        #   self.prefix(l=1)     # peek the next l characters
-        #   self.forward(l=1)    # read the next l characters and move the pointer.
-
-        # Have we reached the end of the stream?
-        self.done = False
-
-        # The number of unclosed '{' and '['. `flow_level == 0` means block
-        # context.
-        self.flow_level = 0
-
-        # List of processed tokens that are not yet emitted.
-        self.tokens = []
-
-        # Add the STREAM-START token.
-        self.fetch_stream_start()
-
-        # Number of tokens that were emitted through the `get_token` method.
-        self.tokens_taken = 0
-
-        # The current indentation level.
-        self.indent = -1
-
-        # Past indentation levels.
-        self.indents = []
-
-        # Variables related to simple keys treatment.
-
-        # A simple key is a key that is not denoted by the '?' indicator.
-        # Example of simple keys:
-        #   ---
-        #   block simple key: value
-        #   ? not a simple key:
-        #   : { flow simple key: value }
-        # We emit the KEY token before all keys, so when we find a potential
-        # simple key, we try to locate the corresponding ':' indicator.
-        # Simple keys should be limited to a single line and 1024 characters.
-
-        # Can a simple key start at the current position? A simple key may
-        # start:
-        # - at the beginning of the line, not counting indentation spaces
-        #       (in block context),
-        # - after '{', '[', ',' (in the flow context),
-        # - after '?', ':', '-' (in the block context).
-        # In the block context, this flag also signifies if a block collection
-        # may start at the current position.
-        self.allow_simple_key = True
-
-        # Keep track of possible simple keys. This is a dictionary. The key
-        # is `flow_level`; there can be no more than one possible simple key
-        # for each level. The value is a SimpleKey record:
-        #   (token_number, required, index, line, column, mark)
-        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
-        # '[', or '{' tokens.
-        self.possible_simple_keys = {}
-
-    # Public methods.
-
-    def check_token(self, *choices):
-        # Check if the next token is one of the given types.
-        while self.need_more_tokens():
-            self.fetch_more_tokens()
-        if self.tokens:
-            if not choices:
-                return True
-            for choice in choices:
-                if isinstance(self.tokens[0], choice):
-                    return True
-        return False
-
-    def peek_token(self):
-        # Return the next token, but do not delete it from the queue.
-        while self.need_more_tokens():
-            self.fetch_more_tokens()
-        if self.tokens:
-            return self.tokens[0]
-
-    def get_token(self):
-        # Return the next token.
-        while self.need_more_tokens():
-            self.fetch_more_tokens()
-        if self.tokens:
-            self.tokens_taken += 1
-            return self.tokens.pop(0)
-
-    # Private methods.
-
-    def need_more_tokens(self):
-        if self.done:
-            return False
-        if not self.tokens:
-            return True
-        # The current token may be a potential simple key, so we
-        # need to look further.
-        self.stale_possible_simple_keys()
-        if self.next_possible_simple_key() == self.tokens_taken:
-            return True
-
-    def fetch_more_tokens(self):
-
-        # Eat whitespaces and comments until we reach the next token.
-        self.scan_to_next_token()
-
-        # Remove obsolete possible simple keys.
-        self.stale_possible_simple_keys()
-
-        # Compare the current indentation and column. It may add some tokens
-        # and decrease the current indentation level.
-        self.unwind_indent(self.column)
-
-        # Peek the next character.
-        ch = self.peek()
-
-        # Is it the end of stream?
-        if ch == '\0':
-            return self.fetch_stream_end()
-
-        # Is it a directive?
-        if ch == '%' and self.check_directive():
-            return self.fetch_directive()
-
-        # Is it the document start?
-        if ch == '-' and self.check_document_start():
-            return self.fetch_document_start()
-
-        # Is it the document end?
-        if ch == '.' and self.check_document_end():
-            return self.fetch_document_end()
-
-        # TODO: support for BOM within a stream.
-        #if ch == '\uFEFF':
-        #    return self.fetch_bom()    <-- issue BOMToken
-
-        # Note: the order of the following checks is NOT significant.
-
-        # Is it the flow sequence start indicator?
-        if ch == '[':
-            return self.fetch_flow_sequence_start()
-
-        # Is it the flow mapping start indicator?
-        if ch == '{':
-            return self.fetch_flow_mapping_start()
-
-        # Is it the flow sequence end indicator?
-        if ch == ']':
-            return self.fetch_flow_sequence_end()
-
-        # Is it the flow mapping end indicator?
-        if ch == '}':
-            return self.fetch_flow_mapping_end()
-
-        # Is it the flow entry indicator?
-        if ch == ',':
-            return self.fetch_flow_entry()
-
-        # Is it the block entry indicator?
-        if ch == '-' and self.check_block_entry():
-            return self.fetch_block_entry()
-
-        # Is it the key indicator?
-        if ch == '?' and self.check_key():
-            return self.fetch_key()
-
-        # Is it the value indicator?
-        if ch == ':' and self.check_value():
-            return self.fetch_value()
-
-        # Is it an alias?
-        if ch == '*':
-            return self.fetch_alias()
-
-        # Is it an anchor?
-        if ch == '&':
-            return self.fetch_anchor()
-
-        # Is it a tag?
-        if ch == '!':
-            return self.fetch_tag()
-
-        # Is it a literal scalar?
-        if ch == '|' and not self.flow_level:
-            return self.fetch_literal()
-
-        # Is it a folded scalar?
-        if ch == '>' and not self.flow_level:
-            return self.fetch_folded()
-
-        # Is it a single quoted scalar?
-        if ch == '\'':
-            return self.fetch_single()
-
-        # Is it a double quoted scalar?
-        if ch == '\"':
-            return self.fetch_double()
-
-        # It must be a plain scalar then.
-        if self.check_plain():
-            return self.fetch_plain()
-
-        # No? It's an error. Let's produce a nice error message.
-        raise ScannerError("while scanning for the next token", None,
-                "found character %r that cannot start any token" % ch,
-                self.get_mark())
-
-    # Simple keys treatment.
-
-    def next_possible_simple_key(self):
-        # Return the number of the nearest possible simple key. Actually we
-        # don't need to loop through the whole dictionary. We may replace it
-        # with the following code:
-        #   if not self.possible_simple_keys:
-        #       return None
-        #   return self.possible_simple_keys[
-        #           min(self.possible_simple_keys.keys())].token_number
-        min_token_number = None
-        for level in self.possible_simple_keys:
-            key = self.possible_simple_keys[level]
-            if min_token_number is None or key.token_number < min_token_number:
-                min_token_number = key.token_number
-        return min_token_number
-
-    def stale_possible_simple_keys(self):
-        # Remove entries that are no longer possible simple keys. According to
-        # the YAML specification, simple keys
-        # - should be limited to a single line,
-        # - should be no longer than 1024 characters.
-        # Disabling this procedure will allow simple keys of any length and
-        # height (may cause problems if indentation is broken though).
-        for level in list(self.possible_simple_keys):
-            key = self.possible_simple_keys[level]
-            if key.line != self.line  \
-                    or self.index-key.index > 1024:
-                if key.required:
-                    raise ScannerError("while scanning a simple key", key.mark,
-                            "could not find expected ':'", self.get_mark())
-                del self.possible_simple_keys[level]
-
-    def save_possible_simple_key(self):
-        # The next token may start a simple key. We check if it's possible
-        # and save its position. This function is called for
-        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
-        # Check if a simple key is required at the current position.
-        required = not self.flow_level and self.indent == self.column
-
-        # The next token might be a simple key. Let's save its number and
-        # position.
-        if self.allow_simple_key:
-            self.remove_possible_simple_key()
-            token_number = self.tokens_taken+len(self.tokens)
-            key = SimpleKey(token_number, required,
-                    self.index, self.line, self.column, self.get_mark())
-            self.possible_simple_keys[self.flow_level] = key
-
-    def remove_possible_simple_key(self):
-        # Remove the saved possible key position at the current flow level.
-        if self.flow_level in self.possible_simple_keys:
-            key = self.possible_simple_keys[self.flow_level]
-            
-            if key.required:
-                raise ScannerError("while scanning a simple key", key.mark,
-                        "could not find expected ':'", self.get_mark())
-
-            del self.possible_simple_keys[self.flow_level]
-
-    # Indentation functions.
-
-    def unwind_indent(self, column):
-
-        ## In flow context, tokens should respect indentation.
-        ## Actually the condition should be `self.indent >= column` according to
-        ## the spec. But this condition will prohibit intuitively correct
-        ## constructions such as
-        ## key : {
-        ## }
-        #if self.flow_level and self.indent > column:
-        #    raise ScannerError(None, None,
-        #            "invalid intendation or unclosed '[' or '{'",
-        #            self.get_mark())
-
-        # In the flow context, indentation is ignored. We make the scanner less
-        # restrictive than the specification requires.
-        if self.flow_level:
-            return
-
-        # In block context, we may need to issue the BLOCK-END tokens.
-        while self.indent > column:
-            mark = self.get_mark()
-            self.indent = self.indents.pop()
-            self.tokens.append(BlockEndToken(mark, mark))
-
-    def add_indent(self, column):
-        # Check if we need to increase indentation.
-        if self.indent < column:
-            self.indents.append(self.indent)
-            self.indent = column
-            return True
-        return False
-
-    # Fetchers.
-
-    def fetch_stream_start(self):
-        # We always add STREAM-START as the first token and STREAM-END as the
-        # last token.
-
-        # Read the token.
-        mark = self.get_mark()
-        
-        # Add STREAM-START.
-        self.tokens.append(StreamStartToken(mark, mark,
-            encoding=self.encoding))
-        
-
-    def fetch_stream_end(self):
-
-        # Set the current indentation to -1.
-        self.unwind_indent(-1)
-
-        # Reset simple keys.
-        self.remove_possible_simple_key()
-        self.allow_simple_key = False
-        self.possible_simple_keys = {}
-
-        # Read the token.
-        mark = self.get_mark()
-        
-        # Add STREAM-END.
-        self.tokens.append(StreamEndToken(mark, mark))
-
-        # The stream is finished.
-        self.done = True
-
-    def fetch_directive(self):
-        
-        # Set the current indentation to -1.
-        self.unwind_indent(-1)
-
-        # Reset simple keys.
-        self.remove_possible_simple_key()
-        self.allow_simple_key = False
-
-        # Scan and add DIRECTIVE.
-        self.tokens.append(self.scan_directive())
-
-    def fetch_document_start(self):
-        self.fetch_document_indicator(DocumentStartToken)
-
-    def fetch_document_end(self):
-        self.fetch_document_indicator(DocumentEndToken)
-
-    def fetch_document_indicator(self, TokenClass):
-
-        # Set the current indentation to -1.
-        self.unwind_indent(-1)
-
-        # Reset simple keys. Note that there cannot be a block collection
-        # after '---'.
-        self.remove_possible_simple_key()
-        self.allow_simple_key = False
-
-        # Add DOCUMENT-START or DOCUMENT-END.
-        start_mark = self.get_mark()
-        self.forward(3)
-        end_mark = self.get_mark()
-        self.tokens.append(TokenClass(start_mark, end_mark))
-
-    def fetch_flow_sequence_start(self):
-        self.fetch_flow_collection_start(FlowSequenceStartToken)
-
-    def fetch_flow_mapping_start(self):
-        self.fetch_flow_collection_start(FlowMappingStartToken)
-
-    def fetch_flow_collection_start(self, TokenClass):
-
-        # '[' and '{' may start a simple key.
-        self.save_possible_simple_key()
-
-        # Increase the flow level.
-        self.flow_level += 1
-
-        # Simple keys are allowed after '[' and '{'.
-        self.allow_simple_key = True
-
-        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(TokenClass(start_mark, end_mark))
-
-    def fetch_flow_sequence_end(self):
-        self.fetch_flow_collection_end(FlowSequenceEndToken)
-
-    def fetch_flow_mapping_end(self):
-        self.fetch_flow_collection_end(FlowMappingEndToken)
-
-    def fetch_flow_collection_end(self, TokenClass):
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Decrease the flow level.
-        self.flow_level -= 1
-
-        # No simple keys after ']' or '}'.
-        self.allow_simple_key = False
-
-        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(TokenClass(start_mark, end_mark))
-
-    def fetch_flow_entry(self):
-
-        # Simple keys are allowed after ','.
-        self.allow_simple_key = True
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Add FLOW-ENTRY.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
-    def fetch_block_entry(self):
-
-        # Block context needs additional checks.
-        if not self.flow_level:
-
-            # Are we allowed to start a new entry?
-            if not self.allow_simple_key:
-                raise ScannerError(None, None,
-                        "sequence entries are not allowed here",
-                        self.get_mark())
-
-            # We may need to add BLOCK-SEQUENCE-START.
-            if self.add_indent(self.column):
-                mark = self.get_mark()
-                self.tokens.append(BlockSequenceStartToken(mark, mark))
-
-        # It's an error for the block entry to occur in the flow context,
-        # but we let the parser detect this.
-        else:
-            pass
-
-        # Simple keys are allowed after '-'.
-        self.allow_simple_key = True
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Add BLOCK-ENTRY.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
-    def fetch_key(self):
-        
-        # Block context needs additional checks.
-        if not self.flow_level:
-
-            # Are we allowed to start a key (not necessarily a simple one)?
-            if not self.allow_simple_key:
-                raise ScannerError(None, None,
-                        "mapping keys are not allowed here",
-                        self.get_mark())
-
-            # We may need to add BLOCK-MAPPING-START.
-            if self.add_indent(self.column):
-                mark = self.get_mark()
-                self.tokens.append(BlockMappingStartToken(mark, mark))
-
-        # Simple keys are allowed after '?' in the block context.
-        self.allow_simple_key = not self.flow_level
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Add KEY.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(KeyToken(start_mark, end_mark))
-
-    def fetch_value(self):
-
-        # Are we completing a previously saved simple key?
-        if self.flow_level in self.possible_simple_keys:
-
-            # Add KEY.
-            key = self.possible_simple_keys[self.flow_level]
-            del self.possible_simple_keys[self.flow_level]
-            self.tokens.insert(key.token_number-self.tokens_taken,
-                    KeyToken(key.mark, key.mark))
-
-            # If this key starts a new block mapping, we need to add
-            # BLOCK-MAPPING-START.
-            if not self.flow_level:
-                if self.add_indent(key.column):
-                    self.tokens.insert(key.token_number-self.tokens_taken,
-                            BlockMappingStartToken(key.mark, key.mark))
-
-            # There cannot be two simple keys one after another.
-            self.allow_simple_key = False
-
-        # It must be a part of a complex key.
-        else:
-            
-            # Block context needs additional checks.
-            # (Do we really need them? They will be caught by the parser
-            # anyway.)
-            if not self.flow_level:
-
-                # We are allowed to start a complex value if and only if
-                # we can start a simple key.
-                if not self.allow_simple_key:
-                    raise ScannerError(None, None,
-                            "mapping values are not allowed here",
-                            self.get_mark())
-
-            # If this value starts a new block mapping, we need to add
-            # BLOCK-MAPPING-START.  It will be detected as an error later by
-            # the parser.
-            if not self.flow_level:
-                if self.add_indent(self.column):
-                    mark = self.get_mark()
-                    self.tokens.append(BlockMappingStartToken(mark, mark))
-
-            # Simple keys are allowed after ':' in the block context.
-            self.allow_simple_key = not self.flow_level
-
-            # Reset possible simple key on the current level.
-            self.remove_possible_simple_key()
-
-        # Add VALUE.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(ValueToken(start_mark, end_mark))
-
-    def fetch_alias(self):
-
-        # ALIAS could be a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after ALIAS.
-        self.allow_simple_key = False
-
-        # Scan and add ALIAS.
-        self.tokens.append(self.scan_anchor(AliasToken))
-
-    def fetch_anchor(self):
-
-        # ANCHOR could start a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after ANCHOR.
-        self.allow_simple_key = False
-
-        # Scan and add ANCHOR.
-        self.tokens.append(self.scan_anchor(AnchorToken))
-
-    def fetch_tag(self):
-
-        # TAG could start a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after TAG.
-        self.allow_simple_key = False
-
-        # Scan and add TAG.
-        self.tokens.append(self.scan_tag())
-
-    def fetch_literal(self):
-        self.fetch_block_scalar(style='|')
-
-    def fetch_folded(self):
-        self.fetch_block_scalar(style='>')
-
-    def fetch_block_scalar(self, style):
-
-        # A simple key may follow a block scalar.
-        self.allow_simple_key = True
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Scan and add SCALAR.
-        self.tokens.append(self.scan_block_scalar(style))
-
-    def fetch_single(self):
-        self.fetch_flow_scalar(style='\'')
-
-    def fetch_double(self):
-        self.fetch_flow_scalar(style='"')
-
-    def fetch_flow_scalar(self, style):
-
-        # A flow scalar could be a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after flow scalars.
-        self.allow_simple_key = False
-
-        # Scan and add SCALAR.
-        self.tokens.append(self.scan_flow_scalar(style))
-
-    def fetch_plain(self):
-
-        # A plain scalar could be a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after plain scalars. But note that `scan_plain` will
-        # change this flag if the scan is finished at the beginning of the
-        # line.
-        self.allow_simple_key = False
-
-        # Scan and add SCALAR. May change `allow_simple_key`.
-        self.tokens.append(self.scan_plain())
-
-    # Checkers.
-
-    def check_directive(self):
-
-        # DIRECTIVE:        ^ '%' ...
-        # The '%' indicator is already checked.
-        if self.column == 0:
-            return True
-
-    def check_document_start(self):
-
-        # DOCUMENT-START:   ^ '---' (' '|'\n')
-        if self.column == 0:
-            if self.prefix(3) == '---'  \
-                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                return True
-
-    def check_document_end(self):
-
-        # DOCUMENT-END:     ^ '...' (' '|'\n')
-        if self.column == 0:
-            if self.prefix(3) == '...'  \
-                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                return True
-
-    def check_block_entry(self):
-
-        # BLOCK-ENTRY:      '-' (' '|'\n')
-        return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
-    def check_key(self):
-
-        # KEY(flow context):    '?'
-        if self.flow_level:
-            return True
-
-        # KEY(block context):   '?' (' '|'\n')
-        else:
-            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
-    def check_value(self):
-
-        # VALUE(flow context):  ':'
-        if self.flow_level:
-            return True
-
-        # VALUE(block context): ':' (' '|'\n')
-        else:
-            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
-    def check_plain(self):
-
-        # A plain scalar may start with any non-space character except:
-        #   '-', '?', ':', ',', '[', ']', '{', '}',
-        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
-        #   '%', '@', '`'.
-        #
-        # It may also start with
-        #   '-', '?', ':'
-        # if it is followed by a non-space character.
-        #
-        # Note that we limit the last rule to the block context (except the
-        # '-' character) because we want the flow context to be space
-        # independent.
-        ch = self.peek()
-        return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
-                or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
-                        and (ch == '-' or (not self.flow_level and ch in '?:')))
-
-    # Scanners.
-
-    def scan_to_next_token(self):
-        # We ignore spaces, line breaks and comments.
-        # If we find a line break in the block context, we set the flag
-        # `allow_simple_key` on.
-        # The byte order mark is stripped if it's the first character in the
-        # stream. We do not yet support BOM inside the stream as the
-        # specification requires. Any such mark will be considered as a part
-        # of the document.
-        #
-        # TODO: We need to make tab handling rules more sane. A good rule is
-        #   Tabs cannot precede tokens
-        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
-        #   KEY(block), VALUE(block), BLOCK-ENTRY
-        # So the checking code is
-        #   if <TAB>:
-        #       self.allow_simple_keys = False
-        # We also need to add the check for `allow_simple_keys == True` to
-        # `unwind_indent` before issuing BLOCK-END.
-        # Scanners for block, flow, and plain scalars need to be modified.
-
-        if self.index == 0 and self.peek() == '\uFEFF':
-            self.forward()
-        found = False
-        while not found:
-            while self.peek() == ' ':
-                self.forward()
-            if self.peek() == '#':
-                while self.peek() not in '\0\r\n\x85\u2028\u2029':
-                    self.forward()
-            if self.scan_line_break():
-                if not self.flow_level:
-                    self.allow_simple_key = True
-            else:
-                found = True
-
-    def scan_directive(self):
-        # See the specification for details.
-        start_mark = self.get_mark()
-        self.forward()
-        name = self.scan_directive_name(start_mark)
-        value = None
-        if name == 'YAML':
-            value = self.scan_yaml_directive_value(start_mark)
-            end_mark = self.get_mark()
-        elif name == 'TAG':
-            value = self.scan_tag_directive_value(start_mark)
-            end_mark = self.get_mark()
-        else:
-            end_mark = self.get_mark()
-            while self.peek() not in '\0\r\n\x85\u2028\u2029':
-                self.forward()
-        self.scan_directive_ignored_line(start_mark)
-        return DirectiveToken(name, value, start_mark, end_mark)
-
-    def scan_directive_name(self, start_mark):
-        # See the specification for details.
-        length = 0
-        ch = self.peek(length)
-        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
-                or ch in '-_':
-            length += 1
-            ch = self.peek(length)
-        if not length:
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch, self.get_mark())
-        value = self.prefix(length)
-        self.forward(length)
-        ch = self.peek()
-        if ch not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch, self.get_mark())
-        return value
-
-    def scan_yaml_directive_value(self, start_mark):
-        # See the specification for details.
-        while self.peek() == ' ':
-            self.forward()
-        major = self.scan_yaml_directive_number(start_mark)
-        if self.peek() != '.':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a digit or '.', but found %r" % self.peek(),
-                    self.get_mark())
-        self.forward()
-        minor = self.scan_yaml_directive_number(start_mark)
-        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a digit or ' ', but found %r" % self.peek(),
-                    self.get_mark())
-        return (major, minor)
-
-    def scan_yaml_directive_number(self, start_mark):
-        # See the specification for details.
-        ch = self.peek()
-        if not ('0' <= ch <= '9'):
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a digit, but found %r" % ch, self.get_mark())
-        length = 0
-        while '0' <= self.peek(length) <= '9':
-            length += 1
-        value = int(self.prefix(length))
-        self.forward(length)
-        return value
-
-    def scan_tag_directive_value(self, start_mark):
-        # See the specification for details.
-        while self.peek() == ' ':
-            self.forward()
-        handle = self.scan_tag_directive_handle(start_mark)
-        while self.peek() == ' ':
-            self.forward()
-        prefix = self.scan_tag_directive_prefix(start_mark)
-        return (handle, prefix)
-
-    def scan_tag_directive_handle(self, start_mark):
-        # See the specification for details.
-        value = self.scan_tag_handle('directive', start_mark)
-        ch = self.peek()
-        if ch != ' ':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected ' ', but found %r" % ch, self.get_mark())
-        return value
-
-    def scan_tag_directive_prefix(self, start_mark):
-        # See the specification for details.
-        value = self.scan_tag_uri('directive', start_mark)
-        ch = self.peek()
-        if ch not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected ' ', but found %r" % ch, self.get_mark())
-        return value
-
-    def scan_directive_ignored_line(self, start_mark):
-        # See the specification for details.
-        while self.peek() == ' ':
-            self.forward()
-        if self.peek() == '#':
-            while self.peek() not in '\0\r\n\x85\u2028\u2029':
-                self.forward()
-        ch = self.peek()
-        if ch not in '\0\r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a comment or a line break, but found %r"
-                        % ch, self.get_mark())
-        self.scan_line_break()
-
-    def scan_anchor(self, TokenClass):
-        # The specification does not restrict characters for anchors and
-        # aliases. This may lead to problems, for instance, the document:
-        #   [ *alias, value ]
-        # can be interpreted in two ways, as
-        #   [ "value" ]
-        # and
-        #   [ *alias , "value" ]
-        # Therefore we restrict aliases to numbers and ASCII letters.
-        start_mark = self.get_mark()
-        indicator = self.peek()
-        if indicator == '*':
-            name = 'alias'
-        else:
-            name = 'anchor'
-        self.forward()
-        length = 0
-        ch = self.peek(length)
-        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
-                or ch in '-_':
-            length += 1
-            ch = self.peek(length)
-        if not length:
-            raise ScannerError("while scanning an %s" % name, start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch, self.get_mark())
-        value = self.prefix(length)
-        self.forward(length)
-        ch = self.peek()
-        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
-            raise ScannerError("while scanning an %s" % name, start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch, self.get_mark())
-        end_mark = self.get_mark()
-        return TokenClass(value, start_mark, end_mark)
-
-    def scan_tag(self):
-        # See the specification for details.
-        start_mark = self.get_mark()
-        ch = self.peek(1)
-        if ch == '<':
-            handle = None
-            self.forward(2)
-            suffix = self.scan_tag_uri('tag', start_mark)
-            if self.peek() != '>':
-                raise ScannerError("while parsing a tag", start_mark,
-                        "expected '>', but found %r" % self.peek(),
-                        self.get_mark())
-            self.forward()
-        elif ch in '\0 \t\r\n\x85\u2028\u2029':
-            handle = None
-            suffix = '!'
-            self.forward()
-        else:
-            length = 1
-            use_handle = False
-            while ch not in '\0 \r\n\x85\u2028\u2029':
-                if ch == '!':
-                    use_handle = True
-                    break
-                length += 1
-                ch = self.peek(length)
-            handle = '!'
-            if use_handle:
-                handle = self.scan_tag_handle('tag', start_mark)
-            else:
-                handle = '!'
-                self.forward()
-            suffix = self.scan_tag_uri('tag', start_mark)
-        ch = self.peek()
-        if ch not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a tag", start_mark,
-                    "expected ' ', but found %r" % ch, self.get_mark())
-        value = (handle, suffix)
-        end_mark = self.get_mark()
-        return TagToken(value, start_mark, end_mark)
-
-    def scan_block_scalar(self, style):
-        # See the specification for details.
-
-        if style == '>':
-            folded = True
-        else:
-            folded = False
-
-        chunks = []
-        start_mark = self.get_mark()
-
-        # Scan the header.
-        self.forward()
-        chomping, increment = self.scan_block_scalar_indicators(start_mark)
-        self.scan_block_scalar_ignored_line(start_mark)
-
-        # Determine the indentation level and go to the first non-empty line.
-        min_indent = self.indent+1
-        if min_indent < 1:
-            min_indent = 1
-        if increment is None:
-            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
-            indent = max(min_indent, max_indent)
-        else:
-            indent = min_indent+increment-1
-            breaks, end_mark = self.scan_block_scalar_breaks(indent)
-        line_break = ''
-
-        # Scan the inner part of the block scalar.
-        while self.column == indent and self.peek() != '\0':
-            chunks.extend(breaks)
-            leading_non_space = self.peek() not in ' \t'
-            length = 0
-            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
-                length += 1
-            chunks.append(self.prefix(length))
-            self.forward(length)
-            line_break = self.scan_line_break()
-            breaks, end_mark = self.scan_block_scalar_breaks(indent)
-            if self.column == indent and self.peek() != '\0':
-
-                # Unfortunately, folding rules are ambiguous.
-                #
-                # This is the folding according to the specification:
-                
-                if folded and line_break == '\n'    \
-                        and leading_non_space and self.peek() not in ' \t':
-                    if not breaks:
-                        chunks.append(' ')
-                else:
-                    chunks.append(line_break)
-                
-                # This is Clark Evans's interpretation (also in the spec
-                # examples):
-                #
-                #if folded and line_break == '\n':
-                #    if not breaks:
-                #        if self.peek() not in ' \t':
-                #            chunks.append(' ')
-                #        else:
-                #            chunks.append(line_break)
-                #else:
-                #    chunks.append(line_break)
-            else:
-                break
-
-        # Chomp the tail.
-        if chomping is not False:
-            chunks.append(line_break)
-        if chomping is True:
-            chunks.extend(breaks)
-
-        # We are done.
-        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
-                style)
-
-    def scan_block_scalar_indicators(self, start_mark):
-        # See the specification for details.
-        chomping = None
-        increment = None
-        ch = self.peek()
-        if ch in '+-':
-            if ch == '+':
-                chomping = True
-            else:
-                chomping = False
-            self.forward()
-            ch = self.peek()
-            if ch in '0123456789':
-                increment = int(ch)
-                if increment == 0:
-                    raise ScannerError("while scanning a block scalar", start_mark,
-                            "expected indentation indicator in the range 1-9, but found 0",
-                            self.get_mark())
-                self.forward()
-        elif ch in '0123456789':
-            increment = int(ch)
-            if increment == 0:
-                raise ScannerError("while scanning a block scalar", start_mark,
-                        "expected indentation indicator in the range 1-9, but found 0",
-                        self.get_mark())
-            self.forward()
-            ch = self.peek()
-            if ch in '+-':
-                if ch == '+':
-                    chomping = True
-                else:
-                    chomping = False
-                self.forward()
-        ch = self.peek()
-        if ch not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a block scalar", start_mark,
-                    "expected chomping or indentation indicators, but found %r"
-                    % ch, self.get_mark())
-        return chomping, increment
-
-    def scan_block_scalar_ignored_line(self, start_mark):
-        # See the specification for details.
-        while self.peek() == ' ':
-            self.forward()
-        if self.peek() == '#':
-            while self.peek() not in '\0\r\n\x85\u2028\u2029':
-                self.forward()
-        ch = self.peek()
-        if ch not in '\0\r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a block scalar", start_mark,
-                    "expected a comment or a line break, but found %r" % ch,
-                    self.get_mark())
-        self.scan_line_break()
-
-    def scan_block_scalar_indentation(self):
-        # See the specification for details.
-        chunks = []
-        max_indent = 0
-        end_mark = self.get_mark()
-        while self.peek() in ' \r\n\x85\u2028\u2029':
-            if self.peek() != ' ':
-                chunks.append(self.scan_line_break())
-                end_mark = self.get_mark()
-            else:
-                self.forward()
-                if self.column > max_indent:
-                    max_indent = self.column
-        return chunks, max_indent, end_mark
-
-    def scan_block_scalar_breaks(self, indent):
-        # See the specification for details.
-        chunks = []
-        end_mark = self.get_mark()
-        while self.column < indent and self.peek() == ' ':
-            self.forward()
-        while self.peek() in '\r\n\x85\u2028\u2029':
-            chunks.append(self.scan_line_break())
-            end_mark = self.get_mark()
-            while self.column < indent and self.peek() == ' ':
-                self.forward()
-        return chunks, end_mark
-
-    def scan_flow_scalar(self, style):
-        # See the specification for details.
-        # Note that we relax the indentation rules for quoted scalars. Quoted
-        # scalars don't need to adhere to indentation because " and ' clearly
-        # mark their beginning and end. Therefore we are less
-        # restrictive than the specification requires. We only need to check
-        # that document separators are not included in scalars.
-        if style == '"':
-            double = True
-        else:
-            double = False
-        chunks = []
-        start_mark = self.get_mark()
-        quote = self.peek()
-        self.forward()
-        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
-        while self.peek() != quote:
-            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
-            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
-        self.forward()
-        end_mark = self.get_mark()
-        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
-                style)
-
-    ESCAPE_REPLACEMENTS = {
-        '0':    '\0',
-        'a':    '\x07',
-        'b':    '\x08',
-        't':    '\x09',
-        '\t':   '\x09',
-        'n':    '\x0A',
-        'v':    '\x0B',
-        'f':    '\x0C',
-        'r':    '\x0D',
-        'e':    '\x1B',
-        ' ':    '\x20',
-        '\"':   '\"',
-        '\\':   '\\',
-        'N':    '\x85',
-        '_':    '\xA0',
-        'L':    '\u2028',
-        'P':    '\u2029',
-    }
-
-    ESCAPE_CODES = {
-        'x':    2,
-        'u':    4,
-        'U':    8,
-    }
-
-    def scan_flow_scalar_non_spaces(self, double, start_mark):
-        # See the specification for details.
-        chunks = []
-        while True:
-            length = 0
-            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
-                length += 1
-            if length:
-                chunks.append(self.prefix(length))
-                self.forward(length)
-            ch = self.peek()
-            if not double and ch == '\'' and self.peek(1) == '\'':
-                chunks.append('\'')
-                self.forward(2)
-            elif (double and ch == '\'') or (not double and ch in '\"\\'):
-                chunks.append(ch)
-                self.forward()
-            elif double and ch == '\\':
-                self.forward()
-                ch = self.peek()
-                if ch in self.ESCAPE_REPLACEMENTS:
-                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
-                    self.forward()
-                elif ch in self.ESCAPE_CODES:
-                    length = self.ESCAPE_CODES[ch]
-                    self.forward()
-                    for k in range(length):
-                        if self.peek(k) not in '0123456789ABCDEFabcdef':
-                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
-                                        (length, self.peek(k)), self.get_mark())
-                    code = int(self.prefix(length), 16)
-                    chunks.append(chr(code))
-                    self.forward(length)
-                elif ch in '\r\n\x85\u2028\u2029':
-                    self.scan_line_break()
-                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
-                else:
-                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                            "found unknown escape character %r" % ch, self.get_mark())
-            else:
-                return chunks
-
-    def scan_flow_scalar_spaces(self, double, start_mark):
-        # See the specification for details.
-        chunks = []
-        length = 0
-        while self.peek(length) in ' \t':
-            length += 1
-        whitespaces = self.prefix(length)
-        self.forward(length)
-        ch = self.peek()
-        if ch == '\0':
-            raise ScannerError("while scanning a quoted scalar", start_mark,
-                    "found unexpected end of stream", self.get_mark())
-        elif ch in '\r\n\x85\u2028\u2029':
-            line_break = self.scan_line_break()
-            breaks = self.scan_flow_scalar_breaks(double, start_mark)
-            if line_break != '\n':
-                chunks.append(line_break)
-            elif not breaks:
-                chunks.append(' ')
-            chunks.extend(breaks)
-        else:
-            chunks.append(whitespaces)
-        return chunks
-
-    def scan_flow_scalar_breaks(self, double, start_mark):
-        # See the specification for details.
-        chunks = []
-        while True:
-            # Instead of checking indentation, we check for document
-            # separators.
-            prefix = self.prefix(3)
-            if (prefix == '---' or prefix == '...')   \
-                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                raise ScannerError("while scanning a quoted scalar", start_mark,
-                        "found unexpected document separator", self.get_mark())
-            while self.peek() in ' \t':
-                self.forward()
-            if self.peek() in '\r\n\x85\u2028\u2029':
-                chunks.append(self.scan_line_break())
-            else:
-                return chunks
-
-    def scan_plain(self):
-        # See the specification for details.
-        # We add an additional restriction for the flow context:
-        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
-        # We also keep track of the `allow_simple_key` flag here.
-        # Indentation rules are loosed for the flow context.
-        chunks = []
-        start_mark = self.get_mark()
-        end_mark = start_mark
-        indent = self.indent+1
-        # We allow zero indentation for scalars, but then we need to check for
-        # document separators at the beginning of the line.
-        #if indent == 0:
-        #    indent = 1
-        spaces = []
-        while True:
-            length = 0
-            if self.peek() == '#':
-                break
-            while True:
-                ch = self.peek(length)
-                if ch in '\0 \t\r\n\x85\u2028\u2029'    \
-                        or (not self.flow_level and ch == ':' and
-                                self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
-                        or (self.flow_level and ch in ',:?[]{}'):
-                    break
-                length += 1
-            # It's not clear what we should do with ':' in the flow context.
-            if (self.flow_level and ch == ':'
-                    and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
-                self.forward(length)
-                raise ScannerError("while scanning a plain scalar", start_mark,
-                    "found unexpected ':'", self.get_mark(),
-                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
-            if length == 0:
-                break
-            self.allow_simple_key = False
-            chunks.extend(spaces)
-            chunks.append(self.prefix(length))
-            self.forward(length)
-            end_mark = self.get_mark()
-            spaces = self.scan_plain_spaces(indent, start_mark)
-            if not spaces or self.peek() == '#' \
-                    or (not self.flow_level and self.column < indent):
-                break
-        return ScalarToken(''.join(chunks), True, start_mark, end_mark)
-
-    def scan_plain_spaces(self, indent, start_mark):
-        # See the specification for details.
-        # The specification is really confusing about tabs in plain scalars.
-        # We just forbid them completely. Do not use tabs in YAML!
-        chunks = []
-        length = 0
-        while self.peek(length) in ' ':
-            length += 1
-        whitespaces = self.prefix(length)
-        self.forward(length)
-        ch = self.peek()
-        if ch in '\r\n\x85\u2028\u2029':
-            line_break = self.scan_line_break()
-            self.allow_simple_key = True
-            prefix = self.prefix(3)
-            if (prefix == '---' or prefix == '...')   \
-                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                return
-            breaks = []
-            while self.peek() in ' \r\n\x85\u2028\u2029':
-                if self.peek() == ' ':
-                    self.forward()
-                else:
-                    breaks.append(self.scan_line_break())
-                    prefix = self.prefix(3)
-                    if (prefix == '---' or prefix == '...')   \
-                            and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                        return
-            if line_break != '\n':
-                chunks.append(line_break)
-            elif not breaks:
-                chunks.append(' ')
-            chunks.extend(breaks)
-        elif whitespaces:
-            chunks.append(whitespaces)
-        return chunks
-
-    def scan_tag_handle(self, name, start_mark):
-        # See the specification for details.
-        # For some strange reasons, the specification does not allow '_' in
-        # tag handles. I have allowed it anyway.
-        ch = self.peek()
-        if ch != '!':
-            raise ScannerError("while scanning a %s" % name, start_mark,
-                    "expected '!', but found %r" % ch, self.get_mark())
-        length = 1
-        ch = self.peek(length)
-        if ch != ' ':
-            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
-                    or ch in '-_':
-                length += 1
-                ch = self.peek(length)
-            if ch != '!':
-                self.forward(length)
-                raise ScannerError("while scanning a %s" % name, start_mark,
-                        "expected '!', but found %r" % ch, self.get_mark())
-            length += 1
-        value = self.prefix(length)
-        self.forward(length)
-        return value
-
-    def scan_tag_uri(self, name, start_mark):
-        # See the specification for details.
-        # Note: we do not check if URI is well-formed.
-        chunks = []
-        length = 0
-        ch = self.peek(length)
-        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
-                or ch in '-;/?:@&=+$,_.!~*\'()[]%':
-            if ch == '%':
-                chunks.append(self.prefix(length))
-                self.forward(length)
-                length = 0
-                chunks.append(self.scan_uri_escapes(name, start_mark))
-            else:
-                length += 1
-            ch = self.peek(length)
-        if length:
-            chunks.append(self.prefix(length))
-            self.forward(length)
-            length = 0
-        if not chunks:
-            raise ScannerError("while parsing a %s" % name, start_mark,
-                    "expected URI, but found %r" % ch, self.get_mark())
-        return ''.join(chunks)
-
-    def scan_uri_escapes(self, name, start_mark):
-        # See the specification for details.
-        codes = []
-        mark = self.get_mark()
-        while self.peek() == '%':
-            self.forward()
-            for k in range(2):
-                if self.peek(k) not in '0123456789ABCDEFabcdef':
-                    raise ScannerError("while scanning a %s" % name, start_mark,
-                            "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
-                            % self.peek(k), self.get_mark())
-            codes.append(int(self.prefix(2), 16))
-            self.forward(2)
-        try:
-            value = bytes(codes).decode('utf-8')
-        except UnicodeDecodeError as exc:
-            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
-        return value
-
-    def scan_line_break(self):
-        # Transforms:
-        #   '\r\n'      :   '\n'
-        #   '\r'        :   '\n'
-        #   '\n'        :   '\n'
-        #   '\x85'      :   '\n'
-        #   '\u2028'    :   '\u2028'
-        #   '\u2029     :   '\u2029'
-        #   default     :   ''
-        ch = self.peek()
-        if ch in '\r\n\x85':
-            if self.prefix(2) == '\r\n':
-                self.forward(2)
-            else:
-                self.forward()
-            return '\n'
-        elif ch in '\u2028\u2029':
-            self.forward()
-            return ch
-        return ''
-
-#try:
-#    import psyco
-#    psyco.bind(Scanner)
-#except ImportError:
-#    pass
-
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
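+# As an illustrative example (not part of the upstream comment), scanning the
+# one-line document "a: 1" yields roughly this token sequence:
+#   STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR('a'),
+#   VALUE, SCALAR('1'), BLOCK-END, STREAM-END
+#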
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+    pass
+
+class SimpleKey:
+    # See the simple keys treatment below.
+
+    def __init__(self, token_number, required, index, line, column, mark):
+        self.token_number = token_number
+        self.required = required
+        self.index = index
+        self.line = line
+        self.column = column
+        self.mark = mark
+
+class Scanner:
+
+    def __init__(self):
+        """Initialize the scanner."""
+        # It is assumed that Scanner and Reader will have a common descendant.
+        # Reader does the dirty work of checking for BOM and converting the
+        # input data to Unicode. It also adds NUL to the end.
+        #
+        # Reader supports the following methods:
+        #   self.peek(i=0)       # peek the next i-th character
+        #   self.prefix(l=1)     # peek the next l characters
+        #   self.forward(l=1)    # read the next l characters and move the pointer.
+
+        # Have we reached the end of the stream?
+        self.done = False
+
+        # The number of unclosed '{' and '['. `flow_level == 0` means block
+        # context.
+        self.flow_level = 0
+
+        # List of processed tokens that are not yet emitted.
+        self.tokens = []
+
+        # Add the STREAM-START token.
+        self.fetch_stream_start()
+
+        # Number of tokens that were emitted through the `get_token` method.
+        self.tokens_taken = 0
+
+        # The current indentation level.
+        self.indent = -1
+
+        # Past indentation levels.
+        self.indents = []
+
+        # Variables related to simple keys treatment.
+
+        # A simple key is a key that is not denoted by the '?' indicator.
+        # Example of simple keys:
+        #   ---
+        #   block simple key: value
+        #   ? not a simple key:
+        #   : { flow simple key: value }
+        # We emit the KEY token before all keys, so when we find a potential
+        # simple key, we try to locate the corresponding ':' indicator.
+        # Simple keys should be limited to a single line and 1024 characters.
+
+        # Can a simple key start at the current position? A simple key may
+        # start:
+        # - at the beginning of the line, not counting indentation spaces
+        #       (in block context),
+        # - after '{', '[', ',' (in the flow context),
+        # - after '?', ':', '-' (in the block context).
+        # In the block context, this flag also signifies if a block collection
+        # may start at the current position.
+        self.allow_simple_key = True
+
+        # Keep track of possible simple keys. This is a dictionary. The key
+        # is `flow_level`; there can be no more than one possible simple key
+        # for each level. The value is a SimpleKey record:
+        #   (token_number, required, index, line, column, mark)
+        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+        # '[', or '{' tokens.
+        self.possible_simple_keys = {}
+
+    # Public methods.
+
+    def check_token(self, *choices):
+        # Check if the next token is one of the given types.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.tokens[0], choice):
+                    return True
+        return False
+
+    def peek_token(self):
+        # Return the next token, but do not delete it from the queue.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            return self.tokens[0]
+
+    def get_token(self):
+        # Return the next token.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            self.tokens_taken += 1
+            return self.tokens.pop(0)
+
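+    # Illustrative usage sketch (not part of the module): a consumer such as
+    # the package's Parser typically drives these public methods like
+    #     while not scanner.check_token(StreamEndToken):
+    #         token = scanner.get_token()
+    #         ...process the token...
+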
+    # Private methods.
+
+    def need_more_tokens(self):
+        if self.done:
+            return False
+        if not self.tokens:
+            return True
+        # The current token may be a potential simple key, so we
+        # need to look further.
+        self.stale_possible_simple_keys()
+        if self.next_possible_simple_key() == self.tokens_taken:
+            return True
+
+    def fetch_more_tokens(self):
+
+        # Eat whitespaces and comments until we reach the next token.
+        self.scan_to_next_token()
+
+        # Remove obsolete possible simple keys.
+        self.stale_possible_simple_keys()
+
+        # Compare the current indentation and column. It may add some tokens
+        # and decrease the current indentation level.
+        self.unwind_indent(self.column)
+
+        # Peek the next character.
+        ch = self.peek()
+
+        # Is it the end of stream?
+        if ch == '\0':
+            return self.fetch_stream_end()
+
+        # Is it a directive?
+        if ch == '%' and self.check_directive():
+            return self.fetch_directive()
+
+        # Is it the document start?
+        if ch == '-' and self.check_document_start():
+            return self.fetch_document_start()
+
+        # Is it the document end?
+        if ch == '.' and self.check_document_end():
+            return self.fetch_document_end()
+
+        # TODO: support for BOM within a stream.
+        #if ch == '\uFEFF':
+        #    return self.fetch_bom()    <-- issue BOMToken
+
+        # Note: the order of the following checks is NOT significant.
+
+        # Is it the flow sequence start indicator?
+        if ch == '[':
+            return self.fetch_flow_sequence_start()
+
+        # Is it the flow mapping start indicator?
+        if ch == '{':
+            return self.fetch_flow_mapping_start()
+
+        # Is it the flow sequence end indicator?
+        if ch == ']':
+            return self.fetch_flow_sequence_end()
+
+        # Is it the flow mapping end indicator?
+        if ch == '}':
+            return self.fetch_flow_mapping_end()
+
+        # Is it the flow entry indicator?
+        if ch == ',':
+            return self.fetch_flow_entry()
+
+        # Is it the block entry indicator?
+        if ch == '-' and self.check_block_entry():
+            return self.fetch_block_entry()
+
+        # Is it the key indicator?
+        if ch == '?' and self.check_key():
+            return self.fetch_key()
+
+        # Is it the value indicator?
+        if ch == ':' and self.check_value():
+            return self.fetch_value()
+
+        # Is it an alias?
+        if ch == '*':
+            return self.fetch_alias()
+
+        # Is it an anchor?
+        if ch == '&':
+            return self.fetch_anchor()
+
+        # Is it a tag?
+        if ch == '!':
+            return self.fetch_tag()
+
+        # Is it a literal scalar?
+        if ch == '|' and not self.flow_level:
+            return self.fetch_literal()
+
+        # Is it a folded scalar?
+        if ch == '>' and not self.flow_level:
+            return self.fetch_folded()
+
+        # Is it a single quoted scalar?
+        if ch == '\'':
+            return self.fetch_single()
+
+        # Is it a double quoted scalar?
+        if ch == '\"':
+            return self.fetch_double()
+
+        # It must be a plain scalar then.
+        if self.check_plain():
+            return self.fetch_plain()
+
+        # No? It's an error. Let's produce a nice error message.
+        raise ScannerError("while scanning for the next token", None,
+                "found character %r that cannot start any token" % ch,
+                self.get_mark())
+
+    # Simple keys treatment.
+
+    def next_possible_simple_key(self):
+        # Return the number of the nearest possible simple key. Actually we
+        # don't need to loop through the whole dictionary. We may replace it
+        # with the following code:
+        #   if not self.possible_simple_keys:
+        #       return None
+        #   return self.possible_simple_keys[
+        #           min(self.possible_simple_keys.keys())].token_number
+        min_token_number = None
+        for level in self.possible_simple_keys:
+            key = self.possible_simple_keys[level]
+            if min_token_number is None or key.token_number < min_token_number:
+                min_token_number = key.token_number
+        return min_token_number
+
+    def stale_possible_simple_keys(self):
+        # Remove entries that are no longer possible simple keys. According to
+        # the YAML specification, simple keys
+        # - should be limited to a single line,
+        # - should be no longer than 1024 characters.
+        # Disabling this procedure will allow simple keys of any length and
+        # height (may cause problems if indentation is broken though).
+        for level in list(self.possible_simple_keys):
+            key = self.possible_simple_keys[level]
+            if key.line != self.line  \
+                    or self.index-key.index > 1024:
+                if key.required:
+                    raise ScannerError("while scanning a simple key", key.mark,
+                            "could not find expected ':'", self.get_mark())
+                del self.possible_simple_keys[level]
+
+    def save_possible_simple_key(self):
+        # The next token may start a simple key. We check if it's possible
+        # and save its position. This function is called for
+        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+        # Check if a simple key is required at the current position.
+        required = not self.flow_level and self.indent == self.column
+
+        # The next token might be a simple key. Let's save its number and
+        # position.
+        if self.allow_simple_key:
+            self.remove_possible_simple_key()
+            token_number = self.tokens_taken+len(self.tokens)
+            key = SimpleKey(token_number, required,
+                    self.index, self.line, self.column, self.get_mark())
+            self.possible_simple_keys[self.flow_level] = key
+
+    def remove_possible_simple_key(self):
+        # Remove the saved possible key position at the current flow level.
+        if self.flow_level in self.possible_simple_keys:
+            key = self.possible_simple_keys[self.flow_level]
+            
+            if key.required:
+                raise ScannerError("while scanning a simple key", key.mark,
+                        "could not find expected ':'", self.get_mark())
+
+            del self.possible_simple_keys[self.flow_level]
+
+    # Indentation functions.
+
+    def unwind_indent(self, column):
+
+        ## In flow context, tokens should respect indentation.
+        ## Actually the condition should be `self.indent >= column` according to
+        ## the spec. But this condition will prohibit intuitively correct
+        ## constructions such as
+        ## key : {
+        ## }
+        #if self.flow_level and self.indent > column:
+        #    raise ScannerError(None, None,
+        #            "invalid intendation or unclosed '[' or '{'",
+        #            self.get_mark())
+
+        # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+        if self.flow_level:
+            return
+
+        # In block context, we may need to issue the BLOCK-END tokens.
+        while self.indent > column:
+            mark = self.get_mark()
+            self.indent = self.indents.pop()
+            self.tokens.append(BlockEndToken(mark, mark))
+
+    def add_indent(self, column):
+        # Check if we need to increase indentation.
+        if self.indent < column:
+            self.indents.append(self.indent)
+            self.indent = column
+            return True
+        return False
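+
+    # Illustrative example (not in the original source): for the block mapping
+    #   a:
+    #     b: 1
+    # the scanner calls add_indent(0) when the ':' after 'a' is reached and
+    # add_indent(2) for 'b'; at the end of the stream unwind_indent(-1) pops
+    # both levels and emits two BLOCK-END tokens.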
+
+    # Fetchers.
+
+    def fetch_stream_start(self):
+        # We always add STREAM-START as the first token and STREAM-END as the
+        # last token.
+
+        # Read the token.
+        mark = self.get_mark()
+        
+        # Add STREAM-START.
+        self.tokens.append(StreamStartToken(mark, mark,
+            encoding=self.encoding))
+        
+
+    def fetch_stream_end(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+        self.possible_simple_keys = {}
+
+        # Read the token.
+        mark = self.get_mark()
+        
+        # Add STREAM-END.
+        self.tokens.append(StreamEndToken(mark, mark))
+
+        # The stream is finished.
+        self.done = True
+
+    def fetch_directive(self):
+        
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Scan and add DIRECTIVE.
+        self.tokens.append(self.scan_directive())
+
+    def fetch_document_start(self):
+        self.fetch_document_indicator(DocumentStartToken)
+
+    def fetch_document_end(self):
+        self.fetch_document_indicator(DocumentEndToken)
+
+    def fetch_document_indicator(self, TokenClass):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys. Note that there could not be a block collection
+        # after '---'.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Add DOCUMENT-START or DOCUMENT-END.
+        start_mark = self.get_mark()
+        self.forward(3)
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_start(self):
+        self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+    def fetch_flow_mapping_start(self):
+        self.fetch_flow_collection_start(FlowMappingStartToken)
+
+    def fetch_flow_collection_start(self, TokenClass):
+
+        # '[' and '{' may start a simple key.
+        self.save_possible_simple_key()
+
+        # Increase the flow level.
+        self.flow_level += 1
+
+        # Simple keys are allowed after '[' and '{'.
+        self.allow_simple_key = True
+
+        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_end(self):
+        self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+    def fetch_flow_mapping_end(self):
+        self.fetch_flow_collection_end(FlowMappingEndToken)
+
+    def fetch_flow_collection_end(self, TokenClass):
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Decrease the flow level.
+        self.flow_level -= 1
+
+        # No simple keys after ']' or '}'.
+        self.allow_simple_key = False
+
+        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_entry(self):
+
+        # Simple keys are allowed after ','.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add FLOW-ENTRY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+    def fetch_block_entry(self):
+
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a new entry?
+            if not self.allow_simple_key:
+                raise ScannerError(None, None,
+                        "sequence entries are not allowed here",
+                        self.get_mark())
+
+            # We may need to add BLOCK-SEQUENCE-START.
+            if self.add_indent(self.column):
+                mark = self.get_mark()
+                self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+        # It's an error for the block entry to occur in the flow context,
+        # but we let the parser detect this.
+        else:
+            pass
+
+        # Simple keys are allowed after '-'.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add BLOCK-ENTRY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+    def fetch_key(self):
+        
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+            if not self.allow_simple_key:
+                raise ScannerError(None, None,
+                        "mapping keys are not allowed here",
+                        self.get_mark())
+
+            # We may need to add BLOCK-MAPPING-START.
+            if self.add_indent(self.column):
+                mark = self.get_mark()
+                self.tokens.append(BlockMappingStartToken(mark, mark))
+
+        # Simple keys are allowed after '?' in the block context.
+        self.allow_simple_key = not self.flow_level
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add KEY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(KeyToken(start_mark, end_mark))
+
+    def fetch_value(self):
+
+        # Do we determine a simple key?
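+        # (Illustrative note, not in the original source: for the input "a: 1"
+        # this handler runs when the ':' is reached; the position of 'a' was
+        # recorded in possible_simple_keys, and the KEY token is inserted
+        # retroactively at that recorded position below.)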
+        if self.flow_level in self.possible_simple_keys:
+
+            # Add KEY.
+            key = self.possible_simple_keys[self.flow_level]
+            del self.possible_simple_keys[self.flow_level]
+            self.tokens.insert(key.token_number-self.tokens_taken,
+                    KeyToken(key.mark, key.mark))
+
+            # If this key starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START.
+            if not self.flow_level:
+                if self.add_indent(key.column):
+                    self.tokens.insert(key.token_number-self.tokens_taken,
+                            BlockMappingStartToken(key.mark, key.mark))
+
+            # There cannot be two simple keys one after another.
+            self.allow_simple_key = False
+
+        # It must be a part of a complex key.
+        else:
+            
+            # Block context needs additional checks.
+            # (Do we really need them? They will be caught by the parser
+            # anyway.)
+            if not self.flow_level:
+
+                # We are allowed to start a complex value if and only if
+                # we can start a simple key.
+                if not self.allow_simple_key:
+                    raise ScannerError(None, None,
+                            "mapping values are not allowed here",
+                            self.get_mark())
+
+            # If this value starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START.  It will be detected as an error later by
+            # the parser.
+            if not self.flow_level:
+                if self.add_indent(self.column):
+                    mark = self.get_mark()
+                    self.tokens.append(BlockMappingStartToken(mark, mark))
+
+            # Simple keys are allowed after ':' in the block context.
+            self.allow_simple_key = not self.flow_level
+
+            # Reset possible simple key on the current level.
+            self.remove_possible_simple_key()
+
+        # Add VALUE.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(ValueToken(start_mark, end_mark))
+
+    def fetch_alias(self):
+
+        # ALIAS could be a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after ALIAS.
+        self.allow_simple_key = False
+
+        # Scan and add ALIAS.
+        self.tokens.append(self.scan_anchor(AliasToken))
+
+    def fetch_anchor(self):
+
+        # ANCHOR could start a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after ANCHOR.
+        self.allow_simple_key = False
+
+        # Scan and add ANCHOR.
+        self.tokens.append(self.scan_anchor(AnchorToken))
+
+    def fetch_tag(self):
+
+        # TAG could start a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after TAG.
+        self.allow_simple_key = False
+
+        # Scan and add TAG.
+        self.tokens.append(self.scan_tag())
+
+    def fetch_literal(self):
+        self.fetch_block_scalar(style='|')
+
+    def fetch_folded(self):
+        self.fetch_block_scalar(style='>')
+
+    def fetch_block_scalar(self, style):
+
+        # A simple key may follow a block scalar.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Scan and add SCALAR.
+        self.tokens.append(self.scan_block_scalar(style))
+
+    def fetch_single(self):
+        self.fetch_flow_scalar(style='\'')
+
+    def fetch_double(self):
+        self.fetch_flow_scalar(style='"')
+
+    def fetch_flow_scalar(self, style):
+
+        # A flow scalar could be a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after flow scalars.
+        self.allow_simple_key = False
+
+        # Scan and add SCALAR.
+        self.tokens.append(self.scan_flow_scalar(style))
+
+    def fetch_plain(self):
+
+        # A plain scalar could be a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after plain scalars. But note that `scan_plain` will
+        # change this flag if the scan is finished at the beginning of the
+        # line.
+        self.allow_simple_key = False
+
+        # Scan and add SCALAR. May change `allow_simple_key`.
+        self.tokens.append(self.scan_plain())
+
+    # Checkers.
+
+    def check_directive(self):
+
+        # DIRECTIVE:        ^ '%' ...
+        # The '%' indicator is already checked.
+        if self.column == 0:
+            return True
+
+    def check_document_start(self):
+
+        # DOCUMENT-START:   ^ '---' (' '|'\n')
+        if self.column == 0:
+            if self.prefix(3) == '---'  \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                return True
+
+    def check_document_end(self):
+
+        # DOCUMENT-END:     ^ '...' (' '|'\n')
+        if self.column == 0:
+            if self.prefix(3) == '...'  \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                return True
+
+    def check_block_entry(self):
+
+        # BLOCK-ENTRY:      '-' (' '|'\n')
+        return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+    def check_key(self):
+
+        # KEY(flow context):    '?'
+        if self.flow_level:
+            return True
+
+        # KEY(block context):   '?' (' '|'\n')
+        else:
+            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+    def check_value(self):
+
+        # VALUE(flow context):  ':'
+        if self.flow_level:
+            return True
+
+        # VALUE(block context): ':' (' '|'\n')
+        else:
+            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+    def check_plain(self):
+
+        # A plain scalar may start with any non-space character except:
+        #   '-', '?', ':', ',', '[', ']', '{', '}',
+        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
+        #   '%', '@', '`'.
+        #
+        # It may also start with
+        #   '-', '?', ':'
+        # if it is followed by a non-space character.
+        #
+        # Note that we limit the last rule to the block context (except the
+        # '-' character) because we want the flow context to be space
+        # independent.
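+        #
+        # Illustrative example (not in the original source): "-foo" may start
+        # a plain scalar ('-' followed by a non-space character), while
+        # "- foo" may not, since that is a block sequence entry.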
+        ch = self.peek()
+        return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
+                or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+                        and (ch == '-' or (not self.flow_level and ch in '?:')))
+
+    # Scanners.
+
+    def scan_to_next_token(self):
+        # We ignore spaces, line breaks and comments.
+        # If we find a line break in the block context, we set the flag
+        # `allow_simple_key` on.
+        # The byte order mark is stripped if it's the first character in the
+        # stream. We do not yet support BOM inside the stream as the
+        # specification requires. Any such mark will be considered as a part
+        # of the document.
+        #
+        # TODO: We need to make tab handling rules more sane. A good rule is
+        #   Tabs cannot precede tokens
+        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+        #   KEY(block), VALUE(block), BLOCK-ENTRY
+        # So the checking code is
+        #   if <TAB>:
+        #       self.allow_simple_keys = False
+        # We also need to add the check for `allow_simple_keys == True` to
+        # `unwind_indent` before issuing BLOCK-END.
+        # Scanners for block, flow, and plain scalars need to be modified.
+
+        if self.index == 0 and self.peek() == '\uFEFF':
+            self.forward()
+        found = False
+        while not found:
+            while self.peek() == ' ':
+                self.forward()
+            if self.peek() == '#':
+                while self.peek() not in '\0\r\n\x85\u2028\u2029':
+                    self.forward()
+            if self.scan_line_break():
+                if not self.flow_level:
+                    self.allow_simple_key = True
+            else:
+                found = True
+
+    def scan_directive(self):
+        # See the specification for details.
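+        # Illustrative examples (not in the original source):
+        #   "%YAML 1.1"                      -> DirectiveToken('YAML', (1, 1), ...)
+        #   "%TAG !e! tag:example.com,2000:" -> DirectiveToken('TAG',
+        #                                        ('!e!', 'tag:example.com,2000:'), ...)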
+        start_mark = self.get_mark()
+        self.forward()
+        name = self.scan_directive_name(start_mark)
+        value = None
+        if name == 'YAML':
+            value = self.scan_yaml_directive_value(start_mark)
+            end_mark = self.get_mark()
+        elif name == 'TAG':
+            value = self.scan_tag_directive_value(start_mark)
+            end_mark = self.get_mark()
+        else:
+            end_mark = self.get_mark()
+            while self.peek() not in '\0\r\n\x85\u2028\u2029':
+                self.forward()
+        self.scan_directive_ignored_line(start_mark)
+        return DirectiveToken(name, value, start_mark, end_mark)
+
+    def scan_directive_name(self, start_mark):
+        # See the specification for details.
+        length = 0
+        ch = self.peek(length)
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                or ch in '-_':
+            length += 1
+            ch = self.peek(length)
+        if not length:
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch, self.get_mark())
+        value = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch, self.get_mark())
+        return value
+
+    def scan_yaml_directive_value(self, start_mark):
+        # See the specification for details.
+        while self.peek() == ' ':
+            self.forward()
+        major = self.scan_yaml_directive_number(start_mark)
+        if self.peek() != '.':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected a digit or '.', but found %r" % self.peek(),
+                    self.get_mark())
+        self.forward()
+        minor = self.scan_yaml_directive_number(start_mark)
+        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected a digit or ' ', but found %r" % self.peek(),
+                    self.get_mark())
+        return (major, minor)
+
+    def scan_yaml_directive_number(self, start_mark):
+        # See the specification for details.
+        ch = self.peek()
+        if not ('0' <= ch <= '9'):
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected a digit, but found %r" % ch, self.get_mark())
+        length = 0
+        while '0' <= self.peek(length) <= '9':
+            length += 1
+        value = int(self.prefix(length))
+        self.forward(length)
+        return value
+
+    def scan_tag_directive_value(self, start_mark):
+        # See the specification for details.
+        while self.peek() == ' ':
+            self.forward()
+        handle = self.scan_tag_directive_handle(start_mark)
+        while self.peek() == ' ':
+            self.forward()
+        prefix = self.scan_tag_directive_prefix(start_mark)
+        return (handle, prefix)
+
+    def scan_tag_directive_handle(self, start_mark):
+        # See the specification for details.
+        value = self.scan_tag_handle('directive', start_mark)
+        ch = self.peek()
+        if ch != ' ':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected ' ', but found %r" % ch, self.get_mark())
+        return value
+
+    def scan_tag_directive_prefix(self, start_mark):
+        # See the specification for details.
+        value = self.scan_tag_uri('directive', start_mark)
+        ch = self.peek()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected ' ', but found %r" % ch, self.get_mark())
+        return value
+
+    def scan_directive_ignored_line(self, start_mark):
+        # See the specification for details.
+        while self.peek() == ' ':
+            self.forward()
+        if self.peek() == '#':
+            while self.peek() not in '\0\r\n\x85\u2028\u2029':
+                self.forward()
+        ch = self.peek()
+        if ch not in '\0\r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected a comment or a line break, but found %r"
+                        % ch, self.get_mark())
+        self.scan_line_break()
+
+    def scan_anchor(self, TokenClass):
+        # The specification does not restrict characters for anchors and
+        # aliases. This may lead to problems, for instance, the document:
+        #   [ *alias, value ]
+        # can be interpreted in two ways, as
+        #   [ "value" ]
+        # and
+        #   [ *alias , "value" ]
+        # Therefore we restrict aliases to numbers and ASCII letters.
+        start_mark = self.get_mark()
+        indicator = self.peek()
+        if indicator == '*':
+            name = 'alias'
+        else:
+            name = 'anchor'
+        self.forward()
+        length = 0
+        ch = self.peek(length)
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                or ch in '-_':
+            length += 1
+            ch = self.peek(length)
+        if not length:
+            raise ScannerError("while scanning an %s" % name, start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch, self.get_mark())
+        value = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+            raise ScannerError("while scanning an %s" % name, start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch, self.get_mark())
+        end_mark = self.get_mark()
+        return TokenClass(value, start_mark, end_mark)
+
+    def scan_tag(self):
+        # See the specification for details.
+        start_mark = self.get_mark()
+        ch = self.peek(1)
+        if ch == '<':
+            handle = None
+            self.forward(2)
+            suffix = self.scan_tag_uri('tag', start_mark)
+            if self.peek() != '>':
+                raise ScannerError("while parsing a tag", start_mark,
+                        "expected '>', but found %r" % self.peek(),
+                        self.get_mark())
+            self.forward()
+        elif ch in '\0 \t\r\n\x85\u2028\u2029':
+            handle = None
+            suffix = '!'
+            self.forward()
+        else:
+            length = 1
+            use_handle = False
+            while ch not in '\0 \r\n\x85\u2028\u2029':
+                if ch == '!':
+                    use_handle = True
+                    break
+                length += 1
+                ch = self.peek(length)
+            handle = '!'
+            if use_handle:
+                handle = self.scan_tag_handle('tag', start_mark)
+            else:
+                handle = '!'
+                self.forward()
+            suffix = self.scan_tag_uri('tag', start_mark)
+        ch = self.peek()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a tag", start_mark,
+                    "expected ' ', but found %r" % ch, self.get_mark())
+        value = (handle, suffix)
+        end_mark = self.get_mark()
+        return TagToken(value, start_mark, end_mark)
+
+    def scan_block_scalar(self, style):
+        # See the specification for details.
+
+        if style == '>':
+            folded = True
+        else:
+            folded = False
+
+        chunks = []
+        start_mark = self.get_mark()
+
+        # Scan the header.
+        self.forward()
+        chomping, increment = self.scan_block_scalar_indicators(start_mark)
+        self.scan_block_scalar_ignored_line(start_mark)
+
+        # Determine the indentation level and go to the first non-empty line.
+        min_indent = self.indent+1
+        if min_indent < 1:
+            min_indent = 1
+        if increment is None:
+            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+            indent = max(min_indent, max_indent)
+        else:
+            indent = min_indent+increment-1
+            breaks, end_mark = self.scan_block_scalar_breaks(indent)
+        line_break = ''
+
+        # Scan the inner part of the block scalar.
+        while self.column == indent and self.peek() != '\0':
+            chunks.extend(breaks)
+            leading_non_space = self.peek() not in ' \t'
+            length = 0
+            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+                length += 1
+            chunks.append(self.prefix(length))
+            self.forward(length)
+            line_break = self.scan_line_break()
+            breaks, end_mark = self.scan_block_scalar_breaks(indent)
+            if self.column == indent and self.peek() != '\0':
+
+                # Unfortunately, folding rules are ambiguous.
+                #
+                # This is the folding according to the specification:
+                
+                if folded and line_break == '\n'    \
+                        and leading_non_space and self.peek() not in ' \t':
+                    if not breaks:
+                        chunks.append(' ')
+                else:
+                    chunks.append(line_break)
+                
+                # This is Clark Evans's interpretation (also in the spec
+                # examples):
+                #
+                #if folded and line_break == '\n':
+                #    if not breaks:
+                #        if self.peek() not in ' \t':
+                #            chunks.append(' ')
+                #        else:
+                #            chunks.append(line_break)
+                #else:
+                #    chunks.append(line_break)
+            else:
+                break
+
+        # Chomp the tail.
+        if chomping is not False:
+            chunks.append(line_break)
+        if chomping is True:
+            chunks.extend(breaks)
+
+        # We are done.
+        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+                style)
+
+    def scan_block_scalar_indicators(self, start_mark):
+        # See the specification for details.
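+        # Illustrative examples (not in the original source): the header "|2-"
+        # yields (chomping=False, increment=2); ">+" yields
+        # (chomping=True, increment=None).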
+        chomping = None
+        increment = None
+        ch = self.peek()
+        if ch in '+-':
+            if ch == '+':
+                chomping = True
+            else:
+                chomping = False
+            self.forward()
+            ch = self.peek()
+            if ch in '0123456789':
+                increment = int(ch)
+                if increment == 0:
+                    raise ScannerError("while scanning a block scalar", start_mark,
+                            "expected indentation indicator in the range 1-9, but found 0",
+                            self.get_mark())
+                self.forward()
+        elif ch in '0123456789':
+            increment = int(ch)
+            if increment == 0:
+                raise ScannerError("while scanning a block scalar", start_mark,
+                        "expected indentation indicator in the range 1-9, but found 0",
+                        self.get_mark())
+            self.forward()
+            ch = self.peek()
+            if ch in '+-':
+                if ch == '+':
+                    chomping = True
+                else:
+                    chomping = False
+                self.forward()
+        ch = self.peek()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a block scalar", start_mark,
+                    "expected chomping or indentation indicators, but found %r"
+                    % ch, self.get_mark())
+        return chomping, increment
+
+    def scan_block_scalar_ignored_line(self, start_mark):
+        # See the specification for details.
+        while self.peek() == ' ':
+            self.forward()
+        if self.peek() == '#':
+            while self.peek() not in '\0\r\n\x85\u2028\u2029':
+                self.forward()
+        ch = self.peek()
+        if ch not in '\0\r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a block scalar", start_mark,
+                    "expected a comment or a line break, but found %r" % ch,
+                    self.get_mark())
+        self.scan_line_break()
+
+    def scan_block_scalar_indentation(self):
+        # See the specification for details.
+        chunks = []
+        max_indent = 0
+        end_mark = self.get_mark()
+        while self.peek() in ' \r\n\x85\u2028\u2029':
+            if self.peek() != ' ':
+                chunks.append(self.scan_line_break())
+                end_mark = self.get_mark()
+            else:
+                self.forward()
+                if self.column > max_indent:
+                    max_indent = self.column
+        return chunks, max_indent, end_mark
+
+    def scan_block_scalar_breaks(self, indent):
+        # See the specification for details.
+        chunks = []
+        end_mark = self.get_mark()
+        while self.column < indent and self.peek() == ' ':
+            self.forward()
+        while self.peek() in '\r\n\x85\u2028\u2029':
+            chunks.append(self.scan_line_break())
+            end_mark = self.get_mark()
+            while self.column < indent and self.peek() == ' ':
+                self.forward()
+        return chunks, end_mark
+
+    def scan_flow_scalar(self, style):
+        # See the specification for details.
+        # Note that we loosen the indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark their beginning and end. Therefore we are less restrictive than
+        # the specification requires. We only need to check
+        # that document separators are not included in scalars.
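+        #
+        # Illustrative example: the single-quoted input 'it''s' produces the
+        # scalar value "it's"; the doubled quote is handled as an escaped
+        # quote in scan_flow_scalar_non_spaces below.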
+        if style == '"':
+            double = True
+        else:
+            double = False
+        chunks = []
+        start_mark = self.get_mark()
+        quote = self.peek()
+        self.forward()
+        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        while self.peek() != quote:
+            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        self.forward()
+        end_mark = self.get_mark()
+        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+                style)
+
+    ESCAPE_REPLACEMENTS = {
+        '0':    '\0',
+        'a':    '\x07',
+        'b':    '\x08',
+        't':    '\x09',
+        '\t':   '\x09',
+        'n':    '\x0A',
+        'v':    '\x0B',
+        'f':    '\x0C',
+        'r':    '\x0D',
+        'e':    '\x1B',
+        ' ':    '\x20',
+        '\"':   '\"',
+        '\\':   '\\',
+        'N':    '\x85',
+        '_':    '\xA0',
+        'L':    '\u2028',
+        'P':    '\u2029',
+    }
+
+    ESCAPE_CODES = {
+        'x':    2,
+        'u':    4,
+        'U':    8,
+    }
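+
+    # Illustrative note (not in the original source): in a double-quoted
+    # scalar, "\x41" is read through ESCAPE_CODES['x'] (2 hex digits) and
+    # becomes 'A', while "\n" is mapped through ESCAPE_REPLACEMENTS to a real
+    # line feed.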
+
+    def scan_flow_scalar_non_spaces(self, double, start_mark):
+        # See the specification for details.
+        chunks = []
+        while True:
+            length = 0
+            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+                length += 1
+            if length:
+                chunks.append(self.prefix(length))
+                self.forward(length)
+            ch = self.peek()
+            if not double and ch == '\'' and self.peek(1) == '\'':
+                chunks.append('\'')
+                self.forward(2)
+            elif (double and ch == '\'') or (not double and ch in '\"\\'):
+                chunks.append(ch)
+                self.forward()
+            elif double and ch == '\\':
+                self.forward()
+                ch = self.peek()
+                if ch in self.ESCAPE_REPLACEMENTS:
+                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+                    self.forward()
+                elif ch in self.ESCAPE_CODES:
+                    length = self.ESCAPE_CODES[ch]
+                    self.forward()
+                    for k in range(length):
+                        if self.peek(k) not in '0123456789ABCDEFabcdef':
+                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                    "expected escape sequence of %d hexadecimal numbers, but found %r" %
+                                        (length, self.peek(k)), self.get_mark())
+                    code = int(self.prefix(length), 16)
+                    chunks.append(chr(code))
+                    self.forward(length)
+                elif ch in '\r\n\x85\u2028\u2029':
+                    self.scan_line_break()
+                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+                else:
+                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                            "found unknown escape character %r" % ch, self.get_mark())
+            else:
+                return chunks
+
+    def scan_flow_scalar_spaces(self, double, start_mark):
+        # See the specification for details.
+        chunks = []
+        length = 0
+        while self.peek(length) in ' \t':
+            length += 1
+        whitespaces = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch == '\0':
+            raise ScannerError("while scanning a quoted scalar", start_mark,
+                    "found unexpected end of stream", self.get_mark())
+        elif ch in '\r\n\x85\u2028\u2029':
+            line_break = self.scan_line_break()
+            breaks = self.scan_flow_scalar_breaks(double, start_mark)
+            if line_break != '\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(' ')
+            chunks.extend(breaks)
+        else:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_flow_scalar_breaks(self, double, start_mark):
+        # See the specification for details.
+        chunks = []
+        while True:
+            # Instead of checking indentation, we check for document
+            # separators.
+            prefix = self.prefix(3)
+            if (prefix == '---' or prefix == '...')   \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                raise ScannerError("while scanning a quoted scalar", start_mark,
+                        "found unexpected document separator", self.get_mark())
+            while self.peek() in ' \t':
+                self.forward()
+            if self.peek() in '\r\n\x85\u2028\u2029':
+                chunks.append(self.scan_line_break())
+            else:
+                return chunks
+
+    def scan_plain(self):
+        # See the specification for details.
+        # We add an additional restriction for the flow context:
+        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
+        # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+        chunks = []
+        start_mark = self.get_mark()
+        end_mark = start_mark
+        indent = self.indent+1
+        # We allow zero indentation for scalars, but then we need to check for
+        # document separators at the beginning of the line.
+        #if indent == 0:
+        #    indent = 1
+        spaces = []
+        while True:
+            length = 0
+            if self.peek() == '#':
+                break
+            while True:
+                ch = self.peek(length)
+                if ch in '\0 \t\r\n\x85\u2028\u2029'    \
+                        or (not self.flow_level and ch == ':' and
+                                self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
+                        or (self.flow_level and ch in ',:?[]{}'):
+                    break
+                length += 1
+            # It's not clear what we should do with ':' in the flow context.
+            if (self.flow_level and ch == ':'
+                    and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
+                self.forward(length)
+                raise ScannerError("while scanning a plain scalar", start_mark,
+                    "found unexpected ':'", self.get_mark(),
+                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+            if length == 0:
+                break
+            self.allow_simple_key = False
+            chunks.extend(spaces)
+            chunks.append(self.prefix(length))
+            self.forward(length)
+            end_mark = self.get_mark()
+            spaces = self.scan_plain_spaces(indent, start_mark)
+            if not spaces or self.peek() == '#' \
+                    or (not self.flow_level and self.column < indent):
+                break
+        return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+    def scan_plain_spaces(self, indent, start_mark):
+        # See the specification for details.
+        # The specification is really confusing about tabs in plain scalars.
+        # We just forbid them completely. Do not use tabs in YAML!
+        chunks = []
+        length = 0
+        while self.peek(length) in ' ':
+            length += 1
+        whitespaces = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch in '\r\n\x85\u2028\u2029':
+            line_break = self.scan_line_break()
+            self.allow_simple_key = True
+            prefix = self.prefix(3)
+            if (prefix == '---' or prefix == '...')   \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                return
+            breaks = []
+            while self.peek() in ' \r\n\x85\u2028\u2029':
+                if self.peek() == ' ':
+                    self.forward()
+                else:
+                    breaks.append(self.scan_line_break())
+                    prefix = self.prefix(3)
+                    if (prefix == '---' or prefix == '...')   \
+                            and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                        return
+            if line_break != '\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(' ')
+            chunks.extend(breaks)
+        elif whitespaces:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_tag_handle(self, name, start_mark):
+        # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+        # tag handles. We allow it anyway.
+        ch = self.peek()
+        if ch != '!':
+            raise ScannerError("while scanning a %s" % name, start_mark,
+                    "expected '!', but found %r" % ch, self.get_mark())
+        length = 1
+        ch = self.peek(length)
+        if ch != ' ':
+            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                    or ch in '-_':
+                length += 1
+                ch = self.peek(length)
+            if ch != '!':
+                self.forward(length)
+                raise ScannerError("while scanning a %s" % name, start_mark,
+                        "expected '!', but found %r" % ch, self.get_mark())
+            length += 1
+        value = self.prefix(length)
+        self.forward(length)
+        return value
+
+    def scan_tag_uri(self, name, start_mark):
+        # See the specification for details.
+        # Note: we do not check if URI is well-formed.
+        chunks = []
+        length = 0
+        ch = self.peek(length)
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+            if ch == '%':
+                chunks.append(self.prefix(length))
+                self.forward(length)
+                length = 0
+                chunks.append(self.scan_uri_escapes(name, start_mark))
+            else:
+                length += 1
+            ch = self.peek(length)
+        if length:
+            chunks.append(self.prefix(length))
+            self.forward(length)
+            length = 0
+        if not chunks:
+            raise ScannerError("while parsing a %s" % name, start_mark,
+                    "expected URI, but found %r" % ch, self.get_mark())
+        return ''.join(chunks)
+
+    def scan_uri_escapes(self, name, start_mark):
+        # See the specification for details.
+        codes = []
+        mark = self.get_mark()
+        while self.peek() == '%':
+            self.forward()
+            for k in range(2):
+                if self.peek(k) not in '0123456789ABCDEFabcdef':
+                    raise ScannerError("while scanning a %s" % name, start_mark,
+                            "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
+                            % self.peek(k), self.get_mark())
+            codes.append(int(self.prefix(2), 16))
+            self.forward(2)
+        try:
+            value = bytes(codes).decode('utf-8')
+        except UnicodeDecodeError as exc:
+            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+        return value
+
+    def scan_line_break(self):
+        # Transforms:
+        #   '\r\n'      :   '\n'
+        #   '\r'        :   '\n'
+        #   '\n'        :   '\n'
+        #   '\x85'      :   '\n'
+        #   '\u2028'    :   '\u2028'
+        #   '\u2029'    :   '\u2029'
+        #   default     :   ''
+        ch = self.peek()
+        if ch in '\r\n\x85':
+            if self.prefix(2) == '\r\n':
+                self.forward(2)
+            else:
+                self.forward()
+            return '\n'
+        elif ch in '\u2028\u2029':
+            self.forward()
+            return ch
+        return ''
+
+#try:
+#    import psyco
+#    psyco.bind(Scanner)
+#except ImportError:
+#    pass
+

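For context on the plain-scalar and line-break handling above, here is a minimal sketch of how the vendored scanner can be exercised through the package-level API. It assumes the copy under mncheck/ext/yaml re-exports PyYAML's scan() helper and token classes, as upstream PyYAML does; the sample document string is purely illustrative.

    from mncheck.ext import yaml

    # '\r\n' line breaks are normalized to '\n' by scan_line_break(); plain
    # (unquoted) scalars come back as ScalarToken objects with plain=True.
    document = "key: plain value\r\nother: second line\r\n"

    for token in yaml.scan(document):
        if isinstance(token, yaml.ScalarToken):
            print(token.value, token.plain)
    # key True / plain value True / other True / second line True
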
+ 111 - 111
ext/yaml/serializer.py → mncheck/ext/yaml/serializer.py

@@ -1,111 +1,111 @@
-
-__all__ = ['Serializer', 'SerializerError']
-
-from .error import YAMLError
-from .events import *
-from .nodes import *
-
-class SerializerError(YAMLError):
-    pass
-
-class Serializer:
-
-    ANCHOR_TEMPLATE = 'id%03d'
-
-    def __init__(self, encoding=None,
-            explicit_start=None, explicit_end=None, version=None, tags=None):
-        self.use_encoding = encoding
-        self.use_explicit_start = explicit_start
-        self.use_explicit_end = explicit_end
-        self.use_version = version
-        self.use_tags = tags
-        self.serialized_nodes = {}
-        self.anchors = {}
-        self.last_anchor_id = 0
-        self.closed = None
-
-    def open(self):
-        if self.closed is None:
-            self.emit(StreamStartEvent(encoding=self.use_encoding))
-            self.closed = False
-        elif self.closed:
-            raise SerializerError("serializer is closed")
-        else:
-            raise SerializerError("serializer is already opened")
-
-    def close(self):
-        if self.closed is None:
-            raise SerializerError("serializer is not opened")
-        elif not self.closed:
-            self.emit(StreamEndEvent())
-            self.closed = True
-
-    #def __del__(self):
-    #    self.close()
-
-    def serialize(self, node):
-        if self.closed is None:
-            raise SerializerError("serializer is not opened")
-        elif self.closed:
-            raise SerializerError("serializer is closed")
-        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
-            version=self.use_version, tags=self.use_tags))
-        self.anchor_node(node)
-        self.serialize_node(node, None, None)
-        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
-        self.serialized_nodes = {}
-        self.anchors = {}
-        self.last_anchor_id = 0
-
-    def anchor_node(self, node):
-        if node in self.anchors:
-            if self.anchors[node] is None:
-                self.anchors[node] = self.generate_anchor(node)
-        else:
-            self.anchors[node] = None
-            if isinstance(node, SequenceNode):
-                for item in node.value:
-                    self.anchor_node(item)
-            elif isinstance(node, MappingNode):
-                for key, value in node.value:
-                    self.anchor_node(key)
-                    self.anchor_node(value)
-
-    def generate_anchor(self, node):
-        self.last_anchor_id += 1
-        return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
-    def serialize_node(self, node, parent, index):
-        alias = self.anchors[node]
-        if node in self.serialized_nodes:
-            self.emit(AliasEvent(alias))
-        else:
-            self.serialized_nodes[node] = True
-            self.descend_resolver(parent, index)
-            if isinstance(node, ScalarNode):
-                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
-                default_tag = self.resolve(ScalarNode, node.value, (False, True))
-                implicit = (node.tag == detected_tag), (node.tag == default_tag)
-                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
-                    style=node.style))
-            elif isinstance(node, SequenceNode):
-                implicit = (node.tag
-                            == self.resolve(SequenceNode, node.value, True))
-                self.emit(SequenceStartEvent(alias, node.tag, implicit,
-                    flow_style=node.flow_style))
-                index = 0
-                for item in node.value:
-                    self.serialize_node(item, node, index)
-                    index += 1
-                self.emit(SequenceEndEvent())
-            elif isinstance(node, MappingNode):
-                implicit = (node.tag
-                            == self.resolve(MappingNode, node.value, True))
-                self.emit(MappingStartEvent(alias, node.tag, implicit,
-                    flow_style=node.flow_style))
-                for key, value in node.value:
-                    self.serialize_node(key, node, None)
-                    self.serialize_node(value, node, key)
-                self.emit(MappingEndEvent())
-            self.ascend_resolver()
-
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+    pass
+
+class Serializer:
+
+    ANCHOR_TEMPLATE = 'id%03d'
+
+    def __init__(self, encoding=None,
+            explicit_start=None, explicit_end=None, version=None, tags=None):
+        self.use_encoding = encoding
+        self.use_explicit_start = explicit_start
+        self.use_explicit_end = explicit_end
+        self.use_version = version
+        self.use_tags = tags
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+        self.closed = None
+
+    def open(self):
+        if self.closed is None:
+            self.emit(StreamStartEvent(encoding=self.use_encoding))
+            self.closed = False
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        else:
+            raise SerializerError("serializer is already opened")
+
+    def close(self):
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif not self.closed:
+            self.emit(StreamEndEvent())
+            self.closed = True
+
+    #def __del__(self):
+    #    self.close()
+
+    def serialize(self, node):
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+            version=self.use_version, tags=self.use_tags))
+        self.anchor_node(node)
+        self.serialize_node(node, None, None)
+        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+
+    def anchor_node(self, node):
+        if node in self.anchors:
+            if self.anchors[node] is None:
+                self.anchors[node] = self.generate_anchor(node)
+        else:
+            self.anchors[node] = None
+            if isinstance(node, SequenceNode):
+                for item in node.value:
+                    self.anchor_node(item)
+            elif isinstance(node, MappingNode):
+                for key, value in node.value:
+                    self.anchor_node(key)
+                    self.anchor_node(value)
+
+    def generate_anchor(self, node):
+        self.last_anchor_id += 1
+        return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+    def serialize_node(self, node, parent, index):
+        alias = self.anchors[node]
+        if node in self.serialized_nodes:
+            self.emit(AliasEvent(alias))
+        else:
+            self.serialized_nodes[node] = True
+            self.descend_resolver(parent, index)
+            if isinstance(node, ScalarNode):
+                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+                default_tag = self.resolve(ScalarNode, node.value, (False, True))
+                implicit = (node.tag == detected_tag), (node.tag == default_tag)
+                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+                    style=node.style))
+            elif isinstance(node, SequenceNode):
+                implicit = (node.tag
+                            == self.resolve(SequenceNode, node.value, True))
+                self.emit(SequenceStartEvent(alias, node.tag, implicit,
+                    flow_style=node.flow_style))
+                index = 0
+                for item in node.value:
+                    self.serialize_node(item, node, index)
+                    index += 1
+                self.emit(SequenceEndEvent())
+            elif isinstance(node, MappingNode):
+                implicit = (node.tag
+                            == self.resolve(MappingNode, node.value, True))
+                self.emit(MappingStartEvent(alias, node.tag, implicit,
+                    flow_style=node.flow_style))
+                for key, value in node.value:
+                    self.serialize_node(key, node, None)
+                    self.serialize_node(value, node, key)
+                self.emit(MappingEndEvent())
+            self.ascend_resolver()
+

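As a rough usage sketch of the Serializer lifecycle above: open() emits the stream start, serialize() walks a node graph and hands an id00N anchor to every node referenced more than once, and close() emits the stream end. The snippet assumes the vendored mncheck.ext.yaml exposes the package-level compose()/serialize() helpers, as upstream PyYAML does.

    from mncheck.ext import yaml

    # compose() builds the node graph; the anchored mapping is a single node
    # referenced twice, so serialize() emits it once as '&id001' and then as '*id001'.
    node = yaml.compose("base: &b {x: 1}\nderived: *b\n")
    print(yaml.serialize(node))
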
+ 104 - 104
ext/yaml/tokens.py → mncheck/ext/yaml/tokens.py

@@ -1,104 +1,104 @@
-
-class Token(object):
-    def __init__(self, start_mark, end_mark):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        attributes = [key for key in self.__dict__
-                if not key.endswith('_mark')]
-        attributes.sort()
-        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
-                for key in attributes])
-        return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-#    id = '<byte order mark>'
-
-class DirectiveToken(Token):
-    id = '<directive>'
-    def __init__(self, name, value, start_mark, end_mark):
-        self.name = name
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class DocumentStartToken(Token):
-    id = '<document start>'
-
-class DocumentEndToken(Token):
-    id = '<document end>'
-
-class StreamStartToken(Token):
-    id = '<stream start>'
-    def __init__(self, start_mark=None, end_mark=None,
-            encoding=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.encoding = encoding
-
-class StreamEndToken(Token):
-    id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
-    id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
-    id = '<block mapping start>'
-
-class BlockEndToken(Token):
-    id = '<block end>'
-
-class FlowSequenceStartToken(Token):
-    id = '['
-
-class FlowMappingStartToken(Token):
-    id = '{'
-
-class FlowSequenceEndToken(Token):
-    id = ']'
-
-class FlowMappingEndToken(Token):
-    id = '}'
-
-class KeyToken(Token):
-    id = '?'
-
-class ValueToken(Token):
-    id = ':'
-
-class BlockEntryToken(Token):
-    id = '-'
-
-class FlowEntryToken(Token):
-    id = ','
-
-class AliasToken(Token):
-    id = '<alias>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class AnchorToken(Token):
-    id = '<anchor>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class TagToken(Token):
-    id = '<tag>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class ScalarToken(Token):
-    id = '<scalar>'
-    def __init__(self, value, plain, start_mark, end_mark, style=None):
-        self.value = value
-        self.plain = plain
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.style = style
-
+
+class Token(object):
+    def __init__(self, start_mark, end_mark):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+    def __repr__(self):
+        attributes = [key for key in self.__dict__
+                if not key.endswith('_mark')]
+        attributes.sort()
+        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+                for key in attributes])
+        return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+#    id = '<byte order mark>'
+
+class DirectiveToken(Token):
+    id = '<directive>'
+    def __init__(self, name, value, start_mark, end_mark):
+        self.name = name
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+    id = '<document start>'
+
+class DocumentEndToken(Token):
+    id = '<document end>'
+
+class StreamStartToken(Token):
+    id = '<stream start>'
+    def __init__(self, start_mark=None, end_mark=None,
+            encoding=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.encoding = encoding
+
+class StreamEndToken(Token):
+    id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+    id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+    id = '<block mapping start>'
+
+class BlockEndToken(Token):
+    id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+    id = '['
+
+class FlowMappingStartToken(Token):
+    id = '{'
+
+class FlowSequenceEndToken(Token):
+    id = ']'
+
+class FlowMappingEndToken(Token):
+    id = '}'
+
+class KeyToken(Token):
+    id = '?'
+
+class ValueToken(Token):
+    id = ':'
+
+class BlockEntryToken(Token):
+    id = '-'
+
+class FlowEntryToken(Token):
+    id = ','
+
+class AliasToken(Token):
+    id = '<alias>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class AnchorToken(Token):
+    id = '<anchor>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class TagToken(Token):
+    id = '<tag>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class ScalarToken(Token):
+    id = '<scalar>'
+    def __init__(self, value, plain, start_mark, end_mark, style=None):
+        self.value = value
+        self.plain = plain
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+

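A small illustration of the Token classes above: the base __repr__ drops every attribute ending in '_mark' and sorts the rest, and each subclass carries a class-level id used in parser error messages. The import path below simply follows the new mncheck/ext/yaml layout and is assumed to be importable.

    from mncheck.ext.yaml.tokens import FlowEntryToken, ScalarToken

    tok = ScalarToken('hello', True, None, None)
    print(tok)                            # ScalarToken(plain=True, style=None, value='hello')
    print(FlowEntryToken(None, None).id)  # ','
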
+ 110 - 110
ext/zipp.py → mncheck/ext/zipp.py

@@ -1,110 +1,110 @@
-"""
->>> root = Path(getfixture('zipfile_abcde'))
->>> a, b = root.iterdir()
->>> a
-Path('abcde.zip', 'a.txt')
->>> b
-Path('abcde.zip', 'b/')
->>> b.name
-'b'
->>> c = b / 'c.txt'
->>> c
-Path('abcde.zip', 'b/c.txt')
->>> c.name
-'c.txt'
->>> c.read_text()
-'content of c'
->>> c.exists()
-True
->>> (b / 'missing.txt').exists()
-False
->>> str(c)
-'abcde.zip/b/c.txt'
-"""
-
-from __future__ import division
-
-import io
-import sys
-import posixpath
-import zipfile
-import operator
-import functools
-
-__metaclass__ = type
-
-
-class Path:
-    __repr = '{self.__class__.__name__}({self.root.filename!r}, {self.at!r})'
-
-    def __init__(self, root, at=''):
-        self.root = root if isinstance(root, zipfile.ZipFile) \
-            else zipfile.ZipFile(self._pathlib_compat(root))
-        self.at = at
-
-    @staticmethod
-    def _pathlib_compat(path):
-        """
-        For path-like objects, convert to a filename for compatibility
-        on Python 3.6.1 and earlier.
-        """
-        try:
-            return path.__fspath__()
-        except AttributeError:
-            return str(path)
-
-    @property
-    def open(self):
-        return functools.partial(self.root.open, self.at)
-
-    @property
-    def name(self):
-        return posixpath.basename(self.at.rstrip('/'))
-
-    def read_text(self, *args, **kwargs):
-        with self.open() as strm:
-            return io.TextIOWrapper(strm, *args, **kwargs).read()
-
-    def read_bytes(self):
-        with self.open() as strm:
-            return strm.read()
-
-    def _is_child(self, path):
-        return posixpath.dirname(path.at.rstrip('/')) == self.at.rstrip('/')
-
-    def _next(self, at):
-        return Path(self.root, at)
-
-    def is_dir(self):
-        return not self.at or self.at.endswith('/')
-
-    def is_file(self):
-        return not self.is_dir()
-
-    def exists(self):
-        return self.at in self.root.namelist()
-
-    def iterdir(self):
-        if not self.is_dir():
-            raise ValueError("Can't listdir a file")
-        names = map(operator.attrgetter('filename'), self.root.infolist())
-        subs = map(self._next, names)
-        return filter(self._is_child, subs)
-
-    def __str__(self):
-        return posixpath.join(self.root.filename, self.at)
-
-    def __repr__(self):
-        return self.__repr.format(self=self)
-
-    def __truediv__(self, add):
-        add = self._pathlib_compat(add)
-        next = posixpath.join(self.at, add)
-        next_dir = posixpath.join(self.at, add, '')
-        names = self.root.namelist()
-        return self._next(
-            next_dir if next not in names and next_dir in names else next
-        )
-
-    if sys.version_info < (3,):
-        __div__ = __truediv__
+"""
+>>> root = Path(getfixture('zipfile_abcde'))
+>>> a, b = root.iterdir()
+>>> a
+Path('abcde.zip', 'a.txt')
+>>> b
+Path('abcde.zip', 'b/')
+>>> b.name
+'b'
+>>> c = b / 'c.txt'
+>>> c
+Path('abcde.zip', 'b/c.txt')
+>>> c.name
+'c.txt'
+>>> c.read_text()
+'content of c'
+>>> c.exists()
+True
+>>> (b / 'missing.txt').exists()
+False
+>>> str(c)
+'abcde.zip/b/c.txt'
+"""
+
+from __future__ import division
+
+import io
+import sys
+import posixpath
+import zipfile
+import operator
+import functools
+
+__metaclass__ = type
+
+
+class Path:
+    __repr = '{self.__class__.__name__}({self.root.filename!r}, {self.at!r})'
+
+    def __init__(self, root, at=''):
+        self.root = root if isinstance(root, zipfile.ZipFile) \
+            else zipfile.ZipFile(self._pathlib_compat(root))
+        self.at = at
+
+    @staticmethod
+    def _pathlib_compat(path):
+        """
+        For path-like objects, convert to a filename for compatibility
+        on Python 3.6.1 and earlier.
+        """
+        try:
+            return path.__fspath__()
+        except AttributeError:
+            return str(path)
+
+    @property
+    def open(self):
+        return functools.partial(self.root.open, self.at)
+
+    @property
+    def name(self):
+        return posixpath.basename(self.at.rstrip('/'))
+
+    def read_text(self, *args, **kwargs):
+        with self.open() as strm:
+            return io.TextIOWrapper(strm, *args, **kwargs).read()
+
+    def read_bytes(self):
+        with self.open() as strm:
+            return strm.read()
+
+    def _is_child(self, path):
+        return posixpath.dirname(path.at.rstrip('/')) == self.at.rstrip('/')
+
+    def _next(self, at):
+        return Path(self.root, at)
+
+    def is_dir(self):
+        return not self.at or self.at.endswith('/')
+
+    def is_file(self):
+        return not self.is_dir()
+
+    def exists(self):
+        return self.at in self.root.namelist()
+
+    def iterdir(self):
+        if not self.is_dir():
+            raise ValueError("Can't listdir a file")
+        names = map(operator.attrgetter('filename'), self.root.infolist())
+        subs = map(self._next, names)
+        return filter(self._is_child, subs)
+
+    def __str__(self):
+        return posixpath.join(self.root.filename, self.at)
+
+    def __repr__(self):
+        return self.__repr.format(self=self)
+
+    def __truediv__(self, add):
+        add = self._pathlib_compat(add)
+        next = posixpath.join(self.at, add)
+        next_dir = posixpath.join(self.at, add, '')
+        names = self.root.namelist()
+        return self._next(
+            next_dir if next not in names and next_dir in names else next
+        )
+
+    if sys.version_info < (3,):
+        __div__ = __truediv__

+ 0 - 0
icon.png → mncheck/icon.png


+ 3 - 3
main.py → mncheck/main.py

@@ -10,15 +10,15 @@
  ***************************************************************************/
 
 """
-from core import logging_
+from mncheck.core import logging_
 import logging
 
 from PyQt5.QtCore import Qt
 from PyQt5.QtGui import QIcon
 from PyQt5.QtWidgets import QAction, QStyle, QApplication
 
-from core.constants import MAIN, VERSION
-from ui.dlg_main import DlgMain
+from mncheck.core.constants import MAIN, VERSION
+from mncheck.ui.dlg_main import DlgMain
 
 
 __VERSION__ = VERSION

+ 1 - 1
metadata.txt → mncheck/metadata.txt

@@ -1,6 +1,6 @@
 # THIS FILE WAS GENERATED BY THE BUILD.PY SCRIPT, DO NOT MODIFY IT DIRECTLY
 [general]
-name = MnCheck
+name = mncheck
 qgisminimumversion = 3.4
 description = Contrôle des données FTTH format MN
 version = 1.0.0

+ 0 - 0
schemas/__init__.py → mncheck/schemas/__init__.py


+ 4 - 4
schemas/mn1_rec.py → mncheck/schemas/mn1_rec.py

@@ -6,14 +6,14 @@
 '''
 
 import logging
-from qgis.core import QgsProject
+from qgis.core import QgsProject  #@UnresolvedImport
 
-from core.cerberus_ import is_float, is_multi_int, is_int, \
+from mncheck.core.cerberus_ import is_float, is_multi_int, is_int, \
     is_modern_french_date, CerberusValidator, CerberusErrorHandler, \
     _translate_messages, is_positive_float, is_positive_int, \
     is_strictly_positive_float, is_strictly_positive_int
-from core.checking import BaseChecker, CheckingException
-from core.mncheck import QgsModel
+from mncheck.core.checking import BaseChecker, CheckingException
+from mncheck.core.mncheck import QgsModel
 
 
 logger = logging.getLogger("mncheck")

+ 4 - 4
schemas/mn2_rec.py → mncheck/schemas/mn2_rec.py

@@ -6,14 +6,14 @@
 '''
 
 import logging
-from qgis.core import QgsProject
+from qgis.core import QgsProject  #@UnresolvedImport
 
-from core.cerberus_ import is_float, is_multi_int, is_int, \
+from mncheck.core.cerberus_ import is_float, is_multi_int, is_int, \
     is_modern_french_date, CerberusValidator, CerberusErrorHandler, \
     _translate_messages, is_positive_int, is_strictly_positive_float, \
     is_positive_float, is_strictly_positive_int, _tofloat
-from core.checking import BaseChecker, CheckingException
-from core.mncheck import QgsModel
+from mncheck.core.checking import BaseChecker, CheckingException
+from mncheck.core.mncheck import QgsModel
 
 
 logger = logging.getLogger("mncheck")

+ 0 - 0
test/__init__.py → mncheck/test/__init__.py


+ 1 - 1
test/_base.py → mncheck/test/_base.py

@@ -2,7 +2,7 @@
 
 @author: olivier.massot, 2019
 '''
-from qgis.core import QgsProject
+from qgis.core import QgsProject  #@UnresolvedImport
 import unittest
 
 

+ 0 - 0
test/projects/mn1_rec/0_empty/0_empty.qgz → mncheck/test/projects/mn1_rec/0_empty/0_empty.qgz


+ 0 - 0
test/projects/mn1_rec/0_empty/ARTERE_GEO.cpg → mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.cpg


+ 0 - 0
test/projects/mn1_rec/0_empty/ARTERE_GEO.dbf → mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.dbf


+ 0 - 0
test/projects/mn1_rec/0_empty/ARTERE_GEO.prj → mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.prj


+ 1 - 1
test/projects/mn1_rec/0_empty/ARTERE_GEO.qpj → mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.qpj

@@ -1 +1 @@
-PROJCS["RGF93 / CC49",GEOGCS["RGF93",DATUM["Reseau_Geodesique_Francais_1993",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6171"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4171"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",48.25],PARAMETER["standard_parallel_2",49.75],PARAMETER["latitude_of_origin",49],PARAMETER["central_meridian",3],PARAMETER["false_easting",1700000],PARAMETER["false_northing",8200000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3949"]]
+PROJCS["RGF93 / CC49",GEOGCS["RGF93",DATUM["Reseau_Geodesique_Francais_1993",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6171"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4171"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",48.25],PARAMETER["standard_parallel_2",49.75],PARAMETER["latitude_of_origin",49],PARAMETER["central_meridian",3],PARAMETER["false_easting",1700000],PARAMETER["false_northing",8200000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3949"]]

+ 0 - 0
test/projects/mn1_rec/0_empty/ARTERE_GEO.shp → mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.shp


+ 0 - 0
test/projects/mn1_rec/0_empty/ARTERE_GEO.shx → mncheck/test/projects/mn1_rec/0_empty/ARTERE_GEO.shx


+ 0 - 0
test/projects/mn1_rec/0_empty/CABLE_GEO.cpg → mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.cpg


+ 0 - 0
test/projects/mn1_rec/0_empty/CABLE_GEO.dbf → mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.dbf


+ 0 - 0
test/projects/mn1_rec/0_empty/CABLE_GEO.prj → mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.prj


+ 1 - 1
test/projects/mn1_rec/1_valid/CABLE_GEO.qpj → mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.qpj

@@ -1 +1 @@
-PROJCS["RGF93 / CC49",GEOGCS["RGF93",DATUM["Reseau_Geodesique_Francais_1993",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6171"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4171"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",48.25],PARAMETER["standard_parallel_2",49.75],PARAMETER["latitude_of_origin",49],PARAMETER["central_meridian",3],PARAMETER["false_easting",1700000],PARAMETER["false_northing",8200000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3949"]]
+PROJCS["RGF93 / CC49",GEOGCS["RGF93",DATUM["Reseau_Geodesique_Francais_1993",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6171"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4171"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",48.25],PARAMETER["standard_parallel_2",49.75],PARAMETER["latitude_of_origin",49],PARAMETER["central_meridian",3],PARAMETER["false_easting",1700000],PARAMETER["false_northing",8200000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3949"]]

+ 0 - 0
test/projects/mn1_rec/0_empty/CABLE_GEO.shp → mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.shp


+ 0 - 0
test/projects/mn1_rec/0_empty/CABLE_GEO.shx → mncheck/test/projects/mn1_rec/0_empty/CABLE_GEO.shx


+ 0 - 0
test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.cpg → mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.cpg


+ 0 - 0
test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.dbf → mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.dbf


+ 0 - 0
test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.prj → mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.prj


+ 1 - 1
test/projects/mn1_rec/1_valid/EQUIPEMENT_PASSIF.qpj → mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.qpj

@@ -1 +1 @@
-PROJCS["RGF93 / CC49",GEOGCS["RGF93",DATUM["Reseau_Geodesique_Francais_1993",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6171"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4171"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",48.25],PARAMETER["standard_parallel_2",49.75],PARAMETER["latitude_of_origin",49],PARAMETER["central_meridian",3],PARAMETER["false_easting",1700000],PARAMETER["false_northing",8200000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3949"]]
+PROJCS["RGF93 / CC49",GEOGCS["RGF93",DATUM["Reseau_Geodesique_Francais_1993",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6171"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4171"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",48.25],PARAMETER["standard_parallel_2",49.75],PARAMETER["latitude_of_origin",49],PARAMETER["central_meridian",3],PARAMETER["false_easting",1700000],PARAMETER["false_northing",8200000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3949"]]

+ 0 - 0
test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.shp → mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.shp


+ 0 - 0
test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.shx → mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.shx


+ 0 - 0
test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.xlsx → mncheck/test/projects/mn1_rec/0_empty/EQUIPEMENT_PASSIF.xlsx


+ 0 - 0
test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.dbf → mncheck/test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.dbf


+ 0 - 0
test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.prj → mncheck/test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.prj


+ 0 - 0
test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.shp → mncheck/test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.shp


+ 0 - 0
test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.shx → mncheck/test/projects/mn1_rec/0_empty/FTTH_SITES_GEO_VALIDE.shx


+ 0 - 0
test/projects/mn1_rec/0_empty/NOEUD_GEO.cpg → mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.cpg


+ 0 - 0
test/projects/mn1_rec/0_empty/NOEUD_GEO.dbf → mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.dbf


+ 0 - 0
test/projects/mn1_rec/0_empty/NOEUD_GEO.prj → mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.prj


+ 1 - 1
test/projects/mn1_rec/1_valid/NOEUD_GEO.qpj → mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.qpj

@@ -1 +1 @@
-PROJCS["RGF93 / CC49",GEOGCS["RGF93",DATUM["Reseau_Geodesique_Francais_1993",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6171"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4171"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",48.25],PARAMETER["standard_parallel_2",49.75],PARAMETER["latitude_of_origin",49],PARAMETER["central_meridian",3],PARAMETER["false_easting",1700000],PARAMETER["false_northing",8200000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3949"]]
+PROJCS["RGF93 / CC49",GEOGCS["RGF93",DATUM["Reseau_Geodesique_Francais_1993",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6171"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4171"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",48.25],PARAMETER["standard_parallel_2",49.75],PARAMETER["latitude_of_origin",49],PARAMETER["central_meridian",3],PARAMETER["false_easting",1700000],PARAMETER["false_northing",8200000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3949"]]

+ 0 - 0
test/projects/mn1_rec/0_empty/NOEUD_GEO.shp → mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.shp


+ 0 - 0
test/projects/mn1_rec/0_empty/NOEUD_GEO.shx → mncheck/test/projects/mn1_rec/0_empty/NOEUD_GEO.shx


+ 0 - 0
test/projects/mn1_rec/0_empty/TRANCHEE_GEO.cpg → mncheck/test/projects/mn1_rec/0_empty/TRANCHEE_GEO.cpg


+ 0 - 0
test/projects/mn1_rec/0_empty/TRANCHEE_GEO.dbf → mncheck/test/projects/mn1_rec/0_empty/TRANCHEE_GEO.dbf


+ 0 - 0
test/projects/mn1_rec/0_empty/TRANCHEE_GEO.prj → mncheck/test/projects/mn1_rec/0_empty/TRANCHEE_GEO.prj


Some files were not shown because too many files changed in this diff