From 1ceab36e9b5fd1d7d565db050850879ef4c2470b Mon Sep 17 00:00:00 2001 From: Loudrob Date: Fri, 10 Apr 2015 07:50:25 -0400 Subject: [PATCH 1/2] fix daniels bank gui --- toontown/estate/BankGUI.py | 41 ++++++++++++++------------------------ 1 file changed, 15 insertions(+), 26 deletions(-) diff --git a/toontown/estate/BankGUI.py b/toontown/estate/BankGUI.py index 175ac522..19beeee4 100644 --- a/toontown/estate/BankGUI.py +++ b/toontown/estate/BankGUI.py @@ -62,8 +62,6 @@ class BankGui(DirectFrame): messenger.send(self.doneEvent, [0]) def __requestTransaction(self): - self.ignore(localAvatar.uniqueName('moneyChange')) - self.ignore(localAvatar.uniqueName('bankMoneyChange')) messenger.send(self.doneEvent, [self.__transactionAmount]) def __updateTransaction(self, amount): @@ -99,19 +97,14 @@ class BankGui(DirectFrame): def __runCounter(self, task): if task.time - task.prevTime < task.delayTime: return Task.cont - - task.delayTime /= 2 - task.prevTime = task.time - - if task.delayTime < 0.005: - task.amount *= 1.1 - - hitLimit = self.__updateTransaction(int(task.amount))[0] - - if hitLimit: - return Task.done - - return Task.cont + else: + task.delayTime = max(0.05, task.delayTime * 0.75) + task.prevTime = task.time + hitLimit, jar, bank, trans = self.__updateTransaction(task.delta) + if hitLimit: + return Task.done + else: + return Task.cont def __depositButtonUp(self, event): messenger.send('wakeup') @@ -119,13 +112,11 @@ class BankGui(DirectFrame): def __depositButtonDown(self, event): messenger.send('wakeup') - task = Task(self.__runCounter) - task.delayTime = 0.2 + task.delayTime = 0.4 task.prevTime = 0.0 - task.amount = 1.0 - hitLimit = self.__updateTransaction(int(task.amount))[0] - + task.delta = 1 + hitLimit, jar, bank, trans = self.__updateTransaction(task.delta) if not hitLimit: taskMgr.add(task, self.taskName('runCounter')) @@ -135,13 +126,11 @@ class BankGui(DirectFrame): def __withdrawButtonDown(self, event): messenger.send('wakeup') - task = 
Task(self.__runCounter) - task.delayTime = 0.2 + task.delayTime = 0.4 task.prevTime = 0.0 - task.amount = 1.0 - hitLimit = self.__updateTransaction(int(task.amount))[0] - + task.delta = -1 + hitLimit, jar, bank, trans = self.__updateTransaction(task.delta) if not hitLimit: taskMgr.add(task, self.taskName('runCounter')) @@ -149,4 +138,4 @@ class BankGui(DirectFrame): self.__updateTransaction(0) def __bankMoneyChange(self, bankMoney): - self.__updateTransaction(0) \ No newline at end of file + self.__updateTransaction(0) From 40a79453242fd90d404859a0e1c1f693270f5d01 Mon Sep 17 00:00:00 2001 From: Loudrob Date: Fri, 10 Apr 2015 07:57:49 -0400 Subject: [PATCH 2/2] idek --- .../pip-6.1.1.dist-info/DESCRIPTION.rst | 24 + .../pip-6.1.1.dist-info/METADATA | 52 + .../site-packages/pip-6.1.1.dist-info/RECORD | 473 ++ .../site-packages/pip-6.1.1.dist-info/WHEEL | 6 + .../pip-6.1.1.dist-info/entry_points.txt | 5 + .../pip-6.1.1.dist-info/top_level.txt | 1 + .../python/Lib/site-packages/pip/__init__.py | 315 ++ .../python/Lib/site-packages/pip/__main__.py | 19 + .../Lib/site-packages/pip/_vendor/__init__.py | 108 + .../pip/_vendor/_markerlib/__init__.py | 16 + .../pip/_vendor/_markerlib/markers.py | 119 + .../pip/_vendor/cachecontrol/__init__.py | 11 + .../pip/_vendor/cachecontrol/adapter.py | 117 + .../pip/_vendor/cachecontrol/cache.py | 39 + .../_vendor/cachecontrol/caches/__init__.py | 18 + .../_vendor/cachecontrol/caches/file_cache.py | 103 + .../cachecontrol/caches/redis_cache.py | 41 + .../pip/_vendor/cachecontrol/compat.py | 14 + .../pip/_vendor/cachecontrol/controller.py | 299 + .../pip/_vendor/cachecontrol/filewrapper.py | 63 + .../pip/_vendor/cachecontrol/heuristics.py | 134 + .../pip/_vendor/cachecontrol/serialize.py | 184 + .../pip/_vendor/cachecontrol/wrapper.py | 21 + .../pip/_vendor/colorama/__init__.py | 7 + .../pip/_vendor/colorama/ansi.py | 99 + .../pip/_vendor/colorama/ansitowin32.py | 228 + .../pip/_vendor/colorama/initialise.py | 66 + 
.../pip/_vendor/colorama/win32.py | 146 + .../pip/_vendor/colorama/winterm.py | 151 + .../pip/_vendor/distlib/__init__.py | 23 + .../pip/_vendor/distlib/_backport/__init__.py | 6 + .../pip/_vendor/distlib/_backport/misc.py | 41 + .../pip/_vendor/distlib/_backport/shutil.py | 761 +++ .../_vendor/distlib/_backport/sysconfig.cfg | 84 + .../_vendor/distlib/_backport/sysconfig.py | 788 +++ .../pip/_vendor/distlib/_backport/tarfile.py | 2607 +++++++++ .../pip/_vendor/distlib/compat.py | 1102 ++++ .../pip/_vendor/distlib/database.py | 1303 +++++ .../pip/_vendor/distlib/index.py | 513 ++ .../pip/_vendor/distlib/locators.py | 1233 ++++ .../pip/_vendor/distlib/manifest.py | 367 ++ .../pip/_vendor/distlib/markers.py | 190 + .../pip/_vendor/distlib/metadata.py | 1058 ++++ .../pip/_vendor/distlib/resources.py | 323 ++ .../pip/_vendor/distlib/scripts.py | 335 ++ .../site-packages/pip/_vendor/distlib/t32.exe | Bin 0 -> 91648 bytes .../site-packages/pip/_vendor/distlib/t64.exe | Bin 0 -> 95232 bytes .../site-packages/pip/_vendor/distlib/util.py | 1579 ++++++ .../pip/_vendor/distlib/version.py | 742 +++ .../site-packages/pip/_vendor/distlib/w32.exe | Bin 0 -> 88576 bytes .../site-packages/pip/_vendor/distlib/w64.exe | Bin 0 -> 92160 bytes .../pip/_vendor/distlib/wheel.py | 976 ++++ .../pip/_vendor/html5lib/__init__.py | 23 + .../pip/_vendor/html5lib/constants.py | 3104 ++++++++++ .../pip/_vendor/html5lib/filters/__init__.py | 0 .../pip/_vendor/html5lib/filters/_base.py | 12 + .../filters/alphabeticalattributes.py | 20 + .../html5lib/filters/inject_meta_charset.py | 65 + .../pip/_vendor/html5lib/filters/lint.py | 93 + .../_vendor/html5lib/filters/optionaltags.py | 205 + .../pip/_vendor/html5lib/filters/sanitizer.py | 12 + .../_vendor/html5lib/filters/whitespace.py | 38 + .../pip/_vendor/html5lib/html5parser.py | 2713 +++++++++ .../pip/_vendor/html5lib/ihatexml.py | 285 + .../pip/_vendor/html5lib/inputstream.py | 886 +++ .../pip/_vendor/html5lib/sanitizer.py | 271 + 
.../_vendor/html5lib/serializer/__init__.py | 16 + .../html5lib/serializer/htmlserializer.py | 320 ++ .../pip/_vendor/html5lib/tokenizer.py | 1731 ++++++ .../_vendor/html5lib/treeadapters/__init__.py | 0 .../pip/_vendor/html5lib/treeadapters/sax.py | 44 + .../_vendor/html5lib/treebuilders/__init__.py | 76 + .../_vendor/html5lib/treebuilders/_base.py | 377 ++ .../pip/_vendor/html5lib/treebuilders/dom.py | 227 + .../_vendor/html5lib/treebuilders/etree.py | 337 ++ .../html5lib/treebuilders/etree_lxml.py | 369 ++ .../_vendor/html5lib/treewalkers/__init__.py | 57 + .../pip/_vendor/html5lib/treewalkers/_base.py | 200 + .../pip/_vendor/html5lib/treewalkers/dom.py | 46 + .../pip/_vendor/html5lib/treewalkers/etree.py | 138 + .../html5lib/treewalkers/genshistream.py | 69 + .../_vendor/html5lib/treewalkers/lxmletree.py | 204 + .../_vendor/html5lib/treewalkers/pulldom.py | 63 + .../pip/_vendor/html5lib/trie/__init__.py | 12 + .../pip/_vendor/html5lib/trie/_base.py | 37 + .../pip/_vendor/html5lib/trie/datrie.py | 44 + .../pip/_vendor/html5lib/trie/py.py | 67 + .../pip/_vendor/html5lib/utils.py | 82 + .../site-packages/pip/_vendor/ipaddress.py | 2171 +++++++ .../pip/_vendor/lockfile/__init__.py | 326 ++ .../pip/_vendor/lockfile/linklockfile.py | 73 + .../pip/_vendor/lockfile/mkdirlockfile.py | 83 + .../pip/_vendor/lockfile/pidlockfile.py | 193 + .../pip/_vendor/lockfile/sqlitelockfile.py | 155 + .../pip/_vendor/lockfile/symlinklockfile.py | 69 + .../pip/_vendor/packaging/__about__.py | 31 + .../pip/_vendor/packaging/__init__.py | 24 + .../pip/_vendor/packaging/_compat.py | 40 + .../pip/_vendor/packaging/_structures.py | 78 + .../pip/_vendor/packaging/specifiers.py | 772 +++ .../pip/_vendor/packaging/version.py | 401 ++ .../pip/_vendor/pkg_resources/__init__.py | 3066 ++++++++++ .../_vendor/pkg_resources/tests/__init__.py | 0 .../pkg_resources/tests/test_pkg_resources.py | 111 + .../pkg_resources/tests/test_resources.py | 661 +++ .../pip/_vendor/progress/__init__.py | 123 + 
.../site-packages/pip/_vendor/progress/bar.py | 86 + .../pip/_vendor/progress/counter.py | 49 + .../pip/_vendor/progress/helpers.py | 92 + .../pip/_vendor/progress/spinner.py | 42 + .../site-packages/pip/_vendor/re-vendor.py | 34 + .../pip/_vendor/requests/__init__.py | 77 + .../pip/_vendor/requests/adapters.py | 437 ++ .../site-packages/pip/_vendor/requests/api.py | 146 + .../pip/_vendor/requests/auth.py | 211 + .../pip/_vendor/requests/cacert.pem | 5026 +++++++++++++++++ .../pip/_vendor/requests/certs.py | 25 + .../pip/_vendor/requests/compat.py | 62 + .../pip/_vendor/requests/cookies.py | 463 ++ .../pip/_vendor/requests/exceptions.py | 99 + .../pip/_vendor/requests/hooks.py | 45 + .../pip/_vendor/requests/models.py | 842 +++ .../pip/_vendor/requests/packages/__init__.py | 107 + .../requests/packages/chardet/__init__.py | 32 + .../requests/packages/chardet/big5freq.py | 925 +++ .../requests/packages/chardet/big5prober.py | 42 + .../requests/packages/chardet/chardetect.py | 80 + .../packages/chardet/chardistribution.py | 231 + .../packages/chardet/charsetgroupprober.py | 106 + .../packages/chardet/charsetprober.py | 62 + .../packages/chardet/codingstatemachine.py | 61 + .../requests/packages/chardet/compat.py | 34 + .../requests/packages/chardet/constants.py | 39 + .../requests/packages/chardet/cp949prober.py | 44 + .../requests/packages/chardet/escprober.py | 86 + .../requests/packages/chardet/escsm.py | 242 + .../requests/packages/chardet/eucjpprober.py | 90 + .../requests/packages/chardet/euckrfreq.py | 596 ++ .../requests/packages/chardet/euckrprober.py | 42 + .../requests/packages/chardet/euctwfreq.py | 428 ++ .../requests/packages/chardet/euctwprober.py | 41 + .../requests/packages/chardet/gb2312freq.py | 472 ++ .../requests/packages/chardet/gb2312prober.py | 41 + .../requests/packages/chardet/hebrewprober.py | 283 + .../requests/packages/chardet/jisfreq.py | 569 ++ .../requests/packages/chardet/jpcntx.py | 227 + .../packages/chardet/langbulgarianmodel.py | 
229 + .../packages/chardet/langcyrillicmodel.py | 329 ++ .../packages/chardet/langgreekmodel.py | 225 + .../packages/chardet/langhebrewmodel.py | 201 + .../packages/chardet/langhungarianmodel.py | 225 + .../packages/chardet/langthaimodel.py | 200 + .../requests/packages/chardet/latin1prober.py | 139 + .../packages/chardet/mbcharsetprober.py | 86 + .../packages/chardet/mbcsgroupprober.py | 54 + .../requests/packages/chardet/mbcssm.py | 572 ++ .../packages/chardet/sbcharsetprober.py | 120 + .../packages/chardet/sbcsgroupprober.py | 69 + .../requests/packages/chardet/sjisprober.py | 91 + .../packages/chardet/universaldetector.py | 170 + .../requests/packages/chardet/utf8prober.py | 76 + .../requests/packages/urllib3/__init__.py | 66 + .../requests/packages/urllib3/_collections.py | 320 ++ .../requests/packages/urllib3/connection.py | 262 + .../packages/urllib3/connectionpool.py | 796 +++ .../packages/urllib3/contrib/__init__.py | 0 .../packages/urllib3/contrib/ntlmpool.py | 114 + .../packages/urllib3/contrib/pyopenssl.py | 308 + .../requests/packages/urllib3/exceptions.py | 164 + .../requests/packages/urllib3/fields.py | 177 + .../requests/packages/urllib3/filepost.py | 93 + .../packages/urllib3/packages/__init__.py | 4 + .../packages/urllib3/packages/ordered_dict.py | 259 + .../requests/packages/urllib3/packages/six.py | 385 ++ .../packages/ssl_match_hostname/__init__.py | 13 + .../ssl_match_hostname/_implementation.py | 105 + .../requests/packages/urllib3/poolmanager.py | 280 + .../requests/packages/urllib3/request.py | 141 + .../requests/packages/urllib3/response.py | 353 ++ .../packages/urllib3/util/__init__.py | 24 + .../packages/urllib3/util/connection.py | 98 + .../requests/packages/urllib3/util/request.py | 71 + .../packages/urllib3/util/response.py | 22 + .../requests/packages/urllib3/util/retry.py | 285 + .../requests/packages/urllib3/util/ssl_.py | 266 + .../requests/packages/urllib3/util/timeout.py | 240 + .../requests/packages/urllib3/util/url.py | 212 + 
.../pip/_vendor/requests/sessions.py | 685 +++ .../pip/_vendor/requests/status_codes.py | 89 + .../pip/_vendor/requests/structures.py | 104 + .../pip/_vendor/requests/utils.py | 707 +++ .../Lib/site-packages/pip/_vendor/retrying.py | 267 + .../Lib/site-packages/pip/_vendor/six.py | 838 +++ .../Lib/site-packages/pip/basecommand.py | 284 + .../Lib/site-packages/pip/baseparser.py | 269 + .../Lib/site-packages/pip/cmdoptions.py | 458 ++ .../site-packages/pip/commands/__init__.py | 85 + .../site-packages/pip/commands/completion.py | 68 + .../Lib/site-packages/pip/commands/freeze.py | 66 + .../Lib/site-packages/pip/commands/help.py | 35 + .../Lib/site-packages/pip/commands/install.py | 424 ++ .../Lib/site-packages/pip/commands/list.py | 210 + .../Lib/site-packages/pip/commands/search.py | 139 + .../Lib/site-packages/pip/commands/show.py | 130 + .../site-packages/pip/commands/uninstall.py | 70 + .../Lib/site-packages/pip/commands/unzip.py | 9 + .../Lib/site-packages/pip/commands/wheel.py | 243 + .../Lib/site-packages/pip/commands/zip.py | 410 ++ .../Lib/site-packages/pip/compat/__init__.py | 103 + .../site-packages/pip/compat/dictconfig.py | 565 ++ .../python/Lib/site-packages/pip/download.py | 913 +++ .../Lib/site-packages/pip/exceptions.py | 47 + panda/python/Lib/site-packages/pip/index.py | 1202 ++++ .../python/Lib/site-packages/pip/locations.py | 259 + .../Lib/site-packages/pip/models/__init__.py | 4 + .../Lib/site-packages/pip/models/index.py | 16 + .../site-packages/pip/operations/__init__.py | 0 .../site-packages/pip/operations/freeze.py | 111 + .../Lib/site-packages/pip/pep425tags.py | 136 + .../Lib/site-packages/pip/req/__init__.py | 9 + .../Lib/site-packages/pip/req/req_file.py | 137 + .../Lib/site-packages/pip/req/req_install.py | 1161 ++++ .../site-packages/pip/req/req_requirement.py | 43 + .../Lib/site-packages/pip/req/req_set.py | 707 +++ .../site-packages/pip/req/req_uninstall.py | 206 + .../Lib/site-packages/pip/status_codes.py | 8 + 
.../Lib/site-packages/pip/utils/__init__.py | 848 +++ .../Lib/site-packages/pip/utils/appdirs.py | 256 + .../Lib/site-packages/pip/utils/build.py | 42 + .../site-packages/pip/utils/deprecation.py | 68 + .../Lib/site-packages/pip/utils/filesystem.py | 28 + .../Lib/site-packages/pip/utils/logging.py | 130 + .../Lib/site-packages/pip/utils/outdated.py | 149 + .../python/Lib/site-packages/pip/utils/ui.py | 199 + .../Lib/site-packages/pip/vcs/__init__.py | 348 ++ .../Lib/site-packages/pip/vcs/bazaar.py | 132 + panda/python/Lib/site-packages/pip/vcs/git.py | 209 + .../Lib/site-packages/pip/vcs/mercurial.py | 140 + .../Lib/site-packages/pip/vcs/subversion.py | 291 + panda/python/Lib/site-packages/pip/wheel.py | 624 ++ .../ply-3.4-py2.7.egg-info/PKG-INFO | 22 + .../ply-3.4-py2.7.egg-info/SOURCES.txt | 12 + .../dependency_links.txt | 1 + .../installed-files.txt | 15 + .../ply-3.4-py2.7.egg-info/top_level.txt | 1 + .../python/Lib/site-packages/ply/__init__.py | 4 + panda/python/Lib/site-packages/ply/cpp.py | 898 +++ panda/python/Lib/site-packages/ply/ctokens.py | 133 + panda/python/Lib/site-packages/ply/lex.py | 1058 ++++ panda/python/Lib/site-packages/ply/yacc.py | 3276 +++++++++++ tools/findterm.py | 19 + tools/findterm_otp.py | 19 + 252 files changed, 78129 insertions(+) create mode 100644 panda/python/Lib/site-packages/pip-6.1.1.dist-info/DESCRIPTION.rst create mode 100644 panda/python/Lib/site-packages/pip-6.1.1.dist-info/METADATA create mode 100644 panda/python/Lib/site-packages/pip-6.1.1.dist-info/RECORD create mode 100644 panda/python/Lib/site-packages/pip-6.1.1.dist-info/WHEEL create mode 100644 panda/python/Lib/site-packages/pip-6.1.1.dist-info/entry_points.txt create mode 100644 panda/python/Lib/site-packages/pip-6.1.1.dist-info/top_level.txt create mode 100644 panda/python/Lib/site-packages/pip/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/__main__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/__init__.py create mode 100644 
panda/python/Lib/site-packages/pip/_vendor/_markerlib/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/_markerlib/markers.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/cache.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/compat.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/controller.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/colorama/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/colorama/ansi.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/colorama/initialise.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/colorama/win32.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/colorama/winterm.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py create mode 100644 
panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/compat.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/database.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/index.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/locators.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/manifest.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/markers.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/metadata.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/resources.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/scripts.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/t32.exe create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/t64.exe create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/util.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/version.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/w32.exe create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/w64.exe create mode 100644 panda/python/Lib/site-packages/pip/_vendor/distlib/wheel.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/constants.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/_base.py create mode 100644 
panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/html5parser.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/ihatexml.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/inputstream.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/sanitizer.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/serializer/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/serializer/htmlserializer.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/tokenizer.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treebuilders/_base.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treebuilders/dom.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py create mode 100644 
panda/python/Lib/site-packages/pip/_vendor/html5lib/treewalkers/_base.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treewalkers/genshistream.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/trie/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/trie/_base.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/trie/datrie.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/trie/py.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/html5lib/utils.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/ipaddress.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/lockfile/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/lockfile/linklockfile.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/lockfile/mkdirlockfile.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/lockfile/pidlockfile.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/lockfile/sqlitelockfile.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/lockfile/symlinklockfile.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/packaging/__about__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/packaging/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/packaging/_compat.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/packaging/_structures.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/packaging/specifiers.py create mode 
100644 panda/python/Lib/site-packages/pip/_vendor/packaging/version.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/pkg_resources/tests/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/pkg_resources/tests/test_pkg_resources.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/pkg_resources/tests/test_resources.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/progress/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/progress/bar.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/progress/counter.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/progress/helpers.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/progress/spinner.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/re-vendor.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/adapters.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/api.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/auth.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/cacert.pem create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/certs.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/compat.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/cookies.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/exceptions.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/hooks.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/models.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/__init__.py create mode 100644 
panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/big5freq.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/big5prober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/charsetgroupprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/charsetprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/codingstatemachine.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/compat.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/constants.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/escprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/escsm.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/euckrfreq.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/euctwfreq.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/euctwprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/gb2312freq.py create mode 100644 
panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/gb2312prober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/jpcntx.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/langcyrillicmodel.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/langhebrewmodel.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/langhungarianmodel.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/langthaimodel.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/latin1prober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/mbcssm.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/sbcharsetprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/chardet/utf8prober.py create mode 100644 
panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/_collections.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/connection.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/connectionpool.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/fields.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/packages/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/packages/six.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/poolmanager.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/request.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/response.py create mode 100644 
panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/connection.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/request.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/response.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/retry.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/ssl_.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/sessions.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/status_codes.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/structures.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/requests/utils.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/retrying.py create mode 100644 panda/python/Lib/site-packages/pip/_vendor/six.py create mode 100644 panda/python/Lib/site-packages/pip/basecommand.py create mode 100644 panda/python/Lib/site-packages/pip/baseparser.py create mode 100644 panda/python/Lib/site-packages/pip/cmdoptions.py create mode 100644 panda/python/Lib/site-packages/pip/commands/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/commands/completion.py create mode 100644 panda/python/Lib/site-packages/pip/commands/freeze.py create mode 100644 panda/python/Lib/site-packages/pip/commands/help.py create mode 100644 panda/python/Lib/site-packages/pip/commands/install.py create mode 100644 panda/python/Lib/site-packages/pip/commands/list.py create mode 100644 
panda/python/Lib/site-packages/pip/commands/search.py create mode 100644 panda/python/Lib/site-packages/pip/commands/show.py create mode 100644 panda/python/Lib/site-packages/pip/commands/uninstall.py create mode 100644 panda/python/Lib/site-packages/pip/commands/unzip.py create mode 100644 panda/python/Lib/site-packages/pip/commands/wheel.py create mode 100644 panda/python/Lib/site-packages/pip/commands/zip.py create mode 100644 panda/python/Lib/site-packages/pip/compat/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/compat/dictconfig.py create mode 100644 panda/python/Lib/site-packages/pip/download.py create mode 100644 panda/python/Lib/site-packages/pip/exceptions.py create mode 100644 panda/python/Lib/site-packages/pip/index.py create mode 100644 panda/python/Lib/site-packages/pip/locations.py create mode 100644 panda/python/Lib/site-packages/pip/models/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/models/index.py create mode 100644 panda/python/Lib/site-packages/pip/operations/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/operations/freeze.py create mode 100644 panda/python/Lib/site-packages/pip/pep425tags.py create mode 100644 panda/python/Lib/site-packages/pip/req/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/req/req_file.py create mode 100644 panda/python/Lib/site-packages/pip/req/req_install.py create mode 100644 panda/python/Lib/site-packages/pip/req/req_requirement.py create mode 100644 panda/python/Lib/site-packages/pip/req/req_set.py create mode 100644 panda/python/Lib/site-packages/pip/req/req_uninstall.py create mode 100644 panda/python/Lib/site-packages/pip/status_codes.py create mode 100644 panda/python/Lib/site-packages/pip/utils/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/utils/appdirs.py create mode 100644 panda/python/Lib/site-packages/pip/utils/build.py create mode 100644 panda/python/Lib/site-packages/pip/utils/deprecation.py create mode 100644 
panda/python/Lib/site-packages/pip/utils/filesystem.py create mode 100644 panda/python/Lib/site-packages/pip/utils/logging.py create mode 100644 panda/python/Lib/site-packages/pip/utils/outdated.py create mode 100644 panda/python/Lib/site-packages/pip/utils/ui.py create mode 100644 panda/python/Lib/site-packages/pip/vcs/__init__.py create mode 100644 panda/python/Lib/site-packages/pip/vcs/bazaar.py create mode 100644 panda/python/Lib/site-packages/pip/vcs/git.py create mode 100644 panda/python/Lib/site-packages/pip/vcs/mercurial.py create mode 100644 panda/python/Lib/site-packages/pip/vcs/subversion.py create mode 100644 panda/python/Lib/site-packages/pip/wheel.py create mode 100644 panda/python/Lib/site-packages/ply-3.4-py2.7.egg-info/PKG-INFO create mode 100644 panda/python/Lib/site-packages/ply-3.4-py2.7.egg-info/SOURCES.txt create mode 100644 panda/python/Lib/site-packages/ply-3.4-py2.7.egg-info/dependency_links.txt create mode 100644 panda/python/Lib/site-packages/ply-3.4-py2.7.egg-info/installed-files.txt create mode 100644 panda/python/Lib/site-packages/ply-3.4-py2.7.egg-info/top_level.txt create mode 100644 panda/python/Lib/site-packages/ply/__init__.py create mode 100644 panda/python/Lib/site-packages/ply/cpp.py create mode 100644 panda/python/Lib/site-packages/ply/ctokens.py create mode 100644 panda/python/Lib/site-packages/ply/lex.py create mode 100644 panda/python/Lib/site-packages/ply/yacc.py create mode 100644 tools/findterm.py create mode 100644 tools/findterm_otp.py diff --git a/panda/python/Lib/site-packages/pip-6.1.1.dist-info/DESCRIPTION.rst b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/DESCRIPTION.rst new file mode 100644 index 00000000..0e037916 --- /dev/null +++ b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/DESCRIPTION.rst @@ -0,0 +1,24 @@ +pip +=== + +The `PyPA recommended +`_ +tool for installing Python packages. 
+ +* `Installation `_ +* `Documentation `_ +* `Changelog `_ +* `Github Page `_ +* `Issue Tracking `_ +* `Mailing list `_ +* User IRC: #pypa on Freenode. +* Dev IRC: #pypa-dev on Freenode. + + +.. image:: https://pypip.in/v/pip/badge.png + :target: https://pypi.python.org/pypi/pip + +.. image:: https://secure.travis-ci.org/pypa/pip.png?branch=develop + :target: http://travis-ci.org/pypa/pip + + diff --git a/panda/python/Lib/site-packages/pip-6.1.1.dist-info/METADATA b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/METADATA new file mode 100644 index 00000000..75c19d71 --- /dev/null +++ b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/METADATA @@ -0,0 +1,52 @@ +Metadata-Version: 2.0 +Name: pip +Version: 6.1.1 +Summary: The PyPA recommended tool for installing Python packages. +Home-page: https://pip.pypa.io/ +Author: The pip developers +Author-email: python-virtualenv@groups.google.com +License: MIT +Keywords: easy_install distutils setuptools egg virtualenv +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Topic :: Software Development :: Build Tools +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: Implementation :: PyPy +Provides-Extra: testing +Requires-Dist: pytest; extra == 'testing' +Requires-Dist: virtualenv (>=1.10); extra == 'testing' +Requires-Dist: scripttest (>=1.3); extra == 'testing' +Requires-Dist: mock; extra == 'testing' + +pip +=== + +The `PyPA recommended +`_ +tool for installing Python packages. 
+ +* `Installation `_ +* `Documentation `_ +* `Changelog `_ +* `Github Page `_ +* `Issue Tracking `_ +* `Mailing list `_ +* User IRC: #pypa on Freenode. +* Dev IRC: #pypa-dev on Freenode. + + +.. image:: https://pypip.in/v/pip/badge.png + :target: https://pypi.python.org/pypi/pip + +.. image:: https://secure.travis-ci.org/pypa/pip.png?branch=develop + :target: http://travis-ci.org/pypa/pip + + diff --git a/panda/python/Lib/site-packages/pip-6.1.1.dist-info/RECORD b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/RECORD new file mode 100644 index 00000000..0ab373ea --- /dev/null +++ b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/RECORD @@ -0,0 +1,473 @@ +pip/__init__.py,sha256=u9MsAhrM-b6AL7okrrd3TGnOS7SqiDdbcrTEr0NPYt4,10414 +pip/__main__.py,sha256=V6Kh-IEDEFpt1cahRE6MajUF_14qJR_Qsvn4MjWZXzE,584 +pip/basecommand.py,sha256=Md3Uw4pxLiskfmVNOLQNLKk3QpTiuk4OlTN6Qpchlpg,9898 +pip/baseparser.py,sha256=MYWjzKbV90-m2EH2tPcP-x-q8ctRgIn7-pQODvWZLnk,9643 +pip/cmdoptions.py,sha256=Mn4CV6-S9acQ4xLcGQ56npPJc54TVXXOEfTPbCdyi6Q,11475 +pip/download.py,sha256=KtVjNqg9-lsDBJULReycWwzxvaN_1Uv_4xRgstqGgzA,32074 +pip/exceptions.py,sha256=AKZqu1nR8jsbiRSoVFL57X6yiRdD4SMN8QtIT4PNrrY,1125 +pip/index.py,sha256=hA8AaEy61hlH8LM5VccFIjqJkBQWVojdq30CDOyu8mQ,44478 +pip/locations.py,sha256=gbYuVp3IQdVWFyEeFpDVFrvQoD2q1hBpZ9V_Qi0hxaU,7778 +pip/pep425tags.py,sha256=2gR_XYuwHOpJ6Wx-4YgK3SFEdMvnonpeqSNwcgqZ0Cs,4431 +pip/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156 +pip/wheel.py,sha256=JT_R2WTC9I5wDxK2AAjy7Q5dOT9o9Mhbp5c_i353J8w,22339 +pip/_vendor/__init__.py,sha256=-XooiQmvydDvselwuxGWa047WR18T2lcZZnlfb4w21A,4063 +pip/_vendor/ipaddress.py,sha256=46210_lEAJIBtEx99yGKvwMpiF2jZFfLuNJw6GXb6j8,72089 +pip/_vendor/re-vendor.py,sha256=PcdZ40d0ohMsdJmA4t0AeAWbPXi1tFsvAwA5KE5FGeY,773 +pip/_vendor/retrying.py,sha256=k3fflf5_Mm0XcIJYhB7Tj34bqCCPhUDkYbx1NvW2FPE,9972 +pip/_vendor/six.py,sha256=zKxWCKje5Gpr06IIWNgQL8-8GJ9rwAulE0DnWBKzmhE,29664 
+pip/_vendor/_markerlib/__init__.py,sha256=2hgtRuYDOrimZF9-ENCkrP8gnJ59HZEtlk-zoTEvn1Y,564 +pip/_vendor/_markerlib/markers.py,sha256=YuFp0-osufFIoqnzG3L0Z2fDCx4Vln3VUDeXJ2DA_1I,3979 +pip/_vendor/cachecontrol/__init__.py,sha256=grysRMnPF3leJyC-au4eBzL83unVt6uivrn-NCigm6Y,302 +pip/_vendor/cachecontrol/adapter.py,sha256=eizWWJwOnG5TToxL-XiEywPEf2k20--e-5C6u6wAEts,4196 +pip/_vendor/cachecontrol/cache.py,sha256=xtl-V-pr9KSt9VvFDRCB9yrHPEvqvbk-5M1vAInZb5k,790 +pip/_vendor/cachecontrol/compat.py,sha256=kjqVbQGXwDm8xsBFtnLtJ50VQoOAUzgLvv5bjxrzguM,311 +pip/_vendor/cachecontrol/controller.py,sha256=NyC69aG2oMqlex7zH_tihj0rLmWzSngj3vw7GzMs-tU,10124 +pip/_vendor/cachecontrol/filewrapper.py,sha256=jkC0GOorbWIpy9CzP3PwxSHx5J2CXBAxNbGH68HmP1M,2168 +pip/_vendor/cachecontrol/heuristics.py,sha256=2P6eQo5sn4v6Eu3VzF3k8SflNUuaTSiY4q_j2q3jPQc,4053 +pip/_vendor/cachecontrol/serialize.py,sha256=5EBnH_7HrVzBH80yDqcv3s2Y6ccvxasODW2ka3sBq2E,6189 +pip/_vendor/cachecontrol/wrapper.py,sha256=Kqyu_3TW_54XDudha4-HF21vyEOAJ4ZnRXFysTiLmXA,498 +pip/_vendor/cachecontrol/caches/__init__.py,sha256=uWnUtyMvHY_LULaL_4_IR1F_xPgK5zHfJyRnBq4DnPE,369 +pip/_vendor/cachecontrol/caches/file_cache.py,sha256=aXY4s5QxzXiv1IpUa4DtX0wAyUH_Vz89nd3f-mI9d1Q,3113 +pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=XywqxkS9MkCaflTOY_wjrE02neKdywB9YwlOBbP7Ywc,973 +pip/_vendor/colorama/__init__.py,sha256=MaGNdH3aaGqXbOrn-V50ojZcxPx_Lak3WkD943_gG3o,225 +pip/_vendor/colorama/ansi.py,sha256=QqrSoEXqVOpqsz0ChbSqxQ3pkRNbrmdpuiFRTX7bceY,2304 +pip/_vendor/colorama/ansitowin32.py,sha256=BfJ_7-Ya6odmjvOlpI32KUTY2K2QWIeFmhg5oNymVsM,9262 +pip/_vendor/colorama/initialise.py,sha256=CRwgjKdSGhivhfBITpB9vyQsVgc6TyCGvFqVymqxK90,1597 +pip/_vendor/colorama/win32.py,sha256=pm8dhPtaCDr8W2V7b0bVJUHuz0rVM896z6OfA-iRV24,5121 +pip/_vendor/colorama/winterm.py,sha256=6KM_v7qYo-_GM7ZECbPV8ZY8iLwP-5mFqJHCGGOp11g,5732 +pip/_vendor/distlib/__init__.py,sha256=BG292hb5o2JkdomgEoNF_lTE8FaDSfIi49zzvCMB-kM,581 
+pip/_vendor/distlib/compat.py,sha256=mRXDlAyARSFcBsm7meG__u_xLpTB8j8KVPIMqIjLM5k,40541 +pip/_vendor/distlib/database.py,sha256=BPOTjPcD_7eDONGX9nVOfyUAk9NVtmnAIAUxLLXASnQ,49199 +pip/_vendor/distlib/index.py,sha256=qU38JCRI5F_1Z-QmydiocDE58d4KvHzKjS_T0dBsPlg,20976 +pip/_vendor/distlib/locators.py,sha256=VHs4dBXMST9PfYTxQmwTrxX0sZSsrcEBOnbu5pGYRJQ,48796 +pip/_vendor/distlib/manifest.py,sha256=JF5EstaCOPnsW2tUdXCdjIFn-Zkf48Dqw0TcxKjuni0,13598 +pip/_vendor/distlib/markers.py,sha256=iRrVWwpyVwjkKJSX8NEQ92_MRMwpROcfNGKCD-Ch1QM,6282 +pip/_vendor/distlib/metadata.py,sha256=HvsxgUBUdrJe3pnRQCyCVtKrX5GIw0vjPEzvnmC5by0,38314 +pip/_vendor/distlib/resources.py,sha256=u6tRcHYjfzHTgR7Om4VZWBeqaUCxZkyYnEmb-d092H8,9664 +pip/_vendor/distlib/scripts.py,sha256=IUHER7Vbv2dyYRm9NwdRc4G04EiHVEmk0M5hQGqXIR4,12894 +pip/_vendor/distlib/t32.exe,sha256=-r9djx_Ts1wovg4bLWIw7rl3wZfPZRv6mwwFgy61Vr0,91648 +pip/_vendor/distlib/t64.exe,sha256=8JaWc6T9qB5nM2vTeXyXbR4u6zlQ7e3XVQTEAZSBl8k,95232 +pip/_vendor/distlib/util.py,sha256=5qZkMIAbHxgqinxiDusM9msTU-xL939wAGG7qptlDEg,51453 +pip/_vendor/distlib/version.py,sha256=GeYZxzA0k6zytPBOC5R6RQiUWRoIR2arUrpwUejRUWo,23711 +pip/_vendor/distlib/w32.exe,sha256=eERRU6_gXcVaQ1M2uHAR3BTQjsPI0SbimGyMLGDHzps,88576 +pip/_vendor/distlib/w64.exe,sha256=y1qFc6Kaae02GP3jwe0UHGu93sq5mqJN7ptrJHlAdGc,92160 +pip/_vendor/distlib/wheel.py,sha256=hkldb9zC3rt2dT3F9He9kHxJePMvkQdmGV3pEWWKq6w,39035 +pip/_vendor/distlib/_backport/__init__.py,sha256=bqS_dTOH6uW9iGgd0uzfpPjo6vZ4xpPZ7kyfZJ2vNaw,274 +pip/_vendor/distlib/_backport/misc.py,sha256=KWecINdbFNOxSOP1fGF680CJnaC6S4fBRgEtaYTw0ig,971 +pip/_vendor/distlib/_backport/shutil.py,sha256=AUi8718iRoJ9K26mRi-rywtt8Gx7ykvrvbUbZszjfYE,25650 +pip/_vendor/distlib/_backport/sysconfig.cfg,sha256=swZKxq9RY5e9r3PXCrlvQPMsvOdiWZBTHLEbqS8LJLU,2617 +pip/_vendor/distlib/_backport/sysconfig.py,sha256=7WdYP0wbw8izH1eAEGNA-HXUyJrhzIAGK_LniUs4UNI,26958 +pip/_vendor/distlib/_backport/tarfile.py,sha256=bjyTNONZb-YEXrHFLExOSuagtSOoPaONP2UUoxwkAqE,92627 
+pip/_vendor/html5lib/__init__.py,sha256=I91_wsSQxCuFji1eNG8xtvahNoHoon4c4G9y_XkVlJQ,714 +pip/_vendor/html5lib/constants.py,sha256=w_Lrxu8h6qE4KATYy0SL5hiJ5ebuB28SlCcdXUHf6to,87346 +pip/_vendor/html5lib/html5parser.py,sha256=qMHEOEahKSZzLHHkqLRVbuIJYgAteVR-nmkjMp59Tvw,117029 +pip/_vendor/html5lib/ihatexml.py,sha256=MT12cVXAKaW-ALUkUeN175HpUP73xK8wAIpPzQ8cgfI,16581 +pip/_vendor/html5lib/inputstream.py,sha256=XNYhoJ22hSqfOSakjgCCphvSWMHrB65b-OcA81j9h5s,30879 +pip/_vendor/html5lib/sanitizer.py,sha256=sg7g5CXF9tfvykIoSVAvA8647MgScy3ncZC7IYH-8SA,16428 +pip/_vendor/html5lib/tokenizer.py,sha256=6Uf8sDUkvNn661bcBSBYUCTfXzSs9EyCTiPcj5PAjYI,76929 +pip/_vendor/html5lib/utils.py,sha256=T-BFeUVGJDjVCRbNoqar2qxn8jEoCOOJXE1nH0nDHEQ,2545 +pip/_vendor/html5lib/filters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/html5lib/filters/_base.py,sha256=z-IU9ZAYjpsVsqmVt7kuWC63jR11hDMr6CVrvuao8W0,286 +pip/_vendor/html5lib/filters/alphabeticalattributes.py,sha256=fpRLbz6TCe5yXEkGmyMlJ80FekWsTR-sHk3Ano0U9LQ,624 +pip/_vendor/html5lib/filters/inject_meta_charset.py,sha256=xllv1I7unxhcyZTf3LTsv30wh2mAkT7wmTZx7zIhpuY,2746 +pip/_vendor/html5lib/filters/lint.py,sha256=6rlGRUTxD5KWwEVoXVHI_PeyUHN6Vw2v_ovg0YiHsDA,4306 +pip/_vendor/html5lib/filters/optionaltags.py,sha256=4ozLwBgMRaxe7iqxefLQpDhp3irK7YHo9LgSGsvZYMw,10500 +pip/_vendor/html5lib/filters/sanitizer.py,sha256=MvGUs_v2taWPgGhjxswRSUiHfxrqMUhsNPz-eSeUYUQ,352 +pip/_vendor/html5lib/filters/whitespace.py,sha256=LbOUcC0zQ9z703KNZrArOr0kVBO7OMXjKjucDW32LU4,1142 +pip/_vendor/html5lib/serializer/__init__.py,sha256=xFXFP-inaTNlbnau5c5DGrH_O8yPm-C6HWbJxpiSqFE,490 +pip/_vendor/html5lib/serializer/htmlserializer.py,sha256=bSXUuFJB6s-ODOl0nzFN0UA6xlQRU-BwYamPeJvsNSE,12909 +pip/_vendor/html5lib/treeadapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/html5lib/treeadapters/sax.py,sha256=3of4vvaUYIAic7pngebwJV24hpOS7Zg9ggJa_WQegy4,1661 
+pip/_vendor/html5lib/treebuilders/__init__.py,sha256=Xz4X6B5DA1R-5GyRa44j0sJwfl6dUNyb0NBu9-7sK3U,3405 +pip/_vendor/html5lib/treebuilders/_base.py,sha256=Xf0FZVcVwIQS6tEseJdj5wKbYucbNCnbAsnsG4lONis,13711 +pip/_vendor/html5lib/treebuilders/dom.py,sha256=ylkIlwEV2NsIWBpwEtfqF0LVoCGg4oXazEWs4-486jk,8469 +pip/_vendor/html5lib/treebuilders/etree.py,sha256=etbO6yQlyV46rWlj9mSyVqQOWrgoHgyJ01Tut4lWZkk,12621 +pip/_vendor/html5lib/treebuilders/etree_lxml.py,sha256=z3Bnfm2MstEEb_lbaAeicl5l-ab6MSQa5Q1ZZreK7Pc,14031 +pip/_vendor/html5lib/treewalkers/__init__.py,sha256=44g-xYZEoYxzkMu6CepBTLm4m-g9iy7Vm_IG8PWAbhY,2323 +pip/_vendor/html5lib/treewalkers/_base.py,sha256=hnL6zMgGJoGqEJYKVKveDmfpz1d2xriyuuau6479xq4,6919 +pip/_vendor/html5lib/treewalkers/dom.py,sha256=mAg05wBWN2k-CGPoo0KNxa55QAlHciNccp8AezCa8j8,1457 +pip/_vendor/html5lib/treewalkers/etree.py,sha256=waFU6dxcV5y4SEMyxZpQ9M4I5pKpMmCtUSN1GbuCVcE,4625 +pip/_vendor/html5lib/treewalkers/genshistream.py,sha256=IbBFrlgi-59-K7P1zm0d7ZFIknBN4c5E57PHJDkx39s,2278 +pip/_vendor/html5lib/treewalkers/lxmletree.py,sha256=QgzI9386-3J10AYfekBI_oudHdjtAp48ay5cnHnvtiU,6045 +pip/_vendor/html5lib/treewalkers/pulldom.py,sha256=9W6i8yWtUzayV6EwX-okVacttHaqpQZwdBCc2S3XeQ4,2302 +pip/_vendor/html5lib/trie/__init__.py,sha256=mec5zyJ5wIKRM8819gIcIsYQwncg91rEmPwGH1dG3Ho,212 +pip/_vendor/html5lib/trie/_base.py,sha256=WGY8SGptFmx4O0aKLJ54zrIQOoyuvhS0ngA36vAcIcc,927 +pip/_vendor/html5lib/trie/datrie.py,sha256=EQpqSfkZRuTbE-DuhW7xMdVDxdZNZ0CfmnYfHA_3zxM,1178 +pip/_vendor/html5lib/trie/py.py,sha256=wXmQLrZRf4MyWNyg0m3h81m9InhLR7GJ002mIIZh-8o,1775 +pip/_vendor/lockfile/__init__.py,sha256=tRsliCGxIGRfQ_OsAEoA70UatRbdIiVh0OZrYL_xIn0,9162 +pip/_vendor/lockfile/linklockfile.py,sha256=pLUQvtn6BLNfzEy5Vd_SH86mx5hO3XpVDu1xAdSeRaw,2649 +pip/_vendor/lockfile/mkdirlockfile.py,sha256=D5msAvl2kXtvGh5dhHf39eah-7KiZRsAPArRfWBAYm8,3098 +pip/_vendor/lockfile/pidlockfile.py,sha256=frwO5ouxdegU7mgFS-lOvUKIbq8STLc41hHWDiG9yAk,6221 
+pip/_vendor/lockfile/sqlitelockfile.py,sha256=DXtqjbp4qFzbaAP6r8AqiCH7WBwl3NzfLwM1srC1ObM,5540 +pip/_vendor/lockfile/symlinklockfile.py,sha256=O1l2YuZs1RQuWGHRx4dyQ2Jkb4NSzKAxipS3vN5Chtw,2613 +pip/_vendor/packaging/__about__.py,sha256=vIsQ1bnL6mbXZnr-_gy4LK48jaD3hQ6h5LhTLbCwuz8,1073 +pip/_vendor/packaging/__init__.py,sha256=2V8n-eEpSgBuXlV8hlMmhU7ZklpsrrusWMZNp2gC4Hs,906 +pip/_vendor/packaging/_compat.py,sha256=wofog8iYo_zudt_10i6JiXKHDs5GhCuXC09hCuSJiv4,1253 +pip/_vendor/packaging/_structures.py,sha256=93YvgrEE2HgFp8AdXy0pwCRVnZeutRHO_-puJ7T0cPw,1809 +pip/_vendor/packaging/specifiers.py,sha256=W_1Az1rlSYpIlFdqIDiP_R1EW3mwGRiUOOx_COR-Xuc,28472 +pip/_vendor/packaging/version.py,sha256=2xtvpViNLomKRAGUTTZ1NwqP0AWrwljQ741QpzsN4BE,11884 +pip/_vendor/pkg_resources/__init__.py,sha256=5zDEKPt1GUmIWf4anHaE5RNZ_I3hTGnIWZY8hcr7My4,105455 +pip/_vendor/pkg_resources/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/pkg_resources/tests/test_pkg_resources.py,sha256=VGaiaK5p1jAujOcP7OdQZGxL4tRidJmi1pLhYLfBgp4,3447 +pip/_vendor/pkg_resources/tests/test_resources.py,sha256=JenKqZJwx1mOFM3niA77vl3w3zVT1BNU5LHDjM8b35I,23622 +pip/_vendor/progress/__init__.py,sha256=Wn1074LUDZovd4zfoVYojnPBgOc6ctHbQX7rp_p8lRA,3023 +pip/_vendor/progress/bar.py,sha256=bH72DehBggOGvZsv4839W2p-981_AOSC8e7ZihxQ3gU,2707 +pip/_vendor/progress/counter.py,sha256=UZcnk68HkBOCRjz25W8MmHY-ev2BGn-sYWsR-iSfQds,1529 +pip/_vendor/progress/helpers.py,sha256=aPoo84DShYXFe2Aqwm9Wj-gt3Lj9BJY9-bwC4C2c2II,2894 +pip/_vendor/progress/spinner.py,sha256=LRVxxpM1-EZ5JJe-yYriiHWPTYZnaYUxsbxyFWQVOQc,1341 +pip/_vendor/requests/__init__.py,sha256=HWlvRSJamDpgUFHV3t3OguaChkn5CMgc1ozndj7Uaww,1861 +pip/_vendor/requests/adapters.py,sha256=MEedP-slXqxDnYdi4JxuCE3y2NTizShvL_SfP3ncge4,16810 +pip/_vendor/requests/api.py,sha256=QLZr0hZtx3QCYAhBIyHy0jWiu9QKEljBybpw8cW4AL4,5280 +pip/_vendor/requests/auth.py,sha256=zNVV74e6xVWCC4ys7UCKFNwwFC0J2MPtXad8mCRQ_Ws,6710 
+pip/_vendor/requests/cacert.pem,sha256=ak7q_q8ozHdQ9ff27U-E1vCNrLisFRQSMy9zJkdpQlM,308434 +pip/_vendor/requests/certs.py,sha256=RX5H1cSiB52Hbjh_qv3eMW8hqHEF_r4Qiv_4AwfziuU,613 +pip/_vendor/requests/compat.py,sha256=hq7CKHoykNs8yzKPAJiOkHQJPoNp9A89MufTdhlCniY,1469 +pip/_vendor/requests/cookies.py,sha256=YEoM1iUt2WsyILQOwtDqRglQcG-noqZv9971q3KBKJA,16791 +pip/_vendor/requests/exceptions.py,sha256=zZhHieXgR1teqbvuo_9OrwDMHnrvRtulW97VfzumQv4,2517 +pip/_vendor/requests/hooks.py,sha256=9vNiuiRHRd5Qy6BX_0p1H3NsUzDo1M_HaFR2AFL41Tg,820 +pip/_vendor/requests/models.py,sha256=5GsW4kWNJ0OVDGpK5ioTHDnFaDxDszGhz_jvejvISno,28156 +pip/_vendor/requests/sessions.py,sha256=sxqjvm8g5CwrwHITppmANplIu4JgYcfrKyLZ2Nk3YS4,24476 +pip/_vendor/requests/status_codes.py,sha256=DVA33t4UthIiZhP4iYSChbWjuhrJWvVA04qle9nwj2Q,3200 +pip/_vendor/requests/structures.py,sha256=i3yMaaDbl4_gNJKdcK3kDmeSLoo0r59XEIWoc_qtNyo,2977 +pip/_vendor/requests/utils.py,sha256=yh5am9D9nG_oAYyYU2upkXIth2QOi1LJhiHSx5dSVbw,21334 +pip/_vendor/requests/packages/__init__.py,sha256=6s3_webnUa-3HljuMVfRU0vbKpUIIbrlYvWFbmGAAtI,4222 +pip/_vendor/requests/packages/chardet/__init__.py,sha256=XuTKCYOR7JwsoHxqZTYH86LVyMDbDI3s1s0W_qoGEBM,1295 +pip/_vendor/requests/packages/chardet/big5freq.py,sha256=D8oTdz-GM7Jg8TsaWJDm65vM_OLHC3xub6qUJ3rOgsQ,82594 +pip/_vendor/requests/packages/chardet/big5prober.py,sha256=XX96C--6WKYW36mL-z7pJSAtc169Z8ZImByCP4pEN9A,1684 +pip/_vendor/requests/packages/chardet/chardetect.py,sha256=f4299UZG6uWd3i3r_N0OdrFj2sA9JFI54PAmDLAFmWA,2504 +pip/_vendor/requests/packages/chardet/chardistribution.py,sha256=cUARQFr1oTLXeJCDQrDRkUP778AvSMzhSCnG8VLCV58,9226 +pip/_vendor/requests/packages/chardet/charsetgroupprober.py,sha256=0lKk7VE516fgMw119tNefFqLOxKfIE9WfdkpIT69OKU,3791 +pip/_vendor/requests/packages/chardet/charsetprober.py,sha256=Z48o2KiOj23FNqYH8FqzhH5m1qdm3rI8DcTm2Yqtklg,1902 +pip/_vendor/requests/packages/chardet/codingstatemachine.py,sha256=E85rYhHVMw9xDEJVgiQhp0OnLGr6i2r8_7QOWMKTH08,2318 
+pip/_vendor/requests/packages/chardet/compat.py,sha256=5mm6yrHwef1JEG5OxkPJlSq5lkjLVpEGh3iPgFBkpkM,1157 +pip/_vendor/requests/packages/chardet/constants.py,sha256=-UnY8U7EP7z9fTyd09yq35BEkSFEAUAiv9ohd1DW1s4,1335 +pip/_vendor/requests/packages/chardet/cp949prober.py,sha256=FMvdLyB7fejPXRsTbca7LK1P3RUvvssmjUNyaEfz8zY,1782 +pip/_vendor/requests/packages/chardet/escprober.py,sha256=q5TcQKeVq31WxrW7Sv8yjpZkjEoaHO8S92EJZ9hodys,3187 +pip/_vendor/requests/packages/chardet/escsm.py,sha256=7iljEKN8lXTh8JFXPUSwlibMno6R6ksq4evLxbkzfro,7839 +pip/_vendor/requests/packages/chardet/eucjpprober.py,sha256=5IpfSEjAb7h3hcGMd6dkU80O900C2N6xku28rdYFKuc,3678 +pip/_vendor/requests/packages/chardet/euckrfreq.py,sha256=T5saK5mImySG5ygQPtsp6o2uKulouCwYm2ElOyFkJqU,45978 +pip/_vendor/requests/packages/chardet/euckrprober.py,sha256=Wo7dnZ5Erw_nB4H-m5alMiOxOuJUmGHlwCSaGqExDZA,1675 +pip/_vendor/requests/packages/chardet/euctwfreq.py,sha256=G_I0BW9i1w0ONeeUwIYqV7_U09buIHdqh-wNHVaql7I,34872 +pip/_vendor/requests/packages/chardet/euctwprober.py,sha256=upS2P6GuT5ujOxXYw-RJLcT7A4PTuo27KGUKU4UZpIQ,1676 +pip/_vendor/requests/packages/chardet/gb2312freq.py,sha256=M2gFdo_qQ_BslStEchrPW5CrPEZEacC0uyDLw4ok-kY,36011 +pip/_vendor/requests/packages/chardet/gb2312prober.py,sha256=VWnjoRa83Y6V6oczMaxyUr0uy48iCnC2nzk9zfEIRHc,1681 +pip/_vendor/requests/packages/chardet/hebrewprober.py,sha256=8pdoUfsVXf_L4BnJde_BewS6H2yInV5688eu0nFhLHY,13359 +pip/_vendor/requests/packages/chardet/jisfreq.py,sha256=ZcL4R5ekHHbP2KCYGakVMBsiKqZZZAABzhwi-uRkOps,47315 +pip/_vendor/requests/packages/chardet/jpcntx.py,sha256=yftmp0QaF6RJO5SJs8I7LU5AF4rwP23ebeCQL4BM1OY,19348 +pip/_vendor/requests/packages/chardet/langbulgarianmodel.py,sha256=ZyPsA796MSVhYdfWhMCgKWckupAKAnKqWcE3Cl3ej6o,12784 +pip/_vendor/requests/packages/chardet/langcyrillicmodel.py,sha256=fkcd5OvogUp-GrNDWAZPgkYsSRCD2omotAEvqjlmLKE,17725 +pip/_vendor/requests/packages/chardet/langgreekmodel.py,sha256=QHMy31CH_ot67UCtmurCEKqKx2WwoaKrw2YCYYBK2Lw,12628 
+pip/_vendor/requests/packages/chardet/langhebrewmodel.py,sha256=4ASl5vzKJPng4H278VHKtRYC03TpQpenlHTcsmZH1rE,11318 +pip/_vendor/requests/packages/chardet/langhungarianmodel.py,sha256=SXwuUzh49_cBeMXhshRHdrhlkz0T8_pZWV_pdqBKNFk,12536 +pip/_vendor/requests/packages/chardet/langthaimodel.py,sha256=-k7djh3dGKngAGnt3WfuoJN7acDcWcmHAPojhaUd7q4,11275 +pip/_vendor/requests/packages/chardet/latin1prober.py,sha256=238JHOxH8aRudJY2NmeSv5s7i0Qe3GuklIU3HlYybvg,5232 +pip/_vendor/requests/packages/chardet/mbcharsetprober.py,sha256=9rOCjDVsmSMp6e7q2syqak22j7lrbUZhJhMee2gbVL0,3268 +pip/_vendor/requests/packages/chardet/mbcsgroupprober.py,sha256=SHRzNPLpDXfMJLA8phCHVU0WgqbgDCNxDQMolGX_7yk,1967 +pip/_vendor/requests/packages/chardet/mbcssm.py,sha256=IKwJXyxu34n6NojmxVxC60MLFtJKm-hIfxaFEnb3uBA,19590 +pip/_vendor/requests/packages/chardet/sbcharsetprober.py,sha256=Xq0lODqJnDgxglBiQI4BqTFiPbn63-0a5XNA5-hVu7U,4793 +pip/_vendor/requests/packages/chardet/sbcsgroupprober.py,sha256=8hLyH8RAG-aohBo7o_KciWVgRo42ZE_zEtuNG1JMRYI,3291 +pip/_vendor/requests/packages/chardet/sjisprober.py,sha256=UYOmiMDzttYIkSDoOB08UEagivJpUXz4tuWiWzTiOr8,3764 +pip/_vendor/requests/packages/chardet/universaldetector.py,sha256=h-E2x6XSCzlNjycYWG0Fe4Cf1SGdaIzUNu2HCphpMZA,6840 +pip/_vendor/requests/packages/chardet/utf8prober.py,sha256=7tdNZGrJY7jZUBD483GGMkiP0Tx8Fp-cGvWHoAsilHg,2652 +pip/_vendor/requests/packages/urllib3/__init__.py,sha256=X8vJKJcD6NUYW1OOe8MsFsjnyS7rP6YT6zcGNnzWY20,1864 +pip/_vendor/requests/packages/urllib3/_collections.py,sha256=C-sEjwS8M5QMVEPBpjHHva2IHuzo0Cx-RXOtvGaJdaM,10473 +pip/_vendor/requests/packages/urllib3/connection.py,sha256=YPH2Nf6rsBsEY8EVCmoe1shLL_mxlWYYCBBTEm8NyUQ,8967 +pip/_vendor/requests/packages/urllib3/connectionpool.py,sha256=xdQV8Fi9Z5HPplfBNJloYKFcs_jmsb9XWlgFrsVWpq0,30371 +pip/_vendor/requests/packages/urllib3/exceptions.py,sha256=JHtHDX-pFMXzm8Jc6Wjbv_KPn3dUHVy7dpPshzo15TQ,4244 
+pip/_vendor/requests/packages/urllib3/fields.py,sha256=06XgBjTvEyVYUWA-j_6zhnfXMpd-IxZdzT85ppAFfYg,5833 +pip/_vendor/requests/packages/urllib3/filepost.py,sha256=TEpQ_PMO0loPQERLr4E7VcgbMfhNwOCxt8cudhrpkM0,2281 +pip/_vendor/requests/packages/urllib3/poolmanager.py,sha256=Qw1UStRXPh6RH5BOT7x4NY7Gqkho2njYEDiqk9_8728,9406 +pip/_vendor/requests/packages/urllib3/request.py,sha256=NjnLVqcKZVotmPV335m87AqMFBSH0V_ml2tOGxKSKRI,5751 +pip/_vendor/requests/packages/urllib3/response.py,sha256=OoQvkqAhYOxF5JQ4EjUQKHflfMRRWIAWk2dZDePZfYE,12240 +pip/_vendor/requests/packages/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py,sha256=F29BjpIMId2u9Bwmy0bmg8eDYKvQZiXLsZeK2cDNctQ,4507 +pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py,sha256=0YpFuH3T_qAg5oHjVf3eyt_DJgLbwpcrG0Pm-EL5Lrw,10101 +pip/_vendor/requests/packages/urllib3/packages/__init__.py,sha256=EKCTAOjZtPR_HC50e7X8hS5j4bkFkN87XZOT-Wdpfus,74 +pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py,sha256=VQaPONfhVMsb8B63Xg7ZOydJqIE_jzeMhVN3Pec6ogw,8935 +pip/_vendor/requests/packages/urllib3/packages/six.py,sha256=U-rO-WBrFS8PxHeamSl6okKCjqPF18NhiZb0qPZ67XM,11628 +pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py,sha256=cOWMIn1orgJoA35p6pSzO_-Dc6iOX9Dhl6D2sL9b_2o,460 +pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py,sha256=fK28k37hL7-D79v9iM2fHgNK9Q1Pw0M7qVRL4rkfFjQ,3778 +pip/_vendor/requests/packages/urllib3/util/__init__.py,sha256=zrB1BFTNOUWxgEVvZlicK8uEU2AVpT1TFmWo2gQGfDA,486 +pip/_vendor/requests/packages/urllib3/util/connection.py,sha256=PTxckPfstrFVAPAfYn12kaZYEfoQn-CDCo2VrIBPtpo,3293 +pip/_vendor/requests/packages/urllib3/util/request.py,sha256=zY2x5tBXzvgLWgF3XRk_CEk-X8Q8L9bqpESqWn13I_0,2089 +pip/_vendor/requests/packages/urllib3/util/response.py,sha256=QMrOy69WPkoe42EU0Y5jwRNqBf-w1FF8GJWAx1jQDmY,566 
+pip/_vendor/requests/packages/urllib3/util/retry.py,sha256=bcRb3QC1LDMHLx8gBDDGFqZ3dKs6bYcpfWZJ3FpOMtE,9924 +pip/_vendor/requests/packages/urllib3/util/ssl_.py,sha256=W5OR1gHHhIZoYhsGXnykFQsnCI-Ny6IS4FqjrPbOb0c,9311 +pip/_vendor/requests/packages/urllib3/util/timeout.py,sha256=2MqJVD_v_0tLxgm2Mr_ePqYmfnB5zjZXphlIexWocKM,9544 +pip/_vendor/requests/packages/urllib3/util/url.py,sha256=HdF7JrcAev4bbdbBRUftRQtkIqtEBCl0qvF2Xujf860,5760 +pip/commands/__init__.py,sha256=s5rokw10J4WLGzYJUhqUEHjpPt_G1mhdfw4noExg_7w,2195 +pip/commands/completion.py,sha256=7JkLif3DF0QGpjMaUjHvF8knJ3IumcED2gWRvMRpFy0,1991 +pip/commands/freeze.py,sha256=46mP6QqxgHdvDjDM-TmomPjW3ySAKsKfbwr__I2quFw,2118 +pip/commands/help.py,sha256=84HWkEdnGP_AEBHnn8gJP2Te0XTXRKFoXqXopbOZTNo,982 +pip/commands/install.py,sha256=a2dSwmiFFXnO52lE2t7B_qAC6-wjDdBKjOIZS6A1oj8,16307 +pip/commands/list.py,sha256=IMsAH2rBI-xqT_IbDqFoxnr-5FioJDrKK5hGlgXxjxA,7405 +pip/commands/search.py,sha256=zOrBophzW_s_gHMFAqibWz-jlLu-lPh08NIbR-8fDOU,4604 +pip/commands/show.py,sha256=5B7ky8SBNLy5mFMyXf2ethe6zQQ_Plr2h8GDWNaeEk8,4974 +pip/commands/uninstall.py,sha256=rgeIiLH5Bg3p1p9GNEGolQJE-EhpwEFeKU7mUKqEXI0,2564 +pip/commands/unzip.py,sha256=iP8RW_SATGxj8IQA9pPUM3ii1m8ae-7z7fyrEl9SdhE,225 +pip/commands/wheel.py,sha256=8nxYL9AdyejmVRcgargpLcVwYVJoaEIAPMwfGli0Z-Y,9119 +pip/commands/zip.py,sha256=0PKb436Kr2TrlkMrWWKKc3tu6Jw3MF2aGcdiofMmQ-E,15782 +pip/compat/__init__.py,sha256=7HTDCYo0pcll7_bZoYOW1aqAaf0VUP2XzGjtFIjOVKA,2996 +pip/compat/dictconfig.py,sha256=dRrelPDWrceDSzFT51RTEVY2GuM7UDyc5Igh_tn4Fvk,23096 +pip/models/__init__.py,sha256=0Rs7_RA4DxeOkWT5Cq4CQzDrSEhvYcN3TH2cazr72PE,71 +pip/models/index.py,sha256=pUfbO__v3mD9j-2n_ClwPS8pVyx4l2wIwyvWt8GMCRA,487 +pip/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/operations/freeze.py,sha256=WkelKsKLqAIfZ2nUTqggKDf7idnf1dLQu6fXTkX4wVY,3860 +pip/req/__init__.py,sha256=zmAaaOcZWUXG1VYPLEcNgZZhQPCZ3dS4Ou1N3UDQ-js,264 
+pip/req/req_file.py,sha256=-ZH6UIJsO1rJdmCzSPbiH8-jGkC-O3wCUKZL2VVkjKQ,5363 +pip/req/req_install.py,sha256=7ZxwXRlXWZQYu0BOE00NBQmiJkDtSUbNDkZgvZayEGE,44179 +pip/req/req_requirement.py,sha256=LXuEviOaQT8FzUUevghUQNFp8OjGW2yTmZfSzIQkhdk,1245 +pip/req/req_set.py,sha256=jHtrVnvZyva0fBounKQMKIjn4DDNgDh7qxqpHhmjop0,29902 +pip/req/req_uninstall.py,sha256=yVI_Fotd3w2SUMNItHafDYwAzwHxcR5mZ0XzlwIHjaM,7131 +pip/utils/__init__.py,sha256=JGW6dM5PuvDVVdK40nGgayR0I8wEfQIUtK4HAHx5nWE,26547 +pip/utils/appdirs.py,sha256=DsdEjHfkFGNJ7NmDR_SWQ0PzOXZdyqDzHVN673mLq9o,9173 +pip/utils/build.py,sha256=4smLRrfSCmXmjEnVnMFh2tBEpNcSLRe6J0ejZJ-wWJE,1312 +pip/utils/deprecation.py,sha256=n_VgWyN8510R4ci6mIKxlhFF6f1beZehTULXrxuV7cQ,2145 +pip/utils/filesystem.py,sha256=ZEVBuYM3fqr2_lgOESh4Y7fPFszGD474zVm_M3Mb5Tk,899 +pip/utils/logging.py,sha256=EzVKjvmCKiJxhxtC2gkDTfm6bXfZjPL3NL06fCD43Ss,3359 +pip/utils/outdated.py,sha256=7CgpBdoUpLzmBrAOvQe_gF0_pV4unGLq6SVhfEhU9-k,4954 +pip/utils/ui.py,sha256=gu7HI4bZ0EeloxhA-CL2ngIVXaN7_-T1erwNOR8M-sM,6774 +pip/vcs/__init__.py,sha256=bcsRXqhJ3qI7a-bDPh2crMmo4LAF4rPZxoPt3l1OKX0,11882 +pip/vcs/bazaar.py,sha256=u3YdoeNX4_OrkwfnSK4OhfiVQriFN41EVExwto6mSbY,4455 +pip/vcs/git.py,sha256=Qd7Sc_i7d8Eg6kDIYjPyXMTLcQ9TGD9IVPJ7NIXtQfA,7636 +pip/vcs/mercurial.py,sha256=dkZ6nt6cPFHYpD-7wluDma_MZF1hZ7qa-uuekwqgCiw,5018 +pip/vcs/subversion.py,sha256=vJMOr68-yHL4km7J7P8qfIxmC_02rtQmRL8QVXAcxCI,10496 +pip-6.1.1.dist-info/DESCRIPTION.rst,sha256=Z62giArgiLxa37MRrqxY-VabG5II4wM-5sqWzgsulr0,760 +pip-6.1.1.dist-info/entry_points.txt,sha256=1-e4WB_Fe8mWHrMi1YQo_s5knbh0lu_uRmd8Wb6MJfY,68 +pip-6.1.1.dist-info/METADATA,sha256=AcRUifLlcW0pXWVAcdj92MYpBi1LgieXQ2RbmhgdTwY,1912 +pip-6.1.1.dist-info/metadata.json,sha256=u55k_yVzyEj4oX4ajOrmYfvy2YmcG4sj3M7pgfk_fy0,1491 +pip-6.1.1.dist-info/pbr.json,sha256=Yhto9tGnB6EhlCdT04FToTZ4YCsuDORR3bHudkqJfqo,46 +pip-6.1.1.dist-info/RECORD,, +pip-6.1.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 
+pip-6.1.1.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 +C:\Users\Zachary\Documents\TTU\src\panda\python\Scripts\pip.exe,sha256=nwsPyB5NF5HcZJn4QZ5gyqyXCim9e4vCvJuISgY-S9k,92016 +C:\Users\Zachary\Documents\TTU\src\panda\python\Scripts\pip2.exe,sha256=nwsPyB5NF5HcZJn4QZ5gyqyXCim9e4vCvJuISgY-S9k,92016 +C:\Users\Zachary\Documents\TTU\src\panda\python\Scripts\pip2.7.exe,sha256=nwsPyB5NF5HcZJn4QZ5gyqyXCim9e4vCvJuISgY-S9k,92016 +pip/_vendor/requests/compat.pyc,, +pip/_vendor/cachecontrol/cache.pyc,, +pip/_vendor/requests/certs.pyc,, +pip/_vendor/distlib/util.pyc,, +pip/_vendor/progress/spinner.pyc,, +pip/__init__.pyc,, +pip/_vendor/html5lib/treewalkers/genshistream.pyc,, +pip/_vendor/requests/packages/chardet/latin1prober.pyc,, +pip/_vendor/distlib/_backport/__init__.pyc,, +pip/operations/__init__.pyc,, +pip/_vendor/distlib/metadata.pyc,, +pip/compat/__init__.pyc,, +pip/_vendor/html5lib/treebuilders/__init__.pyc,, +pip/compat/dictconfig.pyc,, +pip/_vendor/html5lib/filters/alphabeticalattributes.pyc,, +pip/_vendor/requests/packages/urllib3/request.pyc,, +pip/_vendor/requests/packages/chardet/escprober.pyc,, +pip/_vendor/requests/adapters.pyc,, +pip/_vendor/requests/packages/chardet/langbulgarianmodel.pyc,, +pip/commands/completion.pyc,, +pip/_vendor/html5lib/html5parser.pyc,, +pip/_vendor/html5lib/filters/lint.pyc,, +pip/pep425tags.pyc,, +pip/_vendor/packaging/__init__.pyc,, +pip/_vendor/lockfile/linklockfile.pyc,, +pip/_vendor/requests/packages/chardet/__init__.pyc,, +pip/_vendor/html5lib/filters/whitespace.pyc,, +pip/_vendor/requests/packages/chardet/euckrprober.pyc,, +pip/_vendor/html5lib/treebuilders/etree_lxml.pyc,, +pip/_vendor/distlib/database.pyc,, +pip/_vendor/html5lib/treeadapters/sax.pyc,, +pip/_vendor/requests/packages/chardet/euckrfreq.pyc,, +pip/commands/wheel.pyc,, +pip/_vendor/requests/auth.pyc,, +pip/_vendor/html5lib/treewalkers/pulldom.pyc,, +pip/_vendor/packaging/__about__.pyc,, +pip/_vendor/progress/__init__.pyc,, 
+pip/_vendor/html5lib/ihatexml.pyc,, +pip/_vendor/html5lib/treewalkers/_base.pyc,, +pip/commands/list.pyc,, +pip/_vendor/distlib/scripts.pyc,, +pip/_vendor/html5lib/filters/sanitizer.pyc,, +pip/vcs/git.pyc,, +pip/cmdoptions.pyc,, +pip/_vendor/lockfile/symlinklockfile.pyc,, +pip/_vendor/requests/packages/urllib3/util/ssl_.pyc,, +pip/_vendor/requests/packages/urllib3/poolmanager.pyc,, +pip/_vendor/html5lib/inputstream.pyc,, +pip/req/__init__.pyc,, +pip/_vendor/html5lib/sanitizer.pyc,, +pip/_vendor/colorama/win32.pyc,, +pip/req/req_set.pyc,, +pip/_vendor/distlib/resources.pyc,, +pip/_vendor/requests/packages/chardet/hebrewprober.pyc,, +pip/_vendor/requests/cookies.pyc,, +pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.pyc,, +pip/_vendor/lockfile/__init__.pyc,, +pip/_vendor/requests/packages/urllib3/packages/six.pyc,, +pip/_vendor/progress/helpers.pyc,, +pip/_vendor/html5lib/trie/__init__.pyc,, +pip/vcs/bazaar.pyc,, +pip/_vendor/html5lib/__init__.pyc,, +pip/_vendor/requests/packages/chardet/charsetgroupprober.pyc,, +pip/_vendor/cachecontrol/serialize.pyc,, +pip/_vendor/requests/packages/urllib3/packages/__init__.pyc,, +pip/_vendor/distlib/_backport/misc.pyc,, +pip/_vendor/requests/packages/chardet/codingstatemachine.pyc,, +pip/_vendor/requests/packages/urllib3/util/url.pyc,, +pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/__init__.pyc,, +pip/_vendor/cachecontrol/controller.pyc,, +pip/utils/deprecation.pyc,, +pip/_vendor/distlib/_backport/sysconfig.pyc,, +pip/_vendor/requests/packages/chardet/langgreekmodel.pyc,, +pip/_vendor/requests/packages/__init__.pyc,, +pip/commands/show.pyc,, +pip/_vendor/requests/packages/chardet/langcyrillicmodel.pyc,, +pip/_vendor/_markerlib/__init__.pyc,, +pip/_vendor/requests/packages/chardet/jpcntx.pyc,, +pip/_vendor/requests/packages/chardet/constants.pyc,, +pip/_vendor/html5lib/treebuilders/_base.pyc,, +pip/commands/zip.pyc,, +pip/commands/help.pyc,, +pip/_vendor/requests/utils.pyc,, 
+pip/_vendor/colorama/__init__.pyc,, +pip/_vendor/packaging/_compat.pyc,, +pip/_vendor/distlib/version.pyc,, +pip/utils/ui.pyc,, +pip/_vendor/requests/packages/urllib3/exceptions.pyc,, +pip/commands/uninstall.pyc,, +pip/_vendor/distlib/index.pyc,, +pip/_vendor/cachecontrol/heuristics.pyc,, +pip/commands/freeze.pyc,, +pip/_vendor/html5lib/treewalkers/__init__.pyc,, +pip/_vendor/requests/packages/chardet/big5prober.pyc,, +pip/_vendor/requests/packages/chardet/langthaimodel.pyc,, +pip/_vendor/requests/sessions.pyc,, +pip/_vendor/distlib/_backport/tarfile.pyc,, +pip/_vendor/requests/packages/urllib3/util/__init__.pyc,, +pip/_vendor/requests/packages/urllib3/util/response.pyc,, +pip/_vendor/html5lib/treebuilders/dom.pyc,, +pip/_vendor/html5lib/filters/__init__.pyc,, +pip/baseparser.pyc,, +pip/_vendor/requests/models.pyc,, +pip/status_codes.pyc,, +pip/_vendor/distlib/__init__.pyc,, +pip/_vendor/pkg_resources/__init__.pyc,, +pip/_vendor/pkg_resources/tests/test_resources.pyc,, +pip/commands/search.pyc,, +pip/_vendor/requests/packages/chardet/langhungarianmodel.pyc,, +pip/_vendor/html5lib/utils.pyc,, +pip/_vendor/html5lib/trie/datrie.pyc,, +pip/_vendor/requests/structures.pyc,, +pip/_vendor/packaging/version.pyc,, +pip/_vendor/cachecontrol/adapter.pyc,, +pip/_vendor/requests/packages/urllib3/fields.pyc,, +pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.pyc,, +pip/vcs/__init__.pyc,, +pip/req/req_file.pyc,, +pip/models/__init__.pyc,, +pip/_vendor/html5lib/trie/py.pyc,, +pip/_vendor/packaging/specifiers.pyc,, +pip/_vendor/requests/packages/chardet/gb2312freq.pyc,, +pip/commands/unzip.pyc,, +pip/commands/__init__.pyc,, +pip/_vendor/requests/packages/urllib3/__init__.pyc,, +pip/_vendor/distlib/compat.pyc,, +pip/models/index.pyc,, +pip/_vendor/lockfile/mkdirlockfile.pyc,, +pip/_vendor/distlib/wheel.pyc,, +pip/_vendor/requests/packages/chardet/euctwprober.pyc,, +pip/utils/build.pyc,, +pip/_vendor/requests/packages/chardet/escsm.pyc,, 
+pip/_vendor/requests/status_codes.pyc,, +pip/_vendor/requests/exceptions.pyc,, +pip/_vendor/distlib/markers.pyc,, +pip/index.pyc,, +pip/utils/logging.pyc,, +pip/utils/outdated.pyc,, +pip/_vendor/requests/api.pyc,, +pip/_vendor/requests/packages/urllib3/filepost.pyc,, +pip/_vendor/requests/packages/chardet/big5freq.pyc,, +pip/_vendor/html5lib/treebuilders/etree.pyc,, +pip/_vendor/requests/packages/chardet/cp949prober.pyc,, +pip/_vendor/cachecontrol/caches/file_cache.pyc,, +pip/_vendor/requests/packages/chardet/mbcsgroupprober.pyc,, +pip/_vendor/requests/packages/chardet/mbcssm.pyc,, +pip/_vendor/distlib/_backport/shutil.pyc,, +pip/__main__.pyc,, +pip/_vendor/cachecontrol/compat.pyc,, +pip/_vendor/html5lib/tokenizer.pyc,, +pip/download.pyc,, +pip/operations/freeze.pyc,, +pip/_vendor/lockfile/sqlitelockfile.pyc,, +pip/_vendor/requests/packages/chardet/utf8prober.pyc,, +pip/_vendor/requests/packages/chardet/langhebrewmodel.pyc,, +pip/_vendor/requests/packages/chardet/compat.pyc,, +pip/_vendor/progress/counter.pyc,, +pip/utils/filesystem.pyc,, +pip/_vendor/six.pyc,, +pip/_vendor/colorama/initialise.pyc,, +pip/_vendor/requests/packages/urllib3/packages/ordered_dict.pyc,, +pip/_vendor/requests/packages/chardet/eucjpprober.pyc,, +pip/_vendor/retrying.pyc,, +pip/_vendor/html5lib/treewalkers/etree.pyc,, +pip/_vendor/cachecontrol/wrapper.pyc,, +pip/_vendor/requests/hooks.pyc,, +pip/_vendor/packaging/_structures.pyc,, +pip/_vendor/requests/packages/urllib3/connectionpool.pyc,, +pip/_vendor/requests/packages/chardet/mbcharsetprober.pyc,, +pip/locations.pyc,, +pip/_vendor/requests/packages/chardet/jisfreq.pyc,, +pip/_vendor/requests/packages/chardet/sbcsgroupprober.pyc,, +pip/vcs/subversion.pyc,, +pip/_vendor/requests/packages/urllib3/util/connection.pyc,, +pip/exceptions.pyc,, +pip/basecommand.pyc,, +pip/_vendor/distlib/locators.pyc,, +pip/_vendor/html5lib/filters/_base.pyc,, +pip/_vendor/re-vendor.pyc,, +pip/_vendor/html5lib/treewalkers/dom.pyc,, 
+pip/_vendor/requests/packages/urllib3/contrib/__init__.pyc,, +pip/_vendor/requests/packages/chardet/euctwfreq.pyc,, +pip/_vendor/requests/packages/chardet/chardistribution.pyc,, +pip/_vendor/cachecontrol/caches/__init__.pyc,, +pip/_vendor/pkg_resources/tests/test_pkg_resources.pyc,, +pip/req/req_uninstall.pyc,, +pip/req/req_requirement.pyc,, +pip/_vendor/requests/packages/chardet/sbcharsetprober.pyc,, +pip/_vendor/colorama/ansitowin32.pyc,, +pip/_vendor/cachecontrol/filewrapper.pyc,, +pip/_vendor/requests/packages/chardet/sjisprober.pyc,, +pip/_vendor/requests/packages/urllib3/util/timeout.pyc,, +pip/_vendor/_markerlib/markers.pyc,, +pip/utils/__init__.pyc,, +pip/_vendor/requests/packages/urllib3/_collections.pyc,, +pip/_vendor/requests/packages/urllib3/util/request.pyc,, +pip/_vendor/ipaddress.pyc,, +pip/_vendor/distlib/manifest.pyc,, +pip/_vendor/html5lib/serializer/htmlserializer.pyc,, +pip/_vendor/html5lib/serializer/__init__.pyc,, +pip/_vendor/html5lib/trie/_base.pyc,, +pip/_vendor/requests/packages/urllib3/response.pyc,, +pip/req/req_install.pyc,, +pip/_vendor/html5lib/treeadapters/__init__.pyc,, +pip/_vendor/cachecontrol/caches/redis_cache.pyc,, +pip/_vendor/html5lib/filters/inject_meta_charset.pyc,, +pip/_vendor/requests/packages/chardet/charsetprober.pyc,, +pip/_vendor/requests/packages/urllib3/util/retry.pyc,, +pip/vcs/mercurial.pyc,, +pip/_vendor/cachecontrol/__init__.pyc,, +pip/_vendor/__init__.pyc,, +pip/_vendor/requests/packages/chardet/universaldetector.pyc,, +pip/_vendor/html5lib/constants.pyc,, +pip/_vendor/html5lib/treewalkers/lxmletree.pyc,, +pip/_vendor/colorama/ansi.pyc,, +pip/commands/install.pyc,, +pip/_vendor/requests/packages/chardet/gb2312prober.pyc,, +pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.pyc,, +pip/_vendor/pkg_resources/tests/__init__.pyc,, +pip/_vendor/progress/bar.pyc,, +pip/wheel.pyc,, +pip/_vendor/lockfile/pidlockfile.pyc,, +pip/_vendor/requests/__init__.pyc,, +pip/_vendor/requests/packages/chardet/chardetect.pyc,, 
+pip/_vendor/html5lib/filters/optionaltags.pyc,, +pip/_vendor/requests/packages/urllib3/connection.pyc,, +pip/utils/appdirs.pyc,, +pip/_vendor/colorama/winterm.pyc,, diff --git a/panda/python/Lib/site-packages/pip-6.1.1.dist-info/WHEEL b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/WHEEL new file mode 100644 index 00000000..9dff69d8 --- /dev/null +++ b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.24.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/panda/python/Lib/site-packages/pip-6.1.1.dist-info/entry_points.txt b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/entry_points.txt new file mode 100644 index 00000000..a237b5e4 --- /dev/null +++ b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +pip = pip:main +pip3 = pip:main +pip3.4 = pip:main + diff --git a/panda/python/Lib/site-packages/pip-6.1.1.dist-info/top_level.txt b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/top_level.txt new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/panda/python/Lib/site-packages/pip-6.1.1.dist-info/top_level.txt @@ -0,0 +1 @@ +pip diff --git a/panda/python/Lib/site-packages/pip/__init__.py b/panda/python/Lib/site-packages/pip/__init__.py new file mode 100644 index 00000000..8a3a4c02 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/__init__.py @@ -0,0 +1,315 @@ +#!/usr/bin/env python +from __future__ import absolute_import + +import logging +import os +import optparse +import warnings + +import sys +import re + +from pip.exceptions import InstallationError, CommandError, PipError +from pip.utils import get_installed_distributions, get_prog +from pip.utils import deprecation +from pip.vcs import git, mercurial, subversion, bazaar # noqa +from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter +from pip.commands import get_summaries, get_similar_commands +from pip.commands 
import commands_dict +from pip._vendor.requests.packages.urllib3.exceptions import ( + InsecureRequestWarning, +) + + +# assignment for flake8 to be happy + +# This fixes a peculiarity when importing via __import__ - as we are +# initialising the pip module, "from pip import cmdoptions" is recursive +# and appears not to work properly in that situation. +import pip.cmdoptions +cmdoptions = pip.cmdoptions + +# The version as used in the setup.py and the docs conf.py +__version__ = "6.1.1" + + +logger = logging.getLogger(__name__) + +# Hide the InsecureRequestWArning from urllib3 +warnings.filterwarnings("ignore", category=InsecureRequestWarning) + + +def autocomplete(): + """Command and option completion for the main option parser (and options) + and its subcommands (and options). + + Enable by sourcing one of the completion shell scripts (bash or zsh). + """ + # Don't complete if user hasn't sourced bash_completion file. + if 'PIP_AUTO_COMPLETE' not in os.environ: + return + cwords = os.environ['COMP_WORDS'].split()[1:] + cword = int(os.environ['COMP_CWORD']) + try: + current = cwords[cword - 1] + except IndexError: + current = '' + + subcommands = [cmd for cmd, summary in get_summaries()] + options = [] + # subcommand + try: + subcommand_name = [w for w in cwords if w in subcommands][0] + except IndexError: + subcommand_name = None + + parser = create_main_parser() + # subcommand options + if subcommand_name: + # special case: 'help' subcommand has no options + if subcommand_name == 'help': + sys.exit(1) + # special case: list locally installed dists for uninstall command + if subcommand_name == 'uninstall' and not current.startswith('-'): + installed = [] + lc = current.lower() + for dist in get_installed_distributions(local_only=True): + if dist.key.startswith(lc) and dist.key not in cwords[1:]: + installed.append(dist.key) + # if there are no dists installed, fall back to option completion + if installed: + for dist in installed: + print(dist) + sys.exit(1) + + 
subcommand = commands_dict[subcommand_name]() + options += [(opt.get_opt_string(), opt.nargs) + for opt in subcommand.parser.option_list_all + if opt.help != optparse.SUPPRESS_HELP] + + # filter out previously specified options from available options + prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] + options = [(x, v) for (x, v) in options if x not in prev_opts] + # filter options by current input + options = [(k, v) for k, v in options if k.startswith(current)] + for option in options: + opt_label = option[0] + # append '=' to options which require args + if option[1]: + opt_label += '=' + print(opt_label) + else: + # show main parser options only when necessary + if current.startswith('-') or current.startswith('--'): + opts = [i.option_list for i in parser.option_groups] + opts.append(parser.option_list) + opts = (o for it in opts for o in it) + + subcommands += [i.get_opt_string() for i in opts + if i.help != optparse.SUPPRESS_HELP] + + print(' '.join([x for x in subcommands if x.startswith(current)])) + sys.exit(1) + + +def create_main_parser(): + parser_kw = { + 'usage': '\n%prog [options]', + 'add_help_option': False, + 'formatter': UpdatingDefaultsHelpFormatter(), + 'name': 'global', + 'prog': get_prog(), + } + + parser = ConfigOptionParser(**parser_kw) + parser.disable_interspersed_args() + + pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + parser.version = 'pip %s from %s (python %s)' % ( + __version__, pip_pkg_dir, sys.version[:3]) + + # add the general options + gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) + parser.add_option_group(gen_opts) + + parser.main = True # so the help formatter knows + + # create command listing for description + command_summaries = get_summaries() + description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] + parser.description = '\n'.join(description) + + return parser + + +def parseopts(args): + parser = create_main_parser() + + # Note: parser 
calls disable_interspersed_args(), so the result of this + # call is to split the initial args into the general options before the + # subcommand and everything else. + # For example: + # args: ['--timeout=5', 'install', '--user', 'INITools'] + # general_options: ['--timeout==5'] + # args_else: ['install', '--user', 'INITools'] + general_options, args_else = parser.parse_args(args) + + # --version + if general_options.version: + sys.stdout.write(parser.version) + sys.stdout.write(os.linesep) + sys.exit() + + # pip || pip help -> print_help() + if not args_else or (args_else[0] == 'help' and len(args_else) == 1): + parser.print_help() + sys.exit() + + # the subcommand name + cmd_name = args_else[0] + + if cmd_name not in commands_dict: + guess = get_similar_commands(cmd_name) + + msg = ['unknown command "%s"' % cmd_name] + if guess: + msg.append('maybe you meant "%s"' % guess) + + raise CommandError(' - '.join(msg)) + + # all the args without the subcommand + cmd_args = args[:] + cmd_args.remove(cmd_name) + + return cmd_name, cmd_args + + +def check_isolated(args): + isolated = False + + if "--isolated" in args: + isolated = True + + return isolated + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + # Enable our Deprecation Warnings + for deprecation_warning in deprecation.DEPRECATIONS: + warnings.simplefilter("default", deprecation_warning) + + # Configure our deprecation warnings to be sent through loggers + deprecation.install_warning_logger() + + autocomplete() + + try: + cmd_name, cmd_args = parseopts(args) + except PipError as exc: + sys.stderr.write("ERROR: %s" % exc) + sys.stderr.write(os.linesep) + sys.exit(1) + + command = commands_dict[cmd_name](isolated=check_isolated(cmd_args)) + return command.main(cmd_args) + + +# ########################################################### +# # Writing freeze files + +class FrozenRequirement(object): + + def __init__(self, name, req, editable, comments=()): + self.name = name + self.req = req + 
self.editable = editable + self.comments = comments + + _rev_re = re.compile(r'-r(\d+)$') + _date_re = re.compile(r'-(20\d\d\d\d\d\d)$') + + @classmethod + def from_dist(cls, dist, dependency_links, find_tags=False): + location = os.path.normcase(os.path.abspath(dist.location)) + comments = [] + from pip.vcs import vcs, get_src_requirement + if vcs.get_backend_name(location): + editable = True + try: + req = get_src_requirement(dist, location, find_tags) + except InstallationError as exc: + logger.warning( + "Error when trying to get requirement for VCS system %s, " + "falling back to uneditable format", exc + ) + req = None + if req is None: + logger.warning( + 'Could not determine repository location of %s', location + ) + comments.append( + '## !! Could not determine repository location' + ) + req = dist.as_requirement() + editable = False + else: + editable = False + req = dist.as_requirement() + specs = req.specs + assert len(specs) == 1 and specs[0][0] in ["==", "==="], \ + 'Expected 1 spec with == or ===; specs = %r; dist = %r' % \ + (specs, dist) + version = specs[0][1] + ver_match = cls._rev_re.search(version) + date_match = cls._date_re.search(version) + if ver_match or date_match: + svn_backend = vcs.get_backend('svn') + if svn_backend: + svn_location = svn_backend().get_location( + dist, + dependency_links, + ) + if not svn_location: + logger.warning( + 'Warning: cannot find svn location for %s', req) + comments.append( + '## FIXME: could not find svn URL in dependency_links ' + 'for this package:' + ) + else: + comments.append( + '# Installing as editable to satisfy requirement %s:' % + req + ) + if ver_match: + rev = ver_match.group(1) + else: + rev = '{%s}' % date_match.group(1) + editable = True + req = '%s@%s#egg=%s' % ( + svn_location, + rev, + cls.egg_name(dist) + ) + return cls(dist.project_name, req, editable, comments) + + @staticmethod + def egg_name(dist): + name = dist.egg_name() + match = re.search(r'-py\d\.\d$', name) + if match: + name = 
name[:match.start()] + return name + + def __str__(self): + req = self.req + if self.editable: + req = '-e %s' % req + return '\n'.join(list(self.comments) + [str(req)]) + '\n' + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/panda/python/Lib/site-packages/pip/__main__.py b/panda/python/Lib/site-packages/pip/__main__.py new file mode 100644 index 00000000..5556539c --- /dev/null +++ b/panda/python/Lib/site-packages/pip/__main__.py @@ -0,0 +1,19 @@ +from __future__ import absolute_import + +import os +import sys + +# If we are running from a wheel, add the wheel to sys.path +# This allows the usage python pip-*.whl/pip install pip-*.whl +if __package__ == '': + # __file__ is pip-*.whl/pip/__main__.py + # first dirname call strips of '/__main__.py', second strips off '/pip' + # Resulting path is the name of the wheel itself + # Add that to sys.path so we can import pip + path = os.path.dirname(os.path.dirname(__file__)) + sys.path.insert(0, path) + +import pip # noqa + +if __name__ == '__main__': + sys.exit(pip.main()) diff --git a/panda/python/Lib/site-packages/pip/_vendor/__init__.py b/panda/python/Lib/site-packages/pip/_vendor/__init__.py new file mode 100644 index 00000000..ef0a45d0 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/__init__.py @@ -0,0 +1,108 @@ +""" +pip._vendor is for vendoring dependencies of pip to prevent needing pip to +depend on something external. + +Files inside of pip._vendor should be considered immutable and should only be +updated to versions from upstream. +""" +from __future__ import absolute_import + +import glob +import os.path +import sys + + +# By default, look in this directory for a bunch of .whl files which we will +# add to the beginning of sys.path before attempting to import anything. This +# is done to support downstream re-distributors like Debian and Fedora who +# wish to create their own Wheels for our dependencies to aid in debundling. 
+WHEEL_DIR = os.path.abspath(os.path.dirname(__file__)) + +# Actually look inside of WHEEL_DIR to find .whl files and add them to the +# front of our sys.path. +sys.path = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path + + +class VendorAlias(object): + + def __init__(self, package_names): + self._package_names = package_names + self._vendor_name = __name__ + self._vendor_pkg = self._vendor_name + "." + self._vendor_pkgs = [ + self._vendor_pkg + name for name in self._package_names + ] + + def find_module(self, fullname, path=None): + if fullname.startswith(self._vendor_pkg): + return self + + def load_module(self, name): + # Ensure that this only works for the vendored name + if not name.startswith(self._vendor_pkg): + raise ImportError( + "Cannot import %s, must be a subpackage of '%s'." % ( + name, self._vendor_name, + ) + ) + if not (name == self._vendor_name or + any(name.startswith(pkg) for pkg in self._vendor_pkgs)): + raise ImportError( + "Cannot import %s, must be one of %s." % ( + name, self._vendor_pkgs + ) + ) + + # Check to see if we already have this item in sys.modules, if we do + # then simply return that. + if name in sys.modules: + return sys.modules[name] + + # Check to see if we can import the vendor name + try: + # We do this dance here because we want to try and import this + # module without hitting a recursion error because of a bunch of + # VendorAlias instances on sys.meta_path + real_meta_path = sys.meta_path[:] + try: + sys.meta_path = [ + m for m in sys.meta_path + if not isinstance(m, VendorAlias) + ] + __import__(name) + module = sys.modules[name] + finally: + # Re-add any additions to sys.meta_path that were made while + # during the import we just did, otherwise things like + # pip._vendor.six.moves will fail. + for m in sys.meta_path: + if m not in real_meta_path: + real_meta_path.append(m) + + # Restore sys.meta_path with any new items. 
+ sys.meta_path = real_meta_path + except ImportError: + # We can't import the vendor name, so we'll try to import the + # "real" name. + real_name = name[len(self._vendor_pkg):] + try: + __import__(real_name) + module = sys.modules[real_name] + except ImportError: + raise ImportError("No module named '%s'" % (name,)) + + # If we've gotten here we've found the module we're looking for, either + # as part of our vendored package, or as the real name, so we'll add + # it to sys.modules as the vendored name so that we don't have to do + # the lookup again. + sys.modules[name] = module + + # Finally, return the loaded module + return module + + +sys.meta_path.append(VendorAlias([ + "_markerlib", "cachecontrol", "certifi", "colorama", "distlib", "html5lib", + "ipaddress", "lockfile", "packaging", "pkg_resources", "progress", + "requests", "retrying", "six", +])) diff --git a/panda/python/Lib/site-packages/pip/_vendor/_markerlib/__init__.py b/panda/python/Lib/site-packages/pip/_vendor/_markerlib/__init__.py new file mode 100644 index 00000000..197781a0 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/_markerlib/__init__.py @@ -0,0 +1,16 @@ +try: + import ast + from pip._vendor._markerlib.markers import default_environment, compile, interpret +except ImportError: + if 'ast' in globals(): + raise + def default_environment(): + return {} + def compile(marker): + def marker_fn(environment=None, override=None): + # 'empty markers are True' heuristic won't install extra deps. 
+ return not marker.strip() + marker_fn.__doc__ = marker + return marker_fn + def interpret(marker, environment=None, override=None): + return compile(marker)() diff --git a/panda/python/Lib/site-packages/pip/_vendor/_markerlib/markers.py b/panda/python/Lib/site-packages/pip/_vendor/_markerlib/markers.py new file mode 100644 index 00000000..fa837061 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/_markerlib/markers.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +"""Interpret PEP 345 environment markers. + +EXPR [in|==|!=|not in] EXPR [or|and] ... + +where EXPR belongs to any of those: + + python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1]) + python_full_version = sys.version.split()[0] + os.name = os.name + sys.platform = sys.platform + platform.version = platform.version() + platform.machine = platform.machine() + platform.python_implementation = platform.python_implementation() + a free string, like '2.6', or 'win32' +""" + +__all__ = ['default_environment', 'compile', 'interpret'] + +import ast +import os +import platform +import sys +import weakref + +_builtin_compile = compile + +try: + from platform import python_implementation +except ImportError: + if os.name == "java": + # Jython 2.5 has ast module, but not platform.python_implementation() function. + def python_implementation(): + return "Jython" + else: + raise + + +# restricted set of variables +_VARS = {'sys.platform': sys.platform, + 'python_version': '%s.%s' % sys.version_info[:2], + # FIXME parsing sys.platform is not reliable, but there is no other + # way to get e.g. 2.7.2+, and the PEP is defined with sys.version + 'python_full_version': sys.version.split(' ', 1)[0], + 'os.name': os.name, + 'platform.version': platform.version(), + 'platform.machine': platform.machine(), + 'platform.python_implementation': python_implementation(), + 'extra': None # wheel extension + } + +for var in list(_VARS.keys()): + if '.' 
in var: + _VARS[var.replace('.', '_')] = _VARS[var] + +def default_environment(): + """Return copy of default PEP 385 globals dictionary.""" + return dict(_VARS) + +class ASTWhitelist(ast.NodeTransformer): + def __init__(self, statement): + self.statement = statement # for error messages + + ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str) + # Bool operations + ALLOWED += (ast.And, ast.Or) + # Comparison operations + ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn) + + def visit(self, node): + """Ensure statement only contains allowed nodes.""" + if not isinstance(node, self.ALLOWED): + raise SyntaxError('Not allowed in environment markers.\n%s\n%s' % + (self.statement, + (' ' * node.col_offset) + '^')) + return ast.NodeTransformer.visit(self, node) + + def visit_Attribute(self, node): + """Flatten one level of attribute access.""" + new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx) + return ast.copy_location(new_node, node) + +def parse_marker(marker): + tree = ast.parse(marker, mode='eval') + new_tree = ASTWhitelist(marker).generic_visit(tree) + return new_tree + +def compile_marker(parsed_marker): + return _builtin_compile(parsed_marker, '', 'eval', + dont_inherit=True) + +_cache = weakref.WeakValueDictionary() + +def compile(marker): + """Return compiled marker as a function accepting an environment dict.""" + try: + return _cache[marker] + except KeyError: + pass + if not marker.strip(): + def marker_fn(environment=None, override=None): + """""" + return True + else: + compiled_marker = compile_marker(parse_marker(marker)) + def marker_fn(environment=None, override=None): + """override updates environment""" + if override is None: + override = {} + if environment is None: + environment = default_environment() + environment.update(override) + return eval(compiled_marker, environment) + marker_fn.__doc__ = marker + _cache[marker] = marker_fn + return 
_cache[marker] + +def interpret(marker, environment=None): + return compile(marker)(environment) diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py new file mode 100644 index 00000000..fae051a0 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py @@ -0,0 +1,11 @@ +"""CacheControl import Interface. + +Make it easy to import from cachecontrol without long namespaces. +""" +__author__ = 'Eric Larson' +__email__ = 'eric@ionrock.org' +__version__ = '0.11.2' + +from .wrapper import CacheControl +from .adapter import CacheControlAdapter +from .controller import CacheController diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py new file mode 100644 index 00000000..74589e00 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py @@ -0,0 +1,117 @@ +import functools + +from pip._vendor.requests.adapters import HTTPAdapter + +from .controller import CacheController +from .cache import DictCache +from .filewrapper import CallbackFileWrapper + + +class CacheControlAdapter(HTTPAdapter): + invalidating_methods = set(['PUT', 'DELETE']) + + def __init__(self, cache=None, + cache_etags=True, + controller_class=None, + serializer=None, + heuristic=None, + *args, **kw): + super(CacheControlAdapter, self).__init__(*args, **kw) + self.cache = cache or DictCache() + self.heuristic = heuristic + + controller_factory = controller_class or CacheController + self.controller = controller_factory( + self.cache, + cache_etags=cache_etags, + serializer=serializer, + ) + + def send(self, request, **kw): + """ + Send a request. Use the request information to see if it + exists in the cache and cache the response if we need to and can. 
+ """ + if request.method == 'GET': + cached_response = self.controller.cached_request(request) + if cached_response: + return self.build_response(request, cached_response, + from_cache=True) + + # check for etags and add headers if appropriate + request.headers.update( + self.controller.conditional_headers(request) + ) + + resp = super(CacheControlAdapter, self).send(request, **kw) + + return resp + + def build_response(self, request, response, from_cache=False): + """ + Build a response by making a request or using the cache. + + This will end up calling send and returning a potentially + cached response + """ + if not from_cache and request.method == 'GET': + + # apply any expiration heuristics + if response.status == 304: + # We must have sent an ETag request. This could mean + # that we've been expired already or that we simply + # have an etag. In either case, we want to try and + # update the cache if that is the case. + cached_response = self.controller.update_cached_response( + request, response + ) + + if cached_response is not response: + from_cache = True + + # We are done with the server response, read a + # possible response body (compliant servers will + # not return one, but we cannot be 100% sure) and + # release the connection back to the pool. + response.read(decode_content=False) + response.release_conn() + + response = cached_response + + # We always cache the 301 responses + elif response.status == 301: + self.controller.cache_response(request, response) + else: + # Check for any heuristics that might update headers + # before trying to cache. + if self.heuristic: + response = self.heuristic.apply(response) + + # Wrap the response file with a wrapper that will cache the + # response when the stream has been consumed. 
+ response._fp = CallbackFileWrapper( + response._fp, + functools.partial( + self.controller.cache_response, + request, + response, + ) + ) + + resp = super(CacheControlAdapter, self).build_response( + request, response + ) + + # See if we should invalidate the cache. + if request.method in self.invalidating_methods and resp.ok: + cache_url = self.controller.cache_url(request.url) + self.cache.delete(cache_url) + + # Give the request a from_cache attr to let people use it + resp.from_cache = from_cache + + return resp + + def close(self): + self.cache.close() + super(CacheControlAdapter, self).close() diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/cache.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/cache.py new file mode 100644 index 00000000..7389a73f --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/cache.py @@ -0,0 +1,39 @@ +""" +The cache object API for implementing caches. The default is a thread +safe in-memory dictionary. 
+""" +from threading import Lock + + +class BaseCache(object): + + def get(self, key): + raise NotImplemented() + + def set(self, key, value): + raise NotImplemented() + + def delete(self, key): + raise NotImplemented() + + def close(self): + pass + + +class DictCache(BaseCache): + + def __init__(self, init_dict=None): + self.lock = Lock() + self.data = init_dict or {} + + def get(self, key): + return self.data.get(key, None) + + def set(self, key, value): + with self.lock: + self.data.update({key: value}) + + def delete(self, key): + with self.lock: + if key in self.data: + self.data.pop(key) diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py new file mode 100644 index 00000000..f9e66a1f --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py @@ -0,0 +1,18 @@ +from textwrap import dedent + +try: + from .file_cache import FileCache +except ImportError: + notice = dedent(''' + NOTE: In order to use the FileCache you must have + lockfile installed. You can install it via pip: + pip install lockfile + ''') + print(notice) + + +try: + import redis + from .redis_cache import RedisCache +except ImportError: + pass diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py new file mode 100644 index 00000000..d1b879f8 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py @@ -0,0 +1,103 @@ +import hashlib +import os + +from pip._vendor.lockfile import FileLock + +from ..cache import BaseCache +from ..controller import CacheController + + +def _secure_open_write(filename, fmode): + # We only want to write to this file, so open it in write only mode + flags = os.O_WRONLY + + # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only + # will open *new* files. 
+ # We specify this because we want to ensure that the mode we pass is the + # mode of the file. + flags |= os.O_CREAT | os.O_EXCL + + # Do not follow symlinks to prevent someone from making a symlink that + # we follow and insecurely open a cache file. + if hasattr(os, "O_NOFOLLOW"): + flags |= os.O_NOFOLLOW + + # On Windows we'll mark this file as binary + if hasattr(os, "O_BINARY"): + flags |= os.O_BINARY + + # Before we open our file, we want to delete any existing file that is + # there + try: + os.remove(filename) + except (IOError, OSError): + # The file must not exist already, so we can just skip ahead to opening + pass + + # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a + # race condition happens between the os.remove and this line, that an + # error will be raised. Because we utilize a lockfile this should only + # happen if someone is attempting to attack us. + fd = os.open(filename, flags, fmode) + try: + return os.fdopen(fd, "wb") + except: + # An error occurred wrapping our FD in a file object + os.close(fd) + raise + + +class FileCache(BaseCache): + def __init__(self, directory, forever=False, filemode=0o0600, + dirmode=0o0700): + self.directory = directory + self.forever = forever + self.filemode = filemode + self.dirmode = dirmode + + @staticmethod + def encode(x): + return hashlib.sha224(x.encode()).hexdigest() + + def _fn(self, name): + # NOTE: This method should not change as some may depend on it. 
+ # See: https://github.com/ionrock/cachecontrol/issues/63 + hashed = self.encode(name) + parts = list(hashed[:5]) + [hashed] + return os.path.join(self.directory, *parts) + + def get(self, key): + name = self._fn(key) + if not os.path.exists(name): + return None + + with open(name, 'rb') as fh: + return fh.read() + + def set(self, key, value): + name = self._fn(key) + + # Make sure the directory exists + try: + os.makedirs(os.path.dirname(name), self.dirmode) + except (IOError, OSError): + pass + + with FileLock(name) as lock: + # Write our actual file + with _secure_open_write(lock.path, self.filemode) as fh: + fh.write(value) + + def delete(self, key): + name = self._fn(key) + if not self.forever: + os.remove(name) + + +def url_to_file_path(url, filecache): + """Return the file cache path based on the URL. + + This does not ensure the file exists! + """ + key = CacheController.cache_url(url) + return filecache._fn(key) diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py new file mode 100644 index 00000000..9f5d55fd --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py @@ -0,0 +1,41 @@ +from __future__ import division + +from datetime import datetime + + +def total_seconds(td): + """Python 2.6 compatability""" + if hasattr(td, 'total_seconds'): + return td.total_seconds() + + ms = td.microseconds + secs = (td.seconds + td.days * 24 * 3600) + return (ms + secs * 10**6) / 10**6 + + +class RedisCache(object): + + def __init__(self, conn): + self.conn = conn + + def get(self, key): + return self.conn.get(key) + + def set(self, key, value, expires=None): + if not expires: + self.conn.set(key, value) + else: + expires = expires - datetime.now() + self.conn.setex(key, total_seconds(expires), value) + + def delete(self, key): + self.conn.delete(key) + + def clear(self): + """Helper for clearing all the keys in 
a database. Use with + caution!""" + for key in self.conn.keys(): + self.conn.delete(key) + + def close(self): + self.conn.disconnect() diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/compat.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/compat.py new file mode 100644 index 00000000..9878becf --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/compat.py @@ -0,0 +1,14 @@ +try: + from urllib.parse import urljoin +except ImportError: + from urlparse import urljoin + + +try: + import cPickle as pickle +except ImportError: + import pickle + + +from pip._vendor.requests.packages.urllib3.response import HTTPResponse +from pip._vendor.requests.packages.urllib3.util import is_fp_closed diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/controller.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/controller.py new file mode 100644 index 00000000..f489b98c --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/controller.py @@ -0,0 +1,299 @@ +""" +The httplib2 algorithms ported for use with requests. +""" +import re +import calendar +import time +from email.utils import parsedate_tz + +from pip._vendor.requests.structures import CaseInsensitiveDict + +from .cache import DictCache +from .serialize import Serializer + + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + + +def parse_uri(uri): + """Parses a URI using the regex given in Appendix B of RFC 3986. + + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + groups = URI.match(uri).groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + + +class CacheController(object): + """An interface to see if request should cached or not. 
+ """ + def __init__(self, cache=None, cache_etags=True, serializer=None): + self.cache = cache or DictCache() + self.cache_etags = cache_etags + self.serializer = serializer or Serializer() + + @classmethod + def _urlnorm(cls, uri): + """Normalize the URL to create a safe key for the cache""" + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise Exception("Only absolute URIs are allowed. uri = %s" % uri) + + scheme = scheme.lower() + authority = authority.lower() + + if not path: + path = "/" + + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. + request_uri = query and "?".join([path, query]) or path + defrag_uri = scheme + "://" + authority + request_uri + + return defrag_uri + + @classmethod + def cache_url(cls, uri): + return cls._urlnorm(uri) + + def parse_cache_control(self, headers): + """ + Parse the cache control headers returning a dictionary with values + for the different directives. + """ + retval = {} + + cc_header = 'cache-control' + if 'Cache-Control' in headers: + cc_header = 'Cache-Control' + + if cc_header in headers: + parts = headers[cc_header].split(',') + parts_with_args = [ + tuple([x.strip().lower() for x in part.split("=", 1)]) + for part in parts if -1 != part.find("=") + ] + parts_wo_args = [ + (name.strip().lower(), 1) + for name in parts if -1 == name.find("=") + ] + retval = dict(parts_with_args + parts_wo_args) + return retval + + def cached_request(self, request): + """ + Return a cached response if it exists in the cache, otherwise + return False. 
+ """ + cache_url = self.cache_url(request.url) + cc = self.parse_cache_control(request.headers) + + # non-caching states + no_cache = True if 'no-cache' in cc else False + if 'max-age' in cc and cc['max-age'] == 0: + no_cache = True + + # Bail out if no-cache was set + if no_cache: + return False + + # It is in the cache, so lets see if it is going to be + # fresh enough + resp = self.serializer.loads(request, self.cache.get(cache_url)) + + # Check to see if we have a cached object + if not resp: + return False + + # If we have a cached 301, return it immediately. We don't + # need to test our response for other headers b/c it is + # intrinsically "cacheable" as it is Permanent. + # See: + # https://tools.ietf.org/html/rfc7231#section-6.4.2 + # + # Client can try to refresh the value by repeating the request + # with cache busting headers as usual (ie no-cache). + if resp.status == 301: + return resp + + headers = CaseInsensitiveDict(resp.headers) + if not headers or 'date' not in headers: + # With date or etag, the cached response can never be used + # and should be deleted. + if 'etag' not in headers: + self.cache.delete(cache_url) + return False + + now = time.time() + date = calendar.timegm( + parsedate_tz(headers['date']) + ) + current_age = max(0, now - date) + + # TODO: There is an assumption that the result will be a + # urllib3 response object. This may not be best since we + # could probably avoid instantiating or constructing the + # response until we know we need it. 
+ resp_cc = self.parse_cache_control(headers) + + # determine freshness + freshness_lifetime = 0 + + # Check the max-age pragma in the cache control header + if 'max-age' in resp_cc and resp_cc['max-age'].isdigit(): + freshness_lifetime = int(resp_cc['max-age']) + + # If there isn't a max-age, check for an expires header + elif 'expires' in headers: + expires = parsedate_tz(headers['expires']) + if expires is not None: + expire_time = calendar.timegm(expires) - date + freshness_lifetime = max(0, expire_time) + + # determine if we are setting freshness limit in the req + if 'max-age' in cc: + try: + freshness_lifetime = int(cc['max-age']) + except ValueError: + freshness_lifetime = 0 + + if 'min-fresh' in cc: + try: + min_fresh = int(cc['min-fresh']) + except ValueError: + min_fresh = 0 + # adjust our current age by our min fresh + current_age += min_fresh + + # see how fresh we actually are + fresh = (freshness_lifetime > current_age) + + if fresh: + return resp + + # we're not fresh. If we don't have an Etag, clear it out + if 'etag' not in headers: + self.cache.delete(cache_url) + + # return the original handler + return False + + def conditional_headers(self, request): + cache_url = self.cache_url(request.url) + resp = self.serializer.loads(request, self.cache.get(cache_url)) + new_headers = {} + + if resp: + headers = CaseInsensitiveDict(resp.headers) + + if 'etag' in headers: + new_headers['If-None-Match'] = headers['ETag'] + + if 'last-modified' in headers: + new_headers['If-Modified-Since'] = headers['Last-Modified'] + + return new_headers + + def cache_response(self, request, response, body=None): + """ + Algorithm for caching requests. + + This assumes a requests Response object. 
+ """ + # From httplib2: Don't cache 206's since we aren't going to + # handle byte range requests + if response.status not in [200, 203, 300, 301]: + return + + response_headers = CaseInsensitiveDict(response.headers) + + cc_req = self.parse_cache_control(request.headers) + cc = self.parse_cache_control(response_headers) + + cache_url = self.cache_url(request.url) + + # Delete it from the cache if we happen to have it stored there + no_store = cc.get('no-store') or cc_req.get('no-store') + if no_store and self.cache.get(cache_url): + self.cache.delete(cache_url) + + # If we've been given an etag, then keep the response + if self.cache_etags and 'etag' in response_headers: + self.cache.set( + cache_url, + self.serializer.dumps(request, response, body=body), + ) + + # Add to the cache any 301s. We do this before looking that + # the Date headers. + elif response.status == 301: + self.cache.set( + cache_url, + self.serializer.dumps(request, response) + ) + + # Add to the cache if the response headers demand it. If there + # is no date header then we can't do anything about expiring + # the cache. + elif 'date' in response_headers: + # cache when there is a max-age > 0 + if cc and cc.get('max-age'): + if int(cc['max-age']) > 0: + self.cache.set( + cache_url, + self.serializer.dumps(request, response, body=body), + ) + + # If the request can expire, it means we should cache it + # in the meantime. + elif 'expires' in response_headers: + if response_headers['expires']: + self.cache.set( + cache_url, + self.serializer.dumps(request, response, body=body), + ) + + def update_cached_response(self, request, response): + """On a 304 we will get a new set of headers that we want to + update our cached value with, assuming we have one. + + This should only ever be called when we've sent an ETag and + gotten a 304 as the response. 
+ """ + cache_url = self.cache_url(request.url) + + cached_response = self.serializer.loads( + request, + self.cache.get(cache_url) + ) + + if not cached_response: + # we didn't have a cached response + return response + + # Lets update our headers with the headers from the new request: + # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 + # + # The server isn't supposed to send headers that would make + # the cached body invalid. But... just in case, we'll be sure + # to strip out ones we know that might be problmatic due to + # typical assumptions. + excluded_headers = [ + "content-length", + ] + + cached_response.headers.update( + dict((k, v) for k, v in response.headers.items() + if k.lower() not in excluded_headers) + ) + + # we want a 200 b/c we have content via the cache + cached_response.status = 200 + + # update our cache + self.cache.set( + cache_url, + self.serializer.dumps(request, cached_response), + ) + + return cached_response diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py new file mode 100644 index 00000000..4b91bce0 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py @@ -0,0 +1,63 @@ +from io import BytesIO + + +class CallbackFileWrapper(object): + """ + Small wrapper around a fp object which will tee everything read into a + buffer, and when that file is closed it will execute a callback with the + contents of that buffer. + + All attributes are proxied to the underlying file object. + + This class uses members with a double underscore (__) leading prefix so as + not to accidentally shadow an attribute. + """ + + def __init__(self, fp, callback): + self.__buf = BytesIO() + self.__fp = fp + self.__callback = callback + + def __getattr__(self, name): + # The vaguaries of garbage collection means that self.__fp is + # not always set. 
By using __getattribute__ and the private + # name[0] allows looking up the attribute value and raising an + # AttributeError when it doesn't exist. This stop thigns from + # infinitely recursing calls to getattr in the case where + # self.__fp hasn't been set. + # + # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers + fp = self.__getattribute__('_CallbackFileWrapper__fp') + return getattr(fp, name) + + def __is_fp_closed(self): + try: + return self.__fp.fp is None + except AttributeError: + pass + + try: + return self.__fp.closed + except AttributeError: + pass + + # We just don't cache it then. + # TODO: Add some logging here... + return False + + def read(self, amt=None): + data = self.__fp.read(amt) + self.__buf.write(data) + + if self.__is_fp_closed(): + if self.__callback: + self.__callback(self.__buf.getvalue()) + + # We assign this to None here, because otherwise we can get into + # really tricky problems where the CPython interpreter dead locks + # because the callback is holding a reference to something which + # has a __del__ method. Setting this to None breaks the cycle + # and allows the garbage collector to do it's thing normally. 
+ self.__callback = None + + return data diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py new file mode 100644 index 00000000..01b63141 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py @@ -0,0 +1,134 @@ +import calendar +import time + +from email.utils import formatdate, parsedate, parsedate_tz + +from datetime import datetime, timedelta + +TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT" + + +def expire_after(delta, date=None): + date = date or datetime.now() + return date + delta + + +def datetime_to_header(dt): + return formatdate(calendar.timegm(dt.timetuple())) + + +class BaseHeuristic(object): + + def warning(self, response): + """ + Return a valid 1xx warning header value describing the cache + adjustments. + + The response is provided too allow warnings like 113 + http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need + to explicitly say response is over 24 hours old. + """ + return '110 - "Response is Stale"' + + def update_headers(self, response): + """Update the response headers with any new headers. + + NOTE: This SHOULD always include some Warning header to + signify that the response was cached by the client, not + by way of the provided headers. + """ + return {} + + def apply(self, response): + warning_header_value = self.warning(response) + response.headers.update(self.update_headers(response)) + if warning_header_value is not None: + response.headers.update({'Warning': warning_header_value}) + return response + + +class OneDayCache(BaseHeuristic): + """ + Cache the response by providing an expires 1 day in the + future. 
+ """ + def update_headers(self, response): + headers = {} + + if 'expires' not in response.headers: + date = parsedate(response.headers['date']) + expires = expire_after(timedelta(days=1), + date=datetime(*date[:6])) + headers['expires'] = datetime_to_header(expires) + headers['cache-control'] = 'public' + return headers + + +class ExpiresAfter(BaseHeuristic): + """ + Cache **all** requests for a defined time period. + """ + + def __init__(self, **kw): + self.delta = timedelta(**kw) + + def update_headers(self, response): + expires = expire_after(self.delta) + return { + 'expires': datetime_to_header(expires), + 'cache-control': 'public', + } + + def warning(self, response): + tmpl = '110 - Automatically cached for %s. Response might be stale' + return tmpl % self.delta + + +class LastModified(BaseHeuristic): + """ + If there is no Expires header already, fall back on Last-Modified + using the heuristic from + http://tools.ietf.org/html/rfc7234#section-4.2.2 + to calculate a reasonable value. + + Firefox also does something like this per + https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ + http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397 + Unlike mozilla we limit this to 24-hr. 
+ """ + cacheable_by_default_statuses = set([ + 200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501 + ]) + + def update_headers(self, resp): + headers = resp.headers + + if 'expires' in headers: + return {} + + if 'cache-control' in headers and headers['cache-control'] != 'public': + return {} + + if resp.status not in self.cacheable_by_default_statuses: + return {} + + if 'date' not in headers or 'last-modified' not in headers: + return {} + + date = calendar.timegm(parsedate_tz(headers['date'])) + last_modified = parsedate(headers['last-modified']) + if date is None or last_modified is None: + return {} + + now = time.time() + current_age = max(0, now - date) + delta = date - calendar.timegm(last_modified) + freshness_lifetime = max(0, min(delta / 10, 24 * 3600)) + if freshness_lifetime <= current_age: + return {} + + expires = date + freshness_lifetime + return {'expires': time.strftime(TIME_FMT, time.gmtime(expires))} + + def warning(self, resp): + return None diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py new file mode 100644 index 00000000..e803a880 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py @@ -0,0 +1,184 @@ +import base64 +import io +import json +import zlib + +from pip._vendor.requests.structures import CaseInsensitiveDict + +from .compat import HTTPResponse, pickle + + +def _b64_encode_bytes(b): + return base64.b64encode(b).decode("ascii") + + +def _b64_encode_str(s): + return _b64_encode_bytes(s.encode("utf8")) + + +def _b64_decode_bytes(b): + return base64.b64decode(b.encode("ascii")) + + +def _b64_decode_str(s): + return _b64_decode_bytes(s).decode("utf8") + + +class Serializer(object): + + def dumps(self, request, response, body=None): + response_headers = CaseInsensitiveDict(response.headers) + + if body is None: + body = response.read(decode_content=False) + + # NOTE: 99% sure this is dead code. 
I'm only leaving it + # here b/c I don't have a test yet to prove + # it. Basically, before using + # `cachecontrol.filewrapper.CallbackFileWrapper`, + # this made an effort to reset the file handle. The + # `CallbackFileWrapper` short circuits this code by + # setting the body as the content is consumed, the + # result being a `body` argument is *always* passed + # into cache_response, and in turn, + # `Serializer.dump`. + response._fp = io.BytesIO(body) + + data = { + "response": { + "body": _b64_encode_bytes(body), + "headers": dict( + (_b64_encode_str(k), _b64_encode_str(v)) + for k, v in response.headers.items() + ), + "status": response.status, + "version": response.version, + "reason": _b64_encode_str(response.reason), + "strict": response.strict, + "decode_content": response.decode_content, + }, + } + + # Construct our vary headers + data["vary"] = {} + if "vary" in response_headers: + varied_headers = response_headers['vary'].split(',') + for header in varied_headers: + header = header.strip() + data["vary"][header] = request.headers.get(header, None) + + # Encode our Vary headers to ensure they can be serialized as JSON + data["vary"] = dict( + (_b64_encode_str(k), _b64_encode_str(v) if v is not None else v) + for k, v in data["vary"].items() + ) + + return b",".join([ + b"cc=2", + zlib.compress( + json.dumps( + data, separators=(",", ":"), sort_keys=True, + ).encode("utf8"), + ), + ]) + + def loads(self, request, data): + # Short circuit if we've been given an empty set of data + if not data: + return + + # Determine what version of the serializer the data was serialized + # with + try: + ver, data = data.split(b",", 1) + except ValueError: + ver = b"cc=0" + + # Make sure that our "ver" is actually a version and isn't a false + # positive from a , being in the data stream. 
+ if ver[:3] != b"cc=": + data = ver + data + ver = b"cc=0" + + # Get the version number out of the cc=N + ver = ver.split(b"=", 1)[-1].decode("ascii") + + # Dispatch to the actual load method for the given version + try: + return getattr(self, "_loads_v{0}".format(ver))(request, data) + except AttributeError: + # This is a version we don't have a loads function for, so we'll + # just treat it as a miss and return None + return + + def prepare_response(self, request, cached): + """Verify our vary headers match and construct a real urllib3 + HTTPResponse object. + """ + # Special case the '*' Vary value as it means we cannot actually + # determine if the cached response is suitable for this request. + if "*" in cached.get("vary", {}): + return + + # Ensure that the Vary headers for the cached response match our + # request + for header, value in cached.get("vary", {}).items(): + if request.headers.get(header, None) != value: + return + + body_raw = cached["response"].pop("body") + + try: + body = io.BytesIO(body_raw) + except TypeError: + # This can happen if cachecontrol serialized to v1 format (pickle) + # using Python 2. A Python 2 str(byte string) will be unpickled as + # a Python 3 str (unicode string), which will cause the above to + # fail with: + # + # TypeError: 'str' does not support the buffer interface + body = io.BytesIO(body_raw.encode('utf8')) + + return HTTPResponse( + body=body, + preload_content=False, + **cached["response"] + ) + + def _loads_v0(self, request, data): + # The original legacy cache data. This doesn't contain enough + # information to construct everything we need, so we'll treat this as + # a miss. 
+ return + + def _loads_v1(self, request, data): + try: + cached = pickle.loads(data) + except ValueError: + return + + return self.prepare_response(request, cached) + + def _loads_v2(self, request, data): + try: + cached = json.loads(zlib.decompress(data).decode("utf8")) + except ValueError: + return + + # We need to decode the items that we've base64 encoded + cached["response"]["body"] = _b64_decode_bytes( + cached["response"]["body"] + ) + cached["response"]["headers"] = dict( + (_b64_decode_str(k), _b64_decode_str(v)) + for k, v in cached["response"]["headers"].items() + ) + cached["response"]["reason"] = _b64_decode_str( + cached["response"]["reason"], + ) + cached["vary"] = dict( + (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v) + for k, v in cached["vary"].items() + ) + + return self.prepare_response(request, cached) diff --git a/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py new file mode 100644 index 00000000..ea421aa7 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py @@ -0,0 +1,21 @@ +from .adapter import CacheControlAdapter +from .cache import DictCache + + +def CacheControl(sess, + cache=None, + cache_etags=True, + serializer=None, + heuristic=None): + + cache = cache or DictCache() + adapter = CacheControlAdapter( + cache, + cache_etags=cache_etags, + serializer=serializer, + heuristic=heuristic, + ) + sess.mount('http://', adapter) + sess.mount('https://', adapter) + + return sess diff --git a/panda/python/Lib/site-packages/pip/_vendor/colorama/__init__.py b/panda/python/Lib/site-packages/pip/_vendor/colorama/__init__.py new file mode 100644 index 00000000..4af0c1e1 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/colorama/__init__.py @@ -0,0 +1,7 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+from .initialise import init, deinit, reinit +from .ansi import Fore, Back, Style, Cursor +from .ansitowin32 import AnsiToWin32 + +__version__ = '0.3.3' + diff --git a/panda/python/Lib/site-packages/pip/_vendor/colorama/ansi.py b/panda/python/Lib/site-packages/pip/_vendor/colorama/ansi.py new file mode 100644 index 00000000..1cc72250 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/colorama/ansi.py @@ -0,0 +1,99 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +''' +This module generates ANSI character codes to printing colors to terminals. +See: http://en.wikipedia.org/wiki/ANSI_escape_code +''' + +CSI = '\033[' +OSC = '\033]' +BEL = '\007' + + +def code_to_chars(code): + return CSI + str(code) + 'm' + + +class AnsiCodes(object): + def __init__(self, codes): + for name in dir(codes): + if not name.startswith('_'): + value = getattr(codes, name) + setattr(self, name, code_to_chars(value)) + + +class AnsiCursor(object): + def UP(self, n=1): + return CSI + str(n) + "A" + def DOWN(self, n=1): + return CSI + str(n) + "B" + def FORWARD(self, n=1): + return CSI + str(n) + "C" + def BACK(self, n=1): + return CSI + str(n) + "D" + def POS(self, x=1, y=1): + return CSI + str(y) + ";" + str(x) + "H" + +def set_title(title): + return OSC + "2;" + title + BEL + +def clear_screen(mode=2): + return CSI + str(mode) + "J" + +def clear_line(mode=2): + return CSI + str(mode) + "K" + + +class AnsiFore: + BLACK = 30 + RED = 31 + GREEN = 32 + YELLOW = 33 + BLUE = 34 + MAGENTA = 35 + CYAN = 36 + WHITE = 37 + RESET = 39 + + # These are fairly well supported, but not part of the standard. 
+ LIGHTBLACK_EX = 90 + LIGHTRED_EX = 91 + LIGHTGREEN_EX = 92 + LIGHTYELLOW_EX = 93 + LIGHTBLUE_EX = 94 + LIGHTMAGENTA_EX = 95 + LIGHTCYAN_EX = 96 + LIGHTWHITE_EX = 97 + + +class AnsiBack: + BLACK = 40 + RED = 41 + GREEN = 42 + YELLOW = 43 + BLUE = 44 + MAGENTA = 45 + CYAN = 46 + WHITE = 47 + RESET = 49 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 100 + LIGHTRED_EX = 101 + LIGHTGREEN_EX = 102 + LIGHTYELLOW_EX = 103 + LIGHTBLUE_EX = 104 + LIGHTMAGENTA_EX = 105 + LIGHTCYAN_EX = 106 + LIGHTWHITE_EX = 107 + + +class AnsiStyle: + BRIGHT = 1 + DIM = 2 + NORMAL = 22 + RESET_ALL = 0 + +Fore = AnsiCodes( AnsiFore ) +Back = AnsiCodes( AnsiBack ) +Style = AnsiCodes( AnsiStyle ) +Cursor = AnsiCursor() diff --git a/panda/python/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py b/panda/python/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py new file mode 100644 index 00000000..62e770c8 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py @@ -0,0 +1,228 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import re +import sys +import os + +from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style +from .winterm import WinTerm, WinColor, WinStyle +from .win32 import windll + + +winterm = None +if windll is not None: + winterm = WinTerm() + + +def is_a_tty(stream): + return hasattr(stream, 'isatty') and stream.isatty() + + +class StreamWrapper(object): + ''' + Wraps a stream (such as stdout), acting as a transparent proxy for all + attribute access apart from method 'write()', which is delegated to our + Converter instance. + ''' + def __init__(self, wrapped, converter): + # double-underscore everything to prevent clashes with names of + # attributes on the wrapped stream object. 
+ self.__wrapped = wrapped + self.__convertor = converter + + def __getattr__(self, name): + return getattr(self.__wrapped, name) + + def write(self, text): + self.__convertor.write(text) + + +class AnsiToWin32(object): + ''' + Implements a 'write()' method which, on Windows, will strip ANSI character + sequences from the text, and if outputting to a tty, will convert them into + win32 function calls. + ''' + ANSI_CSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])') # Control Sequence Introducer + ANSI_OSC_RE = re.compile('\033\]((?:.|;)*?)(\x07)') # Operating System Command + + def __init__(self, wrapped, convert=None, strip=None, autoreset=False): + # The wrapped stream (normally sys.stdout or sys.stderr) + self.wrapped = wrapped + + # should we reset colors to defaults after every .write() + self.autoreset = autoreset + + # create the proxy wrapping our output stream + self.stream = StreamWrapper(wrapped, self) + + on_windows = os.name == 'nt' + on_emulated_windows = on_windows and 'TERM' in os.environ + + # should we strip ANSI sequences from our output? + if strip is None: + strip = on_windows and not on_emulated_windows + self.strip = strip + + # should we should convert ANSI sequences into win32 calls? + if convert is None: + convert = on_windows and not wrapped.closed and not on_emulated_windows and is_a_tty(wrapped) + self.convert = convert + + # dict of ansi codes to win32 functions and parameters + self.win32_calls = self.get_win32_calls() + + # are we wrapping stderr? + self.on_stderr = self.wrapped is sys.stderr + + def should_wrap(self): + ''' + True if this class is actually needed. If false, then the output + stream will not be affected, nor will win32 calls be issued, so + wrapping stdout is not actually required. 
This will generally be + False on non-Windows platforms, unless optional functionality like + autoreset has been requested using kwargs to init() + ''' + return self.convert or self.strip or self.autoreset + + def get_win32_calls(self): + if self.convert and winterm: + return { + AnsiStyle.RESET_ALL: (winterm.reset_all, ), + AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), + AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), + AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), + AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), + AnsiFore.RED: (winterm.fore, WinColor.RED), + AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), + AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), + AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), + AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), + AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), + AnsiFore.WHITE: (winterm.fore, WinColor.GREY), + AnsiFore.RESET: (winterm.fore, ), + AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), + AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), + AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), + AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), + AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), + AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), + AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), + AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), + AnsiBack.BLACK: (winterm.back, WinColor.BLACK), + AnsiBack.RED: (winterm.back, WinColor.RED), + AnsiBack.GREEN: (winterm.back, WinColor.GREEN), + AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), + AnsiBack.BLUE: (winterm.back, WinColor.BLUE), + AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), + AnsiBack.CYAN: (winterm.back, WinColor.CYAN), + AnsiBack.WHITE: (winterm.back, WinColor.GREY), + AnsiBack.RESET: (winterm.back, ), + AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), + AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), + 
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), + AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), + AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), + AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), + AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), + AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), + } + return dict() + + def write(self, text): + if self.strip or self.convert: + self.write_and_convert(text) + else: + self.wrapped.write(text) + self.wrapped.flush() + if self.autoreset: + self.reset_all() + + + def reset_all(self): + if self.convert: + self.call_win32('m', (0,)) + elif not self.wrapped.closed and is_a_tty(self.wrapped): + self.wrapped.write(Style.RESET_ALL) + + + def write_and_convert(self, text): + ''' + Write the given text to our wrapped stream, stripping any ANSI + sequences from the text, and optionally converting them into win32 + calls. + ''' + cursor = 0 + text = self.convert_osc(text) + for match in self.ANSI_CSI_RE.finditer(text): + start, end = match.span() + self.write_plain_text(text, cursor, start) + self.convert_ansi(*match.groups()) + cursor = end + self.write_plain_text(text, cursor, len(text)) + + + def write_plain_text(self, text, start, end): + if start < end: + self.wrapped.write(text[start:end]) + self.wrapped.flush() + + + def convert_ansi(self, paramstring, command): + if self.convert: + params = self.extract_params(command, paramstring) + self.call_win32(command, params) + + + def extract_params(self, command, paramstring): + if command in 'Hf': + params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) + while len(params) < 2: + # defaults: + params = params + (1,) + else: + params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) + if len(params) == 0: + # defaults: + if command in 'JKm': + params = (0,) + elif command in 'ABCD': + params = (1,) + + return params + + + def call_win32(self, command, params): + 
if command == 'm': + for param in params: + if param in self.win32_calls: + func_args = self.win32_calls[param] + func = func_args[0] + args = func_args[1:] + kwargs = dict(on_stderr=self.on_stderr) + func(*args, **kwargs) + elif command in 'J': + winterm.erase_screen(params[0], on_stderr=self.on_stderr) + elif command in 'K': + winterm.erase_line(params[0], on_stderr=self.on_stderr) + elif command in 'Hf': # cursor position - absolute + winterm.set_cursor_position(params, on_stderr=self.on_stderr) + elif command in 'ABCD': # cursor position - relative + n = params[0] + # A - up, B - down, C - forward, D - back + x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] + winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) + + + def convert_osc(self, text): + for match in self.ANSI_OSC_RE.finditer(text): + start, end = match.span() + text = text[:start] + text[end:] + paramstring, command = match.groups() + if command in '\x07': # \x07 = BEL + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) + return text diff --git a/panda/python/Lib/site-packages/pip/_vendor/colorama/initialise.py b/panda/python/Lib/site-packages/pip/_vendor/colorama/initialise.py new file mode 100644 index 00000000..7e27f84f --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/colorama/initialise.py @@ -0,0 +1,66 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import atexit +import sys + +from .ansitowin32 import AnsiToWin32 + + +orig_stdout = sys.stdout +orig_stderr = sys.stderr + +wrapped_stdout = sys.stdout +wrapped_stderr = sys.stderr + +atexit_done = False + + +def reset_all(): + AnsiToWin32(orig_stdout).reset_all() + + +def init(autoreset=False, convert=None, strip=None, wrap=True): + + if not wrap and any([autoreset, convert, strip]): + raise ValueError('wrap=False conflicts with any other arg=True') + + global wrapped_stdout, wrapped_stderr + if sys.stdout is None: + wrapped_stdout = None + else: + sys.stdout = wrapped_stdout = \ + wrap_stream(orig_stdout, convert, strip, autoreset, wrap) + if sys.stderr is None: + wrapped_stderr = None + else: + sys.stderr = wrapped_stderr = \ + wrap_stream(orig_stderr, convert, strip, autoreset, wrap) + + global atexit_done + if not atexit_done: + atexit.register(reset_all) + atexit_done = True + + +def deinit(): + if orig_stdout is not None: + sys.stdout = orig_stdout + if orig_stderr is not None: + sys.stderr = orig_stderr + + +def reinit(): + if wrapped_stdout is not None: + sys.stdout = wrapped_stdout + if wrapped_stderr is not None: + sys.stderr = wrapped_stderr + + +def wrap_stream(stream, convert, strip, autoreset, wrap): + if wrap: + wrapper = AnsiToWin32(stream, + convert=convert, strip=strip, autoreset=autoreset) + if wrapper.should_wrap(): + stream = wrapper.stream + return stream + + diff --git a/panda/python/Lib/site-packages/pip/_vendor/colorama/win32.py b/panda/python/Lib/site-packages/pip/_vendor/colorama/win32.py new file mode 100644 index 00000000..c604f372 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/colorama/win32.py @@ -0,0 +1,146 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+ +# from winbase.h +STDOUT = -11 +STDERR = -12 + +try: + import ctypes + from ctypes import LibraryLoader + windll = LibraryLoader(ctypes.WinDLL) + from ctypes import wintypes +except (AttributeError, ImportError): + windll = None + SetConsoleTextAttribute = lambda *_: None +else: + from ctypes import byref, Structure, c_char, POINTER + + COORD = wintypes._COORD + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + """struct in wincon.h.""" + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + def __str__(self): + return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( + self.dwSize.Y, self.dwSize.X + , self.dwCursorPosition.Y, self.dwCursorPosition.X + , self.wAttributes + , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right + , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X + ) + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + + _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + POINTER(CONSOLE_SCREEN_BUFFER_INFO), + ] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute + _SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + ] + _SetConsoleTextAttribute.restype = wintypes.BOOL + + _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition + _SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + COORD, + ] + _SetConsoleCursorPosition.restype = wintypes.BOOL + + _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA + _FillConsoleOutputCharacterA.argtypes = [ + wintypes.HANDLE, + c_char, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputCharacterA.restype = wintypes.BOOL + + 
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute + _FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputAttribute.restype = wintypes.BOOL + + _SetConsoleTitleW = windll.kernel32.SetConsoleTitleA + _SetConsoleTitleW.argtypes = [ + wintypes.LPCSTR + ] + _SetConsoleTitleW.restype = wintypes.BOOL + + handles = { + STDOUT: _GetStdHandle(STDOUT), + STDERR: _GetStdHandle(STDERR), + } + + def GetConsoleScreenBufferInfo(stream_id=STDOUT): + handle = handles[stream_id] + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return csbi + + def SetConsoleTextAttribute(stream_id, attrs): + handle = handles[stream_id] + return _SetConsoleTextAttribute(handle, attrs) + + def SetConsoleCursorPosition(stream_id, position, adjust=True): + position = COORD(*position) + # If the position is out of range, do nothing. + if position.Y <= 0 or position.X <= 0: + return + # Adjust for Windows' SetConsoleCursorPosition: + # 1. being 0-based, while ANSI is 1-based. + # 2. expecting (x,y), while ANSI uses (y,x). + adjusted_position = COORD(position.Y - 1, position.X - 1) + if adjust: + # Adjust for viewport's scroll position + sr = GetConsoleScreenBufferInfo(STDOUT).srWindow + adjusted_position.Y += sr.Top + adjusted_position.X += sr.Left + # Resume normal processing + handle = handles[stream_id] + return _SetConsoleCursorPosition(handle, adjusted_position) + + def FillConsoleOutputCharacter(stream_id, char, length, start): + handle = handles[stream_id] + char = c_char(char.encode()) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. 
+ success = _FillConsoleOutputCharacterA( + handle, char, length, start, byref(num_written)) + return num_written.value + + def FillConsoleOutputAttribute(stream_id, attr, length, start): + ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' + handle = handles[stream_id] + attribute = wintypes.WORD(attr) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + return _FillConsoleOutputAttribute( + handle, attribute, length, start, byref(num_written)) + + def SetConsoleTitle(title): + return _SetConsoleTitleW(title) diff --git a/panda/python/Lib/site-packages/pip/_vendor/colorama/winterm.py b/panda/python/Lib/site-packages/pip/_vendor/colorama/winterm.py new file mode 100644 index 00000000..fcc774ff --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/colorama/winterm.py @@ -0,0 +1,151 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from . 
import win32 + + +# from wincon.h +class WinColor(object): + BLACK = 0 + BLUE = 1 + GREEN = 2 + CYAN = 3 + RED = 4 + MAGENTA = 5 + YELLOW = 6 + GREY = 7 + +# from wincon.h +class WinStyle(object): + NORMAL = 0x00 # dim text, dim background + BRIGHT = 0x08 # bright text, dim background + BRIGHT_BACKGROUND = 0x80 # dim text, bright background + +class WinTerm(object): + + def __init__(self): + self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes + self.set_attrs(self._default) + self._default_fore = self._fore + self._default_back = self._back + self._default_style = self._style + + def get_attrs(self): + return self._fore + self._back * 16 + self._style + + def set_attrs(self, value): + self._fore = value & 7 + self._back = (value >> 4) & 7 + self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) + + def reset_all(self, on_stderr=None): + self.set_attrs(self._default) + self.set_console(attrs=self._default) + + def fore(self, fore=None, light=False, on_stderr=False): + if fore is None: + fore = self._default_fore + self._fore = fore + if light: + self._style |= WinStyle.BRIGHT + self.set_console(on_stderr=on_stderr) + + def back(self, back=None, light=False, on_stderr=False): + if back is None: + back = self._default_back + self._back = back + if light: + self._style |= WinStyle.BRIGHT_BACKGROUND + self.set_console(on_stderr=on_stderr) + + def style(self, style=None, on_stderr=False): + if style is None: + style = self._default_style + self._style = style + self.set_console(on_stderr=on_stderr) + + def set_console(self, attrs=None, on_stderr=False): + if attrs is None: + attrs = self.get_attrs() + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleTextAttribute(handle, attrs) + + def get_position(self, handle): + position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition + # Because Windows coordinates are 0-based, + # and win32.SetConsoleCursorPosition expects 1-based. 
+ position.X += 1 + position.Y += 1 + return position + + def set_cursor_position(self, position=None, on_stderr=False): + if position is None: + #I'm not currently tracking the position, so there is no default. + #position = self.get_position() + return + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleCursorPosition(handle, position) + + def cursor_adjust(self, x, y, on_stderr=False): + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + position = self.get_position(handle) + adjusted_position = (position.Y + y, position.X + x) + win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) + + def erase_screen(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the screen. + # 1 should clear from the cursor to the beginning of the screen. + # 2 should clear the entire screen, and move cursor to (1,1) + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + # get the number of character cells in the current buffer + cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y + # get number of character cells before current cursor position + cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = cells_in_screen - cells_before_cursor + if mode == 1: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_before_cursor + elif mode == 2: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_in_screen + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + if mode == 2: + # put the cursor where needed + win32.SetConsoleCursorPosition(handle, (1, 1)) + + def erase_line(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to 
the end of the line. + # 1 should clear from the cursor to the beginning of the line. + # 2 should clear the entire line. + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X + if mode == 1: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwCursorPosition.X + elif mode == 2: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwSize.X + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + + def set_title(self, title): + win32.SetConsoleTitle(title) diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/__init__.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/__init__.py new file mode 100644 index 00000000..4cb1c548 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/__init__.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2014 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +import logging + +__version__ = '0.2.0' + +class DistlibException(Exception): + pass + +try: + from logging import NullHandler +except ImportError: # pragma: no cover + class NullHandler(logging.Handler): + def handle(self, record): pass + def emit(self, record): pass + def createLock(self): self.lock = None + +logger = logging.getLogger(__name__) +logger.addHandler(NullHandler()) diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py new file mode 100644 index 00000000..f7dbf4c9 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py @@ -0,0 +1,6 @@ +"""Modules copied from Python 3 standard libraries, for internal use only. + +Individual classes and functions are found in d2._backport.misc. Intended +usage is to always import things missing from 3.1 from that module: the +built-in/stdlib objects will be used if found. +""" diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py new file mode 100644 index 00000000..cfb318d3 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +"""Backports for individual classes and functions.""" + +import os +import sys + +__all__ = ['cache_from_source', 'callable', 'fsencode'] + + +try: + from imp import cache_from_source +except ImportError: + def cache_from_source(py_file, debug=__debug__): + ext = debug and 'c' or 'o' + return py_file + ext + + +try: + callable = callable +except NameError: + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode +except AttributeError: + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, str): + return filename.encode(sys.getfilesystemencoding()) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py new file mode 100644 index 00000000..9e2e234d --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py @@ -0,0 +1,761 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. + +""" + +import os +import sys +import stat +from os.path import abspath +import fnmatch +import collections +import errno +from . 
import tarfile + +try: + import bz2 + _BZ2_SUPPORTED = True +except ImportError: + _BZ2_SUPPORTED = False + +try: + from pwd import getpwnam +except ImportError: + getpwnam = None + +try: + from grp import getgrnam +except ImportError: + getgrnam = None + +__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", + "copytree", "move", "rmtree", "Error", "SpecialFileError", + "ExecError", "make_archive", "get_archive_formats", + "register_archive_format", "unregister_archive_format", + "get_unpack_formats", "register_unpack_format", + "unregister_unpack_format", "unpack_archive", "ignore_patterns"] + +class Error(EnvironmentError): + pass + +class SpecialFileError(EnvironmentError): + """Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)""" + +class ExecError(EnvironmentError): + """Raised when a command could not be executed""" + +class ReadError(EnvironmentError): + """Raised when an archive cannot be read""" + +class RegistryError(Exception): + """Raised when a registery operation with the archiving + and unpacking registeries fails""" + + +try: + WindowsError +except NameError: + WindowsError = None + +def copyfileobj(fsrc, fdst, length=16*1024): + """copy data from file-like object fsrc to file-like object fdst""" + while 1: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + +def _samefile(src, dst): + # Macintosh, Unix. + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. 
+ return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + +def copyfile(src, dst): + """Copy data from src to dst""" + if _samefile(src, dst): + raise Error("`%s` and `%s` are the same file" % (src, dst)) + + for fn in [src, dst]: + try: + st = os.stat(fn) + except OSError: + # File most likely does not exist + pass + else: + # XXX What about other special files? (sockets, devices...) + if stat.S_ISFIFO(st.st_mode): + raise SpecialFileError("`%s` is a named pipe" % fn) + + with open(src, 'rb') as fsrc: + with open(dst, 'wb') as fdst: + copyfileobj(fsrc, fdst) + +def copymode(src, dst): + """Copy mode bits from src to dst""" + if hasattr(os, 'chmod'): + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + os.chmod(dst, mode) + +def copystat(src, dst): + """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + if hasattr(os, 'utime'): + os.utime(dst, (st.st_atime, st.st_mtime)) + if hasattr(os, 'chmod'): + os.chmod(dst, mode) + if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): + try: + os.chflags(dst, st.st_flags) + except OSError as why: + if (not hasattr(errno, 'EOPNOTSUPP') or + why.errno != errno.EOPNOTSUPP): + raise + +def copy(src, dst): + """Copy data and mode bits ("cp src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copymode(src, dst) + +def copy2(src, dst): + """Copy data and all stat info ("cp -p src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copystat(src, dst) + +def ignore_patterns(*patterns): + """Function that can be used as copytree() ignore parameter. 
+ + Patterns is a sequence of glob-style patterns + that are used to exclude files""" + def _ignore_patterns(path, names): + ignored_names = [] + for pattern in patterns: + ignored_names.extend(fnmatch.filter(names, pattern)) + return set(ignored_names) + return _ignore_patterns + +def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, + ignore_dangling_symlinks=False): + """Recursively copy a directory tree. + + The destination directory must not already exist. + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. 
+ + """ + names = os.listdir(src) + if ignore is not None: + ignored_names = ignore(src, names) + else: + ignored_names = set() + + os.makedirs(dst) + errors = [] + for name in names: + if name in ignored_names: + continue + srcname = os.path.join(src, name) + dstname = os.path.join(dst, name) + try: + if os.path.islink(srcname): + linkto = os.readlink(srcname) + if symlinks: + os.symlink(linkto, dstname) + else: + # ignore dangling symlink if the flag is on + if not os.path.exists(linkto) and ignore_dangling_symlinks: + continue + # otherwise let the copy occurs. copy2 will raise an error + copy_function(srcname, dstname) + elif os.path.isdir(srcname): + copytree(srcname, dstname, symlinks, ignore, copy_function) + else: + # Will raise a SpecialFileError for unsupported file types + copy_function(srcname, dstname) + # catch the Error from the recursive copytree so that we can + # continue with other files + except Error as err: + errors.extend(err.args[0]) + except EnvironmentError as why: + errors.append((srcname, dstname, str(why))) + try: + copystat(src, dst) + except OSError as why: + if WindowsError is not None and isinstance(why, WindowsError): + # Copying file access times may fail on Windows + pass + else: + errors.extend((src, dst, str(why))) + if errors: + raise Error(errors) + +def rmtree(path, ignore_errors=False, onerror=None): + """Recursively delete a directory tree. + + If ignore_errors is set, errors are ignored; otherwise, if onerror + is set, it is called to handle the error with arguments (func, + path, exc_info) where func is os.listdir, os.remove, or os.rmdir; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If ignore_errors + is false and onerror is None, an exception is raised. 
+ + """ + if ignore_errors: + def onerror(*args): + pass + elif onerror is None: + def onerror(*args): + raise + try: + if os.path.islink(path): + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError: + onerror(os.path.islink, path, sys.exc_info()) + # can't continue even if onerror hook returns + return + names = [] + try: + names = os.listdir(path) + except os.error: + onerror(os.listdir, path, sys.exc_info()) + for name in names: + fullname = os.path.join(path, name) + try: + mode = os.lstat(fullname).st_mode + except os.error: + mode = 0 + if stat.S_ISDIR(mode): + rmtree(fullname, ignore_errors, onerror) + else: + try: + os.remove(fullname) + except os.error: + onerror(os.remove, fullname, sys.exc_info()) + try: + os.rmdir(path) + except os.error: + onerror(os.rmdir, path, sys.exc_info()) + + +def _basename(path): + # A basename() variant which first strips the trailing slash, if present. + # Thus we always get the last component of the path, even for directories. + return os.path.basename(path.rstrip(os.path.sep)) + +def move(src, dst): + """Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. + + If the destination is a directory or a symlink to a directory, the source + is moved inside the directory. The destination path must not already + exist. + + If the destination already exists but is not a directory, it may be + overwritten depending on os.rename() semantics. + + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + """ + real_dst = dst + if os.path.isdir(dst): + if _samefile(src, dst): + # We might be on a case insensitive filesystem, + # perform the rename anyway. 
+ os.rename(src, dst) + return + + real_dst = os.path.join(dst, _basename(src)) + if os.path.exists(real_dst): + raise Error("Destination path '%s' already exists" % real_dst) + try: + os.rename(src, real_dst) + except OSError: + if os.path.isdir(src): + if _destinsrc(src, dst): + raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) + copytree(src, real_dst, symlinks=True) + rmtree(src) + else: + copy2(src, real_dst) + os.unlink(src) + +def _destinsrc(src, dst): + src = abspath(src) + dst = abspath(dst) + if not src.endswith(os.path.sep): + src += os.path.sep + if not dst.endswith(os.path.sep): + dst += os.path.sep + return dst.startswith(src) + +def _get_gid(name): + """Returns a gid, given a group name.""" + if getgrnam is None or name is None: + return None + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if getpwnam is None or name is None: + return None + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, + owner=None, group=None, logger=None): + """Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", or ".bz2"). + + Returns the output filename. 
+ """ + tar_compression = {'gzip': 'gz', None: ''} + compress_ext = {'gzip': '.gz'} + + if _BZ2_SUPPORTED: + tar_compression['bzip2'] = 'bz2' + compress_ext['bzip2'] = '.bz2' + + # flags for compression program, each element of list will be an argument + if compress is not None and compress not in compress_ext: + raise ValueError("bad value for 'compress', or compression format not " + "supported : {0}".format(compress)) + + archive_name = base_name + '.tar' + compress_ext.get(compress, '') + archive_dir = os.path.dirname(archive_name) + + if not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # creating the tarball + if logger is not None: + logger.info('Creating tar archive') + + uid = _get_uid(owner) + gid = _get_gid(group) + + def _set_uid_gid(tarinfo): + if gid is not None: + tarinfo.gid = gid + tarinfo.gname = group + if uid is not None: + tarinfo.uid = uid + tarinfo.uname = owner + return tarinfo + + if not dry_run: + tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) + try: + tar.add(base_dir, filter=_set_uid_gid) + finally: + tar.close() + + return archive_name + +def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): + # XXX see if we want to keep an external call here + if verbose: + zipoptions = "-r" + else: + zipoptions = "-rq" + from distutils.errors import DistutilsExecError + from distutils.spawn import spawn + try: + spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) + except DistutilsExecError: + # XXX really should distinguish between "couldn't find + # external 'zip' command" and "zip failed". + raise ExecError("unable to create zip file '%s': " + "could neither import the 'zipfile' module nor " + "find a standalone zip utility") % zip_filename + +def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): + """Create a zip file from all the files under 'base_dir'. 
+ + The output zip file will be named 'base_name' + ".zip". Uses either the + "zipfile" Python module (if available) or the InfoZIP "zip" utility + (if installed and found on the default search path). If neither tool is + available, raises ExecError. Returns the name of the output zip + file. + """ + zip_filename = base_name + ".zip" + archive_dir = os.path.dirname(base_name) + + if not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # If zipfile module is not available, try spawning an external 'zip' + # command. + try: + import zipfile + except ImportError: + zipfile = None + + if zipfile is None: + _call_external_zip(base_dir, zip_filename, verbose, dry_run) + else: + if logger is not None: + logger.info("creating '%s' and adding '%s' to it", + zip_filename, base_dir) + + if not dry_run: + zip = zipfile.ZipFile(zip_filename, "w", + compression=zipfile.ZIP_DEFLATED) + + for dirpath, dirnames, filenames in os.walk(base_dir): + for name in filenames: + path = os.path.normpath(os.path.join(dirpath, name)) + if os.path.isfile(path): + zip.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + zip.close() + + return zip_filename + +_ARCHIVE_FORMATS = { + 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), + 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), + 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), + 'zip': (_make_zipfile, [], "ZIP file"), + } + +if _BZ2_SUPPORTED: + _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], + "bzip2'ed tar-file") + +def get_archive_formats(): + """Returns a list of supported formats for archiving and unarchiving. 
+ + Each element of the returned sequence is a tuple (name, description) + """ + formats = [(name, registry[2]) for name, registry in + _ARCHIVE_FORMATS.items()] + formats.sort() + return formats + +def register_archive_format(name, function, extra_args=None, description=''): + """Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. + """ + if extra_args is None: + extra_args = [] + if not isinstance(function, collections.Callable): + raise TypeError('The %s object is not callable' % function) + if not isinstance(extra_args, (tuple, list)): + raise TypeError('extra_args needs to be a sequence') + for element in extra_args: + if not isinstance(element, (tuple, list)) or len(element) !=2: + raise TypeError('extra_args elements are : (arg_name, value)') + + _ARCHIVE_FORMATS[name] = (function, extra_args, description) + +def unregister_archive_format(name): + del _ARCHIVE_FORMATS[name] + +def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, + dry_run=0, owner=None, group=None, logger=None): + """Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "bztar" + or "gztar". + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. 
+ + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. + """ + save_cwd = os.getcwd() + if root_dir is not None: + if logger is not None: + logger.debug("changing into '%s'", root_dir) + base_name = os.path.abspath(base_name) + if not dry_run: + os.chdir(root_dir) + + if base_dir is None: + base_dir = os.curdir + + kwargs = {'dry_run': dry_run, 'logger': logger} + + try: + format_info = _ARCHIVE_FORMATS[format] + except KeyError: + raise ValueError("unknown archive format '%s'" % format) + + func = format_info[0] + for arg, val in format_info[1]: + kwargs[arg] = val + + if format != 'zip': + kwargs['owner'] = owner + kwargs['group'] = group + + try: + filename = func(base_name, base_dir, **kwargs) + finally: + if root_dir is not None: + if logger is not None: + logger.debug("changing back to '%s'", save_cwd) + os.chdir(save_cwd) + + return filename + + +def get_unpack_formats(): + """Returns a list of supported formats for unpacking. + + Each element of the returned sequence is a tuple + (name, extensions, description) + """ + formats = [(name, info[0], info[3]) for name, info in + _UNPACK_FORMATS.items()] + formats.sort() + return formats + +def _check_unpack_options(extensions, function, extra_args): + """Checks what gets registered as an unpacker.""" + # first make sure no other unpacker is registered for this extension + existing_extensions = {} + for name, info in _UNPACK_FORMATS.items(): + for ext in info[0]: + existing_extensions[ext] = name + + for extension in extensions: + if extension in existing_extensions: + msg = '%s is already registered for "%s"' + raise RegistryError(msg % (extension, + existing_extensions[extension])) + + if not isinstance(function, collections.Callable): + raise TypeError('The registered function must be a callable') + + +def register_unpack_format(name, extensions, function, extra_args=None, + description=''): + """Registers an unpack format. 
+ + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + """ + if extra_args is None: + extra_args = [] + _check_unpack_options(extensions, function, extra_args) + _UNPACK_FORMATS[name] = extensions, function, extra_args, description + +def unregister_unpack_format(name): + """Removes the pack format from the registery.""" + del _UNPACK_FORMATS[name] + +def _ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def _unpack_zipfile(filename, extract_dir): + """Unpack zip `filename` to `extract_dir` + """ + try: + import zipfile + except ImportError: + raise ReadError('zlib not supported, cannot unpack this archive.') + + if not zipfile.is_zipfile(filename): + raise ReadError("%s is not a zip file" % filename) + + zip = zipfile.ZipFile(filename) + try: + for info in zip.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name: + continue + + target = os.path.join(extract_dir, *name.split('/')) + if not target: + continue + + _ensure_directory(target) + if not name.endswith('/'): + # file + data = zip.read(info.filename) + f = open(target, 'wb') + try: + f.write(data) + finally: + f.close() + del data + finally: + zip.close() + +def _unpack_tarfile(filename, extract_dir): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + """ + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir) + finally: + tarobj.close() + +_UNPACK_FORMATS = { + 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), + 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), + 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") + } + +if _BZ2_SUPPORTED: + _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], + "bzip2'ed tar-file") + +def _find_unpack_format(filename): + for name, info in _UNPACK_FORMATS.items(): + for extension in info[0]: + if filename.endswith(extension): + return name + return None + +def unpack_archive(filename, extract_dir=None, format=None): + """Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", or "gztar". Or any + other registered format. If not provided, unpack_archive will use the + filename extension and see if an unpacker was registered for that + extension. + + In case none is found, a ValueError is raised. 
+ """ + if extract_dir is None: + extract_dir = os.getcwd() + + if format is not None: + try: + format_info = _UNPACK_FORMATS[format] + except KeyError: + raise ValueError("Unknown unpack format '{0}'".format(format)) + + func = format_info[1] + func(filename, extract_dir, **dict(format_info[2])) + else: + # we need to look at the registered unpackers supported extensions + format = _find_unpack_format(filename) + if format is None: + raise ReadError("Unknown archive format '{0}'".format(filename)) + + func = _UNPACK_FORMATS[format][1] + kwargs = dict(_UNPACK_FORMATS[format][2]) + func(filename, extract_dir, **kwargs) diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg new file mode 100644 index 00000000..1746bd01 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg @@ -0,0 +1,84 @@ +[posix_prefix] +# Configuration directories. Some of these come straight out of the +# configure script. They are for implementing the other variables, not to +# be used directly in [resource_locations]. 
+confdir = /etc +datadir = /usr/share +libdir = /usr/lib +statedir = /var +# User resource directory +local = ~/.local/{distribution.name} + +stdlib = {base}/lib/python{py_version_short} +platstdlib = {platbase}/lib/python{py_version_short} +purelib = {base}/lib/python{py_version_short}/site-packages +platlib = {platbase}/lib/python{py_version_short}/site-packages +include = {base}/include/python{py_version_short}{abiflags} +platinclude = {platbase}/include/python{py_version_short}{abiflags} +data = {base} + +[posix_home] +stdlib = {base}/lib/python +platstdlib = {base}/lib/python +purelib = {base}/lib/python +platlib = {base}/lib/python +include = {base}/include/python +platinclude = {base}/include/python +scripts = {base}/bin +data = {base} + +[nt] +stdlib = {base}/Lib +platstdlib = {base}/Lib +purelib = {base}/Lib/site-packages +platlib = {base}/Lib/site-packages +include = {base}/Include +platinclude = {base}/Include +scripts = {base}/Scripts +data = {base} + +[os2] +stdlib = {base}/Lib +platstdlib = {base}/Lib +purelib = {base}/Lib/site-packages +platlib = {base}/Lib/site-packages +include = {base}/Include +platinclude = {base}/Include +scripts = {base}/Scripts +data = {base} + +[os2_home] +stdlib = {userbase}/lib/python{py_version_short} +platstdlib = {userbase}/lib/python{py_version_short} +purelib = {userbase}/lib/python{py_version_short}/site-packages +platlib = {userbase}/lib/python{py_version_short}/site-packages +include = {userbase}/include/python{py_version_short} +scripts = {userbase}/bin +data = {userbase} + +[nt_user] +stdlib = {userbase}/Python{py_version_nodot} +platstdlib = {userbase}/Python{py_version_nodot} +purelib = {userbase}/Python{py_version_nodot}/site-packages +platlib = {userbase}/Python{py_version_nodot}/site-packages +include = {userbase}/Python{py_version_nodot}/Include +scripts = {userbase}/Scripts +data = {userbase} + +[posix_user] +stdlib = {userbase}/lib/python{py_version_short} +platstdlib = 
{userbase}/lib/python{py_version_short} +purelib = {userbase}/lib/python{py_version_short}/site-packages +platlib = {userbase}/lib/python{py_version_short}/site-packages +include = {userbase}/include/python{py_version_short} +scripts = {userbase}/bin +data = {userbase} + +[osx_framework_user] +stdlib = {userbase}/lib/python +platstdlib = {userbase}/lib/python +purelib = {userbase}/lib/python/site-packages +platlib = {userbase}/lib/python/site-packages +include = {userbase}/include +scripts = {userbase}/bin +data = {userbase} diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py new file mode 100644 index 00000000..1d313267 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py @@ -0,0 +1,788 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Access to Python's configuration information.""" + +import codecs +import os +import re +import sys +from os.path import pardir, realpath +try: + import configparser +except ImportError: + import ConfigParser as configparser + + +__all__ = [ + 'get_config_h_filename', + 'get_config_var', + 'get_config_vars', + 'get_makefile_filename', + 'get_path', + 'get_path_names', + 'get_paths', + 'get_platform', + 'get_python_version', + 'get_scheme_names', + 'parse_config_h', +] + + +def _safe_realpath(path): + try: + return realpath(path) + except OSError: + return path + + +if sys.executable: + _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) +else: + # sys.executable can be empty if argv[0] has been changed and Python is + # unable to retrieve the real program name + _PROJECT_BASE = _safe_realpath(os.getcwd()) + +if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) +# PC/VS7.1 +if os.name == "nt" and "\\pc\\v" in 
_PROJECT_BASE[-10:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) +# PC/AMD64 +if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) + + +def is_python_build(): + for fn in ("Setup.dist", "Setup.local"): + if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): + return True + return False + +_PYTHON_BUILD = is_python_build() + +_cfg_read = False + +def _ensure_cfg_read(): + global _cfg_read + if not _cfg_read: + from ..resources import finder + backport_package = __name__.rsplit('.', 1)[0] + _finder = finder(backport_package) + _cfgfile = _finder.find('sysconfig.cfg') + assert _cfgfile, 'sysconfig.cfg exists' + with _cfgfile.as_stream() as s: + _SCHEMES.readfp(s) + if _PYTHON_BUILD: + for scheme in ('posix_prefix', 'posix_home'): + _SCHEMES.set(scheme, 'include', '{srcdir}/Include') + _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') + + _cfg_read = True + + +_SCHEMES = configparser.RawConfigParser() +_VAR_REPL = re.compile(r'\{([^{]*?)\}') + +def _expand_globals(config): + _ensure_cfg_read() + if config.has_section('globals'): + globals = config.items('globals') + else: + globals = tuple() + + sections = config.sections() + for section in sections: + if section == 'globals': + continue + for option, value in globals: + if config.has_option(section, option): + continue + config.set(section, option, value) + config.remove_section('globals') + + # now expanding local variables defined in the cfg file + # + for section in config.sections(): + variables = dict(config.items(section)) + + def _replacer(matchobj): + name = matchobj.group(1) + if name in variables: + return variables[name] + return matchobj.group(0) + + for option, value in config.items(section): + config.set(section, option, _VAR_REPL.sub(_replacer, value)) + +#_expand_globals(_SCHEMES) + + # FIXME don't rely on sys.version here, its format is an 
implementation detail + # of CPython, use sys.version_info or sys.hexversion +_PY_VERSION = sys.version.split()[0] +_PY_VERSION_SHORT = sys.version[:3] +_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] +_PREFIX = os.path.normpath(sys.prefix) +_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) +_CONFIG_VARS = None +_USER_BASE = None + + +def _subst_vars(path, local_vars): + """In the string `path`, replace tokens like {some.thing} with the + corresponding value from the map `local_vars`. + + If there is no corresponding value, leave the token unchanged. + """ + def _replacer(matchobj): + name = matchobj.group(1) + if name in local_vars: + return local_vars[name] + elif name in os.environ: + return os.environ[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, path) + + +def _extend_dict(target_dict, other_dict): + target_keys = target_dict.keys() + for key, value in other_dict.items(): + if key in target_keys: + continue + target_dict[key] = value + + +def _expand_vars(scheme, vars): + res = {} + if vars is None: + vars = {} + _extend_dict(vars, get_config_vars()) + + for key, value in _SCHEMES.items(scheme): + if os.name in ('posix', 'nt'): + value = os.path.expanduser(value) + res[key] = os.path.normpath(_subst_vars(value, vars)) + return res + + +def format_value(value, vars): + def _replacer(matchobj): + name = matchobj.group(1) + if name in vars: + return vars[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, value) + + +def _get_default_scheme(): + if os.name == 'posix': + # the default scheme for posix is posix_prefix + return 'posix_prefix' + return os.name + + +def _getuserbase(): + env_base = os.environ.get("PYTHONUSERBASE", None) + + def joinuser(*args): + return os.path.expanduser(os.path.join(*args)) + + # what about 'os2emx', 'riscos' ? 
+ if os.name == "nt": + base = os.environ.get("APPDATA") or "~" + if env_base: + return env_base + else: + return joinuser(base, "Python") + + if sys.platform == "darwin": + framework = get_config_var("PYTHONFRAMEWORK") + if framework: + if env_base: + return env_base + else: + return joinuser("~", "Library", framework, "%d.%d" % + sys.version_info[:2]) + + if env_base: + return env_base + else: + return joinuser("~", ".local") + + +def _parse_makefile(filename, vars=None): + """Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + """ + # Regexes needed for parsing Makefile (and similar syntaxes, + # like old-style Setup files). + _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") + _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") + _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") + + if vars is None: + vars = {} + done = {} + notdone = {} + + with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: + lines = f.readlines() + + for line in lines: + if line.startswith('#') or line.strip() == '': + continue + m = _variable_rx.match(line) + if m: + n, v = m.group(1, 2) + v = v.strip() + # `$$' is a literal `$' in make + tmpv = v.replace('$$', '') + + if "$" in tmpv: + notdone[n] = v + else: + try: + v = int(v) + except ValueError: + # insert literal `$' + done[n] = v.replace('$$', '$') + else: + done[n] = v + + # do variable interpolation here + variables = list(notdone.keys()) + + # Variables with a 'PY_' prefix in the makefile. These need to + # be made available without that prefix through sysconfig. + # Special care is needed to ensure that variable expansion works, even + # if the expansion uses the name without a prefix. 
+ renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') + + while len(variables) > 0: + for name in tuple(variables): + value = notdone[name] + m = _findvar1_rx.search(value) or _findvar2_rx.search(value) + if m is not None: + n = m.group(1) + found = True + if n in done: + item = str(done[n]) + elif n in notdone: + # get it on a subsequent round + found = False + elif n in os.environ: + # do it like make: fall back to environment + item = os.environ[n] + + elif n in renamed_variables: + if (name.startswith('PY_') and + name[3:] in renamed_variables): + item = "" + + elif 'PY_' + n in notdone: + found = False + + else: + item = str(done['PY_' + n]) + + else: + done[n] = item = "" + + if found: + after = value[m.end():] + value = value[:m.start()] + item + after + if "$" in after: + notdone[name] = value + else: + try: + value = int(value) + except ValueError: + done[name] = value.strip() + else: + done[name] = value + variables.remove(name) + + if (name.startswith('PY_') and + name[3:] in renamed_variables): + + name = name[3:] + if name not in done: + done[name] = value + + else: + # bogus variable reference (e.g. 
"prefix=$/opt/python"); + # just drop it since we can't deal + done[name] = value + variables.remove(name) + + # strip spurious spaces + for k, v in done.items(): + if isinstance(v, str): + done[k] = v.strip() + + # save the results in the global dictionary + vars.update(done) + return vars + + +def get_makefile_filename(): + """Return the path of the Makefile.""" + if _PYTHON_BUILD: + return os.path.join(_PROJECT_BASE, "Makefile") + if hasattr(sys, 'abiflags'): + config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) + else: + config_dir_name = 'config' + return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') + + +def _init_posix(vars): + """Initialize the module as appropriate for POSIX systems.""" + # load the installed Makefile: + makefile = get_makefile_filename() + try: + _parse_makefile(makefile, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % makefile + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # load the installed pyconfig.h: + config_h = get_config_h_filename() + try: + with open(config_h) as f: + parse_config_h(f, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % config_h + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # On AIX, there are wrong paths to the linker scripts in the Makefile + # -- these paths are relative to the Python source, but when installed + # the scripts are in another directory. 
+ if _PYTHON_BUILD: + vars['LDSHARED'] = vars['BLDSHARED'] + + +def _init_non_posix(vars): + """Initialize the module as appropriate for NT""" + # set basic install directories + vars['LIBDEST'] = get_path('stdlib') + vars['BINLIBDEST'] = get_path('platstdlib') + vars['INCLUDEPY'] = get_path('include') + vars['SO'] = '.pyd' + vars['EXE'] = '.exe' + vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT + vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) + +# +# public APIs +# + + +def parse_config_h(fp, vars=None): + """Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + """ + if vars is None: + vars = {} + define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") + undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") + + while True: + line = fp.readline() + if not line: + break + m = define_rx.match(line) + if m: + n, v = m.group(1, 2) + try: + v = int(v) + except ValueError: + pass + vars[n] = v + else: + m = undef_rx.match(line) + if m: + vars[m.group(1)] = 0 + return vars + + +def get_config_h_filename(): + """Return the path of pyconfig.h.""" + if _PYTHON_BUILD: + if os.name == "nt": + inc_dir = os.path.join(_PROJECT_BASE, "PC") + else: + inc_dir = _PROJECT_BASE + else: + inc_dir = get_path('platinclude') + return os.path.join(inc_dir, 'pyconfig.h') + + +def get_scheme_names(): + """Return a tuple containing the schemes names.""" + return tuple(sorted(_SCHEMES.sections())) + + +def get_path_names(): + """Return a tuple containing the paths names.""" + # xxx see if we want a static list + return _SCHEMES.options('posix_prefix') + + +def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): + """Return a mapping containing an install scheme. + + ``scheme`` is the install scheme name. If not provided, it will + return the default scheme for the current platform. 
+ """ + _ensure_cfg_read() + if expand: + return _expand_vars(scheme, vars) + else: + return dict(_SCHEMES.items(scheme)) + + +def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): + """Return a path corresponding to the scheme. + + ``scheme`` is the install scheme name. + """ + return get_paths(scheme, vars, expand)[name] + + +def get_config_vars(*args): + """With no arguments, return a dictionary of all configuration + variables relevant for the current platform. + + On Unix, this means every variable defined in Python's installed Makefile; + On Windows and Mac OS it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + """ + global _CONFIG_VARS + if _CONFIG_VARS is None: + _CONFIG_VARS = {} + # Normalized versions of prefix and exec_prefix are handy to have; + # in fact, these are the standard versions used most places in the + # distutils2 module. + _CONFIG_VARS['prefix'] = _PREFIX + _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX + _CONFIG_VARS['py_version'] = _PY_VERSION + _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT + _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] + _CONFIG_VARS['base'] = _PREFIX + _CONFIG_VARS['platbase'] = _EXEC_PREFIX + _CONFIG_VARS['projectbase'] = _PROJECT_BASE + try: + _CONFIG_VARS['abiflags'] = sys.abiflags + except AttributeError: + # sys.abiflags may not be defined on all platforms. + _CONFIG_VARS['abiflags'] = '' + + if os.name in ('nt', 'os2'): + _init_non_posix(_CONFIG_VARS) + if os.name == 'posix': + _init_posix(_CONFIG_VARS) + # Setting 'userbase' is done below the call to the + # init function to enable using 'get_config_var' in + # the init-function. 
+ if sys.version >= '2.6': + _CONFIG_VARS['userbase'] = _getuserbase() + + if 'srcdir' not in _CONFIG_VARS: + _CONFIG_VARS['srcdir'] = _PROJECT_BASE + else: + _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) + + # Convert srcdir into an absolute path if it appears necessary. + # Normally it is relative to the build directory. However, during + # testing, for example, we might be running a non-installed python + # from a different directory. + if _PYTHON_BUILD and os.name == "posix": + base = _PROJECT_BASE + try: + cwd = os.getcwd() + except OSError: + cwd = None + if (not os.path.isabs(_CONFIG_VARS['srcdir']) and + base != cwd): + # srcdir is relative and we are not in the same directory + # as the executable. Assume executable is in the build + # directory and make srcdir absolute. + srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) + _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) + + if sys.platform == 'darwin': + kernel_version = os.uname()[2] # Kernel version (8.4.3) + major_version = int(kernel_version.split('.')[0]) + + if major_version < 8: + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + flags = _CONFIG_VARS[key] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _CONFIG_VARS[key] = flags + else: + # Allow the user to override the architecture flags using + # an environment variable. + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. 
These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _CONFIG_VARS[key] = flags + + # If we're on OSX 10.5 or later and the user tries to + # compiles an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. + # + # The major usecase for this is users using a Python.org + # binary installer on OSX 10.6: that installer uses + # the 10.4u SDK, but that SDK is not installed by default + # when you install Xcode. + # + CFLAGS = _CONFIG_VARS.get('CFLAGS', '') + m = re.search('-isysroot\s+(\S+)', CFLAGS) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags) + _CONFIG_VARS[key] = flags + + if args: + vals = [] + for name in args: + vals.append(_CONFIG_VARS.get(name)) + return vals + else: + return _CONFIG_VARS + + +def get_config_var(name): + """Return the value of a single variable using the dictionary returned by + 'get_config_vars()'. + + Equivalent to get_config_vars().get(name) + """ + return get_config_vars().get(name) + + +def get_platform(): + """Return a string that identifies the current platform. + + This is used mainly to distinguish platform-specific build directories and + platform-specific built distributions. Typically includes the OS name + and version and the architecture (as supplied by 'os.uname()'), + although the exact information included depends on the OS; eg. for IRIX + the architecture isn't particularly important (IRIX only runs on SGI + hardware), but for Linux the kernel version isn't particularly + important. + + Examples of returned values: + linux-i586 + linux-alpha (?) 
+ solaris-2.6-sun4u + irix-5.3 + irix64-6.2 + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win-ia64 (64bit Windows on Itanium) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + """ + if os.name == 'nt': + # sniff sys.version for architecture. + prefix = " bit (" + i = sys.version.find(prefix) + if i == -1: + return sys.platform + j = sys.version.find(")", i) + look = sys.version[i+len(prefix):j].lower() + if look == 'amd64': + return 'win-amd64' + if look == 'itanium': + return 'win-ia64' + return sys.platform + + if os.name != "posix" or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha, + # Mac OS is M68k or PPC, etc. + return sys.platform + + # Try to distinguish various flavours of Unix + osname, host, release, version, machine = os.uname() + + # Convert the OS name to lowercase, remove '/' characters + # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_') + machine = machine.replace('/', '-') + + if osname[:5] == "linux": + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return "%s-%s" % (osname, machine) + elif osname[:5] == "sunos": + if release[0] >= "5": # SunOS 5 == Solaris 2 + osname = "solaris" + release = "%d.%s" % (int(release[0]) - 3, release[2:]) + # fall through to standard osname-release-machine representation + elif osname[:4] == "irix": # could be "irix64"! 
+ return "%s-%s" % (osname, release) + elif osname[:3] == "aix": + return "%s-%s.%s" % (osname, version, release) + elif osname[:6] == "cygwin": + osname = "cygwin" + rel_re = re.compile(r'[\d.]+') + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == "darwin": + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. + cfgvars = get_config_vars() + macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') + + if True: + # Always calculate the release of the running machine, + # needed to determine if we can build fat binaries or not. + + macrelease = macver + # Get the system version. Reading this plist is a documented + # way to get the system version (see the documentation for + # the Gestalt Manager) + try: + f = open('/System/Library/CoreServices/SystemVersion.plist') + except IOError: + # We're on a plain darwin box, fall back to the default + # behaviour. + pass + else: + try: + m = re.search(r'ProductUserVisibleVersion\s*' + r'(.*?)', f.read()) + finally: + f.close() + if m is not None: + macrelease = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + + if not macver: + macver = macrelease + + if macver: + release = macver + osname = "macosx" + + if ((macrelease + '.') >= '10.4.' and + '-arch' in get_config_vars().get('CFLAGS', '').strip()): + # The universal build will build fat binaries, but not on + # systems before 10.4 + # + # Try to detect 4-way universal builds, those have machine-type + # 'universal' instead of 'fat'. 
+ + machine = 'fat' + cflags = get_config_vars().get('CFLAGS') + + archs = re.findall('-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxsize >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxsize >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return "%s-%s-%s" % (osname, release, machine) + + +def get_python_version(): + return _PY_VERSION_SHORT + + +def _print_dict(title, data): + for index, (key, value) in enumerate(sorted(data.items())): + if index == 0: + print('%s: ' % (title)) + print('\t%s = "%s"' % (key, value)) + + +def _main(): + """Display all information sysconfig detains.""" + print('Platform: "%s"' % get_platform()) + print('Python version: "%s"' % get_python_version()) + print('Current installation scheme: "%s"' % _get_default_scheme()) + print() + _print_dict('Paths', get_paths()) + print() + _print_dict('Variables', get_config_vars()) + + +if __name__ == '__main__': + _main() diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py new file mode 100644 index 00000000..0580fb79 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py @@ -0,0 +1,2607 @@ 
+#------------------------------------------------------------------- +# tarfile.py +#------------------------------------------------------------------- +# Copyright (C) 2002 Lars Gustaebel +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +from __future__ import print_function + +"""Read from and write to tar format archives. +""" + +__version__ = "$Revision$" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" +__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." 
+ +#--------- +# Imports +#--------- +import sys +import os +import stat +import errno +import time +import struct +import copy +import re + +try: + import grp, pwd +except ImportError: + grp = pwd = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +symlink_exception = (AttributeError, NotImplementedError) +try: + # WindowsError (1314) will be raised if the caller does not hold the + # SeCreateSymbolicLinkPrivilege privilege + symlink_exception += (WindowsError,) +except NameError: + pass + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] + +if sys.version_info[0] < 3: + import __builtin__ as builtins +else: + import builtins + +_open = builtins.open # Since 'open' is TarFile.open + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # POSIX.1-2001 
(pax) format +DEFAULT_FORMAT = GNU_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. +PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. +PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# Bits used in the mode field, values in octal. 
+#--------------------------------------------------------- +S_IFLNK = 0o120000 # symbolic link +S_IFREG = 0o100000 # regular file +S_IFBLK = 0o060000 # block device +S_IFDIR = 0o040000 # directory +S_IFCHR = 0o020000 # character device +S_IFIFO = 0o010000 # fifo + +TSUID = 0o4000 # set UID on execution +TSGID = 0o2000 # set GID on execution +TSVTX = 0o1000 # reserved + +TUREAD = 0o400 # read by owner +TUWRITE = 0o200 # write by owner +TUEXEC = 0o100 # execute/search by owner +TGREAD = 0o040 # read by group +TGWRITE = 0o020 # write by group +TGEXEC = 0o010 # execute/search by group +TOREAD = 0o004 # read by other +TOWRITE = 0o002 # write by other +TOEXEC = 0o001 # execute/search by other + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name in ("nt", "ce"): + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. + """ + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] != chr(0o200): + try: + n = int(nts(s, "ascii", "strict") or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + else: + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += ord(s[i + 1]) + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. 
+ """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 byte indicates this particular + # encoding, the following digits-1 bytes are a big-endian + # representation. This allows values up to (256**(digits-1))-1. + if 0 <= n < 8 ** (digits - 1): + s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL + else: + if format != GNU_FORMAT or n >= 256 ** (digits - 1): + raise ValueError("overflow in number field") + + if n < 0: + # XXX We mimic GNU tar's behaviour with negative numbers, + # this could raise OverflowError. + n = struct.unpack("L", struct.pack("l", n))[0] + + s = bytearray() + for i in range(digits - 1): + s.insert(0, n & 0o377) + n >>= 8 + s.insert(0, 0o200) + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. + """ + unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) + signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) + return unsigned_chksum, signed_chksum + +def copyfileobj(src, dst, length=None): + """Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. 
+ """ + if length == 0: + return + if length is None: + while True: + buf = src.read(16*1024) + if not buf: + break + dst.write(buf) + return + + BUFSIZE = 16 * 1024 + blocks, remainder = divmod(length, BUFSIZE) + for b in range(blocks): + buf = src.read(BUFSIZE) + if len(buf) < BUFSIZE: + raise IOError("end of file reached") + dst.write(buf) + + if remainder != 0: + buf = src.read(remainder) + if len(buf) < remainder: + raise IOError("end of file reached") + dst.write(buf) + return + +filemode_table = ( + ((S_IFLNK, "l"), + (S_IFREG, "-"), + (S_IFBLK, "b"), + (S_IFDIR, "d"), + (S_IFCHR, "c"), + (S_IFIFO, "p")), + + ((TUREAD, "r"),), + ((TUWRITE, "w"),), + ((TUEXEC|TSUID, "s"), + (TSUID, "S"), + (TUEXEC, "x")), + + ((TGREAD, "r"),), + ((TGWRITE, "w"),), + ((TGEXEC|TSGID, "s"), + (TSGID, "S"), + (TGEXEC, "x")), + + ((TOREAD, "r"),), + ((TOWRITE, "w"),), + ((TOEXEC|TSVTX, "t"), + (TSVTX, "T"), + (TOEXEC, "x")) +) + +def filemode(mode): + """Convert a file's mode to a string of the form + -rwxrwxrwx. 
+ Used by TarFile.list() + """ + perm = [] + for table in filemode_table: + for bit, char in table: + if mode & bit == bit: + perm.append(char) + break + else: + perm.append("-") + return "".join(perm) + +class TarError(Exception): + """Base exception.""" + pass +class ExtractError(TarError): + """General exception for extract errors.""" + pass +class ReadError(TarError): + """Exception for unreadble tar archives.""" + pass +class CompressionError(TarError): + """Exception for unavailable compression methods.""" + pass +class StreamError(TarError): + """Exception for unsupported operations on stream-like TarFiles.""" + pass +class HeaderError(TarError): + """Base exception for header errors.""" + pass +class EmptyHeaderError(HeaderError): + """Exception for empty headers.""" + pass +class TruncatedHeaderError(HeaderError): + """Exception for truncated headers.""" + pass +class EOFHeaderError(HeaderError): + """Exception for end of file headers.""" + pass +class InvalidHeaderError(HeaderError): + """Exception for invalid headers.""" + pass +class SubsequentHeaderError(HeaderError): + """Exception for missing and invalid extended headers.""" + pass + +#--------------------------- +# internal stream interface +#--------------------------- +class _LowLevelFile(object): + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, + "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode, 0o666) + + def close(self): + os.close(self.fd) + + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) + +class _Stream(object): + """Class that serves as an adapter between TarFile and + a stream-like object. The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. 
Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. + + _Stream is intended to be used only internally. + """ + + def __init__(self, name, mode, comptype, fileobj, bufsize): + """Construct a _Stream object. + """ + self._extfileobj = True + if fileobj is None: + fileobj = _LowLevelFile(name, mode) + self._extfileobj = False + + if comptype == '*': + # Enable transparent compression detection for the + # stream interface + fileobj = _StreamProxy(fileobj) + comptype = fileobj.getcomptype() + + self.name = name or "" + self.mode = mode + self.comptype = comptype + self.fileobj = fileobj + self.bufsize = bufsize + self.buf = b"" + self.pos = 0 + self.closed = False + + try: + if comptype == "gz": + try: + import zlib + except ImportError: + raise CompressionError("zlib module is not available") + self.zlib = zlib + self.crc = zlib.crc32(b"") + if mode == "r": + self._init_read_gz() + else: + self._init_write_gz() + + if comptype == "bz2": + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + if mode == "r": + self.dbuf = b"" + self.cmp = bz2.BZ2Decompressor() + else: + self.cmp = bz2.BZ2Compressor() + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + def __del__(self): + if hasattr(self, "closed") and not self.closed: + self.close() + + def _init_write_gz(self): + """Initialize for writing with gzip compression. + """ + self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, + -self.zlib.MAX_WBITS, + self.zlib.DEF_MEM_LEVEL, + 0) + timestamp = struct.pack(" self.bufsize: + self.fileobj.write(self.buf[:self.bufsize]) + self.buf = self.buf[self.bufsize:] + + def close(self): + """Close the _Stream object. No operation should be + done on it afterwards. 
+ """ + if self.closed: + return + + if self.mode == "w" and self.comptype != "tar": + self.buf += self.cmp.flush() + + if self.mode == "w" and self.buf: + self.fileobj.write(self.buf) + self.buf = b"" + if self.comptype == "gz": + # The native zlib crc is an unsigned 32-bit integer, but + # the Python wrapper implicitly casts that to a signed C + # long. So, on a 32-bit box self.crc may "look negative", + # while the same crc on a 64-bit box may "look positive". + # To avoid irksome warnings from the `struct` module, force + # it to look positive on all boxes. + self.fileobj.write(struct.pack("= 0: + blocks, remainder = divmod(pos - self.pos, self.bufsize) + for i in range(blocks): + self.read(self.bufsize) + self.read(remainder) + else: + raise StreamError("seeking backwards is not allowed") + return self.pos + + def read(self, size=None): + """Return the next size number of bytes from the stream. + If size is not defined, return all bytes of the stream + up to EOF. + """ + if size is None: + t = [] + while True: + buf = self._read(self.bufsize) + if not buf: + break + t.append(buf) + buf = "".join(t) + else: + buf = self._read(size) + self.pos += len(buf) + return buf + + def _read(self, size): + """Return size bytes from the stream. + """ + if self.comptype == "tar": + return self.__read(size) + + c = len(self.dbuf) + while c < size: + buf = self.__read(self.bufsize) + if not buf: + break + try: + buf = self.cmp.decompress(buf) + except IOError: + raise ReadError("invalid compressed data") + self.dbuf += buf + c += len(buf) + buf = self.dbuf[:size] + self.dbuf = self.dbuf[size:] + return buf + + def __read(self, size): + """Return size bytes from stream. If internal buffer is empty, + read another block from the stream. 
+ """ + c = len(self.buf) + while c < size: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + self.buf += buf + c += len(buf) + buf = self.buf[:size] + self.buf = self.buf[size:] + return buf +# class _Stream + +class _StreamProxy(object): + """Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + """ + + def __init__(self, fileobj): + self.fileobj = fileobj + self.buf = self.fileobj.read(BLOCKSIZE) + + def read(self, size): + self.read = self.fileobj.read + return self.buf + + def getcomptype(self): + if self.buf.startswith(b"\037\213\010"): + return "gz" + if self.buf.startswith(b"BZh91"): + return "bz2" + return "tar" + + def close(self): + self.fileobj.close() +# class StreamProxy + +class _BZ2Proxy(object): + """Small proxy class that enables external file object + support for "r:bz2" and "w:bz2" modes. This is actually + a workaround for a limitation in bz2 module's BZ2File + class which (unlike gzip.GzipFile) has no support for + a file object argument. 
+ """ + + blocksize = 16 * 1024 + + def __init__(self, fileobj, mode): + self.fileobj = fileobj + self.mode = mode + self.name = getattr(self.fileobj, "name", None) + self.init() + + def init(self): + import bz2 + self.pos = 0 + if self.mode == "r": + self.bz2obj = bz2.BZ2Decompressor() + self.fileobj.seek(0) + self.buf = b"" + else: + self.bz2obj = bz2.BZ2Compressor() + + def read(self, size): + x = len(self.buf) + while x < size: + raw = self.fileobj.read(self.blocksize) + if not raw: + break + data = self.bz2obj.decompress(raw) + self.buf += data + x += len(data) + + buf = self.buf[:size] + self.buf = self.buf[size:] + self.pos += len(buf) + return buf + + def seek(self, pos): + if pos < self.pos: + self.init() + self.read(pos - self.pos) + + def tell(self): + return self.pos + + def write(self, data): + self.pos += len(data) + raw = self.bz2obj.compress(data) + self.fileobj.write(raw) + + def close(self): + if self.mode == "w": + raw = self.bz2obj.flush() + self.fileobj.write(raw) +# class _BZ2Proxy + +#------------------------ +# Extraction file object +#------------------------ +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + """ + + def __init__(self, fileobj, offset, size, blockinfo=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.position = 0 + + if blockinfo is None: + blockinfo = [(0, size)] + + # Construct a map with data and zero blocks. 
+ self.map_index = 0 + self.map = [] + lastpos = 0 + realpos = self.offset + for offset, size in blockinfo: + if offset > lastpos: + self.map.append((False, lastpos, offset, None)) + self.map.append((True, offset, offset + size, realpos)) + realpos += size + lastpos = offset + size + if lastpos < self.size: + self.map.append((False, lastpos, self.size, None)) + + def seekable(self): + if not hasattr(self.fileobj, "seekable"): + # XXX gzip.GzipFile and bz2.BZ2File + return True + return self.fileobj.seekable() + + def tell(self): + """Return the current file position. + """ + return self.position + + def seek(self, position): + """Seek to a position in the file. + """ + self.position = position + + def read(self, size=None): + """Read data from the file. + """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + buf += self.fileobj.read(length) + else: + buf += NUL * length + size -= length + self.position += length + return buf +#class _FileInFile + + +class ExFileObject(object): + """File-like object for reading an archive member. + Is returned by TarFile.extractfile(). 
+ """ + blocksize = 1024 + + def __init__(self, tarfile, tarinfo): + self.fileobj = _FileInFile(tarfile.fileobj, + tarinfo.offset_data, + tarinfo.size, + tarinfo.sparse) + self.name = tarinfo.name + self.mode = "r" + self.closed = False + self.size = tarinfo.size + + self.position = 0 + self.buffer = b"" + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return self.fileobj.seekable() + + def read(self, size=None): + """Read at most size bytes from the file. If size is not + present or None, read all data until EOF is reached. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + buf = b"" + if self.buffer: + if size is None: + buf = self.buffer + self.buffer = b"" + else: + buf = self.buffer[:size] + self.buffer = self.buffer[size:] + + if size is None: + buf += self.fileobj.read() + else: + buf += self.fileobj.read(size - len(buf)) + + self.position += len(buf) + return buf + + # XXX TextIOWrapper uses the read1() method. + read1 = read + + def readline(self, size=-1): + """Read one entire line from the file. If size is present + and non-negative, return a string with at most that + size, which may be an incomplete line. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + while True: + buf = self.fileobj.read(self.blocksize) + self.buffer += buf + if not buf or b"\n" in buf: + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + pos = len(self.buffer) + break + + if size != -1: + pos = min(size, pos) + + buf = self.buffer[:pos] + self.buffer = self.buffer[pos:] + self.position += len(buf) + return buf + + def readlines(self): + """Return a list with all remaining lines. + """ + result = [] + while True: + line = self.readline() + if not line: break + result.append(line) + return result + + def tell(self): + """Return the current file position. 
+ """ + if self.closed: + raise ValueError("I/O operation on closed file") + + return self.position + + def seek(self, pos, whence=os.SEEK_SET): + """Seek to a position in the file. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + if whence == os.SEEK_SET: + self.position = min(max(pos, 0), self.size) + elif whence == os.SEEK_CUR: + if pos < 0: + self.position = max(self.position + pos, 0) + else: + self.position = min(self.position + pos, self.size) + elif whence == os.SEEK_END: + self.position = max(min(self.size + pos, self.size), 0) + else: + raise ValueError("Invalid argument") + + self.buffer = b"" + self.fileobj.seek(self.position) + + def close(self): + """Close the file object. + """ + self.closed = True + + def __iter__(self): + """Get an iterator over the file's lines. + """ + while True: + line = self.readline() + if not line: + break + yield line +#class ExFileObject + +#------------------ +# Exported Classes +#------------------ +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", + "chksum", "type", "linkname", "uname", "gname", + "devmajor", "devminor", + "offset", "offset_data", "pax_headers", "sparse", + "tarfile", "_sparse_structs", "_link_target") + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. 
+ """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + # In pax headers the "name" and "linkname" field are called + # "path" and "linkpath". + def _getpath(self): + return self.name + def _setpath(self, name): + self.name = name + path = property(_getpath, _setpath) + + def _getlinkpath(self): + return self.linkname + def _setlinkpath(self, linkname): + self.linkname = linkname + linkpath = property(_getlinkpath, _setlinkpath) + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. + """ + info = { + "name": self.name, + "mode": self.mode & 0o7777, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. 
+ """ + info = self.get_info() + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"]) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"]) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"]) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. + """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"]) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"]) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. + for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. 
+ try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + if name in pax_headers: + # The pax header has priority. Avoid overflow. + info[name] = 0 + continue + + val = info[name] + if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): + pax_headers[name] = str(val) + info[name] = 0 + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. + """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") + + def _posix_split_name(self, name): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + prefix = name[:LENGTH_PREFIX + 1] + while prefix and prefix[-1] != "/": + prefix = prefix[:-1] + + name = name[len(prefix):] + prefix = prefix[:-1] + + if not prefix or len(name) > LENGTH_NAME: + raise ValueError("name is too long") + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. 
+ """ + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + info.get("type", REGTYPE), + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + itn(info.get("devmajor", 0), 8, format), + itn(info.get("devminor", 0), 8, format), + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. + return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ + cls._create_payload(name) + + @classmethod + def _create_pax_generic_header(cls, pax_headers, type, encoding): + """Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + """ + # Check if one of the fields contains surrogate characters and thereby + # forces hdrcharset=BINARY, see _proc_pax() for more information. 
+ binary = False + for keyword, value in pax_headers.items(): + try: + value.encode("utf8", "strict") + except UnicodeEncodeError: + binary = True + break + + records = b"" + if binary: + # Put the hdrcharset field at the beginning of the header. + records += b"21 hdrcharset=BINARY\n" + + for keyword, value in pax_headers.items(): + keyword = keyword.encode("utf8") + if binary: + # Try to restore the original byte representation of `value'. + # Needless to say, that the encoding must match the string. + value = value.encode(encoding, "surrogateescape") + else: + value = value.encode("utf8") + + l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' + n = p = 0 + while True: + n = l + len(str(p)) + if n == p: + break + p = n + records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" + + # We use a hardcoded "././@PaxHeader" name like star does + # instead of the one that POSIX recommends. + info = {} + info["name"] = "././@PaxHeader" + info["type"] = type + info["size"] = len(records) + info["magic"] = POSIX_MAGIC + + # Create pax header + record blocks. + return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ + cls._create_payload(records) + + @classmethod + def frombuf(cls, buf, encoding, errors): + """Construct a TarInfo object from a 512 byte bytes object. 
+ """ + if len(buf) == 0: + raise EmptyHeaderError("empty header") + if len(buf) != BLOCKSIZE: + raise TruncatedHeaderError("truncated header") + if buf.count(NUL) == BLOCKSIZE: + raise EOFHeaderError("end of file header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise InvalidHeaderError("bad checksum") + + obj = cls() + obj.name = nts(buf[0:100], encoding, errors) + obj.mode = nti(buf[100:108]) + obj.uid = nti(buf[108:116]) + obj.gid = nti(buf[116:124]) + obj.size = nti(buf[124:136]) + obj.mtime = nti(buf[136:148]) + obj.chksum = chksum + obj.type = buf[156:157] + obj.linkname = nts(buf[157:257], encoding, errors) + obj.uname = nts(buf[265:297], encoding, errors) + obj.gname = nts(buf[297:329], encoding, errors) + obj.devmajor = nti(buf[329:337]) + obj.devminor = nti(buf[337:345]) + prefix = nts(buf[345:500], encoding, errors) + + # Old V7 tar format represents a directory as a regular + # file with a trailing slash. + if obj.type == AREGTYPE and obj.name.endswith("/"): + obj.type = DIRTYPE + + # The old GNU sparse format occupies some of the unused + # space in the buffer for up to 4 sparse structures. + # Save the them for later processing in _proc_sparse(). + if obj.type == GNUTYPE_SPARSE: + pos = 386 + structs = [] + for i in range(4): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[482]) + origsize = nti(buf[483:495]) + obj._sparse_structs = (structs, isextended, origsize) + + # Remove redundant slashes from directories. + if obj.isdir(): + obj.name = obj.name.rstrip("/") + + # Reconstruct a ustar longname. + if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. 
+ """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. + self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. 
+ try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. + buf = tarfile.fileobj.read(self._block(self.size)) + + # A pax header stores supplemental information for either + # the following file (extended) or all following files + # (global). + if self.type == XGLTYPE: + pax_headers = tarfile.pax_headers + else: + pax_headers = tarfile.pax_headers.copy() + + # Check if the pax header contains a hdrcharset field. This tells us + # the encoding of the path, linkpath, uname and gname fields. Normally, + # these fields are UTF-8 encoded but since POSIX.1-2008 tar + # implementations are allowed to store them as raw binary strings if + # the translation to UTF-8 fails. 
+ match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) + if match is not None: + pax_headers["hdrcharset"] = match.group(1).decode("utf8") + + # For the time being, we don't care about anything other than "BINARY". + # The only other value that is currently allowed by the standard is + # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. + hdrcharset = pax_headers.get("hdrcharset") + if hdrcharset == "BINARY": + encoding = tarfile.encoding + else: + encoding = "utf8" + + # Parse pax header information. A record looks like that: + # "%d %s=%s\n" % (length, keyword, value). length is the size + # of the complete record including the length field itself and + # the newline. keyword and value are both UTF-8 encoded strings. + regex = re.compile(br"(\d+) ([^=]+)=") + pos = 0 + while True: + match = regex.match(buf, pos) + if not match: + break + + length, keyword = match.groups() + length = int(length) + value = buf[match.end(2) + 1:match.start(1) + length - 1] + + # Normally, we could just use "utf8" as the encoding and "strict" + # as the error handler, but we better not take the risk. For + # example, GNU tar <= 1.23 is known to store filenames it cannot + # translate to UTF-8 as raw strings (unfortunately without a + # hdrcharset=BINARY header). + # We first try the strict standard encoding, and if that fails we + # fall back on the user's encoding and error handler. + keyword = self._decode_pax_field(keyword, "utf8", "utf8", + tarfile.errors) + if keyword in PAX_NAME_FIELDS: + value = self._decode_pax_field(value, encoding, tarfile.encoding, + tarfile.errors) + else: + value = self._decode_pax_field(value, "utf8", "utf8", + tarfile.errors) + + pax_headers[keyword] = value + pos += length + + # Fetch the next header. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Process GNU sparse information. + if "GNU.sparse.map" in pax_headers: + # GNU extended sparse format version 0.1. 
+ self._proc_gnusparse_01(next, pax_headers) + + elif "GNU.sparse.size" in pax_headers: + # GNU extended sparse format version 0.0. + self._proc_gnusparse_00(next, pax_headers, buf) + + elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": + # GNU extended sparse format version 1.0. + self._proc_gnusparse_10(next, pax_headers, tarfile) + + if self.type in (XHDTYPE, SOLARIS_XHDTYPE): + # Patch the TarInfo object with the extended header info. + next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) + next.offset = self.offset + + if "size" in pax_headers: + # If the extended header replaces the size field, + # we need to recalculate the offset where the next + # header starts. + offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, pax_headers, buf): + """Process a GNU tar extended sparse header, version 0.0. + """ + offsets = [] + for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): + offsets.append(int(match.group(1))) + numbytes = [] + for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): + numbytes.append(int(match.group(1))) + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. 
+ """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. + """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. 
+ """ + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + return self.type in REGULAR_TYPES + def isfile(self): + return self.isreg() + def isdir(self): + return self.type == DIRTYPE + def issym(self): + return self.type == SYMTYPE + def islnk(self): + return self.type == LNKTYPE + def ischr(self): + return self.type == CHRTYPE + def isblk(self): + return self.type == BLKTYPE + def isfifo(self): + return self.type == FIFOTYPE + def issparse(self): + return self.sparse is not None + def isdev(self): + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. + + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The default ExFileObject class to use. + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): + """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. 
If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + self.mode = mode + self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. + self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if name is None and hasattr(fileobj, "name"): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. 
+ while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) + + if self.mode in "aw": + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + """Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. 
+ + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. + for comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + if fileobj is not None: + fileobj.seek(saved_pos) + continue + raise ReadError("file could not be opened successfully") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. 
+ if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + stream = _Stream(name, filemode, comptype, fileobj, bufsize) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in "aw": + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + try: + import gzip + gzip.GzipFile + except (ImportError, AttributeError): + raise CompressionError("gzip module is not available") + + extfileobj = fileobj is not None + try: + fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) + t = cls.taropen(name, mode, fileobj, **kwargs) + except IOError: + if not extfileobj and fileobj is not None: + fileobj.close() + if fileobj is None: + raise + raise ReadError("not a gzip file") + except: + if not extfileobj and fileobj is not None: + fileobj.close() + raise + t._extfileobj = extfileobj + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. 
+ Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'.") + + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + + if fileobj is not None: + fileobj = _BZ2Proxy(fileobj, mode) + else: + fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (IOError, EOFError): + fileobj.close() + raise ReadError("not a bzip2 file") + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open" # bzip2 compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + if self.mode in "aw": + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + + if not self._extfileobj: + self.fileobj.close() + self.closed = True + + def getmember(self, name): + """Return a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. 
+ """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object for either the file `name' or the file + object `fileobj' (using os.fstat on its file descriptor). You can + modify some of the TarInfo's attributes before you add it using + addfile(). If given, `arcname' specifies an alternative name for the + file in the archive. + """ + self._check("aw") + + # When fileobj is given, replace name by + # fileobj's real name. + if fileobj is not None: + name = fileobj.name + + # Building the name of the member in the archive. + # Backward slashes are converted to forward slashes, + # Absolute paths are turned to relative paths. + if arcname is None: + arcname = name + drv, arcname = os.path.splitdrive(arcname) + arcname = arcname.replace(os.sep, "/") + arcname = arcname.lstrip("/") + + # Now, fill the TarInfo object with + # information specific for the file. + tarinfo = self.tarinfo() + tarinfo.tarfile = self + + # Use os.stat or os.lstat, depending on platform + # and if symlinks shall be resolved. + if fileobj is None: + if hasattr(os, "lstat") and not self.dereference: + statres = os.lstat(name) + else: + statres = os.stat(name) + else: + statres = os.fstat(fileobj.fileno()) + linkname = "" + + stmd = statres.st_mode + if stat.S_ISREG(stmd): + inode = (statres.st_ino, statres.st_dev) + if not self.dereference and statres.st_nlink > 1 and \ + inode in self.inodes and arcname != self.inodes[inode]: + # Is it a hardlink to an already + # archived file? + type = LNKTYPE + linkname = self.inodes[inode] + else: + # The inode is added only if its valid. 
+ # For win32 it is always 0. + type = REGTYPE + if inode[0]: + self.inodes[inode] = arcname + elif stat.S_ISDIR(stmd): + type = DIRTYPE + elif stat.S_ISFIFO(stmd): + type = FIFOTYPE + elif stat.S_ISLNK(stmd): + type = SYMTYPE + linkname = os.readlink(name) + elif stat.S_ISCHR(stmd): + type = CHRTYPE + elif stat.S_ISBLK(stmd): + type = BLKTYPE + else: + return None + + # Fill the TarInfo object with all + # information we can get. + tarinfo.name = arcname + tarinfo.mode = stmd + tarinfo.uid = statres.st_uid + tarinfo.gid = statres.st_gid + if type == REGTYPE: + tarinfo.size = statres.st_size + else: + tarinfo.size = 0 + tarinfo.mtime = statres.st_mtime + tarinfo.type = type + tarinfo.linkname = linkname + if pwd: + try: + tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] + except KeyError: + pass + if grp: + try: + tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] + except KeyError: + pass + + if type in (CHRTYPE, BLKTYPE): + if hasattr(os, "major") and hasattr(os, "minor"): + tarinfo.devmajor = os.major(statres.st_rdev) + tarinfo.devminor = os.minor(statres.st_rdev) + return tarinfo + + def list(self, verbose=True): + """Print a table of contents to sys.stdout. If `verbose' is False, only + the names of the members are printed. If it is True, an `ls -l'-like + output is produced. 
+ """ + self._check() + + for tarinfo in self: + if verbose: + print(filemode(tarinfo.mode), end=' ') + print("%s/%s" % (tarinfo.uname or tarinfo.uid, + tarinfo.gname or tarinfo.gid), end=' ') + if tarinfo.ischr() or tarinfo.isblk(): + print("%10s" % ("%d,%d" \ + % (tarinfo.devmajor, tarinfo.devminor)), end=' ') + else: + print("%10d" % tarinfo.size, end=' ') + print("%d-%02d-%02d %02d:%02d:%02d" \ + % time.localtime(tarinfo.mtime)[:6], end=' ') + + print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') + + if verbose: + if tarinfo.issym(): + print("->", tarinfo.linkname, end=' ') + if tarinfo.islnk(): + print("link to", tarinfo.linkname, end=' ') + print() + + def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): + """Add the file `name' to the archive. `name' may be any type of file + (directory, fifo, symbolic link, etc.). If given, `arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting `recursive' to False. `exclude' is a function that should + return True for each filename to be excluded. `filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + """ + self._check("aw") + + if arcname is None: + arcname = name + + # Exclude pathnames. + if exclude is not None: + import warnings + warnings.warn("use the filter argument instead", + DeprecationWarning, 2) + if exclude(name): + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Skip if somebody tries to archive the archive... + if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. 
+ tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + f = bltn_open(name, "rb") + self.addfile(tarinfo, f) + f.close() + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in os.listdir(name): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, exclude, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, tarinfo.size bytes are read from it and added to the archive. + You can create TarInfo objects using gettarinfo(). + On Windows platforms, `fileobj' should always be opened with mode + 'rb' to avoid irritation about the file size. + """ + self._check("aw") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + + # If there's data to follow, append it. + if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def extractall(self, path=".", members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). 
+ """ + directories = [] + + if members is None: + members = self + + for tarinfo in members: + if tarinfo.isdir(): + # Extract directories with a safe mode. + directories.append(tarinfo) + tarinfo = copy.copy(tarinfo) + tarinfo.mode = 0o700 + # Do not set_attrs directories, as we will do that further down + self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name) + directories.reverse() + + # Set correct owner, mtime and filemode on directories. + for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extract(self, member, path="", set_attrs=True): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + # Prepare the link target for makelink(). + if tarinfo.islnk(): + tarinfo._link_target = os.path.join(path, tarinfo.linkname) + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs) + except EnvironmentError as e: + if self.errorlevel > 0: + raise + else: + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extractfile(self, member): + """Extract a member from the archive as a file object. 
`member' may be + a filename or a TarInfo object. If `member' is a regular file, a + file-like object is returned. If `member' is a link, a file-like + object is constructed from the link's target. If `member' is none of + the above, None is returned. + The file-like object is read-only and provides the following + methods: read(), readline(), readlines(), seek() and tell() + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg(): + return self.fileobject(self, tarinfo) + + elif tarinfo.type not in SUPPORTED_TYPES: + # If a member's type is unknown, it is treated as a + # regular file. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. + raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. + return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True): + """Extract the TarInfo object tarinfo to a physical + file called targetpath. + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. 
+ os.makedirs(upperdirs) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink(tarinfo, targetpath) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. + + def makedir(self, tarinfo, targetpath): + """Make a directory called targetpath. + """ + try: + # Use a safe mode for the directory, the real mode is set + # later in _extract_member(). + os.mkdir(targetpath, 0o700) + except EnvironmentError as e: + if e.errno != errno.EEXIST: + raise + + def makefile(self, tarinfo, targetpath): + """Make a file called targetpath. + """ + source = self.fileobj + source.seek(tarinfo.offset_data) + target = bltn_open(targetpath, "wb") + if tarinfo.sparse is not None: + for offset, size in tarinfo.sparse: + target.seek(offset) + copyfileobj(source, target, size) + else: + copyfileobj(source, target, tarinfo.size) + target.seek(tarinfo.size) + target.truncate() + target.close() + + def makeunknown(self, tarinfo, targetpath): + """Make a file from a TarInfo object with an unknown type + at targetpath. 
+ """ + self.makefile(tarinfo, targetpath) + self._dbg(1, "tarfile: Unknown file type %r, " \ + "extracted as regular file." % tarinfo.type) + + def makefifo(self, tarinfo, targetpath): + """Make a fifo called targetpath. + """ + if hasattr(os, "mkfifo"): + os.mkfifo(targetpath) + else: + raise ExtractError("fifo not supported by system") + + def makedev(self, tarinfo, targetpath): + """Make a character or block device called targetpath. + """ + if not hasattr(os, "mknod") or not hasattr(os, "makedev"): + raise ExtractError("special devices not supported by system") + + mode = tarinfo.mode + if tarinfo.isblk(): + mode |= stat.S_IFBLK + else: + mode |= stat.S_IFCHR + + os.mknod(targetpath, mode, + os.makedev(tarinfo.devmajor, tarinfo.devminor)) + + def makelink(self, tarinfo, targetpath): + """Make a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + """ + try: + # For systems that support symbolic and hard links. + if tarinfo.issym(): + os.symlink(tarinfo.linkname, targetpath) + else: + # See extract(). + if os.path.exists(tarinfo._link_target): + os.link(tarinfo._link_target, targetpath) + else: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except symlink_exception: + if tarinfo.issym(): + linkpath = os.path.join(os.path.dirname(tarinfo.name), + tarinfo.linkname) + else: + linkpath = tarinfo.linkname + else: + try: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except KeyError: + raise ExtractError("unable to resolve link inside archive") + + def chown(self, tarinfo, targetpath): + """Set owner of targetpath according to tarinfo. + """ + if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: + # We have to be root to do so. 
+ try: + g = grp.getgrnam(tarinfo.gname)[2] + except KeyError: + g = tarinfo.gid + try: + u = pwd.getpwnam(tarinfo.uname)[2] + except KeyError: + u = tarinfo.uid + try: + if tarinfo.issym() and hasattr(os, "lchown"): + os.lchown(targetpath, u, g) + else: + if sys.platform != "os2emx": + os.chown(targetpath, u, g) + except EnvironmentError as e: + raise ExtractError("could not change owner") + + def chmod(self, tarinfo, targetpath): + """Set file permissions of targetpath according to tarinfo. + """ + if hasattr(os, 'chmod'): + try: + os.chmod(targetpath, tarinfo.mode) + except EnvironmentError as e: + raise ExtractError("could not change mode") + + def utime(self, tarinfo, targetpath): + """Set modification time of targetpath according to tarinfo. + """ + if not hasattr(os, 'utime'): + return + try: + os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) + except EnvironmentError as e: + raise ExtractError("could not change modification time") + + #-------------------------------------------------------------------------- + def next(self): + """Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + """ + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + # Read the next block. 
+ self.fileobj.seek(self.offset) + tarinfo = None + while True: + try: + tarinfo = self.tarinfo.fromtarfile(self) + except EOFHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + except InvalidHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + elif self.offset == 0: + raise ReadError(str(e)) + except EmptyHeaderError: + if self.offset == 0: + raise ReadError("empty file") + except TruncatedHeaderError as e: + if self.offset == 0: + raise ReadError(str(e)) + except SubsequentHeaderError as e: + raise ReadError(str(e)) + break + + if tarinfo is not None: + self.members.append(tarinfo) + else: + self._loaded = True + + return tarinfo + + #-------------------------------------------------------------------------- + # Little helper methods: + + def _getmember(self, name, tarinfo=None, normalize=False): + """Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + """ + # Ensure that all members have been loaded. + members = self.getmembers() + + # Limit the member search list up to tarinfo. + if tarinfo is not None: + members = members[:members.index(tarinfo)] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + def _load(self): + """Read through the entire archive file and look for readable + members. + """ + while True: + tarinfo = self.next() + if tarinfo is None: + break + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. 
+ """ + if self.closed: + raise IOError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise IOError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + return iter(self.members) + else: + return TarIter(self) + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. + """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True +# class TarFile + +class TarIter(object): + """Iterator Class. + + for tarinfo in TarFile(...): + suite... + """ + + def __init__(self, tarfile): + """Construct a TarIter object. + """ + self.tarfile = tarfile + self.index = 0 + def __iter__(self): + """Return iterator object. + """ + return self + + def __next__(self): + """Return the next item using TarFile's next() method. + When all members have been read, set TarFile as _loaded. 
+ """ + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will cause TarIter to stop prematurely. + if not self.tarfile._loaded: + tarinfo = self.tarfile.next() + if not tarinfo: + self.tarfile._loaded = True + raise StopIteration + else: + try: + tarinfo = self.tarfile.members[self.index] + except IndexError: + raise StopIteration + self.index += 1 + return tarinfo + + next = __next__ # for Python 2.x + +#-------------------- +# exported functions +#-------------------- +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. + """ + try: + t = open(name) + t.close() + return True + except TarError: + return False + +bltn_open = open +open = TarFile.open diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/compat.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/compat.py new file mode 100644 index 00000000..433eb111 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/compat.py @@ -0,0 +1,1102 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2014 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +from __future__ import absolute_import + +import os +import re +import sys + +if sys.version_info[0] < 3: + from StringIO import StringIO + string_types = basestring, + text_type = unicode + from types import FileType as file_type + import __builtin__ as builtins + import ConfigParser as configparser + from ._backport import shutil + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit + from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, + pathname2url, ContentTooShortError, splittype) + + def quote(s): + if isinstance(s, unicode): + s = s.encode('utf-8') + return _quote(s) + + import urllib2 + from urllib2 import (Request, urlopen, URLError, HTTPError, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPSHandler, HTTPHandler, HTTPRedirectHandler, + build_opener) + import httplib + import xmlrpclib + import Queue as queue + from HTMLParser import HTMLParser + import htmlentitydefs + raw_input = raw_input + from itertools import ifilter as filter + from itertools import ifilterfalse as filterfalse + + _userprog = None + def splituser(host): + """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + global _userprog + if _userprog is None: + import re + _userprog = re.compile('^(.*)@(.*)$') + + match = _userprog.match(host) + if match: return match.group(1, 2) + return None, host + +else: + from io import StringIO + string_types = str, + text_type = str + from io import TextIOWrapper as file_type + import builtins + import configparser + import shutil + from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, + unquote, urlsplit, urlunsplit, splittype) + from urllib.request import (urlopen, urlretrieve, Request, url2pathname, + pathname2url, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPSHandler, HTTPHandler, HTTPRedirectHandler, + build_opener) + from urllib.error import HTTPError, URLError, ContentTooShortError + import http.client as httplib + import urllib.request as urllib2 + 
import xmlrpc.client as xmlrpclib + import queue + from html.parser import HTMLParser + import html.entities as htmlentitydefs + raw_input = input + from itertools import filterfalse + filter = filter + +try: + from ssl import match_hostname, CertificateError +except ImportError: + class CertificateError(ValueError): + pass + + + def _dnsname_match(dn, hostname, max_wildcards=1): + """Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + parts = dn.split('.') + leftmost, remainder = parts[0], parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survery of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. 
www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. 
+ if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + + +try: + from types import SimpleNamespace as Container +except ImportError: + class Container(object): + """ + A generic container for when multiple values need to be returned + """ + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +try: + from shutil import which +except ImportError: + # Implementation from Python 3.3 + def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + """ + # Check that a given file can be accessed with the correct mode. + # Additionally check that `file` is not a directory, as on Windows + # directories pass the os.access check. + def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) + and not os.path.isdir(fn)) + + # If we're given a path with a directory part, look it up directly rather + # than referring to PATH directories. This includes checking relative to the + # current directory, e.g. ./script + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + # The current directory takes precedence on Windows. 
+ if not os.curdir in path: + path.insert(0, os.curdir) + + # PATHEXT is necessary to check on Windows. + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + # See if the given file matches any of the expected path extensions. + # This will allow us to short circuit when given "python.exe". + # If it does match, only test that one, otherwise we have to try + # others. + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + # On other platforms you don't have things like PATHEXT to tell you + # what file suffixes are executable, so just pass on cmd as-is. + files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if not normdir in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None + + +# ZipFile is a context manager in 2.7, but not in 2.6 + +from zipfile import ZipFile as BaseZipFile + +if hasattr(BaseZipFile, '__enter__'): + ZipFile = BaseZipFile +else: + from zipfile import ZipExtFile as BaseZipExtFile + + class ZipExtFile(BaseZipExtFile): + def __init__(self, base): + self.__dict__.update(base.__dict__) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + class ZipFile(BaseZipFile): + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + def open(self, *args, **kwargs): + base = BaseZipFile.open(self, *args, **kwargs) + return ZipExtFile(base) + +try: + from platform import python_implementation +except ImportError: # pragma: no cover + def python_implementation(): + """Return a string identifying the Python implementation.""" + if 'PyPy' in sys.version: + return 'PyPy' + if os.name == 'java': + return 'Jython' + if sys.version.startswith('IronPython'): 
+ return 'IronPython' + return 'CPython' + +try: + import sysconfig +except ImportError: # pragma: no cover + from ._backport import sysconfig + +try: + callable = callable +except NameError: # pragma: no cover + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode + fsdecode = os.fsdecode +except AttributeError: # pragma: no cover + _fsencoding = sys.getfilesystemencoding() + if _fsencoding == 'mbcs': + _fserrors = 'strict' + else: + _fserrors = 'surrogateescape' + + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, text_type): + return filename.encode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + + def fsdecode(filename): + if isinstance(filename, text_type): + return filename + elif isinstance(filename, bytes): + return filename.decode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + +try: + from tokenize import detect_encoding +except ImportError: # pragma: no cover + from codecs import BOM_UTF8, lookup + import re + + cookie_re = re.compile("coding[:=]\s*([-\w.]+)") + + def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + + def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argment, readline, + in the same way as the tokenize() generator. 
+ + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + """ + try: + filename = readline.__self__.name + except AttributeError: + filename = None + bom_found = False + encoding = None + default = 'utf-8' + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' + + def find_cookie(line): + try: + # Decode as UTF-8. Either the line is an encoding declaration, + # in which case it should be pure ASCII, or it must be UTF-8 + # per default encoding. + line_string = line.decode('utf-8') + except UnicodeDecodeError: + msg = "invalid or missing encoding declaration" + if filename is not None: + msg = '{} for {!r}'.format(msg, filename) + raise SyntaxError(msg) + + matches = cookie_re.findall(line_string) + if not matches: + return None + encoding = _get_normal_name(matches[0]) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + if filename is None: + msg = "unknown encoding: " + encoding + else: + msg = "unknown encoding for {!r}: {}".format(filename, + encoding) + raise SyntaxError(msg) + + if bom_found: + if codec.name != 'utf-8': + # This behaviour mimics the Python interpreter + if filename is None: + msg = 'encoding problem: utf-8' + else: + msg = 'encoding problem for {!r}: utf-8'.format(filename) + raise SyntaxError(msg) + encoding += '-sig' + return encoding + + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not 
first: + return default, [] + + encoding = find_cookie(first) + if encoding: + return encoding, [first] + + second = read_or_stop() + if not second: + return default, [first] + + encoding = find_cookie(second) + if encoding: + return encoding, [first, second] + + return default, [first, second] + +# For converting & <-> & etc. +try: + from html import escape +except ImportError: + from cgi import escape +if sys.version_info[:2] < (3, 4): + unescape = HTMLParser().unescape +else: + from html import unescape + +try: + from collections import ChainMap +except ImportError: # pragma: no cover + from collections import MutableMapping + + try: + from reprlib import recursive_repr as _recursive_repr + except ImportError: + def _recursive_repr(fillvalue='...'): + ''' + Decorator to make a repr function return fillvalue for a recursive + call + ''' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + return wrapper + + return decorating_function + + class ChainMap(MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. 
+ + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self): # like Django's Context.push() + 'New ChainMap with a new dict followed by all previous maps.' + return self.__class__({}, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. 
Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + +try: + from imp import cache_from_source +except ImportError: # pragma: no cover + def cache_from_source(path, debug_override=None): + assert path.endswith('.py') + if debug_override is None: + debug_override = __debug__ + if debug_override: + suffix = 'c' + else: + suffix = 'o' + return path + suffix + +try: + from collections import OrderedDict +except ImportError: # pragma: no cover +## {{{ http://code.activestate.com/recipes/576693/ (r9) +# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. +# Passes Python2.7's test suite and incorporates all the latest updates. + try: + from thread import get_ident as _get_ident + except ImportError: + from dummy_thread import get_ident as _get_ident + + try: + from _abcoll import KeysView, ValuesView, ItemsView + except ImportError: + pass + + + class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as for regular dictionaries. + + # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. 
+ + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. Signature is the same as for + regular dictionaries, but keyword arguments are not recommended + because their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link which goes at the end of the linked + # list, and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which is + # then removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, key = self.__map.pop(key) + link_prev[1] = link_next + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + root = self.__root + curr = root[0] + while curr is not root: + yield curr[2] + curr = curr[0] + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + except AttributeError: + pass + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. 
+ Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root[0] + link_prev = link[0] + link_prev[1] = root + root[0] = link_prev + else: + link = root[1] + link_next = link[1] + root[1] = link_next + link_next[0] = root + key = link[2] + del self.__map[key] + value = dict.pop(self, key) + return key, value + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) items in od' + for k in self: + yield (k, self[k]) + + def update(*args, **kwds): + '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
+ + If E is a dict instance, does: for k in E: od[k] = E[k] + If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] + Or if E is an iterable of items, does: for k, v in E: od[k] = v + In either case, this is followed by: for k, v in F.items(): od[k] = v + + ''' + if len(args) > 2: + raise TypeError('update() takes at most 2 positional ' + 'arguments (%d given)' % (len(args),)) + elif not args: + raise TypeError('update() takes at least 1 argument (0 given)') + self = args[0] + # Make progressively weaker assumptions about "other" + other = () + if len(args) == 2: + other = args[1] + if isinstance(other, dict): + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running=None): + 'od.__repr__() <==> repr(od)' + if not _repr_running: _repr_running = {} + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' 
+ _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) + +try: + from logging.config import BaseConfigurator, valid_ident +except ImportError: # pragma: no cover + IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) + + + def valid_ident(s): + m = IDENTIFIER.match(s) + if not m: + raise ValueError('Not a valid Python identifier: %r' % s) + return True + + + # The ConvertingXXX classes are wrappers around standard Python containers, + # and 
they serve to convert any suitable values in the container. The + # conversion converts base dicts, lists and tuples to their wrapped + # equivalents, whereas strings which match a conversion format are converted + # appropriately. + # + # Each wrapper should have a configurator attribute holding the actual + # configurator to use for conversion. + + class ConvertingDict(dict): + """A converting dictionary wrapper.""" + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def get(self, key, default=None): + value = dict.get(self, key, default) + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def pop(self, key, default=None): + value = dict.pop(self, key, default) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + class ConvertingList(list): + """A converting list wrapper.""" + def __getitem__(self, key): + value = list.__getitem__(self, key) + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def pop(self, idx=-1): + value = list.pop(self, idx) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, 
ConvertingList, + ConvertingTuple): + result.parent = self + return result + + class ConvertingTuple(tuple): + """A converting tuple wrapper.""" + def __getitem__(self, key): + value = tuple.__getitem__(self, key) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + class BaseConfigurator(object): + """ + The configurator base class which defines some useful defaults. + """ + + CONVERT_PATTERN = re.compile(r'^(?P[a-z]+)://(?P.*)$') + + WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') + DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') + INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') + DIGIT_PATTERN = re.compile(r'^\d+$') + + value_converters = { + 'ext' : 'ext_convert', + 'cfg' : 'cfg_convert', + } + + # We might want to use a different one, e.g. importlib + importer = staticmethod(__import__) + + def __init__(self, config): + self.config = ConvertingDict(config) + self.config.configurator = self + + def resolve(self, s): + """ + Resolve strings to objects using standard import and attribute + syntax. + """ + name = s.split('.') + used = name.pop(0) + try: + found = self.importer(used) + for frag in name: + used += '.' 
+ frag + try: + found = getattr(found, frag) + except AttributeError: + self.importer(used) + found = getattr(found, frag) + return found + except ImportError: + e, tb = sys.exc_info()[1:] + v = ValueError('Cannot resolve %r: %s' % (s, e)) + v.__cause__, v.__traceback__ = e, tb + raise v + + def ext_convert(self, value): + """Default converter for the ext:// protocol.""" + return self.resolve(value) + + def cfg_convert(self, value): + """Default converter for the cfg:// protocol.""" + rest = value + m = self.WORD_PATTERN.match(rest) + if m is None: + raise ValueError("Unable to convert %r" % value) + else: + rest = rest[m.end():] + d = self.config[m.groups()[0]] + #print d, rest + while rest: + m = self.DOT_PATTERN.match(rest) + if m: + d = d[m.groups()[0]] + else: + m = self.INDEX_PATTERN.match(rest) + if m: + idx = m.groups()[0] + if not self.DIGIT_PATTERN.match(idx): + d = d[idx] + else: + try: + n = int(idx) # try as number first (most likely) + d = d[n] + except TypeError: + d = d[idx] + if m: + rest = rest[m.end():] + else: + raise ValueError('Unable to convert ' + '%r at %r' % (value, rest)) + #rest should be empty + return d + + def convert(self, value): + """ + Convert values to an appropriate type. dicts, lists and tuples are + replaced by their converting alternatives. Strings are checked to + see if they have a conversion format and are converted if they do. 
+ """ + if not isinstance(value, ConvertingDict) and isinstance(value, dict): + value = ConvertingDict(value) + value.configurator = self + elif not isinstance(value, ConvertingList) and isinstance(value, list): + value = ConvertingList(value) + value.configurator = self + elif not isinstance(value, ConvertingTuple) and\ + isinstance(value, tuple): + value = ConvertingTuple(value) + value.configurator = self + elif isinstance(value, string_types): + m = self.CONVERT_PATTERN.match(value) + if m: + d = m.groupdict() + prefix = d['prefix'] + converter = self.value_converters.get(prefix, None) + if converter: + suffix = d['suffix'] + converter = getattr(self, converter) + value = converter(suffix) + return value + + def configure_custom(self, config): + """Configure an object with a user-supplied factory.""" + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) + result = c(**kwargs) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def as_tuple(self, value): + """Utility function which converts lists to tuples.""" + if isinstance(value, list): + value = tuple(value) + return value diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/database.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/database.py new file mode 100644 index 00000000..6ed04a2f --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/database.py @@ -0,0 +1,1303 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2014 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""PEP 376 implementation.""" + +from __future__ import unicode_literals + +import base64 +import codecs +import contextlib +import hashlib +import logging +import os +import posixpath +import sys +import zipimport + +from . 
import DistlibException, resources +from .compat import StringIO +from .version import get_scheme, UnsupportedVersionError +from .metadata import Metadata, METADATA_FILENAME +from .util import (parse_requirement, cached_property, parse_name_and_version, + read_exports, write_exports, CSVReader, CSVWriter) + + +__all__ = ['Distribution', 'BaseInstalledDistribution', + 'InstalledDistribution', 'EggInfoDistribution', + 'DistributionPath'] + + +logger = logging.getLogger(__name__) + +EXPORTS_FILENAME = 'pydist-exports.json' +COMMANDS_FILENAME = 'pydist-commands.json' + +DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', + 'RESOURCES', EXPORTS_FILENAME, 'SHARED') + +DISTINFO_EXT = '.dist-info' + + +class _Cache(object): + """ + A simple cache mapping names and .dist-info paths to distributions + """ + def __init__(self): + """ + Initialise an instance. There is normally one for each DistributionPath. + """ + self.name = {} + self.path = {} + self.generated = False + + def clear(self): + """ + Clear the cache, setting it to its initial state. + """ + self.name.clear() + self.path.clear() + self.generated = False + + def add(self, dist): + """ + Add a distribution to the cache. + :param dist: The distribution to add. + """ + if dist.path not in self.path: + self.path[dist.path] = dist + self.name.setdefault(dist.key, []).append(dist) + + +class DistributionPath(object): + """ + Represents a set of distributions installed on a path (typically sys.path). + """ + def __init__(self, path=None, include_egg=False): + """ + Create an instance from a path, optionally including legacy (distutils/ + setuptools/distribute) distributions. + :param path: The path to use, as a list of directories. If not specified, + sys.path is used. + :param include_egg: If True, this instance will look for and return legacy + distributions as well as those based on PEP 376. 
+ """ + if path is None: + path = sys.path + self.path = path + self._include_dist = True + self._include_egg = include_egg + + self._cache = _Cache() + self._cache_egg = _Cache() + self._cache_enabled = True + self._scheme = get_scheme('default') + + def _get_cache_enabled(self): + return self._cache_enabled + + def _set_cache_enabled(self, value): + self._cache_enabled = value + + cache_enabled = property(_get_cache_enabled, _set_cache_enabled) + + def clear_cache(self): + """ + Clears the internal cache. + """ + self._cache.clear() + self._cache_egg.clear() + + + def _yield_distributions(self): + """ + Yield .dist-info and/or .egg(-info) distributions. + """ + # We need to check if we've seen some resources already, because on + # some Linux systems (e.g. some Debian/Ubuntu variants) there are + # symlinks which alias other files in the environment. + seen = set() + for path in self.path: + finder = resources.finder_for_path(path) + if finder is None: + continue + r = finder.find('') + if not r or not r.is_container: + continue + rset = sorted(r.resources) + for entry in rset: + r = finder.find(entry) + if not r or r.path in seen: + continue + if self._include_dist and entry.endswith(DISTINFO_EXT): + metadata_path = posixpath.join(entry, METADATA_FILENAME) + pydist = finder.find(metadata_path) + if not pydist: + continue + + metadata = Metadata(fileobj=pydist.as_stream(), + scheme='legacy') + logger.debug('Found %s', r.path) + seen.add(r.path) + yield new_dist_class(r.path, metadata=metadata, + env=self) + elif self._include_egg and entry.endswith(('.egg-info', + '.egg')): + logger.debug('Found %s', r.path) + seen.add(r.path) + yield old_dist_class(r.path, self) + + def _generate_cache(self): + """ + Scan the path for distributions and populate the cache with + those that are found. 
+ """ + gen_dist = not self._cache.generated + gen_egg = self._include_egg and not self._cache_egg.generated + if gen_dist or gen_egg: + for dist in self._yield_distributions(): + if isinstance(dist, InstalledDistribution): + self._cache.add(dist) + else: + self._cache_egg.add(dist) + + if gen_dist: + self._cache.generated = True + if gen_egg: + self._cache_egg.generated = True + + @classmethod + def distinfo_dirname(cls, name, version): + """ + The *name* and *version* parameters are converted into their + filename-escaped form, i.e. any ``'-'`` characters are replaced + with ``'_'`` other than the one in ``'dist-info'`` and the one + separating the name from the version number. + + :parameter name: is converted to a standard distribution name by replacing + any runs of non- alphanumeric characters with a single + ``'-'``. + :type name: string + :parameter version: is converted to a standard version string. Spaces + become dots, and all other non-alphanumeric characters + (except dots) become dashes, with runs of multiple + dashes condensed to a single dash. + :type version: string + :returns: directory name + :rtype: string""" + name = name.replace('-', '_') + return '-'.join([name, version]) + DISTINFO_EXT + + def get_distributions(self): + """ + Provides an iterator that looks for distributions and returns + :class:`InstalledDistribution` or + :class:`EggInfoDistribution` instances for each one of them. + + :rtype: iterator of :class:`InstalledDistribution` and + :class:`EggInfoDistribution` instances + """ + if not self._cache_enabled: + for dist in self._yield_distributions(): + yield dist + else: + self._generate_cache() + + for dist in self._cache.path.values(): + yield dist + + if self._include_egg: + for dist in self._cache_egg.path.values(): + yield dist + + def get_distribution(self, name): + """ + Looks for a named distribution on the path. + + This function only returns the first result found, as no more than one + value is expected. 
If nothing is found, ``None`` is returned. + + :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` + or ``None`` + """ + result = None + name = name.lower() + if not self._cache_enabled: + for dist in self._yield_distributions(): + if dist.key == name: + result = dist + break + else: + self._generate_cache() + + if name in self._cache.name: + result = self._cache.name[name][0] + elif self._include_egg and name in self._cache_egg.name: + result = self._cache_egg.name[name][0] + return result + + def provides_distribution(self, name, version=None): + """ + Iterates over all distributions to find which distributions provide *name*. + If a *version* is provided, it will be used to filter the results. + + This function only returns the first result found, since no more than + one values are expected. If the directory is not found, returns ``None``. + + :parameter version: a version specifier that indicates the version + required, conforming to the format in ``PEP-345`` + + :type name: string + :type version: string + """ + matcher = None + if not version is None: + try: + matcher = self._scheme.matcher('%s (%s)' % (name, version)) + except ValueError: + raise DistlibException('invalid name or version: %r, %r' % + (name, version)) + + for dist in self.get_distributions(): + provided = dist.provides + + for p in provided: + p_name, p_ver = parse_name_and_version(p) + if matcher is None: + if p_name == name: + yield dist + break + else: + if p_name == name and matcher.match(p_ver): + yield dist + break + + def get_file_path(self, name, relative_path): + """ + Return the path to a resource file. + """ + dist = self.get_distribution(name) + if dist is None: + raise LookupError('no distribution named %r found' % name) + return dist.get_resource_path(relative_path) + + def get_exported_entries(self, category, name=None): + """ + Return all of the exported entries in a particular category. + + :param category: The category to search for entries. 
+ :param name: If specified, only entries with that name are returned. + """ + for dist in self.get_distributions(): + r = dist.exports + if category in r: + d = r[category] + if name is not None: + if name in d: + yield d[name] + else: + for v in d.values(): + yield v + + +class Distribution(object): + """ + A base class for distributions, whether installed or from indexes. + Either way, it must have some metadata, so that's all that's needed + for construction. + """ + + build_time_dependency = False + """ + Set to True if it's known to be only a build-time dependency (i.e. + not needed after installation). + """ + + requested = False + """A boolean that indicates whether the ``REQUESTED`` metadata file is + present (in other words, whether the package was installed by user + request or it was installed as a dependency).""" + + def __init__(self, metadata): + """ + Initialise an instance. + :param metadata: The instance of :class:`Metadata` describing this + distribution. + """ + self.metadata = metadata + self.name = metadata.name + self.key = self.name.lower() # for case-insensitive comparisons + self.version = metadata.version + self.locator = None + self.digest = None + self.extras = None # additional features requested + self.context = None # environment marker overrides + self.download_urls = set() + self.digests = {} + + @property + def source_url(self): + """ + The source archive download URL for this distribution. + """ + return self.metadata.source_url + + download_url = source_url # Backward compatibility + + @property + def name_and_version(self): + """ + A utility property which displays the name and version in parentheses. + """ + return '%s (%s)' % (self.name, self.version) + + @property + def provides(self): + """ + A set of distribution names and versions provided by this distribution. + :return: A set of "name (version)" strings. 
+ """ + plist = self.metadata.provides + s = '%s (%s)' % (self.name, self.version) + if s not in plist: + plist.append(s) + return plist + + def _get_requirements(self, req_attr): + reqts = getattr(self.metadata, req_attr) + return set(self.metadata.get_requirements(reqts, extras=self.extras, + env=self.context)) + + @property + def run_requires(self): + return self._get_requirements('run_requires') + + @property + def meta_requires(self): + return self._get_requirements('meta_requires') + + @property + def build_requires(self): + return self._get_requirements('build_requires') + + @property + def test_requires(self): + return self._get_requirements('test_requires') + + @property + def dev_requires(self): + return self._get_requirements('dev_requires') + + def matches_requirement(self, req): + """ + Say if this instance matches (fulfills) a requirement. + :param req: The requirement to match. + :rtype req: str + :return: True if it matches, else False. + """ + # Requirement may contain extras - parse to lose those + # from what's passed to the matcher + r = parse_requirement(req) + scheme = get_scheme(self.metadata.scheme) + try: + matcher = scheme.matcher(r.requirement) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + result = False + for p in self.provides: + p_name, p_ver = parse_name_and_version(p) + if p_name != name: + continue + try: + result = matcher.match(p_ver) + break + except UnsupportedVersionError: + pass + return result + + def __repr__(self): + """ + Return a textual representation of this instance, + """ + if self.source_url: + suffix = ' [%s]' % self.source_url + else: + suffix = '' + return '' % (self.name, self.version, suffix) + + def __eq__(self, other): + """ + See if this distribution is the same as another. 
+ :param other: The distribution to compare with. To be equal to one + another. distributions must have the same type, name, + version and source_url. + :return: True if it is the same, else False. + """ + if type(other) is not type(self): + result = False + else: + result = (self.name == other.name and + self.version == other.version and + self.source_url == other.source_url) + return result + + def __hash__(self): + """ + Compute hash in a way which matches the equality test. + """ + return hash(self.name) + hash(self.version) + hash(self.source_url) + + +class BaseInstalledDistribution(Distribution): + """ + This is the base class for installed distributions (whether PEP 376 or + legacy). + """ + + hasher = None + + def __init__(self, metadata, path, env=None): + """ + Initialise an instance. + :param metadata: An instance of :class:`Metadata` which describes the + distribution. This will normally have been initialised + from a metadata file in the ``path``. + :param path: The path of the ``.dist-info`` or ``.egg-info`` + directory for the distribution. + :param env: This is normally the :class:`DistributionPath` + instance where this distribution was found. + """ + super(BaseInstalledDistribution, self).__init__(metadata) + self.path = path + self.dist_path = env + + def get_hash(self, data, hasher=None): + """ + Get the hash of some data, using a particular hash algorithm, if + specified. + + :param data: The data to be hashed. + :type data: bytes + :param hasher: The name of a hash implementation, supported by hashlib, + or ``None``. Examples of valid values are ``'sha1'``, + ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and + ``'sha512'``. If no hasher is specified, the ``hasher`` + attribute of the :class:`InstalledDistribution` instance + is used. If the hasher is determined to be ``None``, MD5 + is used as the hashing algorithm. + :returns: The hash of the data. 
If a hasher was explicitly specified, + the returned hash will be prefixed with the specified hasher + followed by '='. + :rtype: str + """ + if hasher is None: + hasher = self.hasher + if hasher is None: + hasher = hashlib.md5 + prefix = '' + else: + hasher = getattr(hashlib, hasher) + prefix = '%s=' % self.hasher + digest = hasher(data).digest() + digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') + return '%s%s' % (prefix, digest) + + +class InstalledDistribution(BaseInstalledDistribution): + """ + Created with the *path* of the ``.dist-info`` directory provided to the + constructor. It reads the metadata contained in ``pydist.json`` when it is + instantiated., or uses a passed in Metadata instance (useful for when + dry-run mode is being used). + """ + + hasher = 'sha256' + + def __init__(self, path, metadata=None, env=None): + self.finder = finder = resources.finder_for_path(path) + if finder is None: + import pdb; pdb.set_trace () + if env and env._cache_enabled and path in env._cache.path: + metadata = env._cache.path[path].metadata + elif metadata is None: + r = finder.find(METADATA_FILENAME) + # Temporary - for legacy support + if r is None: + r = finder.find('METADATA') + if r is None: + raise ValueError('no %s found in %s' % (METADATA_FILENAME, + path)) + with contextlib.closing(r.as_stream()) as stream: + metadata = Metadata(fileobj=stream, scheme='legacy') + + super(InstalledDistribution, self).__init__(metadata, path, env) + + if env and env._cache_enabled: + env._cache.add(self) + + try: + r = finder.find('REQUESTED') + except AttributeError: + import pdb; pdb.set_trace () + self.requested = r is not None + + def __repr__(self): + return '' % ( + self.name, self.version, self.path) + + def __str__(self): + return "%s %s" % (self.name, self.version) + + def _get_records(self): + """ + Get the list of installed files for the distribution + :return: A list of tuples of path, hash and size. 
Note that hash and + size might be ``None`` for some entries. The path is exactly + as stored in the file (which is as in PEP 376). + """ + results = [] + r = self.get_distinfo_resource('RECORD') + with contextlib.closing(r.as_stream()) as stream: + with CSVReader(stream=stream) as record_reader: + # Base location is parent dir of .dist-info dir + #base_location = os.path.dirname(self.path) + #base_location = os.path.abspath(base_location) + for row in record_reader: + missing = [None for i in range(len(row), 3)] + path, checksum, size = row + missing + #if not os.path.isabs(path): + # path = path.replace('/', os.sep) + # path = os.path.join(base_location, path) + results.append((path, checksum, size)) + return results + + @cached_property + def exports(self): + """ + Return the information exported by this distribution. + :return: A dictionary of exports, mapping an export category to a dict + of :class:`ExportEntry` instances describing the individual + export entries, and keyed by name. + """ + result = {} + r = self.get_distinfo_resource(EXPORTS_FILENAME) + if r: + result = self.read_exports() + return result + + def read_exports(self): + """ + Read exports data from a file in .ini format. + + :return: A dictionary of exports, mapping an export category to a list + of :class:`ExportEntry` instances describing the individual + export entries. + """ + result = {} + r = self.get_distinfo_resource(EXPORTS_FILENAME) + if r: + with contextlib.closing(r.as_stream()) as stream: + result = read_exports(stream) + return result + + def write_exports(self, exports): + """ + Write a dictionary of exports to a file in .ini format. + :param exports: A dictionary of exports, mapping an export category to + a list of :class:`ExportEntry` instances describing the + individual export entries. 
+ """ + rf = self.get_distinfo_file(EXPORTS_FILENAME) + with open(rf, 'w') as f: + write_exports(exports, f) + + def get_resource_path(self, relative_path): + """ + NOTE: This API may change in the future. + + Return the absolute path to a resource file with the given relative + path. + + :param relative_path: The path, relative to .dist-info, of the resource + of interest. + :return: The absolute path where the resource is to be found. + """ + r = self.get_distinfo_resource('RESOURCES') + with contextlib.closing(r.as_stream()) as stream: + with CSVReader(stream=stream) as resources_reader: + for relative, destination in resources_reader: + if relative == relative_path: + return destination + raise KeyError('no resource file with relative path %r ' + 'is installed' % relative_path) + + def list_installed_files(self): + """ + Iterates over the ``RECORD`` entries and returns a tuple + ``(path, hash, size)`` for each line. + + :returns: iterator of (path, hash, size) + """ + for result in self._get_records(): + yield result + + def write_installed_files(self, paths, prefix, dry_run=False): + """ + Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any + existing ``RECORD`` file is silently overwritten. + + prefix is used to determine when to write absolute paths. 
+ """ + prefix = os.path.join(prefix, '') + base = os.path.dirname(self.path) + base_under_prefix = base.startswith(prefix) + base = os.path.join(base, '') + record_path = self.get_distinfo_file('RECORD') + logger.info('creating %s', record_path) + if dry_run: + return None + with CSVWriter(record_path) as writer: + for path in paths: + if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): + # do not put size and hash, as in PEP-376 + hash_value = size = '' + else: + size = '%d' % os.path.getsize(path) + with open(path, 'rb') as fp: + hash_value = self.get_hash(fp.read()) + if path.startswith(base) or (base_under_prefix and + path.startswith(prefix)): + path = os.path.relpath(path, base) + writer.writerow((path, hash_value, size)) + + # add the RECORD file itself + if record_path.startswith(base): + record_path = os.path.relpath(record_path, base) + writer.writerow((record_path, '', '')) + return record_path + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. 
+ """ + mismatches = [] + base = os.path.dirname(self.path) + record_path = self.get_distinfo_file('RECORD') + for path, hash_value, size in self.list_installed_files(): + if not os.path.isabs(path): + path = os.path.join(base, path) + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + elif os.path.isfile(path): + actual_size = str(os.path.getsize(path)) + if size and actual_size != size: + mismatches.append((path, 'size', size, actual_size)) + elif hash_value: + if '=' in hash_value: + hasher = hash_value.split('=', 1)[0] + else: + hasher = None + + with open(path, 'rb') as f: + actual_hash = self.get_hash(f.read(), hasher) + if actual_hash != hash_value: + mismatches.append((path, 'hash', hash_value, actual_hash)) + return mismatches + + @cached_property + def shared_locations(self): + """ + A dictionary of shared locations whose keys are in the set 'prefix', + 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. + The corresponding value is the absolute path of that category for + this distribution, and takes into account any paths selected by the + user at installation time (e.g. via command-line arguments). In the + case of the 'namespace' key, this would be a list of absolute paths + for the roots of namespace packages in this distribution. + + The first time this property is accessed, the relevant information is + read from the SHARED file in the .dist-info directory. + """ + result = {} + shared_path = os.path.join(self.path, 'SHARED') + if os.path.isfile(shared_path): + with codecs.open(shared_path, 'r', encoding='utf-8') as f: + lines = f.read().splitlines() + for line in lines: + key, value = line.split('=', 1) + if key == 'namespace': + result.setdefault(key, []).append(value) + else: + result[key] = value + return result + + def write_shared_locations(self, paths, dry_run=False): + """ + Write shared location information to the SHARED file in .dist-info. 
+ :param paths: A dictionary as described in the documentation for + :meth:`shared_locations`. + :param dry_run: If True, the action is logged but no file is actually + written. + :return: The path of the file written to. + """ + shared_path = os.path.join(self.path, 'SHARED') + logger.info('creating %s', shared_path) + if dry_run: + return None + lines = [] + for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): + path = paths[key] + if os.path.isdir(paths[key]): + lines.append('%s=%s' % (key, path)) + for ns in paths.get('namespace', ()): + lines.append('namespace=%s' % ns) + + with codecs.open(shared_path, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + return shared_path + + def get_distinfo_resource(self, path): + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + finder = resources.finder_for_path(self.path) + if finder is None: + raise DistlibException('Unable to get a finder for %s' % self.path) + return finder.find(path) + + def get_distinfo_file(self, path): + """ + Returns a path located under the ``.dist-info`` directory. Returns a + string representing the path. + + :parameter path: a ``'/'``-separated path relative to the + ``.dist-info`` directory or an absolute path; + If *path* is an absolute path and doesn't start + with the ``.dist-info`` directory path, + a :class:`DistlibException` is raised + :type path: str + :rtype: str + """ + # Check if it is an absolute path # XXX use relpath, add tests + if path.find(os.sep) >= 0: + # it's an absolute path? 
+ distinfo_dirname, path = path.split(os.sep)[-2:] + if distinfo_dirname != self.path.split(os.sep)[-1]: + raise DistlibException( + 'dist-info file %r does not belong to the %r %s ' + 'distribution' % (path, self.name, self.version)) + + # The file must be relative + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + + return os.path.join(self.path, path) + + def list_distinfo_files(self): + """ + Iterates over the ``RECORD`` entries and returns paths for each line if + the path is pointing to a file located in the ``.dist-info`` directory + or one of its subdirectories. + + :returns: iterator of paths + """ + base = os.path.dirname(self.path) + for path, checksum, size in self._get_records(): + # XXX add separator or use real relpath algo + if not os.path.isabs(path): + path = os.path.join(base, path) + if path.startswith(self.path): + yield path + + def __eq__(self, other): + return (isinstance(other, InstalledDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + + +class EggInfoDistribution(BaseInstalledDistribution): + """Created with the *path* of the ``.egg-info`` directory or file provided + to the constructor. 
It reads the metadata contained in the file itself, or + if the given path happens to be a directory, the metadata is read from the + file ``PKG-INFO`` under that directory.""" + + requested = True # as we have no way of knowing, assume it was + shared_locations = {} + + def __init__(self, path, env=None): + def set_name_and_version(s, n, v): + s.name = n + s.key = n.lower() # for case-insensitive comparisons + s.version = v + + self.path = path + self.dist_path = env + if env and env._cache_enabled and path in env._cache_egg.path: + metadata = env._cache_egg.path[path].metadata + set_name_and_version(self, metadata.name, metadata.version) + else: + metadata = self._get_metadata(path) + + # Need to be set before caching + set_name_and_version(self, metadata.name, metadata.version) + + if env and env._cache_enabled: + env._cache_egg.add(self) + super(EggInfoDistribution, self).__init__(metadata, path, env) + + def _get_metadata(self, path): + requires = None + + def parse_requires_data(data): + """Create a list of dependencies from a requires.txt file. + + *data*: the contents of a setuptools-produced requires.txt file. + """ + reqs = [] + lines = data.splitlines() + for line in lines: + line = line.strip() + if line.startswith('['): + logger.warning('Unexpected line: quitting requirement scan: %r', + line) + break + r = parse_requirement(line) + if not r: + logger.warning('Not recognised as a requirement: %r', line) + continue + if r.extras: + logger.warning('extra requirements in requires.txt are ' + 'not supported') + if not r.constraints: + reqs.append(r.name) + else: + cons = ', '.join('%s%s' % c for c in r.constraints) + reqs.append('%s (%s)' % (r.name, cons)) + return reqs + + def parse_requires_path(req_path): + """Create a list of dependencies from a requires.txt file. + + *req_path*: the path to a setuptools-produced requires.txt file. 
+ """ + + reqs = [] + try: + with codecs.open(req_path, 'r', 'utf-8') as fp: + reqs = parse_requires_data(fp.read()) + except IOError: + pass + return reqs + + if path.endswith('.egg'): + if os.path.isdir(path): + meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO') + metadata = Metadata(path=meta_path, scheme='legacy') + req_path = os.path.join(path, 'EGG-INFO', 'requires.txt') + requires = parse_requires_path(req_path) + else: + # FIXME handle the case where zipfile is not available + zipf = zipimport.zipimporter(path) + fileobj = StringIO( + zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) + metadata = Metadata(fileobj=fileobj, scheme='legacy') + try: + data = zipf.get_data('EGG-INFO/requires.txt') + requires = parse_requires_data(data.decode('utf-8')) + except IOError: + requires = None + elif path.endswith('.egg-info'): + if os.path.isdir(path): + req_path = os.path.join(path, 'requires.txt') + requires = parse_requires_path(req_path) + path = os.path.join(path, 'PKG-INFO') + metadata = Metadata(path=path, scheme='legacy') + else: + raise DistlibException('path must end with .egg-info or .egg, ' + 'got %r' % path) + + if requires: + metadata.add_requirements(requires) + return metadata + + def __repr__(self): + return '' % ( + self.name, self.version, self.path) + + def __str__(self): + return "%s %s" % (self.name, self.version) + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. 
+ """ + mismatches = [] + record_path = os.path.join(self.path, 'installed-files.txt') + if os.path.exists(record_path): + for path, _, _ in self.list_installed_files(): + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + return mismatches + + def list_installed_files(self): + """ + Iterates over the ``installed-files.txt`` entries and returns a tuple + ``(path, hash, size)`` for each line. + + :returns: a list of (path, hash, size) + """ + + def _md5(path): + f = open(path, 'rb') + try: + content = f.read() + finally: + f.close() + return hashlib.md5(content).hexdigest() + + def _size(path): + return os.stat(path).st_size + + record_path = os.path.join(self.path, 'installed-files.txt') + result = [] + if os.path.exists(record_path): + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + p = os.path.normpath(os.path.join(self.path, line)) + # "./" is present as a marker between installed files + # and installation metadata files + if not os.path.exists(p): + logger.warning('Non-existent file: %s', p) + if p.endswith(('.pyc', '.pyo')): + continue + #otherwise fall through and fail + if not os.path.isdir(p): + result.append((p, _md5(p), _size(p))) + result.append((record_path, None, None)) + return result + + def list_distinfo_files(self, absolute=False): + """ + Iterates over the ``installed-files.txt`` entries and returns paths for + each line if the path is pointing to a file located in the + ``.egg-info`` directory or one of its subdirectories. + + :parameter absolute: If *absolute* is ``True``, each returned path is + transformed into a local absolute path. Otherwise the + raw value from ``installed-files.txt`` is returned. 
+ :type absolute: boolean + :returns: iterator of paths + """ + record_path = os.path.join(self.path, 'installed-files.txt') + skip = True + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + if line == './': + skip = False + continue + if not skip: + p = os.path.normpath(os.path.join(self.path, line)) + if p.startswith(self.path): + if absolute: + yield p + else: + yield line + + def __eq__(self, other): + return (isinstance(other, EggInfoDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + +new_dist_class = InstalledDistribution +old_dist_class = EggInfoDistribution + + +class DependencyGraph(object): + """ + Represents a dependency graph between distributions. + + The dependency relationships are stored in an ``adjacency_list`` that maps + distributions to a list of ``(other, label)`` tuples where ``other`` + is a distribution and the edge is labeled with ``label`` (i.e. the version + specifier, if such was provided). Also, for more efficient traversal, for + every distribution ``x``, a list of predecessors is kept in + ``reverse_list[x]``. An edge from distribution ``a`` to + distribution ``b`` means that ``a`` depends on ``b``. If any missing + dependencies are found, they are stored in ``missing``, which is a + dictionary that maps distributions to a list of requirements that were not + provided by any other distributions. + """ + + def __init__(self): + self.adjacency_list = {} + self.reverse_list = {} + self.missing = {} + + def add_distribution(self, distribution): + """Add the *distribution* to the graph. 
+ + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + """ + self.adjacency_list[distribution] = [] + self.reverse_list[distribution] = [] + #self.missing[distribution] = [] + + def add_edge(self, x, y, label=None): + """Add an edge from distribution *x* to distribution *y* with the given + *label*. + + :type x: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type y: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type label: ``str`` or ``None`` + """ + self.adjacency_list[x].append((y, label)) + # multiple edges are allowed, so be careful + if x not in self.reverse_list[y]: + self.reverse_list[y].append(x) + + def add_missing(self, distribution, requirement): + """ + Add a missing *requirement* for the given *distribution*. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + :type requirement: ``str`` + """ + logger.debug('%s missing %r', distribution, requirement) + self.missing.setdefault(distribution, []).append(requirement) + + def _repr_dist(self, dist): + return '%s %s' % (dist.name, dist.version) + + def repr_node(self, dist, level=1): + """Prints only a subgraph""" + output = [self._repr_dist(dist)] + for other, label in self.adjacency_list[dist]: + dist = self._repr_dist(other) + if label is not None: + dist = '%s [%s]' % (dist, label) + output.append(' ' * level + str(dist)) + suboutput = self.repr_node(other, level + 1) + subs = suboutput.split('\n') + output.extend(subs[1:]) + return '\n'.join(output) + + def to_dot(self, f, skip_disconnected=True): + """Writes a DOT output for the graph to the provided file *f*. + + If *skip_disconnected* is set to ``True``, then all distributions + that are not dependent on any other distribution are skipped. 
+ + :type f: has to support ``file``-like operations + :type skip_disconnected: ``bool`` + """ + disconnected = [] + + f.write("digraph dependencies {\n") + for dist, adjs in self.adjacency_list.items(): + if len(adjs) == 0 and not skip_disconnected: + disconnected.append(dist) + for other, label in adjs: + if not label is None: + f.write('"%s" -> "%s" [label="%s"]\n' % + (dist.name, other.name, label)) + else: + f.write('"%s" -> "%s"\n' % (dist.name, other.name)) + if not skip_disconnected and len(disconnected) > 0: + f.write('subgraph disconnected {\n') + f.write('label = "Disconnected"\n') + f.write('bgcolor = red\n') + + for dist in disconnected: + f.write('"%s"' % dist.name) + f.write('\n') + f.write('}\n') + f.write('}\n') + + def topological_sort(self): + """ + Perform a topological sort of the graph. + :return: A tuple, the first element of which is a topologically sorted + list of distributions, and the second element of which is a + list of distributions that cannot be sorted because they have + circular dependencies and so form a cycle. + """ + result = [] + # Make a shallow copy of the adjacency list + alist = {} + for k, v in self.adjacency_list.items(): + alist[k] = v[:] + while True: + # See what we can remove in this run + to_remove = [] + for k, v in list(alist.items())[:]: + if not v: + to_remove.append(k) + del alist[k] + if not to_remove: + # What's left in alist (if anything) is a cycle. 
+ break + # Remove from the adjacency list of others + for k, v in alist.items(): + alist[k] = [(d, r) for d, r in v if d not in to_remove] + logger.debug('Moving to result: %s', + ['%s (%s)' % (d.name, d.version) for d in to_remove]) + result.extend(to_remove) + return result, list(alist.keys()) + + def __repr__(self): + """Representation of the graph""" + output = [] + for dist, adjs in self.adjacency_list.items(): + output.append(self.repr_node(dist)) + return '\n'.join(output) + + +def make_graph(dists, scheme='default'): + """Makes a dependency graph from the given distributions. + + :parameter dists: a list of distributions + :type dists: list of :class:`distutils2.database.InstalledDistribution` and + :class:`distutils2.database.EggInfoDistribution` instances + :rtype: a :class:`DependencyGraph` instance + """ + scheme = get_scheme(scheme) + graph = DependencyGraph() + provided = {} # maps names to lists of (version, dist) tuples + + # first, build the graph and find out what's provided + for dist in dists: + graph.add_distribution(dist) + + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + provided.setdefault(name, []).append((version, dist)) + + # now make the edges + for dist in dists: + requires = (dist.run_requires | dist.meta_requires | + dist.build_requires | dist.dev_requires) + for req in requires: + try: + matcher = scheme.matcher(req) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + matched = False + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + graph.add_edge(dist, provider, req) + matched = True + break + if not matched: + 
graph.add_missing(dist, req) + return graph + + +def get_dependent_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + dependent on *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + dep = [dist] # dependent distributions + todo = graph.reverse_list[dist] # list of nodes we should inspect + + while todo: + d = todo.pop() + dep.append(d) + for succ in graph.reverse_list[d]: + if succ not in dep: + todo.append(succ) + + dep.pop(0) # remove dist from dep, was there to prevent infinite loops + return dep + + +def get_required_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + required by *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + req = [] # required distributions + todo = graph.adjacency_list[dist] # list of nodes we should inspect + + while todo: + d = todo.pop()[0] + req.append(d) + for pred in graph.adjacency_list[d]: + if pred not in req: + todo.append(pred) + + return req + + +def make_dist(name, version, **kwargs): + """ + A convenience method for making a dist given just a name and version. 
+ """ + summary = kwargs.pop('summary', 'Placeholder for summary') + md = Metadata(**kwargs) + md.name = name + md.version = version + md.summary = summary or 'Plaeholder for summary' + return Distribution(md) diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/index.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/index.py new file mode 100644 index 00000000..73037c97 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/index.py @@ -0,0 +1,513 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import hashlib +import logging +import os +import shutil +import subprocess +import tempfile +try: + from threading import Thread +except ImportError: + from dummy_threading import Thread + +from . import DistlibException +from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr, + urlparse, build_opener, string_types) +from .util import cached_property, zip_dir, ServerProxy + +logger = logging.getLogger(__name__) + +DEFAULT_INDEX = 'https://pypi.python.org/pypi' +DEFAULT_REALM = 'pypi' + +class PackageIndex(object): + """ + This class represents a package index compatible with PyPI, the Python + Package Index. + """ + + boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$' + + def __init__(self, url=None): + """ + Initialise an instance. + + :param url: The URL of the index. If not specified, the URL for PyPI is + used. 
+ """ + self.url = url or DEFAULT_INDEX + self.read_configuration() + scheme, netloc, path, params, query, frag = urlparse(self.url) + if params or query or frag or scheme not in ('http', 'https'): + raise DistlibException('invalid repository: %s' % self.url) + self.password_handler = None + self.ssl_verifier = None + self.gpg = None + self.gpg_home = None + self.rpc_proxy = None + with open(os.devnull, 'w') as sink: + for s in ('gpg2', 'gpg'): + try: + rc = subprocess.check_call([s, '--version'], stdout=sink, + stderr=sink) + if rc == 0: + self.gpg = s + break + except OSError: + pass + + def _get_pypirc_command(self): + """ + Get the distutils command for interacting with PyPI configurations. + :return: the command. + """ + from distutils.core import Distribution + from distutils.config import PyPIRCCommand + d = Distribution() + return PyPIRCCommand(d) + + def read_configuration(self): + """ + Read the PyPI access configuration as supported by distutils, getting + PyPI to do the acutal work. This populates ``username``, ``password``, + ``realm`` and ``url`` attributes from the configuration. + """ + # get distutils to do the work + c = self._get_pypirc_command() + c.repository = self.url + cfg = c._read_pypirc() + self.username = cfg.get('username') + self.password = cfg.get('password') + self.realm = cfg.get('realm', 'pypi') + self.url = cfg.get('repository', self.url) + + def save_configuration(self): + """ + Save the PyPI access configuration. You must have set ``username`` and + ``password`` attributes before calling this method. + + Again, distutils is used to do the actual work. + """ + self.check_credentials() + # get distutils to do the work + c = self._get_pypirc_command() + c._store_pypirc(self.username, self.password) + + def check_credentials(self): + """ + Check that ``username`` and ``password`` have been set, and raise an + exception if not. 
+ """ + if self.username is None or self.password is None: + raise DistlibException('username and password must be set') + pm = HTTPPasswordMgr() + _, netloc, _, _, _, _ = urlparse(self.url) + pm.add_password(self.realm, netloc, self.username, self.password) + self.password_handler = HTTPBasicAuthHandler(pm) + + def register(self, metadata): + """ + Register a distribution on PyPI, using the provided metadata. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the distribution to be + registered. + :return: The HTTP response received from PyPI upon submission of the + request. + """ + self.check_credentials() + metadata.validate() + d = metadata.todict() + d[':action'] = 'verify' + request = self.encode_request(d.items(), []) + response = self.send_request(request) + d[':action'] = 'submit' + request = self.encode_request(d.items(), []) + return self.send_request(request) + + def _reader(self, name, stream, outbuf): + """ + Thread runner for reading lines of from a subprocess into a buffer. + + :param name: The logical name of the stream (used for logging only). + :param stream: The stream to read from. This will typically a pipe + connected to the output stream of a subprocess. + :param outbuf: The list to append the read lines to. + """ + while True: + s = stream.readline() + if not s: + break + s = s.decode('utf-8').rstrip() + outbuf.append(s) + logger.debug('%s: %s' % (name, s)) + stream.close() + + def get_sign_command(self, filename, signer, sign_password, + keystore=None): + """ + Return a suitable command for signing a file. + + :param filename: The pathname to the file to be signed. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. 
+ :return: The signing command as a list suitable to be + passed to :class:`subprocess.Popen`. + """ + cmd = [self.gpg, '--status-fd', '2', '--no-tty'] + if keystore is None: + keystore = self.gpg_home + if keystore: + cmd.extend(['--homedir', keystore]) + if sign_password is not None: + cmd.extend(['--batch', '--passphrase-fd', '0']) + td = tempfile.mkdtemp() + sf = os.path.join(td, os.path.basename(filename) + '.asc') + cmd.extend(['--detach-sign', '--armor', '--local-user', + signer, '--output', sf, filename]) + logger.debug('invoking: %s', ' '.join(cmd)) + return cmd, sf + + def run_command(self, cmd, input_data=None): + """ + Run a command in a child process , passing it any input data specified. + + :param cmd: The command to run. + :param input_data: If specified, this must be a byte string containing + data to be sent to the child process. + :return: A tuple consisting of the subprocess' exit code, a list of + lines read from the subprocess' ``stdout``, and a list of + lines read from the subprocess' ``stderr``. + """ + kwargs = { + 'stdout': subprocess.PIPE, + 'stderr': subprocess.PIPE, + } + if input_data is not None: + kwargs['stdin'] = subprocess.PIPE + stdout = [] + stderr = [] + p = subprocess.Popen(cmd, **kwargs) + # We don't use communicate() here because we may need to + # get clever with interacting with the command + t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) + t1.start() + t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) + t2.start() + if input_data is not None: + p.stdin.write(input_data) + p.stdin.close() + + p.wait() + t1.join() + t2.join() + return p.returncode, stdout, stderr + + def sign_file(self, filename, signer, sign_password, keystore=None): + """ + Sign a file. + + :param filename: The pathname to the file to be signed. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. 
+ :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The absolute pathname of the file where the signature is + stored. + """ + cmd, sig_file = self.get_sign_command(filename, signer, sign_password, + keystore) + rc, stdout, stderr = self.run_command(cmd, + sign_password.encode('utf-8')) + if rc != 0: + raise DistlibException('sign command failed with error ' + 'code %s' % rc) + return sig_file + + def upload_file(self, metadata, filename, signer=None, sign_password=None, + filetype='sdist', pyversion='source', keystore=None): + """ + Upload a release file to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the file to be uploaded. + :param filename: The pathname of the file to be uploaded. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param filetype: The type of the file being uploaded. This is the + distutils command which produced that file, e.g. + ``sdist`` or ``bdist_wheel``. + :param pyversion: The version of Python which the release relates + to. For code compatible with any Python, this would + be ``source``, otherwise it would be e.g. ``3.2``. + :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The HTTP response received from PyPI upon submission of the + request. 
+ """ + self.check_credentials() + if not os.path.exists(filename): + raise DistlibException('not found: %s' % filename) + metadata.validate() + d = metadata.todict() + sig_file = None + if signer: + if not self.gpg: + logger.warning('no signing program available - not signed') + else: + sig_file = self.sign_file(filename, signer, sign_password, + keystore) + with open(filename, 'rb') as f: + file_data = f.read() + md5_digest = hashlib.md5(file_data).hexdigest() + sha256_digest = hashlib.sha256(file_data).hexdigest() + d.update({ + ':action': 'file_upload', + 'protcol_version': '1', + 'filetype': filetype, + 'pyversion': pyversion, + 'md5_digest': md5_digest, + 'sha256_digest': sha256_digest, + }) + files = [('content', os.path.basename(filename), file_data)] + if sig_file: + with open(sig_file, 'rb') as f: + sig_data = f.read() + files.append(('gpg_signature', os.path.basename(sig_file), + sig_data)) + shutil.rmtree(os.path.dirname(sig_file)) + request = self.encode_request(d.items(), files) + return self.send_request(request) + + def upload_documentation(self, metadata, doc_dir): + """ + Upload documentation to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the documentation to be + uploaded. + :param doc_dir: The pathname of the directory which contains the + documentation. This should be the directory that + contains the ``index.html`` for the documentation. + :return: The HTTP response received from PyPI upon submission of the + request. 
+ """ + self.check_credentials() + if not os.path.isdir(doc_dir): + raise DistlibException('not a directory: %r' % doc_dir) + fn = os.path.join(doc_dir, 'index.html') + if not os.path.exists(fn): + raise DistlibException('not found: %r' % fn) + metadata.validate() + name, version = metadata.name, metadata.version + zip_data = zip_dir(doc_dir).getvalue() + fields = [(':action', 'doc_upload'), + ('name', name), ('version', version)] + files = [('content', name, zip_data)] + request = self.encode_request(fields, files) + return self.send_request(request) + + def get_verify_command(self, signature_filename, data_filename, + keystore=None): + """ + Return a suitable command for verifying a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: The verifying command as a list suitable to be + passed to :class:`subprocess.Popen`. + """ + cmd = [self.gpg, '--status-fd', '2', '--no-tty'] + if keystore is None: + keystore = self.gpg_home + if keystore: + cmd.extend(['--homedir', keystore]) + cmd.extend(['--verify', signature_filename, data_filename]) + logger.debug('invoking: %s', ' '.join(cmd)) + return cmd + + def verify_signature(self, signature_filename, data_filename, + keystore=None): + """ + Verify a signature for a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: True if the signature was verified, else False. 
+ """ + if not self.gpg: + raise DistlibException('verification unavailable because gpg ' + 'unavailable') + cmd = self.get_verify_command(signature_filename, data_filename, + keystore) + rc, stdout, stderr = self.run_command(cmd) + if rc not in (0, 1): + raise DistlibException('verify command failed with error ' + 'code %s' % rc) + return rc == 0 + + def download_file(self, url, destfile, digest=None, reporthook=None): + """ + This is a convenience method for downloading a file from an URL. + Normally, this will be a file from the index, though currently + no check is made for this (i.e. a file can be downloaded from + anywhere). + + The method is just like the :func:`urlretrieve` function in the + standard library, except that it allows digest computation to be + done during download and checking that the downloaded data + matched any expected value. + + :param url: The URL of the file to be downloaded (assumed to be + available via an HTTP GET request). + :param destfile: The pathname where the downloaded file is to be + saved. + :param digest: If specified, this must be a (hasher, value) + tuple, where hasher is the algorithm used (e.g. + ``'md5'``) and ``value`` is the expected value. + :param reporthook: The same as for :func:`urlretrieve` in the + standard library. + """ + if digest is None: + digester = None + logger.debug('No digest specified') + else: + if isinstance(digest, (list, tuple)): + hasher, digest = digest + else: + hasher = 'md5' + digester = getattr(hashlib, hasher)() + logger.debug('Digest specified: %s' % digest) + # The following code is equivalent to urlretrieve. + # We need to do it this way so that we can compute the + # digest of the file as we go. 
+ with open(destfile, 'wb') as dfp: + # addinfourl is not a context manager on 2.x + # so we have to use try/finally + sfp = self.send_request(Request(url)) + try: + headers = sfp.info() + blocksize = 8192 + size = -1 + read = 0 + blocknum = 0 + if "content-length" in headers: + size = int(headers["Content-Length"]) + if reporthook: + reporthook(blocknum, blocksize, size) + while True: + block = sfp.read(blocksize) + if not block: + break + read += len(block) + dfp.write(block) + if digester: + digester.update(block) + blocknum += 1 + if reporthook: + reporthook(blocknum, blocksize, size) + finally: + sfp.close() + + # check that we got the whole file, if we can + if size >= 0 and read < size: + raise DistlibException( + 'retrieval incomplete: got only %d out of %d bytes' + % (read, size)) + # if we have a digest, it must match. + if digester: + actual = digester.hexdigest() + if digest != actual: + raise DistlibException('%s digest mismatch for %s: expected ' + '%s, got %s' % (hasher, destfile, + digest, actual)) + logger.debug('Digest verified: %s', digest) + + def send_request(self, req): + """ + Send a standard library :class:`Request` to PyPI and return its + response. + + :param req: The request to send. + :return: The HTTP response from PyPI (a standard library HTTPResponse). + """ + handlers = [] + if self.password_handler: + handlers.append(self.password_handler) + if self.ssl_verifier: + handlers.append(self.ssl_verifier) + opener = build_opener(*handlers) + return opener.open(req) + + def encode_request(self, fields, files): + """ + Encode fields and files for posting to an HTTP server. + + :param fields: The fields to send as a list of (fieldname, value) + tuples. + :param files: The files to send as a list of (fieldname, filename, + file_bytes) tuple. 
+ """ + # Adapted from packaging, which in turn was adapted from + # http://code.activestate.com/recipes/146306 + + parts = [] + boundary = self.boundary + for k, values in fields: + if not isinstance(values, (list, tuple)): + values = [values] + + for v in values: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"' % + k).encode('utf-8'), + b'', + v.encode('utf-8'))) + for key, filename, value in files: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"; filename="%s"' % + (key, filename)).encode('utf-8'), + b'', + value)) + + parts.extend((b'--' + boundary + b'--', b'')) + + body = b'\r\n'.join(parts) + ct = b'multipart/form-data; boundary=' + boundary + headers = { + 'Content-type': ct, + 'Content-length': str(len(body)) + } + return Request(self.url, body, headers) + + def search(self, terms, operator=None): + if isinstance(terms, string_types): + terms = {'name': terms} + if self.rpc_proxy is None: + self.rpc_proxy = ServerProxy(self.url, timeout=3.0) + return self.rpc_proxy.search(terms, operator or 'and') diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/locators.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/locators.py new file mode 100644 index 00000000..71f81a32 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/locators.py @@ -0,0 +1,1233 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2014 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# + +import gzip +from io import BytesIO +import json +import logging +import os +import posixpath +import re +try: + import threading +except ImportError: + import dummy_threading as threading +import zlib + +from . 
import DistlibException +from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, + queue, quote, unescape, string_types, build_opener, + HTTPRedirectHandler as BaseRedirectHandler, + Request, HTTPError, URLError) +from .database import Distribution, DistributionPath, make_dist +from .metadata import Metadata +from .util import (cached_property, parse_credentials, ensure_slash, + split_filename, get_project_data, parse_requirement, + parse_name_and_version, ServerProxy) +from .version import get_scheme, UnsupportedVersionError +from .wheel import Wheel, is_compatible + +logger = logging.getLogger(__name__) + +HASHER_HASH = re.compile('^(\w+)=([a-f0-9]+)') +CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) +HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') +DEFAULT_INDEX = 'http://python.org/pypi' + +def get_all_distribution_names(url=None): + """ + Return all distribution names known by an index. + :param url: The URL of the index. + :return: A list of all known distribution names. + """ + if url is None: + url = DEFAULT_INDEX + client = ServerProxy(url, timeout=3.0) + return client.list_packages() + +class RedirectHandler(BaseRedirectHandler): + """ + A class to work around a bug in some Python 3.2.x releases. + """ + # There's a bug in the base version for some 3.2.x + # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header + # returns e.g. /abc, it bails because it says the scheme '' + # is bogus, when actually it should use the request's + # URL for the scheme. See Python issue #13696. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. 
+ newurl = None + for key in ('location', 'uri'): + if key in headers: + newurl = headers[key] + break + if newurl is None: + return + urlparts = urlparse(newurl) + if urlparts.scheme == '': + newurl = urljoin(req.get_full_url(), newurl) + if hasattr(headers, 'replace_header'): + headers.replace_header(key, newurl) + else: + headers[key] = newurl + return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, + headers) + + http_error_301 = http_error_303 = http_error_307 = http_error_302 + +class Locator(object): + """ + A base class for locators - things that locate distributions. + """ + source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') + binary_extensions = ('.egg', '.exe', '.whl') + excluded_extensions = ('.pdf',) + + # A list of tags indicating which wheels you want to match. The default + # value of None matches against the tags compatible with the running + # Python. If you want to match other values, set wheel_tags on a locator + # instance to a list of tuples (pyver, abi, arch) which you want to match. + wheel_tags = None + + downloadable_extensions = source_extensions + ('.whl',) + + def __init__(self, scheme='default'): + """ + Initialise an instance. + :param scheme: Because locators look for most recent versions, they + need to know the version scheme to use. This specifies + the current PEP-recommended scheme - use ``'legacy'`` + if you need to support existing distributions on PyPI. + """ + self._cache = {} + self.scheme = scheme + # Because of bugs in some of the handlers on some of the platforms, + # we use our own opener rather than just using urlopen. + self.opener = build_opener(RedirectHandler()) + # If get_project() is called from locate(), the matcher instance + # is set from the requirement passed to locate(). See issue #18 for + # why this can be useful to know. 
+ self.matcher = None + + def clear_cache(self): + self._cache.clear() + + def _get_scheme(self): + return self._scheme + + def _set_scheme(self, value): + self._scheme = value + + scheme = property(_get_scheme, _set_scheme) + + def _get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This should be implemented in subclasses. + + If called from a locate() request, self.matcher will be set to a + matcher for the requirement to satisfy, otherwise it will be None. + """ + raise NotImplementedError('Please implement in the subclass') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Please implement in the subclass') + + def get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This calls _get_project to do all the work, and just implements a caching layer on top. + """ + if self._cache is None: + result = self._get_project(name) + elif name in self._cache: + result = self._cache[name] + else: + result = self._get_project(name) + self._cache[name] = result + return result + + def score_url(self, url): + """ + Give an url a score which can be used to choose preferred URLs + for a given project release. + """ + t = urlparse(url) + return (t.scheme != 'https', 'pypi.python.org' in t.netloc, + posixpath.basename(t.path)) + + def prefer_url(self, url1, url2): + """ + Choose one of two URLs where both are candidates for distribution + archives for the same version of a distribution (for example, + .tar.gz vs. zip). + + The current implement favours http:// URLs over https://, archives + from PyPI over those from other locations and then the archive name. 
+ """ + result = url2 + if url1: + s1 = self.score_url(url1) + s2 = self.score_url(url2) + if s1 > s2: + result = url1 + if result != url2: + logger.debug('Not replacing %r with %r', url1, url2) + else: + logger.debug('Replacing %r with %r', url1, url2) + return result + + def split_filename(self, filename, project_name): + """ + Attempt to split a filename in project name, version and Python version. + """ + return split_filename(filename, project_name) + + def convert_url_to_download_info(self, url, project_name): + """ + See if a URL is a candidate for a download URL for a project (the URL + has typically been scraped from an HTML page). + + If it is, a dictionary is returned with keys "name", "version", + "filename" and "url"; otherwise, None is returned. + """ + def same_project(name1, name2): + name1, name2 = name1.lower(), name2.lower() + if name1 == name2: + result = True + else: + # distribute replaces '-' by '_' in project names, so it + # can tell where the version starts in a filename. 
+ result = name1.replace('_', '-') == name2.replace('_', '-') + return result + + result = None + scheme, netloc, path, params, query, frag = urlparse(url) + if frag.lower().startswith('egg='): + logger.debug('%s: version hint in fragment: %r', + project_name, frag) + m = HASHER_HASH.match(frag) + if m: + algo, digest = m.groups() + else: + algo, digest = None, None + origpath = path + if path and path[-1] == '/': + path = path[:-1] + if path.endswith('.whl'): + try: + wheel = Wheel(path) + if is_compatible(wheel, self.wheel_tags): + if project_name is None: + include = True + else: + include = same_project(wheel.name, project_name) + if include: + result = { + 'name': wheel.name, + 'version': wheel.version, + 'filename': wheel.filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + 'python-version': ', '.join( + ['.'.join(list(v[2:])) for v in wheel.pyver]), + } + except Exception as e: + logger.warning('invalid path for wheel: %s', path) + elif path.endswith(self.downloadable_extensions): + path = filename = posixpath.basename(path) + for ext in self.downloadable_extensions: + if path.endswith(ext): + path = path[:-len(ext)] + t = self.split_filename(path, project_name) + if not t: + logger.debug('No match for project/version: %s', path) + else: + name, version, pyver = t + if not project_name or same_project(project_name, name): + result = { + 'name': name, + 'version': version, + 'filename': filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + #'packagetype': 'sdist', + } + if pyver: + result['python-version'] = pyver + break + if result and algo: + result['%s_digest' % algo] = digest + return result + + def _get_digest(self, info): + """ + Get a digest from a dictionary by looking at keys of the form + 'algo_digest'. + + Returns a 2-tuple (algo, digest) if found, else None. Currently + looks only for SHA256, then MD5. 
+ """ + result = None + for algo in ('sha256', 'md5'): + key = '%s_digest' % algo + if key in info: + result = (algo, info[key]) + break + return result + + def _update_version_data(self, result, info): + """ + Update a result dictionary (the final result from _get_project) with a + dictionary for a specific version, which typically holds information + gleaned from a filename or URL for an archive for the distribution. + """ + name = info.pop('name') + version = info.pop('version') + if version in result: + dist = result[version] + md = dist.metadata + else: + dist = make_dist(name, version, scheme=self.scheme) + md = dist.metadata + dist.digest = digest = self._get_digest(info) + url = info['url'] + result['digests'][url] = digest + if md.source_url != info['url']: + md.source_url = self.prefer_url(md.source_url, url) + result['urls'].setdefault(version, set()).add(url) + dist.locator = self + result[version] = dist + + def locate(self, requirement, prereleases=False): + """ + Find the most recent distribution which matches the given + requirement. + + :param requirement: A requirement of the form 'foo (1.0)' or perhaps + 'foo (>= 1.0, < 2.0, != 1.3)' + :param prereleases: If ``True``, allow pre-release versions + to be located. Otherwise, pre-release versions + are not returned. + :return: A :class:`Distribution` instance, or ``None`` if no such + distribution could be located. 
+ """ + result = None + r = parse_requirement(requirement) + if r is None: + raise DistlibException('Not a valid requirement: %r' % requirement) + scheme = get_scheme(self.scheme) + self.matcher = matcher = scheme.matcher(r.requirement) + logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) + versions = self.get_project(r.name) + if versions: + # sometimes, versions are invalid + slist = [] + vcls = matcher.version_class + for k in versions: + try: + if not matcher.match(k): + logger.debug('%s did not match %r', matcher, k) + else: + if prereleases or not vcls(k).is_prerelease: + slist.append(k) + else: + logger.debug('skipping pre-release ' + 'version %s of %s', k, matcher.name) + except Exception: + logger.warning('error matching %s with %r', matcher, k) + pass # slist.append(k) + if len(slist) > 1: + slist = sorted(slist, key=scheme.key) + if slist: + logger.debug('sorted list: %s', slist) + version = slist[-1] + result = versions[version] + if result: + if r.extras: + result.extras = r.extras + result.download_urls = versions.get('urls', {}).get(version, set()) + d = {} + sd = versions.get('digests', {}) + for url in result.download_urls: + if url in sd: + d[url] = sd[url] + result.digests = d + self.matcher = None + return result + + +class PyPIRPCLocator(Locator): + """ + This locator uses XML-RPC to locate distributions. It therefore + cannot be used with simple mirrors (that only mirror file content). + """ + def __init__(self, url, **kwargs): + """ + Initialise an instance. + + :param url: The URL to use for XML-RPC. + :param kwargs: Passed to the superclass constructor. + """ + super(PyPIRPCLocator, self).__init__(**kwargs) + self.base_url = url + self.client = ServerProxy(url, timeout=3.0) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. 
+ """ + return set(self.client.list_packages()) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + versions = self.client.package_releases(name, True) + for v in versions: + urls = self.client.release_urls(name, v) + data = self.client.release_data(name, v) + metadata = Metadata(scheme=self.scheme) + metadata.name = data['name'] + metadata.version = data['version'] + metadata.license = data.get('license') + metadata.keywords = data.get('keywords', []) + metadata.summary = data.get('summary') + dist = Distribution(metadata) + if urls: + info = urls[0] + metadata.source_url = info['url'] + dist.digest = self._get_digest(info) + dist.locator = self + result[v] = dist + for info in urls: + url = info['url'] + digest = self._get_digest(info) + result['urls'].setdefault(v, set()).add(url) + result['digests'][url] = digest + return result + +class PyPIJSONLocator(Locator): + """ + This locator uses PyPI's JSON interface. It's very limited in functionality + and probably not worth using. + """ + def __init__(self, url, **kwargs): + super(PyPIJSONLocator, self).__init__(**kwargs) + self.base_url = ensure_slash(url) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. 
+ """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + url = urljoin(self.base_url, '%s/json' % quote(name)) + try: + resp = self.opener.open(url) + data = resp.read().decode() # for now + d = json.loads(data) + md = Metadata(scheme=self.scheme) + data = d['info'] + md.name = data['name'] + md.version = data['version'] + md.license = data.get('license') + md.keywords = data.get('keywords', []) + md.summary = data.get('summary') + dist = Distribution(md) + urls = d['urls'] + if urls: + info = urls[0] + md.source_url = info['url'] + dist.digest = self._get_digest(info) + dist.locator = self + result[md.version] = dist + for info in urls: + url = info['url'] + result['urls'].setdefault(md.version, set()).add(url) + result['digests'][url] = digest + except Exception as e: + logger.exception('JSON fetch failed: %s', e) + return result + + +class Page(object): + """ + This class represents a scraped HTML page. + """ + # The following slightly hairy-looking regex just looks for the contents of + # an anchor link, which has an attribute "href" either immediately preceded + # or immediately followed by a "rel" attribute. The attribute values can be + # declared with double quotes, single quotes or no quotes - which leads to + # the length of the expression. + _href = re.compile(""" +(rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s\n]*))\s+)? +href\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s\n]*)) +(\s+rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s\n]*)))? +""", re.I | re.S | re.X) + _base = re.compile(r"""]+)""", re.I | re.S) + + def __init__(self, data, url): + """ + Initialise an instance with the Unicode page contents and the URL they + came from. 
+ """ + self.data = data + self.base_url = self.url = url + m = self._base.search(self.data) + if m: + self.base_url = m.group(1) + + _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) + + @cached_property + def links(self): + """ + Return the URLs of all the links on a page together with information + about their "rel" attribute, for determining which ones to treat as + downloads and which ones to queue for further scraping. + """ + def clean(url): + "Tidy up an URL." + scheme, netloc, path, params, query, frag = urlparse(url) + return urlunparse((scheme, netloc, quote(path), + params, query, frag)) + + result = set() + for match in self._href.finditer(self.data): + d = match.groupdict('') + rel = (d['rel1'] or d['rel2'] or d['rel3'] or + d['rel4'] or d['rel5'] or d['rel6']) + url = d['url1'] or d['url2'] or d['url3'] + url = urljoin(self.base_url, url) + url = unescape(url) + url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) + result.add((url, rel)) + # We sort the result, hoping to bring the most recent versions + # to the front + result = sorted(result, key=lambda t: t[0], reverse=True) + return result + + +class SimpleScrapingLocator(Locator): + """ + A locator which scrapes HTML pages to locate downloads for a distribution. + This runs multiple threads to do the I/O; performance is at least as good + as pip's PackageFinder, which works in an analogous fashion. + """ + + # These are used to deal with various Content-Encoding schemes. + decoders = { + 'deflate': zlib.decompress, + 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(), + 'none': lambda b: b, + } + + def __init__(self, url, timeout=None, num_workers=10, **kwargs): + """ + Initialise an instance. + :param url: The root URL to use for scraping. + :param timeout: The timeout, in seconds, to be applied to requests. + This defaults to ``None`` (no timeout specified). + :param num_workers: The number of worker threads you want to do I/O, + This defaults to 10. 
+ :param kwargs: Passed to the superclass. + """ + super(SimpleScrapingLocator, self).__init__(**kwargs) + self.base_url = ensure_slash(url) + self.timeout = timeout + self._page_cache = {} + self._seen = set() + self._to_fetch = queue.Queue() + self._bad_hosts = set() + self.skip_externals = False + self.num_workers = num_workers + self._lock = threading.RLock() + # See issue #45: we need to be resilient when the locator is used + # in a thread, e.g. with concurrent.futures. We can't use self._lock + # as it is for coordinating our internal threads - the ones created + # in _prepare_threads. + self._gplock = threading.RLock() + + def _prepare_threads(self): + """ + Threads are created only when get_project is called, and terminate + before it returns. They are there primarily to parallelise I/O (i.e. + fetching web pages). + """ + self._threads = [] + for i in range(self.num_workers): + t = threading.Thread(target=self._fetch) + t.setDaemon(True) + t.start() + self._threads.append(t) + + def _wait_threads(self): + """ + Tell all the threads to terminate (by sending a sentinel value) and + wait for them to do so. + """ + # Note that you need two loops, since you can't say which + # thread will get each sentinel + for t in self._threads: + self._to_fetch.put(None) # sentinel + for t in self._threads: + t.join() + self._threads = [] + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + with self._gplock: + self.result = result + self.project_name = name + url = urljoin(self.base_url, '%s/' % quote(name)) + self._seen.clear() + self._page_cache.clear() + self._prepare_threads() + try: + logger.debug('Queueing %s', url) + self._to_fetch.put(url) + self._to_fetch.join() + finally: + self._wait_threads() + del self.result + return result + + platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|' + r'win(32|-amd64)|macosx-?\d+)\b', re.I) + + def _is_platform_dependent(self, url): + """ + Does an URL refer to a platform-specific download? 
+ """ + return self.platform_dependent.search(url) + + def _process_download(self, url): + """ + See if an URL is a suitable download for a project. + + If it is, register information in the result dictionary (for + _get_project) about the specific version it's for. + + Note that the return value isn't actually used other than as a boolean + value. + """ + if self._is_platform_dependent(url): + info = None + else: + info = self.convert_url_to_download_info(url, self.project_name) + logger.debug('process_download: %s -> %s', url, info) + if info: + with self._lock: # needed because self.result is shared + self._update_version_data(self.result, info) + return info + + def _should_queue(self, link, referrer, rel): + """ + Determine whether a link URL from a referring page and with a + particular "rel" attribute should be queued for scraping. + """ + scheme, netloc, path, _, _, _ = urlparse(link) + if path.endswith(self.source_extensions + self.binary_extensions + + self.excluded_extensions): + result = False + elif self.skip_externals and not link.startswith(self.base_url): + result = False + elif not referrer.startswith(self.base_url): + result = False + elif rel not in ('homepage', 'download'): + result = False + elif scheme not in ('http', 'https', 'ftp'): + result = False + elif self._is_platform_dependent(link): + result = False + else: + host = netloc.split(':', 1)[0] + if host.lower() == 'localhost': + result = False + else: + result = True + logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, + referrer, result) + return result + + def _fetch(self): + """ + Get a URL to fetch from the work queue, get the HTML page, examine its + links for download candidates and candidates for further scraping. + + This is a handy method to run in a thread. + """ + while True: + url = self._to_fetch.get() + try: + if url: + page = self.get_page(url) + if page is None: # e.g. 
after an error + continue + for link, rel in page.links: + if link not in self._seen: + self._seen.add(link) + if (not self._process_download(link) and + self._should_queue(link, url, rel)): + logger.debug('Queueing %s from %s', link, url) + self._to_fetch.put(link) + finally: + # always do this, to avoid hangs :-) + self._to_fetch.task_done() + if not url: + #logger.debug('Sentinel seen, quitting.') + break + + def get_page(self, url): + """ + Get the HTML for an URL, possibly from an in-memory cache. + + XXX TODO Note: this cache is never actually cleared. It's assumed that + the data won't get stale over the lifetime of a locator instance (not + necessarily true for the default_locator). + """ + # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api + scheme, netloc, path, _, _, _ = urlparse(url) + if scheme == 'file' and os.path.isdir(url2pathname(path)): + url = urljoin(ensure_slash(url), 'index.html') + + if url in self._page_cache: + result = self._page_cache[url] + logger.debug('Returning %s from cache: %s', url, result) + else: + host = netloc.split(':', 1)[0] + result = None + if host in self._bad_hosts: + logger.debug('Skipping %s due to bad host %s', url, host) + else: + req = Request(url, headers={'Accept-encoding': 'identity'}) + try: + logger.debug('Fetching %s', url) + resp = self.opener.open(req, timeout=self.timeout) + logger.debug('Fetched %s', url) + headers = resp.info() + content_type = headers.get('Content-Type', '') + if HTML_CONTENT_TYPE.match(content_type): + final_url = resp.geturl() + data = resp.read() + encoding = headers.get('Content-Encoding') + if encoding: + decoder = self.decoders[encoding] # fail if not found + data = decoder(data) + encoding = 'utf-8' + m = CHARSET.search(content_type) + if m: + encoding = m.group(1) + try: + data = data.decode(encoding) + except UnicodeError: + data = data.decode('latin-1') # fallback + result = Page(data, final_url) + self._page_cache[final_url] = result + except HTTPError as 
e: + if e.code != 404: + logger.exception('Fetch failed: %s: %s', url, e) + except URLError as e: + logger.exception('Fetch failed: %s: %s', url, e) + with self._lock: + self._bad_hosts.add(host) + except Exception as e: + logger.exception('Fetch failed: %s: %s', url, e) + finally: + self._page_cache[url] = result # even if None (failure) + return result + + _distname_re = re.compile(']*>([^<]+)<') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + page = self.get_page(self.base_url) + if not page: + raise DistlibException('Unable to get %s' % self.base_url) + for match in self._distname_re.finditer(page.data): + result.add(match.group(1)) + return result + +class DirectoryLocator(Locator): + """ + This class locates distributions in a directory tree. + """ + + def __init__(self, path, **kwargs): + """ + Initialise an instance. + :param path: The root of the directory tree to search. + :param kwargs: Passed to the superclass constructor, + except for: + * recursive - if True (the default), subdirectories are + recursed into. If False, only the top-level directory + is searched, + """ + self.recursive = kwargs.pop('recursive', True) + super(DirectoryLocator, self).__init__(**kwargs) + path = os.path.abspath(path) + if not os.path.isdir(path): + raise DistlibException('Not a directory: %r' % path) + self.base_dir = path + + def should_include(self, filename, parent): + """ + Should a filename be considered as a candidate for a distribution + archive? As well as the filename, the directory which contains it + is provided, though not used by the current implementation. 
+ """ + return filename.endswith(self.downloadable_extensions) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, name) + if info: + self._update_version_data(result, info) + if not self.recursive: + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, None) + if info: + result.add(info['name']) + if not self.recursive: + break + return result + +class JSONLocator(Locator): + """ + This locator uses special extended metadata (not available on PyPI) and is + the basis of performant dependency resolution in distlib. Other locators + require archive downloads before dependencies can be determined! As you + might imagine, that can be slow. + """ + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. 
+ """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + data = get_project_data(name) + if data: + for info in data.get('files', []): + if info['ptype'] != 'sdist' or info['pyversion'] != 'source': + continue + # We don't store summary in project metadata as it makes + # the data bigger for no benefit during dependency + # resolution + dist = make_dist(data['name'], info['version'], + summary=data.get('summary', + 'Placeholder for summary'), + scheme=self.scheme) + md = dist.metadata + md.source_url = info['url'] + # TODO SHA256 digest + if 'digest' in info and info['digest']: + dist.digest = ('md5', info['digest']) + md.dependencies = info.get('requirements', {}) + dist.exports = info.get('exports', {}) + result[dist.version] = dist + result['urls'].setdefault(dist.version, set()).add(info['url']) + return result + +class DistPathLocator(Locator): + """ + This locator finds installed distributions in a path. It can be useful for + adding to an :class:`AggregatingLocator`. + """ + def __init__(self, distpath, **kwargs): + """ + Initialise an instance. + + :param distpath: A :class:`DistributionPath` instance to search. + """ + super(DistPathLocator, self).__init__(**kwargs) + assert isinstance(distpath, DistributionPath) + self.distpath = distpath + + def _get_project(self, name): + dist = self.distpath.get_distribution(name) + if dist is None: + result = {} + else: + result = { + dist.version: dist, + 'urls': {dist.version: set([dist.source_url])} + } + return result + + +class AggregatingLocator(Locator): + """ + This class allows you to chain and/or merge a list of locators. + """ + def __init__(self, *locators, **kwargs): + """ + Initialise an instance. + + :param locators: The list of locators to search. + :param kwargs: Passed to the superclass constructor, + except for: + * merge - if False (the default), the first successful + search from any of the locators is returned. 
If True, + the results from all locators are merged (this can be + slow). + """ + self.merge = kwargs.pop('merge', False) + self.locators = locators + super(AggregatingLocator, self).__init__(**kwargs) + + def clear_cache(self): + super(AggregatingLocator, self).clear_cache() + for locator in self.locators: + locator.clear_cache() + + def _set_scheme(self, value): + self._scheme = value + for locator in self.locators: + locator.scheme = value + + scheme = property(Locator.scheme.fget, _set_scheme) + + def _get_project(self, name): + result = {} + for locator in self.locators: + d = locator.get_project(name) + if d: + if self.merge: + files = result.get('urls', {}) + digests = result.get('digests', {}) + # next line could overwrite result['urls'], result['digests'] + result.update(d) + df = result.get('urls') + if files and df: + for k, v in files.items(): + if k in df: + df[k] |= v + else: + df[k] = v + dd = result.get('digests') + if digests and dd: + dd.update(digests) + else: + # See issue #18. If any dists are found and we're looking + # for specific constraints, we only return something if + # a match is found. For example, if a DirectoryLocator + # returns just foo (1.0) while we're looking for + # foo (>= 2.0), we'll pretend there was nothing there so + # that subsequent locators can be queried. Otherwise we + # would just return foo (1.0) which would then lead to a + # failure to find foo (>= 2.0), because other locators + # weren't searched. Note that this only matters when + # merge=False. + if self.matcher is None: + found = True + else: + found = False + for k in d: + if self.matcher.match(k): + found = True + break + if found: + result = d + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. 
+ """ + result = set() + for locator in self.locators: + try: + result |= locator.get_distribution_names() + except NotImplementedError: + pass + return result + + +# We use a legacy scheme simply because most of the dists on PyPI use legacy +# versions which don't conform to PEP 426 / PEP 440. +default_locator = AggregatingLocator( + JSONLocator(), + SimpleScrapingLocator('https://pypi.python.org/simple/', + timeout=3.0), + scheme='legacy') + +locate = default_locator.locate + +NAME_VERSION_RE = re.compile(r'(?P[\w-]+)\s*' + r'\(\s*(==\s*)?(?P[^)]+)\)$') + +class DependencyFinder(object): + """ + Locate dependencies for distributions. + """ + + def __init__(self, locator=None): + """ + Initialise an instance, using the specified locator + to locate distributions. + """ + self.locator = locator or default_locator + self.scheme = get_scheme(self.locator.scheme) + + def add_distribution(self, dist): + """ + Add a distribution to the finder. This will update internal information + about who provides what. + :param dist: The distribution to add. + """ + logger.debug('adding distribution %s', dist) + name = dist.key + self.dists_by_name[name] = dist + self.dists[(name, dist.version)] = dist + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + self.provided.setdefault(name, set()).add((version, dist)) + + def remove_distribution(self, dist): + """ + Remove a distribution from the finder. This will update internal + information about who provides what. + :param dist: The distribution to remove. 
+ """ + logger.debug('removing distribution %s', dist) + name = dist.key + del self.dists_by_name[name] + del self.dists[(name, dist.version)] + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Remove from provided: %s, %s, %s', name, version, dist) + s = self.provided[name] + s.remove((version, dist)) + if not s: + del self.provided[name] + + def get_matcher(self, reqt): + """ + Get a version matcher for a requirement. + :param reqt: The requirement + :type reqt: str + :return: A version matcher (an instance of + :class:`distlib.version.Matcher`). + """ + try: + matcher = self.scheme.matcher(reqt) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + name = reqt.split()[0] + matcher = self.scheme.matcher(name) + return matcher + + def find_providers(self, reqt): + """ + Find the distributions which can fulfill a requirement. + + :param reqt: The requirement. + :type reqt: str + :return: A set of distribution which can fulfill the requirement. + """ + matcher = self.get_matcher(reqt) + name = matcher.key # case-insensitive + result = set() + provided = self.provided + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + result.add(provider) + break + return result + + def try_to_replace(self, provider, other, problems): + """ + Attempt to replace one provider with another. This is typically used + when resolving dependencies from multiple sources, e.g. A requires + (B >= 1.0) while C requires (B >= 1.1). + + For successful replacement, ``provider`` must meet all the requirements + which ``other`` fulfills. + + :param provider: The provider we are trying to replace with. + :param other: The provider we're trying to replace. + :param problems: If False is returned, this will contain what + problems prevented replacement. 
This is currently + a tuple of the literal string 'cantreplace', + ``provider``, ``other`` and the set of requirements + that ``provider`` couldn't fulfill. + :return: True if we can replace ``other`` with ``provider``, else + False. + """ + rlist = self.reqts[other] + unmatched = set() + for s in rlist: + matcher = self.get_matcher(s) + if not matcher.match(provider.version): + unmatched.add(s) + if unmatched: + # can't replace other with provider + problems.add(('cantreplace', provider, other, + frozenset(unmatched))) + result = False + else: + # can replace other with provider + self.remove_distribution(other) + del self.reqts[other] + for s in rlist: + self.reqts.setdefault(provider, set()).add(s) + self.add_distribution(provider) + result = True + return result + + def find(self, requirement, meta_extras=None, prereleases=False): + """ + Find a distribution and all distributions it depends on. + + :param requirement: The requirement specifying the distribution to + find, or a Distribution instance. + :param meta_extras: A list of meta extras such as :test:, :build: and + so on. + :param prereleases: If ``True``, allow pre-release versions to be + returned - otherwise, don't return prereleases + unless they're all that's available. + + Return a set of :class:`Distribution` instances and a set of + problems. + + The distributions returned should be such that they have the + :attr:`required` attribute set to ``True`` if they were + from the ``requirement`` passed to ``find()``, and they have the + :attr:`build_time_dependency` attribute set to ``True`` unless they + are post-installation dependencies of the ``requirement``. + + The problems should be a tuple consisting of the string + ``'unsatisfied'`` and the requirement which couldn't be satisfied + by any distribution known to the locator. 
+ """ + + self.provided = {} + self.dists = {} + self.dists_by_name = {} + self.reqts = {} + + meta_extras = set(meta_extras or []) + if ':*:' in meta_extras: + meta_extras.remove(':*:') + # :meta: and :run: are implicitly included + meta_extras |= set([':test:', ':build:', ':dev:']) + + if isinstance(requirement, Distribution): + dist = odist = requirement + logger.debug('passed %s as requirement', odist) + else: + dist = odist = self.locator.locate(requirement, + prereleases=prereleases) + if dist is None: + raise DistlibException('Unable to locate %r' % requirement) + logger.debug('located %s', odist) + dist.requested = True + problems = set() + todo = set([dist]) + install_dists = set([odist]) + while todo: + dist = todo.pop() + name = dist.key # case-insensitive + if name not in self.dists_by_name: + self.add_distribution(dist) + else: + #import pdb; pdb.set_trace() + other = self.dists_by_name[name] + if other != dist: + self.try_to_replace(dist, other, problems) + + ireqts = dist.run_requires | dist.meta_requires + sreqts = dist.build_requires + ereqts = set() + if dist in install_dists: + for key in ('test', 'build', 'dev'): + e = ':%s:' % key + if e in meta_extras: + ereqts |= getattr(dist, '%s_requires' % key) + all_reqts = ireqts | sreqts | ereqts + for r in all_reqts: + providers = self.find_providers(r) + if not providers: + logger.debug('No providers found for %r', r) + provider = self.locator.locate(r, prereleases=prereleases) + # If no provider is found and we didn't consider + # prereleases, consider them now. 
+ if provider is None and not prereleases: + provider = self.locator.locate(r, prereleases=True) + if provider is None: + logger.debug('Cannot satisfy %r', r) + problems.add(('unsatisfied', r)) + else: + n, v = provider.key, provider.version + if (n, v) not in self.dists: + todo.add(provider) + providers.add(provider) + if r in ireqts and dist in install_dists: + install_dists.add(provider) + logger.debug('Adding %s to install_dists', + provider.name_and_version) + for p in providers: + name = p.key + if name not in self.dists_by_name: + self.reqts.setdefault(p, set()).add(r) + else: + other = self.dists_by_name[name] + if other != p: + # see if other can be replaced by p + self.try_to_replace(p, other, problems) + + dists = set(self.dists.values()) + for dist in dists: + dist.build_time_dependency = dist not in install_dists + if dist.build_time_dependency: + logger.debug('%s is a build-time dependency only.', + dist.name_and_version) + logger.debug('find done for %s', odist) + return dists, problems diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/manifest.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/manifest.py new file mode 100644 index 00000000..21cff45e --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/manifest.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Class representing the list of files in a distribution. + +Equivalent to distutils.filelist, but fixes some problems. +""" +import fnmatch +import logging +import os +import re + +from . 
import DistlibException +from .compat import fsdecode +from .util import convert_path + + +__all__ = ['Manifest'] + +logger = logging.getLogger(__name__) + +# a \ followed by some spaces + EOL +_COLLAPSE_PATTERN = re.compile('\\\w*\n', re.M) +_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) + + +class Manifest(object): + """A list of files built by on exploring the filesystem and filtered by + applying various patterns to what we find there. + """ + + def __init__(self, base=None): + """ + Initialise an instance. + + :param base: The base directory to explore under. + """ + self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) + self.prefix = self.base + os.sep + self.allfiles = None + self.files = set() + + # + # Public API + # + + def findall(self): + """Find all files under the base and set ``allfiles`` to the absolute + pathnames of files found. + """ + from stat import S_ISREG, S_ISDIR, S_ISLNK + + self.allfiles = allfiles = [] + root = self.base + stack = [root] + pop = stack.pop + push = stack.append + + while stack: + root = pop() + names = os.listdir(root) + + for name in names: + fullname = os.path.join(root, name) + + # Avoid excess stat calls -- just one will do, thank you! + stat = os.stat(fullname) + mode = stat.st_mode + if S_ISREG(mode): + allfiles.append(fsdecode(fullname)) + elif S_ISDIR(mode) and not S_ISLNK(mode): + push(fullname) + + def add(self, item): + """ + Add a file to the manifest. + + :param item: The pathname to add. This can be relative to the base. + """ + if not item.startswith(self.prefix): + item = os.path.join(self.base, item) + self.files.add(os.path.normpath(item)) + + def add_many(self, items): + """ + Add a list of files to the manifest. + + :param items: The pathnames to add. These can be relative to the base. 
+ """ + for item in items: + self.add(item) + + def sorted(self, wantdirs=False): + """ + Return sorted files in directory order + """ + + def add_dir(dirs, d): + dirs.add(d) + logger.debug('add_dir added %s', d) + if d != self.base: + parent, _ = os.path.split(d) + assert parent not in ('', '/') + add_dir(dirs, parent) + + result = set(self.files) # make a copy! + if wantdirs: + dirs = set() + for f in result: + add_dir(dirs, os.path.dirname(f)) + result |= dirs + return [os.path.join(*path_tuple) for path_tuple in + sorted(os.path.split(path) for path in result)] + + def clear(self): + """Clear all collected files.""" + self.files = set() + self.allfiles = [] + + def process_directive(self, directive): + """ + Process a directive which either adds some files from ``allfiles`` to + ``files``, or removes some files from ``files``. + + :param directive: The directive to process. This should be in a format + compatible with distutils ``MANIFEST.in`` files: + + http://docs.python.org/distutils/sourcedist.html#commands + """ + # Parse the line: split it up, make sure the right number of words + # is there, and return the relevant words. 'action' is always + # defined: it's the first word of the line. Which of the other + # three are defined depends on the action; it'll be either + # patterns, (dir and patterns), or (dirpattern). + action, patterns, thedir, dirpattern = self._parse_directive(directive) + + # OK, now we know that the action is valid and we have the + # right number of words on the line for that action -- so we + # can proceed with minimal error-checking. 
+ if action == 'include': + for pattern in patterns: + if not self._include_pattern(pattern, anchor=True): + logger.warning('no files found matching %r', pattern) + + elif action == 'exclude': + for pattern in patterns: + found = self._exclude_pattern(pattern, anchor=True) + #if not found: + # logger.warning('no previously-included files ' + # 'found matching %r', pattern) + + elif action == 'global-include': + for pattern in patterns: + if not self._include_pattern(pattern, anchor=False): + logger.warning('no files found matching %r ' + 'anywhere in distribution', pattern) + + elif action == 'global-exclude': + for pattern in patterns: + found = self._exclude_pattern(pattern, anchor=False) + #if not found: + # logger.warning('no previously-included files ' + # 'matching %r found anywhere in ' + # 'distribution', pattern) + + elif action == 'recursive-include': + for pattern in patterns: + if not self._include_pattern(pattern, prefix=thedir): + logger.warning('no files found matching %r ' + 'under directory %r', pattern, thedir) + + elif action == 'recursive-exclude': + for pattern in patterns: + found = self._exclude_pattern(pattern, prefix=thedir) + #if not found: + # logger.warning('no previously-included files ' + # 'matching %r found under directory %r', + # pattern, thedir) + + elif action == 'graft': + if not self._include_pattern(None, prefix=dirpattern): + logger.warning('no directories found matching %r', + dirpattern) + + elif action == 'prune': + if not self._exclude_pattern(None, prefix=dirpattern): + logger.warning('no previously-included directories found ' + 'matching %r', dirpattern) + else: # pragma: no cover + # This should never happen, as it should be caught in + # _parse_template_line + raise DistlibException( + 'invalid action %r' % action) + + # + # Private API + # + + def _parse_directive(self, directive): + """ + Validate a directive. + :param directive: The directive to validate. 
+ :return: A tuple of action, patterns, thedir, dir_patterns + """ + words = directive.split() + if len(words) == 1 and words[0] not in ('include', 'exclude', + 'global-include', + 'global-exclude', + 'recursive-include', + 'recursive-exclude', + 'graft', 'prune'): + # no action given, let's use the default 'include' + words.insert(0, 'include') + + action = words[0] + patterns = thedir = dir_pattern = None + + if action in ('include', 'exclude', + 'global-include', 'global-exclude'): + if len(words) < 2: + raise DistlibException( + '%r expects ...' % action) + + patterns = [convert_path(word) for word in words[1:]] + + elif action in ('recursive-include', 'recursive-exclude'): + if len(words) < 3: + raise DistlibException( + '%r expects ...' % action) + + thedir = convert_path(words[1]) + patterns = [convert_path(word) for word in words[2:]] + + elif action in ('graft', 'prune'): + if len(words) != 2: + raise DistlibException( + '%r expects a single ' % action) + + dir_pattern = convert_path(words[1]) + + else: + raise DistlibException('unknown action %r' % action) + + return action, patterns, thedir, dir_pattern + + def _include_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Select strings (presumably filenames) from 'self.files' that + match 'pattern', a Unix-style wildcard (glob) pattern. + + Patterns are not quite the same as implemented by the 'fnmatch' + module: '*' and '?' match non-special characters, where "special" + is platform-dependent: slash on Unix; colon, slash, and backslash on + DOS/Windows; and colon on Mac OS. + + If 'anchor' is true (the default), then the pattern match is more + stringent: "*.py" will match "foo.py" but not "foo/bar.py". If + 'anchor' is false, both of these will match. + + If 'prefix' is supplied, then only filenames starting with 'prefix' + (itself a pattern) and ending with 'pattern', with anything in between + them, will match. 'anchor' is ignored in this case. 
+ + If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and + 'pattern' is assumed to be either a string containing a regex or a + regex object -- no translation is done, the regex is just compiled + and used as-is. + + Selected strings will be added to self.files. + + Return True if files are found. + """ + # XXX docstring lying about what the special chars are? + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + + # delayed loading of allfiles list + if self.allfiles is None: + self.findall() + + for name in self.allfiles: + if pattern_re.search(name): + self.files.add(name) + found = True + return found + + def _exclude_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Remove strings (presumably filenames) from 'files' that match + 'pattern'. + + Other parameters are the same as for 'include_pattern()', above. + The list 'self.files' is modified in place. Return True if files are + found. + + This API is public to allow e.g. exclusion of SCM subdirs, e.g. when + packaging source distributions + """ + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + for f in list(self.files): + if pattern_re.search(f): + self.files.remove(f) + found = True + return found + + def _translate_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Translate a shell-like wildcard pattern to a compiled regular + expression. + + Return the compiled regex. If 'is_regex' true, + then 'pattern' is directly compiled to a regex (if it's a string) + or just returned as-is (assumes it's a regex object). 
+ """ + if is_regex: + if isinstance(pattern, str): + return re.compile(pattern) + else: + return pattern + + if pattern: + pattern_re = self._glob_to_re(pattern) + else: + pattern_re = '' + + base = re.escape(os.path.join(self.base, '')) + if prefix is not None: + # ditch end of pattern character + empty_pattern = self._glob_to_re('') + prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] + sep = os.sep + if os.sep == '\\': + sep = r'\\' + pattern_re = '^' + base + sep.join((prefix_re, + '.*' + pattern_re)) + else: # no prefix -- respect anchor flag + if anchor: + pattern_re = '^' + base + pattern_re + + return re.compile(pattern_re) + + def _glob_to_re(self, pattern): + """Translate a shell-like glob pattern to a regular expression. + + Return a string containing the regex. Differs from + 'fnmatch.translate()' in that '*' does not match "special characters" + (which are platform-specific). + """ + pattern_re = fnmatch.translate(pattern) + + # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which + # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, + # and by extension they shouldn't match such "special characters" under + # any OS. So change all non-escaped dots in the RE to match any + # character except the special characters (currently: just os.sep). + sep = os.sep + if os.sep == '\\': + # we're using a regex to manipulate a regex, so we need + # to escape the backslash twice + sep = r'\\\\' + escaped = r'\1[^%s]' % sep + pattern_re = re.sub(r'((? y, + 'gte': lambda x, y: x >= y, + 'in': lambda x, y: x in y, + 'lt': lambda x, y: x < y, + 'lte': lambda x, y: x <= y, + 'not': lambda x: not x, + 'noteq': lambda x, y: x != y, + 'notin': lambda x, y: x not in y, + } + + allowed_values = { + 'sys_platform': sys.platform, + 'python_version': '%s.%s' % sys.version_info[:2], + # parsing sys.platform is not reliable, but there is no other + # way to get e.g. 
2.7.2+, and the PEP is defined with sys.version + 'python_full_version': sys.version.split(' ', 1)[0], + 'os_name': os.name, + 'platform_in_venv': str(in_venv()), + 'platform_release': platform.release(), + 'platform_version': platform.version(), + 'platform_machine': platform.machine(), + 'platform_python_implementation': python_implementation(), + } + + def __init__(self, context=None): + """ + Initialise an instance. + + :param context: If specified, names are looked up in this mapping. + """ + self.context = context or {} + self.source = None + + def get_fragment(self, offset): + """ + Get the part of the source which is causing a problem. + """ + fragment_len = 10 + s = '%r' % (self.source[offset:offset + fragment_len]) + if offset + fragment_len < len(self.source): + s += '...' + return s + + def get_handler(self, node_type): + """ + Get a handler for the specified AST node type. + """ + return getattr(self, 'do_%s' % node_type, None) + + def evaluate(self, node, filename=None): + """ + Evaluate a source string or node, using ``filename`` when + displaying errors. 
+ """ + if isinstance(node, string_types): + self.source = node + kwargs = {'mode': 'eval'} + if filename: + kwargs['filename'] = filename + try: + node = ast.parse(node, **kwargs) + except SyntaxError as e: + s = self.get_fragment(e.offset) + raise SyntaxError('syntax error %s' % s) + node_type = node.__class__.__name__.lower() + handler = self.get_handler(node_type) + if handler is None: + if self.source is None: + s = '(source not available)' + else: + s = self.get_fragment(node.col_offset) + raise SyntaxError("don't know how to evaluate %r %s" % ( + node_type, s)) + return handler(node) + + def get_attr_key(self, node): + assert isinstance(node, ast.Attribute), 'attribute node expected' + return '%s.%s' % (node.value.id, node.attr) + + def do_attribute(self, node): + if not isinstance(node.value, ast.Name): + valid = False + else: + key = self.get_attr_key(node) + valid = key in self.context or key in self.allowed_values + if not valid: + raise SyntaxError('invalid expression: %s' % key) + if key in self.context: + result = self.context[key] + else: + result = self.allowed_values[key] + return result + + def do_boolop(self, node): + result = self.evaluate(node.values[0]) + is_or = node.op.__class__ is ast.Or + is_and = node.op.__class__ is ast.And + assert is_or or is_and + if (is_and and result) or (is_or and not result): + for n in node.values[1:]: + result = self.evaluate(n) + if (is_or and result) or (is_and and not result): + break + return result + + def do_compare(self, node): + def sanity_check(lhsnode, rhsnode): + valid = True + if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str): + valid = False + #elif (isinstance(lhsnode, ast.Attribute) + # and isinstance(rhsnode, ast.Attribute)): + # klhs = self.get_attr_key(lhsnode) + # krhs = self.get_attr_key(rhsnode) + # valid = klhs != krhs + if not valid: + s = self.get_fragment(node.col_offset) + raise SyntaxError('Invalid comparison: %s' % s) + + lhsnode = node.left + lhs = 
self.evaluate(lhsnode) + result = True + for op, rhsnode in zip(node.ops, node.comparators): + sanity_check(lhsnode, rhsnode) + op = op.__class__.__name__.lower() + if op not in self.operators: + raise SyntaxError('unsupported operation: %r' % op) + rhs = self.evaluate(rhsnode) + result = self.operators[op](lhs, rhs) + if not result: + break + lhs = rhs + lhsnode = rhsnode + return result + + def do_expression(self, node): + return self.evaluate(node.body) + + def do_name(self, node): + valid = False + if node.id in self.context: + valid = True + result = self.context[node.id] + elif node.id in self.allowed_values: + valid = True + result = self.allowed_values[node.id] + if not valid: + raise SyntaxError('invalid expression: %s' % node.id) + return result + + def do_str(self, node): + return node.s + + +def interpret(marker, execution_context=None): + """ + Interpret a marker and return a result depending on environment. + + :param marker: The marker to interpret. + :type marker: str + :param execution_context: The context used for name lookup. + :type execution_context: mapping + """ + return Evaluator(execution_context).evaluate(marker.strip()) diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/metadata.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/metadata.py new file mode 100644 index 00000000..55bd75f4 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/metadata.py @@ -0,0 +1,1058 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Implementation of the Metadata for Python packages PEPs. + +Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). +""" +from __future__ import unicode_literals + +import codecs +from email import message_from_file +import json +import logging +import re + + +from . 
import DistlibException, __version__ +from .compat import StringIO, string_types, text_type +from .markers import interpret +from .util import extract_by_key, get_extras +from .version import get_scheme, PEP440_VERSION_RE + +logger = logging.getLogger(__name__) + + +class MetadataMissingError(DistlibException): + """A required metadata is missing""" + + +class MetadataConflictError(DistlibException): + """Attempt to read or write metadata fields that are conflictual.""" + + +class MetadataUnrecognizedVersionError(DistlibException): + """Unknown metadata version number.""" + + +class MetadataInvalidError(DistlibException): + """A metadata value is invalid""" + +# public API of this module +__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] + +# Encoding used for the PKG-INFO files +PKG_INFO_ENCODING = 'utf-8' + +# preferred version. Hopefully will be changed +# to 1.2 once PEP 345 is supported everywhere +PKG_INFO_PREFERRED_VERSION = '1.1' + +_LINE_PREFIX = re.compile('\n \|') +_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License') + +_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License', 'Classifier', 'Download-URL', 'Obsoletes', + 'Provides', 'Requires') + +_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', + 'Download-URL') + +_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External') + +_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', + 'Obsoletes-Dist', 'Requires-External', 
'Maintainer', + 'Maintainer-email', 'Project-URL') + +_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External', 'Private-Version', + 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', + 'Provides-Extra') + +_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', + 'Setup-Requires-Dist', 'Extension') + +_ALL_FIELDS = set() +_ALL_FIELDS.update(_241_FIELDS) +_ALL_FIELDS.update(_314_FIELDS) +_ALL_FIELDS.update(_345_FIELDS) +_ALL_FIELDS.update(_426_FIELDS) + +EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') + + +def _version2fieldlist(version): + if version == '1.0': + return _241_FIELDS + elif version == '1.1': + return _314_FIELDS + elif version == '1.2': + return _345_FIELDS + elif version == '2.0': + return _426_FIELDS + raise MetadataUnrecognizedVersionError(version) + + +def _best_version(fields): + """Detect the best version depending on the fields used.""" + def _has_marker(keys, markers): + for marker in markers: + if marker in keys: + return True + return False + + keys = [] + for key, value in fields.items(): + if value in ([], 'UNKNOWN', None): + continue + keys.append(key) + + possible_versions = ['1.0', '1.1', '1.2', '2.0'] + + # first let's try to see if a field is not part of one of the version + for key in keys: + if key not in _241_FIELDS and '1.0' in possible_versions: + possible_versions.remove('1.0') + if key not in _314_FIELDS and '1.1' in possible_versions: + possible_versions.remove('1.1') + if key not in _345_FIELDS and '1.2' in possible_versions: + possible_versions.remove('1.2') + if key not in _426_FIELDS and '2.0' in possible_versions: + possible_versions.remove('2.0') + + # possible_version contains qualified 
versions + if len(possible_versions) == 1: + return possible_versions[0] # found ! + elif len(possible_versions) == 0: + raise MetadataConflictError('Unknown metadata set') + + # let's see if one unique marker is found + is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) + is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) + is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) + if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1: + raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields') + + # we have the choice, 1.0, or 1.2, or 2.0 + # - 1.0 has a broken Summary field but works with all tools + # - 1.1 is to avoid + # - 1.2 fixes Summary but has little adoption + # - 2.0 adds more features and is very new + if not is_1_1 and not is_1_2 and not is_2_0: + # we couldn't find any specific marker + if PKG_INFO_PREFERRED_VERSION in possible_versions: + return PKG_INFO_PREFERRED_VERSION + if is_1_1: + return '1.1' + if is_1_2: + return '1.2' + + return '2.0' + +_ATTR2FIELD = { + 'metadata_version': 'Metadata-Version', + 'name': 'Name', + 'version': 'Version', + 'platform': 'Platform', + 'supported_platform': 'Supported-Platform', + 'summary': 'Summary', + 'description': 'Description', + 'keywords': 'Keywords', + 'home_page': 'Home-page', + 'author': 'Author', + 'author_email': 'Author-email', + 'maintainer': 'Maintainer', + 'maintainer_email': 'Maintainer-email', + 'license': 'License', + 'classifier': 'Classifier', + 'download_url': 'Download-URL', + 'obsoletes_dist': 'Obsoletes-Dist', + 'provides_dist': 'Provides-Dist', + 'requires_dist': 'Requires-Dist', + 'setup_requires_dist': 'Setup-Requires-Dist', + 'requires_python': 'Requires-Python', + 'requires_external': 'Requires-External', + 'requires': 'Requires', + 'provides': 'Provides', + 'obsoletes': 'Obsoletes', + 'project_url': 'Project-URL', + 'private_version': 'Private-Version', + 'obsoleted_by': 'Obsoleted-By', + 'extension': 'Extension', + 
'provides_extra': 'Provides-Extra', +} + +_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') +_VERSIONS_FIELDS = ('Requires-Python',) +_VERSION_FIELDS = ('Version',) +_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', + 'Requires', 'Provides', 'Obsoletes-Dist', + 'Provides-Dist', 'Requires-Dist', 'Requires-External', + 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', + 'Provides-Extra', 'Extension') +_LISTTUPLEFIELDS = ('Project-URL',) + +_ELEMENTSFIELD = ('Keywords',) + +_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') + +_MISSING = object() + +_FILESAFE = re.compile('[^A-Za-z0-9.]+') + + +def _get_name_and_version(name, version, for_filename=False): + """Return the distribution name with version. + + If for_filename is true, return a filename-escaped form.""" + if for_filename: + # For both name and version any runs of non-alphanumeric or '.' + # characters are replaced with a single '-'. Additionally any + # spaces in the version string become '.' + name = _FILESAFE.sub('-', name) + version = _FILESAFE.sub('-', version.replace(' ', '.')) + return '%s-%s' % (name, version) + + +class LegacyMetadata(object): + """The legacy metadata of a release. + + Supports versions 1.0, 1.1 and 1.2 (auto-detected). 
You can + instantiate the class with one of these arguments (or none): + - *path*, the path to a metadata file + - *fileobj* give a file-like object with metadata as content + - *mapping* is a dict-like object + - *scheme* is a version scheme name + """ + # TODO document the mapping API and UNKNOWN default key + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._fields = {} + self.requires_files = [] + self._dependencies = None + self.scheme = scheme + if path is not None: + self.read(path) + elif fileobj is not None: + self.read_file(fileobj) + elif mapping is not None: + self.update(mapping) + self.set_metadata_version() + + def set_metadata_version(self): + self._fields['Metadata-Version'] = _best_version(self._fields) + + def _write_field(self, fileobj, name, value): + fileobj.write('%s: %s\n' % (name, value)) + + def __getitem__(self, name): + return self.get(name) + + def __setitem__(self, name, value): + return self.set(name, value) + + def __delitem__(self, name): + field_name = self._convert_name(name) + try: + del self._fields[field_name] + except KeyError: + raise KeyError(name) + + def __contains__(self, name): + return (name in self._fields or + self._convert_name(name) in self._fields) + + def _convert_name(self, name): + if name in _ALL_FIELDS: + return name + name = name.replace('-', '_').lower() + return _ATTR2FIELD.get(name, name) + + def _default_value(self, name): + if name in _LISTFIELDS or name in _ELEMENTSFIELD: + return [] + return 'UNKNOWN' + + def _remove_line_prefix(self, value): + return _LINE_PREFIX.sub('\n', value) + + def __getattr__(self, name): + if name in _ATTR2FIELD: + return self[name] + raise AttributeError(name) + + # + # Public API + # + +# dependencies = property(_get_dependencies, _set_dependencies) + + def get_fullname(self, filesafe=False): + """Return the distribution name with 
version. + + If filesafe is true, return a filename-escaped form.""" + return _get_name_and_version(self['Name'], self['Version'], filesafe) + + def is_field(self, name): + """return True if name is a valid metadata key""" + name = self._convert_name(name) + return name in _ALL_FIELDS + + def is_multi_field(self, name): + name = self._convert_name(name) + return name in _LISTFIELDS + + def read(self, filepath): + """Read the metadata values from a file path.""" + fp = codecs.open(filepath, 'r', encoding='utf-8') + try: + self.read_file(fp) + finally: + fp.close() + + def read_file(self, fileob): + """Read the metadata values from a file object.""" + msg = message_from_file(fileob) + self._fields['Metadata-Version'] = msg['metadata-version'] + + # When reading, get all the fields we can + for field in _ALL_FIELDS: + if field not in msg: + continue + if field in _LISTFIELDS: + # we can have multiple lines + values = msg.get_all(field) + if field in _LISTTUPLEFIELDS and values is not None: + values = [tuple(value.split(',')) for value in values] + self.set(field, values) + else: + # single line + value = msg[field] + if value is not None and value != 'UNKNOWN': + self.set(field, value) + self.set_metadata_version() + + def write(self, filepath, skip_unknown=False): + """Write the metadata fields to filepath.""" + fp = codecs.open(filepath, 'w', encoding='utf-8') + try: + self.write_file(fp, skip_unknown) + finally: + fp.close() + + def write_file(self, fileobject, skip_unknown=False): + """Write the PKG-INFO format data to a file object.""" + self.set_metadata_version() + + for field in _version2fieldlist(self['Metadata-Version']): + values = self.get(field) + if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): + continue + if field in _ELEMENTSFIELD: + self._write_field(fileobject, field, ','.join(values)) + continue + if field not in _LISTFIELDS: + if field == 'Description': + values = values.replace('\n', '\n |') + values = [values] + + if field in 
_LISTTUPLEFIELDS: + values = [','.join(value) for value in values] + + for value in values: + self._write_field(fileobject, field, value) + + def update(self, other=None, **kwargs): + """Set metadata values from the given iterable `other` and kwargs. + + Behavior is like `dict.update`: If `other` has a ``keys`` method, + they are looped over and ``self[key]`` is assigned ``other[key]``. + Else, ``other`` is an iterable of ``(key, value)`` iterables. + + Keys that don't match a metadata field or that have an empty value are + dropped. + """ + def _set(key, value): + if key in _ATTR2FIELD and value: + self.set(self._convert_name(key), value) + + if not other: + # other is None or empty container + pass + elif hasattr(other, 'keys'): + for k in other.keys(): + _set(k, other[k]) + else: + for k, v in other: + _set(k, v) + + if kwargs: + for k, v in kwargs.items(): + _set(k, v) + + def set(self, name, value): + """Control then set a metadata field.""" + name = self._convert_name(name) + + if ((name in _ELEMENTSFIELD or name == 'Platform') and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [v.strip() for v in value.split(',')] + else: + value = [] + elif (name in _LISTFIELDS and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [value] + else: + value = [] + + if logger.isEnabledFor(logging.WARNING): + project_name = self['Name'] + + scheme = get_scheme(self.scheme) + if name in _PREDICATE_FIELDS and value is not None: + for v in value: + # check that the values are valid + if not scheme.is_valid_matcher(v.split(';')[0]): + logger.warning( + '%r: %r is not valid (field %r)', + project_name, v, name) + # FIXME this rejects UNKNOWN, is that right? 
+ elif name in _VERSIONS_FIELDS and value is not None: + if not scheme.is_valid_constraint_list(value): + logger.warning('%r: %r is not a valid version (field %r)', + project_name, value, name) + elif name in _VERSION_FIELDS and value is not None: + if not scheme.is_valid_version(value): + logger.warning('%r: %r is not a valid version (field %r)', + project_name, value, name) + + if name in _UNICODEFIELDS: + if name == 'Description': + value = self._remove_line_prefix(value) + + self._fields[name] = value + + def get(self, name, default=_MISSING): + """Get a metadata field.""" + name = self._convert_name(name) + if name not in self._fields: + if default is _MISSING: + default = self._default_value(name) + return default + if name in _UNICODEFIELDS: + value = self._fields[name] + return value + elif name in _LISTFIELDS: + value = self._fields[name] + if value is None: + return [] + res = [] + for val in value: + if name not in _LISTTUPLEFIELDS: + res.append(val) + else: + # That's for Project-URL + res.append((val[0], val[1])) + return res + + elif name in _ELEMENTSFIELD: + value = self._fields[name] + if isinstance(value, string_types): + return value.split(',') + return self._fields[name] + + def check(self, strict=False): + """Check if the metadata is compliant. 
If strict is True then raise if + no Name or Version are provided""" + self.set_metadata_version() + + # XXX should check the versions (if the file was loaded) + missing, warnings = [], [] + + for attr in ('Name', 'Version'): # required by PEP 345 + if attr not in self: + missing.append(attr) + + if strict and missing != []: + msg = 'missing required metadata: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + + for attr in ('Home-page', 'Author'): + if attr not in self: + missing.append(attr) + + # checking metadata 1.2 (XXX needs to check 1.1, 1.0) + if self['Metadata-Version'] != '1.2': + return missing, warnings + + scheme = get_scheme(self.scheme) + + def are_valid_constraints(value): + for v in value: + if not scheme.is_valid_matcher(v.split(';')[0]): + return False + return True + + for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), + (_VERSIONS_FIELDS, + scheme.is_valid_constraint_list), + (_VERSION_FIELDS, + scheme.is_valid_version)): + for field in fields: + value = self.get(field, None) + if value is not None and not controller(value): + warnings.append('Wrong value for %r: %s' % (field, value)) + + return missing, warnings + + def todict(self, skip_missing=False): + """Return fields as a dict. + + Field names will be converted to use the underscore-lowercase style + instead of hyphen-mixed case (i.e. home_page instead of Home-page). 
+ """ + self.set_metadata_version() + + mapping_1_0 = ( + ('metadata_version', 'Metadata-Version'), + ('name', 'Name'), + ('version', 'Version'), + ('summary', 'Summary'), + ('home_page', 'Home-page'), + ('author', 'Author'), + ('author_email', 'Author-email'), + ('license', 'License'), + ('description', 'Description'), + ('keywords', 'Keywords'), + ('platform', 'Platform'), + ('classifier', 'Classifier'), + ('download_url', 'Download-URL'), + ) + + data = {} + for key, field_name in mapping_1_0: + if not skip_missing or field_name in self._fields: + data[key] = self[field_name] + + if self['Metadata-Version'] == '1.2': + mapping_1_2 = ( + ('requires_dist', 'Requires-Dist'), + ('requires_python', 'Requires-Python'), + ('requires_external', 'Requires-External'), + ('provides_dist', 'Provides-Dist'), + ('obsoletes_dist', 'Obsoletes-Dist'), + ('project_url', 'Project-URL'), + ('maintainer', 'Maintainer'), + ('maintainer_email', 'Maintainer-email'), + ) + for key, field_name in mapping_1_2: + if not skip_missing or field_name in self._fields: + if key != 'project_url': + data[key] = self[field_name] + else: + data[key] = [','.join(u) for u in self[field_name]] + + elif self['Metadata-Version'] == '1.1': + mapping_1_1 = ( + ('provides', 'Provides'), + ('requires', 'Requires'), + ('obsoletes', 'Obsoletes'), + ) + for key, field_name in mapping_1_1: + if not skip_missing or field_name in self._fields: + data[key] = self[field_name] + + return data + + def add_requirements(self, requirements): + if self['Metadata-Version'] == '1.1': + # we can't have 1.1 metadata *and* Setuptools requires + for field in ('Obsoletes', 'Requires', 'Provides'): + if field in self: + del self[field] + self['Requires-Dist'] += requirements + + # Mapping API + # TODO could add iter* variants + + def keys(self): + return list(_version2fieldlist(self['Metadata-Version'])) + + def __iter__(self): + for key in self.keys(): + yield key + + def values(self): + return [self[key] for key in self.keys()] 
+ + def items(self): + return [(key, self[key]) for key in self.keys()] + + def __repr__(self): + return '<%s %s %s>' % (self.__class__.__name__, self.name, + self.version) + + +METADATA_FILENAME = 'pydist.json' + + +class Metadata(object): + """ + The metadata of a release. This implementation uses 2.0 (JSON) + metadata where possible. If not possible, it wraps a LegacyMetadata + instance which handles the key-value metadata format. + """ + + METADATA_VERSION_MATCHER = re.compile('^\d+(\.\d+)*$') + + NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) + + VERSION_MATCHER = PEP440_VERSION_RE + + SUMMARY_MATCHER = re.compile('.{1,2047}') + + METADATA_VERSION = '2.0' + + GENERATOR = 'distlib (%s)' % __version__ + + MANDATORY_KEYS = { + 'name': (), + 'version': (), + 'summary': ('legacy',), + } + + INDEX_KEYS = ('name version license summary description author ' + 'author_email keywords platform home_page classifiers ' + 'download_url') + + DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires ' + 'dev_requires provides meta_requires obsoleted_by ' + 'supports_environments') + + SYNTAX_VALIDATORS = { + 'metadata_version': (METADATA_VERSION_MATCHER, ()), + 'name': (NAME_MATCHER, ('legacy',)), + 'version': (VERSION_MATCHER, ('legacy',)), + 'summary': (SUMMARY_MATCHER, ('legacy',)), + } + + __slots__ = ('_legacy', '_data', 'scheme') + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._legacy = None + self._data = None + self.scheme = scheme + #import pdb; pdb.set_trace() + if mapping is not None: + try: + self._validate_mapping(mapping, scheme) + self._data = mapping + except MetadataUnrecognizedVersionError: + self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme) + self.validate() + else: + data = None + if path: + with open(path, 'rb') as f: + data = f.read() + elif fileobj: + data = 
fileobj.read() + if data is None: + # Initialised with no args - to be added + self._data = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + else: + if not isinstance(data, text_type): + data = data.decode('utf-8') + try: + self._data = json.loads(data) + self._validate_mapping(self._data, scheme) + except ValueError: + # Note: MetadataUnrecognizedVersionError does not + # inherit from ValueError (it's a DistlibException, + # which should not inherit from ValueError). + # The ValueError comes from the json.load - if that + # succeeds and we get a validation error, we want + # that to propagate + self._legacy = LegacyMetadata(fileobj=StringIO(data), + scheme=scheme) + self.validate() + + common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) + + none_list = (None, list) + none_dict = (None, dict) + + mapped_keys = { + 'run_requires': ('Requires-Dist', list), + 'build_requires': ('Setup-Requires-Dist', list), + 'dev_requires': none_list, + 'test_requires': none_list, + 'meta_requires': none_list, + 'extras': ('Provides-Extra', list), + 'modules': none_list, + 'namespaces': none_list, + 'exports': none_dict, + 'commands': none_dict, + 'classifiers': ('Classifier', list), + 'source_url': ('Download-URL', None), + 'metadata_version': ('Metadata-Version', None), + } + + del none_list, none_dict + + def __getattribute__(self, key): + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, maker = mapped[key] + if self._legacy: + if lk is None: + result = None if maker is None else maker() + else: + result = self._legacy.get(lk) + else: + value = None if maker is None else maker() + if key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + result = self._data.get(key, value) + else: + # special cases for PEP 459 + sentinel = object() + result = sentinel + d = self._data.get('extensions') + if d: + if key == 
'commands': + result = d.get('python.commands', value) + elif key == 'classifiers': + d = d.get('python.details') + if d: + result = d.get(key, value) + else: + d = d.get('python.exports') + if d: + result = d.get(key, value) + if result is sentinel: + result = value + elif key not in common: + result = object.__getattribute__(self, key) + elif self._legacy: + result = self._legacy.get(key) + else: + result = self._data.get(key) + return result + + def _validate_value(self, key, value, scheme=None): + if key in self.SYNTAX_VALIDATORS: + pattern, exclusions = self.SYNTAX_VALIDATORS[key] + if (scheme or self.scheme) not in exclusions: + m = pattern.match(value) + if not m: + raise MetadataInvalidError('%r is an invalid value for ' + 'the %r property' % (value, + key)) + + def __setattr__(self, key, value): + self._validate_value(key, value) + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, _ = mapped[key] + if self._legacy: + if lk is None: + raise NotImplementedError + self._legacy[lk] = value + elif key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + self._data[key] = value + else: + # special cases for PEP 459 + d = self._data.setdefault('extensions', {}) + if key == 'commands': + d['python.commands'] = value + elif key == 'classifiers': + d = d.setdefault('python.details', {}) + d[key] = value + else: + d = d.setdefault('python.exports', {}) + d[key] = value + elif key not in common: + object.__setattr__(self, key, value) + else: + if key == 'keywords': + if isinstance(value, string_types): + value = value.strip() + if value: + value = value.split() + else: + value = [] + if self._legacy: + self._legacy[key] = value + else: + self._data[key] = value + + @property + def name_and_version(self): + return _get_name_and_version(self.name, self.version, True) + + @property + def provides(self): + if self._legacy: + result = self._legacy['Provides-Dist'] + 
else: + result = self._data.setdefault('provides', []) + s = '%s (%s)' % (self.name, self.version) + if s not in result: + result.append(s) + return result + + @provides.setter + def provides(self, value): + if self._legacy: + self._legacy['Provides-Dist'] = value + else: + self._data['provides'] = value + + def get_requirements(self, reqts, extras=None, env=None): + """ + Base method to get dependencies, given a set of extras + to satisfy and an optional environment context. + :param reqts: A list of sometimes-wanted dependencies, + perhaps dependent on extras and environment. + :param extras: A list of optional components being requested. + :param env: An optional environment for marker evaluation. + """ + if self._legacy: + result = reqts + else: + result = [] + extras = get_extras(extras or [], self.extras) + for d in reqts: + if 'extra' not in d and 'environment' not in d: + # unconditional + include = True + else: + if 'extra' not in d: + # Not extra-dependent - only environment-dependent + include = True + else: + include = d.get('extra') in extras + if include: + # Not excluded because of extras, check environment + marker = d.get('environment') + if marker: + include = interpret(marker, env) + if include: + result.extend(d['requires']) + for key in ('build', 'dev', 'test'): + e = ':%s:' % key + if e in extras: + extras.remove(e) + # A recursive call, but it should terminate since 'test' + # has been removed from the extras + reqts = self._data.get('%s_requires' % key, []) + result.extend(self.get_requirements(reqts, extras=extras, + env=env)) + return result + + @property + def dictionary(self): + if self._legacy: + return self._from_legacy() + return self._data + + @property + def dependencies(self): + if self._legacy: + raise NotImplementedError + else: + return extract_by_key(self._data, self.DEPENDENCY_KEYS) + + @dependencies.setter + def dependencies(self, value): + if self._legacy: + raise NotImplementedError + else: + self._data.update(value) + + 
def _validate_mapping(self, mapping, scheme): + if mapping.get('metadata_version') != self.METADATA_VERSION: + raise MetadataUnrecognizedVersionError() + missing = [] + for key, exclusions in self.MANDATORY_KEYS.items(): + if key not in mapping: + if scheme not in exclusions: + missing.append(key) + if missing: + msg = 'Missing metadata items: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + for k, v in mapping.items(): + self._validate_value(k, v, scheme) + + def validate(self): + if self._legacy: + missing, warnings = self._legacy.check(True) + if missing or warnings: + logger.warning('Metadata: missing: %s, warnings: %s', + missing, warnings) + else: + self._validate_mapping(self._data, self.scheme) + + def todict(self): + if self._legacy: + return self._legacy.todict(True) + else: + result = extract_by_key(self._data, self.INDEX_KEYS) + return result + + def _from_legacy(self): + assert self._legacy and not self._data + result = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + lmd = self._legacy.todict(True) # skip missing ones + for k in ('name', 'version', 'license', 'summary', 'description', + 'classifier'): + if k in lmd: + if k == 'classifier': + nk = 'classifiers' + else: + nk = k + result[nk] = lmd[k] + kw = lmd.get('Keywords', []) + if kw == ['']: + kw = [] + result['keywords'] = kw + keys = (('requires_dist', 'run_requires'), + ('setup_requires_dist', 'build_requires')) + for ok, nk in keys: + if ok in lmd and lmd[ok]: + result[nk] = [{'requires': lmd[ok]}] + result['provides'] = self.provides + author = {} + maintainer = {} + return result + + LEGACY_MAPPING = { + 'name': 'Name', + 'version': 'Version', + 'license': 'License', + 'summary': 'Summary', + 'description': 'Description', + 'classifiers': 'Classifier', + } + + def _to_legacy(self): + def process_entries(entries): + reqts = set() + for e in entries: + extra = e.get('extra') + env = e.get('environment') + rlist = e['requires'] + for r in rlist: + 
if not env and not extra: + reqts.add(r) + else: + marker = '' + if extra: + marker = 'extra == "%s"' % extra + if env: + if marker: + marker = '(%s) and %s' % (env, marker) + else: + marker = env + reqts.add(';'.join((r, marker))) + return reqts + + assert self._data and not self._legacy + result = LegacyMetadata() + nmd = self._data + for nk, ok in self.LEGACY_MAPPING.items(): + if nk in nmd: + result[ok] = nmd[nk] + r1 = process_entries(self.run_requires + self.meta_requires) + r2 = process_entries(self.build_requires + self.dev_requires) + if self.extras: + result['Provides-Extra'] = sorted(self.extras) + result['Requires-Dist'] = sorted(r1) + result['Setup-Requires-Dist'] = sorted(r2) + # TODO: other fields such as contacts + return result + + def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True): + if [path, fileobj].count(None) != 1: + raise ValueError('Exactly one of path and fileobj is needed') + self.validate() + if legacy: + if self._legacy: + legacy_md = self._legacy + else: + legacy_md = self._to_legacy() + if path: + legacy_md.write(path, skip_unknown=skip_unknown) + else: + legacy_md.write_file(fileobj, skip_unknown=skip_unknown) + else: + if self._legacy: + d = self._from_legacy() + else: + d = self._data + if fileobj: + json.dump(d, fileobj, ensure_ascii=True, indent=2, + sort_keys=True) + else: + with codecs.open(path, 'w', 'utf-8') as f: + json.dump(d, f, ensure_ascii=True, indent=2, + sort_keys=True) + + def add_requirements(self, requirements): + if self._legacy: + self._legacy.add_requirements(requirements) + else: + run_requires = self._data.setdefault('run_requires', []) + always = None + for entry in run_requires: + if 'environment' not in entry and 'extra' not in entry: + always = entry + break + if always is None: + always = { 'requires': requirements } + run_requires.insert(0, always) + else: + rset = set(always['requires']) | set(requirements) + always['requires'] = sorted(rset) + + def __repr__(self): + name = 
self.name or '(no name)' + version = self.version or 'no version' + return '<%s %s %s (%s)>' % (self.__class__.__name__, + self.metadata_version, name, version) diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/resources.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/resources.py new file mode 100644 index 00000000..d24c0e93 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/resources.py @@ -0,0 +1,323 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import unicode_literals + +import bisect +import io +import logging +import os +import pkgutil +import shutil +import sys +import types +import zipimport + +from . import DistlibException +from .util import cached_property, get_cache_base, path_to_cache_dir, Cache + +logger = logging.getLogger(__name__) + + +cache = None # created when needed + + +class ResourceCache(Cache): + def __init__(self, base=None): + if base is None: + # Use native string to avoid issues on 2.x: see Python #20140. + base = os.path.join(get_cache_base(), str('resource-cache')) + super(ResourceCache, self).__init__(base) + + def is_stale(self, resource, path): + """ + Is the cache stale for the given resource? + + :param resource: The :class:`Resource` being cached. + :param path: The path of the resource in the cache. + :return: True if the cache is stale. + """ + # Cache invalidation is a hard problem :-) + return True + + def get(self, resource): + """ + Get a resource into the cache, + + :param resource: A :class:`Resource` instance. + :return: The pathname of the resource in the cache. 
+ """ + prefix, path = resource.finder.get_cache_info(resource) + if prefix is None: + result = path + else: + result = os.path.join(self.base, self.prefix_to_dir(prefix), path) + dirname = os.path.dirname(result) + if not os.path.isdir(dirname): + os.makedirs(dirname) + if not os.path.exists(result): + stale = True + else: + stale = self.is_stale(resource, path) + if stale: + # write the bytes of the resource to the cache location + with open(result, 'wb') as f: + f.write(resource.bytes) + return result + + +class ResourceBase(object): + def __init__(self, finder, name): + self.finder = finder + self.name = name + + +class Resource(ResourceBase): + """ + A class representing an in-package resource, such as a data file. This is + not normally instantiated by user code, but rather by a + :class:`ResourceFinder` which manages the resource. + """ + is_container = False # Backwards compatibility + + def as_stream(self): + """ + Get the resource as a stream. + + This is not a property to make it obvious that it returns a new stream + each time. + """ + return self.finder.get_stream(self) + + @cached_property + def file_path(self): + global cache + if cache is None: + cache = ResourceCache() + return cache.get(self) + + @cached_property + def bytes(self): + return self.finder.get_bytes(self) + + @cached_property + def size(self): + return self.finder.get_size(self) + + +class ResourceContainer(ResourceBase): + is_container = True # Backwards compatibility + + @cached_property + def resources(self): + return self.finder.get_resources(self) + + +class ResourceFinder(object): + """ + Resource finder for file system resources. 
+ """ + def __init__(self, module): + self.module = module + self.loader = getattr(module, '__loader__', None) + self.base = os.path.dirname(getattr(module, '__file__', '')) + + def _adjust_path(self, path): + return os.path.realpath(path) + + def _make_path(self, resource_name): + # Issue #50: need to preserve type of path on Python 2.x + # like os.path._get_sep + if isinstance(resource_name, bytes): # should only happen on 2.x + sep = b'/' + else: + sep = '/' + parts = resource_name.split(sep) + parts.insert(0, self.base) + result = os.path.join(*parts) + return self._adjust_path(result) + + def _find(self, path): + return os.path.exists(path) + + def get_cache_info(self, resource): + return None, resource.path + + def find(self, resource_name): + path = self._make_path(resource_name) + if not self._find(path): + result = None + else: + if self._is_directory(path): + result = ResourceContainer(self, resource_name) + else: + result = Resource(self, resource_name) + result.path = path + return result + + def get_stream(self, resource): + return open(resource.path, 'rb') + + def get_bytes(self, resource): + with open(resource.path, 'rb') as f: + return f.read() + + def get_size(self, resource): + return os.path.getsize(resource.path) + + def get_resources(self, resource): + def allowed(f): + return f != '__pycache__' and not f.endswith(('.pyc', '.pyo')) + return set([f for f in os.listdir(resource.path) if allowed(f)]) + + def is_container(self, resource): + return self._is_directory(resource.path) + + _is_directory = staticmethod(os.path.isdir) + + +class ZipResourceFinder(ResourceFinder): + """ + Resource finder for resources in .zip files. 
+ """ + def __init__(self, module): + super(ZipResourceFinder, self).__init__(module) + archive = self.loader.archive + self.prefix_len = 1 + len(archive) + # PyPy doesn't have a _files attr on zipimporter, and you can't set one + if hasattr(self.loader, '_files'): + self._files = self.loader._files + else: + self._files = zipimport._zip_directory_cache[archive] + self.index = sorted(self._files) + + def _adjust_path(self, path): + return path + + def _find(self, path): + path = path[self.prefix_len:] + if path in self._files: + result = True + else: + if path and path[-1] != os.sep: + path = path + os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + if not result: + logger.debug('_find failed: %r %r', path, self.loader.prefix) + else: + logger.debug('_find worked: %r %r', path, self.loader.prefix) + return result + + def get_cache_info(self, resource): + prefix = self.loader.archive + path = resource.path[1 + len(prefix):] + return prefix, path + + def get_bytes(self, resource): + return self.loader.get_data(resource.path) + + def get_stream(self, resource): + return io.BytesIO(self.get_bytes(resource)) + + def get_size(self, resource): + path = resource.path[self.prefix_len:] + return self._files[path][3] + + def get_resources(self, resource): + path = resource.path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + plen = len(path) + result = set() + i = bisect.bisect(self.index, path) + while i < len(self.index): + if not self.index[i].startswith(path): + break + s = self.index[i][plen:] + result.add(s.split(os.sep, 1)[0]) # only immediate children + i += 1 + return result + + def _is_directory(self, path): + path = path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + return result + +_finder_registry = { + 
type(None): ResourceFinder, + zipimport.zipimporter: ZipResourceFinder +} + +try: + import _frozen_importlib + _finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder + _finder_registry[_frozen_importlib.FileFinder] = ResourceFinder +except (ImportError, AttributeError): + pass + + +def register_finder(loader, finder_maker): + _finder_registry[type(loader)] = finder_maker + +_finder_cache = {} + + +def finder(package): + """ + Return a resource finder for a package. + :param package: The name of the package. + :return: A :class:`ResourceFinder` instance for the package. + """ + if package in _finder_cache: + result = _finder_cache[package] + else: + if package not in sys.modules: + __import__(package) + module = sys.modules[package] + path = getattr(module, '__path__', None) + if path is None: + raise DistlibException('You cannot get a finder for a module, ' + 'only for a package') + loader = getattr(module, '__loader__', None) + finder_maker = _finder_registry.get(type(loader)) + if finder_maker is None: + raise DistlibException('Unable to locate finder for %r' % package) + result = finder_maker(module) + _finder_cache[package] = result + return result + + +_dummy_module = types.ModuleType(str('__dummy__')) + + +def finder_for_path(path): + """ + Return a resource finder for a path, which should represent a container. + + :param path: The path. + :return: A :class:`ResourceFinder` instance for the path. 
+ """ + result = None + # calls any path hooks, gets importer into cache + pkgutil.get_importer(path) + loader = sys.path_importer_cache.get(path) + finder = _finder_registry.get(type(loader)) + if finder: + module = _dummy_module + module.__file__ = os.path.join(path, '') + module.__loader__ = loader + result = finder(module) + return result diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/scripts.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/scripts.py new file mode 100644 index 00000000..35976c93 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/scripts.py @@ -0,0 +1,335 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2014 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from io import BytesIO +import logging +import os +import re +import struct +import sys + +from .compat import sysconfig, detect_encoding, ZipFile +from .resources import finder +from .util import (FileOperator, get_export_entry, convert_path, + get_executable, in_venv) + +logger = logging.getLogger(__name__) + +_DEFAULT_MANIFEST = ''' + + + + + + + + + + + + +'''.strip() + +# check if Python is called on the first line with this expression +FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') +SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*- +if __name__ == '__main__': + import sys, re + + def _resolve(module, func): + __import__(module) + mod = sys.modules[module] + parts = func.split('.') + result = getattr(mod, parts.pop(0)) + for p in parts: + result = getattr(result, p) + return result + + try: + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + + func = _resolve('%(module)s', '%(func)s') + rc = func() # None interpreted as 0 + except Exception as e: # only supporting Python >= 2.6 + sys.stderr.write('%%s\\n' %% e) + rc = 1 + sys.exit(rc) +''' + + +class ScriptMaker(object): + """ + A class to copy or create scripts from source 
scripts or callable + specifications. + """ + script_template = SCRIPT_TEMPLATE + + executable = None # for shebangs + + def __init__(self, source_dir, target_dir, add_launchers=True, + dry_run=False, fileop=None): + self.source_dir = source_dir + self.target_dir = target_dir + self.add_launchers = add_launchers + self.force = False + self.clobber = False + # It only makes sense to set mode bits on POSIX. + self.set_mode = (os.name == 'posix') + self.variants = set(('', 'X.Y')) + self._fileop = fileop or FileOperator(dry_run) + + def _get_alternate_executable(self, executable, options): + if options.get('gui', False) and os.name == 'nt': + dn, fn = os.path.split(executable) + fn = fn.replace('python', 'pythonw') + executable = os.path.join(dn, fn) + return executable + + def _get_shebang(self, encoding, post_interp=b'', options=None): + enquote = True + if self.executable: + executable = self.executable + enquote = False # assume this will be taken care of + elif not sysconfig.is_python_build(): + executable = get_executable() + elif in_venv(): + executable = os.path.join(sysconfig.get_path('scripts'), + 'python%s' % sysconfig.get_config_var('EXE')) + else: + executable = os.path.join( + sysconfig.get_config_var('BINDIR'), + 'python%s%s' % (sysconfig.get_config_var('VERSION'), + sysconfig.get_config_var('EXE'))) + if options: + executable = self._get_alternate_executable(executable, options) + + # If the user didn't specify an executable, it may be necessary to + # cater for executable paths with spaces (not uncommon on Windows) + if enquote and ' ' in executable: + executable = '"%s"' % executable + # Issue #51: don't use fsencode, since we later try to + # check that the shebang is decodable using utf-8. + executable = executable.encode('utf-8') + # in case of IronPython, play safe and enable frames support + if (sys.platform == 'cli' and '-X:Frames' not in post_interp + and '-X:FullFrames' not in post_interp): + post_interp += b' -X:Frames' + shebang = b'#!' 
+ executable + post_interp + b'\n' + # Python parser starts to read a script using UTF-8 until + # it gets a #coding:xxx cookie. The shebang has to be the + # first line of a file, the #coding:xxx cookie cannot be + # written before. So the shebang has to be decodable from + # UTF-8. + try: + shebang.decode('utf-8') + except UnicodeDecodeError: + raise ValueError( + 'The shebang (%r) is not decodable from utf-8' % shebang) + # If the script is encoded to a custom encoding (use a + # #coding:xxx cookie), the shebang has to be decodable from + # the script encoding too. + if encoding != 'utf-8': + try: + shebang.decode(encoding) + except UnicodeDecodeError: + raise ValueError( + 'The shebang (%r) is not decodable ' + 'from the script encoding (%r)' % (shebang, encoding)) + return shebang + + def _get_script_text(self, entry): + return self.script_template % dict(module=entry.prefix, + func=entry.suffix) + + manifest = _DEFAULT_MANIFEST + + def get_manifest(self, exename): + base = os.path.basename(exename) + return self.manifest % base + + def _write_script(self, names, shebang, script_bytes, filenames, ext): + use_launcher = self.add_launchers and os.name == 'nt' + linesep = os.linesep.encode('utf-8') + if not use_launcher: + script_bytes = shebang + linesep + script_bytes + else: + if ext == 'py': + launcher = self._get_launcher('t') + else: + launcher = self._get_launcher('w') + stream = BytesIO() + with ZipFile(stream, 'w') as zf: + zf.writestr('__main__.py', script_bytes) + zip_data = stream.getvalue() + script_bytes = launcher + shebang + linesep + zip_data + for name in names: + outname = os.path.join(self.target_dir, name) + if use_launcher: + n, e = os.path.splitext(outname) + if e.startswith('.py'): + outname = n + outname = '%s.exe' % outname + try: + self._fileop.write_binary_file(outname, script_bytes) + except Exception: + # Failed writing an executable - it might be in use. 
+ logger.warning('Failed to write executable - trying to ' + 'use .deleteme logic') + dfname = '%s.deleteme' % outname + if os.path.exists(dfname): + os.remove(dfname) # Not allowed to fail here + os.rename(outname, dfname) # nor here + self._fileop.write_binary_file(outname, script_bytes) + logger.debug('Able to replace executable using ' + '.deleteme logic') + try: + os.remove(dfname) + except Exception: + pass # still in use - ignore error + else: + if os.name == 'nt' and not outname.endswith('.' + ext): + outname = '%s.%s' % (outname, ext) + if os.path.exists(outname) and not self.clobber: + logger.warning('Skipping existing file %s', outname) + continue + self._fileop.write_binary_file(outname, script_bytes) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + + def _make_script(self, entry, filenames, options=None): + post_interp = b'' + if options: + args = options.get('interpreter_args', []) + if args: + args = ' %s' % ' '.join(args) + post_interp = args.encode('utf-8') + shebang = self._get_shebang('utf-8', post_interp, options=options) + script = self._get_script_text(entry).encode('utf-8') + name = entry.name + scriptnames = set() + if '' in self.variants: + scriptnames.add(name) + if 'X' in self.variants: + scriptnames.add('%s%s' % (name, sys.version[0])) + if 'X.Y' in self.variants: + scriptnames.add('%s-%s' % (name, sys.version[:3])) + if options and options.get('gui', False): + ext = 'pyw' + else: + ext = 'py' + self._write_script(scriptnames, shebang, script, filenames, ext) + + def _copy_script(self, script, filenames): + adjust = False + script = os.path.join(self.source_dir, convert_path(script)) + outname = os.path.join(self.target_dir, os.path.basename(script)) + if not self.force and not self._fileop.newer(script, outname): + logger.debug('not copying %s (up-to-date)', script) + return + + # Always open the file, but ignore failures in dry-run mode -- + # that way, we'll get accurate feedback if we 
can read the + # script. + try: + f = open(script, 'rb') + except IOError: + if not self.dry_run: + raise + f = None + else: + encoding, lines = detect_encoding(f.readline) + f.seek(0) + first_line = f.readline() + if not first_line: + logger.warning('%s: %s is an empty file (skipping)', + self.get_command_name(), script) + return + + match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) + if match: + adjust = True + post_interp = match.group(1) or b'' + + if not adjust: + if f: + f.close() + self._fileop.copy_file(script, outname) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + else: + logger.info('copying and adjusting %s -> %s', script, + self.target_dir) + if not self._fileop.dry_run: + shebang = self._get_shebang(encoding, post_interp) + if b'pythonw' in first_line: + ext = 'pyw' + else: + ext = 'py' + n = os.path.basename(outname) + self._write_script([n], shebang, f.read(), filenames, ext) + if f: + f.close() + + @property + def dry_run(self): + return self._fileop.dry_run + + @dry_run.setter + def dry_run(self, value): + self._fileop.dry_run = value + + if os.name == 'nt': + # Executable launcher support. + # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ + + def _get_launcher(self, kind): + if struct.calcsize('P') == 8: # 64-bit + bits = '64' + else: + bits = '32' + name = '%s%s.exe' % (kind, bits) + # Issue 31: don't hardcode an absolute package name, but + # determine it relative to the current package + distlib_package = __name__.rsplit('.', 1)[0] + result = finder(distlib_package).find(name).bytes + return result + + # Public API follows + + def make(self, specification, options=None): + """ + Make a script. + + :param specification: The specification, which is either a valid export + entry specification (to make a script from a + callable) or a filename (to make a script by + copying from a source location). 
+ :param options: A dictionary of options controlling script generation. + :return: A list of all absolute pathnames written to. + """ + filenames = [] + entry = get_export_entry(specification) + if entry is None: + self._copy_script(specification, filenames) + else: + self._make_script(entry, filenames, options=options) + return filenames + + def make_multiple(self, specifications, options=None): + """ + Take a list of specifications and make scripts from them, + :param specifications: A list of specifications. + :return: A list of all absolute pathnames written to, + """ + filenames = [] + for specification in specifications: + filenames.extend(self.make(specification, options)) + return filenames diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/t32.exe b/panda/python/Lib/site-packages/pip/_vendor/distlib/t32.exe new file mode 100644 index 0000000000000000000000000000000000000000..bdc9d1ec895b5e2e5bce66c7e9941ee9b7523edd GIT binary patch literal 91648 zcmeFaaeP$OwJ&}qGf5`NBr`w)0RjXFiVae9Kof`Hgk(Yxf)fK1QV3uRI7VtIo&%x; zC!P$=$!02jt-bbM?UkZ^_Sg1RZ$+^c%n+J@qJmUK@wTy2-NWH(ENL=8%z59n&rAr~ zci(-V&-;CT|NP*SIcM*^_TFo+z4qE`uf6s@HQ(4HStUub;mVSe&ntfa^PgTM z=S=xsj`V!S>r)R{mcBl9dGPN03O8^0&RtvX{C45D?!5Qj@5qJU+)%hhxwr7{dkZUX zuPglach+y1ot>RolqkAyKw9e5ttxKl$NGX;qONJyJMa(~& zvHP#Z(}lkt3VY4>6iG^@ApRwvQinwlFZ`wjbo90eK*~p%#dG*Uky61Ik~E)QycX#x zKfQeE-;)-p4XEt-5U^iL8W{`?e5sFms9hr=u6~~_Z@6DZ)C1l4A-t%q)afJs6-v^& z*<04%Dc>nczoHK6!QVCbJC45*|GcPrwh1O3nS}%wDyB03HUbE*UPM&r5sD{u=D`~Uy@zes@*#XH z{RhoAs5aXr#TW6p{D$I)ga}(PT05q#m!tY6jh}^OiJ8I@ZaS7TN0EMx<&`pW}BPO6{FeF8OPDmP6zScgT}p zq9~T(u`4Mo9;vpl$|8GlG*UTdFx=66oOQ451h(q=ne{+ z_wpOECCPov)9w6GH0T0EEuU-{MpH_?p$zi3$3Vc*6O5(#&ZiCz8HReK$kVO);>}jA zIbIh4T%17u9~1QVHn3kEnxfeGF(>7=vtB+AMXE;%J>Bxyrc~D35NSSr;c3=vFYdPb z;;dKSc?e^{7T|wQr+WGrs*_55u!k&DDf%tTA@-qBvEyRvAx1P(Lu=AtfCsG1=NZ<<6Y`Q`MSs&Tf4QjLoS#hSZI zAPgG99ZF^rNI8Q7a>23v#IegoCHo*Y8q_XZ$TO`$nc(gSEtnR33@OeZp@dzO*p((# zHB^`>J(Th&QZ`XagFf1MD9z9PW<_!afrb`HN(`;fU#rbo)5tnkb*_l~swjw_X*r({ 
zIwl2J_($k=8q9gQXj`acnrwTnP`7!Xo91;MI#%oF?_tEa&onf6I+cuWU+Z_!HTtB; zW{d6=tvizqMNX1fE0Rnxcw2|Zht^)+|A#?aM64rcSmVsDELRpOC9!m*yR+OCQ?%6zTph-8*7Yt-5qkxT4N zmH?N~1QrL68AOlysLRi9MMZ|vYA9`nvfEHX!Oe&c1nT)@uz-4GPCb7HK-ehX;yqu7 zOs}%D>P=b>s}5?*Y%IrNMJ;=NR=H$VDw=FOV=j0_ z-7qKx_zm=``OcD95BT{V6m01B(KKuDEb5QuvyyT%3fD7{^Y1{np)-Km=CuJ8Bd@ZD zB)T$|Z=QmxFt^am;pc@2X=x4o-xo-diF>W653@h1Cxq(;FJ(a6G2`Ty`Ud4Od=_#6 zkxV|F3ac5^>>%ecYRx)o%@P!*Za7OV^2~b*t=TpWD9v{dH>Sd4HYSNb)EmruXL&tg z0nnQT_JWiF6rv9GIKQ_SFm({2)xcXVqee+eK0k)_M$=?It`ho_0SMlJ_=bkPL_;-h zQF1_1^tEoq6z10p)VNj23h+82KT!isvQ(*EPW9v7E3|r-oerN-W;Z$3l%~#^3I56> zGW>VcMB~;8gkld8)vv)Y(B|+d;;|MIQ_l_X5P|V5#C1zhy)g5xY3w{8xyq%^y&dx* z;hJ*Spyu=gG~o0!Pgip~>)J;tO*TKbRulI7UIz96n%rayX!9tIpIbCCA+>43$~B$t zw_c)h_0+utuQ^iGXrX%8)^ag~k+|br)6g0~xByYl4oBmu%oYw|LSvWIPo_jaNn^Hj zHEzguPnT>%T`AfP$iA@U;$~#n@y1%<-N|phO@b(P%m7X0R4xHIV;335On#vb z67BQ!Zkg6Hv<`J|o50>EJ{rB4szzP4yz&M~YFXxxLTG`Wsdg0xjJ&1jM9?KE1k`wL z^H{aR<>%u7%zTcPY7-tHK8M;-2sMVXT*)C??ul-hQYI?QZ&~Ls9x{yLC>kmX>Vqu7 z05kLYN93)ss~2-oYV(*x9)aW^z6ySv{zVoVt2!f3L7+%dWPK63vmIJ<9XlQEv9skx z_JPBgL%|{Knd)K7Ew9OMwjPsf%}7PhI$h5?qaV2fS*Ka%E5^{!P8MYY?$@ln){E-r zhT3bm-*k6TmwhN%EGsk4eD0d+$4^bZ=Dh*M8)PKBYt%>u(A>`|fEd0WJ4ASHHaZK=zI&kqFguMGq`|2V` zq6uR~6K;G>-lB4gd}E>w=TvUnGP<5!j(%jf{+S*w>!0cDG?esmaP6JhZ?h+oK)5XqCvrKC}4 zjc45*P{qZJ4dtSMKRtm)*;r7ij{foM5r&58?$EabQgL`lDe!d4@3=ckCY!IX@p<5B z&1Xje*?heyARh(juWF5hB~z3_?jJ6+X^6yskmfaBvV@9u==&fQU6=u{!FW`UxFC?f zEW>^XWIIV<)Q%V<^}yTR?-^1i^4{@~SYy`)=$}@?w<8FgS+`yk3;_{ys^k_37}Tew z)9^%t()j@ZGHGpc)L_oS98|{|9i+-@HF4ly+U%02mNw_g1)evRDSQ`lK-;Z1NmS2s z%&OcSYuBv8M5W|=2IOq@sNsHVKyj7&a<@)ZFB@`JU41Q|w_G$A056RI*!C;d>~j95 zId`DS;jn;=&Asf;vYl1sVU}=sQ{~6T1u6d9(_r@Ltl&7(8OHgg@ljJEc zfiw@f-}1b-HG@S5ls*>K(^)6IbQT}hvkp+Q=k&G|*6Hz~jvpd7IxQaH&)or$;OD=t z6>ST5I9p#p8ib&;^bO_bLBocS3h;isiebawMLFQzd_Ik|TaCIU|0?yZ1w2h_xGQX* ziefNz>mu74%4SOS;cQqhffRQLe$EvdPY0oVZ4zaS)JD7KPc%HcF#Jve8Ixg6TGQEy(2WP>u0?Q7%-%-cku#>1cTUXC73!W?ZvaGmkUP4yPpjMcKQ_nk~ z=0IGW@mvNyqNqhkHybiydI 
z;}olhkhcR`u@^70#b(nmOlP-4_wFtJTvPh`4_oDOwqlSegG8-dK_&I$DbbJ9`YUg= zh2(MUr02waqgZd~mP{qp@b$UREcCpy<@3_UxKhA;eX;k^HLY*m{}~?*^h``+-$Pvg zB8SD<`aJPSz0GqvSISJvPBSJsJOp!j?E@hk`wT_d<+#LYXFEsWEq{J)fz~E{j&>P;vsi6T{L1MQMXeY6$QFXwk^l(Au%xuJ#N9 zMj!i(MIQZ(UC#LhwOC2-uF5C<)0}Jq{GV!sWE=u3xsO3&k>qZI0%quA+ zd*|cPf-NVk^nB>{YhTL3(BD|vmQTs!)dMn~Ia^)sW2J2zujq5F=B#d`wOf;g|9LvN z0!ssXQvtu1(B*AYY2Aki7jG01jRCY5>r?(5&;v#7O%u5aPHr%*L;DWUV!^UPlJ?oq z4n7}hg@Cg2MuOtMMZbuM48D{iP^*=i1$`mi&r2z4Z(mWlv@ebyzcKGT9>zS5U%ue>AHW3uDcHIJ1l_=y?*q*@48M-xL-IvSYQi$7g-;{P-Zm(!TKEIJ8hPt_ywbjP00LEij21OOH0G^F zWbjuXdhxmvuWA_qU}6Bw7J>;<*{0~DWN!T=MG5Q<PjQvra`wT?TSZ$5%8JDw_rUteQ!f8m$fp|h;B+rhf z;2jXqWPd^Gr80|m_M($J3O&8DgTE-=y`Ekr8{&f>UIT2wjOhV>FTFzDq$#f6*&3qK zVG&vY;}^_5%kqoki#?ZNP1-rBo{Sp&n~gL|A$A7!8ue%)Y9*Pvoz*zhiv|>ji=;S2 z)t)KrB<4e*4>gsCwYvAbVhapZ#kC=g4+iUD?c73edSX7VR6xCf= zovg03c~H(s&`GCBW(L_T)~q`w@}B2C?Hm*gU2vF-jWHmWVfusC1n&>gTb!mhfN zW)4zH6>COXr;%n#2+ZO77*^wI_t}ja2j7p5R~Hy4so5bt^ht8HfO*&p@!8AXlW$ZX z!v51NRBlPri`%V6jh(&8n~|>q8I>{oc5L&gC0|pV?9FFy{vbYHVMSUc(_01 ziappjC>9|k7(1Z1kgYEgc?uJfw*PthJ@^6rh7X9}Uh#We{BrSoLi~1!-=p~DAL)hx zv&=l+4fBa9TNA1K6Z!^ZuCgY;yQ;tuza!<1 z`X!+Xk`~B%qeH1e72Gd0ycG7T&;x%!a7jH7JrCo>2t6=`9q#uQg`DBLKn-kxz2|Ig z#9R9qEpi$dXptWA`!asp9{mizt#3J7rvRS{^&kOA+7n0caPH1RT>If!h4WW#8|6H7 z*m)@QEQX@m<8=4yHg~_f!+EGRdIC{R>CrfJy6}&X%@cKg@7EY%Mp1atO@NLNGlhf?ia+|-z(yYn6a(MhI&+9DBq@Xt9+Bn?edK3YB>)fXSg28YJ}kJu_eRr zw%D!1@0OSw?=1p`##O9nI4LETkN41}uPL+CZkyT9?4d!=kam!;aYime{WVLs4Lm_n zu}=`g!t{DiN2c@8tC33k2~C7!$4OV-Pc4f@@k+&B^W|bG)fxGjC|1xQ_G3{Xy^kzV zl=7e`WwE>>bSWrThAwTA^Fx=mIm6#VyxATJZ?WYB=Gml-tw=<=fQRU_1Ac6`-vMOVH#Y1=L?7Z+Ww<><(nX1APL*U z#CZ+!G!~(O7uipo$De|QzTH}3R@;g-?^Bo?u7Y?N5V0DRPmB^@?|g(qj5=GlV~nfu zI_D#QL${kkRpsTf3(T_;fA;EXXZXKC(Fh$b><;fiRmdfo+;0-iTPnIU@wENxn1D8)&<}g55lfGyhpV>37c7ufs54W-F8(8(ElGbEB4q zIZ(W6X;%mn`#BL<2pq3j1!ShgL7wbTtAnz?VR6ZWlru;+7;hbWfMr1=@b%RCZd=Gz+He z?tbbMyB2Pvq(y^TcsE`WgVGQ(8RN(hRhZ1@H$*WJk)sYu?ERCVzCMGMLxXXG9VCud zj})|gf|f~gp*d)j{6vX$#=KSn-)@38G=vV)D8EZ7027Y@3K7uqVIa@-%BV!2pph8; 
z#3OYvD>~F{3U!+nIe=KbNagpz6b7*X_HNav&uMOW0U5i+w0$jLuj+G%NOZ#8fNU24 zw%V@nA5qktw(#3{bcavi7rY<|;}tp$8JyuIRC2eiKUyf~o0RE~LgjJ3vS?7Z(ct+B z6q!YX6lwhl+zsBk0yNR{&O_sN*gReLjT$(uz9kL3f2DDW!LSD$@ji=xeFj zF@hZ?s6qk{m_~rBSc7$Ebt-971=11a0uzab5Gvu0hteonz8)Gm+Vqn<{4nNJgQ?Z% z9Q9(U_Jm-NID3C*npIGI)WcM0}dp>v==zD;9F#WL%#q1lE-SYzizreQ~G+`!WRkcN5mENIghG1{7b7 zE@Wb`LLa~J7g$J0*y|Tvb-kVseLN8;*0Y{Tkw=*+g=Y5wR$^k-E`EZVUi<`gs>ucp zWt<_DyNDRQbxnnQGuEt7sSTlmu!CX(5{Z!raR%RtbThNPL7r&7*_6@sbxm{3LilX6 z1oa?^VOjyX^?H_E2Jei()P{8zUZ);PRrJ$+Xnp3 zo~9KdJg$~T!~zk{LA(aOl4uj+weU<}7YWb7FRhKjv+$B&+{g#?{wNVgdxE-CGc`@2 z=WX2mXdF=+cSp{P#}m|l8+V6~h{w|%;_+)D%Hxj+USvL7q{h}QYNExhDWJ7TA3)gR zL32Lq7D$bwb*?~`*bzb*h??u#CU`BGi`j@k9BWlt&SE|2z}~IBDTO~pOMoDiaMfy7 zxXi8MHGd|$3bGmVh-iEqmOW&5ur?Y!$i9KHH^%-ALMusJDE_wBAHy^%k11C#Uccq@ zNcDfH7e{T&Y06#+n%iD~9BGZ6sg*#oDW}16bYjQ!sM>)|e;&!9c~D6^Amw5s7;&Ba zfn=1;jB+H_a~w=$z`|Lp+4;kGZGg+#%XpGkjtQckFT`@Xv@sXv;h!O8q{!C~*Lp%g@XJ=g*0_MKmd zvS88I6z$xD$^>pr-X4IU$KON?iwtRgQ^7!lcF<58BVpUGX?ued4 zE-Yu;RxdD|PjpmQn;TH%DN#GzlGDIsWJ}f!V-|bIY>BsIRNHCOcG_W+jDG0Kibi(2 ze)-E^zGMdjV#ouy_>EU6V|%Wp&E>sa8Hntfgy12XINEdVh+C!%tX*SrVaxdfX#ZEl zOI>{=%%SG_jjc8#Z#@R%E0mA4zsu+fcSl>Ra0>N$y0`SThd%{qSD4&a3LzY4N1pf$ z5zZHmI$yAQj&2)U9QFDmKgy=`roub;)3AC1#R{xs;4^j_wAxCpv&e$l{HU1#{p2Od zPu+>i&eS0uu^@H*aB@8|)$!-oQc;x&00@*T077{eK#vHZBBI)EA63&Iwa7W4rD>B~ z?YmQ{SbbD~G)2y%s6vWDL2bL!h+Mk8t5v*6)YLqGz7)s=5iTGZWyevB{LH&Xyr!6dCrmsxuhedkIKe6l4M#*#H^@XmVo% z%pvLDXUbVra2I4<(xQFdg&|)@&Q=%S!zi0n0^zF;4JnhmQ|T3?fN4@jK_!iYvM=FR zrO#J~zEDPw1lbG_{fs&^NM2UG%xk-ns2`!c5J+*d4Y4lT?G|oX=L<*hD{Y;_YE>^x zTyTBs*Wg<)q~x$p{vvu4(&Y&tgN8wN9VkI#ovlA0K|{igC^1MQ6$NnT`U8yK?za}w#QUI4 zyU$U&f;)G`QAMcvQxyLu-Xh9p?K!Q-oUMluaK2E5!E!A2bG(lzW+#eaHD^)GJ;&xi zP)DAtrcP4M>R)TGvS6IebiS~FOo|{`PkU%C677|qNhr;-$P?ds4%?1Z!V&Z&`ptl) zyvwdO_t;RtmKf^>YTXrNW0m+_uo*O^X%&u0g)5@XM>*fv$Zja#sLd(&dT)ageSyDA zW`!3qEJI7JlWf?d+uDz^camN@7lR;DG5Mt-6cw`Dy^EELtir}eBL|Aq>g;fcz7OU^ zA+k(K^&m+q{h88v#3Rn|N|cK^Zdgs)`K}lQxxo+(g4f1jh}ZNO{1I3+SX^uVfRja> 
ztqr2Txq{J44j>gaXKOumLtzS9R5^{t1RTGdtyPG8sSUVcIIznXo>r&B3m6#>AY%je zY2+b9Rbs)sA!z;9-$e}+X#L~+i!^&J9SNa%Xr8xau zSp^w#Gfx3nLqlYzzuRX|LsH8_@j2iMc@#@WQ?q~qHo*Spt<%*5;N<^pu1;5 zJ7c}(TwApndfHCRwFNwnqBn0!Ot_md257?Fh8)PwZ-BlgMtr1d6DuPJXMQ&-9kCBC z##lu5)=3TS=EF8 zi>hxLuKI)jtZE~#4JZl!>_ojr9yM7Udzom=+X3;%HxP2HcNkzd0pJwOFh$0KBCQ1J zAxPtdnoYDa_7atdVmWB~9%NFp1>ItILuWzfc2MVj4?Fo^q8!*@TP`SvZo)j$UA1~< z%nK!Zk+w4z#4Iw2$tDpojZ@k#qJEHCX<#lzm`I}S+}yS6s05Q-*RCZ9S9DheNrXbM zPJ$E=JRzo(k($&3z=IP8fgSk{z;{)v+Jv68?w_t)B_6G-%C7y{EbOJr# zisH{=anL}#cSgL>GQso#H|7yhSe*X=BLD_TI#{$Un45w+E&RTT#N7=vFm3^z1^MQM zh@_kbgkUtRTmt+LG<>k@{}_>TnazN0HzU9&okXm=A2t05@oTAbP%(c96^VMtkzhH- z1jfdi&L(@16XmmAsFQr*ovow+v8p0a7sBww2^7YvW|^VGW@xS%dfp7pH$y)+LtZoV zm>H@xL+xg$+6?V5L#xZEE2)xuC<5eqgs3D`;+;WAiwS7LupVrflU_YvK)}fv5=KGl zr2u6_96DEGJ2xTk^m={)dVYgP40JiS#SlWpq23BdoO*7=M-(y{xdQy-Hxb2egH}!> zKy=>>$P)}iygT@K^Bq0-RR(xZX#R z{3C=h;fl)mDvOxWHUx_oVc?ksmZ1Q!>b|6)tWI+Gqxop1$=!3W=I%^03z%dgarY+_ zyo$T&L|i)mZ-^&%+T`*gA9|uIQp5S=*TLb9_DF@LjFg;hPUhq0!4Vt;S*a^q`5%x_9*gR^Pl#V+3^a+`M_Yio>m`7TQ3CQBIS^&rjb$OWQ(-23Lu%j}}0^v;czpaIW)3-{5qgBSapwIIPmE-nsdU zIlJCCRnL2lqhGJVDD1Mg4EZPGRKf=WaxF^(jWP&QflhPkO8UoZR+&8kni_NeK*bt) zepDemxm=eZT%OH>dI^!;d=?h8Nz_DGXj&nHaERnBj2-h3NiNy}g;23a<_d&MC*u%F zw|;L^J}(ecc^tl(PCny2sk_kLsX?_(nY;n74djSDS(zn>QP0m|^N%`J83zY!w5Wl9 z1_uW-7d+baENy`fDRAL6Bqc=&w$-6JpXJp+wiCt91j8q0VYDm|F`EVzg%sdvlt{Bd z(edF1yr}W6u6x+b5 zsjClE4rM6Qfxy)9dO_s2p5c_o4;(=TB&JbfgAL1fR%d6cY*++ZYx=Zr*w_jO!|xq- ztRR6FIq!_9wK1-KWP$ssGyDk3DOTdFF&@2aYr0ia981=C{jC}n%0ptQ%M?f{$ z5dIxC?9{nPtH3+M#e9QWnuep{kRgWM8NPt@y*==|BZ+;Q5X^oS48`U>iQyHLypk0C z&}pr9{9X!y4{EW}5a}XIqU;R6T?CoeWhSamwNaOvdy21pgc7?WANJ@sf&Nd`s z)cc{DZm5IVZ?nUAM?@K$T?mdi+{R2cCmg}%ERXoT_-JJ5sPnav>Rk9wMKMET_`o<~ z2vAQd!EUvw-?rz9(XsCtY7t!UC|)b0_N`hA*Q~w2!W&jDG>qVW3J0{gIAU~9M=sP$ zs--@O#;-sNxP{Vrjr-M9%H=Y1?GuBqKi4YYgGVrd;#6S*`D*0Ss$B3v?QXIKr9ucH z8ko>Ho)IYy6gk@1>Kd-564CmJncP~>)pR&CSD?UCE?V%N%T>iVBn3Zeb-Pl;dJ zt18-$4n(;cUTO07o*4jL4?Mj6y-!2y3H-ScDJhIdx1l%4HaMzHy7_U!JqY!Xc5+C0~ 
z4IClkZnvW}4Z`kr8+*vEZHMc-tN0B|4K9%wl$_sx8qf?Yf*MEEu-zV6dc&Y<2qC+c zHTFdcE^Cc02uRA`=+YYO+7cT{lqGijdK&CXef!dMNRcrlMbd4mQFvcLsCf)YkvB)k zhw+ihJ(cWDy-=jNBDa4@c(e+n5uF_8E)v8wgOSP`aWGU{Y8ixlTY>Ebi?(o3tGqED zn-9{#yjp^XGeYj7kmc7waav~4mJCK#yKywY6&E8sLMG9VOsKt?~Ty z)oqm;{)U{2o7J4h#6LAlpM}w5g6$Slq6s~9{E!nllq>)Nbm|gYLQ#mL9y@!$rfs*O zLiPX&24`!8M#eB_-e%VTP)h(>tv!L)8j~}#hN9rgHgIY4^iXq_q+|)!!>IWz!^5sH z>0ri=wn|bO0Fr%$K{FmCoh$OPiR!W*p%k6^G~;FnvhXoEOUu-jj-o*iS*Y|{Ptv$X z)QuR{eJm%k-J&fBA5$iHCeb_#azob=%7T)v&Y&m~tC~Hs&?fdrj~@>{S?`tB8}raU z3FlW`+NL5`1b@XnxQ0S(=-hz2YX0EYHgFvMf##CXqcO4J0O$IBl&=#C)p`wE{7{hB zqFnaZ##$VQJ)YPX$~S$OvFR|6j(gU#v@V~4t*k!usT6q--rYlTHgqrRn{G!5cfA6p zT?V&xyBU@!?89XPwv9LtNNiB!imc#T-5^^9FLSQdoL4Uo$!jBvrHE}6Te>SZQn?HJ z=e-QB^hB~;_AWn))dwoIggbV4A6kUjke#Oe<2ORh@GvQ2X;-5VIb@;`5GdTjn)9!Gl;3Mcqhqyd$CpvMbnt4F&QTa`thS?d7UcXL8JJ4z9M}-U zOQP7<_;@763J?R++ZIRFR5p=rIjb#_zFO7~j>pL=|ci5*@XYJKic$<>p2;-(Z~ z?50C>yp0w+e*?`LN80Q-AY^s-SBX=@xISRp-N|bemYApgq4cB2JgMS#f}3&Wz)!{D zpJtqRwDYIPTrE3CRL-4xqmti$foHtNzp!Nx5(oBzTFJq%6ihg>!0(JGqHK1UZ$p-C`^ z9&sc9gEK!ufe9P2oEOYT@QPtLm8v?C0HxxN0&m=sg!&~Qa=gjT*9h$K@?bADL)1=Z z6SBl=Jq2}j**uDUOK;MPh4x;dC_AB3x>ybdL5guwz&N&?Pr|5Wdj!vp57Ltxw+mv* znWAQjYT#3@hrYb&Lo7m;)Q7+EniW7%<6ie&jru?9vIPw9^M`+pu)HIr6+-E9EC*|T=1UPJiklG{{-rIey zpV3tbbn?O(CKVYPJ{5nEBO7f)o`!|veweN(@);4?t0^DA7J;o=^;2xKe&89{QqNnF zRGKFL2LiVlH}Rba!aRE~9$K35Zlchjh)oo>3IX(GY4cenwba)m=TMo=r!`*>qZ(NA zY5iY=JJ9?Cn~4T%F~AyvtrQMX_;v~>jSo|RI9yvKWu_*Yh;~2oDblIi$?*Ygr)1fV zghwc$Q%vG!kAr}M-8xa?73M;$xaI5D$yjpK!0y5mX1EhU^4$1-B1k6U&?jhgZ-7q& z-v;=P7t`-J_zMI$onnU9uK0EP={cAUn90ynDnh_f*0z5^=Ps|QTn23ay z%d`jVY%RxokaMr}`&=uOmsj$^1NHpg3bMsHFc`DV!;wL32t!brcOBgT7+g5gl{)X5 za`1_`^9U7O6Fh-(f|o_3XAPI&OdfzKGRp|?4PV2dKHxZiYe|5wL_q!2W9@3PzBCK> zl?0EYkm93Q^3GjINYL)YD0jb!#)sIbMHKvN`-4XT2Y=QjK;bG>51KHTNnu4>4MG8k z@VpA>F%u~0Dzr$!Cs9&8e;Zu@82!ekJ|k}y1OueupX4ICRbWaJJRo4Gocjfp2d>_C z9cbz|Zo-96&jV6S^`Jc+lqTBCc?f(;0gz=%g6_)LAd#G32SzasbyM(0NBhXTgI4a; z{G^zgM*;5rDhjLW?DAfNU`dyE 
zF5TY8vMFCnlybn}c6?)L^Bd0AOmGkLouINwCbN>rN?XL{!R&KL01oX)VME=%9)L;u zD#C~V3Gc#A9AGs)5l2LzV^LEHp~A=>fzj7SV5F|IOQCf|p(IM`x)Pvg;xx+oj_2@} zx8UnhT1NSIJfWvC@kaaiVPogFVTfe7GR2SsbS|#8-3q_qCC}KEtXd=QWl#zfozIjW z)kP9|b8sX`yAdkiMm>%GQE~LTXe0O5EktSv_!iVu#xCcl4RywzvwH z;R*=ZTQNf>aGR?w5Qps4c#1-$c=Zgr1jvS0`xUGz6IjVN10y|?;JVYmTR0PU+3MU) z$G!~F;YQv8v>Ef(C~y%SDNQg6tRnm;@aRoUd=NAwFK>B~M0Z2jP_8XaoeOr{It3fC zG9_uVQ;TIr)*aB55j4O84|rNi+ce%Vl+bsjNgvXP1G?;@}@c znN{j@$p1ymqGoALuHYUTGC_Di3*`!%gx`u`h`~(KWL>K@_jtV1MB}HT4bi>3B$X~h`3TL;UC323xU`im& z+8>}Xqo^^Mn&?iC9*?*I${A5ni6BJ8_H1bh%+e-718BzcF95y)4rEvxCusRBtHvb< zs{HlR6?SEmpWjB95z!}u==%85X1kJ09*pRZuP2gVNyGW*=0OetB~|IIq?q<3I^x*? zKPW1}+QPhZY&3vyFw_G1fEyHiAMk&M22DNj0H$B;G+X!-D%Ttk>Ur1R$FIXg;ywea zVNEm$OT5;IOKW;{3p~yWd7OcOe)7eU*GE)1{E7T31*PN}h5yoj+ajxxWXn?b| zotnW5ovp+mD$kMw)$$^h=gVJLc_!RBxXl^fizHli;S9e=z%>q6w~s89QtXfT$ynit z(Vk|%&*623Uo;`Sv6t}D?dug_R1=cn{`DB%=tiraAv*1hW7KfV?-|TG41$RJGU!5$ zZK$CzQG>3ie7U?@iVwidfxLL)GZa%z_~2@+aRrffEQwJ9}d2XKJ@E3EGVT7m5~mFkgf zl3tEYf0syqXk@xwq^C&w)TZ=cfk^z*fFBq6;)Br9V`y6*T5@`ts2 zUNl#|G$h+Rf7?1cGIwia^>1A)3*DtYh)Zve3pZGJPV#f$l$U7SQD6`O{n_=8q*AfC zcoID+Zb}{~x53{=IzVu;8};GVNxSz(=h){g3Z_{E ztVHTDq#MS^&|X0#S`!65O?0tx>9=t$03Enr!8R4y@2I6aX8H3Nj)ci!yLsM+<{#RR z8o;@Ze}=<>Fo>A$mwXz+VDQ)gP{ow?jOuJXkJo1)qR!GY1$vdZ`9PRW)T4BhjCtt- zrnAYDXgW*9oQDPH)_h~DJ-}u1L-X^^pc-(%1)8L>YA6bGinBF@7o5F+Sh+pGZzD*W zX=Vrb0`q-Ho)+LY;vG6-ATnR5hkP2sIBDNN=j_u_ow!?}-D@j)c>cpmI^7sph(*>; zaZ!zuM~%4p{u;_ED5c{>yv;X+nX9_V)*u&DV~ghSzzJyLm)ojkxqWeJ=whb4 z3CDKiyV~r`={0IFcZx}|t1>4wv!rnd_o_MDm*&hMzzPCfkOT}h4_N@5f@%RgBU337 zkk=68c!JDLLIR40B%rRz#K{6)i^lj_Is}}NcN~(8gzJNVU^N^{!0yd>_0+hW-@A+A z(k<9(uS~P`OCe|L$B+l8H^tdH7WH~wgVDF9FI0+4uR?ZM-$R9$v9SiDrnM7WYcSw? 
zPCL~TsG2U|u*{#}Z2cby%*P>}Hfu#Wh#T8OiN4-y&(ZtFManI7WmRT5rF2;{;RfV$ z>EAYd*vTeOpeEdGY+Q}El4&~WVG-<8o4F+4%F_FRY_*&VyKtubQkONGrMK4@Fy^P~ zGjK~D7CDsgBrGaMlel_qpOc2HOGMiMUjckdeJG ze0|gbXrnVsEhHKq#dAq}R_n3*(<)-S5WpG;rx0*VC2o6Y3|-Nev&8vAmN)d0zn~m) zmA8*^zVNE^kVZk6{ImImqr~`MLstC(T*9o_tfzJFFvAp8V3pQZo4O;TeGGH9Ezg;O z2&<9|T~kqfS|2@d`X{hf<6;%u8V{u!ErSM%o7+!v#5ur~eUPGb62r(lioQn!K)kuh z`)(ft0u5@-c`}y6rc5q>_dkOuTo{CPrZddI0H9AyHJ@x84*hlFUL-4A@O!nTW9R5y z;EEh@#YXi%XA7~K2bo{AZ&WXWGu}m`@s=$+H)>fG6<#koKlVH#p>3$qAy4$a$+!|h zl$wp(HIdqs8T&rciH%zERDCX7hb9;$g={HTG582ZStS@{MUqiwZd9A&nexBkkm5$o zO3V@`W}%mhc$vY{yDHPJ$<)6_%#&^chXwZx5sf!s&jd6!{PgekZzDGf&bcA$e+Y&dnXh#nlG{D8q5Fwhv8;_fmdw~-6@t-pOOpO zIZYuI?elw4CcAO&{=E%zpJ1+YzH z*JyLht&^c6A}el3_R-CuV{xza`&o-j$2R+tUSc;r>;g={l(}X)m za8TUVcH-BTFuY)f5AdVl9Y5cj3;NMwddAFT4mF-*{yV{a)Guc zAHG`9Tv9MCf_c5G63cvZml3xp$=A~Im+S&4R~`>LHWm{t8yyCUS2yO<#~##;cBx@R zH=;e|%S??5Q}n-eCRqsVuGgSv$PG%tR|xV>K-z(UN#>ihn#qzp7d zjHG;`-xUU_5Ae^HniQ4&d&$zM4q23p{t!u7i;|@dr6>jc-bz%DgWyL1+GN27%=V~ZjSsb3{@Ll-Cwld9~f=c9nM=n!-f4I7BK5~BO4 z4@c1by{Jj|LwH_MPH9T`65XE#0OH>q<*8k)OWjj6;n&tnYhX+w$o>9S-YZ zsA~4oQpJXQh0Ot!iKQr3_z4e{%OnRcr^1W@vpS?l?3{?erqG}8!j`Qg7qA+Kwrm}4 z>rpS-o#6u%pY2D*tZX^`p0r-{yxUxK&oKwvxmsP1d!|^KNS;K=3$1?!LY`AQ{z8|e z;t;|5GW2-|KBiH%UPV#*nzB8~a0`W(mHi+YCJxpYl>IOn4pVq`StJ>Ln8K6G9!Z8t ziPLk+{u_mHDd>8#h8bJeV^8}OZo*s-`zIP%Yn*~RLW5Sd4kAQ&L?(~(rSj>}r{Ee% zw>x6Gfi*NAFtI05%=|LhvM9iR5ACx7zE~FVwU2X6wng=P5)8&? 
z^o0T5jtuN{1#CzS@(oyL@Xt8dP{>|+Spaw2=FSuo5hUC2zL*291~dtF+pHJpJ0>p~ z0wey!PZ2+ZkdxGFaXcXe`v@i?bXx=3(P_-1`uR2iMX$8J(a#EL4fjbLRPG39>FjUt zUhB3{KKRMY8Q4fveRi8R21Yx4bwzQJ|NP)@r@?=|3Q_ueBOQ~Jy*;fh1@>4+`()<} zow%Q5DTy{9J;q3<9bz1zt8uia89qlFzTHC(bFS7ixSS7ueabIFmkZ_8(B%x}Ur~~+ zYoQ@7*+k2ntudehQ)!`Wn-5p$D*eJT{UecZ0ts|c9}t9&L4=q?N zUnFd=p@rxaaicZ4Ve)M8ddCKaAXwWg`b7~8|*dnhG zINE{2lKC0(qVO49?TydY0C+mTi{uCzncWVrXNTE11LqBrE!DGmWdg)nWuzl%g`+AK zfEJ1D>0K!jQH(OUVmXLJOV+GHaaK$}hUJP!Cz+A!ODF9{cR~#OEA$X^Th)tMJ5bj) zsS>7Ub5nIM-9W0%vthp#=O@$Awvv3sO1j#LK~kdY>LSg3JQI?RPKzILl%dvJ@4F59 zJ+{2=Tgc+kPf{W_Cl<*!;bR#elQe0-g%j5$vRjAh1605b-)RPUqPNi}g(DKszI z+7Z@6!Bl&zff!xzj3ILtvI>5=5apf9Z1_v#NP+C=P9+FpHGR4{iL3GZmCUA;mHQ|G zpL!4_U~f9y0w6ktU}hE0bFU(bb#E-4BIkI$8*zm6qWSn6o7aQdV7Dmlt~(j``s?fJ zu*^5!f%g%p@^wG2`l}H2g9uHfm3CPqaSR-o6ZcAo=%*e;>blR#poYO) zPqSbrf?T}R4RJB3>gr!v_ObW(@Ycgci`{Z|xKqv;II)lJQPtN4h((ONK5CjW=*GR$ zdA)dd_nHUY=y_g*VOjXN4~)_{Ik`*{ohoxR(vQqs^lV z#J$q$j7_8yC+93!a>z^tYQYPrJ^+U03oAjGH*tWhTN%Zl!+OM223R+HSHBIHhD2%x zVeueDNd=@n?S^$%EVw|Z3_st8JPrDrUsXwxx*sZAfeVMQ)XEL66#5@I8oXhUcM7(4 z%{5m6uSPj46v6AS;Lye1iIL>_l14Ha5YR|$!+@qnvipFL)B4e?7F}P_NR(^%zXK8l zbmC#Q)+}I&Spb|@*^nCJ^VP_8-z}ChN>`o=fK$slEiMz*-!*el$wEZRG zcMg7W6TcXfF&Ip-aJ!b%hY9etn9s2#JaYZE9bt3*7CQt`arHavhMhDr*W5)?Tn!z%Y*FSwVh)@n(S8m88A=33bNH?=Rh45_6{|o! 
z@Kr37N0GOa>9DgC0t}=gweN1S*^;^Ei4;%+H^8UcU>Vzk0KXN>Ugq6|`=YDyfx-e{ zfNuf%S(-67U5gYVXygro1u647zynUi%Jx=VtKdhBkrzigdqPl46o88g!t;@nEK<7` z!uY9$D-moDQ~PjVW%w>g!}csDT8-~&UCg8@DomBJG;6ndKdwN_YxmD>ThZ56^LKJ> zXo7X5&#y0W9*X)oee07Zq+3{o>MVZET7&u8>pAM&^)w0wY?wSmpjJG&NQfU{&ZbKu zD-3~%J`p;I3FPNHM!-Y>Lw-3;skmCK7Ph+x6;zAc{)jG#+(sS=UQ+{IjSR5gh}QZc zS^E4eOkK$&kk9mjA;Mh;Fea*I5n?23v~)iaNDC8lId|tFT|G$cmvC(;7)gh2NT2Y}2WKy*K z)IPJD6M%OZB93d7Ao50!prkV#qtHwXYa*Smz6#hQ@Sv8N*g)u&iu#-n zUj$3Wu0=Y_X&+-^*|yv{m*r@c&XQOT65Pj#1;EPS&sV@*%p8THBfxQCnhz7)1#|DN zLS%t=i{K;{AwsB66R%0df;S@JY6M{Exv!KxN~wnEs`k<4_+}i8B7$07QnuG*4BUof zzO)>qLX0sma2&>`6h$e;xS}W$o`rZnn1{+WN6D3LUMTP-2#cT8PNHzP?`QqsE@{KB*HC7eP zN@>bowNcCWdR5Wpl*D7B=JI-51kVgn`}Y6W+xJb3E+!~&rG4KJafHW+_T5VD^CCqw zXa|8rzS0mHr_~n?V-0oBe6uZRgk?WLTHg>&!8L5u25Xl^j2H74EwFZ(EqDfEZ_6w0 z{y-qOA{%Z2kzO6+H4>q6i=*QDR6mR2W-!xuECw$8*Ua7T;qHWGyvrYHokyAf(Eyem z*8SA0oJQ_92hE@AWUXOyj0{b0ORp;ysCv_t>j<=kz&C_Ygb|W-2M6*-usBSjmH(dpLB^b z?87Hih>1Ma%*)R`n?#4!i{vc(B@N;O06+maJPc74R33nou3cSj-n7 z3|e57F#%t=5qFj|(;E<5s?I}|7Mt4R73LNCL{#L(bucc#^YXX9N49QUE}o_uGq-T4 z&!$3r)Pngsbt#q^5c@1jrz3Da0=C?NCprTweY=uW=3kH$#(AJ53 z`~MKOZ?Sg^-6#b?&ZVBb=6H+^Epvd zR(&15&jIrVxQAbZGR(~^J`t}>+*p?U5~NtYAlHq!mv9!p{A)6muSwS4iXdvoEoHc8 z8wk?U5eM~S-^P1enu$H!{~-&hOlRwi5isNN9;tEE*45+7#*v!b+B*0@;W7YI)c_fC zv1{xMuc0h8c5GH1)GYc-$;IRY&M;k1Ny{{HYQ(qHcCE#enpF-v3z`+X9brLh(hNsv z(>sJVjZZiwI|62BBHWF;>Eb?E?{F?kqb)-E*5wQ-L_4Wp{WPYjZoY&%8t{3(;{n+2 zbF(^>ndoHl5OKJ1*n^!WyFQ-1_^#J_#2;l{^t7JT7vX(1VrZKIFehUt&zwAW@^scQ zrElN@>jf0Ppp`wJKh`v@8d$@_a*i! 
z&bPcr^{hGUGQBHW&i`+}a_}rP6!VT~^QvM^T+ObDTTabu?GtyKio1oi@G}H2t{LZF zv_rk1uN-{%OJXA0NlH3---Q@3M<{W*EWUC;7mp>sa$sZ%PYdytgC5BH|JS~9(1E?} ztUh7bHr4{6KsdAxTmC zpn#U3aqlpVV=|;<;^PO^^znna#a(g{u5-^7_ge>$njyZG@PF;o2j+(laQDjp`KJ%m zr|Ci+Db``fu=(=04>WP1HI=&;$q5p~9>qHomsNk|+XoMbQoj1_gKr}K%EkGyIs*I0 z-##col6eWcD>iHRy)agY_y5he55BZ;4m%vq3ZU1j3y-r;$ES#^ii38@4iY5psm?(;(mPRNbAOga@qNM_&1zs_I z)oXrb+T453CT$kfM9p%ntSq&x4qZ!1Q!I4%HIa|Ns2ofpzyedt7Vn zwa2vwnmC33MVJNZAYm9g$)@5JEPCn=1T7uv1dQ<9Za?4w&&KTsPd{Ae_JdVC2T&a# zZTMIYALU%KWZr)8J!)O+@*K3&YL)0|R%pxf@oV0GFdZUf;Y|QsRHkk};JMDMt83kU zV4Ol;eChQEkHWjPI2h6Hb^pO5 zaMMpO&ntTE7>H^3?t`bXWS;~2)>z=B2Wjx`1CuM|Pm#Nqzx&`Bpy_qdIi}J%wW|Ce zKJ>c}lH3lvzoo&u4}xpoeK27!iidgk!Hb|KmA%*M+YczHwQoPTO{ILv;UQ_ECS-UE_q%;S1Jz;-+V9ze$1N> zq8RhPc=N%E1HAcQMalmaHy@0LNSikwDB&<}KG=XP{{5Q|WH zVvx(5wAXAb%9}=e2p*K01UJWo>F#~UVYG_`qG2LSYe8*Gu5S4_73_* z6^-q!w9lW=hqii8IJW#8D5|FeT<3Cx!d(3j{|Wa5i}A(5A`fr`&~tUL7znrn@I2hq zfCZx}$lukt04smU7hj;-#L~Ydc_6oXK;u_4s~;1~0p?Fb+ec$K>=+Ug!~5KltI$zR z=BNyt7QK&ip4)CI8bL3vR^|W;+PX3yTA}U4`dz=Jot;s*ZrnF=h`${-mA>ydTd)h` zJ->j`h@f!4B_6>zMt>dsLzRZE4kE`);PMSHxdU~@9=u?m%+Q%OY-wtQQ?84`ZO%bl z*ofZt%&yec`Qb83%or7WzX!kAwC%9zDnaY=dtk9NH1*hk1Jll(L;P7c7Ke?rj1XvK za<0+hv3kO}9RtR8Wn#}BPE@(ECfnb@YEC`ZzMBiGlvL+LHs3cP@u0pRTfo zD;$T;wCK5mi;*q0vWohYbNx9F#r369X@oiFP;eWrIzy@5+lFY)mTyMrr6UR|kL3(OHtCsqt+r|axKagG@)u=|6<5j> zS6*6r30KrwC3t)lT*3a{(8H=+;{&;XQ9tt7Jw;qj=jx_zga8%(ILRE$ah%i=KpVH1 zR{-mCU$#Bl1;MpqG^o>EZA?}oGG8OIRwT#?ex0#5AulgrzZ{m{R)E^(L&N>K8uxo; zwWB!k7-mq^e4@7QPQ94x+FP)cvkq^=QaXC#RXVyVY>ZnT^whMTKmunW);9$)abitD zUpE;=PcQA~!PsF`_~P7%SXI`cif#^sRWTn#k1k^!akWxjS^B6*RQcUYp+@;FgQ?1| zh8I1eTsaqQ+7Wf=W3N* z9%pV(DfS*2or*(s;y+e7Je5fzKUjK^5{PR_0fQ};4U$}l81C;tr{jGG1jrB(Zkh^CFId?KpJl(&;Da z*0~%kAB7NCL}+&+J+4j5RwfQUQKqjpTbX{^t;+P*VrUN5#;H%67N*Ru+Hhs|)CMZE zx7J&kfm&B(4%FHyvz_Lr%pqEHWe(T8VHRu;mJ5Ql^Q2@!nD&!0BeZg5j?<1SGg|vp znUl5s%ABU{QsxY;RGGJHZzwZCdr6te+H=ZG)7B|7Q+r66c5SUP7ivYyT&(58Bnq4I zAZ1!MZ8t6FYqEMRU9HO zebwu;eD%R=!S>$f(my>@?1P+*f_&7O@2Cg9T1Y{_HY1-I9e4yc&i_?53$P{Uc4ZT* 
zy-S-qku1fc!yumoBg~_EEWSZL$%f5J-NrA-C(W=~sN48s#kgUUq}#L$^06B>lXV+g zkk3NHW|Xp7&^2h#Vr3Jn1=7aE2E7fnND1Z9xSXb~cJ=`^1zi-Vrh&Cwd}5vtQ_$Ey z#0T%YrqbKj6QcJEOW99$3d-PO$#l-qd-~*kmhP#{b`wC_mNS4jvpD=v(O(@I~fLBv%{OGMi(+zjUKkGhS#mN zP`3n1>++iX#zeF?rmijLwt7LZ9mAcT!#o^)!PM@<;yoPQnx3$?FV%d9g?Ktz!?NYD zcrS-9v?2vty_=qJ9B^Wb!?rbTOZFctDDl{KZYwG!VfSC@iM&EI-ywq#=*YRP>`U4= zBst;0_&0)YDZ+T=*dWr&2$H^&nj2T8g!%JAy8v&{SHNu~V6Up8eM> zoT#C%#J_fGY{7Tdf^V%s7jKJmSZ<4(SzZ-@u3DKQrxh06W9dCsv1au;{gmu_|B3a% zVvV!xO}ZA8duL%MvzOOSTufW}WH9chEj~kcmfrv2H;v)?TR=)N18hU zC4kof8v)M(o&Y=uaE7fuQ51I137A;EaE}J_B+QdAD_~Z@JOlF#%u1M*FmZO_Jypft z#UNdHRd`hz>~KmFT?Mdg2o~)CDEqg?;nFrz?EbD(Z`iP0x^o2VF#Q(W`{TU0p;bmE zbgjvR78g!GRfTl(oN)Z$^8818#YtzCo>9K7cm((d}yGovLXj@*pUjD&%Xd3x615 zF}2_~?<`00omDI3f^@8UMf@wVA=Mite)$3Wq+t`Q98oKyFW`Onu!$EOH?EAni1+Tp zCSG)OS{aR7qwT)K&~Iv15MG7zP^z-Fx<*{<@H#h{R9uA#Zm30yo7nV|5H{sf#CIO|cpY^K3tS_#hy>Lp^ zP+;f2-cbBhJQ__`bxQ#<*Uv!rSnXqYkUIZJs_DTWv)}KBE*^Ih^usreA zT!&@nx~>@qspKy?#&7J@G$HkM7izXQNOk8(Qy5?{aKV1vn;vS zZ-<{zW$6&zQKqKR)L`d_mmz@aI_q-v8)Jw3SNpBMep5I!Sic!@yoIQq1v~+G5U>VN z2*?901fWd(8|pVE=9DD*2tWf=(Z(!8|FQb*_UDruC0CB#rH5ah}fXUK4`()=OO;pp<58gSr^W*&5m@VDN# z)w$KJH)@=(38b76!@YJ|H;4Ge#9BAcZ#3Y*RNVdUzvAqIwmgwJqIuBytR~oIgQJyy z%W68RprmKe$62`Ds&?lVcqx2RFDAc^T1Gz?bRFB{=z^0t>DoLDv=WDbR-gP?q|Ruf zr(_`@#J$iS6>P_hx_t`p>O(u++D(^e<-%FQAZKvbQmx~;eFev`1zjC)fz=Jw=ZC7} zL$|Rivp78l92fyTAnGxY?sg#5fRcJb=r>;C<`w;5zzq)~MRgn?5A&$f?!!0lqrhph z%L;(7^UHvv#i8EVU7eLPRr`Ba*i_sfNmIPiL7|8SWXhVys=ISvO$} z0&S?{S{%g<+hApT{x#j!-oNxWPn^N_pc+F=U(eGCF`m}ep#zWAKs&r~(jH!?ujdJc zV9s=IfI=26GYWxyL|fmIC3H`X%vr20?86%XApGl|PwnkOG-LlFEt zCWpV^au?MaShPxv&E3z|gccr2ozP~$<2LPW_%s$*0k6$q4^MT#iAfP?TP(?yjPi%) z5anwR0g>Q^$g68r%==t8J=py~F0B*Tr8Q!#$v6KU!QMt!e){^`(M;(HfWGw!_*$RL zpEUkt;-_Xwma_%-ip36%_Qq38siOoce zL)*t>wx!yV{i~2@2yj&=#o#nX?i>28S?)M2<=d9(IX$4%-Wgy4Bm|T?!n73TzJ5xi zLmem)k5ct6QC!C#*X=TAhV!5{6HLK$$_`8iyKZrAE6}X=;W!e=9uR?Rr|vDPyPBjm zY%t1n{o`Gtw~jxqYu_c!>-ghp`Yx$n$DgwB6G#!qR5b?;XrZTLxy6!W*w}U$wnM{@ 
zIU3_kz5uLr8j2&IFd(1%7)a$sb5#YuS?opPktMC2QAfBhZt9Gpu$DL*?uhGi&3-?{ z(_Ew9%7~?-%t?rmQ;tn{wFYN##AU&f6FiktXy+ec7AUvch_!Tvn^<9y)xM%J2?I*B zub(kRjuNpDe5etD^~=?#c}g%Uuc_L%RMtS43q};e5nI))E4U>y`N7j@javDmqoZHH z(+j*buHRV&CtP{PiE=;KMr(nH0|$~Wru}+GBR`JT+Ul0c%5s%q*+{n>r!3bPmOoKT zv;IMRLH_-c2kOydsz}-22m5vW!Q8`yq3!9-J|#61eTNg)FPE5s`u9H~K9xXpX5KxVE86xueE|qxQ04k7+#os|jX$aI0r9fW@*kE`9uPafgEQo=ck- zocD70t!s`=e6F6#C~BcT+GAPRE!+s3lSC_OeT_qctt;-uhbp%?JJ@>nN_^pf-a8+LKh)DP+6MI& zevD_}{0%7w4cdSjGbfVU6YqMQNbulKAKvdWC{~;DB*Woew`VPEds+1?WMqmq3Bg{o(%C!2-V%2nX;pVmo^pN#9ZFS?UJcT%= z^-+f?4~*4%>j~LAA~zO%5UaK0xAqL;(xX8sZoh%e$9=FW z$27P^_-nPh@KuA%V0;xd!rZd!07PRNMPvPXc+fXDU87Bdd(AHE=YcIw0NPf){I-pD zW3>l;9%)N~k~Y^qeKXT={R9bfGm{nD+OJaE+IK{2=dPj(2%G2B;VmBa{%m#U(h{4t z+M&uoi``7;8b?Xcx1GRs+4k1j$4J8&bpls`zkxRcy?PFf=m1L8^5i~NM5;G%1t$YB z1$j2yH;*?$7tK6YI1>kn?rRRpie?_NHz}HVct^PObu}#uKMVmEg)?O_?(Wv++YklB zw}npTMrw93O&R6;y2!)gp`DJFSl1BT7&|Y~CfL36qGTTC4Uc&RY|-MA zeIxAMwN^m8$iLt!sZ}mhvv4X>EWpjjC!#GE=se*P0$1y^r53vdF$=`}BaXjAG{NA+ z66}NXOZ8=Qc_W|~pgq91Knr%P8@i-YZCI^vbui8Va7xGN@GDO`RKL4@7;{8wIoD2;s@49h}u3A-wRqA{cUntIuIAT>bEZaKhCcFK`vE z_9KY7-8w=vf^mD$R_h9@9)O7=Jsi20hqo1F_G_IjPGC>yx&2+D{Bd?pW8*NsXM^oH zh3^dfc;`ofTn`*ASb_D@sA}zAO_rQ_)@Hg}TqwgQk(_Wn{j9GYr{6J@uQsOzc*jFh zaSi#4$R?f9_u#Ee_``=fJ_Z8r9ASfup-MQTuPKJ+t~h)jx`q`{_puuO1X9On=HX=b z*AcL!%T65569C!quy@N(tOQ}X431C;TTj4&&cO}sF1%>g2JL~@bnPc-n4O~B4sP;Z zM0HFn6mMa_8v(nWpHV%nf(4_3!Ib$%Z5^!21Q;P)>!E^kG=e-UukF|FLg_nsBh@dQ17l_bX?^ zB@hS%0yx>Sv3NrSL(rD^a*F&|mokS>lPFvi-Xt9B;(fc61(n*vMo_%$*W=&>7@hIu zsei%Xmp2qFaKkZr?RSJ<^l)9r(#``ml^isIou|tS)yKJT2xnsylh+{RKq9zkreS?~_*>kXncg1q$=OlEacP+vza%#a<7}qQa#I?rO6-ltN zz8ijVfW$hi?)bn$LtdI)7Ou&QG*l@pZv(4TPsNN6x04yZ&9>UhP@{l5@4%wVR#-uf zt;>0oc&ZoAN`lj`+;f5@D|B$c0g2n^;$SK3ioXNF{3Y6{x3&n!bW&b^1U4Ah5zqa2 zrBe5ydz_u2#W)S+@tGC%MDQ%^N!~Ra+OKFJ|Ge+ zeM`%IOO7|r`zp8T?n29unHLE@OY|zmxmO9(VpXXus4m1t zwUUaP{2Uee-?65_;sR`(VwJ@@sru-yFVL_zn2zltcy=$U;_2lN{sw=8ogcc9sgB{p z!I;LyP}t?XP__RUDDd3Q6Bw*7@ejHPMLf3Zw$-ALQtGvFMMKnX8|~ve$x1);9Htjo 
zp|Kh2uqdDQkB2(iVQd)g*DV*>%#muaeJjouaMy4bho6YXshFv#LlFl$d1h~n;}{gz zpdC2fz$&#Fz-8Njv%q7h!$r@!#vx2V|9@|g~YV8ZnB;Y^MDP{>1~EE@Rj5| zUH&cEJjJr|>R|^R=#W>sZYe#^xDF&W+pWiygbPEWDwBdTeY2Y5_v)NRNnV+LuEBV} zHY*Un{yDv1)5g`+x_L@vadXeJA?7EssoQcDP4$e(K<{3eU3L{5xEfS}vxIf2NADU^bcTl5N3lR=P zQIf-pnht}nTq(r0IDykp;+C8lid)azB&@+i6G)0$VqiSU2q(eKu?uqO>teu7A01 z<+XbLo~DT546~$WTSvny_K-Pzldu`F|i3Hm~Fry!WTM|nV6!(eMXm_YkLDWLisr(ss`-TX2z;>ZGD~5JtsM%yH3*l zkRt`#Etec?ixjwhYDzRFS<{e+aqp78Vrn~@m#*Fk>k$^S*j;<_j9#A3EA7Jy7I*C} z?Tv!$U3)L+gVTAP&0i@R*A+MTH7{8F3OW|o>F4u?y<*WKRcO4#;l;I?+xv{ERn%CZ z1$Q$fIN75U`HQ@X11^Y}*xWg(`Ot|LhmSh4cU#QFx1PYd3s*Da1(D#cI7?BechTJD zQzk}Fn?7^;tvDD;?yH{di=B$Am8R?G0%lLt8##{Fij9$*6&DW_496tFAmh3T-U;RC zP=Hf6eYxYqaZOC>8J8s{^%s{{OzK(Lsw-M3XJZ_kJfP%OobGqs0&|3>b;N6djXPwk zPoPB&hSC%Ce(co6w+6M~p|aUA&QhH7;QZ;5<6^O+-NQbJ_nrlsH(#_5c;D_DX}3f+ zfJH&`MrfH_t+cg{YAhbYD~1F_!aTAbV$I_d{k0`<1BTkHYVYu8ZaM92FsYtxrCy~( z&ViLrj66{548wg->hpG0gb|s`((b>wzZso;)(fcALE_|vn36Lq;R<0UPW%4;OMPR&nONX!lUuV z9YaoGlKC8_pSIzSi6s`Ka^P-6S1ld&_Fot_8eu_(fAZ@jBNH)je=sM9j-)ozHZT&P zq^`x}o_E;A6cYUxWPGE(yYgfaId%uDc6GpRg#N`EUa86R|iAV~T96@m_z71!gD#975 zy)8aSNx_a_*D_3&;$(j$uon|%)3>Pr-{Ni!4~)|TbVN%pHUc+~`FLS*=LyWLO2?ux zjX#0>3Fc2Ue{SbbX7QFh@GjD^kN0V!GF8#~GTLym5aNMgLd1m4%m&$cCuBM%L4zn_!!4og?okj2H<4>O7<>s zB;1+P&%@2t_uSEfGDKWKYi^pQH5y@r^7Yyouy0Z-&i`Jj;^O)N1058jW0b#&HIP5# zbdt4asZnlRU8sqIQeEkx>`h0s$2iRHg_T8yMx>4+{bxjB&Jbb4G){u5!h&P2`h0Yn zb0-#s+eGwZkKPin-`;WJV*^U8PttX!Ry5@#E|gTYFRZ2@Oy#ZT!mGeEZ~Il+3?R%Y zPbA67T=XzBXlUemt}+59s#z;IiH?O@y(zKB7+Z$1O6knXDSpc*d$dPA%S)wj8KB!H z;x0HISwDgg)CAYJoD)hZHiMY-e^_zMt*=tE#ut;xLkDf-D!mD~+P-6mxQIi;wxfYr z#}AG%8l#(mrYJKOy%`?m5@o?TF0BGlze5J+6pUbCPseBaOa>SK?k!Rg#>V3G7o54m z0lg&}%Y}O*2tul}BU-4~adFO*vBgs=CP5>4CK`9<^i;9hi<6kS`gsW@c2+Um~)@?C?#;yLLXE?NTq@y}EmccA|V3oVl5S zgDMOHQGhB^J!+?BT|rN-H!tZO9jfUa=}PN~#XL%2Cv=AUy6kjr?d{BvyKXU`Z}ICP1hy*XAbdM@6D->$j1e)43@$_}VbikEs6 zyg@d3EWNHc{3LP#ip^;4tz7c!B>KxPU+3DLaEAjn(vK=QMuoJ`%8p}PWqH5#aWpE3 z^2aINdA|iXykLpJCuZPR-HbHVjb)?MOIPbXL-7LP?UnaiAa-+v`!z+Bu7N8X4_|0+ 
zF?^b`f&v*gY`_f+fg%t$QaG-GcgI!Am-p~#4(~Bc;w7TPj=03;d~7$_^uBd-^YLd( zz3mq3yYCm22fEtgLyz=py4ud3}nekEA=_Me4cFLbv#=hhqeW zF7_t*Slx~3rJN?9GGM5qYC(7H{xs~g3itOY6P@8?2dk>2%L*IMR51sO#?d(0zYTh%9?{wh zYTQ(FQbO~Zx+t{~tAS*1upGj+Bm7sWa(wlA#)qAzrgYY3cwuO-Z+_S?)@}9sz?z6C zpHbK;au^bTDtw<);HWI}ZoNtHD z1ESQPpF@`%!hzU$s(mSZ?89-0hE}Y!q3D?aod9@2O>RVW5c9Yg5ggNEkPYb59dk89 zZ%NvHoKgFVuz}))_QWnYU>>J9kt-$iJgbOco~a6n^^qlDXTfo+S83Z_%}S@2-|`zz z9Wrbio3>%FRb37XJC3-;7Wy~#4lv`^*wv``5wHwxenum}c=Tj4Vr}@jjXfE5ZYIDg zcPgxM<6xDM6pWwrX#6a=9Y0Gm;V=hwxf!s_&4FERZf3Ds_-}T5)?@0zz30=eXvrC% zf#QcjkaOEZI2t3|GGKSWZcsv_c9+Px9~VZC#cFEjhXu!Ti*U`tPxfm9exf?G2MR8- z5211U0(NibV8tYsrF$I=*pD8C*WmyZ@d~Hoq;m=SK-%dI@aT#O+UppEQ^8Api=s}L zoHzluD2o>?5PHf9$bSH))+SBVAV)?DI0wgMn{z)FYMp)xCx;*RQjzj3U6Oc4jg-|) zM^pVQlHOG+e63jyoGyyVljR?Q>qg@`ZVWV+YQC#qa_w1*&2t$3(pii0hY(s#ciG=p z)fbKgT+maifOq$auZ5Qe$CiNGjfT8EY8eQ%MJvGEraqg3tJL(F6jugMbRe{zmn(9>|>P9;q4(ryg5y#qM}u zWQ^&QTK!|_$3Y-lfLvD(oc(S;lE1{4UpWVFn^|6tWKdwtNuy1AldNmt#Rc?Phohg4 z&hAX!=zEkir)4fTyXuhlx?b-L~a8#Z?%$s+w=l;R_(_KWU6%-_cj z@%OcT{C#H+ev?++U#X1ZlgfB(r!rP-SB5&=B5Bp%-^SOY_o(w@oDT2eaPOiOXJA#f zA`rhG=o{+W*PX#tsW?vW1T3+`7vh_W`gGY7<`=}IXhl!u$Z)CH%uY|3a1Jhd?F8P6 zR`iCweq463@`*m{G}Mw<-*fXyDb4jR{+J9J@VRl<3yLYWfofB5It}z&O4Wh_jNY?- zy3&b2Mu)=9xwFe|2puns3{j=gH=v^JuhzmHU{o%o(0bR~cvI)<$8e?ZaAXit*OxP3 z83}HO_%MN@K|Qs$4X;jtignkvWNM#T5?Z05;ouXO_$_Xl>m77D%Ns}0azzCfo4`Z# zi>g2qwZRWD^@WXc53V?yvlq9KD}|>;V-qwTn#3wp0VWDzV8DRS)x9R#ZAiCZh>xS0 z7NU^hY?L|=^^EJCpx<)mFGjN2b1|oGO8rgGyQ`nVz+GZe_#;yEx9E^|dZPixP#TPD%&vH@9d9pL0*@Q#hhc zrJ*_|`bFmGMe{z+Vn6*1QBUKj|K@=!LvWeURO_Zi-Z-;4|2wc$?61uKO$4LA6b%y= z_E9h)YxeFmp&{%*6H|C2@*=(SoK1_z_3yf}W!_i$dAmIdTe^DGwq2ID+ta1x#e3(v zTGY7Yb;?`8X()`en8eI&P($^hU#HAly2^I&?J1L4#F)HxcIZh zv|c!3nS&qR+{97+zI4^8(a?xT33-_GCQehr_A?y9#Gk?FhdhLJ%Xm zT$$d*Qz}!vCQan=z_Bt@A*^lMPCjo9+o?| z#lUz)doeXO2K|y|yX;ZeNrv`qbI2>VY_f4pb@!$xRyMZx!0x0j*eQT*Im5ooYBcQY z98b@0hn3-N8{T#dJBIxYxZ`90fL+6m*_*t}g?JgT2XG8<7H~D@ z8oC040kHsHWz5Te9|SxLcpb10@GYPc&>P}61P}(80k8uK0UH2Y0Urat0`QWRt35nK 
zJ3vpsFu-`g?SOQ^9e_f>UjR=6UI**|90HsKQ~{cLA`U;ZfUXyol7t_Ac0;L02k0hk6z2P_4w z1UwAj;|IFxKF!+%i^d_rqU^oxDY9tzY`g(_lhx_mu6eyyVXWNkI zX^k!7#=yX-(a_M^Y#e3_%k9ax;}K$E=0ZFc_`pI6iv@-SfBaOBho_gfPou_`CQX|) zZ*i5?x8>E>__ezBI{(&f+O})op`)!+=Pq5lb??#h`d$IO`}FM>*#CwB0|yPhamdhN zLBmJfG%`43RA^ZE=rIwIV{aZ8HGV?$#7QxeaU0>Z>9^cEW9Dsfx5v+#osgI`Cpjf` zZrZ%`jLi92*>=Z*oP~?-xO4H6rMV(JeRf7dVsu7Ix;-%q7~L$w#UwEvzvILJaf9d! zDIBM`CPY|bZNH<_vPWknCJLA_iT2p|G>3X0%E+<_(E{@rlgCYniP3HNDPkhWk8%4H zVH2W4BFDSi2r+4J;DCWGnues?;xjYTQf9~7Q!>(R$?@5?dGQH}ws_dv?TPa;?KXRc zEiEHH!DdfRw1wKt^wUyiWyNPLvZa7OSy_p*?P-hpHjPe8jL%NA&CW=-$InJcgm8WSbqImd3=uDdN;; zcE-HSl(fVIThD&8)3UC&B{|Y(E1oJm<7<#g&$eedX4^qL{*_}`T3S?mO1h$*bl*U_ z(=+V0#PkfuoMc;eX8df>3-1t?__SIKbLUt8I{d1pJyFlRNt1Pc+mnHGM*8f;zE{o< zrbFBUnMNpLB7QXqBpB{D457OtJ$@F@gyhutuq7wPXX@A@o*Qnw42?M{ZUI%k)r1?| zV7P31!ezn@Vz^8Pq-{Y;mfaDbR+B-@NtFQcLYZiQ_i-8Ni727{AUH5=c9K|)SO1|5 zS3`G3W@0+aL3RfCkdU|_Wwx7NgGY%AGf{3JG31PU=>#7A6@MH^(eN`5VwYl1&Pqf{ zgcQuJ>%U>X7=aSv?eRdjg`VYZ#Zndeti<__#B3r%DW7CiGRQ#Gdh=3HpdfV2Z;HDk z-H}ZO=EVOSi#%u!R6ML#?i16IYZ>YD64ULVfqoSa>-8I-HOE1Z*=C*ws`8YCO3cmz zo5{Mox+Y>2tlkop1Pc+BGCM0HJ0r<%i%n5fh6V&w^JAP|>7jZOO;H~D_U$W%XJ%#0 z$wF=srX6PP6~uap8Cs zQ^rSzP6$)4TuWQhjFxz)zw*{!#b4dti}7x@ldIj{?{RxyzP-W zhfo5|dUA5I#nO0TW@b)i<|XYpy!?Ot*hJmGp6Flce|25z`;I_Q>vDg~#if6>{pX9C zKTe@s$)9@+%S6qeC;E(6@@Iy({ePYJI`Nn0YV!ZI1bF0E|9M>DW9A^uWgazuo*q~B zXNJF&`af#(dnJG=`Qgt@kzY0MzBqpUk34n#Um3MEzI;W^yZ(kjC7!(eWd+Mu+;w+h z(Msp4;??)8S^JlJ@4Nr64?OtL!;d`r*xw$1V%?MLH$3(9GtWNv{0lGs{U86__|nU- zy!zUv&9A@l=38&S^X`_C(yiOJzqg}o=dRuF@A=@vz5DhbIC$uzk3Tv5>1UrG`Qpo? 
z$BuvX^@(r3{Z1?Y-gWYaA5T^M^z-R6znnc+dH&bmsxDyR^`9MBH9T;-qJh=_HvRwG z>Hlf||K7m1VRtpK{okfPB4XIQdBZr;6(RvoZ(9@|8($XT5m8b75)%611y6L85Fr4J zuQAS3!*DS^V?p)%1dOI=9g~=eA*=2o#4*Q_Z5Lv!BTdCI7JX^P6_S~ik_Iy>K5KR| z%+ZOnvK$!5(rs*fx_U)Aj9(yL#widN;{l{2+#<%o@C?f^A|~GM$Vz|%ZAuCRE#4cYX;FFzyogM;NnYOaLKGBn=FJ2=D<~pit)zNCxF#0QdlQfGc1Qcm!5~V`Lnj&ls7%P^uW}Xjt%E=+__O7FMD+A0n#x|m!t9qq3b4bX&Mc)=~BPd!&O_) z#08i%NJ+AB+HKMGmrrLFrq+x(7EE7YS_nC0W;!$0HZKu#Yl}qZZrPnt-fV^`BFqf` z7fjyqXR8e6Zcs5PCM@R17L#UsX0!`gwWC?7$N2BwWXERquoL)LX2a zvS-Ak%*#wm9A{JkGZIp=?KAACa55863AjB^Ocmn36d}?|+$`N*q1J;2K!`(zX?{EW zgOxAyyRQ3y)|Yob*Z%VE-`g(l-lg;9-TQXEy!(i5mv`^iy|()vH~MpWK&KDj(-8e) zL~VEX@C^51aBcUx{JSaa^6q(~gR9fN$L;;F8=g#OR78!x@ow*R(;GGJ^67QCrMCOd zI_VskQQLi$oBL<+!Qx8*ACq7E+}vrOG&`6^wR>HDRV3){^z$~}8;a*#vhJ@T_Zzb= z@1F0ty!-bHYP;9P^WEIq?se(>;O>UqeV+*yJpg=6{N3~O^B01}DS*3s-F)neqh{RQ z-NVm?TSguCk~;a2_|Mwm>*D|V%a?agd$qQEUH{O{dX+r6%wO@B{!7kKX9 zULyzY@g(jD7Wn`^CLin4v3ftwg}k7=s2`aTROgD%c{PmNEpkIu-7L3bf7(eOD3KGPGk z&`5{sI)PAoR$4fkdvvSsQnBd$wQ*>tt`5Hp;=JvL3tB{4hEE$SnB3AE-`*V z;wAPa-e#|~hGPf4r=&v-23ixs7tY4O5ix)OL<`=L)`p$EBD2F1p*Nh9m=z6uATu#b z=LQ}mR=X~#?tNk{-H7)_UXPfp8*rLJv$k6${|#7@40Fx_PbzZ7e;Nv&H> zzo%%?rFybbI7KE1@qyyME@eWT^p4fFXex&N7@4OfLbmH7l$j`IkzNdcp~1@}vqOZx_auATlyr2? 
z6T}>^C~C<@EwU$0&X}5#kQkbbQF@8jBq+@o1zL5+&(~^&{_P$lA-qPv1UW>>%?=TV znewF$BEEqTQ$W>Gx_l#afqZ6~#_!J6RMKnb4UeY-^W%gWy3#D#( zVw7=Y$|A*$MNr$#n~XI9A=!#gcoPpR|3*~YU-Fx8%&P<&=4)5zKg96A3I6v!dE&`7 z;+rS6CvR;y{-x_wh>2)GCVqC?XEqV{S?XsuT7k3 zA^trUfvrPoMf7LCiDzA0d4*B+D| zrWu#{ZJG_)H{||D{hRTarWyW!)tniul4o0+se!LB!(g%v$5mshZO{8ISoJq1!(`js z6(;)}0_HU^4t*P}`Z$lmY>jU_Oq{hShQn+N(;FuH4oAKT79C)|4YMQ6X)tXtZ7@5* z?0F(sZ8R_Z8fWn1+YXaT@dTK6z?=+|g)&f{e~Cc97# zss1MUO*THVEBK1Q3@=a~JyF*Lp2pn_fS!P^02`nkz#rfXumA+$%+J9>0~`Y!0_+3q z0qg{92b2Kb2D}b<6|fQTBH&rT2Ee0$HGo1u9)SKA0x|)~fEj?vfC#{Fz!1PdKu>@l zzyfIET~o)f4xtizGU}LV*Z)4`8V~Cv4}@b_n&xBkxtB$t`eC_pfATVQ{}n&!ddTRQ z$?nd7_&qx8&xPLyI()HnkLAcr-2I*ML7fAE56f>5;1&R7<^jNKfJ1=OfR5_Y57;+W9z}h^H zwF>oN2|WniUt^U=Pr#n}@j6V>wGSrUHJCmy;VF-a`NM1s6LjRkP>&vu2lmq7Fx7hO ziT3UF*fa1#H=4|NXkXZX{eunI^ITZ-agXQq2JCk>V1Kv)d#xUOmX%7_TSj>Z4a_^C zCoC$$xF@dW+kZ6nq@nRJ-Af<<$zZDa-E%VByVZQ}!>P5;`qq3e#F_BPh}-mkY&E_? ztoa>*yLWGKORp!~Y=B^ZhcJ-Bdwmbr*^e1BMqp#SNJ&W%4u?bJ(PLg7{`16x4?ZZK zdFC1M#v5;ll9Cef$tRzPix)3ac9k#X86Oq=YA){Clh<5W%W`t|s?WXhR8?N{s+^p~ zi|OH%+*7qz?l}kNGJIR`f#c&PzH@T;t?k8*O!dv8AuRYgC99H@d(Pqt%Si;5JB;s~ zy_QObPf9}kd~)f3&)%e@y+~hutN4?W(qIoixs1OG?tI`!`Db`OY3J}=h4lCCP~S*| z@gHUS>N$t>vH5l{m7KIAepP$VAH~;MdQkW;t6I6IYR_55!S9QcW<Qx6j+V z{H}j=&t8y_ME+O~|2_X2G5}8{zb~w~q2;(ed*Ndhyd(bS9_~Lx_n^KjKiIpW^NV|w zU%DgQ$&lf)62aBjANYT`e)68ms=aw7bO(Rrg(+JqtCTbSgTLjER!Zfp_|GUb-I@N? zj@ZMxvx;Bg#>g38q4AWcPf^veB=?wC-pD*5I=X=u-aPCKJLRUNEh2ZzsVzx!Gej=3C}|6hsMz9)gCxw& z{qdK$O?b)u+c&pL`CHPGUEfTZGG$BBxPdRgjGH@l{sbvsi<3)>dThCMN}R}*>$ki% zh4H;<$BxpsaYre<3;^w5UHxGCp?`u^=M#AptoVeA=rT{Y zZrwycKmhim1d0(OMu?D*5D^&}sdzVW;zWUc@;J<3x|lh0rdXOVTFjX3Q+gyTSiF@#O#D4BQaqeLOKg5H zQ|!3sX0dPmT+#DgDSB^}qW=ylMsAm4&~7P)e<;O_52Oe^B*o32Nip%L6xo0&UrBN6 zcT&tgCdI6iQsn37i=v_;v3BiR@xTKQh=(3}NId%JqvDAto)Awx^_0rrjT<+L*Is)~ z)n#wL{kAx`ZKGKBlN7I2Nb%l#?}=Tzc8L!@_(1I6zh4|WbVz*m*=M+!{0njFo5SMZ z&r%#eeq6~#d3m`wbLxBX^?4~ODk{XebLW(-kgcBm(5^;fE)zmyS%ZG#MhFiI`&AW? 
zQ+YMc-R>`Ll@rAxIUjql*NWHVOIUBU->grCAHsKzM*EtHGrHFZF>WLJ1_(c=0&S&i zDOMuGgfBt(0|y9x*2xR?Mfs4+B*ti;F@#lo7*oH!Qprw$9`wRKSL?KVi7xMeHLRP#a z{y%LgK(Nx&NqJcpJhGM)+|EKMUa( zBK&HE-+=J1ApE-sUxx5VbL|m?{|4bJs>Amd{jiTYTBIO`yAi`vh+#WoIF1-9T1xSA ze<{vPl;Z4sDbB5x;`~ce{I*|Khi{AUeGq;m!cRi@*$BS?;R_M|p_WoS-Cv4VCrYt( zz7%`cN^#^RDO~$&!dnoY9B75`Z4tf`!gojb-UxqFODU%Hmty`zDOSx#Tx+G+@{$yv z?XL-6yK0EjA9u^p&`}|yLc)~QxL!TGb?w}_`7QGrFz+b#Z;+fiZOL+{xAjNJcXLv121gj*V-7 zO>2L+!$VKQL-#m@9~r_3LdV9nz2=(My1T-$YiApfjI547u9e?4*W5fpMG&B<>;!k< zAI9(@W8v<1P5YY-+{X1H3J4!JG9+sJ*s|G+;ktbZ_37#kH8*JkwSYp%HlL{z)S z4Q><{+%FtSjE8%N(W4cW?W)~Ftv=QvR|STT4T}PoM|V_IwjXVX$H-wJAvbtgeF8_u zMa4x$hDYgGq6*QU$k2hKLi>4pG#zg^kXMmmsICn6xS{=gn>6z9Xc++viDJ~)kr1RB z_mCS01rBK9Ww|;Mj-jI<%L507hSs>p#f=J?5Oh^zpTS5WG>)-HhSj7W7fQLC5Y)o6 zaWLIOM?zf3n!&+8@&^JI+^pqQ6XHUnMg~FFs-59(J`uhxhvMJ>cy)6o|HG6I*xxuH zGzdk--C4zt{2dzxL5vI^G^~!ZTl~oA$dF;RL!*4uexhncj`~%WrakAG1YyKXBOTh3RA+xgIS^C)n(yg zZ`BBi4!M;%jFU$Po5M%|8z?A z4SxCM7jfaj1+n&=6fdE#u>Uu=dbVFDG;}wjp+mV`GaC&ZX7l#mgNE*D(NexD`pb94 zM7dMUmj}gKc|^P`)@zx|H*y|=g;}HY10OUNQl;IR>=)24K(y=Jt7pIVP5ZTkhpSq& z=+LfHx1Rl)HSO8Ku&~*B_UdP`H1CNp*I#{Ai#Gn(_3GEmVrkmM$EW%AR%;vYMs0ev zZ)Ryqi+;WxTQ&A*+!=oR^=se6hw)$Q?|BWrnssT~q)7nA0iC+F^=b!K1Zj_Nq#w}E zuSb`Fe*FS^0Dq+4D?SRmtUA1$LGK0A%9u`FuJ*-hrIO#Q2cF=KmPbsv^#H|I(6#X zAAkJuJG4Dt{qVyN-@rV2=FFK>7?++na^%QMWo2bf=s^O84<8Q6XYMO?6yw3@O6A)h z;UDvw4fECAyLayblf2^e@|8UPx88c|$iaBvxh>`q@4+})ZrQR$e)rvX678wPybC9& z7^x`H*a2oZeMsoLBWK~%*?RW zt5-*(UrpSu2LxPq-E{*nw{go2H{3wp`G zfAWf^`Oa^GNe4~hV*+rNCLqtH38tNC{ww%z+qP{L^06~yVmWEqx^=6ff%1X*CdoX& zJd=_a;>a?AGOWyl2MREh559kceHGnN zhPhbcO3QESvuDprG($@MhYT6gopMZI9$?-~zV+5y5_vAsXI5pHdB8luyqW^yzH{eJ z$+B|z@L}~%8dwKV-dIkUN5pLtPBTsXOv;fTN;we6UiW)X%79WS`<6&KX}6Tmec7lfOQuYE3-QSV2^P5txJ_6l9 zRn?Gh|^o=|DaxdQ3WKl9!Yt^1nVh$anIbc|^IP z{IlN0m`x!x0r=nixlY38Dl{~G9KkDMOnByOWd|1tL#=50GIwdO<87I!*+`|UTm z|Bc7wQ;WyShv$!!f6WM1h~4rn&d107PZN(x8ATdCHOez&X1H0NStp@9vrg)ca=`N3 z2lB)+eGz)LdT3DiBfnQ+?xHj5_2s06^&w4@2AY(6%5_~jO!=a$QGhD8B 
zjF5lJzDdzQ0%n%u5MP6a=%Z4O15Od3p}IVi2BSRpWu1gNwHxZBPN1RV>w4XaX!7gO zpz!b9xpQ^>k2ap|{3ZFp^2_$X%nz0aew*cl_@myFB@ZOX7ZwedPv?w~PlATWKm(t6 z)Sojz!!*?KlfROZG??X?G??X?byEK_@O_(<*OLbD--a`OC!n6f^I?7XqrO-2kFvI! z{UMe)lMa&><^kIS_dGFap-KE%PrjBPBL8t`kbE9AJOvuo*>(K0Pf2;Z3p8lB!v!?3 zJhM(3i#o|H&!oXD&!oXD&#aSrzNPR#R~LVlGqx}2XH^4fFrh9TW?6BkgYTq)&*t2r z^5w=vtLFxwzupv@Od+D zsNA%4sC)@D{2eqr3mV>jC_|n(1>w1ZryO+7L!Jy_`wSY)K1Pps^}u!c&-%iY|6#+1 zb!T3f!17AGi5qb@X<^;Z`kvz&)|+gr*tYR`ci9m6255L~=`gu*agclwG}JB6_oGf) zJ1<18nmbAsri99ZIiWH)Axt_Qx5zI)*@ruxYxgl!c}AVYr#}2q-z)hCKC6j4@uEqb zh&Rgv`E2G1>v;G6G5bTLk@CN-@Fw{VXm}koyt)K5pe(RFlLnS&)=3YcUL_4{NJHu< zd3Q2sND7ln6T%e@#FKptw$ElCV-VUmJeSBn>kPJG>_-ruefC-T!V51*_DfAL^MmH) z$_eIW<-Whg%hG}y<=gq70cGJ8(C|;AJhM((=eS8emUWYSFmt55FFjb^lNKVKpy4ji zunaUT0S$cW_Azcl*}!v2{X_gA3rhYW*UL%EU3c9jUwrXJMUP1X&4%b8O_VK$)%L$4 zw=Ta?mVk!0K!YmJpn-LgS)QLjo%CqdNcli!u)G&EtOgA$LBk5jQGQ~WLX>q5n-<1ELprG!=fp}t7NHq=27k4@?N6Uo^Rq}Z0!}7gVp>hjupnS_H&#aTo z^1K0c5@~qEpy4l&qt$amWKoJv0})ky4D`3h0Ukqrby3Rq>&QMoTQkqm{;tJ3hOz1C z>AQ(5%OlIc(xpok4es()mzKJD!f)0IEDOK)e6sx?)qM<>=VGHg*9mvo{txkI-bX}4 zFdyV|&poI5UhLCgen2i*u%McT%h7SUJRv@;lU`+=^n2wQH1Mg1*Y#PoCZDj@rWAFj z?9ro#jEjqtSeqf)uglKPmUg>cu3EK9m1|7-sCtKeq55bc@7Xr6u3}$^{ZWnw_^>?h zecB>bO$M@tnX<%Exc7k$6;24H9us)?6vMnT!wmZY* zfoF*SS-E?|f|@=C$^!csEYGZy9$pwl^nZ8!g}J{Xs6z88Kalmc821mtI+zzxh9%r2 z&WM*N6O!LI-+Z&`_h3DYsy~T4ab+Hn1`}vUepB{1&SBqOmotYB9eN+u^r(3drfrmEi8YG6yF+45OJ0Bd^%8w+g$wauf5RO} zg9#=bG#MX(GG)fiy7hy{X34i#1fzY{`xtDWUsw_V!v zDdqCz%THsCl3ccI*)O-;a*Iq$OH+KuxKhayX<)hFybp0B9d&8p90c28%02r?EGwLk z*n;sS?bt_RJ3(6NdBB46UoyY`===}kLfL%+bCgH#yz|bB#1rdA6|hX3H1z7#OJePs z$_KU?lzH}jOge}!@u$p_PL2&|vOKViusoP~LX$GczKPlPaO}#$zz5X&hv;&Sog#VQ|21M5}HEz4=srYU>o0ckLSG??RFjw#v4rrZ-xnv@~ZX37`s z*{5Qk8+FNFF!wYV>kRMV7~)^SpK?#2EOEY>G!Zw>Ib-dvD!-wjp)xo)SYmaG$_KhL zj7bAcQ?ATBG0O?d2XQw|$`Qv5lq>cFnRn*=2Jol4tvTRI9#wyW?p06$bOsNXvpm1^ z&O3?*@`v+=C?AT3`1p7^ZrnH}ThPfX{K;$boU{;cmIKy5wC5Os^25IN7L)^)0hS5I z%Q_giUqn31{r&xek?&7p-bn+})cH`ab0v@JKbSJe{*TF5;&b%qQH2lkR-$jgRmG}2 
zAipK#P00Y;DDs*93H&AvtVhXH*8eZR{IdMVKmMWGz<>VppDG;Z2*4)|b=yp|k&A)* zR@mJQq^b9~QvFS9;>-RI>wlET)%<38qe)zt@2nRfr>ZXjJ&CGYQPx%a#=3^`O_?M9 zEC(!?%oEB9^MLbNbT=QqGhXHq^Mma<=zR)!v)-hh@Jb(rzbU6Q-DQ|Gm_Xb}7iEZL z;qJTdmZ-BN+iCZQxS7B(tY=sk@uB>1Ohh2=r0em=ALkt76WBKNM{y^O3V*Zz4}LEv z4Q9P)(qhs;ll*4ggtl9a6DU(`Yu))vT=`A>nKx!1f_fdwJ!OeJXBk4B@gVT{S8%V! z-(CJ$N4V!leR)E>0lWopICpeu48-To+sq(rcImV!w)~KV&6Cd|>k@%ZH8ceuco-huqiB|DY#u$dQ5nsyI7Sx@j#l#)`)2IJM zdDpS7`Bn2jC|_#)i}rgtX(9h7YrTvVLJ%ATV!e$9!Pj$#Tj(V_igD0(DTN zg?vXE|4KbmgK_OQg}*8P=qq$5p9q)B6ZY@f{~=w}%MmDBGcM4jQpXU50>WS+4AJV|Kll>Rgf0P5ZM{GOEV~#n=d*%cC0%kt&n{__x zTGnG2Qz*R#X#RKOPCAtQyURvhIXAz}a_eqV*7;38%nQmq`Aixq>yV?1=p$&zE4Hg# zWAR7YCgS&d54Qz#4Z?pCma820B>=qBFO_rK5d zGUD?vOzeI^dU*b%X~t*9?}m3CqsMoNVIKQ4X0Cy!)J+e=^fJuGhG{j-j)pnXFmnx4 z>ZS)xuG6vsf&u^HIxTMmW&f5yd2X<;h@8#`U>01S1-L)_rbeaid?2E$_hDN87z+lX z%^#bXm^cyr{V>e=hrm7%z&&;&OL~F^G!xfohkK=)N@h4!Z~s3hdw$Z&Xc+}>Ug+*fjTPccc}NLj*U-4#KXQL#{d|ErenO2 z%J!G-8ppXzm;Gq+#GqgGo%9&>U+T+l!Q3tNebkk`TH)?m?>m)P?((40=Xj8P1Ojm& zuzn|yCv3ld_~tYDOhK&DOK?7%&#WJy@A_WXo5lbK>V3Hef^+-S`I_=beJ}Nl!(o0@ zTP8cQO~Lpz9{t={;$h;#{AVASxUl_s>!EpSPMbPs&dHnU6NlS?1NENNpDTF;4%D@A z9SP-;Ix6ZqsVAI{GJ9sBQ6*)|hQ9N6a|PuREq$I?)x>!W^#^N4(y zHi5cV>W``ObC*Z1WucCSx<2ZdD31Z}sGw)f^w|$G^^0s**-tb3Zze7b!FgrQ1+K?@ zCUw5pODFSD#@unBJjT|MN37phWrAyID35jYbRC&K^eOR}SBQ1TgK1OFY4V}IjXDzQ z3#q5t^g!nC>I}dC6*tEoh0YT=a6J#p1a-E2O#0cc!2KHOxw*Ni?1K_aJUDMcUhv`k zI(1FdaZp!AJrngC)RR%iMBN5;WYjM$o~^@Qm!(MJpyUyHS5x=R_}tTH9YA2;hTx6| zabfq2Q&|?Pf>b9>Z_=`rOuGL=f@3wAKWc#u!bhs!+&xBgS< zOsT6|4jia2t@B(`|1o{1(;5G_zx^$ix*_(*i3fSkaW%)(oHr#N0j^(Ab|KgZ*o=irIKX%__tGZ8>-DTs04jHIi!X!W{dM=i~?Z&+#2)f&3u;bsxqFe+M92?rm+Y^!Wk3 z*c6m|H4bE3PrVFvOXz1wtov4Uo8vFyK_EYfN8P+3er&f`cW_*a=m!JXzq{Pikz-Kw zKfb3fk$MU2GnJfwB@XlF&sThBeZY2*aho#0v{)~gWfkp274$N5Aj{e4Bk_zeu6g>s zF;z$M3cO!~OB#1j$HO^#Z1|VV3+mUku!;3Rm)|`j_<&*3qXjzN46vXPeJDhq?uvS)g>xEEmL&5BXru@v^^6oeKNypy5#@ zdZlR-Kb5{I&*UqCco3K#+asKjpy~#$RVD3|Ip!C6O}bepWA5eYKbW>kpXs2kQT2a) 
zc$j@)>^YFw@2KXA|4(~Y9voGX#s>_72t{yFA`8V3qM&m0JGwAf8+s1#9qWceqk5w_j}6&4%#KZlyc#>o&{+T0pD>(f2DNrVHh12w5EuSoY5bkjK{Z=g(gY&uG2}jJgEtLjJUL>C$^}2c7|3hB8u?FJB(~AJT*^ zTC^w@=Ce1iT)8sH&(Ii#dzoSAh@G3G6yt z*XV(7H5>du2JK@prfuAvU2NVO!C5K-n(dxVUUeO1W*=`?mjv{v-OY z=qsSFLcmvg3)+RxTG1!Rwxj%Gz3+^U4ca(Y!3UcSZN@Ut7f0U<=OWPWL%)-O--&b7 zXj5Uk5w|d)4}(6`U#1Nz!!qs;=K*4IP6y`^&}T#XA)jyzLOTeoqrGr{NFh+#;F6FU}`6=lhwKZ&+G+U@8!pzk!QZ%nX` zPRGm23c#<0_q@x-j2RPLBZAM>ke4VgA^Has!$_$J<*36*DqLqX-Ixh_7Lk4caWFx?s|}>h}Fo;W82;?TQfE$ zxSo4%M*FfoA1=e{aSOR%fOl7dwi@CW$_?T++FZyF#M8Umg5Pm059P%xrVsHSX&17) zj>9;K_2YPiegc+_xC~?en7i{MPGh^V9Ha%@59$++**KqsdC||tu^XPn+>;l}8C`Wn zbT~xceYc|zuD=vRX)ry;T9~#HOl11~aBeJsCGNOZ$pXo@YgK(BF)a=sOD9;Bu)8M>DIKE?eupB=Gapx%cxWWC>;beu^`lj8vdH3x$!J{6bAU7ONBW2Mi|&obBFf05#6Vi#NF`d{Df`~ccgnvj#ZPJ%E$?Zo zXnCj8kX^J4(No1m$3;K$Ol*gk`1spjDt`a(=z-QpG3Z%!5>2|15o9tcB3sB|vQWFF z)zy{WSx?Z@_1Ei1Z1+ufjl07=;GTA`xOI6u{xa{y z$Mg4jE4f{Qvce8VK3^iKqvdNev=6i`T9N*gvCr6PZndKABzu@W+Rm}3+w<&q>>uqi z`vKaTwxu2EOY~#aQ*;!wJZK0h-`8^@siU{nllAsy4D`RF-O281 zcei`mz3o(ctlgQu3UnAxC(|X+hKn?uHDIxLfVXoCbi0XGkuu8U4O{vW^6IOFwPiJ=8I;$+1pGpbIeKRd~>O}&pc=zGtZf2 z=KWSZs}XRtjn&@jY9(0#Yp9iN{n46cEd>6qwKiKjt*@;M)>X@AH@2DmD$sc`aQl#5 zVppRrXaP|9Gg^~9%zCmUpx}7+CY#D;vxTgHtzt!N6Whl2u!HP4JIT(n%j_!qf!$(Z z&I3+_(*hPnDJRzH;Joa->hyPpJ7b(YXN9xI+3ys&8{FOQ=kC`a6}5Ro{upn|JMv_n z$;a|Zd>-G%yNY3AoR}!~sUzwebwl~QT3&?rm`A*)y_dWMFUbpdgS-*m3~#Pi;H~uD z_cnMty}jNE@3eQ(yMZ)~gT0pFQkhgG;iLg+OeE<*I+MO+4@oiyn@d3Et61%<2DB+9 zv<>Y=*U&uZ-3qpY{oZNmOmY@Dr<@DUbtm3U1Sx*UUG1)SKLQ@Ng4TBDNxVNF$;a_L zK7%jdMIc8<_-THbm+?BHvq%(^M823U7KqIvMP|ykWU)LWugbd0ubL>LQq@q9foId~w-Dfm0Ov5o^j7YPM8E2-M+sp^8&+YqYW6+v+>3-0Y8&qS_Y#>W# z)7TQWj%{R*I*&P;`v`9aRKL!%rK4J_YwCV)ls6H#C<~rZ(FBVx{6=1s zwbi34N)1#a)l{`cm8gc^VGkXkwm>UMu8`VVCH+ajzuM*%^F!-AwCQO((Y|HtG?5;o zZP;Lz$Ck49Siqg=Zsom2wpb>@Wt40up#T7)09@W86Sdk#w2@>yYxSl5=m0v14yD8C zD4I=I(Jgcz{Sq+eJpGo7&{1LHQL`ltS4e~n7 zo8Zj`DgDU%)cY1?a{)Za1bv^Sf1;n!FYDLzo4~y=!0JxMAS1)bG_nA%bB#PB-itZCJ;BCPtB--@!DTFoqCjj*z;94pt#v(DOo 
zv#Y{31|#Tdx{H1SXnB;LpeI3}4fX&6VmgJLSauMV)R*TcRXrr`3?Tpq*e_3Cy7wU(AmQNX) zA%T7~VXV7gG&Y-?RqZ!`7FB2?YS3qCJe^6`(^>9Dx72OGUEYTG0BM}bpBKI@<#orC zO=LHT*1Btbv_ET0wRPH27+;&{PwTz(_4;l-%W1I1@@u1nj90MF#3)*nW zY-Y8$eRL$909r7QZlr$&%6~;KgS_0N53%O#0Bhx(;um>U5hdcp1TkF8r3~dEa2JV%V%k@T(-FTM4X2SS)m|bTP4s$vONA8)-_S;hgX|p33w23SP+f@?u`XYlv_`p!KPMafPB-92KSFx`>dtne`c{ z(nqS1JUt&+*VM@e94!PLDFzw(m;6My_1!q2dmkR)LjVnO07s|tIeanHvX*b;JD{dR z{3t)kFYr=+lZS~KB0=_%$ub~QE-OgTp`kc&aeR>(rR77$~j+#+|t zSi2Wc_mDg+kIEDBB;fA_St3j2b$L_zRG6x!YN&8kM@6XmfXY#-scHtuY$~RNYNg^- xyy~G6R3DYB0)W>;REEk_xhl@@%W;X=_9|MRqO4(S*_y66Az=4}`q9}O{ zJ?j1C0{kn5=j-bE^=~%ZCS@L}2NUZ&o@w9h=NWcf=aoG$TRp=)1B&{4JZp$~(v1zD z&a*;=qh5JPxX9!2%MX7&PtST%LE;a6Mc^~q)RU=@7f82|o`>Wu#7NN7SnTzTk-${1 z=gEmYCwe_;n$r8%;`NkKMd*Ex=lI^peS2M;O5QRjNy;6o`H==m--Q_Z z{Do-MSu>~Il)TB~dHhTgw($Qf{;%hMpT8(%bCwI{Nnb!h1r0u$=l(uG1fw2L%ULsA zK-EY3>M12*2+ux$Q7LccteFkODV%^C&)0c5;c#%b+<7;E?b26prTQ@AD{vQ+`v3p+ z!9eq>)VC7W-{YApE9x`jD(f>7LPpak1{7de{{QlMJx0w6Ba;qS6O*uBS|WIjXUA2> zvKKo>HW98iGO=*Tz6!Fi_cDfwY>c?9G1bThr+!G}aYuPPi)9>~;DU9KSKFF3MrKMl zU}Tcvifat>lTdc%-kQyZwJTx0X}#XuHRpWGe>0SjXjmS%R%=Vw+aHuk-J6W&mgF&( z{}R%y*Y$`muEjRToi@Gd72g~8ZZNE5xYn@R3~LGvFjs_bBuLXIQ`2pgMrLBg)z|2g z^sDr%sWY0e-c4lv?|eWvv%$*YbTeyI)O-qPg}tiWVfc9vs{rY~#F|0&* zPApqjRI}0kWl-?LyFF(}A(UOT7oeDBNA_uneRVZ0)*JS0z!Gk@ls8VJ7ct8{achnJ zAyX}utuLk#_Y9_XYrVw2ud3N(L{=t`6Xb@dZU|}iDL?Rd^s2x6-pM@-CZA_!6&u!S z`;|bUay|Q^$Fp@$WS?Pe?X7(mO;fcetTrk)te5S(sFo^yRH-j!B&A}BznzM$wRHGP z*bCJg)+X)8t@b*uZ>bfjD%^-9nrY;zJ7y#y*}l}8`=(`F~Ht}!kpl+we-(C z+0y})3a3~ql$o$v468F?tv0MTpvsZvv~UP=y%tz&Q*%qI%n6=P*zE47#prr6JlQ3) ziI7?JM(Q#@8Oc04$|~O&@_L$AB?C1Zo#1jf=^`l}sF7`o=C5ID2~VOaH5+SIIl-5K zNo3REEr98ZK`lv}3?k+MwM5uI!>neQ6Z|=_%zzoo z>o`phhqsdIsXnyuo)Vp4IfaqsF0xpi_Rc+yQ?pSkil|-A4^m=*haCG`6Ojp4;PYtO46EMzISK1LYQA=fgR~W z6;mk!hI;`kWr5+!n&YOOdt|X4h%|h1plr70W*0H6rb) z{)z0F4{q^z@+Ty+w|VoLwzSj!v(Mv+v?t5zeV^;|>paoX?I}BvJ^!>}Ue7X_s5Pr# z3o)z12~MXPYklpofh45{0Mn0vVSZes&pYpEp^GRuZ1=x3(Ce9K|A%z#Wh$k&pVC{y 
zvV~dvWtWUtRth$({me@vlAATOv9>&^HP)6UOXI!{Bhotav%Jr+x{b(wipU06l3y~X z$Q+$_VZK6JTJN!U7O95!kNZB4)}}nEj~nmzoO}QthFk}HzDzo~+JzkeY@v@ipiffJ z9a1{bLwlkB*H@tpYi)kGUYiUTC#&_^DLhWowWW!0u{}cpr;d$-fMOb327aC3%@9Ju z+9F6_Wxbq;{F9XZ@3`G~y4!K$=8EFl>piIx_1fdZsu?56C(T$uDhLhN`#v(9mA%Q= zN-{Y3Hp%%Qb(e+hFTo;1S6%?uYtIX(KBTb7XVf;bfV3zXdRh=G#GRp9u*#`viCYZs z-njL~_NNVNpRsd8ylP#-Y9)I6)A8O2Ur)sT0z@IgeT!$O1Ol?si~tqXd;+z|z+2r> z?Z@pb953#4oJ8i%aBTvLu1;jj&jW(>h6Tg0)+I8`K!fCvJ;@_vvCjSi;$bQ%o@wV{ zR`T+dzvAscu`N=Gfx~dCA`J zW{T$d$I}w+x#G#@mO%b$wICydoB|g$EV!DvNE8F_14oPO6GOo9_o2=fWV9Ws&(WaZvk1aNZ~!8h&i=q#d%Tk@>h$L?nauWGRBBY=C_kA=qm) z1C|cp;1KzcfiuiqTV~H)MmNZ~%Fya|(X^9j8gHqPeI;t%;uA=;PpTz^8tbutah}v$ zzE?w%v^t~|gdq%BrG7ACWrgJYv8aBkTX;7amWWOK+AxROR(NFH#dVmMjv5G4GGGX^A$Yp;HZ0 z#=q}$ug9J-mWE~gxnre2EvayJqRhF>uq~2!rJJ~>kXW+lSPIO~Q7isM)MjLccnxzo zB5!NJF!SDIe+cH6K<7V{r7s)20HC_8f$Y?o$OaRYUIfqxSc{r2Ca zrB?ZHX(=L4?inPy19OkXe8CO?B5%$d4WfqZl<=n6X9=fR^!xu-f2SK&8;qSF8?C#U zT`w7}xr?%iK^68E9R|7Nj!G?GYGfx=JHdC_$m}-6#kIK$G5M|gHM6zC{!3rzOM!mo z0JO_@K#E^A<)xVy(v(b>G^KNPmA2&Dm6}oYQlIwh%pKa8E9o=sFk314Mz`dVGWOtu zR@|>?OJ-K8cAV}4W~B)i^aAEfMlVm68&2zdZ@MM5WAySAl6-4hO$%kz3l-K5Vg#Mm zxM_P+JK{Sx8OOC4C$}p3(G?qv%-nEDpG1XG1O8fF$693VHmq$7PV*}5$1RZ1srDBb z2|ASWEwWGuF=!R!AX2B@pQM_W+$!iuvTW?h(t6}9uOFwf{6uA`Epl^&?MIHB<4Khx zf2;jU4D2%h5zZzGb!+yuDp#c{D}PDj*F34=cHM!JSZ7HRsjJ!^c!|feU_nhw{=|gU zS%*4jy&>w#a}2Qv5lr4QL~67;WHL?`(R`|5%?;lMaYXegpwy*5)Pok3S79XZixKu4 zG|olK^J1PjfSXLEoo9YoZTYais8KZo#cY-RfYbt)V}OcjO^N+uO0a|mt6`eLuR>S# zsF%od<|oxPn-f+pVQn)q=tlL{u6h}zkB!WuVYk?q5|Os#X^F_{WH=Goks4)>lQu~~ z?x-DTlYK2MVxX+Y!}Fv?)T!Utqv%}EO8Y`sbO9r>Cpi!y-t2VZJHJRoI`28nF!wph ziiEYYW@Fr1Yu_{xC~K_%{-6(F_iQtc0YLlHJ3Gf$%1UBbJCglk*5%=RKYg*-gktnz zhS9uPOW#Uz0ZLy2b9gX$Q=(yZpsu+qd8Ib*Y*OQqu9$XlSDnZutF-Si&0xPJhK0x~ z>hfuqQ`~8@1G?3oV6^5KRa*@&$q$Dcfs6q&BjMX>RCVT_R5<~uLuQF=oq1awD^rM` zM7G~EK)06HS=;rU+wyR`_L$Wb-?=qbwLM=d!N~SHZTxm>K-`?25~1V1%NXQBjflLg zT3;rFD{E=NoZ0OpXTuV!w&rH3HFcf6dz>3Rt*2$sP4aYooqq!@mYz2b(@?Ok;ME&;7$H?HxT)rKNt>l^qpVij&|cJ 
z@*l^oj$D8@Xt>voF#`@Tr!tY58q%*4B8yww>Mfhuk2yn_$IN<~`7N0XQyH$*d4{m` z(uB1faiKxl@DFC)5xByRp+aT(b7WGDM@UGd-IXeP2DCWLy?u$LQXPxcvtWmhzF`!3tsO`Pk=3xANE>|Z_yawV${{ zc~599J1soJu18Z?&^5j?sx|+ZNFkoDK`KHbr<>jTlOv3V^=fI_<;RZVAc2_gooL*# zadO-@q?0A?`rWJ!g9uC_0M!HZ7C>DW*FKmUA*CEEr34WG;QEC{SQg;8udJm@uWd{Y zw?`kbxf7+iFFUY{)1_he*VEBW_0}4t;!c9=5jmuk$gK<*!&uFbFObL*dc|}A}>qno_WiRTmiLsrJ*L!D2`?v|b+!w%Px8?p~ zvmt=Qs9S3=C3x+*w87fi+%jjQb!Fww+`BRD!P{Y zl4GE>IGny$Kxq;Dbj1RJM(~C#oi9b5OcgmR4J4C@Z|1h39@ZA@Px9<1r=7%Rvu!^_V-|^{W zy({LYw(s=xqR3tds?qmOR$~4-!`Io8RS7jhzquVB#>WI}cge zs$J&qt~-ogA{V--v?u- zqOYANjQ$&yb?MFbmZFgCjb*-3nLk$VYV#(C@Eb@D)U_q=m(DEd7#9|ySu!m)BvXgt z-KV(|5)E`^whk_&#BfwlS^lH6*pmV$x9(+VBZNYRcz4?0c z=H%Jt9xp6lPf>D&{n(MS{IQVbMB9fsY(dOw6T|B|doe_S5Jc!c;$cs;8h)8%d=-ja zd!-sia~*wBg38JES=4m+`4^+20Uh~Ey7WpjIoi&NE^Do}y6wSCIKkNW#xuJ*?N8J~ zcdwsP^XRt|M#@T7Y6rIq3vL+cF&ip9?qCb*7;NF$F7Eo!6}kH$Eo}dU+K)h5zeg)) zVK{|`MU(x^n<_o|O1o=Efy#nRNSDfHVFf&h$_n)=94xB84&(1A{|#@T?_X^+_2WDu^=24Gz6Q%vQO=arm zW>z{S!%{p#scFN_vEIFG@S)D^uR@hXEcMuHPU}%BFFA$R_{lw>ukUd|UlC9QC6E2< zUeMzN^yib{!1j4(NQQe$7bD_%?4MAR6a2$MhAHF!6oJcb`ML_Ze=cT+M8;pk!?1!wcv$77{MtYGOmHsSG{^*! zVaiC#xtpK-P^;UV8;)YtAo>bo*4Vf5yWrq~VZM*FFQO|@$jIE)N+?mY3d7|*+Qh)5 zXuA{q9-T^9!={2kvCK}Eg(*8pWO7WX_A4-9R}G-fiJqixV3VySKLx~?4YZJ*BTNJm z8VO$vQK*DNvWb9T5fes8ap*8jl$9TDSXjy>f5!ibz>48qUdK(Rvy2qR8GSQwO<9OMebo!rv<9Lq+qv`J-P{qY3p@ z18S|b?@(2`*yh zkIrA~thTSZiJ`d2NftM6e(<9sEJrSXKV%KnyP}osTDgT9RxVi+}^+*)66<(LptW7ZZPlVa9Z9x_EH^M9iK_{WY@Cz{z? 
zDc!!Wj50PKkjo3p1l5P^M+SI2wU4VQnACbEZ@~sBguIU}4V|?HDJhCAdFloRs(Fbb z+|~BNDDBJ!_k0FSq$4>}1nH9#4J`m(2q5My-P{7v?DE&&(K5N-mtGZRV&p~r0b~du z;2SagG|7E}oX<4Qv7q=_KjV(itSR?k=XTDb`DR zWTTe8m*!x?)6(+-g}~W8fqYNWTRnj%dICSCT633IYaT-&O2uaPH0_ZG)QoS0Sj=u+ zON({c>>j73*V9z9yG~1=rDDcw=@BY$W%65Q_hc>ofqJ_xIn?aFNlS~KW_CAd>Hi{I zPCO3cR-Gqqt_;K@%admt)?@;2B~L*Z{9u5#%ron#pRry{B$uaZ`G)S?QREP8PRs=e z8P>4=bUL@q2MTwtsHStwBW=ki1LD^8p1R0R>;a2mHxFttY-=k6z*GRErLk6Gw0Nth zXhVw)>wHhjTbeU*?o)l zaFBdv_pJ}CGrPa3rQwad-KC{jS_s^&rGKshGm^KM-LtgxmvB9^JEf)9sOULb^J^5e z=v3bFS2~^3W240IXtoy7^3>->)oNqIuKY=0Mlka9y9y)uQl~S9fii%BQoy!Vt%gy= za+YeH2ta~yZ4u<-fiq0il~7cd{@~uJ#aFb)!ij1^iR)Rm@mp)*n`vDoifs(N?*)O^dJ`)LMhtJ5_y!( zDy=V2Q~oS9-cknY#foUMq+VNA*PS2A`0=PvasBEW2p88`b=~T{ygsu0{s87zXI0|g z*`?P@u7-7XbC@SCKEjT~!ihO*hqKkXdrfe0>r?BAQ~fQres_`ffmuGR@;BQS`m$)CpKzQc(T?!L2x7k zevTk6Fsno;@Jz<5YlSyiAum-*`p?V_8~H)R79|x(C~q^2A`(mz;o!EaWH?r}lk_r{ zTe=#>u4ldeOvrv!Ha{go5Rj1AH|!y@$J@0ckQ~Zy@j)4??cb7tqGL5J?1W8BrXt-B zeA}?{Y|Yh!L>P&X3`lzd{OF5hwma(#b7hhJkI6kVB6Y6ysGArHAT^}%LGO8~fri;pq@r?{ zlRTzI#h=?7|Am&jP~xX)>9get%RN<|FxnI431=ND&pBFJlPBw42~Sy}ricI~xt@)0HNvTIlX=ATWgqoY65(ro%N^ZEkhT^i5*3|>xePfs0F zXQkA%gdu6!pAubX?aV#Gb6mD;p)A<3Y|>btyI-Yj&!u`Y4$}+VkwJ$nCTZF0NCG*< zJ2&Mus9{pJtWol3W5#*`$c=WhhLSoppPW30->T$re$Pw}=C@jFu5?QXqWGUG z7JCoxA@tT&zht=$$yBmwn{ZLL`~&Y#sZn>|MJgC7>S-*TBwE2Y@^pfJT3545w&8dD zC-Ky3IKeFh*FhqZ-X2} zJpq+>9&G(uvj%Q))0_0EC2nnqWut*eX|l>`iTl=@-Dl3+p4C4!y9dqgw+OYz)7I1) zV`eQnny|SGW8oz&ZN_7|6>BYaeevuLx3Q`I7!hW@1;&ffXe-2efs+BYjQ{GBeoasM z5S3ooyMNO*`@q!n!mvSjI|*#0A4i-Z1Q*VU6)@5WP0}+(qC_e`im&2zYR%?)YeQT% zY_{X;%HALQ840rWrEc(4pzfQ&mMBHvqnol>=x0kKi3iWaiVG`QR%cr?(wcyp764>0 z5K(P>t2{BJuolOyjgXFUF^Ld$t?s2U zA*MD8iCN4?RUWhSE9IR>dF`ij(6(q4geJaChqF@{ z*=vty4ll*{=YeY_tajP|J6G7~W+Qulg`S3-pG!<>{yA$jHtOU!6Zq zTX>B(rY&rVMV8M#TCZA<(0yq(Q7rze-oJ$G)H&3~fl%ah6k33 z0fR^<*7#{}%G%+7dz0k)p2}?b*QqzgbF1mWi;lwFI{t*&UN*o3*$E;06l|j6T}E=j zQ%mvqC{`|p5RU@;?{6>-M`Uue$lS9(HKN1+9l}BoPH+v&z3VY|rD{zhMqOh5R{?`B%+7dsFx#!HPQk=M~bdXmS4e!?@kiD02l~cYSSh%=ak$JaZ$Bmf0et 
z$MPFznP4xxn?3My@qqEeIdYXy18%=>GHES**Yj~}rT&66dsHpLDFNNLChq%0thwec z|IA#IWA77_hq&lAVQrIeH{jZH>zrU7$pq)L^hZRK_%-YG`s8j+ro3y zu;0)_t@$}wCsPsltQ=g4}%EH&Fd5)dOCMt@o{A@dEj+MYYGI*2USAsQ3N7zUtM~^B}J2 zbLpG3G#NHOFHTmPpO+`g^vK@SvCq4Gd*1Ea^KRdsSADyo%*b9{5zo&3v{-dYd#*&7 z+;kB;mKO=$J{97ai*tsUaTCxElX&r~GIf~5(p3n~JtBd3M(?_Ps9%Y-OE=d#c5w+A ztgUth#AaB(lu5_V(^B3r1V_ddYw1Uzg~cJr(2fof*iAK#5$Vvc(!CWa85SJA^iPy~nYg}>0zw|?zKQ55*FIIuFjQ?2) zydmN%P6#`eKoGnA_hS@(Nqa6fp<=q8NgGe#g6S+5ko2yeXo8IbG zD}$98_T?**>hrptcxNc|U4>AVIt2{;Zk+)C9V;ASX99S;ggs?fYRvmhAd_v9DADxDgJRp(OOx4m8=YVMmOj=r)5}8BS}@8(=Ew+ zk)RxeH=L}*<;WC8N95s~%vw)sieXtH6|pLLv^fg*ZTq*c2oYIQg1zh}DaHaJBJ=e? zH1&ayJry9NQ2U#G;;&a>vs&eBNsgDwE(WkxBARl-MC?&6yg}5mMk0)=b@m81ZlZwk zdbIm0#n`28SPZo+JAV+do)za=tnPs%S;N=Hb^RxdJy1tF@(k2v=o0M@KwCa|J42?CUs z;jVB^E>h7t3hyoo54`sv#!b)alU*|s`zC*t6=l@Kt>XDzfq*SV6yZ5Mhc>dL;BtH* zt|b%~_tRB0KzhB^Y!qC0m2ZH^=#POI-CX5tjd@w#`!q79IJu=T_`DbOCk^ma#5KEv}5*$0yzyam7#B zpGQP>MvRvbvcEH;PC_!Dt@4{G(AeUKZ7`4ln#>4cHN4tCdc-%HS!9V{PeqH_@ z3b4AJ;Mad5pz2@&hc~@T#BOMhP{N)%Dq3TeUoLf7>)CHtu&ZHDMZ_C`?P^?@!-)1Y^Ol z6|WWA^V?6pEF(R^aNmC7zD;)D=DBaTxo_Wb-*C8KVqV zZV^4hm}HxOoxMX2=tah^VKGV`OF<1UOWobcf#yY=ei4WlvGo>)^#sb2)#{fN-$xMd zou{@J8O^pe<okXsnjHwFYO8+Ef2z)oS@@2fts_y9YS{E>$AHMSu7h{D))i) ziMpPHMo9_ITA+Be+7-=x1{+(m#tpy6Y=qZGUYd2LGWHaFgA@a|)RBZDDYq?PMBbT+ z{C|LX9smbrazx}!DP<_=Kqwg?qfz8+=*gMuo3mh7a;q}OUhq#kZoSD0mo2X%Y_}s4 zK?wnA_^}f4c7)z$gWqTTfw_@~{Qw=Ks_S@PeJ1Z+*Gt;48U-@;JRW)0gRgeL9J>y= z+><8SAZ>wM*gC{vr5W zV2&M)pocqzXbK(vQ{;tEY~Bd5>vGhg8J5EMm7gIk_z|0Wa&k;2Au~Wr%akXvL=x{& ziQQ^0CMA$YlnA6`&Aw1&-(;UG&rFH) z3gtfc0p*_HCh+=%ylTA4NfCb{VkbihYln<3#!vOE?yV35FBOOK&5TA!C@)*?J~vg3T>vEzgaWn8G@H%uc~} zC+yu`3R@9QKL1AddBQ&>X|+AG@P2dQxnzo){@*9N&+p57w){az&|c2Q5Zr5mEOf36 zr{;wW3&#!aq@g8Aiiq!^c1-{t>W$c7Zf$(mz<&riDZFi_J21_ZAj_%i>GX^|4v@rg>GT6h+v2&L)*eKC5&i0ec54A?vKbP4i zDm1c1`8Ecua3)lVgyma^urI>@SsGjcJv-|Ol(^D~oTzch^zil>{b~-FHGRTr!9XS0 zc?&M4_Y0OxCL)%-xl<16oG2t_ZK|tr;^M3+cCPzmCT0Bf|B$J?CR09!fZ_KrvlEd` 
zv&PlU;~0cFz|q^s^)Ic<+*d|KEia==`57%S6NyP+W}b`;U(z@8%r>k1v$wIZ#YW8y z->jwMLhQ&$w^6s2j_}5f1jR-}b=>#m^PT`ExJoTisXQ+@){}*m0C4`Zl%V<`?Z1)6 z(tZj^elwma-f#J{BzUXDzqkZ_d)c(r~@8p|lbwVo&s`}K*f?LGzehnb#BfQ9lxEjyc(s~ zK!(#1=WL7F9*FFj`%s-lsj$3vj5{{t2sAKPkZ{1X#iRmjFx z4kraRW?S&RH8?wY4i>gT)u)g0S4q{wWhD$ARP|uDYF!(@S4_ENK2SLxMTK1m)dD%4 zdgL~ADRE7;K^=L;Hw5?PLIci54o=dF|9Dlk;$@N!l$P$m!Ag%a9k9?6@ z`aiUyAnPgFNu9f^uIb8N=CyCf=)E9&gO5Q8MTSlK1dHZ36AkY({i6`!rRJC|p5(+! zt+FlZ2ug+Bj&3J3*3W521PYsbhRi(&;K2e6lzU=2NK27u_Cp^;zH4Wfu@qRtKI46Y z8Ghg6$l;7o{t;Kp`v^WDPC>nvOkvGadd?&D*d;lNR0G1^4Ex3ur7qg3E06T#1=gsl zdn=lq2hOzT4v|W)tdIjlo%YQLqi{tH|Mzsuy_&gYAKvfz66H+)GJ`y!LQb$rwPKRX z4CpIm9NM9oxrGSi2t+0w_3B700%?IufNdFhqI3hrbTXbaX%O4|7WWOscv*B})yO4F7k+40< z+b^|-*j*QM7-%Z1<3C#v?MVNLq+4aS8lv-cb3bd9JxZo5Gk*m0Hb&*){Ba!MO&twT zQL@55)=k#6*iI2;F;z`er40F)f`d#!*NCm0sN7)^O<2k%%NH)*CdzV(AIvm%=4H&( zdF3_*ohzb*zBH!If(C| zscdVJiq1S;3bytb)`gtzO&(`H81TwLM5djQmXw$6&KG30Kb7h`DvZd!xvv(qt1{Jt zeoj#CGLg0!k+9M@kZr=U_82~x@0lYgIBw3}hhA08br$z69!WOaLa8k< z70q7$ciBV`1sSYdr<`i->AWp)^QF#K!yr^$JV}W^LnXZCc|`2*9mv|1uZU-+?DNRM z?A7+uq(X<~m$b-wQ2kKPbUt)YIL0phboI>!6zZ|bWrB&#hB@XPvB8V?G7=Ydw?Xz* zTcy=y?;Mzz;A%}1@V$O0>Ku*S`F7xrCG^00>)t@UbyD2=c5QvtM_qdH^m-2HTOV^$ zUu%XRMknWx*N^&|_FZMo%id6Cjq6TCwrCIi7ZBFCy%IKmLD;;n7}*|8%40@2 zXoD)t;!e+AtcD~szJa1}+vV+FlrWZd_LI-RW?VUPrufdqRr zz_J>*RYblv>-OB&m~IT{^tg3zsOHmnl{4zBdnOs#8!D^`+!FBa%&)oKn{|N^*)_8a z3*M{}TEN@?mfN2sCROn!N!M?o;~A@XE+8wZg0J>LwDATs6tI=&HS@!u-KHM z2QFlrW%MraoIE~gjjt@+m!O!Qb><3YHOJYN5ojZRDvhHT-*>7@!n8!|yak zLdovN$W6(M8Y454HI0$k$y4ppw`BHBL$dE{K{^`j*(c&OfN#+#*yu8^4^CZHUW$wE2!3UCeo4HWoctdVLGND(k zOI~c|Dw9=at~5E!+*PbKr$BR^TrKn)GS*pdnCmK(VNhH0c}Wg6$`kGC;83|xgt;!7 zlV_dvPJTAgMX7I_AMH0k+-t5Y(w2N$lE0mTQrD|OLiy{>+@$2CW^Q0|wA6d5NehOU zyZUL(!btVz?oh05k`C|mE3@|M}P+Rg**{tH4 zjrn5U%RqqYeRDvypDP{~(lRdED9gT>iF9Lf|<16-gHwnRN&CN_mM&5S-v@tSP zYrc!<19^i*DlY^{-qrRzS8ZZHr_E30N9F5LHS<0YC=jPX}a^0x2ng zT&zH}W)VFpsRXc!zO7($-xl?Y$#`2Gr_ikq6irN&p+*MS{$PsR{tp3(dB^T+^>|LY 
zBp~XFVV(k$o?an8Bk?0RK64i)MkRer6%lGiVI^}f7(Bq26#>_5&Q9>!7h)4Htk;pT z!{3*+dbhN?VJB;G=_`cHFLbvVo?q4 zUx~hHm8Ye+OtIXbbgu->F}plTbh@%#BA~gNN%%6sQHle3`z}g+swg?Y>euxOT=9UUfX8p%knzshz z=={N=y9}rdMTLyXm38ESus695$~<>5L~+{GvT6#XAr|>X&ND7aFyqhLiY1Jp3)~lmE zFR(g&QK=9}tU6%0L^D58-CrNwiMI~%(jnL>)`)sfPbQs9S#;l9@$6MzJ$u{E*t}SE z|JZ0Jd2mgaXCCezi(eb9v>PaNeXvs5-JjWc!<7U`J zR>?EEXJEuAhw1|dC_6_P%$;D-y`raS>2=`QXuvl)@`m>Ca$bOntk=Hx96=0@k)Jw_ z?_+A+st#__?M6;Jl16iiwDcm@e#UH|&ZUs0sglM-b)Y!!Z=6;g*kd$sb?n>q)`xN5 zN)f4|Sk^UjG*kD*a>3C2;_#SbwR9;F(a|fD<;o>C?ySh=Np*Jtolc~|S;xh(y?EtT zlKx!&SdY~o74Rso(A~jvq`Rq7C%Eb!6}k^|T6Ho- z7bpM3ar$=9NDgmw&)SqGPmRv2>ZxOXT9rO8c258N_;}>)l-2{5^0)g+^ECx>D#(^{ z70-wuUU!AEsOhsGJ5Ev)e&BzfAZ6nPoQO=rul%7;n4fuO(g8$AU3?bh`wOlVdaS%& z^{Eho*1T0P|GAd7dFsmC7q5D!%h#wRx!hj_!`2%ZlXe$BSV?1N4klUmy`e4iU0nNu zXCijqNtVju+iQ<7}8@;C&4+Vr=$T@ zZ*Q>iB1Qt26{u$8V)3XF|A;nOkk-1YwakmdI324&)l-tGK%6u*7>=cFf3n@NC}KOStSxho_s} z&~6=Z)45H$V!B+>*jA*-Qx4_Ev*Y*ol5WxTQ5A}Gi*w7VPe@n9mL6>NlI|iRUD5^Z zhyg2#3bcDoN>ME4&~o}JK7CIEAoQ!HpCkl1J^G_VY4{S=x#^=G4NJMn3DYxG5U_-M zDi%T}YVpBQqk4vT-lXc$`D=vEha6N?ild@~^It2BpV6^g78V$&&%W>|jLd>k8swuv zy_E;^Wj*ov9}=Ib5^U9DL4D|HA^YJybqZVUCHpd*=)ts$T1EM4SbGSBQAq~m;&XiT#5A&aZ+00$j?5Qd!(sJ$u#8b2TTPc6c`Y8W2)H98= zoH$LQ%bjLlz#Gez)|>Z$M52bo9%J%;cJR`E`zaUv8j-(`ic=TIQ!UeQ~w2wZ)HD*IM^E|4B_zoS&EO>aUv@WW7;Xl|tajE&Qf6T=;d<1d{Sx4X z0eFXCrqW#PkiMCmxEi0P(rN8)d%+H^Z!s4_7i68=M0o8{6x_SWMdpz+97vEO>>DWL z2)gPlx0Jlk(vN4@`kd`3*f}{(!(ptgScW-Dlagx)r@Mvrr%-i|;h`nd>S2^hFUH92 za7S@~XeO7^vBaE2fe+t%N>8gN#bE&X|ZIC1%>)fua=3URLR+~>EC ze9mosW_MubmD<9M@x|W4AGZr`AUZnp1Y_r$=DQ`yeu*m1DOGTj7Wcx%%`F$jTXX$V zquiGgUVOnrkxn|2oV3`O}pf?7_+Jc zK63>U{}mPFi}KvW|1IR_Ve@}0|GQJ0@{bnu1Fdb4p|z2NA2$nuP4cEP@wDY-tNe7Q z)^BYr3wv^tAV`sAm4EEqaD{8W#0G6wz%H1s&6P^cO#c)#+8_K*MqOR0qQ-5oz0r-C zD%YyaR)!PDKl88ZIu+3(QVm!_i^Kxf&>c6&?DeFo)J2T;TkLAATNd(~X=Q&5&s zMii%zYIXguw&dnYl*_kf9WTz38Jlb68!Da8AoYfDj|wNcT;bm00xo)N@>nR<^DZ2uP2$4)2%-XCnfo`$gpA5s+sc%t5JSG-ny4n?aYBh_l|Px 
zLo9^7!~9#y1MhQti#-(YKV0Ds^G&qX*$7iw}a^xN*~#_T)c=}9(wTJi?!X_Wf!6B%YjAl1K?cJeE1?> zmrpPxi3b0#|6ATRj2fLBEZ1$A%PWl5EyY|7hiW*JL#JGD-SaxD7$|PPekIP8py_cG zldvuh)JvHc@WQg>Pn9P!*9AB&w>PJRV_ZE_YCoQX9aQss5%-PEU*%z4T5RqLU}9A5 z2$s_ht9$^zy6>fW>!ny^!~LhnTi-3lEBXmQbUB*W%BOXvc)P9gcMxbe!^W?#6Ue!5 zHI4p0Un{bn|1+SevJO+8CjayIG>u+BN>5yq#NFI9n$3pZxU|Gw)-)P(P;Xqb#Eotm zo$V9%ki?zRG#c%{7w)?fcU05pA4*)QTkm}0tRG1Wlsg!<{fufC+Mpl@AH$47MDBeE zRW`;Cw~ckU@vJt-ek!VP%5Oh}z{*WMkNiPqoA|mPCt{vVzAv%RD1ju$=;10OWj`pVs)Sy!*oh(WUt~D$^8d%V~Nhr@FvF@4WFrf z71_WqldPE%$um=lK=>YY+UK4ljOZ{fN-r*xCA$N)vK_UO(o?^orZ;{s@l+fLx4qyx%(-}pioMv788}g9AY92R|Isr@0>5-KewVN(=Vdyus2%3)bm1P> zP=aEnvPZ=keBHc72=_f^uihc%MY_K+$H-agr#}9(@zaV&8obBIjxP6 zDM`v0p1L6YQfe5Y=RkxREGWY%hUH*@I1n~>(GP3uhFpFS!W;GXYcH8XAEe0$z`IA0DjtIkWt0v_@MNCF{k5~pzBwB@d4 zJPP%QSCB;Kh7c!R@niH31KCf=zVGnLKDV&SzIU>^>yO$OFvo6R0yZH1?;%yQuQ}## zFol9IAFdtk{<9X6?gT%WCZsj&ZJ>?J*zf*HdazkA>q+^<{Raz^A&PiZIai7(gwY{n zzbHE$4N02B27vlI(THrDHN|Ma$&IfjIJ2(cQZRh2H_U_{U$TG3PHn+c4)5~ftq00g zekO%dZ}J{%qAAV*~cnHZiJR?!_aO z*pZTi*=C`PLW8YGKHX4td31jl;;x z_TL^yP`AEdocNBE8rt^!tjNpB{`6jJPRK|FN2z#Lw-s7?G!e1d5Z-`J3(r({;kmV{ z4HwM@w7!?^w`HT#>IPnX&nS2+h{}1$$q9e<=7gc`3kw=ZdzG}pz4#2&p;rjIm*cCo z*Ol|K!`{D-4B4@N=W%$yY{l=PbXUeI?A2$7J@4jw_TS7`>DFiYC;XWD*tV_q7|YK6 z%{yoQtcrKMQp${-tTg`fNY3A*cGNk~q&mZTgTEa?K5G)M+3W;o+Y7@v1iv;2FdCSDfn~X&@h3XQKcM@7`QzA2L>7&rq&Q}P%K**irs;g_v5nhPT zbFhB`s*UVp)Up)JtCMb^wG6VGaCRt&+@juG!-hF?EEXOkS7V9TTT7Fmm0AHG>;&JP zsumwnaK1~{JZ>Ka1?Rt>rO?ADIAV0Jbr)W@b*>JIZg)6)4*HF<=P3PVm|a92Y8U0r zC4JkO!_~VW`u+!@$gts5u9-h0e2{`OnCM>7w~vBzhW$IrL-Kv-K(S&2?W5p425*w@ z66C)P!Zyba(r}JK!x4kbYIPnD^3ES^*Y=iV*Q>G(R}eM?*GR&r+ZVW1YQ2DmO2Q)w zRSL2WtK|so_x)zWqUz&VpD(+H8mhLS=KKNCNFBgWheM>)d|JcRO13P(4R12(qjhND z!goHBO(C}H&$53*l_%BO_esr%-6Z@0R0tMw&ypo!?GYFLtp5cB8Brm1^P>~-)Y>Ya z6PQ?khcj89pVVrjMP1SL`&ojE)IyBH#mQc^yR?AI!Zof!E$1rK)fhL6?CW6Kpm9LJ zwLEMqqG6oxqWr?$;w7>(@)pr{)hbAVt3bGT36Z)*hC1s%7syf94v|J+LC~~E+nnG( zZgdNKl3XlDsC!*n`lq~bKN3lFGXYzNrYF}mf2yTtlM<6vL2aN2aI>lSGPiupA5e$b 
zu&Wf0s!qfo+(};Jw!lZ*b#v}eE&~qf^J7C3^Ah3fb!E($7Yk3(&1ob_GpFR^+R|0_ z9jb51^SJ21sA`M*RxtCt+V|H<9nAfhn^oKt?lolfV6*fdNkcL@zOd;5n-oZUau2{& z0stTC`Dh$xuO`c?$o*wpC8sMzwnnbe8R-{1aVWrL_tp)pQnqZ&x=Rn2M!EJkG@Xx- zqS0AfWx-CU_6tV2h1LmvalMPti>cPyqgUyB+E-#Yj5#R@UcSK|0F9{EGN8k@58k?>U899frPw@go;u_wdZ^c z1sepV)9DJgFFwkr0oscCfLx$GCph{8Q+0X-5UpAEoVjU0KJ;;nwRSo)AZBNKl2db> z4yg!JZPf}rvQm4&dD}buZD`E6asiqek}Ga7gx(S8;)c>cOm2aMPXy$r#H@lIAN01%Ua;&O_CfJBca^YcA^+866u;J zw?V@ZLeFRXNp(w*uc0&`=<$g)?P0m5*jl-yoEG<>^)lA5oO?9#$?K%wxhDV;_MkPt z+S9Y)^i1VCQs2(u-b+Y+Ws#6TN}HZJS~uIdK|=nws{hk7{nTsIuKm)@m51-9_uuJe zvFfIlzLcgPv8!(h>pHlr7fDx_6R$?=GKO4Nmw(CZodM758K|=k>P*@67;Y4*7t86% za<0a3Uz>J61=pR~*>|SPET2);Df4`W%b#Zk3l=v zAIr3Fq;cYR+&e{$hL;)UG8tw*Hp5)qf@0dk28>u08xLig6Wo5S8u$S|++1Rjt_NSt zWAJ=!vJ5A$+9REizqQZpE&M^}%Ib&u2{!&jf)t@v7e!Td^6}GGo#3wuMWT!tHJfvf z0O$lCQbpz-lHyj|;lEPC1G`w8vVMbh^{xrgKOqk8Wvq-gH_=`+OOz9qb13@#y z)x5-4DdXzHYAuvIMQ&LjvM669Vr|6jMi$RL`%KEu<6qfb56C6Q+qYAVl1p?|b$_as zRZ{4=uie%9{bB8{bBR8jSW;ylMRABFdCX3SH-vgg5vfG%u61J$l1jwx>PjVY$EUKp z{h4aAULPRj-Q5yl03*N=gO7Cql;?5qJi=U^(D&MPOzYZGzPn;>iROpfza_i7z#Wk6+~Vv-e_{k%sE|!7 z_Y7a1+|h>H<|>T1T&U|QPFNc-HdxU>eZxeqm`wOKV9!aMydmDuhU2fH9lY}{Tu!qbTTg58h8x#;S`xoB$QQl>-8;og8}|1XzBVF&#k zv|q-SnDIN9h9{{_Uoc-Z!1TuCjZc;E`L{*Re)ac2oa@olUlX@?uuH~vQXt`b$=JCu zUb7*7@+(Q3D?GZ%cLmm)@kZ;%6@*6-KD!V;iSST2x-{LDS`72^BFpf*U;XY=zkAj1 z9`(Ch{q9n~-F@|+?66Z&&X$!5%~=ENn_m$4rJiR(Xw4 z^g3FaBEJm;RwcvPQ^JHwMuhS(_}%oQ9zNq*TY7&+PBS z-5ue3!gl%7g}u)MM47?-awn#d;eTZ2w&m|OEce1(SvjKiR!h`nY&iO;JBDb@YbjNG z&b`=Hxy?_HwBD02B45gG>Ari|E#=dZqI`$B?AHDjMY`T6_M=idw~wM(ix1|3Q}umr zTAF&}4LG(_);P+VJ;t6&9<{Oh)yAg&z&b94X=wv;?pvU`b{4TC^0u5=7ulx(_z`Qr zT7C+r{oOB*Axg{xCi4gN&~YUffwfj}!Wmb$r#(_FdK%UX_DY=K3s<81txO(0%FU-W zYtjtr{2|w4d>_60>+sp`?`X|CrPi!IzIFNz4Ca@mXh zE(@z-CN3}GPzxUAo4~U99rbYpjF6|87YCd#c3s@x zmoIX9c_6yp;lF**o`$Guu)wC(wZme){U5)5AYW1ul)B$OcmnRDuKAa5A3R7WkMzBO zAN{xE9O-)jvfd3vx6%2!SK-?S(`NMj_Q8^OKuI>}WP#z{;E{d4eGmaK{;$7%Pzgxy zw-4I?&2Jz4mG&N~FCA=OAE__gv!9zFB9MF#;@|l8fqTi=VR#zZs4+#PT)Y%Y8UVLna6OH)h#41B9V 
zv?s8)C#m;?2+#Dy{0B`?2FU*}da*`}gUPZIJVt^@+qmVs-$7U=poQ-sOnI1~`PJ_r%y?Ef z^uO^P1Wcn}`3^$2`VPV|6m5@GLC^XFTR8D)7wDCLEk~BM5pGvJjwp%?h>u}5hnV-`Bj8Z z8lf{3?|=1Ggt6Sme_Qe_etW)(Fi-f=fP9mD6@iaWyI)1f-OYr;Be^0s-3^3t*Aw^` zA4d2Z$%p$e!YL9zO-r93&u?n!qvd(GmL4ciK8zsd3;8g@egr-FFhUnkx#9oN40rirez_UGg?&U_eQnfqacxqKMm zPL;AdH?1e*S3ispCkZSa@xutGxmo|khY?EL5(2sXNVZZ`?n^h|&3z&P^D`t}hbP9I#>a5pdnKQ4W#Qbqi@MCsy zs9*xG?C;_e{dz1D9Lrn6x}GEY^)q6Tbq}1cTdU$?PhK0XtxcA4LL;&+wT&w$b*r_i zRj*p9M_TVMjdAZ^EK@%t&w_ey6)tmZ~A zp+~T5^6@LEYOa-?C^@fmI@hcA~E_CH>s(%fd1>HlEwOW>-k(*Mt8(JPA= z6ck+Is;HQzxMV1rT)EfyMWF1ev? zPBlr3%}Z3$re@apf4}E_-^&%Xnwj7Hmd}4)IQKdGd7kGyXWyC@PuE%CI&oaF0DBHH zhbk7}_+|?hG$tD?z!*FoZw$um3{>Q|*Mb*Qo-qW&k~msl#ewaRh{-&UO3TCU5+6Z8 z1Or!4iDf()J;vaGdz4NH&;j{a3*OJYKF}s6*Lp&IY-tQPsZzv%omkIuf?Vu}7~A9= zHqaBSeY=ni`Z&XM6ebL8_JKDp?+NU5cP5|UNpZ}8PCM8$tvR!P7tC~qx}uRcwq~6P3~s4g8{g% z2od_c$mlF!o0+!@!XMX)8P?Vu%M{UAA+X= zAd!EaAJQz#uBuP4yPJ&B`b*gw}J-0-&mEE zSIcH`k0uYI>hj`I0Uc{(mXkPhdxD-%x20lcA}t7Or$K{phm4+C-~i?aw=+w;?>hLP z2x<@Q-2uptMR2GeT*D*x&XZ=|BxqA^#BzBzJ+zS)LxV-e@tgig$9ob>oOtX4)zyBj zCqiT^I$2O>bnU^9fkiRQTmP&tzUys$9|jvHYKZ6bRRk3ye`^?pTl zb^Hbxc|FNHFx$0pr=-NhQ5sq6zW&vZ*Tf#!D#H^{jFealS!+AJ13iu2=bvM^U!Bac_JA zt6c1G!>t+giQYCEhu0|EyrxXE;{=z1wyK8luqgKqsfm8vGYYOCn&?b#)^sN}pi-8> zzL#K)vZHE}KB!mjUI0Pa-oVQz>VZ!72T!!RdbI!p%wb=J|9ytb=RnY|H)#nNtT* za*x-xb`RxKxS^|pLU3?Iy#erQRID7II5{qk?i~fgyeCu3GPw|XL z`SY6*p|HgSh1g=8At|nL+FR@cG&Fg9q!< z199*NCx!en$R&51d89JPRn-vSmq{vvU>D@eBr}7un=`1Y0g}y(#(^J;$Cp83LgaPlcX6&H1hJ2ldu`J%5lF z%oblZ>G@OZmp_?CdgSj(@)wej83NW%3SlOsYqYDU-C^3*0WXz9%)ar+$V?qGJ%g;U zjQpki*@FDRF)~|qJqnCE_`@l1Z@N|c2Bh#dEUiJ;#Ax-p25qyhNznG|nndk&r3uwu z(JfN6=X8xr`?Ide)*jF`Q?)yF&2(*@u9>CXtZS0A#k!_U^XQrdTCvi2$un)bu4mP$ zP15!2sCVglRxR2HUB7|)L|xCOR*Tp58>x@c^=zxPFkN3mz0mc$sQ<2<)O+iw*L3}1 z>Oau+_0-qtdX4(mbbS-`n|1w3>Yvc{+}x^d(Dhd8Z`Jkev}r4JeGK&rb-kVXGF`8p zOvu;uiL{@p>yxO@)b%OUr|5bpql8&`7pup6=T7_0@T z5MyzTaQFb-rU}KE5&)Z?y3L2QQ86i*VFTG5S9%OQ2I(H3*Mr(&bLs{a{e!wq0&JRe zoAtU4TIKN1beko*O%iNA&~4`EHYu<{IQPg~ 
zSwHu4uCQqJ>y5?2mh0F!DEZwwHp_@6@#^iB!}x5G?~yC)?hAITh-}>ESS8YBjjG;M*=gTcSQrJ2Fp7@&CRSYb(*wLWHkPWH3~FNc~*Ha-t#y@52({F>WSH1?U^H`vB^ zUS%1*Dyq%!S3OrC?-1t{*NIvz;0VuW>zbF(GuA}4DO~!ov=uHrb<#E;8*VMwfPiA~ zs%KmJ7IV2hGw_hBstHnslc$4UO<%sWoF~!*cuJ8aC?du)y9Mi|Rd$1@_Kd zU^n!b;c|!Si|s*JZ1*CB8Ca*|4E$9q*qtOqVg27SJSfD|4-}vW0XxrEM$_hrLoT^( zx_g-J?)lnoSEI_vhBv?hMxMF+2eO#50Uohnbe#K zq%}^@E)aqon*w6pmm|+I%sl(3MMfR;%P3qqhJ2~gtj!rV52u8iB(lxOuG+>hvc}~4 zZ_le03T9k21Nzl?d(fO|gAgZ1y4Wo79F0Ag1ZO;6vn^w*hK_xD z-_x0EN_CmyrJ z15&7z-zgtMw;M?nMhwGTwOA})(*@fvc4@!6ThTWdh@@lWT&0X~9c$I*>rN5M=|e`R zO~oW6XSdKp$GCwvH(_EQJ7Jqo#OUY$v}5dO@Q$DphvhD~tDQPW&tc;%%$1mYj~Dqe9Zw>emo*CwPT?`c66dgVi9J9sokhrqa}`Q<^~i9-ykvgWOqFpSN=Nt2$+I z=V&jl;uaTGhj+uqkmmVi?GSc4aHGOObSZE={m)B*u{Zo705z8Y2`_8FCNjF+r@P&8 zhTB@*?MB_r?eEs;(V|EjMy4D~F_qaCM!#+Y0%4mMPJBL9#_j1rd|9v-6ClO`&sXu> zNx620u_Bn>0w5!6a&g^bzxc*Dp!8j(LibAmAG+F0cd*!LC{z|TLw6jhW#Q;sh+|Dw zpyq&R@5M1PD;`C?23vR5RsCqiTU9@Xx+AN8404BMLeeqChzlPY2WEP=K8nDce!@1E zKT(f8&Q?!*r+3pv=y%@4Uo7z9ojMhSj>Ej+`${n$KgbD6Z}98T2kznlQoJpuZUMb% z0|KBe-DTAuAxrdz-vdvLZ))FTd5srK1HINjZz~G&HY+?QAMu2HZ(~|sEVI7d80_5x zfGe0nh<7mX_O#1fsc?l?%&{u$FvG@KKTIv*LgorU<5FhdosHptTcXaugdaTvV$Tlt zvku@W{xHYyjh#kze^ZOkC+a0QR#cM76&WIX!K=J~9Od?>6^Es== z7!Ze=3bPr*x>h4W+wwZ{VNLi8qj~b+dL*ceimiGz#RTt8zyWoaj9!BjAw#z63MD_g zwAXHfT&;<|MtRteQa2b8;@)@qLuL1{?%d1g+(kK$F`RGGo&WBjZ-x$5&b6t90slM+Z8NHSXc^9Rgf?tG}P_P>&~ZBQqxcFdyhxGyZ!-t?vO)9yChF{0+%CGg@6 z|8@}?sAYx&X6N_e{-bo>;Q$}1Qq<>aij8L>^h;2#MbG++B43Tt7z(>CEo;`&V8L{_Hum(EjOB;ITvNu-xy{WJA}s^w-jK4_&W9` zTWc3mKh0Hjs(yYd@Ne{(Pt+`b-^J5k+~J3rSQ$O=BE@Ym#8WyfI2Frb?rhF#JFuDt zCgDn;AM5Aw#WrmKY_#728}q(geaj=N0u+9pkkdSH1ixhT=?hU5cWGT=#zwlgwP~iq zC%QwBa=<;4jW4Nkp|0g&u>iwxYk|6pr@ht-zVywutQ-G|(tsiRiYHm%N0uYIY*i1i zgz)Z!6E1ZDPtQD9^9BW6n}EwAzr#%hG3r8!p4wll&B$8vS^KI=ak%jY%nc5*VM{cF z9(UqKZFgT--QAXnpqRDAY$Mo&3mm$T40VA68sBK`Uc`;j)I47j`eiz65f}Di_Ry~J zIh>Vtxt=yzIUQWYMc+Aig~8?J`Dk2{oq}zhF022gfp}UNMB_%+=PDh%$M?ORDUZg$ zW=*MO<$`B9I)0tvbU(?Y)U?$>Wb-HZfk96fez0jx|u;GWcX 
zpL;kqsSea`1vS_s9xAo#pi_gQ7HNP@OR#eQbHP7iKa&%i%wtoI@36Y9NKqigFCE(u zYf2-Qm`-_D$JfK)csT&SRSgknt?|=?Ke7C=RsE3S@j8=F;riEGYw>2VcNg~Kc=tB8 z^6qO4@E-8)*n%jzF6ipY;o;b7*1qXXPv7CvMOKi7agsQ>jxgNDxN=#Mz%~PtI+{4a}6*!+j zmjh9&coDxI=|>5Ek$RF{d7-#b^n z%vWV_OK*y`S! zGZ4_C^2a$rZ1i@tLCOm0*1p8_`xS6h)fw`QLfkcb_f+Q!Hy)43!}6&Ac>1- zSH6hjEDab^;2kTR4@}Wd_ob{@h%MfCqB8`>#9$#Ir4t@>#NClj1lbC?KwR!xq`>xIq2$|)C4kRi97kBH1ho#{OG(l%V+?p%}dvvWnYLZsmX1hL^+IwxLz z!Vs?$GG`_7LQ7d})RZe@u%7)1kQqs|JUtW&4)0sqZO2z1```7L?)JQ-A7|` z^09ey!42C@r-2eUV!Qcsg*K!M)LlekxCsBDZm9$!#-PxX)Nm)_CtHZU<7LtaLCbp` zfgx@|PG{fOwN85sT;{-Upyo_Iy5xQ1kZE3M%Bm+6t$GnisXV%b=eb!pnu|e-wb+{E z%EMc1uU35=r@J45d*C#Ng1{*%cQ`TS9Z?1@gJc4DyF1*`DzTj~w=3`L_=v*rgr9 zu7J$E&RADbH#LDgUeTn;8(o}0#V-alqx$w>i19kKaQ#i%qeJ-A!(IvM1p$1)%1T6sgY~#Rn zrE!p;xxLjo+=!ZDXTy7Nt!m4&O6Wcr!S=xEe8G;~pn^S!U}!d00AOUFqs!`2@x2-m`75ALrOiWv~mjRn@(R0dH_nx7@jlEJ%)b-O9 z=icKHM-(YA9U}xRP-6#8I{LDAgA@c(_YJDP7FKxmH*t6>2OVHs75w5~9Dd_;K|e%sA&RTwxv zsxcn=4&@i%H)eheY&tczv%T=@ai_Iwgd2BV-)dER8ju7I0b|l3jj6aVXSzY&*Bh?| zHYrK{asMa`$h0bJ$O3 z>bbhmTi19Qa~DkC*BGv{_akdzDBe%OZATrze0c2C71Q)8liqIZ)SO9g(|;?917v~O zlzhBPa7n_0&|@@* zYauwGbKyn^An`pa;SsH+XJpmLfNZ`z*Bcd54AyeoVK|=RiB<36HHN+TJ>jEP}acELV0T(I%(JaYu7eF1Hi-Mp(kkypheQBYDKC>Y54&DF3vumjJE--7#+Huf$8PJW9?v$#L6UPzVK4+lY`5fpDJ|v@S zx+;W!0Fp6Q03q#~+ zRdL`_;BjX6gB(CC5jck0vl7u5^~1vW7VlK0r1O0=?-#g9f{%oTX%mSDCU0@Ual>xZ z7B7aP7;8OAh?<&?$HDUhb3nFCv8}lq9y0TA?=_YjcqIgfUQ8(w$I?%_)U$-R8r4>{ z4{;n%wc_Td$Dymb5&TKk$=-|P)+q}+mL?4T%x+XTggSBk8!uo;Xbi-Ib$S4vIp8x_ z$~*3jh}C08^M&9t`c{}5U5Y4p8ig12ESW3u(s6KS;5@c3@Z9{MxBnLH0dIfTiAGFR zrq})y#Cu3;wNaUYns;YyeXtX~Ko0d9V{sT3+p0kiBLv~^6r=64qcEnSN4or8&?0xD zmrtI&L?<5QRIeYd$=IdkV)d7UPd+v4cDwT4W&00b=_`Zks)Cn6XU8=Fb=cs;R}irT zSJ7!liWPIu_8lp0LaL^Rq1@CUdBo88PVL`=k&qqdTa3f7mlT%Uh*E)AE4$?Zg&H= zE?{9A(=uA};jU@a{2uflciT+2v}C+!@?( zrEzcCm*^fb2JA@X{S2?;su1MVEHjVJYTujG3xjwN`fYbRH1h87l%xwiaq56|A0Sl?jZsu~e+iBVmZ+wcJ90J)CGxlCXG8tsGK zw6x-?Mm~0l;zd5FFwR6%X8XCY2 zzUmx?E9V0i?!j;TlGh!p`ezbrJgm3oT8pD6Ut_y; 
zZzU!-FypXkA1+J4R$40r&ziD|FG4hK#qet#lEqn@i#)+q`vV$rPvlR1J*{+iByU?W z5uuSdj@qW{x_H|yqoCsx(+TjM=(aju4nZW1NtMY9+~JkUQ)y$RO)tmG5rm{6!V^Q^ zo_6$w3lXP!fW>TV!Ndjq-gU%r@F8TGJeP_f<6{Hg0Aj*I(u%|iRXM_fRIV|bcgpe4%@7K3>IuHeS6KZ!fdK4%0v zS5)hGfu1(bE@DT@mFe85aY9uNxD^(=*CVha8wYom4kKqg!P+J8p`LEbO60!w&6mNr zroh!avvjpO)1jVK3TbFKjmjBY>9|#CM4D|AHud6BBgeL?lduSo;<*;VsmBV#^An?cM_)r{1fMbMpl z+4D-A9`#iQe$6WD9(9g@>%v=InAwNL{O48cRJ9-q^QLI8jQ5MX!cC9kD*x87G1Imw z3)rw(-in0<+orb408x{#Yfo^g0k@suRu{Jwau$RWSO@wZ_Tkdps$y-A`(e-{n9dR8 znNtm_&Z!!iJ{5#}99*OAtxX0^=OOpHnV4H6DUDsZKk_YXDp!Q<(WatJZM33w)oz1M zRZl8zM%haXIDeEK5v96-vK?j+(aZY#6gC<%dcKVXNuT>#`o*Kc7!m{*jz`1j4$%h< zhnAqTu;>GXhHQkqs9M?^U_2f?3f=Y5|o+EcYsvw>w6FmYfp@K zFm|IO>(~uUM9(iV8V!N!`MK6q8|zUhbeLx~VnLZ{2d)0x{N}}aU*dfI2Q@OsJEnsr zCHgW8woqhY4TSHDaeX5jb^*9DZNGL2Y?T%E0sak-9J%nU!P;8jDx(*+Md{wgZRl&J zSh2p8<=rWxOT$2u{z}f3gP5z5f<2+yV3izrfE+r2 zamZT;!&+EIAB1Hl$0fU%hubj*YMi|28g+8;ZFri1wFrHkViM#A7DtaUAFyh@4gpUb zc6G)+v?DHg01uW+Ox?awK)RTab_80rYCi^%@=7Bt0;@yJh@97f?bp74%#57(;GlX| z*WsWmXwA&SxVh_>*Ye2FE(f+$rsEBRtl2eFapm8h(W{)6q%pQl(=6728pqO4Dv_Gg zwzbZ2@DxsOq@ZIm6%e*=p5{QK+8ig|>s-@Q)CjEk0tsfQ)#xMW#q`O1 zlA9lIN26gLG&W{wx96dlvOqM3c!Lk21F_PA8bB2$^f55ekdLc<@sogy`?!pN-i0fU zjnD2aFyTy)s@V5&1iLc$=PYLPYcABE+$ZM^|590C92CaE8r~F-ev|F0H+=6|4jdgI zEOz2?5?Mc4*_6w)lCv1};W%2&+bI%am&7QN>i!(L3bYJf2UKE>G`IyYj@eHr_d z=tC~RHa`^m2-t8-y=_xalA7E^{imPIbH-0MroFMY@DXW57dzYRTMzXfV_lkE`;(Qu zQmyb=jdD-n#6$Lj8_LK5)dt&u@Qo)fbpK zii*c8h4I!b9(To4oSA*=vTUQ@EXeeB>4_Q#y^uF6IutsVek_`@*4!JnZWrt=zTkG$ne7=@=&31qL=~{<#+NWqgr=H>8uHwPFn+LsC zt^8i)W;f!Z+l%2qPRqrf6(n+o11d(GJ2{7g3NgUvptD2ATE(aW+I;Q`Z8}uFQi&)9 z8W|3Nv@cQhv`EvHmj#b#zM^YT+FahKH?*5|a)6nU4uCI4y_faSIU@3gZ*sCC-GDB9 zi$wH5rDp~ryK$&d+lA>hKW-CkZK|3VC3AEXe92?K_Tb%1lhF3nU`vV@e|||Z#;QeS zSSCkfZqZXSGm+6EF!l{DP2z4s+)s=1`8=9o=75e*ww81zJ}D~F6yd(KYLOE=SzO*f zkf*LZYzZ^h(XudK;x_}P*3tO>s`BX#(T-vT11Y8W3jxQd+O%b7Wr*HINcB0KmDi}- zu$SBC4%7d>Zx)aHQ8nDEGHyCi00Tr_NIc&&*X+9sE)R}v5g!nE^hcSQAVg5WFwhXuK9o;^u z%emcK`lq>T=Uo#C*+6GP-SGod18pETJuNEz8UrSX65$cW`0JzG6G}YA#ky9RQRTK0 
zKsZ7Kwi2xZB7)M=(t`P2oG7yCBf*}OJ+-w}T#}eLB^!J8>~=N?!a9A{bbLZAC|iKf z3Ln_fXSD)Y@kgWjECGQ*!L3?b+k}LMg}1duM7C=m72Tm@Ol+slUAkTnXYba%M|{s- zy%YLe*tg$B{Rbot95i^y#X~Pi8a6z6#K=oWr8q{XI@88n=1R{Po0)a_xa{#0CQibp z%c)mhb@jAsuFaXAJ0mZ@pm64_qU&ZC&nYP_n_FJt_RO2V;QAYGT)1fQ5|LV3p0=RK zon2mzh%BmMGbn+Zb`2M-Akag^9|%gTz2@^alpr6u-RxfS*~x%mb5T-dwa z1#`;Wc6X`0xHLE4?w(a(Pqmxr7Z=Sa&n>^+UR07-T3%j|=Ptg!e@J$5L2gBXJ+HLH zotuY{2;mod?+SZ`r>v~B94-a9bL@p6U`}ay0pfsmj>4e7Ne}TG93FC!-O-GO2XSXj zOwU9DK(VmAbPkdzs<7u)R20maQGC5U-&0;xGSgmysOAw3_$n$Xau?+m7u}$u51B;& zMJ4$pxBwyTGdwe4wqIZBDfh>fWR(d+`mPIAf%Dm=w*dugFPFSodu ziGfqZ>7u;SIb}t~1^M=b0eQvcee8vvl03yzg=cOvQY97ca!;Nc#N%H%I*W_5a*Ik7 z?WB7U=`JaC+Y3rcJu_$7E6Q^7Krg%(mm=9S80OEffi3vuqrE`Sya|(Ze!FJ@>C%$C zg8sjqAO7hiGM!Y!MjV?H$)_LSKPZXro|4=dKo!!{?882*Ah%49iT*D61v=*!`2|#Y z*Box>dBat>^M4`S#SB;GfyB)#DtCKwi<>ivxv3H$UMLsm;eSqPNdZdf00<9&-AxjG zcnuuJaLsg=mKBt+EL4<&5BUZ2it-G&-1j^5K=jv$diQqR}7X`oKsw#u>g1Mf83L;18pI}rz z$W(Vh`J5sYGzcSQfW^R5;;A50=jZ+xRt2CZQStSh<)NSiIayjVr=Y}b&_n-0gz%LR4~Y7U;6lq7t{SreZX#{tA?IW5`vMS6*6CTIjZ4QKYC%?c3MK zmrT7u;dTbb zmzP%|CQll3QIc*{`hi0;(+s1Se0h56IH%9Z&F3}jH=AEqn1f6M;B76;YhX6pK6+c* z!hA~$GX}eR-yq9ys7f0WtCCE!DzvkFJBJC6B8@5 zD_01sNa&rEJ*5)uGaB*i>=i)Eo=~}>5}^c|@maHGS*@)Xl$FgdD?6+GhL``ZKf7r8 zmmti)mM-TqyU?@xrN3%%_TQQQmx|^;i}-DS{xK{Q&3^&tGyax8Grax(>%7;AKRZ`v z{dY@%WvTDae8KwHpqYb&%Ph@*0hZtPXNEtU`hV2s*Gd3Wio&0nB2CTu2;-fBU!&J_ zZ|VQHQQPb*LNx1joqF8TR}LzdE~~0ue$&k>R<81{UbFU=b?blk`&<8T+wFJUdDq?d z-22D-Hr)TfgAYCYr$-)r?D0Q8@s}r`+W7P{&p!A3rWZE9xaFmnU)j2?ruNnCuf4uw z=dRsv?Ag0-|AD%Lhu(ba?RVZi{MYy1|KM*Q9;yH6<4-<4`q|&LhR++n`0}e`O<#ZW z?RVcFKXLMhQ$L=@E)emr4yevQVERo1s{d*F|EJUcm-+u|1KRVus{!r*H2p5u@Huma zb0n>Q`S|+Tv+&uexgMW#$jTa!pFco1WM^S54~n0&AP-~80vyWIzspPKonan_5geT- z7L;L_tb25LW_l`c;2^_OtRl}qpPiu{W#vW1fLXcad9(EB#F&B^$!n&q8s0D|04#NMOIl15^xX7qMR&R#rOUR-Bje=MX}i?Y~=Zu%fuQbiT?rgg#d? 
zXNNmK>&}grZ>VigD42(dkfK66rxUO5^Xuu&z_g(;cZ6vfOnD*qOrc=L+vgNu-cYYP zyZ5ZvZ+7+0!xPBUj+pMT6 zrcErGQ&wD%X%zoy`9&4(Y3`vz`j_P!6@a4d{67|~7lgvArB#4vRUVLN2?-D(gIXm< zMq5R+J6uHPhl%JRp(46GvSu$fIdCp~=>jzwBqWM1_c`A(0|#LPS&B z`mnl?8f#hW>8-L=8WRGtBV$8E>}#PS)(x27M#K)Wir5L!b&Q|1cIu9eGf=yX_#?Xn zi!L)_YC4ofPmfB(YO~=ls)rCCL7Dz6UBg6I;M^5Bcbya}x(*2uT_?oWcdU!9iL?a? z+mJTGHX*dk3?JMJtNT#(W;#)=L{vU;Z$sRD{;oyUFbB%#?;LT>PQWH7DAP}ykRTC) zctR$$^2L*f^#BQV66q0I!U9B?B~n=M22e-vq2naLA+1HnLG8;dF;)?i4>hT^h#AsK z#0-imv$P2mZ7eT~0Pw&PWd*M2!%wmS-?k#^nTRqA`RWE=K||~$A75)MK^75&JdTZT zE!yQjC)y2pR~^=mYDdlOudD& z1i6of%tud%tZ!ov7xvd;h20$^?D-vqeMkpkx3m+nO&#l_>zZZ#<;jq9D3*`>semf4 zk;jmcSjb2$WF&SFwq9-1A3@)X%QM0Cz?D>@Ge z7o8_`Y>KXLUl&=U%MS7X6y3)TI5!q&@Xta%yIX_LfP=znENucrn?b>ezZTD}=+VCp zWyTY!%6>S?eVFJt;mkB|^a!y5>M1kLD3*CBVdAX?+W{_gYD?+q;rfpgH^swyMKB3@U zzUXMVN}MaLT@dEji^9|5+)>Ei_VC+Iv`=mOOh}WpzExdNO#sR$>o624LslZe8`J@Z zLd#+YhF{#n9UMKBUfC)5-|Px=-gR>J0c%^+wwO(RNTsqS4+Z zsC$D^eT)Ah#Hc5Q7;5Bm8`jsTcTD%zPYE#`DjV*EE#0q#-8E2@Q^LrO zA!tt`K&wr3EN|ZwS#PTguW4@g`aO-y(4gk}r#GWHy_9E!7zecm?u2@Iw4pqrOqRE+ z_m#)URw5E|8##fohfFu|joXB>4&{^EZXu%EkXX^}n@&wJ^&RS>YTA`WPLI(01Lf!s zuzrR7h5W9$?U}tzh+Cj`7&s~(L+)*mdsX+UJ_O3L8}%RRAfx{C)i-y)F2qYvvG0Q^ z_zd|m`W|f&4l)niA?s(rOXe+w@PA`ipF7Tc5hpvN&?H!sHn;L~4L3mSW zeVe-0HNj;;(*qMt8D+$Jz4p8j?^$KMiG3YQaDWINf*EY$unb|y-${`oZJXnMeNfhCh{-PBSu&|y)oz3#lxtwiO`28}&NwEq-#cYJL6I2-gw z((;KI>J}FmKIgz^NkKUVm8tsLe5$*=I1NKoOe3vUvFP&;nHXt0aHFTAqM~SK2?mUu z?wC~CoTT`SzKLEccA6L-gm_4#su>c)!Xd;Xf#W%w<2NhT9LXOH-##1opB;dBC%W@p zm<8bsk(Fl7o~gxR9L6FS{E*lEm5W&V%PP(H6c>!a)Y9cxaYk%k2&XYCRA59nL8KO! 
zR+y%-X%j9_%fuX8esMA8JuMU6<@irW$ce7ZDUR%PlotS>HJR5s+n2EiDCb)~Q&^LHYF2O7ZdbV-2%PcLO?I{~$ z?8^b#oI;49OKNTzW>F~**w^6}3zQUOGrvR$^^R0DRhN>x~h^Hr8H#TAGEp)jL*HBBH)H(g1SVr?a<^~>o!3KLzbC#ek| z>7ZsR{p;)(@2k8(C_m{E;%#NC%b^e#G|Po90XW7}6Ev~70Q1Y4bfc&`^7Fw!^sI*j zPeEoX2@(s5j~*tq6gw7#I7N&oU6ZaaD-cBtjVWO4rYR!=4si%{7Z;#xwGfD&5hVjK1DoFsXD(QGP+{EX-+d51fE4EsPu+x|uSyCPPJ`g)B;I-thq`MY*nU zh$F=5YzHy(JRznCd$5`4q*5Oy;-hoN18zO0m*DJzay3JXty78}$mhGrfk{Pqvkm_2 z^vf}$20)foDn4D0jWly6VfUV+LU9l}u|fGaDk4bMGk#_iWpLu6z|7RF+%lb7l*$4A zX5cnQ&ja#;c>*70XWGC|2yHUe<}p2+#HE3m*wtceSV5S^fOK=GT+6(3l;lq=D=N_m zd{0T%gaTdsu7Mg>MGzr&_-sJ?&IjN3Gyu?ibu zx}F~fz{zrc=d)bspXxu~e>m_T4*c^RFw5Ma2aa5HXI~-qU1L8~_?DE@KhgKMXB*H= zf&O`F&n$m|h8{yab#ZDiU^rAOs5Vfop~9f72BaO$Ay9PVUOm%|@iHu-4T@>7>Y!aD z6o$b{(Y_lL?JtI+*B|HC?5dSCr%>QWY0D)=b;*FN~( zgLd8dtlysBcSeEv83cNqRR!T*5=K7F8*IQoFb#h@0SFaE{%s}SSSpp1X-+V|`t z=e_IR8`Q!#>f<*Le^~#65Ca$MA#eRkZ?|x|Uea@Fix8F;=6{!QL#H$Q#VynMpLQo7 zeu)<;JiHkIEj!^}PdzZApVUN?#e!yIFn5C2_{FV-+` zG|Zh0^8&-%*)XRVX7&~M#Te!bbfY-=nI2!9VP0pL?S^@(VeV#_ryte*cQ?#6pX%lX zhIyl5##D{^HW=m`4D&j}tO_ygRv2bg2w+}dnwbIkDosWj;s2|Fng3?IEzPFA30u0i zG@JfR*wX#i%w{+fw)Ah>o3N$(ubItoCj2%3E#1v<|AGIvIbiN{+-cIHhrhZ|AhfH1Ac12dINr7z{3WtGvF=*));W30XG=% zRs*guV6_1&4Y<&N3k>KsV3`4n4LHky`39VBz-b1YYQSs*rWkOD0s9%yj6dEmcQIg; z0j&lU25kCP=TE%>4;%0;1MV{5MgwjzU>gjG{mZ!7KK4hR`hST=J6ktUZ$JN4xWxF( z_M12`O%oThy)|*D#DJG>O<(K%rR<55Os9VeW;_42@O1`1BI0#^u&we(#NW)a!hRk< z6H?DMI2r0HD7K%sLp=}m7Sy*;ksvq`>T;+#Bp#m`ZaDOu8<-Du8`N`9Z$f5k zpt7Kf8E$Marw9K3{?Gv{hl;^EJhr%q51_2j6TXYiR}%Dx0aMZOI*BVZoq)5T7QmkH zx@hcKfW8>egU(ns^a}u&LD5eQ;KxwbQ&`IfT!RkUMo8>Bz_y)`C$NtI%z}!6J{xd7 z6#d)^n1K!(@xeV0VkkOmw8z~IA_t1`PY2v)*w+AlY3Q2($KdcN{kQ;24gI5le}jP8 zkybrmDmr-i7SIKFe>Wkf!Tuq@YtWHf2>o=xMNnV-h&d#{?|TUG1nf@&zSI*k3H{4} z@x4$6p(lI?>N!aLVZfVvW3M5?t^kZo5Tf5tFaxfF8Ug#&fITimI?%@h4uvAlNr21x zVqOaN)qs%~!7ubtfCW$+pq~Z!85Gmf0QV)K9}fN;2K;Fl?BPcY7h*ZoF8HYfY=UC^ zgb~TQp707N@^&g<>Ikf>{fISG!23pmM(8&HZiONrY5*r(s;4^%@RAhVzXAIsLa2z- zI;?i+`Ahg?DpurRe;N>Xb%<>G0qinH$AR#+%k;cH>_RzB*YQaNd>D$O+t&aaposq$ 
zfPc&YUa;Q)7(W(xLQm*|A}xfAGLg@)uLQJY>GnZ@;g^G#KLJ0$X;AUd6NZk{^C%2( z7!>0f0q8OG3jo&|`bPoZhe|@&4*;XHbsS;FWULRu zz6)Ru6zQ4;co!7?+yl6BDrARY0Z&3PuL*BNC07Icg@D-dru0F8&p{oAJ>f-H>-sYKEgu35%4Xjdg!AHgeWP5Y|=mAu{ppU`X;~`rQki%$_Km)igLxbEeDtB{zuG3 z9+d+(_=x~agUW{91^6Kp>x6p1NH^AzVIKup2Sq#yH4kuyJz?BDopw85E)@BU`(eaF zLthDa`2wBxY``Vg>*b{qaNLc^Tf~zMIA$U6Ck}w876AwF^EBX)#d^Ag#fF|RZix_f z_(=eK1&X-U0G_CXjKTgS;1w$%chFA-T)7gwg?=^Q5h&(YJ>a-iI)AbOpM|15_lg~a zA}wzL#(MR#)&(%n&?l@G;#w%Cy8v*-T0DJ-u!Q$ODV_s<0>wB#1^nzzy$;a;d)@~> zuulNo3KfNVvIg+N2T)$1?+5riR3`L6e-h%WM?eeoO@K*{>h(F{J}BDP0bco-Uf)gw z9Qe4d9|HKWp~w9_;wmWOGYv5JNw(FjhBvz}^Ws6Y6E?X8~42(a%P} zZqFd^U_SzIE>u_m+Bm>j&w>BY6K;kg9|*nA>vFXk@V-qbv+%P4@Q+)81NDIKY(rUu z{xD!jjUF}(@OUlcf>%txihbnUb$uCN+-tgA*#Y-LZA4hY?AP_MgvX(3U~k`nHenZJ z40^&Bs7IW80qb{zr_d9Q-=oK04EWVP&;$D>K>L0@PQnVPYS_C0f2b4UR_L=2f}e-< zu=#-7pj3Sh*y>GPZv}h|YBT&l0qA@ixIs_Y?j7(G`Y6DOPz*Z>@D`{~VUN3zT} ze=p#O!|((9>3~l{O^2RmMR-obeM*NsCqX?S&qGj8$g>7b-~%Dg0#HxLJ^R!X<`{bJ z+vnbT+7oiGI`xD*4L$d(bI&>L3Az8BdP43krk;>{Z;1n81{CeN-s;(x&xq;Lonfq)69hS~z6Dz1zHT)}F| z_;i1*Q9991z&-$QGa$o3%qr=w0R{uYQ)MdvJS$h(8W4Tz{-fJTlF$r!3+s zzw{E|W`|0FLT!%U^MAY#=CaE!6H}&45k*Bs!sGFXN_wxX#DAr@)~J z56HbI;Jg!ND-5{LSHe7hKF!(z+`FjE90Cd}evZl0h01;Y!Y1n%2#lwlbo2ZJ){_ih zScv%fEusIt2MP-hAbn+4@fQ{r!ybN?F#gkU=LbK^Kg07YJ^}M-q<>(CG9wYje}w6) z?*!7v6~qUCA7U%SjmE_Pha)f@rw4`qveT>fp5A+$anStpglXyUaJm@r!yNqb?x;TZ z>^%Sy3dtYq;lC_xPX^#SN%K$32SsM?Jpdo8;T`cmcGtip-Gefp+;`xi?oaGhe(8>I zUpPic+^KysO!+^scmIQv_MSX_pt6Q$@JIeMdE3d;%9;Mb--dfmO69Eh&nPtAnf}$i zmxpy{6~Drbku$y(##f_^;&fcilKQ~PQI)86{Zt_I7APee=Ye#mKmTxy=Ui$D>?QhA zP=Qdbp=?lbl5!paWrzATIUh1)h`>R6F>&HVal;KaDBMgLU$<_Zc>M9l#dFU+r)2z{ zcis_4j~?~Oc^S6@Y(yQp@rBlu^c%}=l$#2-i6z^PZ7ZBzDwZIN3b)~w^KFeekc8Pw zzWP3A6EsWyc=T#1e=JNn~in<49i-fIG^m|o`fjgua^_mnHzahnl{ZdTZCq?R8QjC31it$IJsDPUM zu@qPTU5dPVDQ0{j#nPoq(dS+%)~{bLZomC@ap#?PihJ(4M{L-zK|K8M!zzC_ZrmuI zfBtz@m%aS*%i_)L8^yA(rFgzciq~I%UF_byTkPAnPaHgWP`vflTjIU<-oyJ0e-p=! 
z9u{wYBgID_eWc`~p`k&1ckFZV$q!OAH8qJ7Cr&6?AzK3mVEmPhJ_&@zx(@xyjSwCb z_O?xsQ`ru$pbivQ%kkoRIak~v*Nf-n)8Z|8(5z2I6vB7UM*CWZvr_BOAK!?6Hp0(r zLR%>##VUlq2jQPY_!@*igz%sGh3|#%DcR_Em!bc%4&&R6=mR4Bt4$bVApBv3{}ACn zLHGuQKZfwf{lX9Eh5oDqvVJvQ40ofCyB2H2=-)S?|0@qg3VC>-kbfI52{UwAvh4@LM)gr9-% z3lM%S!as!Y&m#O*gx`tqNOS!M2!9men|$H>i2-6PVkklkHzS6J5yNYU;UmP*6e-0w z1Eu(GycEahN^xSn6hAyI#g7N2FMMZ&zX;(+A^Ze{&qMfm2)_d1?~IhaBM`u3rR%PzAU?ipFBX&IvseOjtx)M!UeKYHldvwJtgJv}SM zk>N;nrXoBs>))?$-%AY~=_Mm8!~Di{j_H`wA08M(eD`kl9zZgk@n>YDkIKmD+CDZ0 z?(mRcc<7ab@S_}zAT=YWbNlwOy1T+LzPlYrru*X0iH>UDe(XpUL0?5>H@E|TC&N23 z;2zb!>sSM~%nOME!sm=~WL=(-k(QCui4nxcbnMt6EG(=`CUK-YFN4WKDmk6b@Q_0` z&^Z1OuH}q~ z0SYvMe=7cg$ys1=R$4}uB0}M4@*nu;I0vQ>g^aAMoK9oLv~S-YMEKluhPKK{8IXn~ zE{FRCW5y^dyZGEwZNavrwuxyO&Ma_wOq`;!>li~kMh$m31_j!J6G!D_Ss9}sNX_n!OD;|v+$PZ4 zE**}kqan*fhNPx8yXWMLc8p7I+d6nCQb^5V?CH+t^m9@vcjJ=716rrhJ#`esHNy-J z{*gZru$0iqw&QYAvqmLD)_l%zH$PWIvOlu8;4)v|;VFBQ6x7{Z0zWZ)*@4fem`|rPBJoL~*s;}_uv(Kvj;Fc|01p5ka*E}Nb zMt@@?`UF6Pe1)6 z)}N5#Y4jBi{^(cF4(Nu4?h-U~D3|N<(9m6vhVB+Lbbk_&@;Nb3ZWZI@E-_cWDb~vm z#MAO9+VAFeMhrlMm>q-gXl&Q@L-?TxpMvli2!A!g&qer~5&m9;e+J=qBm5CxJM*7@ z3fT9be#-yJeo7~yf1NsYLLtKZTr7V(#q=C7V8Dgc**kTL>D2AQxQMoGFF=1dwtGU) zp1pexh>WoH35U5;w;sKF!*66n+B6p|)(~0@h=_}BZE4*d zeg_Qb+9sIscZdmS4^wE5kTz}lVjR$|XXn5!a7B==FeClGU7~vT=sRFQ-`>C<>0j8l zZ+zdr-TEr}y5O_p>()&f#S$ClV_U-``pQaK0MGT&-tsVIFZMt69Wi1AB%isjj)pKEjILC^{UiKiUbA6t*Q-~r9)RQ( zr{&$J?RASzR6IAlZks}i0Z%KF1ATb92 zo2F?Ep!K6ehYtO*wzhW3=+UDGBOcV?>WD~3KyEiBF3i^uLs3s2)2#zkSE!Dds~CLp zjTxdW5g$V4@1dG*y-6%CXR%r{Bq0p^*Mybwp036x<4-+c2; zNyW0k{2*lcVA)}qJ$v>j8kmPD|C0HDxq43F`aYO{g#Br{qYMuM4ZpSg#vVU@T%s9L z@}HEH)QfUV#XP{gnSAM`mn8CBqR*_#F!O+Uf_XKii2JTxyCloX;lqcOo;0uypuDl1 zFpr4armv(dcu&ev`=uPRTgm~iOWC(p%KkM{PIyDg$B#<+{qf^(qO5cQ4Zr16_;201 zb$xt%{Ke?QuSI<+kspc%);Y`r@|E;3FNiz!%>NHR{7@ zHcVPr#wll%dzK9ovRv?+_>q*CgN6*q{}}L_G>oj1a@andhD4MF%#+K6ZOS|A(ckjL z5`Wai>oLcFG4f$8%NgsIbJ0P0_sm3N(onAjhF_(7tbIYW^KQ<-#}D^E;X{OS1d#~|UGy~tw@d2>GCAr2J#WrHTd;a7}}h 
zR~j^AAHn0Xz{v#~eC3%m80EP?>m<~vJy9oh0}XMT^|}?&ENwx9!oPd>?!NjTZ9Log zv+{%Gm+gU>A1n_vo8^S~qu!G>x97`0Uq3?rY5qw00BE=uH1Nws{W%RZOhp|(>0>EL zgIS(QgIS(gCk@;QzHgVZ4`~4Z?Jr6>4)qki{pZ3T^}UjRl(n_&53$UdbeOa-57-{~ z=ZQ%RA@OHD`TSCceDcO*`8a5J7&P4P*72`6hBMoE&v2S1?a)yrl3y(Gq`_Gqx}2XZaLqFjY%B%(CK72lb?Z-wR8I$!8W0mrvf1EFW6{ z8s?3Z-#2|O-$ijI4LP6zYix3gUwLMo#PaMwSzw)%ggS}svsowg-X>)a9sic~QY`UD z|9~ zEv)-l-*a5UdXsGx+cth%mnF#;LBsQlhs%u%ljRejp=Ejg1L~ypa~yK@?9p;XQL3z( znJSm$JEhlimHhDCI!PM*`xvS`qfX*?F8op7EBOaLYl%DYA|y`4o8^IgHuHpaynp|g z{UOpw`QN_cQuzvK*bEw;TLc86|H6Hh#$=rL&^JRcpTiL%A8+QDb#tJRmt8qn|(Xi()D zG_X!G%ku`*N%xeGlDC(o$lrs8wV+`YXjl$8T3X;#h_X&{`1LWKMY+d!R{c->P5Dnw zPVPkSrnUw2*xoW0~h@f7fFlLqd-v`wb8>Pd_GU=- z>nbWLq}%P5t5>g97^^5KC89+2b4jg#10 zB-gB2BeA9=Z@u+ag%8K~r?9-=@MBiHBLOj^t@K>b4R3;sSjE{;k zWya0Ab>F=+|i_EtXTP zh|7fw7pi<94Xjr&w=AblovQ4a2c*GNq`@5ba!koSHszjp5>kdpn<-zkXP=6FZqy~e z!`#zQ>@&QDV~BqSf66@-Wr_36q=~q3&KY}mRryU#O_eDrDH6L=R6fw1VN4nbO}R4j z#4IN)AH>~+lp~HAC|B$UGVjd!4dBn^wlJvQ@~Qe0bg#w|KzH!4n&tVGS6)#xkUyL^ zMEOuO^!mycy&S(=IR` zBf{e`+?dcWV(gWbl}7t=t$aJ4hfB^wmXLLRXAOXMhmXiddSwE%Tl#6k`M+9wo<}ZO zgXjly5I&(l&~>aw;W2@?U0q#f$BrFVp5eF4^c&e>?qD}@jLc9UYk_p;K0R7uynKgt z(sbwvuZz&{GJSCQPGdqH+7oT-LB_a-zCkatbyT{OzBt-tpY9!}yl1Jt^=acjnqSuc z)%v}WzQBJ<*AGh94@!87eqk<<85@3M9MGN2Q^pKk1eO2>MPJ~%+8B?Xxkd5dZ>v6CE=NYTC{LU?Vf8LNV7?;n(9b-OJnm8R6P1~852nLU7yp!= z7>NJS2i8vP7xW);zepvYr%f?VS zcgJqthD&6fW6ohLka_q_ACYy*(M{P1y&5alRrXli&DsQgqoq&j1oj~OFEeeh_kn#H z6Hca8i_6iqYi~rRnw!OI*Y@cCoW5+SZ}#}!m6L+{a)MHKzo36`!tSAU4^R;t(W zv7qdkNZAt5X|iQbP)-WUsX_TbP)38YI4E0!(qzjV${C_3raV`9ocpvU>r(7nGV(m4 zt*DWHT3JzWb6q){I0jJ}8V^);!zS9wlP@?+YyMXfiNx1s@6S`rzeK;!QpV=~wDN$Q zQum+xjaehCr}SJFV->?@XQjhaYO^2xiH^iyf(Ej~Fan$Zru6Dn>D)8Yn{QGvm??N8I2ZT|F>&yrcdr?9 zN_`&#)*D4nE3Ob%?5P|Yu$}NdSs&5stVQT*Y+39S{^L98Tf(ZqJi*$)c-X%HMg_hD z?hnRB8maU8J&F$?AGBWnLM`hr>l%J8?P5p6lc4|BcFNY6{RA)jree3?ePCs;^@aZ( z_B$X}SRQQq_=DI8WN09x-^uWV_3P5#FPMGLmRnqccsOavCE;Bcv$*LZ(E#qt9tdLl zV7^Ws!F$0O3sk;4SSF*aDe_(5&+)!CRfm@_eX*FH3SM>K$Y 
zf}dM?6b)c)U_8hp7!_D2IN=kTBT6TOF^RH{EACRS`|oO5o5_v_Yz}yWZGCXVqZaD} zpCOJ&qHQwREBG;(UsxX5%K}CN)(6IfJbwOH_R{OFee58IFS4#;r#bu0(ZUtPm5BwW z6wd_n)xC7)Y0a@P4aj5p5P8)8eLE-EOM^TP!Rey3FFYl#xI%fD4%$Y}DM{dMU?kv$ z;8a~dSve|Zc=01+n!1H~iU#)cFekulNv{9c6?%treM?JAEjB3G(LvkKYpghK!cS>;jRw*roM3d=m0Xd4LM8)v@rf}Zz?w6 zU7ezVm?p7tFc#zyoC|yftkGgzLHp~Ue4M82oN)43C_X3+j6b%SaEQ3>yDUc+;O}Ff z({E^j2b8B%H5NAnuL8RTGX#5X5AZ_f1o#M8VXH7I#{(J*>u9iBr2+Y8Zt33Mdg=dK z<|29rzX0D3o}hc+$F<%+ns7!xd3%Y{e_;6>q7758JkF zvw9D`%le1@M;71*vPj#|LSjx_e($Wg^xiqmi6Qc+Ie|QqhHKc#KmIsz4r03S82?Ih z(fV7=O=xoC6CNAJ;P-0yuf}*FA1JUtDQE zcwffJEm<4HhN7aPhZXN#M?ZvPHY{1Pd`BO!4pO(10op<@xVfrz;)ZaU zWs>EjY^0lNls!+Q`BYJOCEjnZuC6`-#zTx=H~bsM0(?#DvTX-@0KU^c8Ji3n&6QfK zJ`z8_r@GpOS6outxB{akD}T&E>cX~z-^6l}L1O9P8q5#qbg?VO0KLF<&;l>e+vrT_ zJbLu#HZ}ao(HhtPcK!>$jq5(Ta@wX&#u*y)oB*Te3Ydip7h2k&fy_C~0nS5iu#?ai z>`PQE+*7^y3S&uD|Du1ikG_`rx)qaW%}3{eE$EpA7Gq{!KpzP{xEL??GMEZ>yZZ35 z8Xarf&}Z9s@(f?e&_Sj>)+0S5!RiL~s?v94j`4!m^c$V5*vsC#Y}>X^JJK~){|`rp zvw?NbfzkbrHdc&XNX!X&oIu;QefsOxUu22?QYRUm$DCts(@$iK{X6Wv(0u_`Ka8{A zv`vcA|Dk*j^Pe#wqkGYPjI*8zf&5)kJnT8uLHuOAiF-|Q$C;U5qv^L1>ADfsJ-ayr z_v=Pm#S7&e<;WN^=ecX5x`bS;-&}6dcd}_6<{2!lx81iJi;R@CSBtubO_Ik)+uPe; z6Gn4gI_k3ah5T^u-o1nLjxc~Ls-xk+fdlq;oijO=N-a=4``4LF#*U|3!x(4j^mF0H zz|*w^k-}7R$HVDL+u@4Oa)}BGcG#&Yp2|3SIVH z`Azst#1Kx)tofq$bt*agI0Ak^Tg)}|5*c~rT2fr@2|n-}S(D8E7JPiDvJ2hBJc3qi z2W0!yFZY-i8nU)wRp7tjKjOdQE8wfllCSiV=mKlSCx>?ApLSo}u)x|lSLK846wTCu zFOKg;ECRm|zjKcKPGZ#9RM3syBH_c}L;d{4MnfIp^np|?F&*Lv_-ycxabgX^4kEqv zgU77Bh5v!?f$xK@i@%Y6`8h)!_~E6ZgYnb7-WKOZhrx%Q-*~30wb1&L*zVZv_zn0@ z>l5>B8;iP)*)P9V_j#L^mKM851YC`=L|z>K?aBH$_4Z+F_pg`l6C^tOI{eeRVD%Q5 z9Qy~j*2&z@HWZnQJ!dRG@u9w7W81&xM7Wv}SgGtS(+(vow)hRj$_!>P?}A`LiFIm0$FUkH=yKtD06P?VB*iSlh$d z>vUx@QMFpHtXya7i76E{B^p<&mBf=}O*L!urom;2Ecg#EKo$}6mJzlTZ=kCsz?=u;#$EtQ*^k)m&9sfyI%RCQ`)syWq~+MH@jmF=n6 PLuc=&`yBZH;=q3a9Wlw0 literal 0 HcmV?d00001 diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/util.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/util.py new file mode 
100644 index 00000000..e7d4ced9 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/util.py @@ -0,0 +1,1579 @@ +# +# Copyright (C) 2012-2014 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import codecs +from collections import deque +import contextlib +import csv +from glob import iglob as std_iglob +import io +import json +import logging +import os +import py_compile +import re +import shutil +import socket +import ssl +import subprocess +import sys +import tarfile +import tempfile +try: + import threading +except ImportError: + import dummy_threading as threading +import time + +from . import DistlibException +from .compat import (string_types, text_type, shutil, raw_input, StringIO, + cache_from_source, urlopen, httplib, xmlrpclib, splittype, + HTTPHandler, HTTPSHandler as BaseHTTPSHandler, + BaseConfigurator, valid_ident, Container, configparser, + URLError, match_hostname, CertificateError, ZipFile) + +logger = logging.getLogger(__name__) + +# +# Requirement parsing code for name + optional constraints + optional extras +# +# e.g. 'foo >= 1.2, < 2.0 [bar, baz]' +# +# The regex can seem a bit hairy, so we build it up out of smaller pieces +# which are manageable. +# + +COMMA = r'\s*,\s*' +COMMA_RE = re.compile(COMMA) + +IDENT = r'(\w|[.-])+' +EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')' +VERSPEC = IDENT + r'\*?' 
+ +RELOP = '([<>=!~]=)|[<>]' + +# +# The first relop is optional - if absent, will be taken as '~=' +# +BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' + + RELOP + r')\s*(' + VERSPEC + '))*') + +DIRECT_REF = '(from\s+(?P.*))' + +# +# Either the bare constraints or the bare constraints in parentheses +# +CONSTRAINTS = (r'\(\s*(?P' + BARE_CONSTRAINTS + '|' + DIRECT_REF + + r')\s*\)|(?P' + BARE_CONSTRAINTS + '\s*)') + +EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*' +EXTRAS = r'\[\s*(?P' + EXTRA_LIST + r')?\s*\]' +REQUIREMENT = ('(?P' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' + + CONSTRAINTS + ')?$') +REQUIREMENT_RE = re.compile(REQUIREMENT) + +# +# Used to scan through the constraints +# +RELOP_IDENT = '(?P' + RELOP + r')\s*(?P' + VERSPEC + ')' +RELOP_IDENT_RE = re.compile(RELOP_IDENT) + +def parse_requirement(s): + + def get_constraint(m): + d = m.groupdict() + return d['op'], d['vn'] + + result = None + m = REQUIREMENT_RE.match(s) + if m: + d = m.groupdict() + name = d['dn'] + cons = d['c1'] or d['c2'] + if not d['diref']: + url = None + else: + # direct reference + cons = None + url = d['diref'].strip() + if not cons: + cons = None + constr = '' + rs = d['dn'] + else: + if cons[0] not in '<>!=': + cons = '~=' + cons + iterator = RELOP_IDENT_RE.finditer(cons) + cons = [get_constraint(m) for m in iterator] + rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons])) + if not d['ex']: + extras = None + else: + extras = COMMA_RE.split(d['ex']) + result = Container(name=name, constraints=cons, extras=extras, + requirement=rs, source=s, url=url) + return result + + +def get_resources_dests(resources_root, rules): + """Find destinations for resources files""" + + def get_rel_path(base, path): + # normalizes and returns a lstripped-/-separated path + base = base.replace(os.path.sep, '/') + path = path.replace(os.path.sep, '/') + assert path.startswith(base) + return path[len(base):].lstrip('/') + + + destinations = {} + for 
base, suffix, dest in rules: + prefix = os.path.join(resources_root, base) + for abs_base in iglob(prefix): + abs_glob = os.path.join(abs_base, suffix) + for abs_path in iglob(abs_glob): + resource_file = get_rel_path(resources_root, abs_path) + if dest is None: # remove the entry if it was here + destinations.pop(resource_file, None) + else: + rel_path = get_rel_path(abs_base, abs_path) + rel_dest = dest.replace(os.path.sep, '/').rstrip('/') + destinations[resource_file] = rel_dest + '/' + rel_path + return destinations + + +def in_venv(): + if hasattr(sys, 'real_prefix'): + # virtualenv venvs + result = True + else: + # PEP 405 venvs + result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix) + return result + + +def get_executable(): +# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as +# changes to the stub launcher mean that sys.executable always points +# to the stub on OS X +# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__' +# in os.environ): +# result = os.environ['__PYVENV_LAUNCHER__'] +# else: +# result = sys.executable +# return result + return sys.executable + + +def proceed(prompt, allowed_chars, error_prompt=None, default=None): + p = prompt + while True: + s = raw_input(p) + p = prompt + if not s and default: + s = default + if s: + c = s[0].lower() + if c in allowed_chars: + break + if error_prompt: + p = '%c: %s\n%s' % (c, error_prompt, prompt) + return c + + +def extract_by_key(d, keys): + if isinstance(keys, string_types): + keys = keys.split() + result = {} + for key in keys: + if key in d: + result[key] = d[key] + return result + +def read_exports(stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + # Try to load as JSON, falling back on legacy format + data = stream.read() + stream = StringIO(data) + try: + data = json.load(stream) + result = data['extensions']['python.exports']['exports'] + for group, entries in result.items(): + for k, v in 
entries.items(): + s = '%s = %s' % (k, v) + entry = get_export_entry(s) + assert entry is not None + entries[k] = entry + return result + except Exception: + stream.seek(0, 0) + cp = configparser.ConfigParser() + if hasattr(cp, 'read_file'): + cp.read_file(stream) + else: + cp.readfp(stream) + result = {} + for key in cp.sections(): + result[key] = entries = {} + for name, value in cp.items(key): + s = '%s = %s' % (name, value) + entry = get_export_entry(s) + assert entry is not None + #entry.dist = self + entries[name] = entry + return result + + +def write_exports(exports, stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getwriter('utf-8')(stream) + cp = configparser.ConfigParser() + for k, v in exports.items(): + # TODO check k, v for valid values + cp.add_section(k) + for entry in v.values(): + if entry.suffix is None: + s = entry.prefix + else: + s = '%s:%s' % (entry.prefix, entry.suffix) + if entry.flags: + s = '%s [%s]' % (s, ', '.join(entry.flags)) + cp.set(k, entry.name, s) + cp.write(stream) + + +@contextlib.contextmanager +def tempdir(): + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + +@contextlib.contextmanager +def chdir(d): + cwd = os.getcwd() + try: + os.chdir(d) + yield + finally: + os.chdir(cwd) + + +@contextlib.contextmanager +def socket_timeout(seconds=15): + cto = socket.getdefaulttimeout() + try: + socket.setdefaulttimeout(seconds) + yield + finally: + socket.setdefaulttimeout(cto) + + +class cached_property(object): + def __init__(self, func): + self.func = func + #for attr in ('__name__', '__module__', '__doc__'): + # setattr(self, attr, getattr(func, attr, None)) + + def __get__(self, obj, cls=None): + if obj is None: + return self + value = self.func(obj) + object.__setattr__(obj, self.func.__name__, value) + #obj.__dict__[self.func.__name__] = value = self.func(obj) + return value + +def convert_path(pathname): + """Return 'pathname' as a name that will work on the native 
filesystem. + + The path is split on '/' and put back together again using the current + directory separator. Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. + """ + if os.sep == '/': + return pathname + if not pathname: + return pathname + if pathname[0] == '/': + raise ValueError("path '%s' cannot be absolute" % pathname) + if pathname[-1] == '/': + raise ValueError("path '%s' cannot end with '/'" % pathname) + + paths = pathname.split('/') + while os.curdir in paths: + paths.remove(os.curdir) + if not paths: + return os.curdir + return os.path.join(*paths) + + +class FileOperator(object): + def __init__(self, dry_run=False): + self.dry_run = dry_run + self.ensured = set() + self._init_record() + + def _init_record(self): + self.record = False + self.files_written = set() + self.dirs_created = set() + + def record_as_written(self, path): + if self.record: + self.files_written.add(path) + + def newer(self, source, target): + """Tell if the target is newer than the source. + + Returns true if 'source' exists and is more recently modified than + 'target', or if 'source' exists and 'target' doesn't. + + Returns false if both exist and 'target' is the same age or younger + than 'source'. Raise PackagingFileError if 'source' does not exist. + + Note that this test is not very accurate: files created in the same + second will have the same "age". + """ + if not os.path.exists(source): + raise DistlibException("file '%r' does not exist" % + os.path.abspath(source)) + if not os.path.exists(target): + return True + + return os.stat(source).st_mtime > os.stat(target).st_mtime + + def copy_file(self, infile, outfile, check=True): + """Copy a file respecting dry-run and force flags. 
+ """ + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying %s to %s', infile, outfile) + if not self.dry_run: + msg = None + if check: + if os.path.islink(outfile): + msg = '%s is a symlink' % outfile + elif os.path.exists(outfile) and not os.path.isfile(outfile): + msg = '%s is a non-regular file' % outfile + if msg: + raise ValueError(msg + ' which would be overwritten') + shutil.copyfile(infile, outfile) + self.record_as_written(outfile) + + def copy_stream(self, instream, outfile, encoding=None): + assert not os.path.isdir(outfile) + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying stream %s to %s', instream, outfile) + if not self.dry_run: + if encoding is None: + outstream = open(outfile, 'wb') + else: + outstream = codecs.open(outfile, 'w', encoding=encoding) + try: + shutil.copyfileobj(instream, outstream) + finally: + outstream.close() + self.record_as_written(outfile) + + def write_binary_file(self, path, data): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + with open(path, 'wb') as f: + f.write(data) + self.record_as_written(path) + + def write_text_file(self, path, data, encoding): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + with open(path, 'wb') as f: + f.write(data.encode(encoding)) + self.record_as_written(path) + + def set_mode(self, bits, mask, files): + if os.name == 'posix': + # Set the executable bits (owner, group, and world) on + # all the files specified. 
+ for f in files: + if self.dry_run: + logger.info("changing mode of %s", f) + else: + mode = (os.stat(f).st_mode | bits) & mask + logger.info("changing mode of %s to %o", f, mode) + os.chmod(f, mode) + + set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) + + def ensure_dir(self, path): + path = os.path.abspath(path) + if path not in self.ensured and not os.path.exists(path): + self.ensured.add(path) + d, f = os.path.split(path) + self.ensure_dir(d) + logger.info('Creating %s' % path) + if not self.dry_run: + os.mkdir(path) + if self.record: + self.dirs_created.add(path) + + def byte_compile(self, path, optimize=False, force=False, prefix=None): + dpath = cache_from_source(path, not optimize) + logger.info('Byte-compiling %s to %s', path, dpath) + if not self.dry_run: + if force or self.newer(path, dpath): + if not prefix: + diagpath = None + else: + assert path.startswith(prefix) + diagpath = path[len(prefix):] + py_compile.compile(path, dpath, diagpath, True) # raise error + self.record_as_written(dpath) + return dpath + + def ensure_removed(self, path): + if os.path.exists(path): + if os.path.isdir(path) and not os.path.islink(path): + logger.debug('Removing directory tree at %s', path) + if not self.dry_run: + shutil.rmtree(path) + if self.record: + if path in self.dirs_created: + self.dirs_created.remove(path) + else: + if os.path.islink(path): + s = 'link' + else: + s = 'file' + logger.debug('Removing %s %s', s, path) + if not self.dry_run: + os.remove(path) + if self.record: + if path in self.files_written: + self.files_written.remove(path) + + def is_writable(self, path): + result = False + while not result: + if os.path.exists(path): + result = os.access(path, os.W_OK) + break + parent = os.path.dirname(path) + if parent == path: + break + path = parent + return result + + def commit(self): + """ + Commit recorded changes, turn off recording, return + changes. 
+ """ + assert self.record + result = self.files_written, self.dirs_created + self._init_record() + return result + + def rollback(self): + if not self.dry_run: + for f in list(self.files_written): + if os.path.exists(f): + os.remove(f) + # dirs should all be empty now, except perhaps for + # __pycache__ subdirs + # reverse so that subdirs appear before their parents + dirs = sorted(self.dirs_created, reverse=True) + for d in dirs: + flist = os.listdir(d) + if flist: + assert flist == ['__pycache__'] + sd = os.path.join(d, flist[0]) + os.rmdir(sd) + os.rmdir(d) # should fail if non-empty + self._init_record() + +def resolve(module_name, dotted_path): + if module_name in sys.modules: + mod = sys.modules[module_name] + else: + mod = __import__(module_name) + if dotted_path is None: + result = mod + else: + parts = dotted_path.split('.') + result = getattr(mod, parts.pop(0)) + for p in parts: + result = getattr(result, p) + return result + + +class ExportEntry(object): + def __init__(self, name, prefix, suffix, flags): + self.name = name + self.prefix = prefix + self.suffix = suffix + self.flags = flags + + @cached_property + def value(self): + return resolve(self.prefix, self.suffix) + + def __repr__(self): + return '' % (self.name, self.prefix, + self.suffix, self.flags) + + def __eq__(self, other): + if not isinstance(other, ExportEntry): + result = False + else: + result = (self.name == other.name and + self.prefix == other.prefix and + self.suffix == other.suffix and + self.flags == other.flags) + return result + + __hash__ = object.__hash__ + + +ENTRY_RE = re.compile(r'''(?P(\w|[-.])+) + \s*=\s*(?P(\w+)([:\.]\w+)*) + \s*(\[\s*(?P\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? 
+ ''', re.VERBOSE) + + +def get_export_entry(specification): + m = ENTRY_RE.search(specification) + if not m: + result = None + if '[' in specification or ']' in specification: + raise DistlibException('Invalid specification ' + '%r' % specification) + else: + d = m.groupdict() + name = d['name'] + path = d['callable'] + colons = path.count(':') + if colons == 0: + prefix, suffix = path, None + else: + if colons != 1: + raise DistlibException('Invalid specification ' + '%r' % specification) + prefix, suffix = path.split(':') + flags = d['flags'] + if flags is None: + if '[' in specification or ']' in specification: + raise DistlibException('Invalid specification ' + '%r' % specification) + flags = [] + else: + flags = [f.strip() for f in flags.split(',')] + result = ExportEntry(name, prefix, suffix, flags) + return result + + +def get_cache_base(suffix=None): + """ + Return the default base location for distlib caches. If the directory does + not exist, it is created. Use the suffix provided for the base directory, + and default to '.distlib' if it isn't provided. + + On Windows, if LOCALAPPDATA is defined in the environment, then it is + assumed to be a directory, and will be the parent directory of the result. + On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home + directory - using os.expanduser('~') - will be the parent directory of + the result. + + The result is just the directory '.distlib' in the parent directory as + determined above, or with the name specified with ``suffix``. 
+ """ + if suffix is None: + suffix = '.distlib' + if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: + result = os.path.expandvars('$localappdata') + else: + # Assume posix, or old Windows + result = os.path.expanduser('~') + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if os.path.isdir(result): + usable = os.access(result, os.W_OK) + if not usable: + logger.warning('Directory exists but is not writable: %s', result) + else: + try: + os.makedirs(result) + usable = True + except OSError: + logger.warning('Unable to create %s', result, exc_info=True) + usable = False + if not usable: + result = tempfile.mkdtemp() + logger.warning('Default location unusable, using %s', result) + return os.path.join(result, suffix) + + +def path_to_cache_dir(path): + """ + Convert an absolute path to a directory name for use in a cache. + + The algorithm used is: + + #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. + #. Any occurrence of ``os.sep`` is replaced with ``'--'``. + #. ``'.cache'`` is appended. 
+ """ + d, p = os.path.splitdrive(os.path.abspath(path)) + if d: + d = d.replace(':', '---') + p = p.replace(os.sep, '--') + return d + p + '.cache' + + +def ensure_slash(s): + if not s.endswith('/'): + return s + '/' + return s + + +def parse_credentials(netloc): + username = password = None + if '@' in netloc: + prefix, netloc = netloc.split('@', 1) + if ':' not in prefix: + username = prefix + else: + username, password = prefix.split(':', 1) + return username, password, netloc + + +def get_process_umask(): + result = os.umask(0o22) + os.umask(result) + return result + +def is_string_sequence(seq): + result = True + i = None + for i, s in enumerate(seq): + if not isinstance(s, string_types): + result = False + break + assert i is not None + return result + +PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' + '([a-z0-9_.+-]+)', re.I) +PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') + + +def split_filename(filename, project_name=None): + """ + Extract name, version, python version from a filename (no extension) + + Return name, version, pyver or None + """ + result = None + pyver = None + m = PYTHON_VERSION.search(filename) + if m: + pyver = m.group(1) + filename = filename[:m.start()] + if project_name and len(filename) > len(project_name) + 1: + m = re.match(re.escape(project_name) + r'\b', filename) + if m: + n = m.end() + result = filename[:n], filename[n + 1:], pyver + if result is None: + m = PROJECT_NAME_AND_VERSION.match(filename) + if m: + result = m.group(1), m.group(3), pyver + return result + +# Allow spaces in name because of legacy dists like "Twisted Core" +NAME_VERSION_RE = re.compile(r'(?P[\w .-]+)\s*' + r'\(\s*(?P[^\s)]+)\)$') + +def parse_name_and_version(p): + """ + A utility method used to get name and version from a string. + + From e.g. a Provides-Dist value. + + :param p: A value in a form 'foo (1.0)' + :return: The name and version as a tuple. 
+ """ + m = NAME_VERSION_RE.match(p) + if not m: + raise DistlibException('Ill-formed name/version string: \'%s\'' % p) + d = m.groupdict() + return d['name'].strip().lower(), d['ver'] + +def get_extras(requested, available): + result = set() + requested = set(requested or []) + available = set(available or []) + if '*' in requested: + requested.remove('*') + result |= available + for r in requested: + if r == '-': + result.add(r) + elif r.startswith('-'): + unwanted = r[1:] + if unwanted not in available: + logger.warning('undeclared extra: %s' % unwanted) + if unwanted in result: + result.remove(unwanted) + else: + if r not in available: + logger.warning('undeclared extra: %s' % r) + result.add(r) + return result +# +# Extended metadata functionality +# + +def _get_external_data(url): + result = {} + try: + # urlopen might fail if it runs into redirections, + # because of Python issue #13696. Fixed in locators + # using a custom redirect handler. + resp = urlopen(url) + headers = resp.info() + if headers.get('Content-Type') != 'application/json': + logger.debug('Unexpected response for JSON request') + else: + reader = codecs.getreader('utf-8')(resp) + #data = reader.read().decode('utf-8') + #result = json.loads(data) + result = json.load(reader) + except Exception as e: + logger.exception('Failed to get external data for %s: %s', url, e) + return result + + +def get_project_data(name): + url = ('https://www.red-dove.com/pypi/projects/' + '%s/%s/project.json' % (name[0].upper(), name)) + result = _get_external_data(url) + return result + +def get_package_data(name, version): + url = ('https://www.red-dove.com/pypi/projects/' + '%s/%s/package-%s.json' % (name[0].upper(), name, version)) + return _get_external_data(url) + + +class Cache(object): + """ + A class implementing a cache for resources that need to live in the file system + e.g. shared libraries. This class was moved from resources to here because it + could be used by other modules, e.g. 
the wheel module. + """ + + def __init__(self, base): + """ + Initialise an instance. + + :param base: The base directory where the cache should be located. + """ + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if not os.path.isdir(base): + os.makedirs(base) + if (os.stat(base).st_mode & 0o77) != 0: + logger.warning('Directory \'%s\' is not private', base) + self.base = os.path.abspath(os.path.normpath(base)) + + def prefix_to_dir(self, prefix): + """ + Converts a resource prefix to a directory name in the cache. + """ + return path_to_cache_dir(prefix) + + def clear(self): + """ + Clear the cache. + """ + not_removed = [] + for fn in os.listdir(self.base): + fn = os.path.join(self.base, fn) + try: + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + except Exception: + not_removed.append(fn) + return not_removed + + +class EventMixin(object): + """ + A very simple publish/subscribe system. + """ + def __init__(self): + self._subscribers = {} + + def add(self, event, subscriber, append=True): + """ + Add a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be added (and called when the + event is published). + :param append: Whether to append or prepend the subscriber to an + existing subscriber list for the event. + """ + subs = self._subscribers + if event not in subs: + subs[event] = deque([subscriber]) + else: + sq = subs[event] + if append: + sq.append(subscriber) + else: + sq.appendleft(subscriber) + + def remove(self, event, subscriber): + """ + Remove a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be removed. 
+ """ + subs = self._subscribers + if event not in subs: + raise ValueError('No subscribers: %r' % event) + subs[event].remove(subscriber) + + def get_subscribers(self, event): + """ + Return an iterator for the subscribers for an event. + :param event: The event to return subscribers for. + """ + return iter(self._subscribers.get(event, ())) + + def publish(self, event, *args, **kwargs): + """ + Publish a event and return a list of values returned by its + subscribers. + + :param event: The event to publish. + :param args: The positional arguments to pass to the event's + subscribers. + :param kwargs: The keyword arguments to pass to the event's + subscribers. + """ + result = [] + for subscriber in self.get_subscribers(event): + try: + value = subscriber(event, *args, **kwargs) + except Exception: + logger.exception('Exception during event publication') + value = None + result.append(value) + logger.debug('publish %s: args = %s, kwargs = %s, result = %s', + event, args, kwargs, result) + return result + +# +# Simple sequencing +# +class Sequencer(object): + def __init__(self): + self._preds = {} + self._succs = {} + self._nodes = set() # nodes with no preds/succs + + def add_node(self, node): + self._nodes.add(node) + + def remove_node(self, node, edges=False): + if node in self._nodes: + self._nodes.remove(node) + if edges: + for p in set(self._preds.get(node, ())): + self.remove(p, node) + for s in set(self._succs.get(node, ())): + self.remove(node, s) + # Remove empties + for k, v in list(self._preds.items()): + if not v: + del self._preds[k] + for k, v in list(self._succs.items()): + if not v: + del self._succs[k] + + def add(self, pred, succ): + assert pred != succ + self._preds.setdefault(succ, set()).add(pred) + self._succs.setdefault(pred, set()).add(succ) + + def remove(self, pred, succ): + assert pred != succ + try: + preds = self._preds[succ] + succs = self._succs[pred] + except KeyError: + raise ValueError('%r not a successor of anything' % succ) + 
try: + preds.remove(pred) + succs.remove(succ) + except KeyError: + raise ValueError('%r not a successor of %r' % (succ, pred)) + + def is_step(self, step): + return (step in self._preds or step in self._succs or + step in self._nodes) + + def get_steps(self, final): + if not self.is_step(final): + raise ValueError('Unknown: %r' % final) + result = [] + todo = [] + seen = set() + todo.append(final) + while todo: + step = todo.pop(0) + if step in seen: + # if a step was already seen, + # move it to the end (so it will appear earlier + # when reversed on return) ... but not for the + # final step, as that would be confusing for + # users + if step != final: + result.remove(step) + result.append(step) + else: + seen.add(step) + result.append(step) + preds = self._preds.get(step, ()) + todo.extend(preds) + return reversed(result) + + @property + def strong_connections(self): + #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + index_counter = [0] + stack = [] + lowlinks = {} + index = {} + result = [] + + graph = self._succs + + def strongconnect(node): + # set the depth index for this node to the smallest unused index + index[node] = index_counter[0] + lowlinks[node] = index_counter[0] + index_counter[0] += 1 + stack.append(node) + + # Consider successors + try: + successors = graph[node] + except Exception: + successors = [] + for successor in successors: + if successor not in lowlinks: + # Successor has not yet been visited + strongconnect(successor) + lowlinks[node] = min(lowlinks[node],lowlinks[successor]) + elif successor in stack: + # the successor is in the stack and hence in the current + # strongly connected component (SCC) + lowlinks[node] = min(lowlinks[node],index[successor]) + + # If `node` is a root node, pop the stack and generate an SCC + if lowlinks[node] == index[node]: + connected_component = [] + + while True: + successor = stack.pop() + connected_component.append(successor) + if successor == node: break + component 
= tuple(connected_component) + # storing the result + result.append(component) + + for node in graph: + if node not in lowlinks: + strongconnect(node) + + return result + + @property + def dot(self): + result = ['digraph G {'] + for succ in self._preds: + preds = self._preds[succ] + for pred in preds: + result.append(' %s -> %s;' % (pred, succ)) + for node in self._nodes: + result.append(' %s;' % node) + result.append('}') + return '\n'.join(result) + +# +# Unarchiving functionality for zip, tar, tgz, tbz, whl +# + +ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', + '.tgz', '.tbz', '.whl') + +def unarchive(archive_filename, dest_dir, format=None, check=True): + + def check_path(path): + if not isinstance(path, text_type): + path = path.decode('utf-8') + p = os.path.abspath(os.path.join(dest_dir, path)) + if not p.startswith(dest_dir) or p[plen] != os.sep: + raise ValueError('path outside destination: %r' % p) + + dest_dir = os.path.abspath(dest_dir) + plen = len(dest_dir) + archive = None + if format is None: + if archive_filename.endswith(('.zip', '.whl')): + format = 'zip' + elif archive_filename.endswith(('.tar.gz', '.tgz')): + format = 'tgz' + mode = 'r:gz' + elif archive_filename.endswith(('.tar.bz2', '.tbz')): + format = 'tbz' + mode = 'r:bz2' + elif archive_filename.endswith('.tar'): + format = 'tar' + mode = 'r' + else: + raise ValueError('Unknown format for %r' % archive_filename) + try: + if format == 'zip': + archive = ZipFile(archive_filename, 'r') + if check: + names = archive.namelist() + for name in names: + check_path(name) + else: + archive = tarfile.open(archive_filename, mode) + if check: + names = archive.getnames() + for name in names: + check_path(name) + if format != 'zip' and sys.version_info[0] < 3: + # See Python issue 17153. 
If the dest path contains Unicode, + # tarfile extraction fails on Python 2.x if a member path name + # contains non-ASCII characters - it leads to an implicit + # bytes -> unicode conversion using ASCII to decode. + for tarinfo in archive.getmembers(): + if not isinstance(tarinfo.name, text_type): + tarinfo.name = tarinfo.name.decode('utf-8') + archive.extractall(dest_dir) + + finally: + if archive: + archive.close() + + +def zip_dir(directory): + """zip a directory tree into a BytesIO object""" + result = io.BytesIO() + dlen = len(directory) + with ZipFile(result, "w") as zf: + for root, dirs, files in os.walk(directory): + for name in files: + full = os.path.join(root, name) + rel = root[dlen:] + dest = os.path.join(rel, name) + zf.write(full, dest) + return result + +# +# Simple progress bar +# + +UNITS = ('', 'K', 'M', 'G','T','P') + + +class Progress(object): + unknown = 'UNKNOWN' + + def __init__(self, minval=0, maxval=100): + assert maxval is None or maxval >= minval + self.min = self.cur = minval + self.max = maxval + self.started = None + self.elapsed = 0 + self.done = False + + def update(self, curval): + assert self.min <= curval + assert self.max is None or curval <= self.max + self.cur = curval + now = time.time() + if self.started is None: + self.started = now + else: + self.elapsed = now - self.started + + def increment(self, incr): + assert incr >= 0 + self.update(self.cur + incr) + + def start(self): + self.update(self.min) + return self + + def stop(self): + if self.max is not None: + self.update(self.max) + self.done = True + + @property + def maximum(self): + return self.unknown if self.max is None else self.max + + @property + def percentage(self): + if self.done: + result = '100 %' + elif self.max is None: + result = ' ?? 
%' + else: + v = 100.0 * (self.cur - self.min) / (self.max - self.min) + result = '%3d %%' % v + return result + + def format_duration(self, duration): + if (duration <= 0) and self.max is None or self.cur == self.min: + result = '??:??:??' + #elif duration < 1: + # result = '--:--:--' + else: + result = time.strftime('%H:%M:%S', time.gmtime(duration)) + return result + + @property + def ETA(self): + if self.done: + prefix = 'Done' + t = self.elapsed + #import pdb; pdb.set_trace() + else: + prefix = 'ETA ' + if self.max is None: + t = -1 + elif self.elapsed == 0 or (self.cur == self.min): + t = 0 + else: + #import pdb; pdb.set_trace() + t = float(self.max - self.min) + t /= self.cur - self.min + t = (t - 1) * self.elapsed + return '%s: %s' % (prefix, self.format_duration(t)) + + @property + def speed(self): + if self.elapsed == 0: + result = 0.0 + else: + result = (self.cur - self.min) / self.elapsed + for unit in UNITS: + if result < 1000: + break + result /= 1000.0 + return '%d %sB/s' % (result, unit) + +# +# Glob functionality +# + +RICH_GLOB = re.compile(r'\{([^}]*)\}') +_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') +_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') + + +def iglob(path_glob): + """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" + if _CHECK_RECURSIVE_GLOB.search(path_glob): + msg = """invalid glob %r: recursive glob "**" must be used alone""" + raise ValueError(msg % path_glob) + if _CHECK_MISMATCH_SET.search(path_glob): + msg = """invalid glob %r: mismatching set marker '{' or '}'""" + raise ValueError(msg % path_glob) + return _iglob(path_glob) + + +def _iglob(path_glob): + rich_path_glob = RICH_GLOB.split(path_glob, 1) + if len(rich_path_glob) > 1: + assert len(rich_path_glob) == 3, rich_path_glob + prefix, set, suffix = rich_path_glob + for item in set.split(','): + for path in _iglob(''.join((prefix, item, suffix))): + yield path + else: + if '**' not in path_glob: + for item in 
std_iglob(path_glob): + yield item + else: + prefix, radical = path_glob.split('**', 1) + if prefix == '': + prefix = '.' + if radical == '': + radical = '*' + else: + # we support both + radical = radical.lstrip('/') + radical = radical.lstrip('\\') + for path, dir, files in os.walk(prefix): + path = os.path.normpath(path) + for fn in _iglob(os.path.join(path, radical)): + yield fn + + + +# +# HTTPSConnection which verifies certificates/matches domains +# + +class HTTPSConnection(httplib.HTTPSConnection): + ca_certs = None # set this to the path to the certs file (.pem) + check_domain = True # only used if ca_certs is not None + + # noinspection PyPropertyAccess + def connect(self): + sock = socket.create_connection((self.host, self.port), self.timeout) + if getattr(self, '_tunnel_host', False): + self.sock = sock + self._tunnel() + + if not hasattr(ssl, 'SSLContext'): + # For 2.x + if self.ca_certs: + cert_reqs = ssl.CERT_REQUIRED + else: + cert_reqs = ssl.CERT_NONE + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, + cert_reqs=cert_reqs, + ssl_version=ssl.PROTOCOL_SSLv23, + ca_certs=self.ca_certs) + else: + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.options |= ssl.OP_NO_SSLv2 + if self.cert_file: + context.load_cert_chain(self.cert_file, self.key_file) + kwargs = {} + if self.ca_certs: + context.verify_mode = ssl.CERT_REQUIRED + context.load_verify_locations(cafile=self.ca_certs) + if getattr(ssl, 'HAS_SNI', False): + kwargs['server_hostname'] = self.host + self.sock = context.wrap_socket(sock, **kwargs) + if self.ca_certs and self.check_domain: + try: + match_hostname(self.sock.getpeercert(), self.host) + logger.debug('Host verified: %s', self.host) + except CertificateError: + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + +class HTTPSHandler(BaseHTTPSHandler): + def __init__(self, ca_certs, check_domain=True): + BaseHTTPSHandler.__init__(self) + self.ca_certs = ca_certs + self.check_domain = check_domain + + 
def _conn_maker(self, *args, **kwargs): + """ + This is called to create a connection instance. Normally you'd + pass a connection class to do_open, but it doesn't actually check for + a class, and just expects a callable. As long as we behave just as a + constructor would have, we should be OK. If it ever changes so that + we *must* pass a class, we'll create an UnsafeHTTPSConnection class + which just sets check_domain to False in the class definition, and + choose which one to pass to do_open. + """ + result = HTTPSConnection(*args, **kwargs) + if self.ca_certs: + result.ca_certs = self.ca_certs + result.check_domain = self.check_domain + return result + + def https_open(self, req): + try: + return self.do_open(self._conn_maker, req) + except URLError as e: + if 'certificate verify failed' in str(e.reason): + raise CertificateError('Unable to verify server certificate ' + 'for %s' % req.host) + else: + raise + +# +# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- +# Middle proxy using HTTP listens on port 443, or an index mistakenly serves +# HTML containing a http://xyz link when it should be https://xyz), +# you can use the following handler class, which does not allow HTTP traffic. +# +# It works by inheriting from HTTPHandler - so build_opener won't add a +# handler for HTTP itself. 
+# +class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): + def http_open(self, req): + raise URLError('Unexpected HTTP request on what should be a secure ' + 'connection: %s' % req) + +# +# XML-RPC with timeouts +# + +_ver_info = sys.version_info[:2] + +if _ver_info == (2, 6): + class HTTP(httplib.HTTP): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + + class HTTPS(httplib.HTTPS): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + +class Transport(xmlrpclib.Transport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.Transport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, x509 = self.get_host_info(host) + if _ver_info == (2, 6): + result = HTTP(h, timeout=self.timeout) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPConnection(h) + result = self._connection[1] + return result + +class SafeTransport(xmlrpclib.SafeTransport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.SafeTransport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, kwargs = self.get_host_info(host) + if not kwargs: + kwargs = {} + kwargs['timeout'] = self.timeout + if _ver_info == (2, 6): + result = HTTPS(host, None, **kwargs) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPSConnection(h, None, + **kwargs) + result = self._connection[1] + return result + + +class ServerProxy(xmlrpclib.ServerProxy): + def __init__(self, uri, **kwargs): + self.timeout = timeout = kwargs.pop('timeout', None) + # The above classes only come into play if a 
timeout + # is specified + if timeout is not None: + scheme, _ = splittype(uri) + use_datetime = kwargs.get('use_datetime', 0) + if scheme == 'https': + tcls = SafeTransport + else: + tcls = Transport + kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) + self.transport = t + xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) + +# +# CSV functionality. This is provided because on 2.x, the csv module can't +# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. +# + +def _csv_open(fn, mode, **kwargs): + if sys.version_info[0] < 3: + mode += 'b' + else: + kwargs['newline'] = '' + return open(fn, mode, **kwargs) + + +class CSVBase(object): + defaults = { + 'delimiter': str(','), # The strs are used because we need native + 'quotechar': str('"'), # str in the csv API (2.x won't take + 'lineterminator': str('\n') # Unicode) + } + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.stream.close() + + +class CSVReader(CSVBase): + def __init__(self, **kwargs): + if 'stream' in kwargs: + stream = kwargs['stream'] + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + self.stream = stream + else: + self.stream = _csv_open(kwargs['path'], 'r') + self.reader = csv.reader(self.stream, **self.defaults) + + def __iter__(self): + return self + + def next(self): + result = next(self.reader) + if sys.version_info[0] < 3: + for i, item in enumerate(result): + if not isinstance(item, text_type): + result[i] = item.decode('utf-8') + return result + + __next__ = next + +class CSVWriter(CSVBase): + def __init__(self, fn, **kwargs): + self.stream = _csv_open(fn, 'w') + self.writer = csv.writer(self.stream, **self.defaults) + + def writerow(self, row): + if sys.version_info[0] < 3: + r = [] + for item in row: + if isinstance(item, text_type): + item = item.encode('utf-8') + r.append(item) + row = r + self.writer.writerow(row) + +# +# Configurator functionality +# + +class 
Configurator(BaseConfigurator): + + value_converters = dict(BaseConfigurator.value_converters) + value_converters['inc'] = 'inc_convert' + + def __init__(self, config, base=None): + super(Configurator, self).__init__(config) + self.base = base or os.getcwd() + + def configure_custom(self, config): + def convert(o): + if isinstance(o, (list, tuple)): + result = type(o)([convert(i) for i in o]) + elif isinstance(o, dict): + if '()' in o: + result = self.configure_custom(o) + else: + result = {} + for k in o: + result[k] = convert(o[k]) + else: + result = self.convert(o) + return result + + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + args = config.pop('[]', ()) + if args: + args = tuple([convert(o) for o in args]) + items = [(k, convert(config[k])) for k in config if valid_ident(k)] + kwargs = dict(items) + result = c(*args, **kwargs) + if props: + for n, v in props.items(): + setattr(result, n, convert(v)) + return result + + def __getitem__(self, key): + result = self.config[key] + if isinstance(result, dict) and '()' in result: + self.config[key] = result = self.configure_custom(result) + return result + + def inc_convert(self, value): + """Default converter for the inc:// protocol.""" + if not os.path.isabs(value): + value = os.path.join(self.base, value) + with codecs.open(value, 'r', encoding='utf-8') as f: + result = json.load(f) + return result + +# +# Mixin for running subprocesses and capturing their output +# + +class SubprocessMixin(object): + def __init__(self, verbose=False, progress=None): + self.verbose = verbose + self.progress = progress + + def reader(self, stream, context): + """ + Read lines from a subprocess' output stream and either pass to a progress + callable (if specified) or write progress information to sys.stderr. 
+ """ + progress = self.progress + verbose = self.verbose + while True: + s = stream.readline() + if not s: + break + if progress is not None: + progress(s, context) + else: + if not verbose: + sys.stderr.write('.') + else: + sys.stderr.write(s.decode('utf-8')) + sys.stderr.flush() + stream.close() + + def run_command(self, cmd, **kwargs): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, **kwargs) + t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) + t1.start() + t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) + t2.start() + p.wait() + t1.join() + t2.join() + if self.progress is not None: + self.progress('done.', 'main') + elif self.verbose: + sys.stderr.write('done.\n') + return p diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/version.py b/panda/python/Lib/site-packages/pip/_vendor/distlib/version.py new file mode 100644 index 00000000..d9c438a7 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/distlib/version.py @@ -0,0 +1,742 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2014 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Implementation of a flexible versioning scheme providing support for PEP-386, +distribute-compatible and semantic versioning. 
+""" + +import logging +import re + +from .compat import string_types + +__all__ = ['NormalizedVersion', 'NormalizedMatcher', + 'LegacyVersion', 'LegacyMatcher', + 'SemanticVersion', 'SemanticMatcher', + 'UnsupportedVersionError', 'get_scheme'] + +logger = logging.getLogger(__name__) + + +class UnsupportedVersionError(ValueError): + """This is an unsupported version.""" + pass + + +class Version(object): + def __init__(self, s): + self._string = s = s.strip() + self._parts = parts = self.parse(s) + assert isinstance(parts, tuple) + assert len(parts) > 0 + + def parse(self, s): + raise NotImplementedError('please implement in a subclass') + + def _check_compatible(self, other): + if type(self) != type(other): + raise TypeError('cannot compare %r and %r' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + self._check_compatible(other) + return self._parts < other._parts + + def __gt__(self, other): + return not (self.__lt__(other) or self.__eq__(other)) + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self._parts) + + def __repr__(self): + return "%s('%s')" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + @property + def is_prerelease(self): + raise NotImplementedError('Please implement in subclasses.') + + +class Matcher(object): + version_class = None + + dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?") + comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$') + num_re = re.compile(r'^\d+(\.\d+)*$') + + # value is either a callable or the name of a method + _operators = { + '<': lambda v, c, p: v < c, + '>': lambda v, c, p: v > c, + '<=': lambda v, 
c, p: v == c or v < c, + '>=': lambda v, c, p: v == c or v > c, + '==': lambda v, c, p: v == c, + '===': lambda v, c, p: v == c, + # by default, compatible => >=. + '~=': lambda v, c, p: v == c or v > c, + '!=': lambda v, c, p: v != c, + } + + def __init__(self, s): + if self.version_class is None: + raise ValueError('Please specify a version class') + self._string = s = s.strip() + m = self.dist_re.match(s) + if not m: + raise ValueError('Not valid: %r' % s) + groups = m.groups('') + self.name = groups[0].strip() + self.key = self.name.lower() # for case-insensitive comparisons + clist = [] + if groups[2]: + constraints = [c.strip() for c in groups[2].split(',')] + for c in constraints: + m = self.comp_re.match(c) + if not m: + raise ValueError('Invalid %r in %r' % (c, s)) + groups = m.groups() + op = groups[0] or '~=' + s = groups[1] + if s.endswith('.*'): + if op not in ('==', '!='): + raise ValueError('\'.*\' not allowed for ' + '%r constraints' % op) + # Could be a partial version (e.g. for '2.*') which + # won't parse as a version, so keep it as a string + vn, prefix = s[:-2], True + if not self.num_re.match(vn): + # Just to check that vn is a valid version + self.version_class(vn) + else: + # Should parse as a version, so we can create an + # instance for the comparison + vn, prefix = self.version_class(s), False + clist.append((op, vn, prefix)) + self._parts = tuple(clist) + + def match(self, version): + """ + Check if the provided version matches the constraints. + + :param version: The version to match against this instance. + :type version: Strring or :class:`Version` instance. 
+ """ + if isinstance(version, string_types): + version = self.version_class(version) + for operator, constraint, prefix in self._parts: + f = self._operators.get(operator) + if isinstance(f, string_types): + f = getattr(self, f) + if not f: + msg = ('%r not implemented ' + 'for %s' % (operator, self.__class__.__name__)) + raise NotImplementedError(msg) + if not f(version, constraint, prefix): + return False + return True + + @property + def exact_version(self): + result = None + if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='): + result = self._parts[0][1] + return result + + def _check_compatible(self, other): + if type(self) != type(other) or self.name != other.name: + raise TypeError('cannot compare %s and %s' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self.key == other.key and self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self.key) + hash(self._parts) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + +PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?' + r'(\.(post)(\d+))?(\.(dev)(\d+))?' 
+ r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$') + + +def _pep_440_key(s): + s = s.strip() + m = PEP440_VERSION_RE.match(s) + if not m: + raise UnsupportedVersionError('Not a valid version: %s' % s) + groups = m.groups() + nums = tuple(int(v) for v in groups[1].split('.')) + while len(nums) > 1 and nums[-1] == 0: + nums = nums[:-1] + + if not groups[0]: + epoch = 0 + else: + epoch = int(groups[0]) + pre = groups[4:6] + post = groups[7:9] + dev = groups[10:12] + local = groups[13] + if pre == (None, None): + pre = () + else: + pre = pre[0], int(pre[1]) + if post == (None, None): + post = () + else: + post = post[0], int(post[1]) + if dev == (None, None): + dev = () + else: + dev = dev[0], int(dev[1]) + if local is None: + local = () + else: + parts = [] + for part in local.split('.'): + # to ensure that numeric compares as > lexicographic, avoid + # comparing them directly, but encode a tuple which ensures + # correct sorting + if part.isdigit(): + part = (1, int(part)) + else: + part = (0, part) + parts.append(part) + local = tuple(parts) + if not pre: + # either before pre-release, or final release and after + if not post and dev: + # before pre-release + pre = ('a', -1) # to sort before a0 + else: + pre = ('z',) # to sort after all pre-releases + # now look at the state of post and dev. + if not post: + post = ('_',) # sort before 'a' + if not dev: + dev = ('final',) + + #print('%s -> %s' % (s, m.groups())) + return epoch, nums, pre, post, dev, local + + +_normalized_key = _pep_440_key + + +class NormalizedVersion(Version): + """A rational version. 
+ + Good: + 1.2 # equivalent to "1.2.0" + 1.2.0 + 1.2a1 + 1.2.3a2 + 1.2.3b1 + 1.2.3c1 + 1.2.3.4 + TODO: fill this out + + Bad: + 1 # mininum two numbers + 1.2a # release level must have a release serial + 1.2.3b + """ + def parse(self, s): + result = _normalized_key(s) + # _normalized_key loses trailing zeroes in the release + # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0 + # However, PEP 440 prefix matching needs it: for example, + # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0). + m = PEP440_VERSION_RE.match(s) # must succeed + groups = m.groups() + self._release_clause = tuple(int(v) for v in groups[1].split('.')) + return result + + PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) + + @property + def is_prerelease(self): + return any(t[0] in self.PREREL_TAGS for t in self._parts if t) + + +def _match_prefix(x, y): + x = str(x) + y = str(y) + if x == y: + return True + if not x.startswith(y): + return False + n = len(y) + return x[n] == '.' + + +class NormalizedMatcher(Matcher): + version_class = NormalizedVersion + + # value is either a callable or the name of a method + _operators = { + '~=': '_match_compatible', + '<': '_match_lt', + '>': '_match_gt', + '<=': '_match_le', + '>=': '_match_ge', + '==': '_match_eq', + '===': '_match_arbitrary', + '!=': '_match_ne', + } + + def _adjust_local(self, version, constraint, prefix): + if prefix: + strip_local = '+' not in constraint and version._parts[-1] + else: + # both constraint and version are + # NormalizedVersion instances. + # If constraint does not have a local component, + # ensure the version doesn't, either. 
+ strip_local = not constraint._parts[-1] and version._parts[-1] + if strip_local: + s = version._string.split('+', 1)[0] + version = self.version_class(s) + return version, constraint + + def _match_lt(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version >= constraint: + return False + release_clause = constraint._release_clause + pfx = '.'.join([str(i) for i in release_clause]) + return not _match_prefix(version, pfx) + + def _match_gt(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version <= constraint: + return False + release_clause = constraint._release_clause + pfx = '.'.join([str(i) for i in release_clause]) + return not _match_prefix(version, pfx) + + def _match_le(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + return version <= constraint + + def _match_ge(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + return version >= constraint + + def _match_eq(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if not prefix: + result = (version == constraint) + else: + result = _match_prefix(version, constraint) + return result + + def _match_arbitrary(self, version, constraint, prefix): + return str(version) == str(constraint) + + def _match_ne(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if not prefix: + result = (version != constraint) + else: + result = not _match_prefix(version, constraint) + return result + + def _match_compatible(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version == constraint: + return True + if version < constraint: + return False +# if not prefix: +# return True + release_clause = 
constraint._release_clause + if len(release_clause) > 1: + release_clause = release_clause[:-1] + pfx = '.'.join([str(i) for i in release_clause]) + return _match_prefix(version, pfx) + +_REPLACEMENTS = ( + (re.compile('[.+-]$'), ''), # remove trailing puncts + (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start + (re.compile('^[.-]'), ''), # remove leading puncts + (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses + (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion) + (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion) + (re.compile('[.]{2,}'), '.'), # multiple runs of '.' + (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha + (re.compile(r'\b(pre-alpha|prealpha)\b'), + 'pre.alpha'), # standardise + (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses +) + +_SUFFIX_REPLACEMENTS = ( + (re.compile('^[:~._+-]+'), ''), # remove leading puncts + (re.compile('[,*")([\]]'), ''), # remove unwanted chars + (re.compile('[~:+_ -]'), '.'), # replace illegal chars + (re.compile('[.]{2,}'), '.'), # multiple runs of '.' + (re.compile(r'\.$'), ''), # trailing '.' +) + +_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)') + + +def _suggest_semantic_version(s): + """ + Try to suggest a semantic form for a version for which + _suggest_normalized_version couldn't come up with anything. + """ + result = s.strip().lower() + for pat, repl in _REPLACEMENTS: + result = pat.sub(repl, result) + if not result: + result = '0.0.0' + + # Now look for numeric prefix, and separate it out from + # the rest. 
+ #import pdb; pdb.set_trace() + m = _NUMERIC_PREFIX.match(result) + if not m: + prefix = '0.0.0' + suffix = result + else: + prefix = m.groups()[0].split('.') + prefix = [int(i) for i in prefix] + while len(prefix) < 3: + prefix.append(0) + if len(prefix) == 3: + suffix = result[m.end():] + else: + suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] + prefix = prefix[:3] + prefix = '.'.join([str(i) for i in prefix]) + suffix = suffix.strip() + if suffix: + #import pdb; pdb.set_trace() + # massage the suffix. + for pat, repl in _SUFFIX_REPLACEMENTS: + suffix = pat.sub(repl, suffix) + + if not suffix: + result = prefix + else: + sep = '-' if 'dev' in suffix else '+' + result = prefix + sep + suffix + if not is_semver(result): + result = None + return result + + +def _suggest_normalized_version(s): + """Suggest a normalized version close to the given version string. + + If you have a version string that isn't rational (i.e. NormalizedVersion + doesn't like it) then you might be able to get an equivalent (or close) + rational version from this function. + + This does a number of simple normalizations to the given string, based + on observation of versions currently in use on PyPI. Given a dump of + those version during PyCon 2009, 4287 of them: + - 2312 (53.93%) match NormalizedVersion without change + with the automatic suggestion + - 3474 (81.04%) match when using this suggestion method + + @param s {str} An irrational version string. + @returns A rational version string, or None, if couldn't determine one. 
+ """ + try: + _normalized_key(s) + return s # already rational + except UnsupportedVersionError: + pass + + rs = s.lower() + + # part of this could use maketrans + for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), + ('beta', 'b'), ('rc', 'c'), ('-final', ''), + ('-pre', 'c'), + ('-release', ''), ('.release', ''), ('-stable', ''), + ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), + ('final', '')): + rs = rs.replace(orig, repl) + + # if something ends with dev or pre, we add a 0 + rs = re.sub(r"pre$", r"pre0", rs) + rs = re.sub(r"dev$", r"dev0", rs) + + # if we have something like "b-2" or "a.2" at the end of the + # version, that is pobably beta, alpha, etc + # let's remove the dash or dot + rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) + + # 1.0-dev-r371 -> 1.0.dev371 + # 0.1-dev-r79 -> 0.1.dev79 + rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) + + # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 + rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) + + # Clean: v0.3, v1.0 + if rs.startswith('v'): + rs = rs[1:] + + # Clean leading '0's on numbers. + #TODO: unintended side-effect on, e.g., "2003.05.09" + # PyPI stats: 77 (~2%) better + rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) + + # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers + # zero. 
+ # PyPI stats: 245 (7.56%) better + rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) + + # the 'dev-rNNN' tag is a dev tag + rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) + + # clean the - when used as a pre delimiter + rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) + + # a terminal "dev" or "devel" can be changed into ".dev0" + rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) + + # a terminal "dev" can be changed into ".dev0" + rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) + + # a terminal "final" or "stable" can be removed + rs = re.sub(r"(final|stable)$", "", rs) + + # The 'r' and the '-' tags are post release tags + # 0.4a1.r10 -> 0.4a1.post10 + # 0.9.33-17222 -> 0.9.33.post17222 + # 0.9.33-r17222 -> 0.9.33.post17222 + rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) + + # Clean 'r' instead of 'dev' usage: + # 0.9.33+r17222 -> 0.9.33.dev17222 + # 1.0dev123 -> 1.0.dev123 + # 1.0.git123 -> 1.0.dev123 + # 1.0.bzr123 -> 1.0.dev123 + # 0.1a0dev.123 -> 0.1a0.dev123 + # PyPI stats: ~150 (~4%) better + rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) + + # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: + # 0.2.pre1 -> 0.2c1 + # 0.2-c1 -> 0.2c1 + # 1.0preview123 -> 1.0c123 + # PyPI stats: ~21 (0.62%) better + rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) + + # Tcl/Tk uses "px" for their post release markers + rs = re.sub(r"p(\d+)$", r".post\1", rs) + + try: + _normalized_key(rs) + except UnsupportedVersionError: + rs = None + return rs + +# +# Legacy version processing (distribute-compatible) +# + +_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) +_VERSION_REPLACE = { + 'pre': 'c', + 'preview': 'c', + '-': 'final-', + 'rc': 'c', + 'dev': '@', + '': None, + '.': None, +} + + +def _legacy_key(s): + def get_parts(s): + result = [] + for p in _VERSION_PART.split(s.lower()): + p = _VERSION_REPLACE.get(p, p) + if p: + if '0' <= p[:1] <= '9': + p = p.zfill(8) + else: + p = '*' + p + result.append(p) + result.append('*final') 
+ return result + + result = [] + for p in get_parts(s): + if p.startswith('*'): + if p < '*final': + while result and result[-1] == '*final-': + result.pop() + while result and result[-1] == '00000000': + result.pop() + result.append(p) + return tuple(result) + + +class LegacyVersion(Version): + def parse(self, s): + return _legacy_key(s) + + @property + def is_prerelease(self): + result = False + for x in self._parts: + if (isinstance(x, string_types) and x.startswith('*') and + x < '*final'): + result = True + break + return result + + +class LegacyMatcher(Matcher): + version_class = LegacyVersion + + _operators = dict(Matcher._operators) + _operators['~='] = '_match_compatible' + + numeric_re = re.compile('^(\d+(\.\d+)*)') + + def _match_compatible(self, version, constraint, prefix): + if version < constraint: + return False + m = self.numeric_re.match(str(constraint)) + if not m: + logger.warning('Cannot compute compatible match for version %s ' + ' and constraint %s', version, constraint) + return True + s = m.groups()[0] + if '.' in s: + s = s.rsplit('.', 1)[0] + return _match_prefix(version, s) + +# +# Semantic versioning +# + +_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' + r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
+ r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I) + + +def is_semver(s): + return _SEMVER_RE.match(s) + + +def _semantic_key(s): + def make_tuple(s, absent): + if s is None: + result = (absent,) + else: + parts = s[1:].split('.') + # We can't compare ints and strings on Python 3, so fudge it + # by zero-filling numeric values so simulate a numeric comparison + result = tuple([p.zfill(8) if p.isdigit() else p for p in parts]) + return result + + m = is_semver(s) + if not m: + raise UnsupportedVersionError(s) + groups = m.groups() + major, minor, patch = [int(i) for i in groups[:3]] + # choose the '|' and '*' so that versions sort correctly + pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*') + return (major, minor, patch), pre, build + + +class SemanticVersion(Version): + def parse(self, s): + return _semantic_key(s) + + @property + def is_prerelease(self): + return self._parts[1][0] != '|' + + +class SemanticMatcher(Matcher): + version_class = SemanticVersion + + +class VersionScheme(object): + def __init__(self, key, matcher, suggester=None): + self.key = key + self.matcher = matcher + self.suggester = suggester + + def is_valid_version(self, s): + try: + self.matcher.version_class(s) + result = True + except UnsupportedVersionError: + result = False + return result + + def is_valid_matcher(self, s): + try: + self.matcher(s) + result = True + except UnsupportedVersionError: + result = False + return result + + def is_valid_constraint_list(self, s): + """ + Used for processing some metadata fields + """ + return self.is_valid_matcher('dummy_name (%s)' % s) + + def suggest(self, s): + if self.suggester is None: + result = None + else: + result = self.suggester(s) + return result + +_SCHEMES = { + 'normalized': VersionScheme(_normalized_key, NormalizedMatcher, + _suggest_normalized_version), + 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s), + 'semantic': VersionScheme(_semantic_key, SemanticMatcher, + _suggest_semantic_version), +} + 
+_SCHEMES['default'] = _SCHEMES['normalized'] + + +def get_scheme(name): + if name not in _SCHEMES: + raise ValueError('unknown scheme name: %r' % name) + return _SCHEMES[name] diff --git a/panda/python/Lib/site-packages/pip/_vendor/distlib/w32.exe b/panda/python/Lib/site-packages/pip/_vendor/distlib/w32.exe new file mode 100644 index 0000000000000000000000000000000000000000..b6e1a0a238c4a70d002caad6c80c88d49159a759 GIT binary patch literal 88576 zcmeFae|%KMxj%k3yGb_5Cc8j_0RjXJiVanCK@*qghGavK1Qr7uA_V9y;=0ln!#RMr z1QJh{=44n)fB3n*xAk7R=)Lq_duv;NK&zM~Gyy>YX*CMfv`uy5p%_aVf~+~;_cLcV z3D|pI_xpN%zpw8ZIpXsLQ*#){y>Tk|J{T64a(Q^AO3oN z`Lde)uYF^~gSTd8rWeGE{?azv^si40RVDt;*2Sv6f$&3LzgV?Qyp5_|;@wwe5$`Ki zkKq0FiH}reiSQre@Wb)nS%J2cuUV)M5isAk|ZTb-ev(v`6#n^kGv>S%Koz?EufFN z7U>y3eO&i%uSHq|RQ8^cq`d2r#s>F(blupTmLxw=`S1SSDnGbgM&if6#Dnmnw$fma z`Ij$A>u+t?@D=$hlJpV{49b*l!e2Z7#{Bc5>RU}Psb>x)pkgZXL;4>5=S9w28@4oj z6;YzCXtQ)b0)wM4^yQ`U8XnyA4FG0SIp_mvJl>UfkNM}7q=jRG|NrOzgFO%k_Wsc; z89571dnMMtZ}XH=Dcr713J=P61^bZ_Fmj5JwBJmSrvwjXNK!}&x-j-ZTQ9Q81-T)~ zIK8$5pmu<|+t+riWEYw**g$6cvZYeV{cFt^QycA)GB4tD`3=Pp3CFG)M$iD~2@*B> zdG}mNGGqNbXc&fiG&f{1awI~lI_7?ZKb|5<$2(Y6fvwhUL@HwXT)*>ha=)`w@-OHA zCsUHtqmFR9Jnaxgu{4ieNn%5hN((D5uop%n<#S`<_QsQ}YaJn{UYK1Q%meljU(9U; zgOgE;))>2iKQ8q5d8a()}o2_@AUN{p9hgGe3?;Mvx=3U9rDs{A zy|Byb8)E(X&cm2kwgAsXXKTIzhN>i~|Ei~}E>TgUnx6v`R2QHc2KZkvi||GbXMq{( zcDIMmIiG9~EHuigdx?J!oCnpbN9PE93l(0d^cG%vEXh3> z0tuiw_nGyeCAEAHYGM8Ec2DP&_RHPD^`h`JvnwLy_ZVwe>scF_>(CA)Z1jwiQ!71P zTQ2HW&DT#wT}4sM*I&2pct?Qs8#xY)v3ipfN(xy}nI8m|=QnaHY2^FEgNnn*nMV*q zDT<|nb*}ADcNEwpW&FNPf>!0UkOc5-fVkVeVsLwhnlGHh9Km3?joMlxXAYIs9Vstx z_(PlV+UyUd)fhRm5e<@Ypg_lFB^lZEBzJqTNwlZh|b6(Ah{sh$f+W%4fES&mg=BUK~g?~ z!6H%2B!LE^F+Ma)Pu8$C1@@JJT2@kd#w)$L22d+&jCW`Z8F$@k#Anp)A8($cQzn$i*^PH>o5O#t0pPQ{i`Xr`R3DXn^1WtXq@VT3DL zb!%&$CF0xb?=<-vzYpyZ6Ydh42P#?cAr0g|e1=v=c2lu#qmI4l>EDvmHm>QjOxe-& zS-PBB>N(c%iJru+V5VNBK3yTQt*2~tt+zmR}@WC{!ATtW3UN;qKoGV zSUpR$$s31Ew25G61fNg9+Q-6XUk4lU?ePb+`aS$@Oc%}94^xTocjFe680+9Amx~3`8PJr!{Krh{#3u 
zkR`y2ffgGAjTzJ*RjA9){iw)LS`4MtQ1%!~Q?Loqfj}*<#ppzyHZ8mi9`|IjE1Uy$J zVZvfPIC2Bt^Cc9>pB#^s5aKr$Qq0RZVDJ@8B(pT+&j~(Z(Vm^lKJUSU;+kubwQDdE1J6Nus!BS8_66)%-c@%p?|Jryl0jAj9?Ya~5(#5jE z#{qLjz&IK%Bw*B%?m4BBBu@&`oBwK>Nk8eLW{Rrg7pI~%n}Ka`6&`#aAx3k3VQ|Ks z2ya62K|fMJ$}0%qs{l|-&Qq~<5HaY3e>GmNBSb3$x`ATd?e!g@+@PN$Q_^$@B6j{e zYMT0B470JUc z`c?iaMn|i#v&v0c7OM{1@2bX%#la4th5yDcNe}<|I^N(bJh7C3|g@*txd;h|Qee>IFhCQr*utVk{uJ zs0W|JcvyB$i1_;bAv+;niaOD^x{Ea(LmI2LJ72a{sy+6m#+W3hvg#rAE1N%u{7MW% zEt#7OQdh>Kfh_X?8UWx@0E4*2NQK3OLC6u@Or(ATc7&LjSV$GM6H61n``g^-p~$Inek5Yuv+Y5|DNB*s(A2k)i~Xgfr;++tr$p1|{v3y7ri2~=2h zOtYgqk5g;*Qfn5YF!jM+YLRFDb7;-BnLsILv@wV8G8>bieh8-!7<>6j!UCW!fxYNT zVp!^MkMmpm08<0mV-4_DOQ=zjlFKt7gomc_y76)D%1?lRwX0AYpDA{aud-cONH9$R6pz=?W=cKclexgYsj&-IC%}~k#ndU4&?)9G};M- z`o2kXb|U7XHkUiaYaK)lJv+d^OJMve+S(;KfHF2q^&Kb2{EI9!R(C|6DL|SO*-!w0 zHf&&OSa-C?&Q=uIFCVcYMb2qUSC3fkJ|UlKIW8|ZBNaWPyOwoC|L6*2bhGr=jN##( zEXppsPgwh{SJY1pwcl``a(AjnY|X!yES6>^Zx+4ev-Gkk_T~>!bdz#)?Yh@+4#} zM-BSrenQ@Tu5C?$Bi@9Gq6v4LkQ-EPk?)AN;bWEC8phYMtI(vQBNc z$ytdUUgS{zvbuv{|HRVWCq!FC8{MZynkV91?`v7+fpKEH@U%tAxM#884HVQ~2#D%zp% z$2@aE82LL)GxewoQ~df_aW9JNAQJpdF->Z7TGRq>_n>E3nZlo-IWTcufc|Mk{00+e z)}Y9Q0&tAt_n%IWv9(@kEL;_@B{SjEsZhO z*x&agpgi>h4MDYWK$52)Lf7nazvX#%YZ{AQ#_l?*r?L+E=qNm*XB?ztPxrPY*5UD? zj_)Bi25l(7|Lntn_<3KsXj{15+42(7)FI&eLws0#LxtV9_+t9(0PhBo)QOZZfzYC{ z6pM!88)~(nxl0UiQQrVI^Wll8j`_0t4q#z+4??anWG7fLHtBYL8ybW`t}Ujr@T1W5~-xy0Dwpql||g1OuM^smoHz)KzZtgEm?j8tEV8ROC~V1k^c39=h> z>>kvV-eSci-&L$+%Xbtjxyt-&(Rmu}Cv+U54w^QERDgdU3%Qs!Ttzvc-CX|cooK&C z!;*WAK+p`~hPKnAWS@*;uo&nf+wUoxN%AAvuysXO+%M=kJ1$A67aUj|FeT1;u42l4 z69|YIY*H$>nh1gN`Zj`OYPlOjR=m!J6*UI6&N^GjQsd{@m7+PG4(MQZoMp9OWOhI+ z_QFNBzFUdZQrR*{Ed7O_YRbUi5vyFvR>qhTi++~OvY%|iK3n~8di2AT!SXw7P4W%w zjOX;0ajd`T?sO&D@C~@nE%dzI@M&@VkdnuI1AXtIr7dr5|Ae0ek(-$IeG74eiyRhb z%L~*GwKmUL#je#4^gT<#daA83YCRIYnvAL!8wz5T-q^NkXe6GzVQ<0E%Zh>|S>a6M^YZS|a5DEH;p6&#vaJRb$ zixoSu%}!-1e+L4i*=Un!)(HVJn`*t826)T71jX2SE(nyQpX*s;0g4r@@el#WStAx` zR^d3HDlWcdqz)?fA+_fdNYNIp!l7gZcn3(&;&Z4Ex}>@Qzl8aMY;`T|kJ;6p7+~~? 
z2Q2dV19my<$JAmawW}hRdc$O>0scuT>h$yXF$vtqF%4*TKN(%jqTr~QN3K4!`yR}U z>)QPR^p9iP{Sd;K0fgW^;D(-IIbAg@a4pxjLN^WwkK(Cp09b~;!G)&bG>xzzTBH0i2)vF`^4Zq!aA z--VI?9jY{YKp?^&G*O@)Z~zCd?4ZIR5Bg9-M}R+k2X#PWEWp2nrjMlq;uZRDQG9^E zOh9In)sh9+5n7aFegTptbz!kRAs_yE+tP=56XCFZFRYi7eCMO2}01S zd!Z3kda-q@;lC^Zc91yjp*+5d8o?)}kPM9o7hfbImSSMA-r9zBrVfw-OW|j{y?d()c!tsNuI!Q?c5o`uT$bB!ao!Qh!uV zalTxAOt*&8>z4ZiL245>E|S)!r&#G#pfdWmPa)pEfd~3QpAY3Uv$Hg<`aBfz&tORE z(9aKY<~xrZV%tGVILh*4JmOQ7`FivTq^|{JJnEo<(c+I{N_Ox!(U89FnAJj>X}&NU zLht^{ao}Z{<@C()(sSwpdd@GRXURM~H}1(I>zav{8F;q&-3ayEln_U{Ey$ilyi7S(}`=4?V_@Hc0?_zdDxEg=9bB7pfi!33#n zQz;ID+W?UGt}$TD&qv{GzKl5IZAdsX_T?c0d_}&&ZzdXV8>f+AXH7$(KmF#z7f=!G zGNh0KHDPaa-=8t+0LcxWN7AS^iFUEZY2w&EJ25DUe~ZX2&NKgSRUP|psy=lgQMLW8 z|DkFlXFISJo9}p&jT~ySFa}I9<_9N$z8@sypo@q%J7)(0z&0?_BNNdhn+eiGkkE8x zQePSSs0*pbve5JgS zZuGP0zWJM|OF33XbSY603^x4NwEBV%?V;{mYBRJHJuSe!#8cdZsHqk4>u7LDhqMb7 ziF$~${rO|SP)f)ie42Xx)(q51iVkPX5Zb{i3Q!3c)FtqrI#w~q4E@Or%`-#qn4tw` z=(rj3nxS8rp>i|y6Ejq4hW^D2ttp|Qq)MKk2y_>tUZT-pJA+UTPDT^g zCa2c&uMluzlxodUx}P#44ugxln~?X$TK*n%H+7y8p{B}iHhQpL!s2u<w7&%PjXAIPQzZW*!q~To%6Kj47DM}BaQ7S{ZBbwe3IM6@PcX{L1a&VX zdCsKndDl{Rx|szfO(E+3h=SKpH`xhO`F9cTZeJJIWiDTiov|G|<3a3=8)Kfl&u3$1 z$!UuaP>mZJF5=--w_#X9R(=&T3!6O%@$<}xk0}D3iXL_$8xp1a()XYq_D>D>4LWr=fcpwwik_87yPxC#z>hKvHi6N zUs_S{X$URGT(v8gmx1w;sY7;DsKW(2-U6+FMAQNS2~l7Flo>T_>JNZX;4tN`W%qzj zOi-bQkTYwIv$Z@GOcCaAm_6;y!%|YIwBx*hTqle+X#}ZQ{l?kR^bgmrHrHva-RMv% z*2wu0su05VF=DL01#6<^suA*L@l;ZeV$=O+G!EKP57@cc+MoT2XdRp;GEX2}JPq2> zF8%AFT>jodGz?mltBR8^z{n)jm-_NhI*PF*Mkf?j$g;oZhEjCG7$GuY--SQQvm~#G@nL2q!O-?EroB*YCh|ah7lJtjrr7R+%_n>SGLw0 zJ;;96i|qA%KL^E#-bClZNyhdD(WgA2RK4=$hEF4ve^#%I+m;o|T!lWjyl<12V*Fl&iOFMc`M65N^?u<8kusY)WNX+~%U>%; z%n-tLo}n$32PHeSenVT5SeLcS)79XF4ll-KYB!PD#KJm&}e zfdQblpDc^CpR7ViOM&4uz)M8}Sr^e|b>v@SDnpGx$QH`y$L<9VFsV%j80butu%Dn| zdPolCt^)7432Xxrk|W4zB0DgP_zZN45EoI`5OJu#vDu#uzz!|}((ER}>t%G$3|-3mmK@D0=))P~?{N@@9-4nD3HH zK(Gt?vlP{s-M~>y!o*=xPqjV1f3}da8K{&lutuAU#NPQ6_gnGZb7ijc`$yQ3YYZd_ 
z>SL7)tjC-$muA-loG(9`;(n_(Qf|);4{n{p8Xc_C;knX~##Y&_?|QB_q%Tz4XG68d zMdx2r+(}VhXgFy980%twkyqNV4}>E5)eCmem0%aQ`-a@V_CqzlVLv5lV#i8fL9sU` zrGt8uG|nqA#9tOV05ioeQtU|aFmFMWPrrOx?XkqU9jor&ph2?%`T|9;KcN%EM%v3J)u8yn5&`M_SY| zwqz5T<%t0k{i9yU6^ehh#qOR@=80OC(&;m>OBo?}LRKQp7D;M6J|*Q3<_JF{03C_PVMw6W5T)m%|{K;u?pu zg|yf4Vo&gAk<^!m80?Z}gG{*{aQvr#L2mDM5Z6S(3Qp^(*XmO^^Af87Y)A09Vq|-X zc4Yf|!QDIJoO3y}l+el0e$3fYLE{owv+o&9VbYbe*K3N4yc|p|*5AtF)lM#CjRlV!cd(sn)D6(ju@aFym68`) zKLA3WvpfDk8gFd1Hk3Sp83RYGiVei(^|d8?6X9kGFD>~_BHTjZdrH2S2!|wdMvrl^VI+I175L(<;_ONx6C*Y~2Yq zwV$KA_g|n#KRpKUh_73i5G<2AJ5cPm!EXHNBZCertl8}jPiMmgG~HTlU+al(Nv34< zbA5*~pUU75(-yHKlpm}lVz^5C5bQa~w**@d^=ihhDmjOBgp%qCqw-Ar!TnX}2d%-R-)Uzl`U4DBR;Ro`_S)W7DAj*I`0$A3gMmhpEl`=%J?{rdIExN0+Y`HA2#Q7HTM{2kYGJ zYGV&5qZ_esl2KA2mDgil6#IHTV;%+DR;j4BizOQCL@qN{%OkvAFMZeJ&tw+4# zWy>~b)u+1_ZP$>LeG+;W4m+gHgpUopVrW(G5NX#UHjeocSfw!EX>HL4cRPOy zyVOyNh@l@wNnpPnviO*wLj*&vV2}oP8trdMFCaf=9x6xVw>nys^eY^x7 z23RhlEK18ob7K#MA@so!j8re=YLQrTAY8kyLB=&&yD^^vJR`29gHaI*LE6fYV{Cng zSf3^(o zpM%+CqPEs{z~yZD7UI}a7*vL}ESOYK7};QsCE*(oU@1Q)v#&CT|JNjx+n(>~mmU0H z#kbefuVkWMxbHqJp^(s^qWg3)K4AtXvs|sSrHM+1@6SE(4}@dS(%izK#h$D18`?Rw z7IK_}|EUtClBp!7SF6YJQ7iD)9|LnzuNd+a^~x}uhoWlFbanuZ5^quxG^TD#rsJo zmFp~0ey>}d;2i3Od~{TCeKw4hZ-Umw8Y=@rR20+qACZYh1MJ%6xhl`_T!Q)JpRa?z zX#`#m30pM?$TPGJ-s-)Ek{9vyz~xm%F3Iye9Xnh7hfTH8zc=5E^>3bY~lQM$!2Q46SIth^n zuk$-08@bz^FCSx3ZKqA!X@`X@`g>PKG_uq6pa1!vhe&G)LI-f+o3ByEwroqQ%ezdu z9N9e;!Nb^iBF>JurOM@mQmgqwUh=3*v@cdu8wqo;55KY1X5<_IZ+VUKks^=Qe0O_m zi#QSC#YSnMEqoE6ondld%!kL&t&yibL4@a{&Ss%9{1k+YhrQl`4v_9RoW`nbVplAJ?P`4okMTKA+-cj@-d z7V#lbQxEF_vlqLlCL|} z&n9g{wOOd+Ow-5ttuEc#DM_WCQw>KzY3tl?tmfZ3OS^OI09$EINXIPVKjXBL?-Nds zi#E$s)m@t;cob|4V&7=TzVds>0?j&~3E&Rsv|lEfLFlx%1z~jb%om=KcS5&68Rnr8 z_4cr--VX4`-vRtS0@4-*nSe$%fQ~gzfH)-m38wU-f*oX6G8Z}Zf(ui=h8!m6zl~Wo zH5==SIy|gQ>q@3ibOrQN&x3r5>SMAG!mJ(a)*FG4hn`hhyew zqJEU}f*}p5>o`sA64;~dls0t`38&${JsY@cykRaS*(M z*7F-~g>*p9o915UP%Ci5jA2(VJ*(b`BQVH#P^@W?sxW|IQUx>Ryg@d(_zZzxL$Ji0 zBYwX2Hz0X!ZLo%w(5x`t8LZfx2rbX&qycRq0hq7uH-jg 
zHJe+*zd^aU7viD0{OUIK!5(aad-$u^Sazr-zoG53GCuS<4`A%-x{@gZSfO#|hElHVu^W=mvzK=^cCv#Pky7;*Y`71_kx* z7p?*L?I^$m0bsmD$9Q2i#{57RMU6X6bl2a2W! zCsUEAXcQengRG9U_a@Y6viyIB7IbM{IB5f;%3VO;9bj|eR64sBp081&O00NO0^A3R zf*qj_7TsZCk;Uerj^;k=T8o_yt+mI9&HOW{o%?zoC zvYjvcVmJC6P2?7fGY$Q;ZYNe=ETO26j*SJ$DHm(wL6E_IGBu&7B-%iF_mkg!VzZn1 z3(ImA{DX!VZ4BNj!D zerCH*{p{YY*3i8Vuq8In61%eSS0Lgh^|Smfli2HL^+;8c?ef_fYYvtVrztlAffLSH6wA-m^o#Juf!PhN{%DtpxCdDO0W z4hx*G=5M)yoj7~(^4l|H;9F(2DKiME+x5wjB|Rek1ru9J68Js}G*OD3Ew<5d8c`x! zY}c0Bv8&Xrq))G+k@1Ugs_?kLhlmU!7| ze!B=VuggqSpJ}BbHOocpaAlbZQ0Z!VtV~kU)b+Mu5ucVSfpy)h2@dTY?#oe)L;$qVYlrt1gIyOV7J=Tui3N3?AZT2 zwFt&ciq}f0eL}?u<^R4H5CFraDqwzrMr5Aa?s}v>8_rKu5$s|#|BRfY=nnW+!q%d1 zSCc81%gnW3%)h~GtAGz4#R?9+Q9Rcg(VmC znzr30s0_36nve&O;Y6EOp49Acb zdp?cqD2}n|TD1%6G3OI2f!JUNIGqn4uy$Q?e%pgDaG&TE8`$8Gdgb1&DWQAoNa?=L z=2>S~%1jn_C4b9gnvg@0DvRK8dx^*0L>yz1K3$E1$BicbXfHKz44YeKM`;@P-7*{7 zW!D~qRm4^JrllJ9Jj5jD6VC#g!C$S~5jAYfB2~A;3r6tSb*z3Ml6O_BcY#Mz{(6^I zXV;e4h^Z{G@h)R3m>&UQ^=)VkD==u!u>K^1bEkl%6lrT#;qh z@sAdPG@=tz-9@~(IuP4L@oc9cpiS8-XDm}cTLQepK##PKP6jJlGxndsjo zc-rHO8@e?m!JkuXDiA`q3jX}&Yrwwg>?g#Zi$%A-N!^MxG`?p{;$yZ*yjz!!=~kah zvufqJF|6^U8ZY%i_CCP?s_kG&)?{KeYV-#6_x)I-L7g>=)hqdd+Qtm^P2=oE6{lVF zOpu-WI?lD6{g_tjOu_ozg=kP?W1bsV4&6AM@s5(Nc4R~J!Lc6h=~i~JsZ{WP1M@Cm z-c?rX4)vS3eq=OfKXTq4&zW{j+Pe+?$dSXJhT?{Gqn@}`D~%)xem6F-b!>@Cv#>Ik zT4cz7(xwU|%UsL_mZN@Vz*xCQ5FXpHxBSLx!ub@ei{wve(qm;gD^9c}%kah6cUsW- zYSgBGMBL_!GrwwYx!bt$O=v;Pf>CHFe;xSd@kQa|;G5M0Y{eBDOD}}z^J$#^Fx_`T ztXT`!hZfpAvioeF`|QdZ%q1~{8gF8!XoANgJBmP!TK$C^*R+;v_#1XAZdQE(YyXT4 zeGX=i3ARV9iYD}oNyAR$P%_9YQhi~@5?fqW7(zXE_V+gJF&ipme@~3T*#hGl7*c{V z@3U(FSWW=ia(f)FwI*d|3`aqgt)SAz8=D$4Bqc+T9%juaX&yF8q_MOet>q*)03>si zK-0cSVprr96V;_XLM}RcG3_oeGFXu_v~;a%98G%gLZ#n&hUPV*?!dGjU|EsJEZRNc z3%N0vP3WV2liPr72n^cZ z1!xDdX9_qu#e{)w?I#~#4|MP}9F#luJ&m~;2xYIu0-Y)j3+XAI&c;;Mxt~%(Hb4J$ z9&K3mH|2?q!nBYrpv|W={@gVQ$)U;chJkMgm8&Q3BY4e`q(<}Uz~>de1CDYZhlbYz zq6ZMQ>_~Jdnc2d00*Za6el$J$Q3|uALa8C!>5L<7O0?UNePQzzZ#pu+xsLp7`1iBH 
z2utYB07*{ff2e}AFrVt;-#9|;6L#vM61Vv&wkwk-C@4`SzF42ugJv<8ss?(w#n0ZA&7CTiRu?QIgI+y6?FrG<>|D$jSekcv{}8FHXfUZnC|`8JiOV`I*3&hS1Y;Ru~G{4N1kJ6v5p z65gaf(p{vjbo7yqXVB;HI>RrU5Z=B+_~`QW3oxn)$x!6|0ADl|J;P*?gsBr&=$=@{ z5p;++herqXwxNdncn!Lu@)hzLm1oJmO8Iue59XW{XE+N5U-mhqcBqFc(i$ClRdM}* zvY`3FnDH8>mgz*J-$ju(r!~SuM|jofHzo3nZ>|=kp=VPJ<}#e7j_auuivYS0y%&BR z+KuA+erL-+0PVy$g{*<#_IJUfwgk`_@Sx*`q@ zACAssszYw13HmvC&9g4{l7%7+yBj{n{#Rvh? zSovd`zDjAN;&XM#wQYQ)0(12_()p5h{~r^%jCs!o_|3$D>qIHc_WYn12jv|80N*L1 zEA;P>hPqb|Mn6mn74dK10It9?1p~8XiA>f`)Vx@@&N8xd`}t?!p`^wU$;t#{UJAeX zDxySK%S+LxFk8wfmoTedkV1YS>tHCACpJGMXaJrrea~U#Yl<^-!7NhNPl{e;AA)I!yO=Y_c8trn6p>1U@mRH zH@l$C;`bOtMvi&`gJx(+2f=wH+zM8OOt%=1<{6LX8jsHAIdMQ7Gs2ZsHSA-Y!aV1G zi`@5(GuU2(>we5WWl?m@Lm+06dK~G7@gYpw=p?!;CAerE-4FNFBQV{QZgwTxRA9e< zIbD^-U%+%EOpd^Hi40S;WY8Ao zC&?M6cY=@^)MIo))pU%&IU5`_z#5Ybj{{spvo+V)io=dFohS11&FD4Yfa^?X8P`x0 zRzFjbgL9ORE6Z>kj38-Eyfwh@F~5i9nE`$WzOk$XA`8R<%4Z@BKZrW=fk;Jl!vCSo zYb$ts!Q)CQEp_=&aPJfcV3izd#I@&Q!KYZ3vDD^#$x^Ci=@ZBcRqb(M*-C?}$4(pe z?qhBm$YU!>&1Vk{ufi>R+R9#NHm2}yP^g*@wLac)PZR9;TR|;YsXUi<;5LyRSnL~a z=KloDqA(o^LI@LEoX3uwftREUtG`wA!70JT z7f-;*p&Z;RF^&I+$Okqpak6xDkuS4E`j8+H!{K#d+`4HhDs*b)S+j8lo4h+#WX)kt z>zjk6wn|xUTb$f=rglYjiD_toNkT((JB!bLNKmv-UB%p3ehmDvoM`Qdf6%5YEISqM-nCmwI z!KyigfIXY>>8W-(zx5EsrCOkXEKjiwN=?p|55W&mZ<4cRBI@;=fCFmvKvOXe(>K|% zA!*9L3S9#fh1L#e8{kap>2|88Q8hFbZI%U-oh|=`zyi2iv|7tb(YbwlDbd$o3IE0$ zBBd5O3ZGs|DV^4IXxn`*{cDB~`aM#(hj2lgaV_3Tx_PP&lBG{=Rw5p4r}8Sp9gxtx#FAz(d+ zrJAkxQX7->+8! 
z>n*aDIA6-}HXS0-S(16%1m{byI}d9V#4ae4uY|?5OZpPB>VJ>B2EnKdF$i5fYk0B$UgX5jo2?m_j6!m89D@GD$#TRKV5x#7R(bI5d>J78fzxz?Z@b zW%55Bz^691?_pZohQ8(u*Wm+tfW9?&!?&K?*WkH0n*^^7uvn5>8~#yK_%!4r&TSnj zig*+4&T&UgCm46KVB9nKU*Dh#_&L1HD$O%xUqzVM;BiJftVmu&Z0d&Mo4O3=;i_Vr z`g@D~+96`kU>8%4mx6)+yIER9lr|By>CgLl(Nf}@N{nu%D8iwuiQqq`q{(+)p=!XK zd-aLDb_55$PLxA;6(nqTw4BgMr4CFHaihZHRI0f-M5_bTEG+aBkWsWCzR>>=MnZD| zf)2v#N}OQ9QkQMY1|;Ap7GwW~_*QfeVLK9cfYD;G_AgDR$Qr+-d z^26p`Q}Di;2qPTkR6~|pB4b6xM!4_LGH5W^74pTW9@GdvJs=w$MKaixh^=_9SQ&gpYIy*3FZhE8vCy0+ck=`D~t zy|dCgy*D9P)ajiEiP{T!8!c&$QVy)5CpQ*1zUgd97o6iXl`U96QDl`ZV)K9k4hz8H z9Z77s%hv-iNncI)@QDH&Y(@gCx+mfgq^Ma`LBaWvy#k{zjlqcg!<*I@G$l|{*B1dj z6Q^<3w>?K1-hvxWaS6?U9Z%^gOni}RMcCN+H4quY=G~A3Ii6rA z0Mx57L&!*{;8NS1^9ju1>@2um{6qu#Y6o)FQHCj^cjKxgvy*Y1w5=)sb9fqH8j-U$ z5%Qy$Vk0cC5V?uvxyIHl%v0C|bd6zoNdSOz0^QzevOIIXZ$|iwEU(|Jo)OEt-DG*V z)|pxgRiJs-&NUqE0&(gDM+=Ws!6W$8ivAWy`}9PdqdiZhnjK08D!Ma=9c%i%)qSq% zN|NIJF{xnWVr)Bbg0&Eb&UfPW;{g8*0y1_UdxRK6_CY7|;m#bV+VeSfEjTVg&fzDF zb;zi0w1Gm`;%2PQ0p;(YB0qn$ z3UD~hHl>}$jm9E7*h*vZr`JuUf>ROav4=bOYZQYnx@P0Aq7t!hrj36Q4C-*`#>PE> zX4GB)d>!2Npwc(D*0$$ysBAE*%C{A-v@7HM{64~rrpPq73+j`IX=jtaDExnJizmSV zh)Ym#4w0ns3cZCir#+%Lp)^=8D(Oh9DdPczOVTZv^0+<8cb;!V8*mLLpK%m5(Z_Tu zca+9!1AL$Y?I$Gs`)JOp!xklNu!#f^i;|%ZCn2YiBp~&;?&Ob; z6#cho|F4)8Vl@}tGTPB%PcBcxmUc3>*L>r1Q0{JYT!tyg-Hs!UxRVrH?8IV@W;3a| zNSrDAlDZv6x&r3SF3C^+-XuAvWCp#mN@mh4wM6p%BV9vOK*ZWk_l*XtzTt%|yh(65 zAV62`6${5Yfc;Z9I(7chIp*Qe!*t~|^sxEc?BrdI@hDg@Q%Q3lhbI=!f?REOe-+npVD(Qp?IrUP6b?@Ux;zMVVixAoAUzFAT2LZe7U5D%5OEz$ zmlkoM^~WLGqFSC#yXc|1I<3Tod_duZSwfAzFu-Rb1M4n>ez8uz9eYrI9(z&1(h1Y0 z0o9WzLp9ph3eY+ZMois6xNARR5z$ zpbZ@TSOO4){7eYC3=3|Fg333kqO|FrHS!h0_9jwNOcyr=k_8)Al;c2Nzn)2|Dl`L) zKpW2V!R$4Dv8X(0%|bNjxbi8r1(%Y`D+P{rps-{?n!G4{4p%1PW_AGI$g3*NMrO9b zPsm|5&Vb8TqNRE!zaY88>nfWx(Q|o^YV&)MEBbKd<-tUBvaA0qFe>{XT@ML zr43e1O{8&vr(>#zPv-O>hcFG&t(UdDm~@3l6A2_EC42fM5w8KZRv;J zU;PTAD1;(8Gu$DkT|T{^?$FZL0^}G9_WlcT23?=Is;Y$jPjo6iB^lRei^5wU+`BZ_pe^8yZo%T0zR*x$_Xua5v 
z9zx^7hH`UpvW(H%IvtH%f$}4}IjN(}NTLXwl7xi$x+w>mdR`PFgi9*vJ#rBxYW&}q z)p1=}@vZ4^%0-70Qge!-3i)4R79CR>+%cfH<}+UaD+Lo$OSYqF;Ly%6DK3IBSZ?A! z10${w`VuH9euYp!lHkuv+lFk!`uV0`V1>37^iceMDh5AP!E4RPlOl3QJaVBK`Mij1 zibsAKk#+l95durd4*tL_-p_MHd<)|D(E~KRtavJc$&37a3lfd_6v7P<8wrx`IvSMc zVA+{1u0xv?sUBJ$!bRN|mcx(Lcn5{248oSCq%~K&Bq?9gi}w*;D0i-(-$+FOWES0e zhTn#f^Y#&bNQBFIN<0z%eKC=4%%^7Zi$6CrVb>j38VCSW{7Fzbn4REjC@OwxZ3<6= z?MCe8O}*}SUlHpZcJtW1(Q!8$&egrgP8Use_ ziqhv9wJ{r>dy2Cyz18kr2wbKC7k$!5(7p^mLr|TqT%^oNt+_yxQ67l>K1aykx+cX~ z)wdl3*E)@-jKTeJQay-&qX2&9+QEFxt>l3HOpBICA_DW;7C-Yxq}TJHYDE174iLw#3wnX>_p23g7g^ zCS?{n6}ke&RWsuBX(I;R57+mXfCMg`BcC7~3A6EsUq)#-ht5-;q7Tu%=2ecS5%JfU zcZ11$ad^TKz^UqV{&TRVIzBN$?Mza;k_d-?8-dzumZx@F2tz0lk=i z;O`&shrZ53U+3YSi+3*G*?4E;?ZO+*1y4BecHnKt+m5#lZ)je}j*Zzz&!fD9_&bKb z^Hg?XY$~kifiYtt!f5T7aX{MuW5!)Gb|$Cy(ak*LRc>!C`rYp^SzumbKLW#vYop+& z?sJez3IZ=|L$v|0E3QH_rJlmQxn0UQKJUeZqJnj?cl7&kx;s)GgF%zzR?;kzSl$Ka z4WU^?WodqX6Y|vQr@p9GNvIT2*-9LFge4_A*iXEDGT1XbWa&p~py)MCDtY__iE&V> zJdd}jEMcAO?LOK(UZ+*r2MB0YzBK}9t_Dsj05+9l*Vt6PpkP#P;_J-J zSAX4LNA%Tx{=q!jdW@!66LGa=f#Q6#7YA- zI|lnaTiL_wv5*M8OdfhazK|U3g*}n)yca4w+EIv~7^LxnhA%wtPiXOrMdCRZPh3(j zwpD#85Z*hLoB0!{9A+jNudz#O4ALF7h-^~;6*tDSE;tE9;yU#d)8b_9YFky1bnf(3 z?Zw$BzV)e_@oc)9L|c7}G8e4-@)_El-^6F324FOor(IXocg(8#%21EEMq(UY%T5~y zJKu@51~y>&e3)D^NbY`$cC5HKKGoLbY}tzd|0$Fm%)1FUN>^q_!g;^|Y9BvKG3KRe zk$eP=92Zt*%Df)%fD^H@y#?oA{fNOmaI|tiC3;H~fD@(R1;|MT=iLh-d}-lw1lz*Y zz81KSJ%pvFErW?x_FzwsjVOFXxcLwjfQuA?=`0Q0P9tsfiB0Y3}V zP%;VKXI>ad9d`@Bn5ddXh>)z&;uoodv@j8ua}V9ds=i9?mz*tcfM7?;B=>;PLOXo} znb#D28tbq6DivjM?m2~k5vKADQEs1#k`~}*v;|4Z4Cl*k_wkMDFuFQgR;CJMlC&48 zedaL70q-|N91q@&PTuGdJ?RVwsUA^iQXEhD7MO)0&`<%~a}BUZ;6W`ju>sR77WFwF zzXFo%yBX;$t8IdbW$Oy(JeH-EJB#|Vkl;Q}BmhzdeZEG-YR*D25TLlQDZB~lf;Y$R zd}M*GOHdMvP_+6i(VA2!Xd@D?L;!}|Eye6fN;Sk#wT&;uFWX`3C3>q<%JiCqfom(w zkLFjY5Mu%)q$BvjM^Q>4?vjXv=OEq>;-PZQSu#4z3kALeVG(@=CxSh9i#V#fP_)ED zP8d}vyDuLp=ET61cZSbku!^Zk)rcQ$65*jfOZmUhqzd+=NHxdO`SSSE(v2#bHBlAK zN(yDJ-l*kzy{c$)Qv9`1b9ueZf@X%PeJ}p++xK^vT}<@AX!{-zafHX1_T5YE^CCqw 
zXa|8rzR(bwrgsOhj>oi|HF8CT_Rqebc#hWH{mY(&>-1%%{wM zYXZv->x(q1p7(d1(BnTNYEnL8)|eC9Y`r<5#Tcif5A=IcM})GHcarfUu`6S4f?#mF zmgr3NS(-$k1)4I~mao1_agwr0eO187UxqhDv6!LoONhaM5MTRjKcSUKzc)U6?FiQC zx5wv&;P+l!4JIhCsc5n|z*oRu5&82#c>E|n)qJ7ub})fRp&+7ltKl8%eq$|P@@;W# zf-C5{;Fb8BKLw9VcxAMjdHL5y(4m#@DHMC11hEbPkRW^`aHa(D#(&Z}b-h={7bFPz zH9zl|1FkeiHW=e{aqAG35_h`z1B#6I%*Xir+a}7TXoL_4GQW&c&3&cm5iAY|^4l(; z6S2R>Njmi0W5hhqR3P*^|$Tlh=-*LPwB= zN5_cOHjfu0H7+`&(KtlO^dNo-YBDdOl$@mH;>3tb_=>O z8!8>4(1ECNr-=%5OK=a#rCV5jx|c@1zj#%)GyEfzu@NrRhYb3_l^u6NJLE)78MQS5 ze(`r=F!%!~!*u=P%kas>O)ZIEu@m}S z9}6Pg9(CMSOdjV;FzDf6m39m$)9>`nlA6fx@QXj|XG(8}PzU{jNPJ%(fUiq-M!PZ_ zN+$BEcDQjmh;@@64Bq|t9jOh7KgK%gZ9Ste!uJ})kXsI5&h(u*cjn_W-5JM~fy{Gs(sLzV|{+w2Wa4`8>ZEiFXClobWp?sNFE4bPl`_AK4DEu=`d2_8gdwz zCP2K!%>o84!NgVuw|A47(*oFKn~oItzA+ zHzHpdGAa}Jr_i;lMPGux@YM77ifekv^tJ1^BZ|%D3)?YGM`irPoj&e%%n};HNaT44 zC)$fo)AQBy^b9{Eo^-pi7N$#?<98~D#Wl_010sg*W#&&tAXyA5^ZCQT!_<`}ZZ-Vm z-vl8yz4{6O$;ua|%VFpy1mcoLI^=fz7L5Y`q?Gs2SHGkS;m8s{g*;{^f(iV7I_r-P zCr5vuVs*So)_MFC@9Tr51-q2*5Zok!!&sWkLfkDUe2D;cWYAmC1Zf-HgrDy1cDB^x zt8D^)VFVb&HDF=7Nm>g(hiB`PbQN36TM+AkRT-?9Cg4*?5$)VF0g-L5H}&N=U0Jm) z(|P!a^Q9&ZmAl&GWDzRmk@hs_OD%T$dtRLVj&j>tO%(3wbhh+jVuVubJW=PjegZxR zoy3wKcp+-55z>vJ{_o?Z#e3B3X|s>bzNbF!laUd%;@pm0sdB44udq`tsDx820?u#% zscA0~c;Ax$i@i62tEyW2#}|T%QwLKEP4lRz6b>k&OeP4Zm?(;4YJ_r76anEJhf+BN zlz7DSz1@4WvND^eYlGTkf_j~@94l{m)5>bov7|JmBG>=>U3+ghAey)C`@Wyw|GgWY zv-V!&v!3;=XRYBxB3t9TU&D*miuEJt%hguzMAfL}$gns;l374Iv&qXs%UIq&is;J7~432F)BoP z=&w9HNy^H6l$Rd&ae7Fz6dk3Rfy)_aW?PDO(sWpg-ln+-7md-JY$UzlP$V%D~m#uuA_2SZ9ptpM`dnF@x%DFl*S{4-Rj77bV)_U27F#n=duTb#QGjE zkD@AppIT=}^I9jDsu7rLb~I)SkPpVuJ^CRY57uPbmZBFiaKuzzi}f#fV~$yAmtE?2 zcQGl;TvTAA3^EJCe*m12*~LNSmZBbro0)!)j+Z8354LO>UY(>)xhqxr?4(k4Xnm6A z^AqKnA9_TccdH7^MBHsw46j~pD85e`DeD1stSb5;A?%GXs~3Vt;nZHNDhSoHM-H}Z z6Z$dr_O`0)?vaVp80Q(>QgIUR>z}E~Fiv;<3OrM>mtLxJEcg8gj4VZ;;kU5LW?8vK z+0U}9>d)4>D#Jdnuqq3eJyc~|iXVd8c~wl#l1;?ho@x{YXyu;Z8dQU@w&FbMELg5R z7*r2pwYN-nDG`~Dt7?4pSDgM@LzRjrNnIszqIWCuL4;x`#0qqwTsVp5(?HYvqHB1S 
zYf^Q^5qvms^ZG1Dy3R}Z7Vg`^6<;lDqBlPUiW4`$Wm&XK%`Ft{nhuZHCWdL^6-tOt z(IxsEc3l1R5-8b3Ms*!?H`k+P1h=b{ONy7%Wq5T-@k+cHwNY-OOhz@5Dh-|Wpb23t zr7A=+P6t_zV$iIA0sV39&sJuvaA(JYuaPxj?_$&$bRP8zo-9M}r~k@7EB{lbIJdHv z&vJAG4|{l8O;7LEc#7=pXl;}VvWePH-cPP*jN&ZOaVvcSHtv8zT*O!O7DNI#yU4}T zX3MYzfp)8=yeF_SN?R`u2uKvcT=-m}Jot>=ReTgt1urlD0LE_B(1R01#c=pRks*Mk zIFT{$#^xTY-Crlm`EqoFEZc@xyM0)1)}nJW9Oni_+r8IfO80QBryGh7=U=}BdxrB` zqdRq2T==*)V@5q#4G#I(Rpn?D@`*ver7b;}p+ljPzZaAUs>>Lz-7R)FnsiY}m*QYO z#WhmNlj5@cCUmy<#*Jji+1hbQxGLqDACZyr;KHB6OV%mosB(S0tckax=JIPsySJ9c zpAFhI%s>ouF!~IeSOE4J;H{>v_nqOv_DNFRF5yzYyS##<-9r-63!%kp5*>||LT7`N zOgdEM4phRbw(jb}%H1@<3D1q`_eb5Yo>Jk~XOI#|J`H8)QuP!E0r$sLE!eA>URJ%` z{#eUwd<3~$V3%8K-36ljAcDg$w3BSLgVr(cLF;I%^Fd3;^-}ftFbfCEWUR}cN}}Kp z9d#FnYwKgM)*VYr$r@TJ-1We|xMN;_^Ko1|DS1w??oVs4$56PHA{b~ZxsqO7wrqw= zQO;;GwltBYqmYl#^iV_m+Bs`I9PEEImKn0o5~Qzeq-TQnoaHPw3(Rx;HXyb2~VuJ zfK(dj0P7MH3I_dTMs1mXc}VE$Xfa?_v`7bR2V8SswCDi11F#xyYUIU3L24BC$rCx) z{XV?YY7B)PVadrnEqQhtI-;}5D!97GA1Z3_t8d9*dU3Zi2YXmcwYxE`Sj(~aU|CmJ zB6hg@C4~9gad+vvj`M|kC5E{{4l6J7xyZ*_(4hrSrS>FEJnN!1q9>KtzP{2hKp5{^dH7i?LI?t{g$*f}Rd z4o|*t0rBS!DvcZBJ6NC*S=dH=3(h>@bR(?d z!ikvE&tVU^rRWw!h(+q>^f4;VX=7S!!@gjZ2qjIDG5<~vyzvuw?7?v#+%mr%9=Jyw zN!xD265!97g+d zb~hpVI4{)Q0UG+d>*oMNLtOa;k1=W$SS@yD8Fo!h21X8_u|gQKp{wu+#jHe7Xra^h|WqB&o&1)-M>F04Aa zFbu_{7wUD|hh0IHYOK=v=kBJ8D!Ua`GN6;GTdw+2YIuANRDtL&-LL8`K2Qo6zSb0> zo}y34d3SepDmQZBkJDs99LH(RhP=5dOB|KJ`ogC-obQI<+Mq9xn7e~XN<`*opx=%e za)Dmw?9E8aP3ZJv(_AHT+cG@fpZn+@S5~`9Z6~44QK~|%%Uyak#tMYO(}>EK8W20` zwsZvIRXPF`HpWTYdTLrATJ6G}I4S5-b;#JUwH1B+{iu3+ZT|^`jo{Kg9CtEN)pabc zvzpaBlpfv24#1ivlghHOibU04ZrNx3-3?RKUkxv+{`!MMnyUKiUt52ztiOJ${^Ot%(-^tg0BNSQ6QZp!r2Zct`h zt&KAMwH7o7YCh`Is#P=j1p(SQWd>@emDx)>rOaUMxH1Q5N0r%8+po+pZKpD$w6|dv z?u?cTqP3TmRh;&$GDm3Zl{rdVt4y5#s?70Pi83c^1pAtx^GLJ`^-3+S_NjV(jIT%3>l1w4qh6ok>sGuL?(Aji<@7?ahq{_XHdg2E zLJm9#FH>ANAfRz3PvV8ma%F=BN1-{&W}>!$HuWM|hCLBj2Wx~GugBsS*?5s*GgP-} zi=CZ@O|Wj`ABk3U&vz2P1p$$vE0qmypQa74G4!iE zTES4+;qrQ(=7zN|jhbVvMd5ZY%kw?~xN#3h1MZcu$LhirO_b9vFUtm>fV}T;xx-E? 
z)>_mq!h+(}2dY}%3){UxLPwOVQG}PH4+8awNcD1bZ+^<&sa*4mh-u_#56jjOsooAh ztU|;YM9oh*4!fYR+p+qFvO^~e%e;16*bep=cK;QPfD5Ae8VDfJ@eAA8Yq39(?t%m3 ze;>BH82{f9|JK6tM(*XRvow|qb)oU}GtMA;5Wyzl{*zos>0Q;uazPdr)`@7M04#`0Nr6egU{7r_(=mS2VB*9IK~dXtMS|LdN_r{W!J+QakM~GRYJ-*c{lWEcOw1*j>0W>^z$n_7Yp(v=*28UllIg1TaChJpG&9S@ zb3NQ^zy)PN&#QOhT?6>x!(7AG;CeXvrcC(l^>8X)4}ZVC9?pn64M}7I?0_YJBEU+( z{eY(dD4UKbn?JZ7j)}Qs8GiQwjsZ^7#w?pZvK|gQSc5q|{I@k27k+~4IkDNWcqQa4 z_KD2#PAOS>GIB7FWLrAPbpdA>sV$7^$f7u7&m=ZIc6Y)*CpNr!eS&PeB+%G6X`E8@ z00vXW-p3|dHwQecFC1kw-2I5T=nLw62ptor-QLD`+U5gleeFg zOh37NI(B9sY>5yh(~sJll}tajD_;6RO%{L53zo~LOWsqXZR&!K6S`lm^abygh!GRD zn8bQrIH82y<7f^2P;^u5KMIdOY4>p^N++-U@ebPYCSobuh99SW_y~KS5%wP11k4we z_!nM-?Z$ZXm~<@%wGeFu&W**%vsLc)=gWQVzKB^M=I?Mo7@~>B2yZEt61JV=$E&#p z0d59#0{9hb(T?@uORLnjtxETe#wjW8cE+J9?v{9Awm3?x#6b5=M2riE-r!I#vRvK6 zk{@XgM#r=lR&RG6%z?3`h`oow0xg-PI2yePT=T=0$K4k%2q)Y<@WOV^-Dxl}w_65_ zCNS=Z+-@nh=mD50(!-%-PP`TG#@tpbfpzK^4s}cP$LS(XjpNFo;K4a%=itYuAQ9R1 z!r^O`Se@^O_`I7fJ@<^&boX_)MY2dvxSx8)&yMrK7|KtxwF2$9>l|0^oJKL}gg$_` z-NGL})Rs~3yK8U&y0Yj=pVQ}&F+wVhKZs5+#wZBfQhtQZo{VN5&Ut(t0n57W!SN+Q z;2ke}_W~Oh9LRhIM<|4CAm9Mkz{@Sryr^p+lC7}Ig1H4DgnI*qI@mOyC~G#&Cra!`Ifef5dDIiADR7%)XWUEW zYO5}#;@M17aJr0huNP0>zQi5QrE!e{oL1-Z#9y7ZUq>uYl=^9K=gXZ~X9vyzg&NuC z&uN4eu9J|=OJ!tgEX~lcinYBq8MqaM6#_SVB3$h|FlF;I#sFOFR*jpMmrt+OA~&lb z(1CTpxS)drIvEX0OL%|l1`fKhCEUIP!?A(dSUqxlzj{7i0)b#4fV2OaiZ?_w7;UK^ zH<6s|w%gITSt2gQX%_DXHte=LvfLhrrGH*faA5x|jIQ`-j;v(heG{&I!EXFHJYQZ%)S7$*muJEuFr^CwfPW;6X34L}Cd|<+69E{!Z zn!HFu)xwGnu;S%mAcpG*$>!H$yL~sr6fEw9g?XKrrDzqY?9J0;;B@RYMPe|xrO3e{ z7`E=s#i ziiNF?1Ml4cQa}Sq6|X^frMpLoV$lZ3aS=a`_P}UPIjcgC*Msbx|Zr9#dSb2(-I|A7SvQ?L#)ifO&pHOg0CrQpv1x2Gm0$Aq?*Hx zI^7G&ak+G?7eTW%5XDm}9{2_R2D;w&$mUm_aY{#t53Vs%OL4GJJc+IM_a$nvF>f<+ zE%lGQ$bCS&aFR^vJuH}%k}$PkNxKf8Yyo$>rxaQ+UI};jDxdZb!yO&5*et$n_k6Gw zPxpwnZ^sT>PfbE;{HY|I8#qTT+Jd6uq}g&~#|t>^Bu-ZKMy%R%z-33sdEgQ5IAsqE zcbo)Oo2sJFOUWoQMOEZ>kcNtHF6N@n1d`6bCSk2Td3LA2u|}&9*U`d_S7aQW%N`3a zjcXGIbyw|W6%{-k3_KJ*+1JsdMm45=gkyjph2R$x-SnYfVo|}f1*u=`qooup{!dOe 
z&g}WRNRi7kb}Y!3`4z7=LQ_O@)<$2eS$Dp*@|1cw_KR@+AqLnL19zJ0~@$V0>t`wz8P!1r+6Q;{Ay zLLo3hAuvKAFhU_PLLo3hN1*x&2vny-F_|9uzN1Szv{ucKs-<{4IMcHzaOeleojgpi z(Fi(J8YPOew7IB`YqI=U0ihwL+<`o+VItmA#;owp+GC(AHL}!sIx|q?mYxg8QTZO6 zGZ*SX#W`5C_AE<5zqU-Rmn7!|Y2BgKSDj3Qb6$jw92L#;qB6xf?s?qch2}m;`vG1& zgWP$!AmQlupe%&RGPp#q9OqYc^0CJ-3!N>Sn}qDuEmAxaH0XoSD@4Q&tRLO$_S4I% zEWO1ZgJTfbvZc(j`QWhgaS@2p$|$L4yne-XdikCui{XT)^cLHP!7J7cI{ebH0PlWP zD0iSfxJ%43sJ5>(XyY!tqznpBeyR*mHrS0kK^>#H`W1}pP(b6+%bQMxBrn6}zEIC| zKl@|RTU*zN>X1F!!Zda6r=Ke+aGWbCV4T(#5~+KJ zDH@jmpR^4KixFBtAkQ%6>7~*hQMe?a7nZd}+5>tm=#A4$T`gZN85Mv_A6gbJc@^D? z>-F#lsq~OK9g=nYg*Lp)VA`j|AA(ZCCNw@}kihgcd z-dMef(zhS~%Db`(wPgZq9LIAxJcpP!7kmRJ;~ixx|d|{6gJ{e0SocB~v3?@#Lz! zWEYR2c71=zahli?;AIcxQTf3JDvnM;@7nzm?7j(?!J@Eb6SPb2cG{8r8Z17+D+UDW z5C#(qeIc{IwheB;P6&R6W zQJz5NapHRpTiSb3O}RUhdxKmT>YTe}+;9xZa=Wi%AU3rQhAx}qXnuF7J@cvqdP_Ws z3ujnNKZRqj%yu*wB*#6f_D(8t^Vo8Aq*rNN7>}&tq2L@N4+j!EaAem~Zk_#=n$sHY zXt4Lfun!|Fvf)2_HM3EM=_;Os!_(J}&uO6@U?f0E-K4@z1F%~Lg)~=g<96vn8Eq-$ z4H>QhTe+T*mxgnUT!L+BKFX#sR|7>>S_=OP>+{1v+i8W%umCh17=|(;9Ag4fD7%ie zw73RcAw>%`D#zX4pa%7xS|B1RTzVYUt@L)B6{#}LNbNV^gE=YO745zo*D~W&B_yC! 
z#XD4huW(eE7e?tpI-=zlv52DlWMgkkp`F4V&eE}XctwyzuZRb_#lOSU)t!I#)Y6tJ zBQ*!Qxmp)&SDD(LcYVD>-EgQJ0=fTL{&i!Vx~%iU0j&DOf=}bDuw8M!K394bKV+5tL8QLRmFXW?ldE|dp%;qH`*rspar*xYWv`cUtCWc0sF1TKDj6L5J*}$F9S5 zlaJGNx>hpbH0~f#(ic_}5TJ8V{<$*X~1sqj%swsD!sEx6R!X z798+)jStVX+=Ww|egSWw-sq=K5B_P?oWRZs))~N~ zVn1b#qa_w-<5J&BU@@hFi^7bv3oONd!iPHA9XDoJ?p=W|oI3E{Lihu#Cu@I(5>Y?R zz*6)G3M4Z01?VsrCh+X3JF)Z)Jjqf9wcMc-wFDF~e4m0sqNgjidPL-|+(xcloUlEm z%_>7TO^Zi-sPPC*^(312@TI35BXHJC6HH}e#ys)z1TAmXy-4~g@NPQ{~a>wfAVyY~S z8$EhW)U0SGAbl*whY`J2un5~baehOltI||t^@0^nF>_YRAp<6AThN*yA$w<(#)9`IYESc9^Fv&EG$Cc${ip*wv|mv|6`bcp z`0KPY_^L(bQGAs&!PK<-uo7CnY{A4BJXTcr#S(XEbS+L8`hF$(b*yZQvlx}t+PE&Ahz|hB2 zc6KaTdJbr?4&pAR9c!_XBHlM-U(mk7&nz_R3RTx3+)^|WXM?!jFZ?XO1P57vZ*LRy zJ^8MuVtgeF6E&=&U5Lp&Oj>#$2|5Is*uifU({ppZ?DJm3h$nXBb~ zC5fj@N}PgQqOUhB5PHHXsK$aYG#@us8nyMyT&@*Pu8XP?Bmte<(SNOAg;%HlaT z59VAQ&9SAJ{0zeG1&eq24SQRZPpRktynG`A}?#s=IH+*QnDtc)Sz^r;m6 z+)?G|ta}3AZN3KH>JGl5-TQbt6$V&I+GFpcKCpr;WfQsl1*Bg1lh?BD*lW4O8gdw8 zNXwtMkAut|J8@1kwd;xN&x*_78=Qo*-&K%YYuN4R0lAzEkHjoRHWauy7@dN5ZJnJ} z?q=PcDv>I4*GdGOxuA^-A1J8MyjRZgs+*dnn4a8kUNUk!3h|*twkNmf{NfX|=Z3Ni z2fOWY?d&cvOpe@>+uT(?<$fGM>l3h}H>UuKg}cI_bX$l=HY}&L>u)0!{YZBn|JA4j zSG78NY5zp$L+vm$#?dx>qos5%t>UetIJ*80-bPx%S)TBvZS5^_;83F_d+{5PzXP{X zC9k*}NnxTt4p?{o66ElPCA4CYp(}4inp|*HG*rC=SndnQ3!2}XoxcQQIbVER zt`v6(`E^CItGH$AU9vy2iMvX$JWvw96*XGXl0m;E^y#7|U^VNk+V~*#VQ$%RxASQN- zLNaLG#(OM(8c-V%`7;qq1&@IP(7Qb75;zCsIO;r3$t+3SkEzj;rAKi%y`?Axk-0*7 z`#Wzn;7!b(^a^ah9f!PoIVLOpJycO#40fxsipTZhyW@`{AdYTTTB>838|rfKCv+K#Utv9l-<>I({d$ z=9AzBM0K~oKu+2lyAltUKpT8YOL!9wP%xL;cWPbtnCnP0*Cew4D;{;sMcI8$Unpw_=$ z@Ha0Fj(xay7)?bdurerz^V%_82wF3hI8#>^37&$2kIf=_{FtnQa?Vw0+1ymO$%75V z?{MhjaDUlZ9M)KAe=i8fZn@u3$BG&CMK!>xnuJ5IkNf9V10zGH!4vQ`e;q@WDR)nt z4R1eQu+)!PTzKU6=@q?!8A`=enQqpFzNz=2;vGlQeq!7xg02ddENky0GRzUG=_+($ z(8}AVsCgyi^cA?d_H^3^)Om_|jOvm21y$bgvt{`%;FFKMSnhcnZ*Q@gNXG4S?V|OT zUGF>NWIo(Mun*gZ^fpsJJw?`77i|4(mdZM@@%dT4z*+Y*8r+XV_%K6WeR3vtHDSn3X{RnA)lz*Gql@~Zedts zM+vUzjSqc>@{WsBz&UK~hFOoo07K1RPK3Yp{-%Y|bj 
z*5bB`Zrd^XX8-6W?bA%=mC8vU#x&w*Kl{fDTm^{345xAGQer{t?uZ6jdbj! z@uG2@1mlhW*#dL7vyXofjS}Vj+Xi+Mcc`u?&TSoq!X-m+0B^^9-G}R)dLOmc=5})^ z47P{F?!zlt8$k6S?9s_+x;d|mIm^uq3{%6{ z2OeK#$DZKCpP=$@QC{b|cXon3dw>|FO`4dD?rDp?_C&0-gpAx0^XhGztz2`}qxq>7 zP3=9gy0IHp_TiYfh_7>-M0~NZQKPRScH?aa-gZTt#9~C;A#o^VZ^TJ^Gt^Wma~oaw zhpT)lPhs;+4DZm?^Qtkw7JH~AcQnJd&xOyhr#<Hz2khyaWP+z!YB6anrBJOSVn&3x5+7KTQPgb~pqWpcFW@kX>*l^tE<&Q{m&u*|$+ zxi*`CnQXI9OwDwt_u(GzF`1d!GZ{A5k!8=AXS3RJbF*`;LbSplX#A)#$;rA6KSgrF z=;0opB5q7#Ov30|n{fk!Lk6^J9+PEF&B@8kn3-zN$j-9PPR+Bv$O1}GZ7LY zJYw&eXU%iu9HKrB~2jWglP8fv* zfMR-X_BXLfRJd5%oGH9OrpGc_}liGfnY zsrSt6c{v%Gwlr&C-RcSlz044?_lsr6x2p7t5ISz2!f{a|dBQ>+OfLM|$0pf)^aT(gDWM|nR4f=v{U|8+UVhvvX z!WphM-`P2~EY^d(Y|tUiwjg7shhKw6wnaIpH{cjjrnY7(I`>obaUey*&pfbQhJALf z4K)#5Ft@({%hSaOl$L5w1-h;DBKN44s?z7$<~wY8M21{G&JZvtKuEHA8K_WTI+i!t z-I3+UBLNqt{u`@2XdkR-*dX8AvQTQ-S@UdJcI1J66%8Boo0>bzL63Q6nFg!+lnxgbt_dj{O2Z}7=Q0>-DD&D{>M`k8ovG%PA``CWMyNPJh z$j>J}KE5%Z&|r~7pMln}q{&Tv#h~EeNl92cX0RM1%aG`pp|NrC!-kJY7ttCW|}R1*6fTqb2H~KP*)Rb8>$$Dh1;mqJbMh} z`~o!lIgT7zTi_RA>KBbAVSt&7ap*K4uBhyxXhxuaVc){h8q}&HcY>^IJ85DYeC)nIgYx z-~Dh+#J}>?_kVS{t@Y(6YTxxYFSKSD6BpcFSX6w^y~|5hxK@^~x^MNG|N7JYfBwr~ zA6Wa~Ll3WeGbcy}k9FZDr-# zckKN8uHAd~?t6Ftd+#4OcdUXb)+)YnpZ@l{ zGnL=}aQ57f=Py+K^z$#(zhcS$A8q(9->_X(!}q_P|Nrgu|1kZ3tK)Hbch&Lu-_HMt z5fSs|MNoSIy$zmT)hpxtvsp1%kF$?30$;r*2ZX_`^cjj!E!)!Bh9Z(q1ZDeYedPO>n zUm#w_DG(Rq0c0WEV#dMn49hSgIo0mSO@jmD%gs=4<7Yc^^9;L5wzMpp@i8958CoZ1 zXDQcYhhe7L8CD>ifPVph^dsO-z$6KaOl>FA>>9Ze3#nHalRNo90G`*`?KLq(-uNJo^!uUi+;K6?hz ztkJhvG3?7-%wbWwO864@JeysOnha;bQFay-Ewwr$(?!q?8En`QZctY>>{gqvnLrvt zO^m6iakDz|h*{6>c|ASoGRp?V2uI*bzXMH-;Tz*IP>cDDHwVq2qE0FJoi{HPGy}C5 zC9=?yzI*a5Z?k#5-9UcLK|3(4m%mGC)3aMosQPqlsMqqOMnkmh*`+744prnr zRW=A+KZ#4zxICFI^;@l97wX9iS_hRzA<9H_}qLS~_}V6F3P z&^0a=UAyOXwRX?58m1Uw7WnTld9GHl7R~*>Vq9`u@~(41FD)DKe&WlMHyzyAy36P6 zfXs}T#_?m`w29E7ES`ojCI^yv(~>jhVW%uv9 zU)lYO9(CQZykF6Q`CSqiEt~+p4~03=co%r;)8UgK-5=e)@pwPx5g*;w^{(~ztjBx( z_}2E1rgxP#&HC=2Vvd53SuURGxCTXwM*){}-ye?iCjg%ASk+pSZ`$`998IGp{(~Ow 
zTj2J7J@@+f4UMZCe!oX}#_Kntu6zCP&RehS-XC)_e9ZiN;zxV$+oNe*);)GsUH6q9 z_-&aJE#3m~xm^71GV8k6&);M7ba%#)iTBIJ)7qF@x}1B`omWohS!Z4M`gC}G`IX%t zdaAB_{d`P&sjhqda@w}3u6up?QM^TW7kJ7y*V4f=o;9yWix&ZWE=PyvZ(iAb>swe> z`mKsW{q(PdaS-y3^PUWDrR_b zy3$mLmw7=K8tcGBV3w&y6+ih*O3knj%g#+kM&$0UKNUS|KGad~+ zI@Oi9rm|;@W%i>-2u1_V;U>GR6 zF&~yg^2A^o!tAfsQuK(=gsj<$7HRQ|W@6}viDICSJ|<|(oL=79J0UO5h8fmbw%jC; ziiNz}XMAQJlGSIp|IRNR8L@}&h)#Fe#V>{OnMMhmPv@kUEiEAnLj=z5V6?%sJ<8?p z9y~Z+O_U|1Vg0XXXpGa$(A^tP%FVE=auA0w;?1;yBVmJy;ml<8(2z+H?mfzuy5JIP z@f~LRfl+<$;tQl@%r@5#6qA*foRg6in~iB9k*hFGBFV*UzQ@ES2{DMJF(y8d@6ACu z&>QdQ?59a(Xf7}9b4;vAbur#ruDmO0#^Btd-b z3>~-=H9jv!9An5!9YpG7LQE2^8=FXt&#u8#eB5w6;P9vQx}9XpRU=wVX)C0`ntviS z$7jr(YviJwanzMiV2{PB1}w(J=R6M2WAao!@g|Nc8YXAl37YuO*C;jhu2&QBpo+~9 zU?{H_JibxE5hlocRBU2uj!p~81kZ0^o~}|>lpBcrYj{46)56OoD$_mc7+a%qy+ zuuMnZY&s5gq^Dzyfz>lQR~xb?Mr`!JYmCF5Hw#QVYbJ&9cBlZQ7~_S=>xMP%;QI+#f>oEhS?eB zWSCZ%R+wF21|Er4I}Vl~#+|hIUIdfMx7jf7ggF^zHO%oaiF6!HK3?j#)-*n^+TILb zKV1E9)1PTxt-I;(f5RlM*8y0*1nLfG`T+a^jRCCyn2%Rwj+u7R5t&z+UcCONV!!<4 zmX5*YCs+QbF#PXN=dvuos*C5cEMQg6|9|=Uf1DLIdSioYBOl)gD5KwqAu8iw0=y$G3lM6)7Lj`MCe3{*N5{^yE)M^joH5mibRzHxz65^tvR( zzsKU3Z%mzt{^&RHtdA=X={BF)pWy7@6VW2)vuN=Qu9`cE^y!}_%i5@%;<1i;7@A(5 z5t<&%2w}3F^7P{2q+aPx_|Nkn3jAN8z;CS)YEHXf5c94|T9Vf&pA#ABU@^?nXy z+5=zUJZZpcz)HY!KoP(RSOmxc%mz#YBmqVMq5xrl0f1mYFF+t50AK}l1o#8m0{j3i z0X~4L&mn678i4*!!aN2z2-pdD8?X`Z1mH2i!+_O*C4d}2GYlTQg#J)gaS}TXvh0EO zzYY1uL)phT7}ku7FGgM+NpXKH{7K})ihYIV zIGRX!zH^>}z~@G|MFMUEu;2ezz$U;^z*#_RWGooKGbHB$%rFsfn*vw}_zU1Qz!AU? 
z06+Na3rGZH0>rJ2IRv`oL6PC{R^z`w43DnC7f9*k7;rgE80yjeHB~fz4ECh|>oCc; z2Vv4(gUN9tJUN+|KTM8gfV&fhdh~d_ux|--DGk^Y)jJxnXW&J2G#)b^elEX^{R5Y= z=Sjcj;~CHEm$Bb-8T(_GvDX@~XWgiRy>EXnp@Dd(^n^v_051^;d-L0WpqF?C14i?^ z43|R922suLz&N;ful+ulfp4_&ru(8h;U96E-zS&h8<{n~N922n+dR?>gqsx*4e$~M zQh0CZ;XZ@m!-orO^A{Nz8N%Uk2q!%{o%nZ(2OfApJpJ_3;*B@n5M^a$;-il~5*IIC zB)=+O$}>JH`PEX~x8K=PSavU5ctCv~kY}o$EvpwUT(X27&dB}M2ju<>aNdn?UwoWF zSbQ&B$Zzccc7>{MRtMpWpEI&LUAZq@Qt5jdf#ojad*K1!Duz!_NBn&9>3{!$^z;Kr zUwy0i)6+9y4?p>gzZ&j*;79pqcs`jI@Li4c5A0IkNQCj9VEXF0fb_8y_y8wEX-E93 z5BzijU+3vT;eU7aiv895&od5wUmQ0r0UoL|5kI~gzrDBZO%LxsfGnhwKEB6ZENDXl z;Hl#Guf_dakJ^6#KDgBq@jv@uzcAf{`mTEKz!P0x*suK39pO&LM9C@yS2_vc|Ne&Y z`>U!CILqh``p91=Y^$nP&h!uZRyds~PC%v6RoI-T=0B^i|0TgRo#-uy_d4^*=4M9HarBiqTya7!C7C>i7KKBDy0l&rP z0|pGhdc;KB9+oWbyz@?lo5|y=SFaY&J@=e=?X}kwkAL{#hvLgGzpUZ&9IAgdLWXYK z(v+NjSI%8>bNV)szwOMn^tst0A4QbD4aY8QbEkk4=H`F*W6EZ{<+9zLeiyxUf+uF1`zU zVLNx0eoZ?|;cWo&4$?IYrXTtzSadp(M$w8+5JXpax_9p`f`WpuPb63j9z0mY#Kee% zgak#qv17*y?ApiC3RA@N>C?rsv|(b_tXX2toH=6Nym=xwH&@AzrAwEJ^^PIp`8x-T zSC>VK!ddZR<=j|tKV|_R%Eda0f(Y@VGeJC9Fhgv4AV=)F?^fJ>GFJq?BSo+6QuNy; z#gLs+gzl3f>U}Auy(dNNQ7LZySc;t;|gL-fc-#(x%lupOv`hg%DIte=qo94q9h`9glPM###Sg*<<#2LHZo5xy(J2P6C~2%m`X zh^sOK;TN|Saz#HO*NqkOZ}WvLUxPSb7V^tOwc*kENb*7WCI}BD?nd;#4?zA@q7x&n z*9v({KOv`$74pvcLjHA)kego?^5~)3@ZuWmDZ3v1oi1Xv2tu_TC=P)0D@B5kt#1>u zUyhJt?-6qT!$PikMM#{~BoCeN2yaFBfe1ee;b$QHB7|Rs@J}H8s|f!N!tX|Sq`Bre z!hebIl{MjeiN4sIog^|4!@Y>%NyM-dF?@y?DqBnOLq93bjg{j3d?_xhk>aP9rTFEL ztONhf85$F( ztVZ1&*gc?Y*B--bJj4tgnUD|{H!^m}keIlX-hn-Pb`KanyjzOlAua*V_)ow~TwF@f zP4qB)xYckUnHU>CatNZ2kBu2JG$y4NJ#_EhwTt1NkQf~^GA1@I7U79mpI$*hw-`9m z%gDr$<}>UD|Larwzyo6l=-S2F4M-+1{*faShKx+<)TX^Z+~Fb6@X#X#;fKU9g4mHM zH?(QfUUyeG26VLo$%LBtQ`)s{)8^K}DuN)D%Pw#S{&5T+GZOA?+jP3sz-`pcL;>Mb zhQuU}9yv08WJ(7{(BA+0>#u9svSr6n#F6e?K%akpxmmxhb&sDCD2<$EqYus-;Bv0|kD8ek%Tck%=I2 zV*JQNl?jETNq^v<64x)9D2z-@OzALeSerI&kck@ilz~lBqWi`piP3Psao8}G%Z@ef zv6jY`uxouht`gAyG^mIRuPU>mD;GG&rQ0x9_zHaEu)aULG(Y zHn!G1C1q&Ln8<6IHXeu+VpAA=3Z)NHaL-NBV%lqFc1S 
zW=u+K;*d!2T8%T@&1ZyP>u?;J0Iwd-q<@_90sBEAv5}}Mp3W+Ml<&wmFk(V{Xhc0{ zkN8p02{93ML!*AweG=7WJ7&9rx;SNKX>JR zirJqVH*TEjk1VqdujzX{z33LPJR@E_kQ*yryE|TNd~lfos``U(zWJtLU*UtYr^Q3)Z)`+gVdu`B z;{EsESABz1pPvw)fBv~Rb?TIGyWQgS>C@te@6V{d!H+-wD1QC*SFz@T6fdK%aOf8g zIor1j8oEJf=uj_L&qPDF7!BQhXy~32t>tT?pL|D*m3zc|c|@#{$HmL?OSIp$?TqM) z1~JJW;nCQx?uGCJ5k4B>Mns(+t{S=VzKmC;dC;KTK zg#L8s&;gZ5i1td0*ulSh-@bis#&-v6hYtQ7y4>8^@0x3FM1Qz_*TC-Gdv@>J+Rt)R zD|~n8(yeDt_-*aixo=S4Aknew&4GP8HSgOR9c);;x(D`c(LC@*!@_C}yt%Kh zZ_7Z0x#`+#T6OTh{^q_de0`fYYuvcyO%_WBpC%n{?$pA!IW7A7b#B+xt7%vG?c29g zv&M}7I{!v(@YSMQ^JdM0Fb?R_{RZ!ja7B<#_(u9c9ozQo7Sy+IP*32G^luId3J417 z5~T9i5swv5moDm4$S+iys0jZ@)pcJ~$0$Y#u zWg#ZT@%e9gNS{^!jBc*#A#MEtWPj_Uk3KpH?ao_g&Ybz`yYIgH8g0+#-+ue;moQJ9 zJ9q93#-*o@A3y%`?%lgxmSw&8;k;`6+k@T*PzMJvjzNP4^&o!0OX0t7-#!T)sRVxV_19llcteLIzxwJcdGO#t z37vxS$1vz4(Eb#{Jo@s>FE8D)Wy?}@`{D}=3&-T-k;o|Wj!tm)RRTPSF_>YH!Ak+ z!Zd$RJz+f{zfAg6%Hh~N82bU}&peVetN0{%ZhQ9ldl)>*PwyL~Z{Glf$ zFRu&<3F!)6C}KWP4k|xpK4_Ab-#MmIwKYykX{r zb)0-gzGvMqP1XxO$)8C%8hIEA{vQT=Ks?TJ5R z@fzs(Ls1T^SkEY18p;Rx-Lp(gzM@I`)1B=C!&1(8J`+!&jxrDblrjN%h({ha;;iJW zcrep1AG}Ov+W+*^PZI4+9bg_zF!Rw+nV5O;%*QXk{2~v%u}(g@WTbp>{t)?>>}ZAJ zZC~L|x-X<0ds50o=HVYkeFo1&ne~}63H6yWsR!x->vM1L6YKOv%(FF+2ZcY%dnI%i zT_M+tm>0@Jnr0qolJCja_48r!7kQn$bYlN&^67<9a)V>Ad?fD{l?P^EdIgRYHu8{k zLdsFVX$106Q=gd!qdxbcOoB}94w=*idFcGQE?W^zLA^XE{JVDTS|k6_#*a~kq688UwS=Tb5cW_@NJ z%=%23)NeQFzC+5JmpF|`Lb^PFWng_pIwAJEEp_* zto%lPgzC&Zq#zHNW0R9S>N8~$>vIh10%cMdWD?tFQzrG?hLh5D{Oil5_QW6k`<0=g zp+_!>lh(|Okt^p8mCG|?W#Oz?nV%LX zU5?x2rym`}t=)C|7^*%)Ch=(qf5>~q|G;MzaVK6hi4*Z=eIT99GNFw3>>sl~#5|J! 
zcPzg}ZbcqmM;=~Viaek$us$;ntk0B5Yav&ehtWkuK!n9^~O}{13h^VqWgK=N|dO3ooern0cUixqL8B^UVi z$dU{_4@6Y;G0@*01y~1pby3Q9>+wE6+q2Bk{;t6~hLKrWS^J19>m%#HvSrIu9z6N0 zeqQRA3BM^5SQmcl`Do`$HGK@$=Tf6S*9&*W{txkI*^d}8g5@BeefC+^_hO$0`T@CM z!GfARTqz${Dih*Ene-ZE(r?vg`7c*-HGf5v!Y!vfVld-gHb z8ufWK>H?pJkTva~tFWN2KOWx-|K7cO55&@*EF5>{Mn77W!{d)XF2{@+!`q)_X=$m% zoRYl%{`(a^9N&}P%mdp3wiDzl0>?1S1LZ0CkZmDpw9^$Q4?j)(&&z#JEU4{cpf0eF z!TL;@^x&dIqW_!YFX;YCFcn%r{sFJA!MHyZ>tJ3$9hPvDI9peuPDp;=dh4yK--Goq zN`4Y|;>t2&9!#Je=}q3_IEQ^ZmIdnr>$IxRC-*z;6o;@uVjXY zhu@DiJxUM4w2itfu||*$6)*X{L)ytm5P^`2i6PfeTWN=7j#bpvCi;5 zjv;;z{^WZCd5QXF=83pb=Zv+xs{Y2t#>(jEXo=M+svPLfFlHWTntWxJiCIrrKZv_& zl8-oMAYZW`$g(r_8^E8_Z7l&;^C{Qdo-QSOgJ z@1y}~&iQa&=V~6+e=vEF{U4LA#OK6`6AB-ctwi5~tBO^9KzU2>o8kesQKU2d6Zp+M zP>zzOl>e{1@``-vrI%D2__x3PO@*V50CdtI+oq$9Tmsy;!|q-n&3TWjoxf>MeA)k@ z{6~FU#c$R(n#6_WPPqU+RecG}lPKAWx~|$c${O-Fd5-wA9;&_ zoS`h@L;m5Ih(O$#uSXw!lsd-8ux;pH#hrOn_?!KI(7T9vFy*3|7c&nuNpH#~wB2f) zK%Qb->q%eY%5UP&vN8J*oYx`Wlb1+y)*;A@2Y|=#!Mz55PyVNj@GOsp%7po%j(ys+ zX%g#l6@QyF_cZB`yhL87J=*}{ZqAQjjI{0#;I8mz8s_*HZO7>0QxzT~ZKke$qni97nIPyJ4Q*RihsRr(*)FE#!}`(4Dm zkp48ye9$!WL6bD4d|_Q6uxx0@a-i&FJ!P3u7I7|tb5P6+>5ephkDRH+xbB<6-{gPv z6?%|Pge#Q^`}gesFkhUPBapXf?%K6W$z9Tb^_Bci{8J^BWNfqwyRuY@vpQ^#P7GBYvF-w5dJ%X zX>jcW*J=FWM>^5CY+JanS~Qgxhb&xp68m#1brWyu``>4J8}WG=CU(CdJv@KVG~+Yl z_rTl9=$I$ZXu$7Wr{#m7 z?B5c|&zJ2hqNED|(1OeJ0MCctoKdN-4@7hgK1|CWW5Hmw`6F#M+gSAXr2D>__7B9sFPJ=>!?R0cloIeo{tqyhI!b<{XC6=f4QKdKNPG2;}CQjF@w#oNwej z7w1=~6X*QU2j`?bh`e9LTyJN}Y3LPFp?jK09N2f_*puxeD92MtxIPcFnHa?fjJnTDj41h6c7RC#6*#5Fz<2aY;vL8*F82MLy zCp|{}m-A(}L3hjfKF*cBR_W>5;5#R=Jo!PT&+#Dp2n6ClpnNBgCTzdH{qke^bm2rb zFF}1cpBdj`zUv!(-ZU9FaNd_|AgJ5toUh4`obTm4V-(B}>hfe~wka6DrlOxak$9N6 zu>9EvCN6A$-da0P>9jfLOr5-$K5@7mIB?#R^XG~mfdl8-xQ>MU$T=#`b#k6?3hD@+ zJw};yW;+hOOBVLu&0*V2FmYg?gEV2^`lV&DYOas-Gt?vUVcG=Fy>kATbAF!u$h9n- zqv2d1=a|TkL0eVO^Je<&2buGWY**P&Gy88QE(}4vGIfC)pwHx-FZR;O0@N{29LSFo z>+vJj@2fh&wKU|%dh>LhnLg%IQlVFv=!pl@CZE&f!}&JOk#N3{^HiJvn)91;hTr^* 
zn`}>D&J#FrJrC;y=WO|y`DecZ_g-Y>=jYF1ACzF?LA?oS!H4>F&NXq4gL7q^XX5+@ z=gBz7#JLU5k#T-$$xI#oa(9L#4vHT!?`qC{Gd|DsDFX=X+YmhQATBKb@_UC!&Ud{6 z9H`TzZk%&03C@pjuF#1&Dw76G%u_~l?iNoC|Fdpk zZ*Lal{~Xpu${mgiIJP5AD0@hcbJ{=TzCR_Yc>?OE$&Z}Fr*7J;6P#z_Tp#Dj@(moA zSkei79)B9rK7N5VH8Q9(`G;QTfD z@u6i?n0$lnKV&QW+nt#{<{~~DGK9Ulg=%gZXDY}QD^{rS9dV*uWIZ4rqyfu^v^lGN zEZ<*0SH4jgqt*hL{P_6dVe*wf=ScS{bWUn{tqvJ(x#gA+=zAA4ADG9?o-t#_ZtUNd zbLY-gdSI-PRJsdu>_eK99;83VcjN`qgZS5f7$^K425)(`wRO_x2j;~lpx&!-AlrJ* z%W!T9{Va)f-zwkc_=|WDNDtyszifyf+bzltj!O~!Kmhx9S9&^g42u58H=Il4yae`{ zO6p&U!~FU472PQh*bXvolLwd<<$_sP(N0ujUS<|}IS+j#o?XT@Pro&$>P%XJ_N#Gu z;x5kdP)Cmq|B_|F`8Bl5DjoI*NOz`BV4sYAG}A;|^)u*s7vg#p^A+DBZ4L!~>rwp2 zI>@-#waJJ@lnV?;Tu2MbZOTmIxoz9F`;hQ8zm+)LRsF~O zZ7TNBO=Q}PljTeta87{4xdL+Z=+O!{;y|Du)&cq>->{!VdBJsw(1ou@yc{cB&7bc%Txbi+A4jf16iZwe?xefePHZ4kl632 zbj9o!Qs+c|`~zvL^qF6?{Ut9kzl@VWna4WEy3Kr&$GConYcH@bK*@*SnQx}er!(_k zpYERYXBiMEdnx-^&Nvf-{P!*NVI_!z`jg-LaIcBq?*!Jb-;!^a#jCnA_UwAq?APt= z1T6qw0C5#Se`c7@2tpVFI}`HpPLR$9&R}s~vTxTaF85%q7UOR40YAn*`skytVUFfJ zWYl+97xG_EJ@r%_xq~@?Gl(Pmx#yl!_VDAgVZ(+5=(FE>@x>Qad75n)%Na8LL(jxG zpN93%)U*DAI8K89&aobHY)pQA31g6C^y|2;aR|m%4`CdLXNwsJ{gS8Ihco*#W}Tue zOToB_VNN( z4E7LkW*i)gbL>T31jl_GcZOlyNgXx&RK%Ndix0;z97DbO=lPOxcs_23Sn71BN8p%^ z^kX@(4Prlt&s+DztNs?p9~^sd?8Ck;#~Uv^Tp}3<$KfM@2g?s<&!~BB$}rO5TWbV2+WD%} z*1X}SfAOp}ADn2!FRMCft+C+~U+~k!@M%@2tTh!;sd;(0gEDimb$9L=S1T?6%t*&2_Y-kd0iD;CGU2)%Gg5ma0r=)`x$X<$YK?b6;D*I4R8adPhplVug1~SIbL1u z2t-EVp1VwICVvNaP0dTlT97^0mfO|p$cUNAYY+!_O;63tvvs{Cs&7r)bpzML@8RQ; z)T8>=!8NLHO{Nh%s;^1UTasduV&dWwMh~An*?9TS_rH%G?0xWTxJJRg!It2T!QFxb zg9itX4!$G!(co>thl76!Zrra^zpwhm_Mg_ju>acrPxgPS|DOI|_OI^WCS+hpWXSZ8 z+>rZ1UI=+BWN*l+kkcWJ1~ePcWNjZApb3NS9<*-IhCwe3dUenTg9d~}g^dZj zEi5H0J8W^7Gt3osf7o+jo5Bu+eG>LX*x4}O@Dbq?!t=w6!tV>eKm3*OnGrb=-$ zK8>h|SP*%4fe_wvRz)6w`8xpM}@0VDoSxRT|KUn)$^)Q zolsw^J~c`IRLAQjI$sYk6Ad!yCd<^BQ>NWqG}p{c^QGxE1MFb?Lp#!ru@mgWR$AZ2 z*~jeTcAibLPueuQ(&pPD`ztaT$d=lO5t%C*4`?=aeO;19YgKt)I|IdXZkLSLil< zNq6dJ%?49oUNeWy1=DB3tmE_G^Gpc@8enOMd|z=MbftR;pTu9`fh3%aArlEAY2<10 
z9C?`(lPXX;g!=Re`X2q9Mzcb8ltp{kd(>Ott@PG;o4nnS*pIyPp!TZQ<@I>ucsx(x zxjar}iW6d>oF|K6KS!yD)H0Qjt z_p|)%kkmc?8~#E6h=0=mv)=~0amByw_XPDP1_Ik)382AfILs|W)6s0S2%Sbxl3$TM zq>+3}9-+g%2fRt11$|XsH|QD!^OVZh@_fFN|B)Z%CwLQY14snPCp$?d%=~aRm%YH=Vp06JdS)|hWWBWR>?jY zref8jYOShJpXhEKFb|q%%~|t(JJiP5Id%)A^(FrmzYIL73;K`+KcHZa13<6oXbvhx zmFO+>K4f!?yVKp{mbn#fl{*?gfIU1FC*h^QtLJbv?!yrzjw~e4kRnn_4w2*J9def3 zAl)QDqv#}f^_W3d(G9eYo~9qs9y*wXv$1SE(`+i6!{)PPEQ76Oo7l_jRaVNX*i9A& zPHzVi?G~rR*Fwvwa<$wb%VfG*rRvm86{^Ga2py@T^ms_=O1(qx(#5(&m+5j{sjGF3 z{#J*WP!nc`n$?imV)KS6H_^K<>z z{l5qOSP$pv!ZHMs5aYBvy-t^VKhDIxq@C_#e`8&2GLZI~H-^swmLaiFWQlcRzigBT z^h~qdYzKM_vB<(PaDfGIom?l&y@R94EE3D&fyZmAleeitkdX>3u?)ZTcICcoGI~a|Io=HH9(Mq%hy^1Q( z2X3D`5|72vI2I#JpjkHIQd|z*QjKeHEw0B6xDnq+hLMpZl0=a_U`Qo7MruhtX&@KL z52&GWbOxP86X`sdWU*!COgP#SqF>oehifO4UZK}JSAR)Z2U!Rl*i=_xj-#aRqD8EQ@sjl zrl$hWr`fqS*{0h(TX2`nCu{?dx!vBe#lgL8fHPcR8G;VEH9*#Xxg&5Q_@qfZX{Cj1 zHyiGa@z!|zy#IJj{1WfvH|};+n7AT?@DBblD8)&2(ty?(&Klr#s{=Ry2wZ5s;BGN@_Vprb2hEhwf^S9Wnq-HASVU zRJ9(Ow^B8!PDp%=o}ttAR!De_ZiWuNrH2~0AAG0o3@#`Z5u{NZnuX?}6qJTCP&Udz zo6$B@go;ra%yJE?M~$c%wW4;Eg|qP*oP*cn&3G%`20Kx7Hy`J{7S2KI0?@h!be03b zsznXxZ2-kB;=Cv^W&fL{8dGcPO@nC!dNrFC(`wE`Z+4i=rqgtpTjsX;3fePZLu{xG z1Lh2a4vn-?c0AB07Utr+QaV%wmRpmdYp)MAq;3g)=ax|H;u)du-g@2jXOxx} zhx0`nmpyq&$+?{cy?@UvyriI%cmKsx3QkwgSiv0i{6>LaJ!cjS=2>#?>VjL;`$c*9 z8TsdB>iLcD)ZZXw9;pY@YdxOp?j7Vg^SG`Vy)e5yV?9F(275eh#60H4hAViMsc_UQ z4+&GMUw-)4`}DMt3KGks1*AlqdNbwo0_hgg`;ff(7zuh7ed_T{l)!Ya=dtNLscjBT z>HGI5ucw46LR&qaQGJpBi@+w2=TAq(g2!`4^7@5Iin-t=exyNd{0T7(_!shc=A6-R z-8IQ;Jf5dcCxJ3OXYjX@zXAWEwE7Ge%+vLCNua^U@c3B5{r{rWa>hItQ1y|%dWuO8 z@Eq_jD&;lIZ>T3u;RM`xzQM~;Uj=vl&9?&hg7g(!sXkoyCAi6?{{R05U?5@rGoGGN zR+pYqUYDL0nsK?&w9{bx4a>i07muoyM!Gp%NlcX!*{HQVNZy3CcDdj;o|#e}%S?8R zOd?!qq+{Wb{oS+4lPR7154cINyiLm4Mo}A%_Iiv+XX-Q~@^tNIAg zGCQMrJl4yRRixYofc8MUJ@=5~7*-;@FqWw;sM=zG7$l>-+bf6XLz$%q0E$_5yjr>fazL{=q_lR*qo z-Eh+Ef8Xfw=oSC;y`8-e1V0aFC}d#l=S%XH>zTiMJiGTt4jR_(zS?VhYfo4mRBl)s z?5|NRRsN;G>(Q4n7*ess-$=#Q%XIj0*csJBCE8Eg?OPAhZE1&LZPl03Q;$8HmQ=h& 
z9^c!x419%PTe?W#w5FW&Pcz<>?r&ANQQhzD_p8#$r-uo8sq32ly8gS}_EFbGnb{{g zb&-EIjMSd!xVjdQZPf zzfzxl^;I3~52rD}-hjfK;NM=Lf98oE=(bWg(~Cl|39HSpx)Rnp!+H&>9B*D14nf6U zaB6L9X-kzj!I#Nr_6*WubUhiK<&xQS$Sis_b(uTIx2#DWWtBFRc|9#_l7Xr%PVie~ zSt`W?HQuFpnauNuqA68ds@6Ecxxgec&EXw@>C2V~Fmz5tD^Sa2xnY>qOm>1_1C|*u zyy*p%D!{2CHQ{mvRcpZKSz8#N7xp_&(|zIHqvkLmG|jt#lAPd5mEnPK8*d=XeppB~6Fj(C6>tFsEc=WMPVnb> zz^**td>8Oj7jU!yeyRW)@_?K2fET)eIT!HL&!zm*UlF-nkl5KA{sRwL!JOdbWR=mM zo`91WMut|_f&w^R)L`(cLHTvzN)JEdJ&+>9syJ0h3UY$)ELVO1H5DbSPWsYHT`}0k z9K)I!o({%i>CsON^#b}^0fo9}Il=w;;B<2>y-fAK8JP#va5%wx++ydu?LV6`t^4vx zCzG^(eJ@Sk94-^)SDt4_x=_U&ng(AU`XLXO#mn021V@weFJr6AlZDTOa)TDHGt9OE z?M0Yfmwg-LQPmyG+~qYQt5bs$nbXe%mAT^+nH#)0O?$D+eyGUfiL6eR)cJPli)%g6 z39D0fB6H3fw}}y5-BzhV^o8tX`cA7bYw9>e)O_(X|&7;X-?s z0#1#LgMdOB`@%*UlFz|a!rCE7Ut?`ZME*_6)1*i@&ZDBZxw5eON>6HxUOg(Tnz2n% zX-0@t5E`!YePB4N`jQ7TY?9e4tqf9kN!WhVuP{)W2iL354yWFyu*iR?ZCgIWroA!{ zZVNum$Gt#Y++5>SwZ$!l_dwiw?c=8n>!7iBbG%|>!fGe_lH z%nk%(r6~m!)VzUOWZc9EF5)Imq_0nu1-MFm5EI0zkp!9VZku0jfpff&>%Tv zPh_i`d*R_N z?<|wytay%eku2!mOT;j>oZ!Eobt4NAi8iU+9Ru>(&%wY%Sh^T6;+G-ac(RX-lovPr zU&l(=h^(D|x>|^ooMTv@8`kTF<&)|Pq}+}G@>j!014I^=M<{UNTPg#kj9?Wy+OUQg zEG`Ak=H8syfjRU1#jVPmbmJ%{qV;;RoSt4y*uHj*T0a8P@QWlOb3C2Q1RL050yBk& zeHR_0s&DarE~)mrqa|(h2n8~!oJVfxS9QRgW61dVag_ZTx#^___pMr^XQmf8 z!FP$5KJ|%bz5Eb4Fb3-$zz-47YONjC8!>C2!lykMd@eC3UGzB%fYuDPcsl(Pt~LMCO^it587i?yt*i;FTiEgVgk{TnwPEf; zWJH{WM>Uk#d&?-NrYKdA`?o~QHZ0BFC2`g^d%HZ-MGmxHnj1;Eueu4mz9O%kye1-_ zr2L79oeU+cPlOo88}75RuR_=%6^C*yj7Fi5W2W?dD9e69iC;ml8}+e7JxSD(+LnJM zi(SN=GQ#8GHFZ*_|BQVuK6JQMz%C;}XTFt?fu|Gv&0io%!TvmvU>1t#qF6tY=>&`ynQKHQSu=@${74 zakJj`*k_)kV5;eZ+ZN{7YQ3B}y+v!ehcUOGRHHPd%&?ZU2@Rx~Nv)7Fp=mI;(tZCS z7}=aW#%dIyrNuw*BN$G=ZvUEquv+;q-$|%uN&ukqa~1DoO#lgtXopCxr~O$bZfb;mbUE#^wmHEq&rna7 z{XD}6G7f8h2nj2SB!=8&*HNEJsK=C zpO&0fPb$s*-u^o6>GZIB^4Jx9LBm>|JIX#2DEpYc$X<@6c9(P> zaesZTXu^tQso}IQ_BOYrKAEs0#cr&hYVsIpWSxH-ah#gTJFRgT=Z?Gw)xjBXPyHC@oE*`JZr>48M3 zK6gRmd7ji*`yCahnwtp24;~Wolap+ zvxGU#F|0-58x)6{4NBd4%@(f>MTZaxv@p(|Pi-z*o|h{7ag#2$f52AN>d1xlrH!f? 
zY_8YXcfqb?Ix|3pw5G^DixMp1FO^IPp~cXpy($7^a&u>8)%JvyO;~%3H2d2+YhRs= z(uYR+fv{Wb+C-!yc}gO(E*TasUt_;5$^a?I9ku>8*`Fbb2q`@rZj}~MXCL#u>Pn=g z7s5*YP609-FK{4y%aBh&kO!SB``Y_L6k!=1p$$2P!IlSfWW`0-Z!W@Pxb+%!C?j(y7)(Vz_m%xGjecEsYu5dqM4WxaROsfAr zlWC`jF=1_}V>f2KCfb+bB2&22&R;2X(uwu}!c?l1B%5ib-t>fZ9Wzmg^h%k3vgjtD zpHQ7>Nmdx_fbG)+%jQa1cKiEw1AUuUPX9uEnzqXPggx0_Au`tqOpg9(;daB`27N;E z*;&w@z3V()?g*g%+i_>1Ss!3k(yjF(g4k!rn6SHB+7@oHW|Z$;A=>FwamDf0ivs(WDYCUoov1{KjfStQw-KGH z5$z3owB_~XrSqreroin{8Zv4V$OEaj3SBP6p5ck~EYP~Aln0W71-RJineVgi2pjbO zkjHZ&b*p*{*(`Kc?9If^VW3f$7}m{S;INgh1nBZVMi0?Zu|7sk)|LEHG|u)jL`NXH zk;rO{UT&lvK3zx-+RH&?E{*+)C`)m6L+A;eEbI~Sx=#lJZG|koT(^3xkE!$tGgW3k zM2T)?QtplBj$KtBrGYVfyvSQWl)?|fV5sOa@Dj7GD6ehaetR)O&w*I_it^k@y}QGk z9L{ebIaJq{zgOH))Hx+Ac#}+-iIS;P@sM|z)$k5>v#T<-Obn&XWIHR%P?Q#{^&x?lEvS>0z#ClL4WKJ>4?tSjeu~Y-SJZ5%*JPH8JYDGoulOC=m)3m?<#LVxhp(bfxYnrmw-Ycj=iDU^-|D7n zF-IncRBcZ`9R3q|a~CZe0`<(TG0F$VR+~#jOEGl80LXCF_L_%!h}C}5(VM%huOR9e zNgYbUWdPqjSumP9p=Q#3IhdK>_9N9V8%1t=D;gkI!fn~bjws5j3wf;zxgF&mMi?NU z?1Q{QATMB?m)MOA8Dmwv>^K2=^*`V$xao1QkoKQ~fMCC1sIZ^H?4?uluvG4`p90o? 
z+#TxTWhfav_J0#aYaga~R;VIKp+$fS0g5&+3g4_fH#0m>UseokOsidsK3x{W4fY7h z*CTsdRx-)<;2yF)$xm*i)nmS(aLZPUlvX0w+mG)E5!0;^^wBdx?M_RlC%o#5~2aKakBiB5{9cozIrtXFfCO4t59 zWxFD+$(rX$>IUj=gjHhM(HN3cDIrAk7`yh2e~D;kr8i>KV3CXoGo?7>@o!RAZmeOU zxtIKD|7_}1ihOxB+hBSAtYo+RlO-kX|4UZhnq9ds?f)x@o&Lo{4leYxb^4nLv8G9e zeOo0%42jtCf0hr=bAo?l!2U*@#UGo(e-V!c;W+5r3Es(zIcWnxER`Dw4jb@np5RaUo zq?@JJ5h3!x`0UYxTzKL0t}e>eVrBeIe~^AOJxV2=x~PjholRZ*N@#UclE;+QO?%}5 zuK&7C0WT$O84SW>pAWsWxR?^N+w^$d*i)!&8C=3+k5TFA=i7KOTAUPHKL41vr4Um^ zS(ZHPnI}+;o8u$qjmz-|@Oih*PVk?jA}@<^;c?VAnBGjzMJm6zX(>mr0yCE#RbmY&;rwqgUB3N|~=KSjE9B!5*tk zUZ3(ka^))wV41XoIeE`-on>8at^iSrx#p3KVliGV4B2moC?EE0xNk?fZ?oLDLig2Jc6M^exS z+vU9{In+EK4U9l|(F){;^%%;MMQfyL?rc%$xyyL3t}6Z!OlSuq$Pmq8ruGX?DP&0c8Lk+PzVZM8T6exqAJj*J*0|+(FH2Dc;S_<8{Mr-J zhmCRTANF4_RJ{m(NyBiQpfM9{k$lQ*U9|=S5WEZK@%ArOg=bMzQi{ryUIp0l{W2@+ zk5u4V>IU*`Anga?K_cy%Jri%cfXFct*(8yf;Lwj$8%L;u6V-3&s^3P%W_unJ4vc@f zt+%?}+5yX>PeKpNrXB$_CZP4$!v(C)*LJZLTOF_$9|=6D8XdfflJ%u!A+gkNv~Q?| z+1KXHn7@C7RwIVcu&>b{CwZDtzuoXLkcqbf_8)0-+?x4Tz{PI@8N^WLW+wk7U{Ux_ zG{5H(*&MM+$9fH&z-Qe7zZbMH;K>pB@#mGn9&JwA_A-N=8oo@%*?tVE*cBIkCObSh zab6Boeiou{Dp&fZP$Dy~8Y-#ku5wTru#fSk#u@dimp_Wo&5k%z(#QmRP}ZnSNtclx zV!s9ZrE{kfeg0;m&5kk$OWw9c9jO0Gf$iYRgykpsT8JG^83f!eb zvtML}xOzvh@+~DW*>izf;^s@8sfI!L$qg)O4D$#HPyY~xW%g(7&XvW}vk!Viak9?- z8mTbd(gOPB1WV`-T9dCB6x2-pYjXYT@PtYl45b?8#J4f9*`Lups${(xWSL)NLLojYLGXeQ?|go$vHE$-3>q|4=%GK0m^fF}bU5hUNIp02qb zQD!>hC55363(zkS%9sV~kk9PkfAbE#MP$Zc$w$D7O7U~t1%=Qo^oa6;NR}^_g;CTK z$YGlwWNL;)uCi&HxJtR@_rE_O?a8N$d&3A3w_;%tKNNf;PbYZk&qbIL_4gD)QorE@ zD+z+25`%n2+s; zyFobJ9+?n{GG?tqnQh2^@8v{#rnpw+#w`O=9{VOLO)$J)fI+XK=rhkyZa#cRxPCpR z*(Qct%)*;vnP?zVoUCx#;=WC0&*=>xXY`NEo?#0HEiI*rj;8h);>Xfs2%G!ZKwr?- zVLYT;vGziEipbtu-Nv%?X8b>f>~e*&S}CTZ(*H6beL-*faFy=LH`r_M9{@9>4+iD! 
zZ6qWz!3c4J5L~3DmqF$cnxv--C`2u!ZaK;blDV+lUUMGMuKnpe*^Z|DS2)C^{Rw$6 zV19h#a)ZS6r++d2;=J$m4nw}*W1sx4JFSkU0Q(RwD`NAvN)H_3br-M9>3C$NcF#j# z3H~3iIFQKP8i<<*1KK^!#EG))xR|8vjz?C_pSfJLutsEsp-tXo_%<1lO%1~Y=Et-g z_qRw#Y9yMt?MCLZkP&&c;Q~opAiYAzTw}C5J|i;jnRQ-B{5S)7c zGqfc$>1G~ZZ39*;M~PYk_)KQ>G&f)D|82f0>ph;AiWPE)*f)M6ea8BP#oXNGP2mOB zZ$@Ud?Ei-$W|_OW4+B?$6J?_BqCcHiY-0hTajSYG~H=8%=YoMk#!C4N)4EP z2Gq~|z@o-<``T(eF|a$_2;oA-5#@dlexpIw6JQ`W;{?? z_W3Q&3yJSyzpoG`qdM#MK%I3$-1=U1UBw68dg0tUj1<;~SSz%ae}b|qkY#iw($P&d z=V|wwX zGX{cXTyKDTr;Ntdw0gRx*k{&s4{*Br5sDVXuZrr*( zRP|B3!l^mqwwXresxoUDmXdcG&U3pr|LaC%UqcDIgZV{zcWq%Pb*35dBnQ(gHVh}` zSWst%7t*t5yzw-qfGG#+e4Vq}Nu@+eS><088wc->D zkLl2w7ppa^^-=0c)caf3P=>X+W0-}u{p(_17VmJf!7kOA07MlA4 zT8pjP5!};7ROv2$b>G@LYi%sD`OZ`0?eAa=H6H~;cUHvBc3hcfdwZ?08N#2n({RH%iY#AVF@X{aZM6;ZPNq@v@cEwGIvqagdY!xYmvAK zO%pN$;_i{SlbR;nKOpX2i94!k!hcCzv0LwA;;bJ_3zS<0^pIBV$`vW(;H#5Xh{%eE zE(?e&UVT|lT>6W51zV+$xQ}1Dk4JeZncgbR5X}AwwU~^(t5ZH~YNK{{kk#i^bz7Tp zztfrrGYTs3YZ|8ljlBVoM2t^s{(%Y zOU8FIC9@g$dNInhWxJ`ZuH~atv7V{(*{8pWYO%$^D|d~(j}>x>xj&FR#@s(7 zIn3N&m@G+P@~V!pHqOhPm9SoXLiz_5&>Q?#!XCg=N5`yZg*-a%mEZgL%lTCnNvyF- z-(Lj*Z5EY><$sf~uS+I_QNLF0H12(r49K%x0gtl+ajw!T{VPDZt@clMvPh&0!!)mm z=6#EG?xg_0k+)N&)`L=)wNZEek@H*Yc;KvGEbaEw?$l9ODm z8vPQ_N|%n?0VzVDryusjzUKr#gr0CD5&dRvA#4uH_B#>1+EO4URID~k*f_6OCu73@;^~)95XU->A~a;K{vP<#e~R zgr)BYbhnpj&1oXV%x1oG2_}OYXin_v>w;JOW)oZBc@8RQ5wFI6}ci7 zj|6-5Zaol6j;PvVZNLm#V4rfnw6@35nw89EO)FcBrn=ReJLc~+3n5JiYy*|s|BYJ1 z>ft~_%PRPm_N*whVO`Z?2aLMXpc2>+U%p~3np3sC&e|Lou`q5ou5KX}#InX;a$`nGs6nh+A9O ziW!rEhYYKAXDged$S28jBOQyznXp^$Lbklp+49^l)lcTkpV4=S-lY(`!O$9d*E;(Q zNBc!bX38O%4Rct<;WIZ4upsoySI}Rty_$^+ws@A$-R@Zd%-;X1u)25hoa# zX?8OMWA+?M-D>}uTxRuHt%d!h$9{mfba_^ot=%eOMyB*h%A$~%70Q>#L`Hx*yu7n1 zZ*^>pgUaYoYNP?BrONH8ZqcbJ_9Q~FbSPJYZK!&6k%BtiKABk8p&>WK9;ser^Y1g;u}^ zV-(7H3-6yp_WKCCsJ03g7vWL1)w-ZWDL=%sZXS57v@R$_C7>{xX%5drDVg`fbcyJ- z*)R`r#RJZ|tOU4fL*Qr`6SPtdg{U6qVMDH3BXib%^qXIurQnNMbmec5&vhfY#40B( zFi?e|^5HmEvZQfv+2ZEmQeUo&n1bX{je|oHLvgK5f~j25YV}0z+YXB2W(QmOoHzAS 
z?tB#J6a^xFi4+<#dyZWYpuEiFP_6`%|FH{(QdVa2jxUtste~XG)&{gB_l4C6Rjs!- zAZtteWDF0t8Y+29nM&-R{#!(po?bW3dziQ)jVy(h2*F?;T1p^yi))(bWQAU0Sih7w zDTGC0^wbpq#!xFHN9{I!PKqNEe&36v_Zm0+_QS7ACH~Ps8tEUX%$8qMZ;YoHuoN-6 zb=0f%QnX6bLiVZjA-`kKn=aJ)8fAH92=Sh>hyR0VIId|MEi(5XN{#FEHxLHNXrm^w z=DD7qGgNELUv&|GFjs5tKbShI({J=a76X}k#oT`&#St@DQK$d3e3}(4%su}V?xV{7 z6m`$>d+W4{D84u>|{3nP$tO2K1!Iw!;%SYn6*z ztMnJ7*)`Q#^T`Bs-}<<3r>w;0K7T{D$+1zUYRz(_q{+#ua1Y?xb7;@6Xk}GqAM*hb zGWkMcPdg(YiHs@qIZ9NL!d2{+b!hqQZT?KYTRCvoEAq~z(2b?*vQDtN@^X03em_zoZvHNq9sS%(gnb|B z%PH7Xmx+DE!q=s9dg!d`Z+erbY0nPRp4GjPO$&~N8+>iFzSyVxUeNUj{?v`hLXElq~aU4_YVb607yM2{RujeOqi+w*SUo_G89 zyz1LkC8)E?;+aJs6{=2Y&lL%in=5L(6$OGf&OU6g7vaJyo2mq~!z3QOK_&o`Sh{NB zrml|k;fdC_Zhz5-&f2G&FFSS{Dm(Tbb{WKGSdT0vKh~NTd5c*)B2x;r=KG ze_?lWXqm_W>k%R!-hz`u5m7m!jI?SDyu#akugu2+twlC=-Tp#G%&WB=AkJk4vuxOn zHKj1^fA$rcKP8a%KcfOAY5yN2@S3QDF}!X43k31G|ITCCWN6RDrj^ar)6K@x5XaVJ zixJAb>w-3YE)(F+5r8d@9Xo1rYqNJpQ6JDdY;;0RjxdiqM;i4T7dIa(Yo|c%M#yzK zB|XQXNB@k!E4#bHNgS*60$!w_e*3S&twIy#yf^yh&;ZQ2>m8P;wur;~HtoEch)nS1-rNueMtPyL-_Dw=WmXu(h zJ4=eO$`6NFHUQ$~%N2waYX53L{7+QatW-JMl2fI!$rM#B5lvZPBKG%Oc!Q{y8i_C} zHriLZanl8i*Nfa&DaM}chG$7q#{UOaFEOW&G?&dGMp}O9r9@el0N6nR&fGEUMd2xc z|KbH6HB)ydkK!DQSm%qSw|@Jhjlh|A_sB|UT^tyYvQ?#YhLmjecf_;25G4jb**s+b z48Z#J%mh}nCP9GG+Pf=UlZ#Ze`1yAih5hfnh;h?1`Ye5^+tT1C&r3_E@0Jq`WeC_( zM1j{Z7t%(SlV7gzf=G(dX2+iVnEc$LmYWb}vVyw0Jx-7zoA`+!Eqyb$Gp zM!N6)jdb7p8!7Jt8+p-}8yWK!f2olw?u(6#d0VBGdG&Gc9{LUGmM-|8n6QSSc}|{b zq?vMS?Vqs%)RBlE>sosplR!5oFx(94Hbhl4)6B-t73b^7E0blT?Urjl{-#j#wqBX% zhveX7Sg*`8)NWDf54C})T!rf%v*A4vd@`2AaZ+z9U9~TxT8eQ2{wVBL*}{-ol3kLL zcTiFL$$@IkIY#|HJm-;LP)cmFXRL>rX0Gw2{PP#~&3yTM*DAushZX^)S#{x!^6?}@g)5JryitZojA4v#bS7kYC?ORS_4D~YHz z&??CBlbIY;L@<^#OCPQzYaO9pl4K_(;>AnVvsGgASV0=>VR% z3K40RI$?u1SYFP`?>GnUHrKC5Eri)(=XlJU$_gZ2T7S43tfu{bw)Zk$xLg6T&^?on zJZyEb4^{`%o#6L@Tnb)fE$ll-t~RUNm5C+{%!?qM;TAGT+S@mB4;`giL2 z(=Oj-Q0c*yhW0=E+(KI%LyPr@V#e9AfSqb-!v>EhGd+O)(FvY(mug<>baT?RM0+?T z$%Xm66;0&4SKH}Mssf=O7zy>W5A 
zAHwgUq@TH$8q`_9EOg@Hy0W4&Zf3oy7X)>Aofczk-%*ZSIO*Yb!503UiWisOE6!mI zU$%wAr$rl4=j#Gz| zdwwP{tjw<5fkJ2bP32my*vL$)bb`m-sjBFo^X5L}`Zw;>P_x^lzj=%G9=6v`$(CC(?kCDdLM!gn zwB-%usvX~V0W;EsYx@BEbOPo1^Ea}5S6ZhhUY2p$qlkD5`p3ozq1w#tUUgXN4UAnW zr0=&IBcWtZW8|9T`Hhi=WL0BiLGon#GO>DoX$FW+!@dbrpdzk82z<%wgCt6-9DT^H zT;L_8YJa(aGIi^<#lB|t_M%W@1Kb9=qT$(1evX`*`)1sISMpXf8`7Gg9FLhDl1%6o z8c5VUD1*z|u9~?5@KVWVw(3XExl)I6FQdg=%Lb)r=?9AkaW_Dp zI-$s-WNwZh@)dezV(?yvg9uMt8K_OkJiepHr?iT%4JD(CjH&P?ov4 z%>LGP;ka6Jo9b`yT*+n){}^5@dmUVy#}UK%mQSPvS+6#cbSFtUxaPL3EP}2mpIyk9 z8uiYySwz_#X;`u17OP2`@iT2V} zs_rLcnwsJLo?MtN{oGCMak)XuLeW`{zE?o3QTyeDfGiZFn+P;Q_MGqG=z!q&L*XDdSqS=#ND4K%uGrKE{i z$Um+ah!Lk2X9R=t7;t6r&i0W1$H(RZd6*^rz|COH&HRON<`f94a4Kx?V5p4~*f0%T zb`{gTcRTE8!dUmh59kcfbVn#Zv;|dnVqK zVyfO+UfOiGI#ZJxPHmjD(UyO^Tu$0d4J%g{OLHy`XwNF`Yg)?<7vBZRqK(JsblR{u*(dqwy!4O znu)UF4$ci(o!Wi7*ic0Lb{8|xe2*|1f&~9Fr9XNMzQl1Y^WU7-x^K@M_Vvb1-Ln?Q z&I&w%6YX=rHtna*yKfs|kE3KNI0Q|)=8o)BBwM>8Z)2S&^aMNld!+ElYsoFK#f#3W z#%Y#us4Z~xO%8W@Y(!Q_21Q^PVs-4@^n^G0d-J_R3G2h$b@}(7Z_eqFZMrsj8*DLi ztjrr*?&vzsZdJ94m}UP$1%)$aCrJbD3qMM@YKuA_w=gXFwbEt3k!iVYn$@iOgwtJR zwD$$Jr!OrgOLwd?V7NrHI8ixRpU{PNgm~$YSa9^#I!|vVodbWm?~QooQm>x5VQ;K8 zRyjB}!AV}*3qT1ql#sjx17%%nac?wvQ1z0fN3zuDjgB{UMsFf{CP<#qQfn?IB06DJvQ(KM;?Bw}{*h3b>}Z@Wylmzl7e~1F z%1Oh)Tnaa3GchoEj zS0+<*vFlfkGq8h3a%`h}wx&3Fa_S%Y&^O0S}$12zuH%vtICs8 z-e*USd5#;4G@CDrny#Xzq$d2pKZ77;V|7VHuEUOZ&rarNj+xYt=%|a&g4|%il|qk| z(hA=H zxu1{*RJ^&_#%hCSldM2hTb79iTRaOw+(Y&~fYBzVj^Vz7d4#*{cd;0#M*j z>_`WacJ{eUx5QX=DXqk=K^XR89`%nzTzfj{23vdr@ZM_&V@x-qA<6 z?6AXg%^qmCmbkgxHdi)RIoA~^@>Fg>nW=~RNVi~aO_?Iy!t4s_6VeqCrWad%q|5$6 z1OrGHv?CI%EXdRDMtVqJhdRNQTP38mJPm-*uh#q+A;_taZXP~17pTt7t$83U72$?AMas#7kdW&hDSvethxzPFW!-`7z=cbR&otGa!qjQBUEHF@?eeM`k z41!V`H2)FD>8m`LE9s5DLgLdE$g2f)kq@+co))qn+gqpbmp-yD!DtDlUDPVdSHs#% zAdE^fAcy+ry&xlh;80A9y{}s%2InSa z5gzE9RH@6t`5~Gd(>atHWgkW1oNFw%vy_#0&cHwsgrusrNA_#?y-PzQlWuL7h4)oK zs`Wv_+AAw=A~PvTv~KBlqf{3QU#xE*D(}3kbAvct^llp5 zy9(WXa{_5>fp4<4_L4F=Kf4lkXNo~tLkl!C@9m5Q*gB<+0SqN;C 
zH(cVkJ|lbs1cqW|di;~i+P(l68HM6$>LvK?hDSaTyZxO^57vCOqy z;`hB@li9MZmLoc_P31>-2P_qR;iMO3SzMnkEhk|3JwpJDY@0u&ww0>~%ps1xddlG9 z+VmYIL{#%qQ_Rl!P)*RuD z8wCrE`pUTPv*$gCo;BrKqFgQ5k#$-oL%TV@_829oK1ll`Zu==9`5nC54q5*1kgWSw zru}mW#50!L@Ym<-UuwACSYO@v)h%a(d#JzJt1SP~6lOS`cn3A-G;4GruOgeAGeT;! z96j>(U3VGgc@Dd4^SndJ>v0J7c#~JjBOnJvS&(8NU#Fe%MEwW(t0H>YQk^f%PJfWN z=gIYndTT5kkgEX8+#T`iZE$UJ7U@nbT#{3x^be5XbjHP*cy%DMf6+a)7NxSydD~zw zW%x$l`2sCABMy6XGjb?7sBv1LF?aXsDS;*(8PQ>nw8m`zY$-<|oLW|w7S6S3-*Fp$u92?7uf*&rNJ(kY++efEmntNPsbX5btt0bo^*d8a5UIFT z;S7N_Iz?gP=m?qOY3=y5`!3<#DqW8H&k2U80yg3UUYsg~WO1qp;aWI8ZrxE?XVv3o zn0JeoeX=k<_Z*dLf2)68UF*Ph=8cP_+t>rk?B`$Z<&)BkZ^W#PsAT1!3w?Ttu5vCM zxB&O3ew)$6V~|Nbm<6tbd^x5R_9w)m;Mt;}bQQK}PL#s7@w!0ZpnDAfZf?EyDdDe2 zUenMe#$-)J-+DQfnwla&|RW_4vYxn(%P-OJam&=;=6T;lH!?kYtHSJNN z*-XkNQkE1}v`V$2RoMT=;K6$Q5|7?B;xILV{f~8OjmUi$WSs@F)S4v11WOWkCh06m z+-=BU;Yk>@7xa~5-=|9EP6g5*B_mF7r>JzQo8AYe7X5x1z@F2|f;{)s0fsx=1dcmydpmxjM(bKC8jx#J+- zmboJ3ytx6mX=S4M5k;B|Q+kLrq>@!lUAHknKh8o{WS;|`bEoy5qwova951(?44seN zXr28ct3OqKa50{ls2B#=zC?_H9v6kn< zj&2^}Or~8U(~7w|j=3e;ig4~YE?7z(15iP-%pU0`>so9tZ5)qz?K)M;@DD{Mq2Ro> z?iNC>HZ0mHGU6dGSw3~~HeHU5Vx_naUprR|i9pezptGU|&d^HPzlQ^k>esnOlhIUF~&#{sU2LxpIUOJ2M>kEfgx3Cn!_px#y9 zI>A2@LR#X)5$Vwj!5u6Q-BSBLZo_$R3=`HA4zk<$kW0FKAQKa~la%@l0R(7pv%Gu&F|3#T=O;=%M8&NU zoitV+MUy=AIm^~$~_<{V|uLHYj-#Kpa-*UI8R=>u$%p(>|l(ubYUk>8&B zmN10TFYv}L_JIXFTDD_q+=jts!CpI7A9oPoyXN^{cn)GCcw@uq6TnBNLjP=D$;(5i0Dn>b1C zMiTx}JVaVJ+vim^e2|&yRdz`1k@kGi=;4CQy#S9(``eOjbZIkTY5(<-dwsFIOv=pj zCZ`zn|50w4279_{$(k;bJPk^u=Dc~AeF1e5^W_ule_Sboe7Amkru$ox9ETAdL7?g*8Mk4K+egu7$1-#bkY(p; zbf*?L&N3|awH6^$XwOzj3O@AEJ{>>$ivXlXn>q2#VU0gV8xMt&gZLes`jr$ic=c2V zvys#>n85pRBn(Dt;aDb=-*>h^HrprFptDDDNMe6ELT}F)}+z z8DmplZ(f@k%~ocpk(q)%r!Cz=g1a|obHmsSQ2(ISMYHLHG?{fMC*+a5 z7$zQ=7)nl^I5;^4gq$l^P#=H`NQUF+;n2lGxT^D#F^`Ab5RyPhhr}ryI33w~P|wD}D?rVIT+PH&=bboA4j}{*5srd*;tJ>JtzkSHjG{#K;WC4`Fiy^tjeuCEFJFb_6^x3U~LHtFD2}m-=l= zl?<H04B^qBG=eT*7`dVI_VF7Fh4NdEcdPcv0`(Y9JlMGVGdj1YD11Shy>Hhj7C=fs#j3$@sWbm{#P-iMBoMOj?eQb8v; 
ztuMbiV#N0`=-7TXO((K(_8lT{a%yjH;6Ea8aA)3iNHrDvjm_xeJ48u;v6riJi5)LV zZ2kUoC8gBcdSoX)KX8o)RjC{pUYR_rzX-{v3cw5dW~2qwiRAq=dBy&L@U+1`{V8l2 z)~Ae<6a0}B$^``TBO8)~>AlvHkbw$*PsOWK%gqyrh*gI;q2sjiO!efSyDK{IZK&TZ zWe!%2>7g;QSEAtY6p-bsMDzy9ZL2Ueu|KJ_5>+@1^((KLi*hA|E01s;mt3x3`A@XS ziHW|_W4!dWU%!3&Pb+vwVk$BAZi3;Kkes_scF4F2>|%tZWP<-Bh&-Bzwrl;=Qib${K~gvTmMVj$0IU& z0`a=FUz>U`axm2;T)SGRI(qoBb|l(X6>^9nD{5kKWKs76*ja2-`KnK?@xS(asvaxO z(BC|)>X(S8YF(RpK-_l8++X=3Sv*)Y@j_J?( zn`j}0#ng=6@f5;d%O-{kDP`Jq!sf_W-{ zU=Ok^LkbbzPk~PGbyPD#>+kmkXj-Q+v^lCM$8KhG!sj%4|WP1^rW z;zcevfx6Cdp`Bnd57sE9r2VG@lPhDIgeAcA^B+e5OZq6u%rvQoc#~I2z1K<(tJEh6 zI6u!&_3RLR-T6|>1m{(GaD5A3qX0F!a^6$!W@7se>6uGDqZr+JMV6>;3>Zj$xSRxY z=TYiUP_Y`3jmfXs`~N~UfQ;ubtQu=MyjFT=KK&?rdp_CB7r5BTGugE8`HGCOc zi24!@5Ne+W#my5ESO{B><}V|^%9U{|6BeuR8hkxjW&?5v>$F&_*IxAZBP%nPpht&{ zk$|q~XsnT=9DN6WaGvq&t;2FJ>5DyhZ>cjuEazMZ!MUIuRireM?c=5Nv0y3pD2k82 zA@eH#NmL!%M5#Jd6sr7eNrgxTy=#&qq(JFT4{}REc;zR254s8;9>e%RItXAMZhMtxl0zy3emhJdG!-TTxGq~ zIi&vG73cVq`qzYaE?QZRMg$VRwZ`5p@v6=76SpR9Ty%TLcL~znRHOaFGQu^4&&-EU zAUwj2E^h8lEkiKya)689L+baS`aPh2_p9H})$cy_+mok}uON^s!0nOne(Umqcwghv zg5K(lJMDeM@SzOvP-x*ODM-=6-x%z*?*6B`WzE8nxv$`^Qgfd#b&TbI<);wKS(bl2 z&vfZ(c{|a5je5SQY|o#P+=<*ITRYLubC?s){BtPBO$bhj5Jzb?mmhb%5EMzOEv zZg+bD)M*i6+MFlB+q1o(yS*@3>P8i+m_jiZv}gUPOWc4jiAQ8%v3SztO$K~JU`LTm zS?q8f%3eSnssgbfd?Jw;5aeI)_eco#+7m+96Y%9PEau9dV)ohXAtSBtNQfE1iWOrk zn_1kW>zF~MK5W8Dq zds0uun{25e-qa%x&6GyEmJGRU_RIYT2TS5U3^Q1A1`H7QY4?ti8X&{W77Q39H9*7_ z_6<_+08Q^7AT!GfA_EPjc~gA@ROAj28>PM~{+HtuEAA0Y?51KFAnx76j>7Fh^kSGTAGmB*q z;-BEprDbK;*Fh+xRSJd|V&@S~+KoTkIMXZasRpr;wF} z4Yw5y)YVTf+`BE|+l)>lapLB9eFut6aKNW5jDX>r7U8*+uV{1;I7IosnyfjFcjT4-=l+I5 zze4#hW-D4aP?s$2D*I}CHoAXh&sMts(e`AKBt|yQ9zRqyFD#gbMMMgiurc^Uw z^oUynl>fgWx=*DzK>2@~okb#}*cCkT-4`E_0owR&A|VM%z6ZjpK-d87|50fF#UQ;- zo&P}2%^hPe>?_I6sIrZ`{$Fs7hVfMUHn+-)`vAYFVZ1(Hr6B7D*@Dayv8m4$!1V(d z_&tQBmB+!sABj^##SX;L>+U4v9*+)@QuR@lQ!7IGyJ%?Hq(*CDs&W;_2cidKMLolQ zmnu)Fwx5@pztTx^^PknynEfkR64rkCTu#QH_!VqP-KqY;7_{tgQj}bEH#xKP#YwF? 
zTF@QcbO@DEfl?fy$mS%O@~&IL1?U>rCX{k*!aAHg3hZy8WB`pr*s+Sxzo&qPW&ArI z?Yl3%NIb*dAlhEQZd$J5nkg6jwzkPoXZ$OGtoi0}X++#A(XT!-{mMs6XW$MOsWgw`MYa7NR9!OCl0kG^JjT+ao1=FsoL_Qs&Jo4F_E%+snRtdLc zCBg~Se!(a=R&ahf;bL?$)v{ZyIN)0pj@H~9PH6Y7qkM%pEGb&EsIg?e=Sz*Y-W;Ay z(W59@70ahHQHPYY_Z9C1cT983{x?~<@;TvMbO)x}sMDFd9uh*L7D7sm(4O-#6gWr~ z&!sCd83gh6836Ki?K#2G?sLU^3J|SDd~P|}Dj(4})_Qp^GazPf?oCcDbULLXTvaMo z>XB92JzlX9%nm2y`x(>O|3uM$w}?Nze4yj|Jk9-nt>t{D5t&I_%?XJVSx5*J!ALY=Xq>7dRdrRN@Zg2OK)ye;>jdjAz`q0Fo5-q2^#IYC8Kpw4Q6ux$a0t@kLc~>W59XhQL7Z>cO)O3gX+1`lFKO_f zgZ=HTgCy9r&p8^AW|O*a-as^z?6}s%D%xnmLYfd`9!FIlWV8(Nd=!}8+2sVQXgIWd z4&zU%TLPc=FzQiE@(DWaKDpAwTD82C7Wbm{B37lWd!6qnI2E4nkIM4MsA?@@!&N;S zOV3oUBlYbp?KwJUt||}`NNID^$LQv2PAtn`yZW1(X5+|f)4oH}%~fCB%^yAdzvyOy zcFH8VkftB8t8WPFI=rjrOIKGAuljTmL$0gK8)f#+gXi@Q)ES3$reto+m6hfEwfyBp zXJpVP;JOX$hy6^sb4rhS6 zsWP0rYWH_R{?uv5CB-r>L5~K*Vx+tou6?MZa*eS^kEe%0=Zd7g0-VY%A zk_!}t+#|)Uv#TDEldDRB}m^s(LV0BP)E# zC!vT&e}zv%3DI9AmQ>i^pg6>mioR06C?n{Q35KLFQGD*o9U|D<;B%SJB=mYy=bz4|;>DUn^lDsY^4`mT{({~?&h z)G4~DQXC8d@}&d$I>N_Z>==+GG1#miq>~ z{4*k@e`WhS|0H#5aBwAIj_MA)q6xPX?M1#NWA2FN#@g@E{4ekMWwh)bbi-(CO>x>& zvU=}b9`=Pl5;9@EgYsd)onwK z$jixs^eG5>s3p)8zMS-#gB|vf%&@y}3)?T#^)G#&OVqg```ss?oQ~YBhV?-BQOOMF zirUpnL`{k*NUynRxYqJA3EFeb`75M&nHy=pEn!4H6K}{uqwm_SMwp zdVkw930j_L_08`2-To8018$a>bG3YXi?XIr)`E$4NE$nqO8RB$0Y6~vQS2{&a@hT<6#P=uu<1Xz+E{B^p$JUXQVsXO zyFKlds@NxB?ZF3v?=Meb@3AU*OpTjQYtf_`)cIe0Qs9Stw{J4@yyyE`%U-GV%lK)= zyWBpXUL2u=Gk@3D!Eb-D-IqxO^TZ`@`yCj&B4&5ri9<{@Zfz5fw(pl=dDJo5o@7oA zI14$ZU(gybIA-x|doCaX>L3yE+1(Wm_T>t=($<}WEnDtNe3gAAZtcUN)p{ekn6I#5 zT6#6Q_+@9E-9pdJ^SSV=W&7P9xVyc6TLvZeeRl@mpTq1mI;Z1ks#_=~j3kQGg{sNk zVYzc@Mb+Xub{~om|3ppWQXJsLZIb2)9V`fZ!Lq*7B7Jt+2;TgGu zjjyP^k<#s(ek*&Geu~Xs`bhO5`|igbr`mPgOKPIR6Uxg|qpTq#*bC|A28S<6ML9Us zcq?a$mCci#wmo+%{m%r~H3Gv|q{nli7{}D>+W@>Az&JJo^jYHJQ+HF_tU6!w8aZH- zLp!wskh1{+-_>`gPLbTlOKz?z!7#AGd^dm1-IiH#-vjOHSkC0?#-willJ*{{FMr{* z){*+ccjB_sM63{`e4czRy7)}p`Z$IPA}T*Io!fo+YM*E`^$8zp&4s`UA9z9{p7+SO 
zV1JK%Y^2?zF4}#sAPr?|cTAkM;3z|Tan)q)+4}-Zwu8KPw46`3)^ZMw`|+`qa;l_@ z)CUItiPZKF3#o3l`FTKVJ`?h>*6I412**)x6^t~%9=^Si8TfTEh+`7_J8#$y~ zooKOf(U0N$y($1q88BM&NSZ-Cy+s>ZZ1i8{oTjxd`>_no;brybOrFLi&2>GpX=npZ z?O5xq4*T-`YGUa0VKjx#qIl#0oOgS8i^*GSb3Vtz>Y1&h$)wGbv}oh15SGxf8&QVB}{U z6#^r!ZlX(6 zV(I%BwLx8OhOX}Qlr(m?%XbdWO^RE0s^PbHBF{06s6lJK8PQnpUWvoa-1ND*g7mq) zIL+QHBMqwDZ$4Qw!-i0b)=(smlKI7Td1}g?p~hRv;B#y%qsgK=?WNkD+z7^x2Y1C0 zH>j?jaAB=gi`tU+(z?isI|F<+RoWJC3ocd#H~4C;HiqZUfO}vpHw0hpy|t13wc5q| zWji-Vu5n=OUIHm{nfaO%*McO24nH<)~VWc(hgl*N@%SSt35>T0x z2xvFlN)YOY2^=))`C2u$7*g#U9uyAEiXjSA`dNuoG7sMjg5$-YBS0v7VrkTk&L-VD zz%A$>G!mH{9gFm&$|-srMcW}&4%cEsX3nOnHr4u(KpX?a{#$@J^b@J67ost8dP>G+ zC$xyyz&g8=TSXumK~Jm(rJ2?;O>iWGd6pn9FuzDB@N~wjd!;v7CNCB01OZJma>Ix% zNGgy}&X!FYLpf9I!Z%bT!?B9Jq?fStih!8lPSsbR3E5rHsfrK;phsC{y=DiHIspwN zNBkf5-UKkJB3mD?&H_yqI)MZd7HJ?rAOc~J1SCy5(18TPCWt7+ED#AvNV*{^OCUkP z284NzyCd#8<2a~@h$sOOP_}@ghzoJ=7?h|8sL1_&U){Q$CIrQC=Dq*>zqv{Esj6F5 z_ncFwPF0<%TNl3p=VgfZ+=UD%+TO1cUnkCRx)RSUn(J`a+M!6GK%ht{GGN*|@JBy{ zNIO6yfXC0>%fTrD^p|{9*qgUexWY^<9_?Cp$ z2-{rf^E1H=*|)JbM6b6j3iDKA-mLtmrj}*5gIV^3J7U%#8wUsa?f^B-!1Npu9G-A8 zTUv(NM|WvOtH)%P;~3w`EW{<%ITXLMoW1ay?+nH7eAjYHNO0|vvSIl+0}%Zyf*~~+ z7Vs^{uo^B^!(*+v7M|OvIF4I}40J`5EjIPX8as?>GD@ENKBS+F@9T{}X{qUoY(ugw z6&$7wfevm(2+iA^5UREbVX39!9CYsxbbzM-*=4~<2sy@yLi=O~=9&E+#vG?GMCs@N z=TM=j4U98A9e%)o__p3aJZbPZmJ0cq?(-7xn-Hj)n&l`M1ov=7rzj-~4TTNlh)!{z ztKnydlUIdoWe;v&wZKtTlP~Z5RFiFFY;RNQ5>fy;nP^#+id$d~R&DTJ3u5q99_C+V zcT_P!O?N%3)oH_%J3IT}H_q7!zdfBT@f&Yh)>JRSTyqYibrx0ggYGfa9H$5DZd&t+ z9_3f_9z0}65r3xeXCi+bDeg}|1HBnFFCdNQm-|Sx@fm74H&F}QEpaC)VS|*XnyC<5 z%uy%lpMjN|?q}eTLT>Q8Hp8*PHN~-F;S7w0FUz5X;qB^_D7!l{<5nT~g&Zumq~pBd z73pF26)7+$eYau~)&$?PuPV^&iSN3`;_b|jlW0WRC(n4rt~fbQqLzz1i8T=JB1>@} zwy)@U?iTd0ZMDee18A0tJb@s(U%Y4ej(tVs#|Wjluf@KU^dfuWfkgvs?l-W?Xp7sI zl#t*I!du^o2VCFc*f5)WTiiBV-0QZ)Z3~0!`2L%HMS4*!J|3`pMGA7hW2u-1tbv-P zVgfuViH9xAI6||#A6w2wdD@=%r3;$f-|n6faQXqZIE+^C8duvxUd+osG28qPYmxT@ z)3mHJ_C(m`hoB6{%UFBlWwE{b;fH8=uLz8|7F`3KfFqw$PoIW~->*1r^s2OBVX2Ax 
zaCXotn}r`u#x725FsQEceAOGJV9f^S&?aGbmZjoz=;mEb_wEI2!aSu**|z29t8j{V zJlY*BDM(a_L}W~wJ0(o+r3Jd|NH8DO`P`rO;KXf~3aWbdBq2K;{8-)TW}GpDd6E`6 z6U1vLN)p}6%D@R*B5Z{?kWe7S?bx9wkRLK0>u{&wv(YRE zqj%1IP`Ry9UJb_C$W*szhRy=V=~I#g*ms&XP_h8W!!%&QmoP#}7Qn8MiZi}D?DCi7 zxAfe&gz{X8UN{s-01$t&u(XJY+LRTY%iOoLn#^FAV zw`c8%r@7k`VpJ4iU*R&lU!i#6TZh;s?C&lOu)z+pE{65y;Ut^OZ)=b_&AqUc)yUWR zh9Hd3ny*SP;t??*M5o5{3bJvZ9NzGFgVhcOU}S^>y-BdI6xgO^?S}9N*l`wkQ}@p` zVJHrMT{IV6k0oR5KB}I7HO8`Gu#Qty=l93)&XCCW<_FcwvZMUdI>@JI49tp-xKACc zP%+$p+(lJPn5TL%<&t{}eQG5=fP44@`n_LUo}RUh-Qph4-(H96a^qlV9cxq;kExI4 z7FE5Tj`oGMB_#mskpcbi-kV-o-~iUgUt^WH-?i~0OOR^Mi=9y)3vr_wC+Xw!D0WOS z^ty@l>{)~*=#F}B^(^KFi_|Oq#dwbU43-UXU^&#)e$PF~k(T%=$FpboqW50Ut%#v{ zAw9~`Z=a{*jU*BJ&<%a)hCW=CV>S9!{kz*Bz)T$*Mgv?CO>YM|+{wV3zF1=Sge*jH z`YZ8vbkCI#26>wd;DL4txXDogyzv}Ca6FjimV2&a5nP8LU)OpE_pP3>KWQ2ajYi? z;Gyz$c7E=>X4QNX1!*WM1Bge4ChGWL>*|#__6DM8UNJNY`DzDhk!jFkD5ejBx(45G zUlH0FA$#I}OGQ@`J$oz_JiHXT+_H?{I&r&ZC4TS1&XE-1E($n(Eyl^|Q_|f#+u|gB z$_R9>lo9`TwMgtkhHzKg6F=3pk?j>~+eNb+o0R@t41t(`HS{dD6DY|ZU?`d(Zz_va zx$}EatgESVaa{Dz`n=GqasyFqRJkA!Xa8b{&Mur)#jnUQr5lx!?%qM!K9KHT>DjxT z;w{mZY8VImbN{%~!Uf<8iiu2fr_Zos>O)xu`!qzO?MO}19eTyG0D|&b4Yh^col}Ho zfllsuV(&U5;(R_qGkK<3$!6H;u=8ya#zrxyB61E&qh2F;mLruSaPxP;=Ve_;U{9YK>G;)$7?Rwyp zQ|?Wjq1zOBmS{BuYXX|1m1aA4s@d}#FyI@Fo~LwAlxLmpv3l;(J+YqEx+mVVOnQPn z3-yR3k5l(JJhOFAhUa?SGsSbY?wR4q&^@y}BXv)L=StmEhUOC)uvcq#$Dj)Yc`s;K*)r03V-A}dQIimYF(Z5&sSJGdp`*+j7LHAeD z|D^6eLjV1`zncCvy5B>8h3>DTzf||1p+8^u^Q8>W4Be0Ohw;5$-OrB3y1vljs+^p9k)GPL{Iv+!^$H=+{lsd%3f8czr!Lb0mgt zn}eAW-YJtgMw=1yq8_tej|oG}-}RWkcuPV#dZVr{D>VuUSLz9|HGslnMa)t?CQFZr zMNFw4Gft0zCJ8OjV}|K535dzoV|wZ_Nr;)M$F$XBuvQ+Lp~p01j3N&!NWNG7{zh!R zOmn}uB%{j!v5IFj?>#C<#l*EPHO14J$2b@&Rw+@M_Vj99noIHH`KVR5*viO zVN-BtwL!QoHwY^~W^c}5eY0=?HVeD?)isq_r{X;P<*vj|Bq0L(mra8Mf?St_0_^BS zoa>u(xhd4&)dBlHofiFmh<@Motpi2lm@;6s20KUnoP$7*BM~KB<8a$h<=8gpN`n`5 zZ4U}_4TTrAX%C8W^#XBL9z(H@`v}!Toc9^|+e+GT`x|aKtu1HJ>)atYV`8_%@&*R( z`qOY2tg~rsi7OnZ2a!t(OpSTMj>WZp=m|w{XBb^8ql$#bbU(2UW{Q% 
zH`z9Z)i%XiRFVBQoN1gEGtpzR%4uqZ?_8QA(8q4$v94cOiX%_&6iH3-LiUmcF%v}@ zG>?Bu&`I}x{nX7(ctEAC^X|=nWq3VfV%$FGU@X4&@hn&=*WQCQ&v^J6EWOH615SOJ z3ASTA@H1@MSX-)%PCB*I>@?$v0#E$fwiTr&)nc0QlY3=MH3GIRn2BQLA`D*_!%t`a zMDxc|{$rBMZBP6OM~vRF4fb63ZrJ$TduyAx_tpBj54d-3MvJPyu_A^Rl6wB`}2jW6rKa1(0$!hn=f!4VO@C?&G!LuW)NwZO|)7hp~Dv2U2I2haL8|+yHSm(v}Y%xY+nX$Xb6agRRlw-P5aIkWLDJ_=b zR^rA>aF~eg>bwop$z-|bHmRb{dBugL0_W2mWYMx*l&=^1V;7%7U5SpYV7vi;k}qVR zx1S-^ZGTfXF`gh&lu)x5%$K`#Tc9|c?U1gQ^YpKlbEhl^#|H$W z5H6Ze!F!8{!#B&G@H~r_w@ZMbqV`@JUNUTAE8A}zUji=%i?BtxgFQBEqR<~U%=<7o z7UmXUd(d|hah}+0U+%=ACs~*x`|i^)=)$Qox9yB^UQaKxllrrm8>#sL=> z<+O7!>2O3R`48^!oGZTIR%x*7VMeeyR4nlF9XnY;mGJDxo*j8B6Sp8kPAd<;nGKz3 zKk@QboQ{uA9iOu=uaJn;e1I$#9NlgodJcvrR~uB$ds`skv9~HsxgrWSjZcBhP@?7P zEK#tz-}ZERNj>LBOco3#)=-EHEct;mU{afycw*jMaKm!TS)c@tSZ@7Fq7CU{-B(HK zEEON=ky0Qk8A)+VsJRE_lP$#FwuLl8(6U}fW{6vW-5&S4XZw2!mpMr5@3AL-zW9B$ zc{eLK>9(gOt$GtkD*Jpf8wndneKSZh=bIB8Sy)FnZzPVL?oqtE227+Fqn-rJ2oEUtxi0JP5>X6)^eT36E;`n>&CQEWKQ)xX{{S>y;PC z=RyPI57Y9Y9g&}`fe9d$1?={0UWV>Gt1T_HJXr+f@kkL#-te;QDDyc~>#I<|HN$@f zTS?}da>v1sU;}m>xN)kuV?{s^?J)Pp>?VFoc9ijX#^+tt{ydx=L$gapin|JLUSVHk zlH9AYA%=(K{1hpT{RpU`RbM3*Exi)uYu07EvA83;gv_;g$ z{ZLqv3!y)Y_Gvw+ght+(&M9K$PUlYP&Fa}4B`|i)m;10tha);rM*jy;> z*EH-=R^$27QM*|j$iuFKy@BUd@t0V`tKqm$ui`1vs9(QGg`t(I;sTh|y|~s(k6y(b zOmpYlR@-`1u|==q5f%36_IP_5tj5Q)FswyHRD0Jox$dgkk*r-XeP3;;tlp2!xxu?p z!I;{?B3|?L zKg;hmjq}@zsS7r`<|T&ifq|eT6DND(<&*c}$8tmYmr$+epfWYsrtx9{4ux;3xrX99 zAgLIGxVk|hMe#`M5NuI$MZbi#P^~uPC9IihwSg~T^$#1YSF~kG%)6o;)*|rY($>Ws z@nJY8Ja5BWZZ*yTkEfs{P*9Q}DBQtUFCSC~R^VBZ7}}Y5&c+)?z_Xvk^Jm!wXuuQ4 z>$&Z};2b6#O@=p)%ZEg1&ZP38&pU{2_w&$g+X%XRrS?c0iSREVHpHI?skZWj)??hM zF^na$)Op-P9O*f?NK)l_2T&)p_FACcCoyzZK2Y!O$btbsv=sjHJ70%8VB0Bs7%<0D zPCyt=JKLa2S_ycOZN(2Q;AX@Xm$-OkVTFFuzq z=ocbf{Z@@_<~a)6>>52Bjb_(R9Uu5U21wXVbExxAn!R6${z~ybU|2*K5H{MK_Boqq zbeI--7p6e$SNQ@S>Jk--ZiOe5aaL``>1601BnRdJj|(|~Qo``Mc^3zYQT2bp*=vg$r0NXx=` z%I&b?iC1kn_2QG_N4W#eIOK7f_yCKgd>_gQ@8yR!1uehd4{F=d)&LWRR`KJ3glF; zHX83KVQC%ous{&r9EH?aHDG`d>9Pkvi#%A4Joz5;}9HDt`%Vz 
z;B;iY%l;p!Qdb02l?Of!pB1lHRN=*Bp56&lRY{wjNoJgAZQYsFEU0`&2-;009*+`g z-`VzMAeYpz$4Lz)d`1yFv1#6fp4ou`?6OLm#eNo_fMaUC?k2%bsXhodgAz2062uN9 zNgtxgz3K9JVjc+s8ONz`@=W5Ep6gKUdb{KH*7ra!23*UZ0ZbhF#Vib)${&WG=NsYU zQt}|;F13hOO#ec{!WuR}Pgh6*o=|%e*E6t7ZVj{QswWOnwLgQC ze4}+W#)S}09NNAz0!>36kcVZ?NY7yu07J|(0197usPIQ93wz=_d;aXbaly0y3V~v) zm;I|WR!zUot1s1BVUqi(S}VlJ{6`$z?5UnXlrR+In+CRu1^(?_9q_W6{PLF7_sxUO zJ9#n(Bp^`Q8X9{C0Z-0g`-YWbM-p_k`=I{eDo$_Ts40fH`P%!W{zg}$K9do|W1;-6 zM?Y}`-wSQ(Db9p{nxp)u>iNkq+kKy9eC|p=E)E+2`W?cmCC2}ethzc#r>pRaz_*aj z2{iV27GgvPd~qcA&(-tzg*wk{#CXPoPx@D{Fz&8bNm65b2H+-^m$6@@g`8A;2${o1 z`oo@f#*O#&8$09;9;fuOw8=u;X2If^GaR^Z6E`L_(}6M~Af)4EiKkuZ?4DP)}3?U5a0m9I-m zL>w5M<2&$31$MT~+P)P9X|5=+1eG`EFiP6ZFkcg)h?6=`HFS0_&j zdJk`lI9p&qhkaq#LEk~XB$CT_MsyM8Hb0*sqIa~{>pFe37tc3ATwO3UayH>GYlVMf zBRGlRGaR)ElkJH^A&(Uc-UhZpWK>7+7j?6pd>fwpw0<4sS z`dN`0EhoWnj`r^mH8r3<1;^6igUo0B7VN=q%;MK=xAo#4$atJmz3MuXt$Y)CWLngQ zA{fsNcJdrW&v&=89uZaBBGp}%gfzT|mz=OvZib+llWyZzBx+xE>zl{8i_H~SzTdAF z@8kXw=W3$g$Kx%_Cm=T-j;-{mx-Z6Z`!M)$(-q5&Z%(sqOAuu>p)9e~8CsS&g)wHv zbhT{>BP0!Bt|+EEE{^n8DuF;Twl4IH{w(yDyF*_QZYaVJ+m;+yl@ zmG2J#|3}4!wXdLx7b8u8t37^AuEFk|m}5E3 z_)06RZ}Y)2S2w)+)Y&uFNdfDGSf+CZ;)T1~&?MM6{Z1WetDQdP|BI~Z)D_np#J?S@N^Q*G)y4&8y9hVVh$ zWf-64eqlZCq_{gmYB||Fy<#CoFxYA6{>l!M1HLYw_$FRi=R;Oaio$twp=fy6IQ1ZM zl`o9Ki15#{whtJRa!Ieev?ci&=H*dSo&Sw!^>7N7|>-zB))1>zruntGp)crPtWzFD^?9h*=_ zu45BU!M?t|=NB(kWexf`9956wjx7~$fTr}s(@t#eqn(Odqi_r&mOQA_uuUBZ;^hMF zw~}wQeS@qr-&!P2b)S-7Dzjg)P`ECy&+3y+?m$%gt)4?(oKp%@@nnou^~JpH>q!&N zPu%4k-e>_jS0HOzWU>Bi>`lgAt*@u>ji%F5IbeAayHmpOqBJ&tExCf6aRqwzAcg$& zTY5bAtz=I?a8-_@-fDP_+_NP=#S~OigMl1s;~E88c-XpR+ri-QRR;_yh?xHZM~?C3r4g`+qkmH8 z4z|ZVy^N{83Z*v9vN0Wk&2IP*47rw#&1C?hroHZ|L<~$Gj`9;Grx|h&cvR4}IWj5bomVa$Vg#nV{(+twn=VK2( z`p{Z4x<1dH>`kRXvH`!neX1JCe$`K7Esr$e_`$%J*fKmcQ9Cmb=Z9DC_j^v@SuzUy z13q-b9dY33_|@BhD!%)Q-U$n=P;sj!V}%Q!x4=Hxt!&>Y1*=DWJof@u^|jX9;dSHA zG_Fm<=E-3ahP*iT8;EkRAdZB7ZL=stL)h>62<792L3|`DHSw6keK^IEz6;U)He_QxO>zFKRTl>Ce5=t&l2y_K|;^gC?A<+B}VBYmM@=!Bj9UZ8qVTk13P2)&Q;8;6p 
z;k9zN>|rDdQbbxRCW5nw_~9E+Ezj}6SnBiI(`#pbuQRkqgg733JWy#lCF&r|d@y)yT1FqpVIr#=+s(J$v z8vynUzs+t+7-88s&1Ckgv@Q9x3{SILUahhn{0XOQC84vP0*F{APu+r|cE%E7`OHd) zg)_(SvNV&q4EBIMSWOTKOAr(S1Chalqvikvy#<7wkMzNZAIG|H-o)hK*17yUH z@_Q6N;(HXRXPEYJgXf0uJYUESW-`3-Q=rU?lfdOrpN*kNnhY@=@GKdRWm5SW`I76o zZktiQSsx8Xv1iYay#^-6UtsM<-mb+O!%~j3Fs>>suQowVh9QPqvMd_|5~MK~Iziu! zinrO6e!^PH$D|SJD8m?U4s@TOe#zMO3%?V+&vS33oDOzKTK;rQ8Cif08A|~1G3ccL zVl(=7QET<|1!E;%YLJd(Ww?W6OpRxNE=-_^B&J9!W6A*Cwg7w%7BS#zf@77woe+uQ zCFj}4af!t}E`no~3{T?QM8#6k7Y(EwC-H35GdN-XT^YkJ4`%w{ZC*ODWa2fR>#upE zu8O9=rghmLfCky6O8yQ;J;8tJ&b+EVW^k=NA4u_(}(8g}Yqr)JhNs~~b)#3iC{1q}l z;6yUXvgLqhx^ZEz|N5c>yEK>_$QbRe2ji_>5!i?l_}jp zp6_bplaiFANZQfk2+ySRF{{5Z#Czj{YL`aecHa9a=6~rm>$HyeJO$S%_!kAAQ*gI} zpDB1s!4_xq{ILoSR`4nXZ&2_i1#eUEeg&UaaEF3N6+Et>^+&z@{tDt#!SZq_I8nh| z1#eRDP6an8xLd&w6g;J15V~PrZ4~UGV1j}p6wFX?hJy1HT&mz|1s_-NRR!Ns@G}KN ze%7BCrC=uodnuTt;8hA{D|nNFwNx?%3ex+alUl7LCM#26Hj#2PB1)U16 zR`6a0pH^^-g8LQxNWpIu6u;=tX`!H1!FUC+R3@+S3SO^Zse;QDyjwv%wGd`iPe%A+ zFm@34CUOG33{xInvgnXq)$-T$EMp-$JED+9~j@Se`t`cz?xZ9l%JQC>C7uEu+GXX zvChfN&aq}9-s#MlQ{=Qd3$6Kunb}t7tQ>2y)p&k>-u13bG1|i*vG^`8W0o z%E-^jEXlEE6&5%%vyc-xd`jd|na?DPq#X?Oro%q=dQgGc0*STjpXa^_s0f1@?qRh(Ba(^`O{<`E5~$}7lo z=4IyR-6V?-n#lBd1=%Dx2RW_RyJjM6y|K_$?2BtJYpTOoYwrj**Pek_%+it_SvvaPYbv+|3(TXS6nS(2v`&&+zH3QC;Ct}G{r z$G^O3&(BZK%qx(zlkUExyP(i%%_%5!&75T|Day{(R?M&>|e<@fcc7Ckhpny#ZFgdetjjeHsu3|7uv-|_&>d{ zAP23sH-rbk>LiI?y!u?heD!n}7UdMMEtC|357{~M^0HJ~wpTo-kK$8KX%X5FB!}Fo zZ=p=3r;~hg;mIo99Ef0^b5?N6)lgU}Xw_39;t6crcFEQYo#$b{B*70fOuoL_Jy>Q%8ag?hYW zP!Ku-CL3&JW?UT4bFgS!?GeVb%6r|}AiN4;PI!!Kb%XHj4Z>?0gfYuIFaMt!gkfqq z?|!T(5H8tAb(fEfEpX-M>s}eg?7RslpkczliD=>%7LbyX63A;nyvSgRSZhMYl%{4e zC_a9224?+Mt3=;CW7Z5@LX;Mj;xZ!zcIKPS0A~C#C@+(re?VZ9rsifr!6Bi|En(p; zT1G^+Y8@5brfs|Smvyjq?9@4?OV@6(-Fw7c-m_Ql_&$C6^&c>BP{I|16Ne1Fa#)gW zc(OfZ#7IZ#sL^TZW5#BT8$V$pUY?k8_0(&oU3=a18JX8-W#{D1oRxRO?EEE;Cs7cCZ9StVl9#1TCc6h8F#8JL!$f@0E`)a0@D6frW#=;J4&+bNts z!6}s)<0qBm6py|B2DJUu`VP>h3c&re%o3*!eaO6=jKU&Uk$wa5dYoRINXO_Qb7s!) 
z!cslwqzNhG`}OT*Pvd~Y=W0?`c1zikGE|8FEoHYXEsF>YM2#$4whV$6A77SHwjA7z z?Us-+xeWY9rDS9*M|Nv$+43^v5~!A0vu2siO-qZ4<`)%RP#;M0fBLeDhJUfb_-lBl zv9JRz<=6fy#D#z7r(YuK|4ic2{(MVVD(e6IFlN1!KO?{O|8(8!#9vsev;Mm!z_i5s zXMC~xx1dpjgiB5Je}1M*`!n)iNd51P`K=bfCq>}Tcp^jf{xJ2Om*1k-xZg1SrBPd- zDooV-b)WqBptl{AEm>M#vFw&xm#?_Zy>iv++t;l9(|_IZ=R5DZ`<{F6yZ?c|Jh<+m zhaY+LvB#fy@~OZ6?diWi^N;n7~stZ+Yd_t(DuhzqaG`ox66w@#dbr z`}QBGI(X=38A04ay_>)gR`}~V9JvCp|9{>8A6LsHy_x;HqPMtpU z<4-@I#f}Z}M?1)icYv4FLHx&b*aZWI3W`yg1u}@|@>v4qvuuGf zSsq*g^54j^SdKt`<`EMzovz|++`u)WI8XUtkD28vF7bwhn4FVckmD0E5z{K(PA@Ew zsV2A-%yhCG=4T#(bW9^~U!n?#AYyS7X^<2c;0|yBO27;V0#Bd|tbsge04bn|gyBja zJ7(;pi7^7Ulwyn%v-8OqT=nC~i7+n}x}0htIs#M0e5;(LaGIBv3A4s5@V)qYJ>Q5- zY4pI1P)>JqN^`PYa$2n47L&YYfLx4!rCq5A^J2`WtgHigBgQ07FRV)Xp;VUi#VOPra-lJ&o<*m}N5Dis}#&~F$XfNcYMC8hOrDt>kA+zqBo z9V^=Hd^v<17pCvn4Xnt|FPtyy4Y?b8%!TW5+$7uG`vhzxu)8-HC->WEFDF8{` z#eYne0AcChL|FO;7lpSA678}rqFw*yqFr%RU90Mdsuq=DMFE5Ljk|5z3K7!|Ht)T9 zd8S~K2*zVfP5eZY@qQWMk!BI;3>A^tAtJJWu!t-UuVh}@*I%#-@h;qRD(?sI*ZG7^ z5kVrNf4GPkA6D1AI;1M7(p=PZMw5*2=pYfjBUnT`0kfNl=>BFAJwCF^z-dg35HsQ4 zG;nGkBH9Co_Q0Y2#9-0Be~@TDKDxSfRb*wjq_vr_j1TtGw5_WUd*D7%`NJcch=^?B z(u}xx)3u2eqC1?C&J+|Nf`CiV_$Eb-mD>%rmOgOa^yE=rzl>%rnu-?LQKCiv)}nHZPX)p9|>9`$A?$T zI4AtsO%c~bMD`7@)a(7cxGJN5PWKaH5EjhGDm+Zh{6#a<7U35UdLqm^uP1`nD4+b6 zveefvUe>#Bo%fBbZds+v6Zx}x5XyvcXSPwU4^Nr}m_)#Eov%#0>k9A?t_*1jP1X=$ z%?=mViDAMDS+^Fqt7}soRn@98qNrhBe&A>u9LpgLZyh9BXNQT_{Vk$(am%{!YD-mU zWxYIQ55t}%xQ7&8vOKgAgCC_yLfi`X3hon{aCUlY$aU+9fT*9=eOngke8K!_q6K_u zE+U=-U)TRru&5cjQTvLXz=dg$hwx*;Z>H((4vqT!5Kl2#qhR4qHT6_(YAl6 zXgj`joi1&9TOr?^7@a*1cbj@fGi#`@?uZsvXB1?qHRPz3u$o$k=(^U`kyZ8eeHpA8d@O3aa<2iaAgw)slcpY~`XZ&Nf5I?~6P#w2wpbSGr3J!Gvt_}#v*u0wTx2BQvwWF6>o849@s zj?lN!h>tFgtn}(f=qNo6_?`{jJ+Ud$G!fB#kw(_D1@(-&E^bk6)U)bO;`KUw;Wv5K zWGQ$CXYee#H}(Ep|MsR_|MPnjAt>AYxXy3Y!~M4ju@BBDQ{q(Y7jJ6O1Z^!$w0N$0 zQRs}249PRJZ&Q?6M1j62&=&>zqWVUtb_~6SxCq2W^bISD?jDS~$QG?lQ$=IA@vP+N zo}nonoDq=6mPp$|v`lXPTu`04x=B?)r61Z4X_x|e((57|b%8c?D7YxPPw0To&M?sd 
zeRx~&v~|B0#m(zNs)MS`l}%JWwrf2f+8olzcGuibH18V}Zwm6Gu80p0Ya+s+lfqE% zVdFz&A5QuG{z>TDr|R38sbi4n*gsly{H{%1RCTMWh{_g4;WNTA6t6O*{()Qs{Ymeq zSjL3saFi$9Lx#*nun&Nan-~fng@}khLD#XJgJ)hnBF6xbA1CAo`bEi)w~r~`B*Y_d zCk>gG>6;@xknjzG^IS{Gd1tg2~cU{Syf|9GRWi@fDs&?f6Rt}zAriNOA_D6{=8#j~mRCx(ll zLuh+aXR7vE3>oPWAfikwM0inHJf9zrMJrQipa|_BFG5YdL=!yBL|r&BOv+H(V(PP& zl}JAW@&(!IFFH)@Cpz@+D>|4i7xkk=MLQ~a(@7w#uAk1pQw4cd`-&JH@uLvy4ygnU zz<>5F=?wYLz9pRx|6kv?)WPJtO=wZTgdE@1LRdxg6+|)1G@>{s$5?>gX~H^TvI`q? z3XJ7PVGf*}ndcl)SUdp}+x#4rZw^ux

8HnXK; z#vuJ{Ka@McneBkhgZ3aZ!(1DcMk#1lhFQEVI~&_`U~#$g9T#jO(`OKY+`?a zVP~o5=qCY_3T8=OWT%v7!TgDhTRyY&fAHI2wG0by5V=EIhG4MRXu4_(cWWJDP;_o>xAJ`y}pZgoioW>c76XO`$6 zIJr2_DQiv~!ZY)8Aj62k)~@^suw8&HBGErBCv)Bf(c(Mc3|t!~7hOo7&W4myybYLY z>>g`agsmWZLQ!5pav`>Hh*F7b2CGaI&~Hmd7kwpbb8JdF{UP8yD9+24Z3FA|;(b}~ zpxLJ#2ys}(>McfyJpXj=^cjAmGiPGqe-4FL^aFFKf1anQKP@>uvqFeRa;(c) z2Y^h}$41fM*!HRbJ{#Q&VgL@l+)RaMsc?`ASG}vJ4>QuMaJZ3Pg)u~ySCR_1RN*ic zj#A+x@95=4tMGaiZll7bDvT+dypmM7oeD>(@MS7|=CEE~2NhnU!d4ZYqQV_jc*fg$ z`c5ib`Ia6oRpIq23}q{?bt-(53a?S&vnsq?g$-txs<65(?E;3M(BvCv#NY6K!*Iha z4e$R}__y-^*8M+P{_oA-5TE}mVe*M@^ReC1`74O&oIF4_5HJ)D`~2iVS=hlRLg3i0 z3E7?r*^YTH7|yuIa+#OV0>|>%z8S}M&9+R(_>ORlAE3fE6;4)Rs6?4(yo#ToAQZAJ z|7t)!8zw7R&Ky8192FzmbTJ%pB^|`4430Q2QE(|B@0Y{zegz!y!G1~69qunG{171V zeH4z*djgK>UW6k}G#&8%yKuZ81*VWcbmU9_1-c&iS3%Yn`MwU0^n@MN%NGVj+`sP% zyXd7q(|XbC--pwGXUP}eJfmZA@y#3moml>RpLJ0YP+j>&MZo&+#p{2l^#A1|Q5mSe ze`N9tkVCgepV$r#m>i%>7Iuk)8Uc_tK|?SX;GPkPa^%cM~*&XRp}doCFk^* zBn4l%#Pl|mek0N!c=)r2+lbE}_B?z|!{UXv{6!DR9OKY|jXQeXQLC7K^ag%!&)b4%gF%DY}fZjj$n?N~w6+Qr7kS=NMs=Q2@&X@^lNbWtIOb+G@#(8%rJx)H~vKs-Q{_gp?_Pbey-?qUH|4Q zI7`871!pKYO~EM&W+>=TFiF991+5B3C}>tNKtZA4+0XRnpHc9nf^`ZWSJ0#2X9`v; z_<@2)6s%IPQo#)hKCPfp{-Y{L;gk()+PL3Ky%(=zoa=pK0LIu+J*P1ey2hzSYM&851Tp!0`74GwgpX zf1Tn-e1y&qwhvzn8iqMe<7GhlFGGezxT$cox7`Kz0^Hkh-@}E2;CQ$(aC1mJE+gMy z_-RX+4|gZr2DrE2zJm)x+TL*KaQVzPI*{gs|9@W?JADQhbyA0EFmgD*!D<}fNEly- zAl?D^Je(Q+4S+}B`q%03XrvIABmM)x&taUM2)_qVgJYWbXd$kJ+jJcB48VJo|9-&d zlz%Uv1{eDkSPcX26>xP(KLoH=`Hut6gQ2HC(yRw;jTdQIW)$FHxNO7^0sLIWdjNAG zC>h@hF$?f9xc;c*Cjo2VnD;o~!yR;bKL8Bx2tUe+0GtoE1^AZ&9`1~1BK`>A+87~r z!+!_hvv9Le{szGL*c*BV@uh&(a1pgS{8KmBnvmuWz~or$mqWZAa39%!n4Xx z*!K!Oz7o(h826BddvG3ua%lH8a0A=}mjM4>Ko1ds7{v$@aiPci!@Har{I>u zzX9+IIP%H^=o*gl5MK(Ig-PQU__G1G+Cew`m4F|?Q68!Ri>e04I+`8Avk)Fv0;G zztQ2N@KZhs+m43(BfcFV&M*;nrU4uRN1O>aj?rcH%vkJc&Cqen0DK*;0_k@H9vp}A z;C~y?Iv#YuPj~>1@`iI!#O4WlUYva*K80iXp8+OM)Z?k*=PJJw@J%?T*$eo+@}C6U zG8qo#6L!2B&xAh-FdI&s03Lwnrb52p7uN`}_{+PijR%}wq|5dUz~05E6Qqv^>{@~{;Ex47>I5zD 
ze*k#93$((2H(O#$@r0}3sDIV~=G>(FV{ZnYaE!kn z@Hiahjj-0gGv5soz84fp~aY1{;uzC@=h1Mt?R;5Xuz1Aegr?FfDk zV6+>2Liz0g3*lH!5#TX6Dd&J)R_Zbw3s|W9%K`6R4W1w`;nQ$TPxw6?%Q*>{ay!aH zyaRA29QEXGz&GzjKKS+wgP#J#6bcT59#%d?PI`T<{k4miMH3HTixaXtz7(FU}A#Crh8Y}EZz z0Gqz3<7@`p0JjBc2opBxc?my)+lzSfOVGtHgSYS#4%z}7zDHWX=i%75?gqT?6$-~r17eC2iU8~%u$$h%9=i%F-?d{Ra98`JMv(gnXxf=?Qx)Ki@0hdjO0l3ghKTm8{w#{OyWs%eYo;Elb8jj z8uw$falcD_xM~H$VfEqCdyyVx8|kb6f-rbwgdGo>#8jW>#p0e7E(s31gYkRui|65t z963@d*N%O%Q~ysQlWW#aC;?-oxy@q~EkrI$oyWudeQH8K1n;!}AZA&oF;(F3RV%nCbT($jv=~=gY7x zKQ|W#nB||veO^c-(=$J>{L=`Z#q$sBlwmxCYcU%ld|=?LB}L98#eCr|#{OpeK;wGy4ua+Ue=NGA-{T-*MXz?F_Pq$n*#H?tf(B z-ZN(plvOee{%F5U+Ir@!yv_9BZ_WK@G_Pt=_ZMrA>(V-tPy|x>tDr&4I8A4zw^#J;`7fx_sV$@w+F0; z4qg9ZQ%d^HMK^03bGM4cTTg7won0svql$93;)L+6wbLO9vloB!!}N{tEdI;q*J#?$ zxeMR;eA1*zTXWO;{|#{Z>``;aYT66awM8qsZM|mFbg@`_Wa|r)Sl&ykS^i>8`}*|h z^+HPV%|a~d&_N4p+CdZk%7J#Uu5o9iVR|}?&L{FHN%9Gb=vS^TUAl<4xHz0Y6fcGj z9V%=#n@CMfmAo4_Zk)L4s;k7+S6?lzz4ltMD0_sMIdi7C;f5Q;oH=tuadENK9}5>Q z6c4$EiND=6R6M^ZQIyY25i4gWi#xD;_P*jI@$!t%>W8 zYhuZgC1S;j6=Ln$wc@V3?h^OhbC0ma9E3w#~6 zkGMt~CvMc{io3P7;sxzl@wRr*(5E5-`8#Exe=Wk7WY=KMvmSF=8>1oEHq z$=|yx#!xoM`ZX8}VcxTTHRhX7VSa&esCFn^Xh-@8?Za_G`)sbzzFI4^x@U!U>Yx|@ z-Vw;(3HjrZ|4QUfM}Cx5mxuf}h70YsK01R#GC zyf_{`Bx+V zqsad}^1q7wyO1BxT>Am?e~$ch-u#z~-eNRL$U_OYqJ+m#!VZ-1F-oWl*Ti>yG;wmA zCQi-O#Obw~`0-gy{CrUJ=5LGqJ&}JH@{dRUEaabu{L7L5o^VY(-bWJ~#%W^vTumHU ztBDVu)kN*V`ut|(CkG;tzb*22MEw7B8Lo*beKaw5oF-PzMOkY#vGrL^96eZ{ z|9sUjUB7%Ilaq(rhTH5iDy>IsmzYkSx{mZFunixTnrgR?N**@MW}n_Owp+I@F(XHI zo~{zuQ*j&rsqonC)8o1`!N`$Tb$?WPa>}S-C_W|GHf*?U`sGZ}rAw!d>V9f^l5LbN z*`AF2#H`oladB5F9GPTP`Y7WX(Kf2}^j=875@I@av~~uPsVslgsMKMjrnhey9fkWy z5UUb&osRs&Y%Cyo)bzG3TSn{mC5|zjtUxl=TmJOOh?Xr!50wSPNh&+yKJd3Qzikxm zN3?7|TH%(~gD4>X^kKI2F{4JMjGEqt1w==+Zrv&*B&1y$apZj-#*&VwOmA~ug6U)f z?D^;{rh$HzpDZ6?aBcdK zD4@U)_$SNnlb8+`r>BfcmqbV$4gLfF>GnQJL}65V`t&v3Oq zSVD#9S88(q;mN%NOhIGR4e~104t1sOPrssfShFT3Q@8^d62B*G4v5BF6O_O*(c^JfX zl#w0$BYz-ZNx|XG$4*a9AC?GN^WMgN<8p+BUxCAak<{ll`EQpg5I?A2aw3|F?`>H= 
z>UWeKf|!~zU~q%mKINmLQ*DFK&yDtR{*``S;n1&I<>E7zvSHO2TvWo9>F$q==N6-k zhE8j2ELSFmH5kwR`Z&cH&y62HUXDi=<&5->J)S7NQY_C)5qB3Siw#Rt#QJ*|i9x$D zrhuMvysL?;Fm~XWp%n1WJMR?t-FKgO;DHCkLk~S99)0vtIaYZ7`RC<$aP#KPf@6il zl~0KKFy2^?vBHiWJH-C|`{mf+vrmqRPd@oXe1^r&+S*!i{P=P4-M1&?*x-jBeh|O> z@{3q|S`*J=tZ?vWA3fW~wX#Tc|>e9P+?;iA7+q8*l)3HZ~u;$G#!+1El zQ*4(m-MaJ+53_U+MYv7J&fU5pZFpFR-f_L-M7vHsVtcm_>K%>*%|k;kYuB+$Z13Qp z*vnLe)f(HQx7i#Li#*+1G!JbP)w)OTV6!=>SzusDcZ;P>K$A8-+6SA17|}bdLu6A^ z(@sd+yLbC$fh@mOlwV7Pf;$H_YZixjK*ug^{oCO#vb0AS&yQ;t(XDe_@7{6UfIpt! zBQ7o`F0Nyoq^}(=?#%7jQ3l0g3+&BJkq~2L=}dr)U5q!wlFgzDC;7r1b z-#>KuAvZ?s0wZ>>3xuCf{_nUhM*XinoA?I9MRw@Wp(7w&3|!Y{&6;5bB)^OZPGJZQ z^Eg~6oWXH;|0Ngs!*VgZxulEy4TGcjTOWMz!O7@%UOsW+#24Ru^Uas&dp`O4>#sit zJa+Qr$rG5De)ho!A3VEj*Dg2qJH-td!c%@&`|?#0mV?=qthe98Kh`xn<`!MMcI^yE zUeUaKDOc1r*IYAfATA_s3p?WNm`7_{w{F$G{PIf;{iz1K3k_7-v17+H%)d3>2MrqL z;2(NCo*K~l@u5S9{<3Y`w#CDT5ATO^puy!UBdr0$;fM?CHAp%7$z$GY1=k*~HEb3A z&YU^(6a45nFB$)6@cRJTV1Je|Xwaap#1D8${NH@@O$|0u4ftuBHf@r4!-k}N@x>Qf zRaKP+n}SToJQyP|{xjrx@Y!ddUHIaQFD}HeFQvS^d~8usk$v^*)fpI96SwYgajjdo z?g!h()V_WDl6O&dyFHfW!vDMQ|Ni^$HH>A}cJADH0C>3itFOM&VAJ*T`M`k#+I#Q4 zC-Fr6NZeUB)z#J7`|rOm!=%Y@81qO!WbWj*-+p@ra`!XJ^!)VGPuh3iefRz0!-rq` z^wUp|;aNX259s_aw6_7kVKwvqUi>kh((o*=BmRaXUk%4PF&yzHuLuo4!*m86gv7^i zz*#dK>RdCN5of@E2LIPydu=7^u@hvXg0yVkzFpEl`GCDiV;#WGB;|!TvQ3~3OZe7X zZ)tRF8>|mPwhy))=Gn7nkEDThi1x3s9$>4d3D($8@X%*v+(;Uw&Cboog5~%Qnn9V4c9OCLM9#y?eLDwsPdi5$Pul)B%(? 
zwiDJ7aodOw0_Ggmv|;--t^XUE*86o$i`%AYy(%?r{F|Eg*UvTWhf}BCLR)DE8ZPCM z_`mw5c^pSM*ljr0&@ue(~7UE6* zQ$`;?u4(pnG%e{6J|qnj{2;6u^xF*@c4%4;(9rc2P3weDE3WkAqR-o<(cK2Vx_P}AaH zm10$QsrbWAN?BgnuV24TkcA4;fqIbi7<3Slmy{#&zcD(ftY$l-ZfT4T%DZo!7;;5O{_{Tj1?Hum@x8A43~iJ&e25R@ zf`$~(u>Nb`6c_blXRC&tB8^_rKmPcmhJNNeM;Z)g(9u|(7_|7(@$=6=YX@I?KznS# zDDB?4!?ZgKlO$qOztFTvpK98;YJBmWG`y$UGh}9n(VnT3(4MK2x}qJhJ@u}z=D zI$I+YNc>UXD`C6n1ifBCTBr{R4H^h3_mt~~bQtnQS*I)=+q*$~V*U{A5!X=dFC|w> z8c4vk_&VOziiV71_{c19a)1VJdnOI4J@=wcf==xMozxLDbl9ZpRur?O0SyxWPMtb= z^*{P}_VX9i2iq_E1EW6J9vC*-3Gs*C(<<-E*8Xme{msZcfzx3K|{*4G%eW{7X(~+KgJz;L)_npn>g~I%yPi zlF^X1|i9g2oD+df1&9USuzk`~6XuQkSHyvP2QV+LNE%dXHiF1kW{ z7Bu`FG&~6!w%k*wojd{IxrD0*bk0GYB(Q%54aOLw+pBu!hWw|#Fyw#m;K5y47lvbd zCEmo1xEr)k_fy|C-G_wf9QKD|G;N8aVK7c#EE#bJ&?~v zolwX7j*mGWB8`;)*Op(Yy#g9GfrbqWK?B+X+cRlkd!|mh2YQt>tRW3I4A*X*1sZbg z+M?_fNdxiZ7=!(@F~%5x{ted!@=u+?K8)iCx+kA}Qv2KA{-$wUYB-}l2!E}ez%Hv* z{UuY|Rz67EvII1sEj$kz{-N44b<#twE42rTuhi}?8m8S*kfhz7Z`0hM;TF)a6f`UZ z4ZIqTF|I?~z;%KCA^wmBDgTh`3es}REw^Y-KmD|%$Do1mVswxu$`BfjY@(&+DL*?k^su-BpyN{TFCh4H|9(4a*=$OLFWIQR*a{&lux*w0m3^ z=zrpG$bVvDVprloJh3apO9Si0myTbf<(FU1Y9DXAS9^VBvbME6L3>%XXX+%QJwFPa zL>lf>H2ev2w0gEpTal;JKt$yj1LN&9xCfxG&S~172C~m!H0vDw?^^6*7*$YE@FsC( zdt@6}v}lo}!B@T-($cU_7^Y5OTllT(-5t+(#~5tSt5kb#kndN=Kg6GP?{GL+586{t zJtfCp9Mix)pv{{%&r8Fv(eZ0_LVT!`Hc%)1R(l2wyc*$kV^PuM6ZYC{gYMM2b?c^0 zpFUl~-VBZ7x{{I-&FOS%D_5?R?HVQ@sdqRQYK#{0o_zy#6~{sxk8(c1i|zTq<1X#& zPnd5!GEer;zGIAgRC`{7w!o_~WQ{o3Dl8c5PefSa-?L}Wf!NwpfKO-CVjL~&;o*lL z*2az{Lb+XLIQY|p3%_RmIprcUy0 z&wF2dkmJDn3|{{lyyA*0?!cZNX$Rr6Ra@4uM^VF`Bn>t#t$X+G8phTV7vjP3hOZ+H zhBN3OWO;OyDWh!a)_o6LuWebDg#K9{W3Yez+rnh+#s8YAy}xIpcIt;8-a>s}vi$*j zDYc4$2M)y(4$8W4SUyQ zJ+RN9%yaBx&_R5OKV_bDa&AD#_P{p6_F&WrA!U$b6Ql3p+?(@f$jk4tf3j^+{vk^b zVZYehY^PWe*A^^TAnSoNP_M$atWB9RMaHuZNQ2=>gE8;roRVW~%02NUqzsWZL%tZ# zF%`$$&?SF@?P(zP8Q#u0#6N>S<(`hRM0+!7B5t%fWACnPzsbqTT2fMyhTSQ$9(bR5 z3>pXxxiad+XeVqR#NB|DBhDEpR~!ej?hN||@aJ+{2;8MyQh$Q(l~@Ak1RhqfJ-_nG 
zE0PBChjv4>4@pC2W~P>wmL_EjYw{9*@|rv+EySDcfcl5=oFhM$Wq^GY`ONfm43h@xQSy}f|GDR$)1GnC*>_xUk--7a*r{EP-_rsaw(3W&cK9L;0r65r4J= zwoBFt<%D%Wdlv5-7yT@kb;SB$e-3&d1K!k|TqnHLOX6?HDWR_nlLo^PH_}BJVq3WN z)>}2`ERFrN??v1U$2`<6U9_=Jn>J0uzFaBa2G4yV(@~Zv>x^d~K-`V>5zLVu_yf30{P_%H{)@h6 zHRY0h5b0nWB%i21sO#7t`PK<}yK&=2?cRIumGVq}8;*2ScGz|}Zeo}+!}8b{KxaOI zqa^-Wz7uy|(Zt8MUnKs9BMpZ8wK`!L*b^vD9IEyA5UcS^lW957{PXpA}8DbHnD(ug3d1bvlGkhW~ZYUzKN4 z5O2TWdAR-{G|Dr|_rbf2#Uos)VD%q?ixr-l4owRBE7(*)i-H{#9H!u61vMR-2xEbd z6)p+xpWLSvfUF$f(ovo-I#xtYFM)#_=GR8CIL zIE?r0u=(2%9}mYdcO;xwPFePcXEbdd)_NT8=xc1W1#?ZIFV=RZ!8ZKPcM_#v!-2H6 z!`JKwVQx1UZTbxL8OEGHK;PD&FE|dpbQI(BhhZnX0b^i0()WSugz?6SnONiCIvH)D zNwD8?&714RZ-1|82Vj-M`$)XLLHdhb?V-aT!86ZLe^N(szC;}8j5Q23=069$`aN{+ zThN=ESuod3x!%ZiF0QZ8CeHPt!zVSZ3bbFsT5kvHY1kDqVSBoYIB@L5xhMNa>UH)- z)YTlza-70@^z*s~YgJtHBGZ4;sM+f z{M_lIcmQhy<3S(6sK7eG3GdbzQ8`}elQ{FZ@-98vf47;rnd*7K=RhX-*3S&x=CD5S z8S;n}Xj8#n!H>cGs`ZhzEMPQXePB%J<8`mPOTYB=@q;|R$h?Z5=KVL%3s;a=CKnhf zp9$uxy>#~H8e`QwppUH!^ik{g-I!o44f?nMr;9^hcuGonh1P05ppBk$q=2`9k$@M1 zQyu+X-;$W&nXjxJm=NYE9$3%Am;ke-`1{AN&^wfS3WdUEd{Cxg(Dr%7%cj0JrJ=K@~=YjhY_h5q*Y?u3*Z6J8%1BnOqb{>L{H4w2Hn%T{aw@jm`J z_YE(|faA%i)8U5TRbaPZhG5U56}*r!0X_m&SQJL(WdN~i8x3}=GN=EHE$!{?k^SGy zSj6rS7ZBSa6KoIiI6L)$efKv>hZB&WMjyfO$xZt)0nP;02ToR~@Bp#o#4L{gQ_=qG zzxZa(2F39&(Lu%`K0inJpdt?{d>|+K;k(LvZ~nnyreJly5D(y`3(9%+5BfWI?o937 zySEi=2!9+tkU4QRF*SKp_&^reBYvZR-?9$CkBP97_| zi!R<`Y{HZ8pX$D$FW!DEX=grp-+uo0Q{U>N#sv5^`nYH4Zpbe>|I4=GZ^xl8EaIc4 zCcL_zIc!?bRIodC>~QfNJYg3Z2k?Om=pSVB#ngxP&taE+@!@8-7QpMH*>W^a3Y`=7Lb+jezzxjeAeNV?pGkA09i@<9H?cjyB0 zfd7SsHg(=f=~nezJC8megp2(|3M^He{xJq=3*QcTlgmX1$)$s9Fh1bZ=dS1j>;l)p3$nm&V>99N z*s)`~MED0w5!df-{0qNLX&+rHv}u!mh6g<-!0NdIcK!PGjyHIqat>pF^UxdoB@}&m#LDd7>qZnj^{5sY(HYv8+qc~9I1>XkeqSN?r-kY zJD}SqUfE|K>z(xtJ1X%T@gwmou>!Hm8pTRah%c~KVsdy#|DpTz&^qVie4!X@k9ejH zVsTA|i}-`sgV+aOmw2Q6visrLyh*#^43Z{eeBL=7s;6%l)++Ip6m`vh#k|gyu=;GanHr z&^C5ibAO@wdh9fO(+;xG?`}LXXOmCjTH)o{F z`i|c0Eva1ghOIZ&CbFrX&Q0mumR7w)P%pV`sJ-!qHMNb`UXw`FZy6ZOxi?B)WNSCg 
zUZ*Q_>5f6YJae0CC!tb5knSH8mGphdfzGXZx8VKh+}w}VKi`qprMzwPuJqP)ccPoW z4YjGTsV<}VWE9FbY z(r{^aX{2ddXC#UxFe*=RDR&*V+f44d6%#02JmIc&zw>&D;7 zl$mjNvm&aARz`_vRkS8*i8i{s%SXj%INBYJL_u^Q8jt$+<@bs3<#Jg9-!lpP2Z@f) A[^-]+) +-(?P\d+[^-]*) +(-(?P\d+[^-]*))? +-(?P\w+\d+(\.\w+\d+)*) +-(?P\w+) +-(?P\w+) +\.whl$ +''', re.IGNORECASE | re.VERBOSE) + +NAME_VERSION_RE = re.compile(r''' +(?P[^-]+) +-(?P\d+[^-]*) +(-(?P\d+[^-]*))?$ +''', re.IGNORECASE | re.VERBOSE) + +SHEBANG_RE = re.compile(br'\s*#![^\r\n]*') +SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$') +SHEBANG_PYTHON = b'#!python' +SHEBANG_PYTHONW = b'#!pythonw' + +if os.sep == '/': + to_posix = lambda o: o +else: + to_posix = lambda o: o.replace(os.sep, '/') + + +class Mounter(object): + def __init__(self): + self.impure_wheels = {} + self.libs = {} + + def add(self, pathname, extensions): + self.impure_wheels[pathname] = extensions + self.libs.update(extensions) + + def remove(self, pathname): + extensions = self.impure_wheels.pop(pathname) + for k, v in extensions: + if k in self.libs: + del self.libs[k] + + def find_module(self, fullname, path=None): + if fullname in self.libs: + result = self + else: + result = None + return result + + def load_module(self, fullname): + if fullname in sys.modules: + result = sys.modules[fullname] + else: + if fullname not in self.libs: + raise ImportError('unable to find extension for %s' % fullname) + result = imp.load_dynamic(fullname, self.libs[fullname]) + result.__loader__ = self + parts = fullname.rsplit('.', 1) + if len(parts) > 1: + result.__package__ = parts[0] + return result + +_hook = Mounter() + + +class Wheel(object): + """ + Class to build and install from Wheel files (PEP 427). + """ + + wheel_version = (1, 1) + hash_kind = 'sha256' + + def __init__(self, filename=None, sign=False, verify=False): + """ + Initialise an instance using a (valid) filename. 
+ """ + self.sign = sign + self.should_verify = verify + self.buildver = '' + self.pyver = [PYVER] + self.abi = ['none'] + self.arch = ['any'] + self.dirname = os.getcwd() + if filename is None: + self.name = 'dummy' + self.version = '0.1' + self._filename = self.filename + else: + m = NAME_VERSION_RE.match(filename) + if m: + info = m.groupdict('') + self.name = info['nm'] + # Reinstate the local version separator + self.version = info['vn'].replace('_', '-') + self.buildver = info['bn'] + self._filename = self.filename + else: + dirname, filename = os.path.split(filename) + m = FILENAME_RE.match(filename) + if not m: + raise DistlibException('Invalid name or ' + 'filename: %r' % filename) + if dirname: + self.dirname = os.path.abspath(dirname) + self._filename = filename + info = m.groupdict('') + self.name = info['nm'] + self.version = info['vn'] + self.buildver = info['bn'] + self.pyver = info['py'].split('.') + self.abi = info['bi'].split('.') + self.arch = info['ar'].split('.') + + @property + def filename(self): + """ + Build and return a filename from the various components. 
+ """ + if self.buildver: + buildver = '-' + self.buildver + else: + buildver = '' + pyver = '.'.join(self.pyver) + abi = '.'.join(self.abi) + arch = '.'.join(self.arch) + # replace - with _ as a local version separator + version = self.version.replace('-', '_') + return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, + pyver, abi, arch) + + @property + def exists(self): + path = os.path.join(self.dirname, self.filename) + return os.path.isfile(path) + + @property + def tags(self): + for pyver in self.pyver: + for abi in self.abi: + for arch in self.arch: + yield pyver, abi, arch + + @cached_property + def metadata(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + wrapper = codecs.getreader('utf-8') + with ZipFile(pathname, 'r') as zf: + wheel_metadata = self.get_wheel_metadata(zf) + wv = wheel_metadata['Wheel-Version'].split('.', 1) + file_version = tuple([int(i) for i in wv]) + if file_version < (1, 1): + fn = 'METADATA' + else: + fn = METADATA_FILENAME + try: + metadata_filename = posixpath.join(info_dir, fn) + with zf.open(metadata_filename) as bf: + wf = wrapper(bf) + result = Metadata(fileobj=wf) + except KeyError: + raise ValueError('Invalid wheel, because %s is ' + 'missing' % fn) + return result + + def get_wheel_metadata(self, zf): + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + metadata_filename = posixpath.join(info_dir, 'WHEEL') + with zf.open(metadata_filename) as bf: + wf = codecs.getreader('utf-8')(bf) + message = message_from_file(wf) + return dict(message) + + @cached_property + def info(self): + pathname = os.path.join(self.dirname, self.filename) + with ZipFile(pathname, 'r') as zf: + result = self.get_wheel_metadata(zf) + return result + + def process_shebang(self, data): + m = SHEBANG_RE.match(data) + if m: + end = m.end() + shebang, data_after_shebang = data[:end], data[end:] + # Preserve any 
arguments after the interpreter + if b'pythonw' in shebang.lower(): + shebang_python = SHEBANG_PYTHONW + else: + shebang_python = SHEBANG_PYTHON + m = SHEBANG_DETAIL_RE.match(shebang) + if m: + args = b' ' + m.groups()[-1] + else: + args = b'' + shebang = shebang_python + args + data = shebang + data_after_shebang + else: + cr = data.find(b'\r') + lf = data.find(b'\n') + if cr < 0 or cr > lf: + term = b'\n' + else: + if data[cr:cr + 2] == b'\r\n': + term = b'\r\n' + else: + term = b'\r' + data = SHEBANG_PYTHON + term + data + return data + + def get_hash(self, data, hash_kind=None): + if hash_kind is None: + hash_kind = self.hash_kind + try: + hasher = getattr(hashlib, hash_kind) + except AttributeError: + raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) + result = hasher(data).digest() + result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') + return hash_kind, result + + def write_record(self, records, record_path, base): + with CSVWriter(record_path) as writer: + for row in records: + writer.writerow(row) + p = to_posix(os.path.relpath(record_path, base)) + writer.writerow((p, '', '')) + + def write_records(self, info, libdir, archive_paths): + records = [] + distinfo, info_dir = info + hasher = getattr(hashlib, self.hash_kind) + for ap, p in archive_paths: + with open(p, 'rb') as f: + data = f.read() + digest = '%s=%s' % self.get_hash(data) + size = os.path.getsize(p) + records.append((ap, digest, size)) + + p = os.path.join(distinfo, 'RECORD') + self.write_record(records, p, libdir) + ap = to_posix(os.path.join(info_dir, 'RECORD')) + archive_paths.append((ap, p)) + + def build_zip(self, pathname, archive_paths): + with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: + for ap, p in archive_paths: + logger.debug('Wrote %s to %s in wheel', p, ap) + zf.write(p, ap) + + def build(self, paths, tags=None, wheel_version=None): + """ + Build a wheel from files in specified paths, and use any specified tags + when determining the 
name of the wheel. + """ + if tags is None: + tags = {} + + libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] + if libkey == 'platlib': + is_pure = 'false' + default_pyver = [IMPVER] + default_abi = [ABI] + default_arch = [ARCH] + else: + is_pure = 'true' + default_pyver = [PYVER] + default_abi = ['none'] + default_arch = ['any'] + + self.pyver = tags.get('pyver', default_pyver) + self.abi = tags.get('abi', default_abi) + self.arch = tags.get('arch', default_arch) + + libdir = paths[libkey] + + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + archive_paths = [] + + # First, stuff which is not in site-packages + for key in ('data', 'headers', 'scripts'): + if key not in paths: + continue + path = paths[key] + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + for fn in files: + p = fsdecode(os.path.join(root, fn)) + rp = os.path.relpath(p, path) + ap = to_posix(os.path.join(data_dir, key, rp)) + archive_paths.append((ap, p)) + if key == 'scripts' and not p.endswith('.exe'): + with open(p, 'rb') as f: + data = f.read() + data = self.process_shebang(data) + with open(p, 'wb') as f: + f.write(data) + + # Now, stuff which is in site-packages, other than the + # distinfo stuff. + path = libdir + distinfo = None + for root, dirs, files in os.walk(path): + if root == path: + # At the top level only, save distinfo for later + # and skip it for now + for i, dn in enumerate(dirs): + dn = fsdecode(dn) + if dn.endswith('.dist-info'): + distinfo = os.path.join(root, dn) + del dirs[i] + break + assert distinfo, '.dist-info directory expected, not found' + + for fn in files: + # comment out next suite to leave .pyc files in + if fsdecode(fn).endswith(('.pyc', '.pyo')): + continue + p = os.path.join(root, fn) + rp = to_posix(os.path.relpath(p, path)) + archive_paths.append((rp, p)) + + # Now distinfo. Assumed to be flat, i.e. os.listdir is enough. 
+ files = os.listdir(distinfo) + for fn in files: + if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'): + p = fsdecode(os.path.join(distinfo, fn)) + ap = to_posix(os.path.join(info_dir, fn)) + archive_paths.append((ap, p)) + + wheel_metadata = [ + 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version), + 'Generator: distlib %s' % __version__, + 'Root-Is-Purelib: %s' % is_pure, + ] + for pyver, abi, arch in self.tags: + wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch)) + p = os.path.join(distinfo, 'WHEEL') + with open(p, 'w') as f: + f.write('\n'.join(wheel_metadata)) + ap = to_posix(os.path.join(info_dir, 'WHEEL')) + archive_paths.append((ap, p)) + + # Now, at last, RECORD. + # Paths in here are archive paths - nothing else makes sense. + self.write_records((distinfo, info_dir), libdir, archive_paths) + # Now, ready to build the zip file + pathname = os.path.join(self.dirname, self.filename) + self.build_zip(pathname, archive_paths) + return pathname + + def install(self, paths, maker, **kwargs): + """ + Install a wheel to the specified paths. If kwarg ``warner`` is + specified, it should be a callable, which will be called with two + tuples indicating the wheel version of this software and the wheel + version in the file, if there is a discrepancy in the versions. + This can be used to issue any warnings to raise any exceptions. + If kwarg ``lib_only`` is True, only the purelib/platlib files are + installed, and the headers, scripts, data and dist-info metadata are + not written. + + The return value is a :class:`InstalledDistribution` instance unless + ``options.lib_only`` is True, in which case the return value is ``None``. 
+ """ + + dry_run = maker.dry_run + warner = kwargs.get('warner') + lib_only = kwargs.get('lib_only', False) + + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + metadata_name = posixpath.join(info_dir, METADATA_FILENAME) + wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') + record_name = posixpath.join(info_dir, 'RECORD') + + wrapper = codecs.getreader('utf-8') + + with ZipFile(pathname, 'r') as zf: + with zf.open(wheel_metadata_name) as bwf: + wf = wrapper(bwf) + message = message_from_file(wf) + wv = message['Wheel-Version'].split('.', 1) + file_version = tuple([int(i) for i in wv]) + if (file_version != self.wheel_version) and warner: + warner(self.wheel_version, file_version) + + if message['Root-Is-Purelib'] == 'true': + libdir = paths['purelib'] + else: + libdir = paths['platlib'] + + records = {} + with zf.open(record_name) as bf: + with CSVReader(stream=bf) as reader: + for row in reader: + p = row[0] + records[p] = row + + data_pfx = posixpath.join(data_dir, '') + info_pfx = posixpath.join(info_dir, '') + script_pfx = posixpath.join(data_dir, 'scripts', '') + + # make a new instance rather than a copy of maker's, + # as we mutate it + fileop = FileOperator(dry_run=dry_run) + fileop.record = True # so we can rollback if needed + + bc = not sys.dont_write_bytecode # Double negatives. Lovely! 
+ + outfiles = [] # for RECORD writing + + # for script copying/shebang processing + workdir = tempfile.mkdtemp() + # set target dir later + # we default add_launchers to False, as the + # Python Launcher should be used instead + maker.source_dir = workdir + maker.target_dir = None + try: + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + # The signature file won't be in RECORD, + # and we don't currently don't do anything with it + if u_arcname.endswith('/RECORD.jws'): + continue + row = records[u_arcname] + if row[2] and str(zinfo.file_size) != row[2]: + raise DistlibException('size mismatch for ' + '%s' % u_arcname) + if row[1]: + kind, value = row[1].split('=', 1) + with zf.open(arcname) as bf: + data = bf.read() + _, digest = self.get_hash(data, kind) + if digest != value: + raise DistlibException('digest mismatch for ' + '%s' % arcname) + + if lib_only and u_arcname.startswith((info_pfx, data_pfx)): + logger.debug('lib_only: skipping %s', u_arcname) + continue + is_script = (u_arcname.startswith(script_pfx) + and not u_arcname.endswith('.exe')) + + if u_arcname.startswith(data_pfx): + _, where, rp = u_arcname.split('/', 2) + outfile = os.path.join(paths[where], convert_path(rp)) + else: + # meant for site-packages. 
+ if u_arcname in (wheel_metadata_name, record_name): + continue + outfile = os.path.join(libdir, convert_path(u_arcname)) + if not is_script: + with zf.open(arcname) as bf: + fileop.copy_stream(bf, outfile) + outfiles.append(outfile) + # Double check the digest of the written file + if not dry_run and row[1]: + with open(outfile, 'rb') as bf: + data = bf.read() + _, newdigest = self.get_hash(data, kind) + if newdigest != digest: + raise DistlibException('digest mismatch ' + 'on write for ' + '%s' % outfile) + if bc and outfile.endswith('.py'): + try: + pyc = fileop.byte_compile(outfile) + outfiles.append(pyc) + except Exception: + # Don't give up if byte-compilation fails, + # but log it and perhaps warn the user + logger.warning('Byte-compilation failed', + exc_info=True) + else: + fn = os.path.basename(convert_path(arcname)) + workname = os.path.join(workdir, fn) + with zf.open(arcname) as bf: + fileop.copy_stream(bf, workname) + + dn, fn = os.path.split(outfile) + maker.target_dir = dn + filenames = maker.make(fn) + fileop.set_executable_mode(filenames) + outfiles.extend(filenames) + + if lib_only: + logger.debug('lib_only: returning None') + dist = None + else: + # Generate scripts + + # Try to get pydist.json so we can see if there are + # any commands to generate. If this fails (e.g. because + # of a legacy wheel), log a warning but don't give up. 
+ commands = None + file_version = self.info['Wheel-Version'] + if file_version == '1.0': + # Use legacy info + ep = posixpath.join(info_dir, 'entry_points.txt') + try: + with zf.open(ep) as bwf: + epdata = read_exports(bwf) + commands = {} + for key in ('console', 'gui'): + k = '%s_scripts' % key + if k in epdata: + commands['wrap_%s' % key] = d = {} + for v in epdata[k].values(): + s = '%s:%s' % (v.prefix, v.suffix) + if v.flags: + s += ' %s' % v.flags + d[v.name] = s + except Exception: + logger.warning('Unable to read legacy script ' + 'metadata, so cannot generate ' + 'scripts') + else: + try: + with zf.open(metadata_name) as bwf: + wf = wrapper(bwf) + commands = json.load(wf).get('extensions') + if commands: + commands = commands.get('python.commands') + except Exception: + logger.warning('Unable to read JSON metadata, so ' + 'cannot generate scripts') + if commands: + console_scripts = commands.get('wrap_console', {}) + gui_scripts = commands.get('wrap_gui', {}) + if console_scripts or gui_scripts: + script_dir = paths.get('scripts', '') + if not os.path.isdir(script_dir): + raise ValueError('Valid script path not ' + 'specified') + maker.target_dir = script_dir + for k, v in console_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script) + fileop.set_executable_mode(filenames) + + if gui_scripts: + options = {'gui': True } + for k, v in gui_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script, options) + fileop.set_executable_mode(filenames) + + p = os.path.join(libdir, info_dir) + dist = InstalledDistribution(p) + + # Write SHARED + paths = dict(paths) # don't change passed in dict + del paths['purelib'] + del paths['platlib'] + paths['lib'] = libdir + p = dist.write_shared_locations(paths, dry_run) + if p: + outfiles.append(p) + + # Write RECORD + dist.write_installed_files(outfiles, paths['prefix'], + dry_run) + return dist + except Exception: # pragma: no cover + logger.exception('installation failed.') 
+ fileop.rollback() + raise + finally: + shutil.rmtree(workdir) + + def _get_dylib_cache(self): + global cache + if cache is None: + # Use native string to avoid issues on 2.x: see Python #20140. + base = os.path.join(get_cache_base(), str('dylib-cache'), + sys.version[:3]) + cache = Cache(base) + return cache + + def _get_extensions(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + arcname = posixpath.join(info_dir, 'EXTENSIONS') + wrapper = codecs.getreader('utf-8') + result = [] + with ZipFile(pathname, 'r') as zf: + try: + with zf.open(arcname) as bf: + wf = wrapper(bf) + extensions = json.load(wf) + cache = self._get_dylib_cache() + prefix = cache.prefix_to_dir(pathname) + cache_base = os.path.join(cache.base, prefix) + if not os.path.isdir(cache_base): + os.makedirs(cache_base) + for name, relpath in extensions.items(): + dest = os.path.join(cache_base, convert_path(relpath)) + if not os.path.exists(dest): + extract = True + else: + file_time = os.stat(dest).st_mtime + file_time = datetime.datetime.fromtimestamp(file_time) + info = zf.getinfo(relpath) + wheel_time = datetime.datetime(*info.date_time) + extract = wheel_time > file_time + if extract: + zf.extract(relpath, cache_base) + result.append((name, dest)) + except KeyError: + pass + return result + + def is_compatible(self): + """ + Determine if a wheel is compatible with the running system. + """ + return is_compatible(self) + + def is_mountable(self): + """ + Determine if a wheel is asserted as mountable by its metadata. + """ + return True # for now - metadata details TBD + + def mount(self, append=False): + pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) + if not self.is_compatible(): + msg = 'Wheel %s not compatible with this Python.' % pathname + raise DistlibException(msg) + if not self.is_mountable(): + msg = 'Wheel %s is marked as not mountable.' 
% pathname + raise DistlibException(msg) + if pathname in sys.path: + logger.debug('%s already in path', pathname) + else: + if append: + sys.path.append(pathname) + else: + sys.path.insert(0, pathname) + extensions = self._get_extensions() + if extensions: + if _hook not in sys.meta_path: + sys.meta_path.append(_hook) + _hook.add(pathname, extensions) + + def unmount(self): + pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) + if pathname not in sys.path: + logger.debug('%s not in path', pathname) + else: + sys.path.remove(pathname) + if pathname in _hook.impure_wheels: + _hook.remove(pathname) + if not _hook.impure_wheels: + if _hook in sys.meta_path: + sys.meta_path.remove(_hook) + + def verify(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + metadata_name = posixpath.join(info_dir, METADATA_FILENAME) + wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') + record_name = posixpath.join(info_dir, 'RECORD') + + wrapper = codecs.getreader('utf-8') + + with ZipFile(pathname, 'r') as zf: + with zf.open(wheel_metadata_name) as bwf: + wf = wrapper(bwf) + message = message_from_file(wf) + wv = message['Wheel-Version'].split('.', 1) + file_version = tuple([int(i) for i in wv]) + # TODO version verification + + records = {} + with zf.open(record_name) as bf: + with CSVReader(stream=bf) as reader: + for row in reader: + p = row[0] + records[p] = row + + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + if '..' 
in u_arcname: + raise DistlibException('invalid entry in ' + 'wheel: %r' % u_arcname) + + # The signature file won't be in RECORD, + # and we don't currently don't do anything with it + if u_arcname.endswith('/RECORD.jws'): + continue + row = records[u_arcname] + if row[2] and str(zinfo.file_size) != row[2]: + raise DistlibException('size mismatch for ' + '%s' % u_arcname) + if row[1]: + kind, value = row[1].split('=', 1) + with zf.open(arcname) as bf: + data = bf.read() + _, digest = self.get_hash(data, kind) + if digest != value: + raise DistlibException('digest mismatch for ' + '%s' % arcname) + + def update(self, modifier, dest_dir=None, **kwargs): + """ + Update the contents of a wheel in a generic way. The modifier should + be a callable which expects a dictionary argument: its keys are + archive-entry paths, and its values are absolute filesystem paths + where the contents the corresponding archive entries can be found. The + modifier is free to change the contents of the files pointed to, add + new entries and remove entries, before returning. This method will + extract the entire contents of the wheel to a temporary location, call + the modifier, and then use the passed (and possibly updated) + dictionary to write a new wheel. If ``dest_dir`` is specified, the new + wheel is written there -- otherwise, the original wheel is overwritten. + + The modifier should return True if it updated the wheel, else False. + This method returns the same value the modifier returns. 
+ """ + + def get_version(path_map, info_dir): + version = path = None + key = '%s/%s' % (info_dir, METADATA_FILENAME) + if key not in path_map: + key = '%s/PKG-INFO' % info_dir + if key in path_map: + path = path_map[key] + version = Metadata(path=path).version + return version, path + + def update_version(version, path): + updated = None + try: + v = NormalizedVersion(version) + i = version.find('-') + if i < 0: + updated = '%s+1' % version + else: + parts = [int(s) for s in version[i + 1:].split('.')] + parts[-1] += 1 + updated = '%s+%s' % (version[:i], + '.'.join(str(i) for i in parts)) + except UnsupportedVersionError: + logger.debug('Cannot update non-compliant (PEP-440) ' + 'version %r', version) + if updated: + md = Metadata(path=path) + md.version = updated + legacy = not path.endswith(METADATA_FILENAME) + md.write(path=path, legacy=legacy) + logger.debug('Version updated from %r to %r', version, + updated) + + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + record_name = posixpath.join(info_dir, 'RECORD') + with tempdir() as workdir: + with ZipFile(pathname, 'r') as zf: + path_map = {} + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + if u_arcname == record_name: + continue + if '..' in u_arcname: + raise DistlibException('invalid entry in ' + 'wheel: %r' % u_arcname) + zf.extract(zinfo, workdir) + path = os.path.join(workdir, convert_path(u_arcname)) + path_map[u_arcname] = path + + # Remember the version. + original_version, _ = get_version(path_map, info_dir) + # Files extracted. Call the modifier. + modified = modifier(path_map, **kwargs) + if modified: + # Something changed - need to build a new wheel. 
+ current_version, path = get_version(path_map, info_dir) + if current_version and (current_version == original_version): + # Add or update local version to signify changes. + update_version(current_version, path) + # Decide where the new wheel goes. + if dest_dir is None: + fd, newpath = tempfile.mkstemp(suffix='.whl', + prefix='wheel-update-', + dir=workdir) + os.close(fd) + else: + if not os.path.isdir(dest_dir): + raise DistlibException('Not a directory: %r' % dest_dir) + newpath = os.path.join(dest_dir, self.filename) + archive_paths = list(path_map.items()) + distinfo = os.path.join(workdir, info_dir) + info = distinfo, info_dir + self.write_records(info, workdir, archive_paths) + self.build_zip(newpath, archive_paths) + if dest_dir is None: + shutil.copyfile(newpath, pathname) + return modified + +def compatible_tags(): + """ + Return (pyver, abi, arch) tuples compatible with this Python. + """ + versions = [VER_SUFFIX] + major = VER_SUFFIX[0] + for minor in range(sys.version_info[1] - 1, - 1, -1): + versions.append(''.join([major, str(minor)])) + + abis = [] + for suffix, _, _ in imp.get_suffixes(): + if suffix.startswith('.abi'): + abis.append(suffix.split('.', 2)[1]) + abis.sort() + if ABI != 'none': + abis.insert(0, ABI) + abis.append('none') + result = [] + + arches = [ARCH] + if sys.platform == 'darwin': + m = re.match('(\w+)_(\d+)_(\d+)_(\w+)$', ARCH) + if m: + name, major, minor, arch = m.groups() + minor = int(minor) + matches = [arch] + if arch in ('i386', 'ppc'): + matches.append('fat') + if arch in ('i386', 'ppc', 'x86_64'): + matches.append('fat3') + if arch in ('ppc64', 'x86_64'): + matches.append('fat64') + if arch in ('i386', 'x86_64'): + matches.append('intel') + if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'): + matches.append('universal') + while minor >= 0: + for match in matches: + s = '%s_%s_%s_%s' % (name, major, minor, match) + if s != ARCH: # already there + arches.append(s) + minor -= 1 + + # Most specific - our Python 
version, ABI and arch + for abi in abis: + for arch in arches: + result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) + + # where no ABI / arch dependency, but IMP_PREFIX dependency + for i, version in enumerate(versions): + result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) + if i == 0: + result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) + + # no IMP_PREFIX, ABI or arch dependency + for i, version in enumerate(versions): + result.append((''.join(('py', version)), 'none', 'any')) + if i == 0: + result.append((''.join(('py', version[0])), 'none', 'any')) + return set(result) + + +COMPATIBLE_TAGS = compatible_tags() + +del compatible_tags + + +def is_compatible(wheel, tags=None): + if not isinstance(wheel, Wheel): + wheel = Wheel(wheel) # assume it's a filename + result = False + if tags is None: + tags = COMPATIBLE_TAGS + for ver, abi, arch in tags: + if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: + result = True + break + return result diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/__init__.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/__init__.py new file mode 100644 index 00000000..19a4b7d6 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/__init__.py @@ -0,0 +1,23 @@ +""" +HTML parsing library based on the WHATWG "HTML5" +specification. The parser is designed to be compatible with existing +HTML found in the wild and implements well-defined error recovery that +is largely compatible with modern desktop web browsers. 
+ +Example usage: + +import html5lib +f = open("my_document.html") +tree = html5lib.parse(f) +""" + +from __future__ import absolute_import, division, unicode_literals + +from .html5parser import HTMLParser, parse, parseFragment +from .treebuilders import getTreeBuilder +from .treewalkers import getTreeWalker +from .serializer import serialize + +__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", + "getTreeWalker", "serialize"] +__version__ = "0.999" diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/constants.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/constants.py new file mode 100644 index 00000000..e7089846 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/constants.py @@ -0,0 +1,3104 @@ +from __future__ import absolute_import, division, unicode_literals + +import string +import gettext +_ = gettext.gettext + +EOF = None + +E = { + "null-character": + _("Null character in input stream, replaced with U+FFFD."), + "invalid-codepoint": + _("Invalid codepoint in stream."), + "incorrectly-placed-solidus": + _("Solidus (/) incorrectly placed in tag."), + "incorrect-cr-newline-entity": + _("Incorrect CR newline entity, replaced with LF."), + "illegal-windows-1252-entity": + _("Entity used with illegal number (windows-1252 reference)."), + "cant-convert-numeric-entity": + _("Numeric entity couldn't be converted to character " + "(codepoint U+%(charAsInt)08x)."), + "illegal-codepoint-for-numeric-entity": + _("Numeric entity represents an illegal codepoint: " + "U+%(charAsInt)08x."), + "numeric-entity-without-semicolon": + _("Numeric entity didn't end with ';'."), + "expected-numeric-entity-but-got-eof": + _("Numeric entity expected. Got end of file instead."), + "expected-numeric-entity": + _("Numeric entity expected but none found."), + "named-entity-without-semicolon": + _("Named entity didn't end with ';'."), + "expected-named-entity": + _("Named entity expected. 
Got none."), + "attributes-in-end-tag": + _("End tag contains unexpected attributes."), + 'self-closing-flag-on-end-tag': + _("End tag contains unexpected self-closing flag."), + "expected-tag-name-but-got-right-bracket": + _("Expected tag name. Got '>' instead."), + "expected-tag-name-but-got-question-mark": + _("Expected tag name. Got '?' instead. (HTML doesn't " + "support processing instructions.)"), + "expected-tag-name": + _("Expected tag name. Got something else instead"), + "expected-closing-tag-but-got-right-bracket": + _("Expected closing tag. Got '>' instead. Ignoring ''."), + "expected-closing-tag-but-got-eof": + _("Expected closing tag. Unexpected end of file."), + "expected-closing-tag-but-got-char": + _("Expected closing tag. Unexpected character '%(data)s' found."), + "eof-in-tag-name": + _("Unexpected end of file in the tag name."), + "expected-attribute-name-but-got-eof": + _("Unexpected end of file. Expected attribute name instead."), + "eof-in-attribute-name": + _("Unexpected end of file in attribute name."), + "invalid-character-in-attribute-name": + _("Invalid character in attribute name"), + "duplicate-attribute": + _("Dropped duplicate attribute on tag."), + "expected-end-of-tag-name-but-got-eof": + _("Unexpected end of file. Expected = or end of tag."), + "expected-attribute-value-but-got-eof": + _("Unexpected end of file. Expected attribute value."), + "expected-attribute-value-but-got-right-bracket": + _("Expected attribute value. 
Got '>' instead."), + 'equals-in-unquoted-attribute-value': + _("Unexpected = in unquoted attribute"), + 'unexpected-character-in-unquoted-attribute-value': + _("Unexpected character in unquoted attribute"), + "invalid-character-after-attribute-name": + _("Unexpected character after attribute name."), + "unexpected-character-after-attribute-value": + _("Unexpected character after attribute value."), + "eof-in-attribute-value-double-quote": + _("Unexpected end of file in attribute value (\")."), + "eof-in-attribute-value-single-quote": + _("Unexpected end of file in attribute value (')."), + "eof-in-attribute-value-no-quotes": + _("Unexpected end of file in attribute value."), + "unexpected-EOF-after-solidus-in-tag": + _("Unexpected end of file in tag. Expected >"), + "unexpected-character-after-solidus-in-tag": + _("Unexpected character after / in tag. Expected >"), + "expected-dashes-or-doctype": + _("Expected '--' or 'DOCTYPE'. Not found."), + "unexpected-bang-after-double-dash-in-comment": + _("Unexpected ! after -- in comment"), + "unexpected-space-after-double-dash-in-comment": + _("Unexpected space after -- in comment"), + "incorrect-comment": + _("Incorrect comment."), + "eof-in-comment": + _("Unexpected end of file in comment."), + "eof-in-comment-end-dash": + _("Unexpected end of file in comment (-)"), + "unexpected-dash-after-double-dash-in-comment": + _("Unexpected '-' after '--' found in comment."), + "eof-in-comment-double-dash": + _("Unexpected end of file in comment (--)."), + "eof-in-comment-end-space-state": + _("Unexpected end of file in comment."), + "eof-in-comment-end-bang-state": + _("Unexpected end of file in comment."), + "unexpected-char-in-comment": + _("Unexpected character in comment found."), + "need-space-after-doctype": + _("No space after literal string 'DOCTYPE'."), + "expected-doctype-name-but-got-right-bracket": + _("Unexpected > character. 
Expected DOCTYPE name."), + "expected-doctype-name-but-got-eof": + _("Unexpected end of file. Expected DOCTYPE name."), + "eof-in-doctype-name": + _("Unexpected end of file in DOCTYPE name."), + "eof-in-doctype": + _("Unexpected end of file in DOCTYPE."), + "expected-space-or-right-bracket-in-doctype": + _("Expected space or '>'. Got '%(data)s'"), + "unexpected-end-of-doctype": + _("Unexpected end of DOCTYPE."), + "unexpected-char-in-doctype": + _("Unexpected character in DOCTYPE."), + "eof-in-innerhtml": + _("XXX innerHTML EOF"), + "unexpected-doctype": + _("Unexpected DOCTYPE. Ignored."), + "non-html-root": + _("html needs to be the first start tag."), + "expected-doctype-but-got-eof": + _("Unexpected End of file. Expected DOCTYPE."), + "unknown-doctype": + _("Erroneous DOCTYPE."), + "expected-doctype-but-got-chars": + _("Unexpected non-space characters. Expected DOCTYPE."), + "expected-doctype-but-got-start-tag": + _("Unexpected start tag (%(name)s). Expected DOCTYPE."), + "expected-doctype-but-got-end-tag": + _("Unexpected end tag (%(name)s). Expected DOCTYPE."), + "end-tag-after-implied-root": + _("Unexpected end tag (%(name)s) after the (implied) root element."), + "expected-named-closing-tag-but-got-eof": + _("Unexpected end of file. Expected end tag (%(name)s)."), + "two-heads-are-not-better-than-one": + _("Unexpected start tag head in existing head. Ignored."), + "unexpected-end-tag": + _("Unexpected end tag (%(name)s). Ignored."), + "unexpected-start-tag-out-of-my-head": + _("Unexpected start tag (%(name)s) that can be in head. Moved."), + "unexpected-start-tag": + _("Unexpected start tag (%(name)s)."), + "missing-end-tag": + _("Missing end tag (%(name)s)."), + "missing-end-tags": + _("Missing end tags (%(name)s)."), + "unexpected-start-tag-implies-end-tag": + _("Unexpected start tag (%(startName)s) " + "implies end tag (%(endName)s)."), + "unexpected-start-tag-treated-as": + _("Unexpected start tag (%(originalName)s). 
Treated as %(newName)s."), + "deprecated-tag": + _("Unexpected start tag %(name)s. Don't use it!"), + "unexpected-start-tag-ignored": + _("Unexpected start tag %(name)s. Ignored."), + "expected-one-end-tag-but-got-another": + _("Unexpected end tag (%(gotName)s). " + "Missing end tag (%(expectedName)s)."), + "end-tag-too-early": + _("End tag (%(name)s) seen too early. Expected other end tag."), + "end-tag-too-early-named": + _("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."), + "end-tag-too-early-ignored": + _("End tag (%(name)s) seen too early. Ignored."), + "adoption-agency-1.1": + _("End tag (%(name)s) violates step 1, " + "paragraph 1 of the adoption agency algorithm."), + "adoption-agency-1.2": + _("End tag (%(name)s) violates step 1, " + "paragraph 2 of the adoption agency algorithm."), + "adoption-agency-1.3": + _("End tag (%(name)s) violates step 1, " + "paragraph 3 of the adoption agency algorithm."), + "adoption-agency-4.4": + _("End tag (%(name)s) violates step 4, " + "paragraph 4 of the adoption agency algorithm."), + "unexpected-end-tag-treated-as": + _("Unexpected end tag (%(originalName)s). 
Treated as %(newName)s."), + "no-end-tag": + _("This element (%(name)s) has no end tag."), + "unexpected-implied-end-tag-in-table": + _("Unexpected implied end tag (%(name)s) in the table phase."), + "unexpected-implied-end-tag-in-table-body": + _("Unexpected implied end tag (%(name)s) in the table body phase."), + "unexpected-char-implies-table-voodoo": + _("Unexpected non-space characters in " + "table context caused voodoo mode."), + "unexpected-hidden-input-in-table": + _("Unexpected input with type hidden in table context."), + "unexpected-form-in-table": + _("Unexpected form in table context."), + "unexpected-start-tag-implies-table-voodoo": + _("Unexpected start tag (%(name)s) in " + "table context caused voodoo mode."), + "unexpected-end-tag-implies-table-voodoo": + _("Unexpected end tag (%(name)s) in " + "table context caused voodoo mode."), + "unexpected-cell-in-table-body": + _("Unexpected table cell start tag (%(name)s) " + "in the table body phase."), + "unexpected-cell-end-tag": + _("Got table cell end tag (%(name)s) " + "while required end tags are missing."), + "unexpected-end-tag-in-table-body": + _("Unexpected end tag (%(name)s) in the table body phase. Ignored."), + "unexpected-implied-end-tag-in-table-row": + _("Unexpected implied end tag (%(name)s) in the table row phase."), + "unexpected-end-tag-in-table-row": + _("Unexpected end tag (%(name)s) in the table row phase. Ignored."), + "unexpected-select-in-select": + _("Unexpected select start tag in the select phase " + "treated as select end tag."), + "unexpected-input-in-select": + _("Unexpected input start tag in the select phase."), + "unexpected-start-tag-in-select": + _("Unexpected start tag token (%(name)s in the select phase. " + "Ignored."), + "unexpected-end-tag-in-select": + _("Unexpected end tag (%(name)s) in the select phase. 
Ignored."), + "unexpected-table-element-start-tag-in-select-in-table": + _("Unexpected table element start tag (%(name)s) in the select in table phase."), + "unexpected-table-element-end-tag-in-select-in-table": + _("Unexpected table element end tag (%(name)s) in the select in table phase."), + "unexpected-char-after-body": + _("Unexpected non-space characters in the after body phase."), + "unexpected-start-tag-after-body": + _("Unexpected start tag token (%(name)s)" + " in the after body phase."), + "unexpected-end-tag-after-body": + _("Unexpected end tag token (%(name)s)" + " in the after body phase."), + "unexpected-char-in-frameset": + _("Unexpected characters in the frameset phase. Characters ignored."), + "unexpected-start-tag-in-frameset": + _("Unexpected start tag token (%(name)s)" + " in the frameset phase. Ignored."), + "unexpected-frameset-in-frameset-innerhtml": + _("Unexpected end tag token (frameset) " + "in the frameset phase (innerHTML)."), + "unexpected-end-tag-in-frameset": + _("Unexpected end tag token (%(name)s)" + " in the frameset phase. Ignored."), + "unexpected-char-after-frameset": + _("Unexpected non-space characters in the " + "after frameset phase. Ignored."), + "unexpected-start-tag-after-frameset": + _("Unexpected start tag (%(name)s)" + " in the after frameset phase. Ignored."), + "unexpected-end-tag-after-frameset": + _("Unexpected end tag (%(name)s)" + " in the after frameset phase. Ignored."), + "unexpected-end-tag-after-body-innerhtml": + _("Unexpected end tag after body(innerHtml)"), + "expected-eof-but-got-char": + _("Unexpected non-space characters. Expected end of file."), + "expected-eof-but-got-start-tag": + _("Unexpected start tag (%(name)s)" + ". Expected end of file."), + "expected-eof-but-got-end-tag": + _("Unexpected end tag (%(name)s)" + ". Expected end of file."), + "eof-in-table": + _("Unexpected end of file. Expected table content."), + "eof-in-select": + _("Unexpected end of file. 
Expected select content."), + "eof-in-frameset": + _("Unexpected end of file. Expected frameset content."), + "eof-in-script-in-script": + _("Unexpected end of file. Expected script content."), + "eof-in-foreign-lands": + _("Unexpected end of file. Expected foreign content"), + "non-void-element-with-trailing-solidus": + _("Trailing solidus not allowed on element %(name)s"), + "unexpected-html-element-in-foreign-content": + _("Element %(name)s not allowed in a non-html context"), + "unexpected-end-tag-before-html": + _("Unexpected end tag (%(name)s) before html."), + "XXX-undefined-error": + _("Undefined error (this sucks and should be fixed)"), +} + +namespaces = { + "html": "http://www.w3.org/1999/xhtml", + "mathml": "http://www.w3.org/1998/Math/MathML", + "svg": "http://www.w3.org/2000/svg", + "xlink": "http://www.w3.org/1999/xlink", + "xml": "http://www.w3.org/XML/1998/namespace", + "xmlns": "http://www.w3.org/2000/xmlns/" +} + +scopingElements = frozenset(( + (namespaces["html"], "applet"), + (namespaces["html"], "caption"), + (namespaces["html"], "html"), + (namespaces["html"], "marquee"), + (namespaces["html"], "object"), + (namespaces["html"], "table"), + (namespaces["html"], "td"), + (namespaces["html"], "th"), + (namespaces["mathml"], "mi"), + (namespaces["mathml"], "mo"), + (namespaces["mathml"], "mn"), + (namespaces["mathml"], "ms"), + (namespaces["mathml"], "mtext"), + (namespaces["mathml"], "annotation-xml"), + (namespaces["svg"], "foreignObject"), + (namespaces["svg"], "desc"), + (namespaces["svg"], "title"), +)) + +formattingElements = frozenset(( + (namespaces["html"], "a"), + (namespaces["html"], "b"), + (namespaces["html"], "big"), + (namespaces["html"], "code"), + (namespaces["html"], "em"), + (namespaces["html"], "font"), + (namespaces["html"], "i"), + (namespaces["html"], "nobr"), + (namespaces["html"], "s"), + (namespaces["html"], "small"), + (namespaces["html"], "strike"), + (namespaces["html"], "strong"), + (namespaces["html"], "tt"), + 
(namespaces["html"], "u") +)) + +specialElements = frozenset(( + (namespaces["html"], "address"), + (namespaces["html"], "applet"), + (namespaces["html"], "area"), + (namespaces["html"], "article"), + (namespaces["html"], "aside"), + (namespaces["html"], "base"), + (namespaces["html"], "basefont"), + (namespaces["html"], "bgsound"), + (namespaces["html"], "blockquote"), + (namespaces["html"], "body"), + (namespaces["html"], "br"), + (namespaces["html"], "button"), + (namespaces["html"], "caption"), + (namespaces["html"], "center"), + (namespaces["html"], "col"), + (namespaces["html"], "colgroup"), + (namespaces["html"], "command"), + (namespaces["html"], "dd"), + (namespaces["html"], "details"), + (namespaces["html"], "dir"), + (namespaces["html"], "div"), + (namespaces["html"], "dl"), + (namespaces["html"], "dt"), + (namespaces["html"], "embed"), + (namespaces["html"], "fieldset"), + (namespaces["html"], "figure"), + (namespaces["html"], "footer"), + (namespaces["html"], "form"), + (namespaces["html"], "frame"), + (namespaces["html"], "frameset"), + (namespaces["html"], "h1"), + (namespaces["html"], "h2"), + (namespaces["html"], "h3"), + (namespaces["html"], "h4"), + (namespaces["html"], "h5"), + (namespaces["html"], "h6"), + (namespaces["html"], "head"), + (namespaces["html"], "header"), + (namespaces["html"], "hr"), + (namespaces["html"], "html"), + (namespaces["html"], "iframe"), + # Note that image is commented out in the spec as "this isn't an + # element that can end up on the stack, so it doesn't matter," + (namespaces["html"], "image"), + (namespaces["html"], "img"), + (namespaces["html"], "input"), + (namespaces["html"], "isindex"), + (namespaces["html"], "li"), + (namespaces["html"], "link"), + (namespaces["html"], "listing"), + (namespaces["html"], "marquee"), + (namespaces["html"], "menu"), + (namespaces["html"], "meta"), + (namespaces["html"], "nav"), + (namespaces["html"], "noembed"), + (namespaces["html"], "noframes"), + (namespaces["html"], 
"noscript"), + (namespaces["html"], "object"), + (namespaces["html"], "ol"), + (namespaces["html"], "p"), + (namespaces["html"], "param"), + (namespaces["html"], "plaintext"), + (namespaces["html"], "pre"), + (namespaces["html"], "script"), + (namespaces["html"], "section"), + (namespaces["html"], "select"), + (namespaces["html"], "style"), + (namespaces["html"], "table"), + (namespaces["html"], "tbody"), + (namespaces["html"], "td"), + (namespaces["html"], "textarea"), + (namespaces["html"], "tfoot"), + (namespaces["html"], "th"), + (namespaces["html"], "thead"), + (namespaces["html"], "title"), + (namespaces["html"], "tr"), + (namespaces["html"], "ul"), + (namespaces["html"], "wbr"), + (namespaces["html"], "xmp"), + (namespaces["svg"], "foreignObject") +)) + +htmlIntegrationPointElements = frozenset(( + (namespaces["mathml"], "annotaion-xml"), + (namespaces["svg"], "foreignObject"), + (namespaces["svg"], "desc"), + (namespaces["svg"], "title") +)) + +mathmlTextIntegrationPointElements = frozenset(( + (namespaces["mathml"], "mi"), + (namespaces["mathml"], "mo"), + (namespaces["mathml"], "mn"), + (namespaces["mathml"], "ms"), + (namespaces["mathml"], "mtext") +)) + +adjustForeignAttributes = { + "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), + "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), + "xlink:href": ("xlink", "href", namespaces["xlink"]), + "xlink:role": ("xlink", "role", namespaces["xlink"]), + "xlink:show": ("xlink", "show", namespaces["xlink"]), + "xlink:title": ("xlink", "title", namespaces["xlink"]), + "xlink:type": ("xlink", "type", namespaces["xlink"]), + "xml:base": ("xml", "base", namespaces["xml"]), + "xml:lang": ("xml", "lang", namespaces["xml"]), + "xml:space": ("xml", "space", namespaces["xml"]), + "xmlns": (None, "xmlns", namespaces["xmlns"]), + "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) +} + +unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in + 
adjustForeignAttributes.items()]) + +spaceCharacters = frozenset(( + "\t", + "\n", + "\u000C", + " ", + "\r" +)) + +tableInsertModeElements = frozenset(( + "table", + "tbody", + "tfoot", + "thead", + "tr" +)) + +asciiLowercase = frozenset(string.ascii_lowercase) +asciiUppercase = frozenset(string.ascii_uppercase) +asciiLetters = frozenset(string.ascii_letters) +digits = frozenset(string.digits) +hexDigits = frozenset(string.hexdigits) + +asciiUpper2Lower = dict([(ord(c), ord(c.lower())) + for c in string.ascii_uppercase]) + +# Heading elements need to be ordered +headingElements = ( + "h1", + "h2", + "h3", + "h4", + "h5", + "h6" +) + +voidElements = frozenset(( + "base", + "command", + "event-source", + "link", + "meta", + "hr", + "br", + "img", + "embed", + "param", + "area", + "col", + "input", + "source", + "track" +)) + +cdataElements = frozenset(('title', 'textarea')) + +rcdataElements = frozenset(( + 'style', + 'script', + 'xmp', + 'iframe', + 'noembed', + 'noframes', + 'noscript' +)) + +booleanAttributes = { + "": frozenset(("irrelevant",)), + "style": frozenset(("scoped",)), + "img": frozenset(("ismap",)), + "audio": frozenset(("autoplay", "controls")), + "video": frozenset(("autoplay", "controls")), + "script": frozenset(("defer", "async")), + "details": frozenset(("open",)), + "datagrid": frozenset(("multiple", "disabled")), + "command": frozenset(("hidden", "disabled", "checked", "default")), + "hr": frozenset(("noshade")), + "menu": frozenset(("autosubmit",)), + "fieldset": frozenset(("disabled", "readonly")), + "option": frozenset(("disabled", "readonly", "selected")), + "optgroup": frozenset(("disabled", "readonly")), + "button": frozenset(("disabled", "autofocus")), + "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")), + "select": frozenset(("disabled", "readonly", "autofocus", "multiple")), + "output": frozenset(("disabled", "readonly")), +} + +# entitiesWindows1252 has to be _ordered_ and needs to have an 
index. It +# therefore can't be a frozenset. +entitiesWindows1252 = ( + 8364, # 0x80 0x20AC EURO SIGN + 65533, # 0x81 UNDEFINED + 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK + 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK + 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK + 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS + 8224, # 0x86 0x2020 DAGGER + 8225, # 0x87 0x2021 DOUBLE DAGGER + 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT + 8240, # 0x89 0x2030 PER MILLE SIGN + 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON + 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK + 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE + 65533, # 0x8D UNDEFINED + 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON + 65533, # 0x8F UNDEFINED + 65533, # 0x90 UNDEFINED + 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK + 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK + 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK + 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK + 8226, # 0x95 0x2022 BULLET + 8211, # 0x96 0x2013 EN DASH + 8212, # 0x97 0x2014 EM DASH + 732, # 0x98 0x02DC SMALL TILDE + 8482, # 0x99 0x2122 TRADE MARK SIGN + 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON + 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE + 65533, # 0x9D UNDEFINED + 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON + 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS +) + +xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;')) + +entities = { + "AElig": "\xc6", + "AElig;": "\xc6", + "AMP": "&", + "AMP;": "&", + "Aacute": "\xc1", + "Aacute;": "\xc1", + "Abreve;": "\u0102", + "Acirc": "\xc2", + "Acirc;": "\xc2", + "Acy;": "\u0410", + "Afr;": "\U0001d504", + "Agrave": "\xc0", + "Agrave;": "\xc0", + "Alpha;": "\u0391", + "Amacr;": "\u0100", + "And;": "\u2a53", + "Aogon;": "\u0104", + "Aopf;": "\U0001d538", + "ApplyFunction;": "\u2061", + "Aring": "\xc5", + "Aring;": "\xc5", + "Ascr;": "\U0001d49c", + "Assign;": 
"\u2254", + "Atilde": "\xc3", + "Atilde;": "\xc3", + "Auml": "\xc4", + "Auml;": "\xc4", + "Backslash;": "\u2216", + "Barv;": "\u2ae7", + "Barwed;": "\u2306", + "Bcy;": "\u0411", + "Because;": "\u2235", + "Bernoullis;": "\u212c", + "Beta;": "\u0392", + "Bfr;": "\U0001d505", + "Bopf;": "\U0001d539", + "Breve;": "\u02d8", + "Bscr;": "\u212c", + "Bumpeq;": "\u224e", + "CHcy;": "\u0427", + "COPY": "\xa9", + "COPY;": "\xa9", + "Cacute;": "\u0106", + "Cap;": "\u22d2", + "CapitalDifferentialD;": "\u2145", + "Cayleys;": "\u212d", + "Ccaron;": "\u010c", + "Ccedil": "\xc7", + "Ccedil;": "\xc7", + "Ccirc;": "\u0108", + "Cconint;": "\u2230", + "Cdot;": "\u010a", + "Cedilla;": "\xb8", + "CenterDot;": "\xb7", + "Cfr;": "\u212d", + "Chi;": "\u03a7", + "CircleDot;": "\u2299", + "CircleMinus;": "\u2296", + "CirclePlus;": "\u2295", + "CircleTimes;": "\u2297", + "ClockwiseContourIntegral;": "\u2232", + "CloseCurlyDoubleQuote;": "\u201d", + "CloseCurlyQuote;": "\u2019", + "Colon;": "\u2237", + "Colone;": "\u2a74", + "Congruent;": "\u2261", + "Conint;": "\u222f", + "ContourIntegral;": "\u222e", + "Copf;": "\u2102", + "Coproduct;": "\u2210", + "CounterClockwiseContourIntegral;": "\u2233", + "Cross;": "\u2a2f", + "Cscr;": "\U0001d49e", + "Cup;": "\u22d3", + "CupCap;": "\u224d", + "DD;": "\u2145", + "DDotrahd;": "\u2911", + "DJcy;": "\u0402", + "DScy;": "\u0405", + "DZcy;": "\u040f", + "Dagger;": "\u2021", + "Darr;": "\u21a1", + "Dashv;": "\u2ae4", + "Dcaron;": "\u010e", + "Dcy;": "\u0414", + "Del;": "\u2207", + "Delta;": "\u0394", + "Dfr;": "\U0001d507", + "DiacriticalAcute;": "\xb4", + "DiacriticalDot;": "\u02d9", + "DiacriticalDoubleAcute;": "\u02dd", + "DiacriticalGrave;": "`", + "DiacriticalTilde;": "\u02dc", + "Diamond;": "\u22c4", + "DifferentialD;": "\u2146", + "Dopf;": "\U0001d53b", + "Dot;": "\xa8", + "DotDot;": "\u20dc", + "DotEqual;": "\u2250", + "DoubleContourIntegral;": "\u222f", + "DoubleDot;": "\xa8", + "DoubleDownArrow;": "\u21d3", + "DoubleLeftArrow;": "\u21d0", + 
"DoubleLeftRightArrow;": "\u21d4", + "DoubleLeftTee;": "\u2ae4", + "DoubleLongLeftArrow;": "\u27f8", + "DoubleLongLeftRightArrow;": "\u27fa", + "DoubleLongRightArrow;": "\u27f9", + "DoubleRightArrow;": "\u21d2", + "DoubleRightTee;": "\u22a8", + "DoubleUpArrow;": "\u21d1", + "DoubleUpDownArrow;": "\u21d5", + "DoubleVerticalBar;": "\u2225", + "DownArrow;": "\u2193", + "DownArrowBar;": "\u2913", + "DownArrowUpArrow;": "\u21f5", + "DownBreve;": "\u0311", + "DownLeftRightVector;": "\u2950", + "DownLeftTeeVector;": "\u295e", + "DownLeftVector;": "\u21bd", + "DownLeftVectorBar;": "\u2956", + "DownRightTeeVector;": "\u295f", + "DownRightVector;": "\u21c1", + "DownRightVectorBar;": "\u2957", + "DownTee;": "\u22a4", + "DownTeeArrow;": "\u21a7", + "Downarrow;": "\u21d3", + "Dscr;": "\U0001d49f", + "Dstrok;": "\u0110", + "ENG;": "\u014a", + "ETH": "\xd0", + "ETH;": "\xd0", + "Eacute": "\xc9", + "Eacute;": "\xc9", + "Ecaron;": "\u011a", + "Ecirc": "\xca", + "Ecirc;": "\xca", + "Ecy;": "\u042d", + "Edot;": "\u0116", + "Efr;": "\U0001d508", + "Egrave": "\xc8", + "Egrave;": "\xc8", + "Element;": "\u2208", + "Emacr;": "\u0112", + "EmptySmallSquare;": "\u25fb", + "EmptyVerySmallSquare;": "\u25ab", + "Eogon;": "\u0118", + "Eopf;": "\U0001d53c", + "Epsilon;": "\u0395", + "Equal;": "\u2a75", + "EqualTilde;": "\u2242", + "Equilibrium;": "\u21cc", + "Escr;": "\u2130", + "Esim;": "\u2a73", + "Eta;": "\u0397", + "Euml": "\xcb", + "Euml;": "\xcb", + "Exists;": "\u2203", + "ExponentialE;": "\u2147", + "Fcy;": "\u0424", + "Ffr;": "\U0001d509", + "FilledSmallSquare;": "\u25fc", + "FilledVerySmallSquare;": "\u25aa", + "Fopf;": "\U0001d53d", + "ForAll;": "\u2200", + "Fouriertrf;": "\u2131", + "Fscr;": "\u2131", + "GJcy;": "\u0403", + "GT": ">", + "GT;": ">", + "Gamma;": "\u0393", + "Gammad;": "\u03dc", + "Gbreve;": "\u011e", + "Gcedil;": "\u0122", + "Gcirc;": "\u011c", + "Gcy;": "\u0413", + "Gdot;": "\u0120", + "Gfr;": "\U0001d50a", + "Gg;": "\u22d9", + "Gopf;": "\U0001d53e", + "GreaterEqual;": 
"\u2265", + "GreaterEqualLess;": "\u22db", + "GreaterFullEqual;": "\u2267", + "GreaterGreater;": "\u2aa2", + "GreaterLess;": "\u2277", + "GreaterSlantEqual;": "\u2a7e", + "GreaterTilde;": "\u2273", + "Gscr;": "\U0001d4a2", + "Gt;": "\u226b", + "HARDcy;": "\u042a", + "Hacek;": "\u02c7", + "Hat;": "^", + "Hcirc;": "\u0124", + "Hfr;": "\u210c", + "HilbertSpace;": "\u210b", + "Hopf;": "\u210d", + "HorizontalLine;": "\u2500", + "Hscr;": "\u210b", + "Hstrok;": "\u0126", + "HumpDownHump;": "\u224e", + "HumpEqual;": "\u224f", + "IEcy;": "\u0415", + "IJlig;": "\u0132", + "IOcy;": "\u0401", + "Iacute": "\xcd", + "Iacute;": "\xcd", + "Icirc": "\xce", + "Icirc;": "\xce", + "Icy;": "\u0418", + "Idot;": "\u0130", + "Ifr;": "\u2111", + "Igrave": "\xcc", + "Igrave;": "\xcc", + "Im;": "\u2111", + "Imacr;": "\u012a", + "ImaginaryI;": "\u2148", + "Implies;": "\u21d2", + "Int;": "\u222c", + "Integral;": "\u222b", + "Intersection;": "\u22c2", + "InvisibleComma;": "\u2063", + "InvisibleTimes;": "\u2062", + "Iogon;": "\u012e", + "Iopf;": "\U0001d540", + "Iota;": "\u0399", + "Iscr;": "\u2110", + "Itilde;": "\u0128", + "Iukcy;": "\u0406", + "Iuml": "\xcf", + "Iuml;": "\xcf", + "Jcirc;": "\u0134", + "Jcy;": "\u0419", + "Jfr;": "\U0001d50d", + "Jopf;": "\U0001d541", + "Jscr;": "\U0001d4a5", + "Jsercy;": "\u0408", + "Jukcy;": "\u0404", + "KHcy;": "\u0425", + "KJcy;": "\u040c", + "Kappa;": "\u039a", + "Kcedil;": "\u0136", + "Kcy;": "\u041a", + "Kfr;": "\U0001d50e", + "Kopf;": "\U0001d542", + "Kscr;": "\U0001d4a6", + "LJcy;": "\u0409", + "LT": "<", + "LT;": "<", + "Lacute;": "\u0139", + "Lambda;": "\u039b", + "Lang;": "\u27ea", + "Laplacetrf;": "\u2112", + "Larr;": "\u219e", + "Lcaron;": "\u013d", + "Lcedil;": "\u013b", + "Lcy;": "\u041b", + "LeftAngleBracket;": "\u27e8", + "LeftArrow;": "\u2190", + "LeftArrowBar;": "\u21e4", + "LeftArrowRightArrow;": "\u21c6", + "LeftCeiling;": "\u2308", + "LeftDoubleBracket;": "\u27e6", + "LeftDownTeeVector;": "\u2961", + "LeftDownVector;": "\u21c3", + 
"LeftDownVectorBar;": "\u2959", + "LeftFloor;": "\u230a", + "LeftRightArrow;": "\u2194", + "LeftRightVector;": "\u294e", + "LeftTee;": "\u22a3", + "LeftTeeArrow;": "\u21a4", + "LeftTeeVector;": "\u295a", + "LeftTriangle;": "\u22b2", + "LeftTriangleBar;": "\u29cf", + "LeftTriangleEqual;": "\u22b4", + "LeftUpDownVector;": "\u2951", + "LeftUpTeeVector;": "\u2960", + "LeftUpVector;": "\u21bf", + "LeftUpVectorBar;": "\u2958", + "LeftVector;": "\u21bc", + "LeftVectorBar;": "\u2952", + "Leftarrow;": "\u21d0", + "Leftrightarrow;": "\u21d4", + "LessEqualGreater;": "\u22da", + "LessFullEqual;": "\u2266", + "LessGreater;": "\u2276", + "LessLess;": "\u2aa1", + "LessSlantEqual;": "\u2a7d", + "LessTilde;": "\u2272", + "Lfr;": "\U0001d50f", + "Ll;": "\u22d8", + "Lleftarrow;": "\u21da", + "Lmidot;": "\u013f", + "LongLeftArrow;": "\u27f5", + "LongLeftRightArrow;": "\u27f7", + "LongRightArrow;": "\u27f6", + "Longleftarrow;": "\u27f8", + "Longleftrightarrow;": "\u27fa", + "Longrightarrow;": "\u27f9", + "Lopf;": "\U0001d543", + "LowerLeftArrow;": "\u2199", + "LowerRightArrow;": "\u2198", + "Lscr;": "\u2112", + "Lsh;": "\u21b0", + "Lstrok;": "\u0141", + "Lt;": "\u226a", + "Map;": "\u2905", + "Mcy;": "\u041c", + "MediumSpace;": "\u205f", + "Mellintrf;": "\u2133", + "Mfr;": "\U0001d510", + "MinusPlus;": "\u2213", + "Mopf;": "\U0001d544", + "Mscr;": "\u2133", + "Mu;": "\u039c", + "NJcy;": "\u040a", + "Nacute;": "\u0143", + "Ncaron;": "\u0147", + "Ncedil;": "\u0145", + "Ncy;": "\u041d", + "NegativeMediumSpace;": "\u200b", + "NegativeThickSpace;": "\u200b", + "NegativeThinSpace;": "\u200b", + "NegativeVeryThinSpace;": "\u200b", + "NestedGreaterGreater;": "\u226b", + "NestedLessLess;": "\u226a", + "NewLine;": "\n", + "Nfr;": "\U0001d511", + "NoBreak;": "\u2060", + "NonBreakingSpace;": "\xa0", + "Nopf;": "\u2115", + "Not;": "\u2aec", + "NotCongruent;": "\u2262", + "NotCupCap;": "\u226d", + "NotDoubleVerticalBar;": "\u2226", + "NotElement;": "\u2209", + "NotEqual;": "\u2260", + 
"NotEqualTilde;": "\u2242\u0338", + "NotExists;": "\u2204", + "NotGreater;": "\u226f", + "NotGreaterEqual;": "\u2271", + "NotGreaterFullEqual;": "\u2267\u0338", + "NotGreaterGreater;": "\u226b\u0338", + "NotGreaterLess;": "\u2279", + "NotGreaterSlantEqual;": "\u2a7e\u0338", + "NotGreaterTilde;": "\u2275", + "NotHumpDownHump;": "\u224e\u0338", + "NotHumpEqual;": "\u224f\u0338", + "NotLeftTriangle;": "\u22ea", + "NotLeftTriangleBar;": "\u29cf\u0338", + "NotLeftTriangleEqual;": "\u22ec", + "NotLess;": "\u226e", + "NotLessEqual;": "\u2270", + "NotLessGreater;": "\u2278", + "NotLessLess;": "\u226a\u0338", + "NotLessSlantEqual;": "\u2a7d\u0338", + "NotLessTilde;": "\u2274", + "NotNestedGreaterGreater;": "\u2aa2\u0338", + "NotNestedLessLess;": "\u2aa1\u0338", + "NotPrecedes;": "\u2280", + "NotPrecedesEqual;": "\u2aaf\u0338", + "NotPrecedesSlantEqual;": "\u22e0", + "NotReverseElement;": "\u220c", + "NotRightTriangle;": "\u22eb", + "NotRightTriangleBar;": "\u29d0\u0338", + "NotRightTriangleEqual;": "\u22ed", + "NotSquareSubset;": "\u228f\u0338", + "NotSquareSubsetEqual;": "\u22e2", + "NotSquareSuperset;": "\u2290\u0338", + "NotSquareSupersetEqual;": "\u22e3", + "NotSubset;": "\u2282\u20d2", + "NotSubsetEqual;": "\u2288", + "NotSucceeds;": "\u2281", + "NotSucceedsEqual;": "\u2ab0\u0338", + "NotSucceedsSlantEqual;": "\u22e1", + "NotSucceedsTilde;": "\u227f\u0338", + "NotSuperset;": "\u2283\u20d2", + "NotSupersetEqual;": "\u2289", + "NotTilde;": "\u2241", + "NotTildeEqual;": "\u2244", + "NotTildeFullEqual;": "\u2247", + "NotTildeTilde;": "\u2249", + "NotVerticalBar;": "\u2224", + "Nscr;": "\U0001d4a9", + "Ntilde": "\xd1", + "Ntilde;": "\xd1", + "Nu;": "\u039d", + "OElig;": "\u0152", + "Oacute": "\xd3", + "Oacute;": "\xd3", + "Ocirc": "\xd4", + "Ocirc;": "\xd4", + "Ocy;": "\u041e", + "Odblac;": "\u0150", + "Ofr;": "\U0001d512", + "Ograve": "\xd2", + "Ograve;": "\xd2", + "Omacr;": "\u014c", + "Omega;": "\u03a9", + "Omicron;": "\u039f", + "Oopf;": "\U0001d546", + 
"OpenCurlyDoubleQuote;": "\u201c", + "OpenCurlyQuote;": "\u2018", + "Or;": "\u2a54", + "Oscr;": "\U0001d4aa", + "Oslash": "\xd8", + "Oslash;": "\xd8", + "Otilde": "\xd5", + "Otilde;": "\xd5", + "Otimes;": "\u2a37", + "Ouml": "\xd6", + "Ouml;": "\xd6", + "OverBar;": "\u203e", + "OverBrace;": "\u23de", + "OverBracket;": "\u23b4", + "OverParenthesis;": "\u23dc", + "PartialD;": "\u2202", + "Pcy;": "\u041f", + "Pfr;": "\U0001d513", + "Phi;": "\u03a6", + "Pi;": "\u03a0", + "PlusMinus;": "\xb1", + "Poincareplane;": "\u210c", + "Popf;": "\u2119", + "Pr;": "\u2abb", + "Precedes;": "\u227a", + "PrecedesEqual;": "\u2aaf", + "PrecedesSlantEqual;": "\u227c", + "PrecedesTilde;": "\u227e", + "Prime;": "\u2033", + "Product;": "\u220f", + "Proportion;": "\u2237", + "Proportional;": "\u221d", + "Pscr;": "\U0001d4ab", + "Psi;": "\u03a8", + "QUOT": "\"", + "QUOT;": "\"", + "Qfr;": "\U0001d514", + "Qopf;": "\u211a", + "Qscr;": "\U0001d4ac", + "RBarr;": "\u2910", + "REG": "\xae", + "REG;": "\xae", + "Racute;": "\u0154", + "Rang;": "\u27eb", + "Rarr;": "\u21a0", + "Rarrtl;": "\u2916", + "Rcaron;": "\u0158", + "Rcedil;": "\u0156", + "Rcy;": "\u0420", + "Re;": "\u211c", + "ReverseElement;": "\u220b", + "ReverseEquilibrium;": "\u21cb", + "ReverseUpEquilibrium;": "\u296f", + "Rfr;": "\u211c", + "Rho;": "\u03a1", + "RightAngleBracket;": "\u27e9", + "RightArrow;": "\u2192", + "RightArrowBar;": "\u21e5", + "RightArrowLeftArrow;": "\u21c4", + "RightCeiling;": "\u2309", + "RightDoubleBracket;": "\u27e7", + "RightDownTeeVector;": "\u295d", + "RightDownVector;": "\u21c2", + "RightDownVectorBar;": "\u2955", + "RightFloor;": "\u230b", + "RightTee;": "\u22a2", + "RightTeeArrow;": "\u21a6", + "RightTeeVector;": "\u295b", + "RightTriangle;": "\u22b3", + "RightTriangleBar;": "\u29d0", + "RightTriangleEqual;": "\u22b5", + "RightUpDownVector;": "\u294f", + "RightUpTeeVector;": "\u295c", + "RightUpVector;": "\u21be", + "RightUpVectorBar;": "\u2954", + "RightVector;": "\u21c0", + "RightVectorBar;": "\u2953", 
+ "Rightarrow;": "\u21d2", + "Ropf;": "\u211d", + "RoundImplies;": "\u2970", + "Rrightarrow;": "\u21db", + "Rscr;": "\u211b", + "Rsh;": "\u21b1", + "RuleDelayed;": "\u29f4", + "SHCHcy;": "\u0429", + "SHcy;": "\u0428", + "SOFTcy;": "\u042c", + "Sacute;": "\u015a", + "Sc;": "\u2abc", + "Scaron;": "\u0160", + "Scedil;": "\u015e", + "Scirc;": "\u015c", + "Scy;": "\u0421", + "Sfr;": "\U0001d516", + "ShortDownArrow;": "\u2193", + "ShortLeftArrow;": "\u2190", + "ShortRightArrow;": "\u2192", + "ShortUpArrow;": "\u2191", + "Sigma;": "\u03a3", + "SmallCircle;": "\u2218", + "Sopf;": "\U0001d54a", + "Sqrt;": "\u221a", + "Square;": "\u25a1", + "SquareIntersection;": "\u2293", + "SquareSubset;": "\u228f", + "SquareSubsetEqual;": "\u2291", + "SquareSuperset;": "\u2290", + "SquareSupersetEqual;": "\u2292", + "SquareUnion;": "\u2294", + "Sscr;": "\U0001d4ae", + "Star;": "\u22c6", + "Sub;": "\u22d0", + "Subset;": "\u22d0", + "SubsetEqual;": "\u2286", + "Succeeds;": "\u227b", + "SucceedsEqual;": "\u2ab0", + "SucceedsSlantEqual;": "\u227d", + "SucceedsTilde;": "\u227f", + "SuchThat;": "\u220b", + "Sum;": "\u2211", + "Sup;": "\u22d1", + "Superset;": "\u2283", + "SupersetEqual;": "\u2287", + "Supset;": "\u22d1", + "THORN": "\xde", + "THORN;": "\xde", + "TRADE;": "\u2122", + "TSHcy;": "\u040b", + "TScy;": "\u0426", + "Tab;": "\t", + "Tau;": "\u03a4", + "Tcaron;": "\u0164", + "Tcedil;": "\u0162", + "Tcy;": "\u0422", + "Tfr;": "\U0001d517", + "Therefore;": "\u2234", + "Theta;": "\u0398", + "ThickSpace;": "\u205f\u200a", + "ThinSpace;": "\u2009", + "Tilde;": "\u223c", + "TildeEqual;": "\u2243", + "TildeFullEqual;": "\u2245", + "TildeTilde;": "\u2248", + "Topf;": "\U0001d54b", + "TripleDot;": "\u20db", + "Tscr;": "\U0001d4af", + "Tstrok;": "\u0166", + "Uacute": "\xda", + "Uacute;": "\xda", + "Uarr;": "\u219f", + "Uarrocir;": "\u2949", + "Ubrcy;": "\u040e", + "Ubreve;": "\u016c", + "Ucirc": "\xdb", + "Ucirc;": "\xdb", + "Ucy;": "\u0423", + "Udblac;": "\u0170", + "Ufr;": "\U0001d518", + 
"Ugrave": "\xd9", + "Ugrave;": "\xd9", + "Umacr;": "\u016a", + "UnderBar;": "_", + "UnderBrace;": "\u23df", + "UnderBracket;": "\u23b5", + "UnderParenthesis;": "\u23dd", + "Union;": "\u22c3", + "UnionPlus;": "\u228e", + "Uogon;": "\u0172", + "Uopf;": "\U0001d54c", + "UpArrow;": "\u2191", + "UpArrowBar;": "\u2912", + "UpArrowDownArrow;": "\u21c5", + "UpDownArrow;": "\u2195", + "UpEquilibrium;": "\u296e", + "UpTee;": "\u22a5", + "UpTeeArrow;": "\u21a5", + "Uparrow;": "\u21d1", + "Updownarrow;": "\u21d5", + "UpperLeftArrow;": "\u2196", + "UpperRightArrow;": "\u2197", + "Upsi;": "\u03d2", + "Upsilon;": "\u03a5", + "Uring;": "\u016e", + "Uscr;": "\U0001d4b0", + "Utilde;": "\u0168", + "Uuml": "\xdc", + "Uuml;": "\xdc", + "VDash;": "\u22ab", + "Vbar;": "\u2aeb", + "Vcy;": "\u0412", + "Vdash;": "\u22a9", + "Vdashl;": "\u2ae6", + "Vee;": "\u22c1", + "Verbar;": "\u2016", + "Vert;": "\u2016", + "VerticalBar;": "\u2223", + "VerticalLine;": "|", + "VerticalSeparator;": "\u2758", + "VerticalTilde;": "\u2240", + "VeryThinSpace;": "\u200a", + "Vfr;": "\U0001d519", + "Vopf;": "\U0001d54d", + "Vscr;": "\U0001d4b1", + "Vvdash;": "\u22aa", + "Wcirc;": "\u0174", + "Wedge;": "\u22c0", + "Wfr;": "\U0001d51a", + "Wopf;": "\U0001d54e", + "Wscr;": "\U0001d4b2", + "Xfr;": "\U0001d51b", + "Xi;": "\u039e", + "Xopf;": "\U0001d54f", + "Xscr;": "\U0001d4b3", + "YAcy;": "\u042f", + "YIcy;": "\u0407", + "YUcy;": "\u042e", + "Yacute": "\xdd", + "Yacute;": "\xdd", + "Ycirc;": "\u0176", + "Ycy;": "\u042b", + "Yfr;": "\U0001d51c", + "Yopf;": "\U0001d550", + "Yscr;": "\U0001d4b4", + "Yuml;": "\u0178", + "ZHcy;": "\u0416", + "Zacute;": "\u0179", + "Zcaron;": "\u017d", + "Zcy;": "\u0417", + "Zdot;": "\u017b", + "ZeroWidthSpace;": "\u200b", + "Zeta;": "\u0396", + "Zfr;": "\u2128", + "Zopf;": "\u2124", + "Zscr;": "\U0001d4b5", + "aacute": "\xe1", + "aacute;": "\xe1", + "abreve;": "\u0103", + "ac;": "\u223e", + "acE;": "\u223e\u0333", + "acd;": "\u223f", + "acirc": "\xe2", + "acirc;": "\xe2", + "acute": 
"\xb4", + "acute;": "\xb4", + "acy;": "\u0430", + "aelig": "\xe6", + "aelig;": "\xe6", + "af;": "\u2061", + "afr;": "\U0001d51e", + "agrave": "\xe0", + "agrave;": "\xe0", + "alefsym;": "\u2135", + "aleph;": "\u2135", + "alpha;": "\u03b1", + "amacr;": "\u0101", + "amalg;": "\u2a3f", + "amp": "&", + "amp;": "&", + "and;": "\u2227", + "andand;": "\u2a55", + "andd;": "\u2a5c", + "andslope;": "\u2a58", + "andv;": "\u2a5a", + "ang;": "\u2220", + "ange;": "\u29a4", + "angle;": "\u2220", + "angmsd;": "\u2221", + "angmsdaa;": "\u29a8", + "angmsdab;": "\u29a9", + "angmsdac;": "\u29aa", + "angmsdad;": "\u29ab", + "angmsdae;": "\u29ac", + "angmsdaf;": "\u29ad", + "angmsdag;": "\u29ae", + "angmsdah;": "\u29af", + "angrt;": "\u221f", + "angrtvb;": "\u22be", + "angrtvbd;": "\u299d", + "angsph;": "\u2222", + "angst;": "\xc5", + "angzarr;": "\u237c", + "aogon;": "\u0105", + "aopf;": "\U0001d552", + "ap;": "\u2248", + "apE;": "\u2a70", + "apacir;": "\u2a6f", + "ape;": "\u224a", + "apid;": "\u224b", + "apos;": "'", + "approx;": "\u2248", + "approxeq;": "\u224a", + "aring": "\xe5", + "aring;": "\xe5", + "ascr;": "\U0001d4b6", + "ast;": "*", + "asymp;": "\u2248", + "asympeq;": "\u224d", + "atilde": "\xe3", + "atilde;": "\xe3", + "auml": "\xe4", + "auml;": "\xe4", + "awconint;": "\u2233", + "awint;": "\u2a11", + "bNot;": "\u2aed", + "backcong;": "\u224c", + "backepsilon;": "\u03f6", + "backprime;": "\u2035", + "backsim;": "\u223d", + "backsimeq;": "\u22cd", + "barvee;": "\u22bd", + "barwed;": "\u2305", + "barwedge;": "\u2305", + "bbrk;": "\u23b5", + "bbrktbrk;": "\u23b6", + "bcong;": "\u224c", + "bcy;": "\u0431", + "bdquo;": "\u201e", + "becaus;": "\u2235", + "because;": "\u2235", + "bemptyv;": "\u29b0", + "bepsi;": "\u03f6", + "bernou;": "\u212c", + "beta;": "\u03b2", + "beth;": "\u2136", + "between;": "\u226c", + "bfr;": "\U0001d51f", + "bigcap;": "\u22c2", + "bigcirc;": "\u25ef", + "bigcup;": "\u22c3", + "bigodot;": "\u2a00", + "bigoplus;": "\u2a01", + "bigotimes;": "\u2a02", + 
"bigsqcup;": "\u2a06", + "bigstar;": "\u2605", + "bigtriangledown;": "\u25bd", + "bigtriangleup;": "\u25b3", + "biguplus;": "\u2a04", + "bigvee;": "\u22c1", + "bigwedge;": "\u22c0", + "bkarow;": "\u290d", + "blacklozenge;": "\u29eb", + "blacksquare;": "\u25aa", + "blacktriangle;": "\u25b4", + "blacktriangledown;": "\u25be", + "blacktriangleleft;": "\u25c2", + "blacktriangleright;": "\u25b8", + "blank;": "\u2423", + "blk12;": "\u2592", + "blk14;": "\u2591", + "blk34;": "\u2593", + "block;": "\u2588", + "bne;": "=\u20e5", + "bnequiv;": "\u2261\u20e5", + "bnot;": "\u2310", + "bopf;": "\U0001d553", + "bot;": "\u22a5", + "bottom;": "\u22a5", + "bowtie;": "\u22c8", + "boxDL;": "\u2557", + "boxDR;": "\u2554", + "boxDl;": "\u2556", + "boxDr;": "\u2553", + "boxH;": "\u2550", + "boxHD;": "\u2566", + "boxHU;": "\u2569", + "boxHd;": "\u2564", + "boxHu;": "\u2567", + "boxUL;": "\u255d", + "boxUR;": "\u255a", + "boxUl;": "\u255c", + "boxUr;": "\u2559", + "boxV;": "\u2551", + "boxVH;": "\u256c", + "boxVL;": "\u2563", + "boxVR;": "\u2560", + "boxVh;": "\u256b", + "boxVl;": "\u2562", + "boxVr;": "\u255f", + "boxbox;": "\u29c9", + "boxdL;": "\u2555", + "boxdR;": "\u2552", + "boxdl;": "\u2510", + "boxdr;": "\u250c", + "boxh;": "\u2500", + "boxhD;": "\u2565", + "boxhU;": "\u2568", + "boxhd;": "\u252c", + "boxhu;": "\u2534", + "boxminus;": "\u229f", + "boxplus;": "\u229e", + "boxtimes;": "\u22a0", + "boxuL;": "\u255b", + "boxuR;": "\u2558", + "boxul;": "\u2518", + "boxur;": "\u2514", + "boxv;": "\u2502", + "boxvH;": "\u256a", + "boxvL;": "\u2561", + "boxvR;": "\u255e", + "boxvh;": "\u253c", + "boxvl;": "\u2524", + "boxvr;": "\u251c", + "bprime;": "\u2035", + "breve;": "\u02d8", + "brvbar": "\xa6", + "brvbar;": "\xa6", + "bscr;": "\U0001d4b7", + "bsemi;": "\u204f", + "bsim;": "\u223d", + "bsime;": "\u22cd", + "bsol;": "\\", + "bsolb;": "\u29c5", + "bsolhsub;": "\u27c8", + "bull;": "\u2022", + "bullet;": "\u2022", + "bump;": "\u224e", + "bumpE;": "\u2aae", + "bumpe;": "\u224f", + 
"bumpeq;": "\u224f", + "cacute;": "\u0107", + "cap;": "\u2229", + "capand;": "\u2a44", + "capbrcup;": "\u2a49", + "capcap;": "\u2a4b", + "capcup;": "\u2a47", + "capdot;": "\u2a40", + "caps;": "\u2229\ufe00", + "caret;": "\u2041", + "caron;": "\u02c7", + "ccaps;": "\u2a4d", + "ccaron;": "\u010d", + "ccedil": "\xe7", + "ccedil;": "\xe7", + "ccirc;": "\u0109", + "ccups;": "\u2a4c", + "ccupssm;": "\u2a50", + "cdot;": "\u010b", + "cedil": "\xb8", + "cedil;": "\xb8", + "cemptyv;": "\u29b2", + "cent": "\xa2", + "cent;": "\xa2", + "centerdot;": "\xb7", + "cfr;": "\U0001d520", + "chcy;": "\u0447", + "check;": "\u2713", + "checkmark;": "\u2713", + "chi;": "\u03c7", + "cir;": "\u25cb", + "cirE;": "\u29c3", + "circ;": "\u02c6", + "circeq;": "\u2257", + "circlearrowleft;": "\u21ba", + "circlearrowright;": "\u21bb", + "circledR;": "\xae", + "circledS;": "\u24c8", + "circledast;": "\u229b", + "circledcirc;": "\u229a", + "circleddash;": "\u229d", + "cire;": "\u2257", + "cirfnint;": "\u2a10", + "cirmid;": "\u2aef", + "cirscir;": "\u29c2", + "clubs;": "\u2663", + "clubsuit;": "\u2663", + "colon;": ":", + "colone;": "\u2254", + "coloneq;": "\u2254", + "comma;": ",", + "commat;": "@", + "comp;": "\u2201", + "compfn;": "\u2218", + "complement;": "\u2201", + "complexes;": "\u2102", + "cong;": "\u2245", + "congdot;": "\u2a6d", + "conint;": "\u222e", + "copf;": "\U0001d554", + "coprod;": "\u2210", + "copy": "\xa9", + "copy;": "\xa9", + "copysr;": "\u2117", + "crarr;": "\u21b5", + "cross;": "\u2717", + "cscr;": "\U0001d4b8", + "csub;": "\u2acf", + "csube;": "\u2ad1", + "csup;": "\u2ad0", + "csupe;": "\u2ad2", + "ctdot;": "\u22ef", + "cudarrl;": "\u2938", + "cudarrr;": "\u2935", + "cuepr;": "\u22de", + "cuesc;": "\u22df", + "cularr;": "\u21b6", + "cularrp;": "\u293d", + "cup;": "\u222a", + "cupbrcap;": "\u2a48", + "cupcap;": "\u2a46", + "cupcup;": "\u2a4a", + "cupdot;": "\u228d", + "cupor;": "\u2a45", + "cups;": "\u222a\ufe00", + "curarr;": "\u21b7", + "curarrm;": "\u293c", + 
"curlyeqprec;": "\u22de", + "curlyeqsucc;": "\u22df", + "curlyvee;": "\u22ce", + "curlywedge;": "\u22cf", + "curren": "\xa4", + "curren;": "\xa4", + "curvearrowleft;": "\u21b6", + "curvearrowright;": "\u21b7", + "cuvee;": "\u22ce", + "cuwed;": "\u22cf", + "cwconint;": "\u2232", + "cwint;": "\u2231", + "cylcty;": "\u232d", + "dArr;": "\u21d3", + "dHar;": "\u2965", + "dagger;": "\u2020", + "daleth;": "\u2138", + "darr;": "\u2193", + "dash;": "\u2010", + "dashv;": "\u22a3", + "dbkarow;": "\u290f", + "dblac;": "\u02dd", + "dcaron;": "\u010f", + "dcy;": "\u0434", + "dd;": "\u2146", + "ddagger;": "\u2021", + "ddarr;": "\u21ca", + "ddotseq;": "\u2a77", + "deg": "\xb0", + "deg;": "\xb0", + "delta;": "\u03b4", + "demptyv;": "\u29b1", + "dfisht;": "\u297f", + "dfr;": "\U0001d521", + "dharl;": "\u21c3", + "dharr;": "\u21c2", + "diam;": "\u22c4", + "diamond;": "\u22c4", + "diamondsuit;": "\u2666", + "diams;": "\u2666", + "die;": "\xa8", + "digamma;": "\u03dd", + "disin;": "\u22f2", + "div;": "\xf7", + "divide": "\xf7", + "divide;": "\xf7", + "divideontimes;": "\u22c7", + "divonx;": "\u22c7", + "djcy;": "\u0452", + "dlcorn;": "\u231e", + "dlcrop;": "\u230d", + "dollar;": "$", + "dopf;": "\U0001d555", + "dot;": "\u02d9", + "doteq;": "\u2250", + "doteqdot;": "\u2251", + "dotminus;": "\u2238", + "dotplus;": "\u2214", + "dotsquare;": "\u22a1", + "doublebarwedge;": "\u2306", + "downarrow;": "\u2193", + "downdownarrows;": "\u21ca", + "downharpoonleft;": "\u21c3", + "downharpoonright;": "\u21c2", + "drbkarow;": "\u2910", + "drcorn;": "\u231f", + "drcrop;": "\u230c", + "dscr;": "\U0001d4b9", + "dscy;": "\u0455", + "dsol;": "\u29f6", + "dstrok;": "\u0111", + "dtdot;": "\u22f1", + "dtri;": "\u25bf", + "dtrif;": "\u25be", + "duarr;": "\u21f5", + "duhar;": "\u296f", + "dwangle;": "\u29a6", + "dzcy;": "\u045f", + "dzigrarr;": "\u27ff", + "eDDot;": "\u2a77", + "eDot;": "\u2251", + "eacute": "\xe9", + "eacute;": "\xe9", + "easter;": "\u2a6e", + "ecaron;": "\u011b", + "ecir;": "\u2256", + 
"ecirc": "\xea", + "ecirc;": "\xea", + "ecolon;": "\u2255", + "ecy;": "\u044d", + "edot;": "\u0117", + "ee;": "\u2147", + "efDot;": "\u2252", + "efr;": "\U0001d522", + "eg;": "\u2a9a", + "egrave": "\xe8", + "egrave;": "\xe8", + "egs;": "\u2a96", + "egsdot;": "\u2a98", + "el;": "\u2a99", + "elinters;": "\u23e7", + "ell;": "\u2113", + "els;": "\u2a95", + "elsdot;": "\u2a97", + "emacr;": "\u0113", + "empty;": "\u2205", + "emptyset;": "\u2205", + "emptyv;": "\u2205", + "emsp13;": "\u2004", + "emsp14;": "\u2005", + "emsp;": "\u2003", + "eng;": "\u014b", + "ensp;": "\u2002", + "eogon;": "\u0119", + "eopf;": "\U0001d556", + "epar;": "\u22d5", + "eparsl;": "\u29e3", + "eplus;": "\u2a71", + "epsi;": "\u03b5", + "epsilon;": "\u03b5", + "epsiv;": "\u03f5", + "eqcirc;": "\u2256", + "eqcolon;": "\u2255", + "eqsim;": "\u2242", + "eqslantgtr;": "\u2a96", + "eqslantless;": "\u2a95", + "equals;": "=", + "equest;": "\u225f", + "equiv;": "\u2261", + "equivDD;": "\u2a78", + "eqvparsl;": "\u29e5", + "erDot;": "\u2253", + "erarr;": "\u2971", + "escr;": "\u212f", + "esdot;": "\u2250", + "esim;": "\u2242", + "eta;": "\u03b7", + "eth": "\xf0", + "eth;": "\xf0", + "euml": "\xeb", + "euml;": "\xeb", + "euro;": "\u20ac", + "excl;": "!", + "exist;": "\u2203", + "expectation;": "\u2130", + "exponentiale;": "\u2147", + "fallingdotseq;": "\u2252", + "fcy;": "\u0444", + "female;": "\u2640", + "ffilig;": "\ufb03", + "fflig;": "\ufb00", + "ffllig;": "\ufb04", + "ffr;": "\U0001d523", + "filig;": "\ufb01", + "fjlig;": "fj", + "flat;": "\u266d", + "fllig;": "\ufb02", + "fltns;": "\u25b1", + "fnof;": "\u0192", + "fopf;": "\U0001d557", + "forall;": "\u2200", + "fork;": "\u22d4", + "forkv;": "\u2ad9", + "fpartint;": "\u2a0d", + "frac12": "\xbd", + "frac12;": "\xbd", + "frac13;": "\u2153", + "frac14": "\xbc", + "frac14;": "\xbc", + "frac15;": "\u2155", + "frac16;": "\u2159", + "frac18;": "\u215b", + "frac23;": "\u2154", + "frac25;": "\u2156", + "frac34": "\xbe", + "frac34;": "\xbe", + "frac35;": "\u2157", 
+ "frac38;": "\u215c", + "frac45;": "\u2158", + "frac56;": "\u215a", + "frac58;": "\u215d", + "frac78;": "\u215e", + "frasl;": "\u2044", + "frown;": "\u2322", + "fscr;": "\U0001d4bb", + "gE;": "\u2267", + "gEl;": "\u2a8c", + "gacute;": "\u01f5", + "gamma;": "\u03b3", + "gammad;": "\u03dd", + "gap;": "\u2a86", + "gbreve;": "\u011f", + "gcirc;": "\u011d", + "gcy;": "\u0433", + "gdot;": "\u0121", + "ge;": "\u2265", + "gel;": "\u22db", + "geq;": "\u2265", + "geqq;": "\u2267", + "geqslant;": "\u2a7e", + "ges;": "\u2a7e", + "gescc;": "\u2aa9", + "gesdot;": "\u2a80", + "gesdoto;": "\u2a82", + "gesdotol;": "\u2a84", + "gesl;": "\u22db\ufe00", + "gesles;": "\u2a94", + "gfr;": "\U0001d524", + "gg;": "\u226b", + "ggg;": "\u22d9", + "gimel;": "\u2137", + "gjcy;": "\u0453", + "gl;": "\u2277", + "glE;": "\u2a92", + "gla;": "\u2aa5", + "glj;": "\u2aa4", + "gnE;": "\u2269", + "gnap;": "\u2a8a", + "gnapprox;": "\u2a8a", + "gne;": "\u2a88", + "gneq;": "\u2a88", + "gneqq;": "\u2269", + "gnsim;": "\u22e7", + "gopf;": "\U0001d558", + "grave;": "`", + "gscr;": "\u210a", + "gsim;": "\u2273", + "gsime;": "\u2a8e", + "gsiml;": "\u2a90", + "gt": ">", + "gt;": ">", + "gtcc;": "\u2aa7", + "gtcir;": "\u2a7a", + "gtdot;": "\u22d7", + "gtlPar;": "\u2995", + "gtquest;": "\u2a7c", + "gtrapprox;": "\u2a86", + "gtrarr;": "\u2978", + "gtrdot;": "\u22d7", + "gtreqless;": "\u22db", + "gtreqqless;": "\u2a8c", + "gtrless;": "\u2277", + "gtrsim;": "\u2273", + "gvertneqq;": "\u2269\ufe00", + "gvnE;": "\u2269\ufe00", + "hArr;": "\u21d4", + "hairsp;": "\u200a", + "half;": "\xbd", + "hamilt;": "\u210b", + "hardcy;": "\u044a", + "harr;": "\u2194", + "harrcir;": "\u2948", + "harrw;": "\u21ad", + "hbar;": "\u210f", + "hcirc;": "\u0125", + "hearts;": "\u2665", + "heartsuit;": "\u2665", + "hellip;": "\u2026", + "hercon;": "\u22b9", + "hfr;": "\U0001d525", + "hksearow;": "\u2925", + "hkswarow;": "\u2926", + "hoarr;": "\u21ff", + "homtht;": "\u223b", + "hookleftarrow;": "\u21a9", + "hookrightarrow;": "\u21aa", + 
"hopf;": "\U0001d559", + "horbar;": "\u2015", + "hscr;": "\U0001d4bd", + "hslash;": "\u210f", + "hstrok;": "\u0127", + "hybull;": "\u2043", + "hyphen;": "\u2010", + "iacute": "\xed", + "iacute;": "\xed", + "ic;": "\u2063", + "icirc": "\xee", + "icirc;": "\xee", + "icy;": "\u0438", + "iecy;": "\u0435", + "iexcl": "\xa1", + "iexcl;": "\xa1", + "iff;": "\u21d4", + "ifr;": "\U0001d526", + "igrave": "\xec", + "igrave;": "\xec", + "ii;": "\u2148", + "iiiint;": "\u2a0c", + "iiint;": "\u222d", + "iinfin;": "\u29dc", + "iiota;": "\u2129", + "ijlig;": "\u0133", + "imacr;": "\u012b", + "image;": "\u2111", + "imagline;": "\u2110", + "imagpart;": "\u2111", + "imath;": "\u0131", + "imof;": "\u22b7", + "imped;": "\u01b5", + "in;": "\u2208", + "incare;": "\u2105", + "infin;": "\u221e", + "infintie;": "\u29dd", + "inodot;": "\u0131", + "int;": "\u222b", + "intcal;": "\u22ba", + "integers;": "\u2124", + "intercal;": "\u22ba", + "intlarhk;": "\u2a17", + "intprod;": "\u2a3c", + "iocy;": "\u0451", + "iogon;": "\u012f", + "iopf;": "\U0001d55a", + "iota;": "\u03b9", + "iprod;": "\u2a3c", + "iquest": "\xbf", + "iquest;": "\xbf", + "iscr;": "\U0001d4be", + "isin;": "\u2208", + "isinE;": "\u22f9", + "isindot;": "\u22f5", + "isins;": "\u22f4", + "isinsv;": "\u22f3", + "isinv;": "\u2208", + "it;": "\u2062", + "itilde;": "\u0129", + "iukcy;": "\u0456", + "iuml": "\xef", + "iuml;": "\xef", + "jcirc;": "\u0135", + "jcy;": "\u0439", + "jfr;": "\U0001d527", + "jmath;": "\u0237", + "jopf;": "\U0001d55b", + "jscr;": "\U0001d4bf", + "jsercy;": "\u0458", + "jukcy;": "\u0454", + "kappa;": "\u03ba", + "kappav;": "\u03f0", + "kcedil;": "\u0137", + "kcy;": "\u043a", + "kfr;": "\U0001d528", + "kgreen;": "\u0138", + "khcy;": "\u0445", + "kjcy;": "\u045c", + "kopf;": "\U0001d55c", + "kscr;": "\U0001d4c0", + "lAarr;": "\u21da", + "lArr;": "\u21d0", + "lAtail;": "\u291b", + "lBarr;": "\u290e", + "lE;": "\u2266", + "lEg;": "\u2a8b", + "lHar;": "\u2962", + "lacute;": "\u013a", + "laemptyv;": "\u29b4", + 
"lagran;": "\u2112", + "lambda;": "\u03bb", + "lang;": "\u27e8", + "langd;": "\u2991", + "langle;": "\u27e8", + "lap;": "\u2a85", + "laquo": "\xab", + "laquo;": "\xab", + "larr;": "\u2190", + "larrb;": "\u21e4", + "larrbfs;": "\u291f", + "larrfs;": "\u291d", + "larrhk;": "\u21a9", + "larrlp;": "\u21ab", + "larrpl;": "\u2939", + "larrsim;": "\u2973", + "larrtl;": "\u21a2", + "lat;": "\u2aab", + "latail;": "\u2919", + "late;": "\u2aad", + "lates;": "\u2aad\ufe00", + "lbarr;": "\u290c", + "lbbrk;": "\u2772", + "lbrace;": "{", + "lbrack;": "[", + "lbrke;": "\u298b", + "lbrksld;": "\u298f", + "lbrkslu;": "\u298d", + "lcaron;": "\u013e", + "lcedil;": "\u013c", + "lceil;": "\u2308", + "lcub;": "{", + "lcy;": "\u043b", + "ldca;": "\u2936", + "ldquo;": "\u201c", + "ldquor;": "\u201e", + "ldrdhar;": "\u2967", + "ldrushar;": "\u294b", + "ldsh;": "\u21b2", + "le;": "\u2264", + "leftarrow;": "\u2190", + "leftarrowtail;": "\u21a2", + "leftharpoondown;": "\u21bd", + "leftharpoonup;": "\u21bc", + "leftleftarrows;": "\u21c7", + "leftrightarrow;": "\u2194", + "leftrightarrows;": "\u21c6", + "leftrightharpoons;": "\u21cb", + "leftrightsquigarrow;": "\u21ad", + "leftthreetimes;": "\u22cb", + "leg;": "\u22da", + "leq;": "\u2264", + "leqq;": "\u2266", + "leqslant;": "\u2a7d", + "les;": "\u2a7d", + "lescc;": "\u2aa8", + "lesdot;": "\u2a7f", + "lesdoto;": "\u2a81", + "lesdotor;": "\u2a83", + "lesg;": "\u22da\ufe00", + "lesges;": "\u2a93", + "lessapprox;": "\u2a85", + "lessdot;": "\u22d6", + "lesseqgtr;": "\u22da", + "lesseqqgtr;": "\u2a8b", + "lessgtr;": "\u2276", + "lesssim;": "\u2272", + "lfisht;": "\u297c", + "lfloor;": "\u230a", + "lfr;": "\U0001d529", + "lg;": "\u2276", + "lgE;": "\u2a91", + "lhard;": "\u21bd", + "lharu;": "\u21bc", + "lharul;": "\u296a", + "lhblk;": "\u2584", + "ljcy;": "\u0459", + "ll;": "\u226a", + "llarr;": "\u21c7", + "llcorner;": "\u231e", + "llhard;": "\u296b", + "lltri;": "\u25fa", + "lmidot;": "\u0140", + "lmoust;": "\u23b0", + "lmoustache;": "\u23b0", + 
"lnE;": "\u2268", + "lnap;": "\u2a89", + "lnapprox;": "\u2a89", + "lne;": "\u2a87", + "lneq;": "\u2a87", + "lneqq;": "\u2268", + "lnsim;": "\u22e6", + "loang;": "\u27ec", + "loarr;": "\u21fd", + "lobrk;": "\u27e6", + "longleftarrow;": "\u27f5", + "longleftrightarrow;": "\u27f7", + "longmapsto;": "\u27fc", + "longrightarrow;": "\u27f6", + "looparrowleft;": "\u21ab", + "looparrowright;": "\u21ac", + "lopar;": "\u2985", + "lopf;": "\U0001d55d", + "loplus;": "\u2a2d", + "lotimes;": "\u2a34", + "lowast;": "\u2217", + "lowbar;": "_", + "loz;": "\u25ca", + "lozenge;": "\u25ca", + "lozf;": "\u29eb", + "lpar;": "(", + "lparlt;": "\u2993", + "lrarr;": "\u21c6", + "lrcorner;": "\u231f", + "lrhar;": "\u21cb", + "lrhard;": "\u296d", + "lrm;": "\u200e", + "lrtri;": "\u22bf", + "lsaquo;": "\u2039", + "lscr;": "\U0001d4c1", + "lsh;": "\u21b0", + "lsim;": "\u2272", + "lsime;": "\u2a8d", + "lsimg;": "\u2a8f", + "lsqb;": "[", + "lsquo;": "\u2018", + "lsquor;": "\u201a", + "lstrok;": "\u0142", + "lt": "<", + "lt;": "<", + "ltcc;": "\u2aa6", + "ltcir;": "\u2a79", + "ltdot;": "\u22d6", + "lthree;": "\u22cb", + "ltimes;": "\u22c9", + "ltlarr;": "\u2976", + "ltquest;": "\u2a7b", + "ltrPar;": "\u2996", + "ltri;": "\u25c3", + "ltrie;": "\u22b4", + "ltrif;": "\u25c2", + "lurdshar;": "\u294a", + "luruhar;": "\u2966", + "lvertneqq;": "\u2268\ufe00", + "lvnE;": "\u2268\ufe00", + "mDDot;": "\u223a", + "macr": "\xaf", + "macr;": "\xaf", + "male;": "\u2642", + "malt;": "\u2720", + "maltese;": "\u2720", + "map;": "\u21a6", + "mapsto;": "\u21a6", + "mapstodown;": "\u21a7", + "mapstoleft;": "\u21a4", + "mapstoup;": "\u21a5", + "marker;": "\u25ae", + "mcomma;": "\u2a29", + "mcy;": "\u043c", + "mdash;": "\u2014", + "measuredangle;": "\u2221", + "mfr;": "\U0001d52a", + "mho;": "\u2127", + "micro": "\xb5", + "micro;": "\xb5", + "mid;": "\u2223", + "midast;": "*", + "midcir;": "\u2af0", + "middot": "\xb7", + "middot;": "\xb7", + "minus;": "\u2212", + "minusb;": "\u229f", + "minusd;": "\u2238", + 
"minusdu;": "\u2a2a", + "mlcp;": "\u2adb", + "mldr;": "\u2026", + "mnplus;": "\u2213", + "models;": "\u22a7", + "mopf;": "\U0001d55e", + "mp;": "\u2213", + "mscr;": "\U0001d4c2", + "mstpos;": "\u223e", + "mu;": "\u03bc", + "multimap;": "\u22b8", + "mumap;": "\u22b8", + "nGg;": "\u22d9\u0338", + "nGt;": "\u226b\u20d2", + "nGtv;": "\u226b\u0338", + "nLeftarrow;": "\u21cd", + "nLeftrightarrow;": "\u21ce", + "nLl;": "\u22d8\u0338", + "nLt;": "\u226a\u20d2", + "nLtv;": "\u226a\u0338", + "nRightarrow;": "\u21cf", + "nVDash;": "\u22af", + "nVdash;": "\u22ae", + "nabla;": "\u2207", + "nacute;": "\u0144", + "nang;": "\u2220\u20d2", + "nap;": "\u2249", + "napE;": "\u2a70\u0338", + "napid;": "\u224b\u0338", + "napos;": "\u0149", + "napprox;": "\u2249", + "natur;": "\u266e", + "natural;": "\u266e", + "naturals;": "\u2115", + "nbsp": "\xa0", + "nbsp;": "\xa0", + "nbump;": "\u224e\u0338", + "nbumpe;": "\u224f\u0338", + "ncap;": "\u2a43", + "ncaron;": "\u0148", + "ncedil;": "\u0146", + "ncong;": "\u2247", + "ncongdot;": "\u2a6d\u0338", + "ncup;": "\u2a42", + "ncy;": "\u043d", + "ndash;": "\u2013", + "ne;": "\u2260", + "neArr;": "\u21d7", + "nearhk;": "\u2924", + "nearr;": "\u2197", + "nearrow;": "\u2197", + "nedot;": "\u2250\u0338", + "nequiv;": "\u2262", + "nesear;": "\u2928", + "nesim;": "\u2242\u0338", + "nexist;": "\u2204", + "nexists;": "\u2204", + "nfr;": "\U0001d52b", + "ngE;": "\u2267\u0338", + "nge;": "\u2271", + "ngeq;": "\u2271", + "ngeqq;": "\u2267\u0338", + "ngeqslant;": "\u2a7e\u0338", + "nges;": "\u2a7e\u0338", + "ngsim;": "\u2275", + "ngt;": "\u226f", + "ngtr;": "\u226f", + "nhArr;": "\u21ce", + "nharr;": "\u21ae", + "nhpar;": "\u2af2", + "ni;": "\u220b", + "nis;": "\u22fc", + "nisd;": "\u22fa", + "niv;": "\u220b", + "njcy;": "\u045a", + "nlArr;": "\u21cd", + "nlE;": "\u2266\u0338", + "nlarr;": "\u219a", + "nldr;": "\u2025", + "nle;": "\u2270", + "nleftarrow;": "\u219a", + "nleftrightarrow;": "\u21ae", + "nleq;": "\u2270", + "nleqq;": "\u2266\u0338", + 
"nleqslant;": "\u2a7d\u0338", + "nles;": "\u2a7d\u0338", + "nless;": "\u226e", + "nlsim;": "\u2274", + "nlt;": "\u226e", + "nltri;": "\u22ea", + "nltrie;": "\u22ec", + "nmid;": "\u2224", + "nopf;": "\U0001d55f", + "not": "\xac", + "not;": "\xac", + "notin;": "\u2209", + "notinE;": "\u22f9\u0338", + "notindot;": "\u22f5\u0338", + "notinva;": "\u2209", + "notinvb;": "\u22f7", + "notinvc;": "\u22f6", + "notni;": "\u220c", + "notniva;": "\u220c", + "notnivb;": "\u22fe", + "notnivc;": "\u22fd", + "npar;": "\u2226", + "nparallel;": "\u2226", + "nparsl;": "\u2afd\u20e5", + "npart;": "\u2202\u0338", + "npolint;": "\u2a14", + "npr;": "\u2280", + "nprcue;": "\u22e0", + "npre;": "\u2aaf\u0338", + "nprec;": "\u2280", + "npreceq;": "\u2aaf\u0338", + "nrArr;": "\u21cf", + "nrarr;": "\u219b", + "nrarrc;": "\u2933\u0338", + "nrarrw;": "\u219d\u0338", + "nrightarrow;": "\u219b", + "nrtri;": "\u22eb", + "nrtrie;": "\u22ed", + "nsc;": "\u2281", + "nsccue;": "\u22e1", + "nsce;": "\u2ab0\u0338", + "nscr;": "\U0001d4c3", + "nshortmid;": "\u2224", + "nshortparallel;": "\u2226", + "nsim;": "\u2241", + "nsime;": "\u2244", + "nsimeq;": "\u2244", + "nsmid;": "\u2224", + "nspar;": "\u2226", + "nsqsube;": "\u22e2", + "nsqsupe;": "\u22e3", + "nsub;": "\u2284", + "nsubE;": "\u2ac5\u0338", + "nsube;": "\u2288", + "nsubset;": "\u2282\u20d2", + "nsubseteq;": "\u2288", + "nsubseteqq;": "\u2ac5\u0338", + "nsucc;": "\u2281", + "nsucceq;": "\u2ab0\u0338", + "nsup;": "\u2285", + "nsupE;": "\u2ac6\u0338", + "nsupe;": "\u2289", + "nsupset;": "\u2283\u20d2", + "nsupseteq;": "\u2289", + "nsupseteqq;": "\u2ac6\u0338", + "ntgl;": "\u2279", + "ntilde": "\xf1", + "ntilde;": "\xf1", + "ntlg;": "\u2278", + "ntriangleleft;": "\u22ea", + "ntrianglelefteq;": "\u22ec", + "ntriangleright;": "\u22eb", + "ntrianglerighteq;": "\u22ed", + "nu;": "\u03bd", + "num;": "#", + "numero;": "\u2116", + "numsp;": "\u2007", + "nvDash;": "\u22ad", + "nvHarr;": "\u2904", + "nvap;": "\u224d\u20d2", + "nvdash;": "\u22ac", + "nvge;": 
"\u2265\u20d2", + "nvgt;": ">\u20d2", + "nvinfin;": "\u29de", + "nvlArr;": "\u2902", + "nvle;": "\u2264\u20d2", + "nvlt;": "<\u20d2", + "nvltrie;": "\u22b4\u20d2", + "nvrArr;": "\u2903", + "nvrtrie;": "\u22b5\u20d2", + "nvsim;": "\u223c\u20d2", + "nwArr;": "\u21d6", + "nwarhk;": "\u2923", + "nwarr;": "\u2196", + "nwarrow;": "\u2196", + "nwnear;": "\u2927", + "oS;": "\u24c8", + "oacute": "\xf3", + "oacute;": "\xf3", + "oast;": "\u229b", + "ocir;": "\u229a", + "ocirc": "\xf4", + "ocirc;": "\xf4", + "ocy;": "\u043e", + "odash;": "\u229d", + "odblac;": "\u0151", + "odiv;": "\u2a38", + "odot;": "\u2299", + "odsold;": "\u29bc", + "oelig;": "\u0153", + "ofcir;": "\u29bf", + "ofr;": "\U0001d52c", + "ogon;": "\u02db", + "ograve": "\xf2", + "ograve;": "\xf2", + "ogt;": "\u29c1", + "ohbar;": "\u29b5", + "ohm;": "\u03a9", + "oint;": "\u222e", + "olarr;": "\u21ba", + "olcir;": "\u29be", + "olcross;": "\u29bb", + "oline;": "\u203e", + "olt;": "\u29c0", + "omacr;": "\u014d", + "omega;": "\u03c9", + "omicron;": "\u03bf", + "omid;": "\u29b6", + "ominus;": "\u2296", + "oopf;": "\U0001d560", + "opar;": "\u29b7", + "operp;": "\u29b9", + "oplus;": "\u2295", + "or;": "\u2228", + "orarr;": "\u21bb", + "ord;": "\u2a5d", + "order;": "\u2134", + "orderof;": "\u2134", + "ordf": "\xaa", + "ordf;": "\xaa", + "ordm": "\xba", + "ordm;": "\xba", + "origof;": "\u22b6", + "oror;": "\u2a56", + "orslope;": "\u2a57", + "orv;": "\u2a5b", + "oscr;": "\u2134", + "oslash": "\xf8", + "oslash;": "\xf8", + "osol;": "\u2298", + "otilde": "\xf5", + "otilde;": "\xf5", + "otimes;": "\u2297", + "otimesas;": "\u2a36", + "ouml": "\xf6", + "ouml;": "\xf6", + "ovbar;": "\u233d", + "par;": "\u2225", + "para": "\xb6", + "para;": "\xb6", + "parallel;": "\u2225", + "parsim;": "\u2af3", + "parsl;": "\u2afd", + "part;": "\u2202", + "pcy;": "\u043f", + "percnt;": "%", + "period;": ".", + "permil;": "\u2030", + "perp;": "\u22a5", + "pertenk;": "\u2031", + "pfr;": "\U0001d52d", + "phi;": "\u03c6", + "phiv;": "\u03d5", + 
"phmmat;": "\u2133", + "phone;": "\u260e", + "pi;": "\u03c0", + "pitchfork;": "\u22d4", + "piv;": "\u03d6", + "planck;": "\u210f", + "planckh;": "\u210e", + "plankv;": "\u210f", + "plus;": "+", + "plusacir;": "\u2a23", + "plusb;": "\u229e", + "pluscir;": "\u2a22", + "plusdo;": "\u2214", + "plusdu;": "\u2a25", + "pluse;": "\u2a72", + "plusmn": "\xb1", + "plusmn;": "\xb1", + "plussim;": "\u2a26", + "plustwo;": "\u2a27", + "pm;": "\xb1", + "pointint;": "\u2a15", + "popf;": "\U0001d561", + "pound": "\xa3", + "pound;": "\xa3", + "pr;": "\u227a", + "prE;": "\u2ab3", + "prap;": "\u2ab7", + "prcue;": "\u227c", + "pre;": "\u2aaf", + "prec;": "\u227a", + "precapprox;": "\u2ab7", + "preccurlyeq;": "\u227c", + "preceq;": "\u2aaf", + "precnapprox;": "\u2ab9", + "precneqq;": "\u2ab5", + "precnsim;": "\u22e8", + "precsim;": "\u227e", + "prime;": "\u2032", + "primes;": "\u2119", + "prnE;": "\u2ab5", + "prnap;": "\u2ab9", + "prnsim;": "\u22e8", + "prod;": "\u220f", + "profalar;": "\u232e", + "profline;": "\u2312", + "profsurf;": "\u2313", + "prop;": "\u221d", + "propto;": "\u221d", + "prsim;": "\u227e", + "prurel;": "\u22b0", + "pscr;": "\U0001d4c5", + "psi;": "\u03c8", + "puncsp;": "\u2008", + "qfr;": "\U0001d52e", + "qint;": "\u2a0c", + "qopf;": "\U0001d562", + "qprime;": "\u2057", + "qscr;": "\U0001d4c6", + "quaternions;": "\u210d", + "quatint;": "\u2a16", + "quest;": "?", + "questeq;": "\u225f", + "quot": "\"", + "quot;": "\"", + "rAarr;": "\u21db", + "rArr;": "\u21d2", + "rAtail;": "\u291c", + "rBarr;": "\u290f", + "rHar;": "\u2964", + "race;": "\u223d\u0331", + "racute;": "\u0155", + "radic;": "\u221a", + "raemptyv;": "\u29b3", + "rang;": "\u27e9", + "rangd;": "\u2992", + "range;": "\u29a5", + "rangle;": "\u27e9", + "raquo": "\xbb", + "raquo;": "\xbb", + "rarr;": "\u2192", + "rarrap;": "\u2975", + "rarrb;": "\u21e5", + "rarrbfs;": "\u2920", + "rarrc;": "\u2933", + "rarrfs;": "\u291e", + "rarrhk;": "\u21aa", + "rarrlp;": "\u21ac", + "rarrpl;": "\u2945", + "rarrsim;": "\u2974", 
+ "rarrtl;": "\u21a3", + "rarrw;": "\u219d", + "ratail;": "\u291a", + "ratio;": "\u2236", + "rationals;": "\u211a", + "rbarr;": "\u290d", + "rbbrk;": "\u2773", + "rbrace;": "}", + "rbrack;": "]", + "rbrke;": "\u298c", + "rbrksld;": "\u298e", + "rbrkslu;": "\u2990", + "rcaron;": "\u0159", + "rcedil;": "\u0157", + "rceil;": "\u2309", + "rcub;": "}", + "rcy;": "\u0440", + "rdca;": "\u2937", + "rdldhar;": "\u2969", + "rdquo;": "\u201d", + "rdquor;": "\u201d", + "rdsh;": "\u21b3", + "real;": "\u211c", + "realine;": "\u211b", + "realpart;": "\u211c", + "reals;": "\u211d", + "rect;": "\u25ad", + "reg": "\xae", + "reg;": "\xae", + "rfisht;": "\u297d", + "rfloor;": "\u230b", + "rfr;": "\U0001d52f", + "rhard;": "\u21c1", + "rharu;": "\u21c0", + "rharul;": "\u296c", + "rho;": "\u03c1", + "rhov;": "\u03f1", + "rightarrow;": "\u2192", + "rightarrowtail;": "\u21a3", + "rightharpoondown;": "\u21c1", + "rightharpoonup;": "\u21c0", + "rightleftarrows;": "\u21c4", + "rightleftharpoons;": "\u21cc", + "rightrightarrows;": "\u21c9", + "rightsquigarrow;": "\u219d", + "rightthreetimes;": "\u22cc", + "ring;": "\u02da", + "risingdotseq;": "\u2253", + "rlarr;": "\u21c4", + "rlhar;": "\u21cc", + "rlm;": "\u200f", + "rmoust;": "\u23b1", + "rmoustache;": "\u23b1", + "rnmid;": "\u2aee", + "roang;": "\u27ed", + "roarr;": "\u21fe", + "robrk;": "\u27e7", + "ropar;": "\u2986", + "ropf;": "\U0001d563", + "roplus;": "\u2a2e", + "rotimes;": "\u2a35", + "rpar;": ")", + "rpargt;": "\u2994", + "rppolint;": "\u2a12", + "rrarr;": "\u21c9", + "rsaquo;": "\u203a", + "rscr;": "\U0001d4c7", + "rsh;": "\u21b1", + "rsqb;": "]", + "rsquo;": "\u2019", + "rsquor;": "\u2019", + "rthree;": "\u22cc", + "rtimes;": "\u22ca", + "rtri;": "\u25b9", + "rtrie;": "\u22b5", + "rtrif;": "\u25b8", + "rtriltri;": "\u29ce", + "ruluhar;": "\u2968", + "rx;": "\u211e", + "sacute;": "\u015b", + "sbquo;": "\u201a", + "sc;": "\u227b", + "scE;": "\u2ab4", + "scap;": "\u2ab8", + "scaron;": "\u0161", + "sccue;": "\u227d", + "sce;": 
"\u2ab0", + "scedil;": "\u015f", + "scirc;": "\u015d", + "scnE;": "\u2ab6", + "scnap;": "\u2aba", + "scnsim;": "\u22e9", + "scpolint;": "\u2a13", + "scsim;": "\u227f", + "scy;": "\u0441", + "sdot;": "\u22c5", + "sdotb;": "\u22a1", + "sdote;": "\u2a66", + "seArr;": "\u21d8", + "searhk;": "\u2925", + "searr;": "\u2198", + "searrow;": "\u2198", + "sect": "\xa7", + "sect;": "\xa7", + "semi;": ";", + "seswar;": "\u2929", + "setminus;": "\u2216", + "setmn;": "\u2216", + "sext;": "\u2736", + "sfr;": "\U0001d530", + "sfrown;": "\u2322", + "sharp;": "\u266f", + "shchcy;": "\u0449", + "shcy;": "\u0448", + "shortmid;": "\u2223", + "shortparallel;": "\u2225", + "shy": "\xad", + "shy;": "\xad", + "sigma;": "\u03c3", + "sigmaf;": "\u03c2", + "sigmav;": "\u03c2", + "sim;": "\u223c", + "simdot;": "\u2a6a", + "sime;": "\u2243", + "simeq;": "\u2243", + "simg;": "\u2a9e", + "simgE;": "\u2aa0", + "siml;": "\u2a9d", + "simlE;": "\u2a9f", + "simne;": "\u2246", + "simplus;": "\u2a24", + "simrarr;": "\u2972", + "slarr;": "\u2190", + "smallsetminus;": "\u2216", + "smashp;": "\u2a33", + "smeparsl;": "\u29e4", + "smid;": "\u2223", + "smile;": "\u2323", + "smt;": "\u2aaa", + "smte;": "\u2aac", + "smtes;": "\u2aac\ufe00", + "softcy;": "\u044c", + "sol;": "/", + "solb;": "\u29c4", + "solbar;": "\u233f", + "sopf;": "\U0001d564", + "spades;": "\u2660", + "spadesuit;": "\u2660", + "spar;": "\u2225", + "sqcap;": "\u2293", + "sqcaps;": "\u2293\ufe00", + "sqcup;": "\u2294", + "sqcups;": "\u2294\ufe00", + "sqsub;": "\u228f", + "sqsube;": "\u2291", + "sqsubset;": "\u228f", + "sqsubseteq;": "\u2291", + "sqsup;": "\u2290", + "sqsupe;": "\u2292", + "sqsupset;": "\u2290", + "sqsupseteq;": "\u2292", + "squ;": "\u25a1", + "square;": "\u25a1", + "squarf;": "\u25aa", + "squf;": "\u25aa", + "srarr;": "\u2192", + "sscr;": "\U0001d4c8", + "ssetmn;": "\u2216", + "ssmile;": "\u2323", + "sstarf;": "\u22c6", + "star;": "\u2606", + "starf;": "\u2605", + "straightepsilon;": "\u03f5", + "straightphi;": "\u03d5", + 
"strns;": "\xaf", + "sub;": "\u2282", + "subE;": "\u2ac5", + "subdot;": "\u2abd", + "sube;": "\u2286", + "subedot;": "\u2ac3", + "submult;": "\u2ac1", + "subnE;": "\u2acb", + "subne;": "\u228a", + "subplus;": "\u2abf", + "subrarr;": "\u2979", + "subset;": "\u2282", + "subseteq;": "\u2286", + "subseteqq;": "\u2ac5", + "subsetneq;": "\u228a", + "subsetneqq;": "\u2acb", + "subsim;": "\u2ac7", + "subsub;": "\u2ad5", + "subsup;": "\u2ad3", + "succ;": "\u227b", + "succapprox;": "\u2ab8", + "succcurlyeq;": "\u227d", + "succeq;": "\u2ab0", + "succnapprox;": "\u2aba", + "succneqq;": "\u2ab6", + "succnsim;": "\u22e9", + "succsim;": "\u227f", + "sum;": "\u2211", + "sung;": "\u266a", + "sup1": "\xb9", + "sup1;": "\xb9", + "sup2": "\xb2", + "sup2;": "\xb2", + "sup3": "\xb3", + "sup3;": "\xb3", + "sup;": "\u2283", + "supE;": "\u2ac6", + "supdot;": "\u2abe", + "supdsub;": "\u2ad8", + "supe;": "\u2287", + "supedot;": "\u2ac4", + "suphsol;": "\u27c9", + "suphsub;": "\u2ad7", + "suplarr;": "\u297b", + "supmult;": "\u2ac2", + "supnE;": "\u2acc", + "supne;": "\u228b", + "supplus;": "\u2ac0", + "supset;": "\u2283", + "supseteq;": "\u2287", + "supseteqq;": "\u2ac6", + "supsetneq;": "\u228b", + "supsetneqq;": "\u2acc", + "supsim;": "\u2ac8", + "supsub;": "\u2ad4", + "supsup;": "\u2ad6", + "swArr;": "\u21d9", + "swarhk;": "\u2926", + "swarr;": "\u2199", + "swarrow;": "\u2199", + "swnwar;": "\u292a", + "szlig": "\xdf", + "szlig;": "\xdf", + "target;": "\u2316", + "tau;": "\u03c4", + "tbrk;": "\u23b4", + "tcaron;": "\u0165", + "tcedil;": "\u0163", + "tcy;": "\u0442", + "tdot;": "\u20db", + "telrec;": "\u2315", + "tfr;": "\U0001d531", + "there4;": "\u2234", + "therefore;": "\u2234", + "theta;": "\u03b8", + "thetasym;": "\u03d1", + "thetav;": "\u03d1", + "thickapprox;": "\u2248", + "thicksim;": "\u223c", + "thinsp;": "\u2009", + "thkap;": "\u2248", + "thksim;": "\u223c", + "thorn": "\xfe", + "thorn;": "\xfe", + "tilde;": "\u02dc", + "times": "\xd7", + "times;": "\xd7", + "timesb;": "\u22a0", 
+ "timesbar;": "\u2a31", + "timesd;": "\u2a30", + "tint;": "\u222d", + "toea;": "\u2928", + "top;": "\u22a4", + "topbot;": "\u2336", + "topcir;": "\u2af1", + "topf;": "\U0001d565", + "topfork;": "\u2ada", + "tosa;": "\u2929", + "tprime;": "\u2034", + "trade;": "\u2122", + "triangle;": "\u25b5", + "triangledown;": "\u25bf", + "triangleleft;": "\u25c3", + "trianglelefteq;": "\u22b4", + "triangleq;": "\u225c", + "triangleright;": "\u25b9", + "trianglerighteq;": "\u22b5", + "tridot;": "\u25ec", + "trie;": "\u225c", + "triminus;": "\u2a3a", + "triplus;": "\u2a39", + "trisb;": "\u29cd", + "tritime;": "\u2a3b", + "trpezium;": "\u23e2", + "tscr;": "\U0001d4c9", + "tscy;": "\u0446", + "tshcy;": "\u045b", + "tstrok;": "\u0167", + "twixt;": "\u226c", + "twoheadleftarrow;": "\u219e", + "twoheadrightarrow;": "\u21a0", + "uArr;": "\u21d1", + "uHar;": "\u2963", + "uacute": "\xfa", + "uacute;": "\xfa", + "uarr;": "\u2191", + "ubrcy;": "\u045e", + "ubreve;": "\u016d", + "ucirc": "\xfb", + "ucirc;": "\xfb", + "ucy;": "\u0443", + "udarr;": "\u21c5", + "udblac;": "\u0171", + "udhar;": "\u296e", + "ufisht;": "\u297e", + "ufr;": "\U0001d532", + "ugrave": "\xf9", + "ugrave;": "\xf9", + "uharl;": "\u21bf", + "uharr;": "\u21be", + "uhblk;": "\u2580", + "ulcorn;": "\u231c", + "ulcorner;": "\u231c", + "ulcrop;": "\u230f", + "ultri;": "\u25f8", + "umacr;": "\u016b", + "uml": "\xa8", + "uml;": "\xa8", + "uogon;": "\u0173", + "uopf;": "\U0001d566", + "uparrow;": "\u2191", + "updownarrow;": "\u2195", + "upharpoonleft;": "\u21bf", + "upharpoonright;": "\u21be", + "uplus;": "\u228e", + "upsi;": "\u03c5", + "upsih;": "\u03d2", + "upsilon;": "\u03c5", + "upuparrows;": "\u21c8", + "urcorn;": "\u231d", + "urcorner;": "\u231d", + "urcrop;": "\u230e", + "uring;": "\u016f", + "urtri;": "\u25f9", + "uscr;": "\U0001d4ca", + "utdot;": "\u22f0", + "utilde;": "\u0169", + "utri;": "\u25b5", + "utrif;": "\u25b4", + "uuarr;": "\u21c8", + "uuml": "\xfc", + "uuml;": "\xfc", + "uwangle;": "\u29a7", + "vArr;": 
"\u21d5", + "vBar;": "\u2ae8", + "vBarv;": "\u2ae9", + "vDash;": "\u22a8", + "vangrt;": "\u299c", + "varepsilon;": "\u03f5", + "varkappa;": "\u03f0", + "varnothing;": "\u2205", + "varphi;": "\u03d5", + "varpi;": "\u03d6", + "varpropto;": "\u221d", + "varr;": "\u2195", + "varrho;": "\u03f1", + "varsigma;": "\u03c2", + "varsubsetneq;": "\u228a\ufe00", + "varsubsetneqq;": "\u2acb\ufe00", + "varsupsetneq;": "\u228b\ufe00", + "varsupsetneqq;": "\u2acc\ufe00", + "vartheta;": "\u03d1", + "vartriangleleft;": "\u22b2", + "vartriangleright;": "\u22b3", + "vcy;": "\u0432", + "vdash;": "\u22a2", + "vee;": "\u2228", + "veebar;": "\u22bb", + "veeeq;": "\u225a", + "vellip;": "\u22ee", + "verbar;": "|", + "vert;": "|", + "vfr;": "\U0001d533", + "vltri;": "\u22b2", + "vnsub;": "\u2282\u20d2", + "vnsup;": "\u2283\u20d2", + "vopf;": "\U0001d567", + "vprop;": "\u221d", + "vrtri;": "\u22b3", + "vscr;": "\U0001d4cb", + "vsubnE;": "\u2acb\ufe00", + "vsubne;": "\u228a\ufe00", + "vsupnE;": "\u2acc\ufe00", + "vsupne;": "\u228b\ufe00", + "vzigzag;": "\u299a", + "wcirc;": "\u0175", + "wedbar;": "\u2a5f", + "wedge;": "\u2227", + "wedgeq;": "\u2259", + "weierp;": "\u2118", + "wfr;": "\U0001d534", + "wopf;": "\U0001d568", + "wp;": "\u2118", + "wr;": "\u2240", + "wreath;": "\u2240", + "wscr;": "\U0001d4cc", + "xcap;": "\u22c2", + "xcirc;": "\u25ef", + "xcup;": "\u22c3", + "xdtri;": "\u25bd", + "xfr;": "\U0001d535", + "xhArr;": "\u27fa", + "xharr;": "\u27f7", + "xi;": "\u03be", + "xlArr;": "\u27f8", + "xlarr;": "\u27f5", + "xmap;": "\u27fc", + "xnis;": "\u22fb", + "xodot;": "\u2a00", + "xopf;": "\U0001d569", + "xoplus;": "\u2a01", + "xotime;": "\u2a02", + "xrArr;": "\u27f9", + "xrarr;": "\u27f6", + "xscr;": "\U0001d4cd", + "xsqcup;": "\u2a06", + "xuplus;": "\u2a04", + "xutri;": "\u25b3", + "xvee;": "\u22c1", + "xwedge;": "\u22c0", + "yacute": "\xfd", + "yacute;": "\xfd", + "yacy;": "\u044f", + "ycirc;": "\u0177", + "ycy;": "\u044b", + "yen": "\xa5", + "yen;": "\xa5", + "yfr;": "\U0001d536", + 
"yicy;": "\u0457", + "yopf;": "\U0001d56a", + "yscr;": "\U0001d4ce", + "yucy;": "\u044e", + "yuml": "\xff", + "yuml;": "\xff", + "zacute;": "\u017a", + "zcaron;": "\u017e", + "zcy;": "\u0437", + "zdot;": "\u017c", + "zeetrf;": "\u2128", + "zeta;": "\u03b6", + "zfr;": "\U0001d537", + "zhcy;": "\u0436", + "zigrarr;": "\u21dd", + "zopf;": "\U0001d56b", + "zscr;": "\U0001d4cf", + "zwj;": "\u200d", + "zwnj;": "\u200c", +} + +replacementCharacters = { + 0x0: "\uFFFD", + 0x0d: "\u000D", + 0x80: "\u20AC", + 0x81: "\u0081", + 0x81: "\u0081", + 0x82: "\u201A", + 0x83: "\u0192", + 0x84: "\u201E", + 0x85: "\u2026", + 0x86: "\u2020", + 0x87: "\u2021", + 0x88: "\u02C6", + 0x89: "\u2030", + 0x8A: "\u0160", + 0x8B: "\u2039", + 0x8C: "\u0152", + 0x8D: "\u008D", + 0x8E: "\u017D", + 0x8F: "\u008F", + 0x90: "\u0090", + 0x91: "\u2018", + 0x92: "\u2019", + 0x93: "\u201C", + 0x94: "\u201D", + 0x95: "\u2022", + 0x96: "\u2013", + 0x97: "\u2014", + 0x98: "\u02DC", + 0x99: "\u2122", + 0x9A: "\u0161", + 0x9B: "\u203A", + 0x9C: "\u0153", + 0x9D: "\u009D", + 0x9E: "\u017E", + 0x9F: "\u0178", +} + +encodings = { + '437': 'cp437', + '850': 'cp850', + '852': 'cp852', + '855': 'cp855', + '857': 'cp857', + '860': 'cp860', + '861': 'cp861', + '862': 'cp862', + '863': 'cp863', + '865': 'cp865', + '866': 'cp866', + '869': 'cp869', + 'ansix341968': 'ascii', + 'ansix341986': 'ascii', + 'arabic': 'iso8859-6', + 'ascii': 'ascii', + 'asmo708': 'iso8859-6', + 'big5': 'big5', + 'big5hkscs': 'big5hkscs', + 'chinese': 'gbk', + 'cp037': 'cp037', + 'cp1026': 'cp1026', + 'cp154': 'ptcp154', + 'cp367': 'ascii', + 'cp424': 'cp424', + 'cp437': 'cp437', + 'cp500': 'cp500', + 'cp775': 'cp775', + 'cp819': 'windows-1252', + 'cp850': 'cp850', + 'cp852': 'cp852', + 'cp855': 'cp855', + 'cp857': 'cp857', + 'cp860': 'cp860', + 'cp861': 'cp861', + 'cp862': 'cp862', + 'cp863': 'cp863', + 'cp864': 'cp864', + 'cp865': 'cp865', + 'cp866': 'cp866', + 'cp869': 'cp869', + 'cp936': 'gbk', + 'cpgr': 'cp869', + 'cpis': 'cp861', + 
'csascii': 'ascii', + 'csbig5': 'big5', + 'cseuckr': 'cp949', + 'cseucpkdfmtjapanese': 'euc_jp', + 'csgb2312': 'gbk', + 'cshproman8': 'hp-roman8', + 'csibm037': 'cp037', + 'csibm1026': 'cp1026', + 'csibm424': 'cp424', + 'csibm500': 'cp500', + 'csibm855': 'cp855', + 'csibm857': 'cp857', + 'csibm860': 'cp860', + 'csibm861': 'cp861', + 'csibm863': 'cp863', + 'csibm864': 'cp864', + 'csibm865': 'cp865', + 'csibm866': 'cp866', + 'csibm869': 'cp869', + 'csiso2022jp': 'iso2022_jp', + 'csiso2022jp2': 'iso2022_jp_2', + 'csiso2022kr': 'iso2022_kr', + 'csiso58gb231280': 'gbk', + 'csisolatin1': 'windows-1252', + 'csisolatin2': 'iso8859-2', + 'csisolatin3': 'iso8859-3', + 'csisolatin4': 'iso8859-4', + 'csisolatin5': 'windows-1254', + 'csisolatin6': 'iso8859-10', + 'csisolatinarabic': 'iso8859-6', + 'csisolatincyrillic': 'iso8859-5', + 'csisolatingreek': 'iso8859-7', + 'csisolatinhebrew': 'iso8859-8', + 'cskoi8r': 'koi8-r', + 'csksc56011987': 'cp949', + 'cspc775baltic': 'cp775', + 'cspc850multilingual': 'cp850', + 'cspc862latinhebrew': 'cp862', + 'cspc8codepage437': 'cp437', + 'cspcp852': 'cp852', + 'csptcp154': 'ptcp154', + 'csshiftjis': 'shift_jis', + 'csunicode11utf7': 'utf-7', + 'cyrillic': 'iso8859-5', + 'cyrillicasian': 'ptcp154', + 'ebcdiccpbe': 'cp500', + 'ebcdiccpca': 'cp037', + 'ebcdiccpch': 'cp500', + 'ebcdiccphe': 'cp424', + 'ebcdiccpnl': 'cp037', + 'ebcdiccpus': 'cp037', + 'ebcdiccpwt': 'cp037', + 'ecma114': 'iso8859-6', + 'ecma118': 'iso8859-7', + 'elot928': 'iso8859-7', + 'eucjp': 'euc_jp', + 'euckr': 'cp949', + 'extendedunixcodepackedformatforjapanese': 'euc_jp', + 'gb18030': 'gb18030', + 'gb2312': 'gbk', + 'gb231280': 'gbk', + 'gbk': 'gbk', + 'greek': 'iso8859-7', + 'greek8': 'iso8859-7', + 'hebrew': 'iso8859-8', + 'hproman8': 'hp-roman8', + 'hzgb2312': 'hz', + 'ibm037': 'cp037', + 'ibm1026': 'cp1026', + 'ibm367': 'ascii', + 'ibm424': 'cp424', + 'ibm437': 'cp437', + 'ibm500': 'cp500', + 'ibm775': 'cp775', + 'ibm819': 'windows-1252', + 'ibm850': 'cp850', + 
'ibm852': 'cp852', + 'ibm855': 'cp855', + 'ibm857': 'cp857', + 'ibm860': 'cp860', + 'ibm861': 'cp861', + 'ibm862': 'cp862', + 'ibm863': 'cp863', + 'ibm864': 'cp864', + 'ibm865': 'cp865', + 'ibm866': 'cp866', + 'ibm869': 'cp869', + 'iso2022jp': 'iso2022_jp', + 'iso2022jp2': 'iso2022_jp_2', + 'iso2022kr': 'iso2022_kr', + 'iso646irv1991': 'ascii', + 'iso646us': 'ascii', + 'iso88591': 'windows-1252', + 'iso885910': 'iso8859-10', + 'iso8859101992': 'iso8859-10', + 'iso885911987': 'windows-1252', + 'iso885913': 'iso8859-13', + 'iso885914': 'iso8859-14', + 'iso8859141998': 'iso8859-14', + 'iso885915': 'iso8859-15', + 'iso885916': 'iso8859-16', + 'iso8859162001': 'iso8859-16', + 'iso88592': 'iso8859-2', + 'iso885921987': 'iso8859-2', + 'iso88593': 'iso8859-3', + 'iso885931988': 'iso8859-3', + 'iso88594': 'iso8859-4', + 'iso885941988': 'iso8859-4', + 'iso88595': 'iso8859-5', + 'iso885951988': 'iso8859-5', + 'iso88596': 'iso8859-6', + 'iso885961987': 'iso8859-6', + 'iso88597': 'iso8859-7', + 'iso885971987': 'iso8859-7', + 'iso88598': 'iso8859-8', + 'iso885981988': 'iso8859-8', + 'iso88599': 'windows-1254', + 'iso885991989': 'windows-1254', + 'isoceltic': 'iso8859-14', + 'isoir100': 'windows-1252', + 'isoir101': 'iso8859-2', + 'isoir109': 'iso8859-3', + 'isoir110': 'iso8859-4', + 'isoir126': 'iso8859-7', + 'isoir127': 'iso8859-6', + 'isoir138': 'iso8859-8', + 'isoir144': 'iso8859-5', + 'isoir148': 'windows-1254', + 'isoir149': 'cp949', + 'isoir157': 'iso8859-10', + 'isoir199': 'iso8859-14', + 'isoir226': 'iso8859-16', + 'isoir58': 'gbk', + 'isoir6': 'ascii', + 'koi8r': 'koi8-r', + 'koi8u': 'koi8-u', + 'korean': 'cp949', + 'ksc5601': 'cp949', + 'ksc56011987': 'cp949', + 'ksc56011989': 'cp949', + 'l1': 'windows-1252', + 'l10': 'iso8859-16', + 'l2': 'iso8859-2', + 'l3': 'iso8859-3', + 'l4': 'iso8859-4', + 'l5': 'windows-1254', + 'l6': 'iso8859-10', + 'l8': 'iso8859-14', + 'latin1': 'windows-1252', + 'latin10': 'iso8859-16', + 'latin2': 'iso8859-2', + 'latin3': 'iso8859-3', + 
'latin4': 'iso8859-4', + 'latin5': 'windows-1254', + 'latin6': 'iso8859-10', + 'latin8': 'iso8859-14', + 'latin9': 'iso8859-15', + 'ms936': 'gbk', + 'mskanji': 'shift_jis', + 'pt154': 'ptcp154', + 'ptcp154': 'ptcp154', + 'r8': 'hp-roman8', + 'roman8': 'hp-roman8', + 'shiftjis': 'shift_jis', + 'tis620': 'cp874', + 'unicode11utf7': 'utf-7', + 'us': 'ascii', + 'usascii': 'ascii', + 'utf16': 'utf-16', + 'utf16be': 'utf-16-be', + 'utf16le': 'utf-16-le', + 'utf8': 'utf-8', + 'windows1250': 'cp1250', + 'windows1251': 'cp1251', + 'windows1252': 'cp1252', + 'windows1253': 'cp1253', + 'windows1254': 'cp1254', + 'windows1255': 'cp1255', + 'windows1256': 'cp1256', + 'windows1257': 'cp1257', + 'windows1258': 'cp1258', + 'windows936': 'gbk', + 'x-x-big5': 'big5'} + +tokenTypes = { + "Doctype": 0, + "Characters": 1, + "SpaceCharacters": 2, + "StartTag": 3, + "EndTag": 4, + "EmptyTag": 5, + "Comment": 6, + "ParseError": 7 +} + +tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"], + tokenTypes["EmptyTag"])) + + +prefixes = dict([(v, k) for k, v in namespaces.items()]) +prefixes["http://www.w3.org/1998/Math/MathML"] = "math" + + +class DataLossWarning(UserWarning): + pass + + +class ReparseException(Exception): + pass diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/__init__.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/_base.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/_base.py new file mode 100644 index 00000000..c7dbaed0 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/_base.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import, division, unicode_literals + + +class Filter(object): + def __init__(self, source): + self.source = source + + def __iter__(self): + return iter(self.source) + + def __getattr__(self, name): + return 
getattr(self.source, name) diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py new file mode 100644 index 00000000..fed6996c --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py @@ -0,0 +1,20 @@ +from __future__ import absolute_import, division, unicode_literals + +from . import _base + +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict + + +class Filter(_base.Filter): + def __iter__(self): + for token in _base.Filter.__iter__(self): + if token["type"] in ("StartTag", "EmptyTag"): + attrs = OrderedDict() + for name, value in sorted(token["data"].items(), + key=lambda x: x[0]): + attrs[name] = value + token["data"] = attrs + yield token diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py new file mode 100644 index 00000000..ca33b70b --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py @@ -0,0 +1,65 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import _base + + +class Filter(_base.Filter): + def __init__(self, source, encoding): + _base.Filter.__init__(self, source) + self.encoding = encoding + + def __iter__(self): + state = "pre_head" + meta_found = (self.encoding is None) + pending = [] + + for token in _base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag": + if token["name"].lower() == "head": + state = "in_head" + + elif type == "EmptyTag": + if token["name"].lower() == "meta": + # replace charset with actual encoding + has_http_equiv_content_type = False + for (namespace, name), value in token["data"].items(): + if namespace is not None: + continue + elif name.lower() == 'charset': + token["data"][(namespace, name)] = self.encoding + meta_found = True + break + elif name == 'http-equiv' and value.lower() == 'content-type': + has_http_equiv_content_type = True + else: + if has_http_equiv_content_type and (None, "content") in token["data"]: + token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding + meta_found = True + + elif token["name"].lower() == "head" and not meta_found: + # insert meta into empty head + yield {"type": "StartTag", "name": "head", + "data": token["data"]} + yield {"type": "EmptyTag", "name": "meta", + "data": {(None, "charset"): self.encoding}} + yield {"type": "EndTag", "name": "head"} + meta_found = True + continue + + elif type == "EndTag": + if token["name"].lower() == "head" and pending: + # insert meta into head (if necessary) and flush pending queue + yield pending.pop(0) + if not meta_found: + yield {"type": "EmptyTag", "name": "meta", + "data": {(None, "charset"): self.encoding}} + while pending: + yield pending.pop(0) + meta_found = True + state = "post_head" + + if state == "in_head": + pending.append(token) + else: + yield token diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py new file mode 100644 index 00000000..7cc99a4b --- 
/dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py @@ -0,0 +1,93 @@ +from __future__ import absolute_import, division, unicode_literals + +from gettext import gettext +_ = gettext + +from . import _base +from ..constants import cdataElements, rcdataElements, voidElements + +from ..constants import spaceCharacters +spaceCharacters = "".join(spaceCharacters) + + +class LintError(Exception): + pass + + +class Filter(_base.Filter): + def __iter__(self): + open_elements = [] + contentModelFlag = "PCDATA" + for token in _base.Filter.__iter__(self): + type = token["type"] + if type in ("StartTag", "EmptyTag"): + name = token["name"] + if contentModelFlag != "PCDATA": + raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name}) + if not isinstance(name, str): + raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name}) + if not name: + raise LintError(_("Empty tag name")) + if type == "StartTag" and name in voidElements: + raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name}) + elif type == "EmptyTag" and name not in voidElements: + raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]}) + if type == "StartTag": + open_elements.append(name) + for name, value in token["data"]: + if not isinstance(name, str): + raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name}) + if not name: + raise LintError(_("Empty attribute name")) + if not isinstance(value, str): + raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value}) + if name in cdataElements: + contentModelFlag = "CDATA" + elif name in rcdataElements: + contentModelFlag = "RCDATA" + elif name == "plaintext": + contentModelFlag = "PLAINTEXT" + + elif type == "EndTag": + name = token["name"] + if not isinstance(name, str): + raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name}) + if not name: + raise 
LintError(_("Empty tag name")) + if name in voidElements: + raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name}) + start_name = open_elements.pop() + if start_name != name: + raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name}) + contentModelFlag = "PCDATA" + + elif type == "Comment": + if contentModelFlag != "PCDATA": + raise LintError(_("Comment not in PCDATA content model flag")) + + elif type in ("Characters", "SpaceCharacters"): + data = token["data"] + if not isinstance(data, str): + raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data}) + if not data: + raise LintError(_("%(type)s token with empty data") % {"type": type}) + if type == "SpaceCharacters": + data = data.strip(spaceCharacters) + if data: + raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data}) + + elif type == "Doctype": + name = token["name"] + if contentModelFlag != "PCDATA": + raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name}) + if not isinstance(name, str): + raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name}) + # XXX: what to do with token["data"] ? + + elif type in ("ParseError", "SerializeError"): + pass + + else: + raise LintError(_("Unknown token type: %(type)s") % {"type": type}) + + yield token diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py new file mode 100644 index 00000000..fefe0b30 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py @@ -0,0 +1,205 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import _base + + +class Filter(_base.Filter): + def slider(self): + previous1 = previous2 = None + for token in self.source: + if previous1 is not None: + yield previous2, previous1, token + previous2 = previous1 + previous1 = token + yield previous2, previous1, None + + def __iter__(self): + for previous, token, next in self.slider(): + type = token["type"] + if type == "StartTag": + if (token["data"] or + not self.is_optional_start(token["name"], previous, next)): + yield token + elif type == "EndTag": + if not self.is_optional_end(token["name"], next): + yield token + else: + yield token + + def is_optional_start(self, tagname, previous, next): + type = next and next["type"] or None + if tagname in 'html': + # An html element's start tag may be omitted if the first thing + # inside the html element is not a space character or a comment. + return type not in ("Comment", "SpaceCharacters") + elif tagname == 'head': + # A head element's start tag may be omitted if the first thing + # inside the head element is an element. + # XXX: we also omit the start tag if the head element is empty + if type in ("StartTag", "EmptyTag"): + return True + elif type == "EndTag": + return next["name"] == "head" + elif tagname == 'body': + # A body element's start tag may be omitted if the first thing + # inside the body element is not a space character or a comment, + # except if the first thing inside the body element is a script + # or style element and the node immediately preceding the body + # element is a head element whose end tag has been omitted. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we do not look at the preceding event, so we never omit + # the body element's start tag if it's followed by a script or + # a style element. 
+ return next["name"] not in ('script', 'style') + else: + return True + elif tagname == 'colgroup': + # A colgroup element's start tag may be omitted if the first thing + # inside the colgroup element is a col element, and if the element + # is not immediately preceeded by another colgroup element whose + # end tag has been omitted. + if type in ("StartTag", "EmptyTag"): + # XXX: we do not look at the preceding event, so instead we never + # omit the colgroup element's end tag when it is immediately + # followed by another colgroup element. See is_optional_end. + return next["name"] == "col" + else: + return False + elif tagname == 'tbody': + # A tbody element's start tag may be omitted if the first thing + # inside the tbody element is a tr element, and if the element is + # not immediately preceeded by a tbody, thead, or tfoot element + # whose end tag has been omitted. + if type == "StartTag": + # omit the thead and tfoot elements' end tag when they are + # immediately followed by a tbody element. See is_optional_end. + if previous and previous['type'] == 'EndTag' and \ + previous['name'] in ('tbody', 'thead', 'tfoot'): + return False + return next["name"] == 'tr' + else: + return False + return False + + def is_optional_end(self, tagname, next): + type = next and next["type"] or None + if tagname in ('html', 'head', 'body'): + # An html element's end tag may be omitted if the html element + # is not immediately followed by a space character or a comment. + return type not in ("Comment", "SpaceCharacters") + elif tagname in ('li', 'optgroup', 'tr'): + # A li element's end tag may be omitted if the li element is + # immediately followed by another li element or if there is + # no more content in the parent element. + # An optgroup element's end tag may be omitted if the optgroup + # element is immediately followed by another optgroup element, + # or if there is no more content in the parent element. 
+ # A tr element's end tag may be omitted if the tr element is + # immediately followed by another tr element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] == tagname + else: + return type == "EndTag" or type is None + elif tagname in ('dt', 'dd'): + # A dt element's end tag may be omitted if the dt element is + # immediately followed by another dt element or a dd element. + # A dd element's end tag may be omitted if the dd element is + # immediately followed by another dd element or a dt element, + # or if there is no more content in the parent element. + if type == "StartTag": + return next["name"] in ('dt', 'dd') + elif tagname == 'dd': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'p': + # A p element's end tag may be omitted if the p element is + # immediately followed by an address, article, aside, + # blockquote, datagrid, dialog, dir, div, dl, fieldset, + # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu, + # nav, ol, p, pre, section, table, or ul, element, or if + # there is no more content in the parent element. + if type in ("StartTag", "EmptyTag"): + return next["name"] in ('address', 'article', 'aside', + 'blockquote', 'datagrid', 'dialog', + 'dir', 'div', 'dl', 'fieldset', 'footer', + 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', + 'header', 'hr', 'menu', 'nav', 'ol', + 'p', 'pre', 'section', 'table', 'ul') + else: + return type == "EndTag" or type is None + elif tagname == 'option': + # An option element's end tag may be omitted if the option + # element is immediately followed by another option element, + # or if it is immediately followed by an optgroup + # element, or if there is no more content in the parent + # element. 
+ if type == "StartTag": + return next["name"] in ('option', 'optgroup') + else: + return type == "EndTag" or type is None + elif tagname in ('rt', 'rp'): + # An rt element's end tag may be omitted if the rt element is + # immediately followed by an rt or rp element, or if there is + # no more content in the parent element. + # An rp element's end tag may be omitted if the rp element is + # immediately followed by an rt or rp element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] in ('rt', 'rp') + else: + return type == "EndTag" or type is None + elif tagname == 'colgroup': + # A colgroup element's end tag may be omitted if the colgroup + # element is not immediately followed by a space character or + # a comment. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we also look for an immediately following colgroup + # element. See is_optional_start. + return next["name"] != 'colgroup' + else: + return True + elif tagname in ('thead', 'tbody'): + # A thead element's end tag may be omitted if the thead element + # is immediately followed by a tbody or tfoot element. + # A tbody element's end tag may be omitted if the tbody element + # is immediately followed by a tbody or tfoot element, or if + # there is no more content in the parent element. + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. + # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] in ['tbody', 'tfoot'] + elif tagname == 'tbody': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'tfoot': + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. 
+ # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] == 'tbody' + else: + return type == "EndTag" or type is None + elif tagname in ('td', 'th'): + # A td element's end tag may be omitted if the td element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. + # A th element's end tag may be omitted if the th element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] in ('td', 'th') + else: + return type == "EndTag" or type is None + return False diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py new file mode 100644 index 00000000..b206b54e --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import, division, unicode_literals + +from . import _base +from ..sanitizer import HTMLSanitizerMixin + + +class Filter(_base.Filter, HTMLSanitizerMixin): + def __iter__(self): + for token in _base.Filter.__iter__(self): + token = self.sanitize_token(token) + if token: + yield token diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py new file mode 100644 index 00000000..dfc60eeb --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import, division, unicode_literals + +import re + +from . 
import _base +from ..constants import rcdataElements, spaceCharacters +spaceCharacters = "".join(spaceCharacters) + +SPACES_REGEX = re.compile("[%s]+" % spaceCharacters) + + +class Filter(_base.Filter): + + spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) + + def __iter__(self): + preserve = 0 + for token in _base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag" \ + and (preserve or token["name"] in self.spacePreserveElements): + preserve += 1 + + elif type == "EndTag" and preserve: + preserve -= 1 + + elif not preserve and type == "SpaceCharacters" and token["data"]: + # Test on token["data"] above to not introduce spaces where there were not + token["data"] = " " + + elif not preserve and type == "Characters": + token["data"] = collapse_spaces(token["data"]) + + yield token + + +def collapse_spaces(text): + return SPACES_REGEX.sub(' ', text) diff --git a/panda/python/Lib/site-packages/pip/_vendor/html5lib/html5parser.py b/panda/python/Lib/site-packages/pip/_vendor/html5lib/html5parser.py new file mode 100644 index 00000000..b28f46f2 --- /dev/null +++ b/panda/python/Lib/site-packages/pip/_vendor/html5lib/html5parser.py @@ -0,0 +1,2713 @@ +from __future__ import absolute_import, division, unicode_literals +from pip._vendor.six import with_metaclass + +import types + +from . import inputstream +from . import tokenizer + +from . import treebuilders +from .treebuilders._base import Marker + +from . import utils +from . 
import constants +from .constants import spaceCharacters, asciiUpper2Lower +from .constants import specialElements +from .constants import headingElements +from .constants import cdataElements, rcdataElements +from .constants import tokenTypes, ReparseException, namespaces +from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements +from .constants import adjustForeignAttributes as adjustForeignAttributesMap + + +def parse(doc, treebuilder="etree", encoding=None, + namespaceHTMLElements=True): + """Parse a string or file-like object into a tree""" + tb = treebuilders.getTreeBuilder(treebuilder) + p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) + return p.parse(doc, encoding=encoding) + + +def parseFragment(doc, container="div", treebuilder="etree", encoding=None, + namespaceHTMLElements=True): + tb = treebuilders.getTreeBuilder(treebuilder) + p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) + return p.parseFragment(doc, container=container, encoding=encoding) + + +def method_decorator_metaclass(function): + class Decorated(type): + def __new__(meta, classname, bases, classDict): + for attributeName, attribute in classDict.items(): + if isinstance(attribute, types.FunctionType): + attribute = function(attribute) + + classDict[attributeName] = attribute + return type.__new__(meta, classname, bases, classDict) + return Decorated + + +class HTMLParser(object): + """HTML parser. Generates a tree structure from a stream of (possibly + malformed) HTML""" + + def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer, + strict=False, namespaceHTMLElements=True, debug=False): + """ + strict - raise an exception when a parse error is encountered + + tree - a treebuilder class controlling the type of tree that will be + returned. Built in treebuilders can be accessed through + html5lib.treebuilders.getTreeBuilder(treeType) + + tokenizer - a class that provides a stream of tokens to the treebuilder. 
+ This may be replaced for e.g. a sanitizer which converts some tags to + text + """ + + # Raise an exception on the first error encountered + self.strict = strict + + if tree is None: + tree = treebuilders.getTreeBuilder("etree") + self.tree = tree(namespaceHTMLElements) + self.tokenizer_class = tokenizer + self.errors = [] + + self.phases = dict([(name, cls(self, self.tree)) for name, cls in + getPhases(debug).items()]) + + def _parse(self, stream, innerHTML=False, container="div", + encoding=None, parseMeta=True, useChardet=True, **kwargs): + + self.innerHTMLMode = innerHTML + self.container = container + self.tokenizer = self.tokenizer_class(stream, encoding=encoding, + parseMeta=parseMeta, + useChardet=useChardet, + parser=self, **kwargs) + self.reset() + + while True: + try: + self.mainLoop() + break + except ReparseException: + self.reset() + + def reset(self): + self.tree.reset() + self.firstStartTag = False + self.errors = [] + self.log = [] # only used with debug mode + # "quirks" / "limited quirks" / "no quirks" + self.compatMode = "no quirks" + + if self.innerHTMLMode: + self.innerHTML = self.container.lower() + + if self.innerHTML in cdataElements: + self.tokenizer.state = self.tokenizer.rcdataState + elif self.innerHTML in rcdataElements: + self.tokenizer.state = self.tokenizer.rawtextState + elif self.innerHTML == 'plaintext': + self.tokenizer.state = self.tokenizer.plaintextState + else: + # state already is data state + # self.tokenizer.state = self.tokenizer.dataState + pass + self.phase = self.phases["beforeHtml"] + self.phase.insertHtmlElement() + self.resetInsertionMode() + else: + self.innerHTML = False + self.phase = self.phases["initial"] + + self.lastPhase = None + + self.beforeRCDataPhase = None + + self.framesetOK = True + + def isHTMLIntegrationPoint(self, element): + if (element.name == "annotation-xml" and + element.namespace == namespaces["mathml"]): + return ("encoding" in element.attributes and + 
element.attributes["encoding"].translate( + asciiUpper2Lower) in + ("text/html", "application/xhtml+xml")) + else: + return (element.namespace, element.name) in htmlIntegrationPointElements + + def isMathMLTextIntegrationPoint(self, element): + return (element.namespace, element.name) in mathmlTextIntegrationPointElements + + def mainLoop(self): + CharactersToken = tokenTypes["Characters"] + SpaceCharactersToken = tokenTypes["SpaceCharacters"] + StartTagToken = tokenTypes["StartTag"] + EndTagToken = tokenTypes["EndTag"] + CommentToken = tokenTypes["Comment"] + DoctypeToken = tokenTypes["Doctype"] + ParseErrorToken = tokenTypes["ParseError"] + + for token in self.normalizedTokens(): + new_token = token + while new_token is not None: + currentNode = self.tree.openElements[-1] if self.tree.openElements else None + currentNodeNamespace = currentNode.namespace if currentNode else None + currentNodeName = currentNode.name if currentNode else None + + type = new_token["type"] + + if type == ParseErrorToken: + self.parseError(new_token["data"], new_token.get("datavars", {})) + new_token = None + else: + if (len(self.tree.openElements) == 0 or + currentNodeNamespace == self.tree.defaultNamespace or + (self.isMathMLTextIntegrationPoint(currentNode) and + ((type == StartTagToken and + token["name"] not in frozenset(["mglyph", "malignmark"])) or + type in (CharactersToken, SpaceCharactersToken))) or + (currentNodeNamespace == namespaces["mathml"] and + currentNodeName == "annotation-xml" and + token["name"] == "svg") or + (self.isHTMLIntegrationPoint(currentNode) and + type in (StartTagToken, CharactersToken, SpaceCharactersToken))): + phase = self.phase + else: + phase = self.phases["inForeignContent"] + + if type == CharactersToken: + new_token = phase.processCharacters(new_token) + elif type == SpaceCharactersToken: + new_token = phase.processSpaceCharacters(new_token) + elif type == StartTagToken: + new_token = phase.processStartTag(new_token) + elif type == EndTagToken: + 
new_token = phase.processEndTag(new_token) + elif type == CommentToken: + new_token = phase.processComment(new_token) + elif type == DoctypeToken: + new_token = phase.processDoctype(new_token) + + if (type == StartTagToken and token["selfClosing"] + and not token["selfClosingAcknowledged"]): + self.parseError("non-void-element-with-trailing-solidus", + {"name": token["name"]}) + + # When the loop finishes it's EOF + reprocess = True + phases = [] + while reprocess: + phases.append(self.phase) + reprocess = self.phase.processEOF() + if reprocess: + assert self.phase not in phases + + def normalizedTokens(self): + for token in self.tokenizer: + yield self.normalizeToken(token) + + def parse(self, stream, encoding=None, parseMeta=True, useChardet=True): + """Parse a HTML document into a well-formed tree + + stream - a filelike object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + """ + self._parse(stream, innerHTML=False, encoding=encoding, + parseMeta=parseMeta, useChardet=useChardet) + return self.tree.getDocument() + + def parseFragment(self, stream, container="div", encoding=None, + parseMeta=False, useChardet=True): + """Parse a HTML fragment into a well-formed tree fragment + + container - name of the element we're setting the innerHTML property + if set to None, default to 'div' + + stream - a filelike object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. 
If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + """ + self._parse(stream, True, container=container, encoding=encoding) + return self.tree.getFragment() + + def parseError(self, errorcode="XXX-undefined-error", datavars={}): + # XXX The idea is to make errorcode mandatory. + self.errors.append((self.tokenizer.stream.position(), errorcode, datavars)) + if self.strict: + raise ParseError + + def normalizeToken(self, token): + """ HTML5 specific normalizations to the token stream """ + + if token["type"] == tokenTypes["StartTag"]: + token["data"] = dict(token["data"][::-1]) + + return token + + def adjustMathMLAttributes(self, token): + replacements = {"definitionurl": "definitionURL"} + for k, v in replacements.items(): + if k in token["data"]: + token["data"][v] = token["data"][k] + del token["data"][k] + + def adjustSVGAttributes(self, token): + replacements = { + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + 
"patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan" + } + for originalName in list(token["data"].keys()): + if originalName in replacements: + svgName = replacements[originalName] + token["data"][svgName] = token["data"][originalName] + del token["data"][originalName] + + def adjustForeignAttributes(self, token): + replacements = adjustForeignAttributesMap + + for originalName in token["data"].keys(): + if originalName in replacements: + foreignName = replacements[originalName] + token["data"][foreignName] = token["data"][originalName] + del token["data"][originalName] + + def reparseTokenNormal(self, token): + self.parser.phase() + + def resetInsertionMode(self): + # The name of this method is mostly historical. (It's also used in the + # specification.) 
+ last = False + newModes = { + "select": "inSelect", + "td": "inCell", + "th": "inCell", + "tr": "inRow", + "tbody": "inTableBody", + "thead": "inTableBody", + "tfoot": "inTableBody", + "caption": "inCaption", + "colgroup": "inColumnGroup", + "table": "inTable", + "head": "inBody", + "body": "inBody", + "frameset": "inFrameset", + "html": "beforeHead" + } + for node in self.tree.openElements[::-1]: + nodeName = node.name + new_phase = None + if node == self.tree.openElements[0]: + assert self.innerHTML + last = True + nodeName = self.innerHTML + # Check for conditions that should only happen in the innerHTML + # case + if nodeName in ("select", "colgroup", "head", "html"): + assert self.innerHTML + + if not last and node.namespace != self.tree.defaultNamespace: + continue + + if nodeName in newModes: + new_phase = self.phases[newModes[nodeName]] + break + elif last: + new_phase = self.phases["inBody"] + break + + self.phase = new_phase + + def parseRCDataRawtext(self, token, contentType): + """Generic RCDATA/RAWTEXT Parsing algorithm + contentType - RCDATA or RAWTEXT + """ + assert contentType in ("RAWTEXT", "RCDATA") + + self.tree.insertElement(token) + + if contentType == "RAWTEXT": + self.tokenizer.state = self.tokenizer.rawtextState + else: + self.tokenizer.state = self.tokenizer.rcdataState + + self.originalPhase = self.phase + + self.phase = self.phases["text"] + + +def getPhases(debug): + def log(function): + """Logger that records which phase processes each token""" + type_names = dict((value, key) for key, value in + constants.tokenTypes.items()) + + def wrapped(self, *args, **kwargs): + if function.__name__.startswith("process") and len(args) > 0: + token = args[0] + try: + info = {"type": type_names[token['type']]} + except: + raise + if token['type'] in constants.tagTokenTypes: + info["name"] = token['name'] + + self.parser.log.append((self.parser.tokenizer.state.__name__, + self.parser.phase.__class__.__name__, + self.__class__.__name__, + 
function.__name__, + info)) + return function(self, *args, **kwargs) + else: + return function(self, *args, **kwargs) + return wrapped + + def getMetaclass(use_metaclass, metaclass_func): + if use_metaclass: + return method_decorator_metaclass(metaclass_func) + else: + return type + + class Phase(with_metaclass(getMetaclass(debug, log))): + """Base class for helper object that implements each phase of processing + """ + + def __init__(self, parser, tree): + self.parser = parser + self.tree = tree + + def processEOF(self): + raise NotImplementedError + + def processComment(self, token): + # For most phases the following is correct. Where it's not it will be + # overridden. + self.tree.insertComment(token, self.tree.openElements[-1]) + + def processDoctype(self, token): + self.parser.parseError("unexpected-doctype") + + def processCharacters(self, token): + self.tree.insertText(token["data"]) + + def processSpaceCharacters(self, token): + self.tree.insertText(token["data"]) + + def processStartTag(self, token): + return self.startTagHandler[token["name"]](token) + + def startTagHtml(self, token): + if not self.parser.firstStartTag and token["name"] == "html": + self.parser.parseError("non-html-root") + # XXX Need a check here to see if the first start tag token emitted is + # this token... If it's not, invoke self.parser.parseError(). 
+ for attr, value in token["data"].items(): + if attr not in self.tree.openElements[0].attributes: + self.tree.openElements[0].attributes[attr] = value + self.parser.firstStartTag = False + + def processEndTag(self, token): + return self.endTagHandler[token["name"]](token) + + class InitialPhase(Phase): + def processSpaceCharacters(self, token): + pass + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + correct = token["correct"] + + if (name != "html" or publicId is not None or + systemId is not None and systemId != "about:legacy-compat"): + self.parser.parseError("unknown-doctype") + + if publicId is None: + publicId = "" + + self.tree.insertDoctype(token) + + if publicId != "": + publicId = publicId.translate(asciiUpper2Lower) + + if (not correct or token["name"] != "html" + or publicId.startswith( + ("+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 
2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//")) + or publicId in + ("-//w3o//dtd w3 html strict 3.0//en//", + "-/w3c/dtd html 4.0 transitional/en", + "html") + or publicId.startswith( + ("-//w3c//dtd html 4.01 frameset//", + "-//w3c//dtd html 4.01 transitional//")) and + systemId is None + or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): + self.parser.compatMode = "quirks" + elif (publicId.startswith( + ("-//w3c//dtd xhtml 1.0 frameset//", + "-//w3c//dtd xhtml 1.0 transitional//")) + or publicId.startswith( + ("-//w3c//dtd html 4.01 frameset//", + "-//w3c//dtd html 4.01 transitional//")) and + systemId is not None): + self.parser.compatMode = "limited quirks" + + 
self.parser.phase = self.parser.phases["beforeHtml"] + + def anythingElse(self): + self.parser.compatMode = "quirks" + self.parser.phase = self.parser.phases["beforeHtml"] + + def processCharacters(self, token): + self.parser.parseError("expected-doctype-but-got-chars") + self.anythingElse() + return token + + def processStartTag(self, token): + self.parser.parseError("expected-doctype-but-got-start-tag", + {"name": token["name"]}) + self.anythingElse() + return token + + def processEndTag(self, token): + self.parser.parseError("expected-doctype-but-got-end-tag", + {"name": token["name"]}) + self.anythingElse() + return token + + def processEOF(self): + self.parser.parseError("expected-doctype-but-got-eof") + self.anythingElse() + return True + + class BeforeHtmlPhase(Phase): + # helper methods + def insertHtmlElement(self): + self.tree.insertRoot(impliedTagToken("html", "StartTag")) + self.parser.phase = self.parser.phases["beforeHead"] + + # other + def processEOF(self): + self.insertHtmlElement() + return True + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processSpaceCharacters(self, token): + pass + + def processCharacters(self, token): + self.insertHtmlElement() + return token + + def processStartTag(self, token): + if token["name"] == "html": + self.parser.firstStartTag = True + self.insertHtmlElement() + return token + + def processEndTag(self, token): + if token["name"] not in ("head", "body", "html", "br"): + self.parser.parseError("unexpected-end-tag-before-html", + {"name": token["name"]}) + else: + self.insertHtmlElement() + return token + + class BeforeHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + (("head", "body", "html", "br"), 
self.endTagImplyHead) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.startTagHead(impliedTagToken("head", "StartTag")) + return True + + def processSpaceCharacters(self, token): + pass + + def processCharacters(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagHead(self, token): + self.tree.insertElement(token) + self.tree.headPointer = self.tree.openElements[-1] + self.parser.phase = self.parser.phases["inHead"] + + def startTagOther(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def endTagImplyHead(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def endTagOther(self, token): + self.parser.parseError("end-tag-after-implied-root", + {"name": token["name"]}) + + class InHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("title", self.startTagTitle), + (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle), + ("script", self.startTagScript), + (("base", "basefont", "bgsound", "command", "link"), + self.startTagBaseLinkCommand), + ("meta", self.startTagMeta), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + + self. 
endTagHandler = utils.MethodDispatcher([ + ("head", self.endTagHead), + (("br", "html", "body"), self.endTagHtmlBodyBr) + ]) + self.endTagHandler.default = self.endTagOther + + # the real thing + def processEOF(self): + self.anythingElse() + return True + + def processCharacters(self, token): + self.anythingElse() + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagHead(self, token): + self.parser.parseError("two-heads-are-not-better-than-one") + + def startTagBaseLinkCommand(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def startTagMeta(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + attributes = token["data"] + if self.parser.tokenizer.stream.charEncoding[1] == "tentative": + if "charset" in attributes: + self.parser.tokenizer.stream.changeEncoding(attributes["charset"]) + elif ("content" in attributes and + "http-equiv" in attributes and + attributes["http-equiv"].lower() == "content-type"): + # Encoding it as UTF-8 here is a hack, as really we should pass + # the abstract Unicode string, and just use the + # ContentAttrParser on that, but using UTF-8 allows all chars + # to be encoded and as a ASCII-superset works. 
+ data = inputstream.EncodingBytes(attributes["content"].encode("utf-8")) + parser = inputstream.ContentAttrParser(data) + codec = parser.parse() + self.parser.tokenizer.stream.changeEncoding(codec) + + def startTagTitle(self, token): + self.parser.parseRCDataRawtext(token, "RCDATA") + + def startTagNoScriptNoFramesStyle(self, token): + # Need to decide whether to implement the scripting-disabled case + self.parser.parseRCDataRawtext(token, "RAWTEXT") + + def startTagScript(self, token): + self.tree.insertElement(token) + self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState + self.parser.originalPhase = self.parser.phase + self.parser.phase = self.parser.phases["text"] + + def startTagOther(self, token): + self.anythingElse() + return token + + def endTagHead(self, token): + node = self.parser.tree.openElements.pop() + assert node.name == "head", "Expected head got %s" % node.name + self.parser.phase = self.parser.phases["afterHead"] + + def endTagHtmlBodyBr(self, token): + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def anythingElse(self): + self.endTagHead(impliedTagToken("head")) + + # XXX If we implement a parser for which scripting is disabled we need to + # implement this phase. 
+ # + # class InHeadNoScriptPhase(Phase): + class AfterHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("body", self.startTagBody), + ("frameset", self.startTagFrameset), + (("base", "basefont", "bgsound", "link", "meta", "noframes", "script", + "style", "title"), + self.startTagFromHead), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"), + self.endTagHtmlBodyBr)]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.anythingElse() + return True + + def processCharacters(self, token): + self.anythingElse() + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagBody(self, token): + self.parser.framesetOK = False + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inBody"] + + def startTagFrameset(self, token): + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inFrameset"] + + def startTagFromHead(self, token): + self.parser.parseError("unexpected-start-tag-out-of-my-head", + {"name": token["name"]}) + self.tree.openElements.append(self.tree.headPointer) + self.parser.phases["inHead"].processStartTag(token) + for node in self.tree.openElements[::-1]: + if node.name == "head": + self.tree.openElements.remove(node) + break + + def startTagHead(self, token): + self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) + + def startTagOther(self, token): + self.anythingElse() + return token + + def endTagHtmlBodyBr(self, token): + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def anythingElse(self): + self.tree.insertElement(impliedTagToken("body", "StartTag")) + self.parser.phase = 
self.parser.phases["inBody"] + self.parser.framesetOK = True + + class InBodyPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody + # the really-really-really-very crazy mode + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + # Keep a ref to this for special handling of whitespace in

            # --- tail of InBodyPhase.__init__: token-dispatch tables ---
            # Saved reference to the default whitespace handler; presumably
            # swapped out/restored by the pre/listing handling (the defining
            # comment is truncated in this view) — TODO confirm against
            # processSpaceCharactersDropNewline.
            self.processSpaceCharactersNonPre = self.processSpaceCharacters

            # Start-tag dispatch: maps tag name(s) -> bound handler method.
            # Unlisted names fall through to startTagOther (set below).
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("base", "basefont", "bgsound", "command", "link", "meta",
                  "noframes", "script", "style", "title"),
                 self.startTagProcessInHead),
                ("body", self.startTagBody),
                ("frameset", self.startTagFrameset),
                # NOTE(review): "details" appears twice in this tuple —
                # harmless (duplicate key maps to the same handler) but
                # likely an editing slip worth confirming upstream.
                (("address", "article", "aside", "blockquote", "center", "details",
                  "details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
                  "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
                  "section", "summary", "ul"),
                 self.startTagCloseP),
                (headingElements, self.startTagHeading),
                (("pre", "listing"), self.startTagPreListing),
                ("form", self.startTagForm),
                (("li", "dd", "dt"), self.startTagListItem),
                ("plaintext", self.startTagPlaintext),
                ("a", self.startTagA),
                (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
                  "strong", "tt", "u"), self.startTagFormatting),
                ("nobr", self.startTagNobr),
                ("button", self.startTagButton),
                (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
                ("xmp", self.startTagXmp),
                ("table", self.startTagTable),
                (("area", "br", "embed", "img", "keygen", "wbr"),
                 self.startTagVoidFormatting),
                (("param", "source", "track"), self.startTagParamSource),
                ("input", self.startTagInput),
                ("hr", self.startTagHr),
                ("image", self.startTagImage),
                ("isindex", self.startTagIsIndex),
                ("textarea", self.startTagTextarea),
                ("iframe", self.startTagIFrame),
                (("noembed", "noframes", "noscript"), self.startTagRawtext),
                ("select", self.startTagSelect),
                (("rp", "rt"), self.startTagRpRt),
                (("option", "optgroup"), self.startTagOpt),
                (("math"), self.startTagMath),
                (("svg"), self.startTagSvg),
                # Tags that only belong in table/frameset contexts: report
                # and ignore when they show up "in body".
                (("caption", "col", "colgroup", "frame", "head",
                  "tbody", "td", "tfoot", "th", "thead",
                  "tr"), self.startTagMisplaced)
            ])
            self.startTagHandler.default = self.startTagOther

            # End-tag dispatch: same shape as above; unlisted names fall
            # through to endTagOther.
            self.endTagHandler = utils.MethodDispatcher([
                ("body", self.endTagBody),
                ("html", self.endTagHtml),
                (("address", "article", "aside", "blockquote", "button", "center",
                  "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
                  "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
                  "section", "summary", "ul"), self.endTagBlock),
                ("form", self.endTagForm),
                ("p", self.endTagP),
                (("dd", "dt", "li"), self.endTagListItem),
                (headingElements, self.endTagHeading),
                (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
                  "strike", "strong", "tt", "u"), self.endTagFormatting),
                (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
                ("br", self.endTagBr),
            ])
            self.endTagHandler.default = self.endTagOther
+
def isMatchingFormattingElement(self, node1, node2):
    """Return True when the two elements are equivalent for the purposes
    of the active-formatting-elements list: identical tag name, identical
    namespace, and an identical attribute set."""
    if (node1.name, node1.namespace) != (node2.name, node2.namespace):
        return False
    if len(node1.attributes) != len(node2.attributes):
        return False
    # Same length, so comparing the sorted (name, value) pairs decides it.
    return sorted(node1.attributes.items()) == sorted(node2.attributes.items())
+
# helper
def addFormattingElement(self, token):
    """Insert the element for *token* and record it on the active
    formatting elements list, enforcing the "Noah's Ark" clause: no more
    than three equivalent entries since the most recent Marker."""
    self.tree.insertElement(token)
    newElement = self.tree.openElements[-1]

    # Collect equivalent entries, scanning newest-first and stopping at
    # the last Marker boundary.
    duplicates = []
    for candidate in reversed(self.tree.activeFormattingElements):
        if candidate is Marker:
            break
        if self.isMatchingFormattingElement(candidate, newElement):
            duplicates.append(candidate)

    assert len(duplicates) <= 3
    if len(duplicates) == 3:
        # duplicates[-1] is the *earliest* equivalent entry (the scan ran
        # newest-first), so the oldest duplicate is the one evicted.
        self.tree.activeFormattingElements.remove(duplicates[-1])
    self.tree.activeFormattingElements.append(newElement)
+
# the real deal
def processEOF(self):
    """Handle end-of-file while in the "in body" insertion mode.

    Elements that implicitly self-close (list items, table sections,
    p, body, html) may legitimately still be open at EOF; anything else
    on the open-elements stack means a closing tag went missing, which
    is reported once and then parsing simply stops.
    """
    permitted = frozenset(("dd", "dt", "li", "p", "tbody", "td",
                           "tfoot", "th", "thead", "tr", "body",
                           "html"))
    for node in reversed(self.tree.openElements):
        if node.name not in permitted:
            self.parser.parseError("expected-closing-tag-but-got-eof")
            break
    # Stop parsing
+
+        def processSpaceCharactersDropNewline(self, token):
+            # Sometimes (start of 
, , and